repo_name
stringlengths
5
100
path
stringlengths
4
294
copies
stringclasses
990 values
size
stringlengths
4
7
content
stringlengths
666
1M
license
stringclasses
15 values
uber/pyro
tests/contrib/funsor/test_tmc.py
1
6347
# Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0

import logging
import math

import pytest
import torch
from torch.autograd import grad
from torch.distributions import constraints

from tests.common import assert_equal

# put all funsor-related imports here, so test collection works without funsor
try:
    import funsor
    import pyro.contrib.funsor
    from pyroapi import distributions as dist
    from pyroapi import infer, pyro, pyro_backend
    funsor.set_backend("torch")
except ImportError:
    pytestmark = pytest.mark.skip(reason="funsor is not installed")

logger = logging.getLogger(__name__)


@pytest.mark.parametrize("depth", [1, 2, 3, 4, 5])
@pytest.mark.parametrize("num_samples", [None, 200])
@pytest.mark.parametrize("max_plate_nesting", [2, 3])
@pytest.mark.parametrize("tmc_strategy", ["diagonal", "mixture"])
def test_tmc_categoricals(depth, max_plate_nesting, num_samples, tmc_strategy):
    # A chain of categorical latents inside a "local" plate, observed through a
    # Bernoulli likelihood inside a nested "data" plate.
    def model():
        x = pyro.sample("x0", dist.Categorical(pyro.param("q0")))
        with pyro.plate("local", 3):
            for i in range(1, depth):
                x = pyro.sample(
                    "x{}".format(i),
                    dist.Categorical(pyro.param("q{}".format(i))[..., x, :]))
            with pyro.plate("data", 4):
                pyro.sample("y", dist.Bernoulli(pyro.param("qy")[..., x]), obs=data)

    with pyro_backend("pyro"):
        # initialize the parameter store shared by both backends
        qs = [pyro.param("q0", torch.tensor([0.4, 0.6], requires_grad=True))]
        for i in range(1, depth):
            qs.append(pyro.param(
                "q{}".format(i),
                torch.randn(2, 2).abs().detach().requires_grad_(),
                constraint=constraints.simplex
            ))
        qs.append(pyro.param("qy", torch.tensor([0.75, 0.25], requires_grad=True)))
        qs = [q.unconstrained() for q in qs]
        data = (torch.rand(4, 3) > 0.5).to(dtype=qs[-1].dtype, device=qs[-1].device)

    # Reference loss/grads computed with the plain "pyro" backend.
    with pyro_backend("pyro"):
        elbo = infer.TraceTMC_ELBO(max_plate_nesting=max_plate_nesting)
        enum_model = infer.config_enumerate(
            model, default="parallel", expand=False,
            num_samples=num_samples, tmc=tmc_strategy)
        expected_loss = (-elbo.differentiable_loss(enum_model, lambda: None)).exp()
        expected_grads = grad(expected_loss, qs)

    # Same computation through the funsor-based backend.
    with pyro_backend("contrib.funsor"):
        tmc = infer.TraceTMC_ELBO(max_plate_nesting=max_plate_nesting)
        tmc_model = infer.config_enumerate(
            model, default="parallel", expand=False,
            num_samples=num_samples, tmc=tmc_strategy)
        actual_loss = (-tmc.differentiable_loss(tmc_model, lambda: None)).exp()
        actual_grads = grad(actual_loss, qs)

    prec = 0.05
    assert_equal(actual_loss, expected_loss, prec=prec, msg="".join([
        "\nexpected loss = {}".format(expected_loss),
        "\n  actual loss = {}".format(actual_loss),
    ]))

    for actual_grad, expected_grad in zip(actual_grads, expected_grads):
        assert_equal(actual_grad, expected_grad, prec=prec, msg="".join([
            "\nexpected grad = {}".format(expected_grad.detach().cpu().numpy()),
            "\n  actual grad = {}".format(actual_grad.detach().cpu().numpy()),
        ]))


@pytest.mark.parametrize("depth", [1, 2, 3, 4])
@pytest.mark.parametrize("num_samples,expand", [(400, False)])
@pytest.mark.parametrize("max_plate_nesting", [1])
@pytest.mark.parametrize("guide_type", ["prior", "factorized", "nonfactorized"])
@pytest.mark.parametrize("reparameterized", [False, True], ids=["dice", "pathwise"])
@pytest.mark.parametrize("tmc_strategy", ["diagonal", "mixture"])
def test_tmc_normals_chain_gradient(depth, num_samples, max_plate_nesting, expand,
                                    guide_type, reparameterized, tmc_strategy):
    # Gaussian chain of length `depth` with a single scalar parameter "q2".
    def model(reparameterized):
        Normal = dist.Normal if reparameterized else dist.testing.fakes.NonreparameterizedNormal
        x = pyro.sample("x0", Normal(pyro.param("q2"), math.sqrt(1. / depth)))
        for i in range(1, depth):
            x = pyro.sample("x{}".format(i), Normal(x, math.sqrt(1. / depth)))
        pyro.sample("y", Normal(x, 1.), obs=torch.tensor(float(1)))

    def factorized_guide(reparameterized):
        Normal = dist.Normal if reparameterized else dist.testing.fakes.NonreparameterizedNormal
        pyro.sample("x0", Normal(pyro.param("q2"), math.sqrt(1. / depth)))
        for i in range(1, depth):
            pyro.sample("x{}".format(i), Normal(0., math.sqrt(float(i + 1) / depth)))

    def nonfactorized_guide(reparameterized):
        Normal = dist.Normal if reparameterized else dist.testing.fakes.NonreparameterizedNormal
        x = pyro.sample("x0", Normal(pyro.param("q2"), math.sqrt(1. / depth)))
        for i in range(1, depth):
            x = pyro.sample("x{}".format(i), Normal(x, math.sqrt(1. / depth)))

    with pyro_backend("contrib.funsor"):
        # compare reparameterized and nonreparameterized gradient estimates
        q2 = pyro.param("q2", torch.tensor(0.5, requires_grad=True))
        qs = (q2.unconstrained(),)

        tmc = infer.TraceTMC_ELBO(max_plate_nesting=max_plate_nesting)
        tmc_model = infer.config_enumerate(
            model, default="parallel", expand=expand,
            num_samples=num_samples, tmc=tmc_strategy)
        guide = factorized_guide if guide_type == "factorized" else \
            nonfactorized_guide if guide_type == "nonfactorized" else \
            lambda *args: None
        tmc_guide = infer.config_enumerate(
            guide, default="parallel", expand=expand,
            num_samples=num_samples, tmc=tmc_strategy)

        # convert to linear space for unbiasedness
        actual_loss = (-tmc.differentiable_loss(tmc_model, tmc_guide, reparameterized)).exp()
        actual_grads = grad(actual_loss, qs)

    # gold values from Funsor
    expected_grads = (torch.tensor(
        {1: 0.0999, 2: 0.0860, 3: 0.0802, 4: 0.0771}[depth]
    ),)

    grad_prec = 0.05 if reparameterized else 0.1

    for actual_grad, expected_grad in zip(actual_grads, expected_grads):
        print(actual_loss)
        assert_equal(actual_grad, expected_grad, prec=grad_prec, msg="".join([
            "\nexpected grad = {}".format(expected_grad.detach().cpu().numpy()),
            "\n  actual grad = {}".format(actual_grad.detach().cpu().numpy()),
        ]))
apache-2.0
ArcherSys/ArcherSys
Lib/site-packages/notebook/terminal/api_handlers.py
6
1298
import json

from tornado import web, gen

from ..base.handlers import APIHandler, json_errors
from ..utils import url_path_join


class TerminalRootHandler(APIHandler):
    """REST collection handler for /api/terminals."""

    @web.authenticated
    @json_errors
    def get(self):
        # List all running terminals as [{'name': ...}, ...].
        manager = self.terminal_manager
        self.finish(json.dumps([{'name': name} for name in manager.terminals]))

    @web.authenticated
    @json_errors
    def post(self):
        """POST /terminals creates a new terminal and redirects to it"""
        name, _ = self.terminal_manager.new_named_terminal()
        self.finish(json.dumps({'name': name}))


class TerminalHandler(APIHandler):
    """REST item handler for /api/terminals/<name>."""

    SUPPORTED_METHODS = ('GET', 'DELETE')

    @web.authenticated
    @json_errors
    def get(self, name):
        # Echo the terminal model if it exists, otherwise 404.
        manager = self.terminal_manager
        if name not in manager.terminals:
            raise web.HTTPError(404, "Terminal not found: %r" % name)
        self.finish(json.dumps({'name': name}))

    @web.authenticated
    @json_errors
    @gen.coroutine
    def delete(self, name):
        # Force-terminate the named terminal; 204 on success, 404 if unknown.
        manager = self.terminal_manager
        if name not in manager.terminals:
            raise web.HTTPError(404, "Terminal not found: %r" % name)
        yield manager.terminate(name, force=True)
        self.set_status(204)
        self.finish()
mit
emmanuj/numi
numi.py
1
1737
#!/usr/bin/env python import timeit import subprocess import os import StringIO import json import urllib import time import sys, daemon #python daemon is an external library and needs to be installed class ChangDir: def __init__(self, path): self.currentPath = os.getcwd() #returns current working directory os.chdir(path) #return to the original directory def __del__(self): os.chdir(self.currentPath) def checkCommitsAndUpdate(gitpath, phpserverpath, branch="master"): dirchanger = ChangDir(gitpath) output = subprocess.check_output(["git", "log","-1"]) buf = StringIO.StringIO(output) commit_local = buf.readline() if("commit" in commit_local): commit_local = str(commit_local.split(" ")[1]).strip() print commit_local #retrieve the last push from github f = urllib.urlopen(phpserverpath) jsonObj = json.loads(f.read()) commit_remote = str(jsonObj['commits'][0]['id']).strip() print commit_remote if commit_remote != commit_local: output = subprocess.check_output(["git", "pull","origin", branch]) print output else: print "Already up-to-date" def main(argv): gitapth ='' phpserverpath='' if len(argv) != 3: print "Usage: numi.py <git local repo path> <php server path> [branch(optional)]" print "" else: gitpath = argv[1] phpserverpath = argv[2] while True: if len(argv) == 4: checkCommitsAndUpdate(gitpath, phpserverpath, argv[3]) else: checkCommitsAndUpdate(gitpath, phpserverpath) time.sleep(30) if __name__ == "__main__": #with daemon.DaemonContext(): main(sys.argv)
gpl-2.0
Kazade/NeHe-Website
google_appengine/lib/jinja2-2.6/examples/bench.py
75
10922
"""\ This benchmark compares some python templating engines with Jinja 2 so that we get a picture of how fast Jinja 2 is for a semi real world template. If a template engine is not installed the test is skipped.\ """ import sys import cgi from timeit import Timer from jinja2 import Environment as JinjaEnvironment context = { 'page_title': 'mitsuhiko\'s benchmark', 'table': [dict(a=1,b=2,c=3,d=4,e=5,f=6,g=7,h=8,i=9,j=10) for x in range(1000)] } jinja_template = JinjaEnvironment( line_statement_prefix='%', variable_start_string="${", variable_end_string="}" ).from_string("""\ <!doctype html> <html> <head> <title>${page_title|e}</title> </head> <body> <div class="header"> <h1>${page_title|e}</h1> </div> <ul class="navigation"> % for href, caption in [ ('index.html', 'Index'), ('downloads.html', 'Downloads'), ('products.html', 'Products') ] <li><a href="${href|e}">${caption|e}</a></li> % endfor </ul> <div class="table"> <table> % for row in table <tr> % for cell in row <td>${cell}</td> % endfor </tr> % endfor </table> </div> </body> </html>\ """) def test_jinja(): jinja_template.render(context) try: from tornado.template import Template except ImportError: test_tornado = None else: tornado_template = Template("""\ <!doctype html> <html> <head> <title>{{ page_title }}</title> </head> <body> <div class="header"> <h1>{{ page_title }}</h1> </div> <ul class="navigation"> {% for href, caption in [ \ ('index.html', 'Index'), \ ('downloads.html', 'Downloads'), \ ('products.html', 'Products') \ ] %} <li><a href="{{ href }}">{{ caption }}</a></li> {% end %} </ul> <div class="table"> <table> {% for row in table %} <tr> {% for cell in row %} <td>{{ cell }}</td> {% end %} </tr> {% end %} </table> </div> </body> </html>\ """) def test_tornado(): tornado_template.generate(**context) try: from django.conf import settings settings.configure() from django.template import Template as DjangoTemplate, Context as DjangoContext except ImportError: test_django = None else: django_template = 
DjangoTemplate("""\ <!doctype html> <html> <head> <title>{{ page_title }}</title> </head> <body> <div class="header"> <h1>{{ page_title }}</h1> </div> <ul class="navigation"> {% for href, caption in navigation %} <li><a href="{{ href }}">{{ caption }}</a></li> {% endfor %} </ul> <div class="table"> <table> {% for row in table %} <tr> {% for cell in row %} <td>{{ cell }}</td> {% endfor %} </tr> {% endfor %} </table> </div> </body> </html>\ """) def test_django(): c = DjangoContext(context) c['navigation'] = [('index.html', 'Index'), ('downloads.html', 'Downloads'), ('products.html', 'Products')] django_template.render(c) try: from mako.template import Template as MakoTemplate except ImportError: test_mako = None else: mako_template = MakoTemplate("""\ <!doctype html> <html> <head> <title>${page_title|h}</title> </head> <body> <div class="header"> <h1>${page_title|h}</h1> </div> <ul class="navigation"> % for href, caption in [('index.html', 'Index'), ('downloads.html', 'Downloads'), ('products.html', 'Products')]: <li><a href="${href|h}">${caption|h}</a></li> % endfor </ul> <div class="table"> <table> % for row in table: <tr> % for cell in row: <td>${cell}</td> % endfor </tr> % endfor </table> </div> </body> </html>\ """) def test_mako(): mako_template.render(**context) try: from genshi.template import MarkupTemplate as GenshiTemplate except ImportError: test_genshi = None else: genshi_template = GenshiTemplate("""\ <html xmlns="http://www.w3.org/1999/xhtml" xmlns:py="http://genshi.edgewall.org/"> <head> <title>${page_title}</title> </head> <body> <div class="header"> <h1>${page_title}</h1> </div> <ul class="navigation"> <li py:for="href, caption in [ ('index.html', 'Index'), ('downloads.html', 'Downloads'), ('products.html', 'Products')]"><a href="${href}">${caption}</a></li> </ul> <div class="table"> <table> <tr py:for="row in table"> <td py:for="cell in row">${cell}</td> </tr> </table> </div> </body> </html>\ """) def test_genshi(): 
genshi_template.generate(**context).render('html', strip_whitespace=False) try: from Cheetah.Template import Template as CheetahTemplate except ImportError: test_cheetah = None else: cheetah_template = CheetahTemplate("""\ #import cgi <!doctype html> <html> <head> <title>$cgi.escape($page_title)</title> </head> <body> <div class="header"> <h1>$cgi.escape($page_title)</h1> </div> <ul class="navigation"> #for $href, $caption in [('index.html', 'Index'), ('downloads.html', 'Downloads'), ('products.html', 'Products')]: <li><a href="$cgi.escape($href)">$cgi.escape($caption)</a></li> #end for </ul> <div class="table"> <table> #for $row in $table: <tr> #for $cell in $row: <td>$cell</td> #end for </tr> #end for </table> </div> </body> </html>\ """, searchList=[dict(context)]) def test_cheetah(): unicode(cheetah_template) try: import tenjin except ImportError: test_tenjin = None else: tenjin_template = tenjin.Template() tenjin_template.convert("""\ <!doctype html> <html> <head> <title>${page_title}</title> </head> <body> <div class="header"> <h1>${page_title}</h1> </div> <ul class="navigation"> <?py for href, caption in [('index.html', 'Index'), ('downloads.html', 'Downloads'), ('products.html', 'Products')]: ?> <li><a href="${href}">${caption}</a></li> <?py #end ?> </ul> <div class="table"> <table> <?py for row in table: ?> <tr> <?py for cell in row: ?> <td>#{cell}</td> <?py #end ?> </tr> <?py #end ?> </table> </div> </body> </html>\ """) def test_tenjin(): from tenjin.helpers import escape, to_str tenjin_template.render(context, locals()) try: from spitfire.compiler import util as SpitfireTemplate from spitfire.compiler.analyzer import o2_options as spitfire_optimizer except ImportError: test_spitfire = None else: spitfire_template = SpitfireTemplate.load_template("""\ <!doctype html> <html> <head> <title>$cgi.escape($page_title)</title> </head> <body> <div class="header"> <h1>$cgi.escape($page_title)</h1> </div> <ul class="navigation"> #for $href, $caption in 
[('index.html', 'Index'), ('downloads.html', 'Downloads'), ('products.html', 'Products')] <li><a href="$cgi.escape($href)">$cgi.escape($caption)</a></li> #end for </ul> <div class="table"> <table> #for $row in $table <tr> #for $cell in $row <td>$cell</td> #end for </tr> #end for </table> </div> </body> </html>\ """, 'spitfire_tmpl', spitfire_optimizer, {'enable_filters': False}) spitfire_context = dict(context, **{'cgi': cgi}) def test_spitfire(): spitfire_template(search_list=[spitfire_context]).main() try: from chameleon.zpt.template import PageTemplate except ImportError: test_chameleon = None else: chameleon_template = PageTemplate("""\ <html xmlns:tal="http://xml.zope.org/namespaces/tal"> <head> <title tal:content="page_title">Page Title</title> </head> <body> <div class="header"> <h1 tal:content="page_title">Page Title</h1> </div> <ul class="navigation"> <li tal:repeat="item sections"><a tal:attributes="href item[0]" tal:content="item[1]">caption</a></li> </ul> <div class="table"> <table> <tr tal:repeat="row table"> <td tal:repeat="cell row" tal:content="row[cell]">cell</td> </tr> </table> </div> </body> </html>\ """) chameleon_context = dict(context) chameleon_context['sections'] = [ ('index.html', 'Index'), ('downloads.html', 'Downloads'), ('products.html', 'Products') ] def test_chameleon(): chameleon_template.render(**chameleon_context) try: from chameleon.zpt.template import PageTemplate from chameleon.genshi import language except ImportError: test_chameleon_genshi = None else: chameleon_genshi_template = PageTemplate("""\ <html xmlns="http://www.w3.org/1999/xhtml" xmlns:py="http://genshi.edgewall.org/"> <head> <title>${page_title}</title> </head> <body> <div class="header"> <h1>${page_title}</h1> </div> <ul class="navigation"> <li py:for="info in sections"><a href="${info[0]}">${info[1]}</a></li> </ul> <div class="table"> <table> <tr py:for="row in table"> <td py:for="cell in row">${row[cell]}</td> </tr> </table> </div> </body> </html>\ """, 
parser=language.Parser()) chameleon_genshi_context = dict(context) chameleon_genshi_context['sections'] = [ ('index.html', 'Index'), ('downloads.html', 'Downloads'), ('products.html', 'Products') ] def test_chameleon_genshi(): chameleon_genshi_template.render(**chameleon_genshi_context) sys.stdout.write('\r' + '\n'.join(( '=' * 80, 'Template Engine BigTable Benchmark'.center(80), '=' * 80, __doc__, '-' * 80 )) + '\n') for test in 'jinja', 'mako', 'tornado', 'tenjin', 'spitfire', 'django', 'genshi', 'cheetah', 'chameleon', 'chameleon_genshi': if locals()['test_' + test] is None: sys.stdout.write(' %-20s*not installed*\n' % test) continue t = Timer(setup='from __main__ import test_%s as bench' % test, stmt='bench()') sys.stdout.write(' >> %-20s<running>' % test) sys.stdout.flush() sys.stdout.write('\r %-20s%.4f seconds\n' % (test, t.timeit(number=50) / 50)) sys.stdout.write('-' * 80 + '\n') sys.stdout.write('''\ WARNING: The results of this benchmark are useless to compare the performance of template engines and should not be taken seriously in any way. It's testing the performance of simple loops and has no real-world usefulnes. It only used to check if changes on the Jinja code affect performance in a good or bad way and how it roughly compares to others. ''' + '=' * 80 + '\n')
bsd-3-clause
OshynSong/scikit-learn
examples/linear_model/plot_sgd_weighted_samples.py
344
1458
""" ===================== SGD: Weighted samples ===================== Plot decision function of a weighted dataset, where the size of points is proportional to its weight. """ print(__doc__) import numpy as np import matplotlib.pyplot as plt from sklearn import linear_model # we create 20 points np.random.seed(0) X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)] y = [1] * 10 + [-1] * 10 sample_weight = 100 * np.abs(np.random.randn(20)) # and assign a bigger weight to the last 10 samples sample_weight[:10] *= 10 # plot the weighted data points xx, yy = np.meshgrid(np.linspace(-4, 5, 500), np.linspace(-4, 5, 500)) plt.figure() plt.scatter(X[:, 0], X[:, 1], c=y, s=sample_weight, alpha=0.9, cmap=plt.cm.bone) ## fit the unweighted model clf = linear_model.SGDClassifier(alpha=0.01, n_iter=100) clf.fit(X, y) Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()]) Z = Z.reshape(xx.shape) no_weights = plt.contour(xx, yy, Z, levels=[0], linestyles=['solid']) ## fit the weighted model clf = linear_model.SGDClassifier(alpha=0.01, n_iter=100) clf.fit(X, y, sample_weight=sample_weight) Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()]) Z = Z.reshape(xx.shape) samples_weights = plt.contour(xx, yy, Z, levels=[0], linestyles=['dashed']) plt.legend([no_weights.collections[0], samples_weights.collections[0]], ["no weights", "with weights"], loc="lower left") plt.xticks(()) plt.yticks(()) plt.show()
bsd-3-clause
mrosenstihl/PulsePrograms
autoHahnEcho/fit_T2.py
1
1457
#!/usr/bin/env python import numpy as N import pylab as P import sys import time import scipy.odr runs = 10000 def T2(p,x): amplitude, T2, beta = p return amplitude * N.exp(-(2*x/T2)**beta) def InvRec(p,x): amplitude, b, T1, beta = p return amplitude *( 1 - b * N.exp(-(x/T1)**beta)) def func(p,x,y): return T2(p,x) - y filename = sys.argv[1] try: picfile = sys.argv[2] except: picfile = None data = N.loadtxt(filename) x = data[:,0] y = data[:,1] n = len(y) p0 = [ N.abs(y).max(), x[(N.abs(y)/y.max()-2/N.e).argmin()], 1] print "Startparameter:",p0 odr_model = scipy.odr.Model(T2) odr_data = scipy.odr.Data(x=x,y=y) odr = scipy.odr.ODR(odr_data, odr_model, p0, ifixx=(0,)) odr.run() #res,covvar,misc,info,success = leastsq(func, p0, args=(x,y), full_output=1) #print "A: %.2f\nb: %.2f\n T1:%.2fs\nbeta: %.2f\n"%(amplitude, b, T1, beta) amplitude, T1, beta = odr.output.beta a_err, T1_err, beta_err = odr.output.sd_beta resultstring = " A : %8.2f +/- %4.2f \n T2 :%8.2f +/- %4.2f ms\n beta: %8.2f +/- %4.2f\n"%(amplitude, a_err, T1*1e3, T1_err*1e3, beta, beta_err) print resultstring P.semilogx(x,y,'bo',label='Data') xr = N.logspace(N.log10(x.min()),N.log10(x.max()),1024) P.semilogx(xr, T2(odr.output.beta,xr),'r-',label='Fit') P.ylim(y.min()*1.2,y.max()*1.5) P.xlabel('Time(tau)/s') P.ylabel('Signal/a.u.') P.text(0.05,0.7,resultstring,transform = P.gca().transAxes) P.legend() if not picfile == None: P.savefig(picfile) P.show()
bsd-2-clause
ingadhoc/odoo
addons/base_iban/base_iban.py
278
8657
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

import string

from openerp.osv import fields, osv
from openerp.tools.translate import _

# Reference examples of the IBAN layout, keyed by lowercase country code.
_ref_iban = {
    'al': 'ALkk BBBS SSSK CCCC CCCC CCCC CCCC',
    'ad': 'ADkk BBBB SSSS CCCC CCCC CCCC',
    'at': 'ATkk BBBB BCCC CCCC CCCC',
    'be': 'BEkk BBBC CCCC CCKK',
    'ba': 'BAkk BBBS SSCC CCCC CCKK',
    'bg': 'BGkk BBBB SSSS DDCC CCCC CC',
    'bh': 'BHkk BBBB SSSS SSSS SSSS SS',
    'cr': 'CRkk BBBC CCCC CCCC CCCC C',
    'hr': 'HRkk BBBB BBBC CCCC CCCC C',
    'cy': 'CYkk BBBS SSSS CCCC CCCC CCCC CCCC',
    'cz': 'CZkk BBBB SSSS SSCC CCCC CCCC',
    'dk': 'DKkk BBBB CCCC CCCC CC',
    'do': 'DOkk BBBB CCCC CCCC CCCC CCCC CCCC',
    'ee': 'EEkk BBSS CCCC CCCC CCCK',
    'fo': 'FOkk CCCC CCCC CCCC CC',
    'fi': 'FIkk BBBB BBCC CCCC CK',
    'fr': 'FRkk BBBB BGGG GGCC CCCC CCCC CKK',
    'ge': 'GEkk BBCC CCCC CCCC CCCC CC',
    'de': 'DEkk BBBB BBBB CCCC CCCC CC',
    'gi': 'GIkk BBBB CCCC CCCC CCCC CCC',
    'gr': 'GRkk BBBS SSSC CCCC CCCC CCCC CCC',
    'gl': 'GLkk BBBB CCCC CCCC CC',
    'hu': 'HUkk BBBS SSSC CCCC CCCC CCCC CCCC',
    'is': 'ISkk BBBB SSCC CCCC XXXX XXXX XX',
    'ie': 'IEkk BBBB SSSS SSCC CCCC CC',
    'il': 'ILkk BBBS SSCC CCCC CCCC CCC',
    'it': 'ITkk KBBB BBSS SSSC CCCC CCCC CCC',
    'kz': 'KZkk BBBC CCCC CCCC CCCC',
    'kw': 'KWkk BBBB CCCC CCCC CCCC CCCC CCCC CC',
    'lv': 'LVkk BBBB CCCC CCCC CCCC C',
    'lb': 'LBkk BBBB CCCC CCCC CCCC CCCC CCCC',
    'li': 'LIkk BBBB BCCC CCCC CCCC C',
    'lt': 'LTkk BBBB BCCC CCCC CCCC',
    'lu': 'LUkk BBBC CCCC CCCC CCCC',
    'mk': 'MKkk BBBC CCCC CCCC CKK',
    'mt': 'MTkk BBBB SSSS SCCC CCCC CCCC CCCC CCC',
    'mr': 'MRkk BBBB BSSS SSCC CCCC CCCC CKK',
    'mu': 'MUkk BBBB BBSS CCCC CCCC CCCC CCCC CC',
    'mc': 'MCkk BBBB BGGG GGCC CCCC CCCC CKK',
    'me': 'MEkk BBBC CCCC CCCC CCCC KK',
    'nl': 'NLkk BBBB CCCC CCCC CC',
    'no': 'NOkk BBBB CCCC CCK',
    'pl': 'PLkk BBBS SSSK CCCC CCCC CCCC CCCC',
    'pt': 'PTkk BBBB SSSS CCCC CCCC CCCK K',
    'ro': 'ROkk BBBB CCCC CCCC CCCC CCCC',
    'sm': 'SMkk KBBB BBSS SSSC CCCC CCCC CCC',
    'sa': 'SAkk BBCC CCCC CCCC CCCC CCCC',
    'rs': 'RSkk BBBC CCCC CCCC CCCC KK',
    'sk': 'SKkk BBBB SSSS SSCC CCCC CCCC',
    'si': 'SIkk BBSS SCCC CCCC CKK',
    'es': 'ESkk BBBB SSSS KKCC CCCC CCCC',
    'se': 'SEkk BBBB CCCC CCCC CCCC CCCC',
    'ch': 'CHkk BBBB BCCC CCCC CCCC C',
    'tn': 'TNkk BBSS SCCC CCCC CCCC CCCC',
    'tr': 'TRkk BBBB BRCC CCCC CCCC CCCC CC',
    'ae': 'AEkk BBBC CCCC CCCC CCCC CCC',
    'gb': 'GBkk BBBB SSSS SSCC CCCC CC',
}


def _format_iban(iban_str):
    '''
    This function removes all characters from given 'iban_str' that isn't a
    alpha numeric and converts it to upper case.
    '''
    if not iban_str:
        return ""
    return "".join(char.upper() for char in iban_str if char.isalnum())


def _pretty_iban(iban_str):
    "return iban_str in groups of four characters separated by a single space"
    return ' '.join(iban_str[pos:pos + 4] for pos in range(0, len(iban_str), 4))


class res_partner_bank(osv.osv):
    _inherit = "res.partner.bank"

    def create(self, cr, uid, vals, context=None):
        # Normalise IBAN-typed account numbers to the pretty grouped form.
        if vals.get('state', False) == 'iban' and vals.get('acc_number', False):
            vals['acc_number'] = _pretty_iban(_format_iban(vals['acc_number']))
        return super(res_partner_bank, self).create(cr, uid, vals, context)

    def write(self, cr, uid, ids, vals, context=None):
        # Same normalisation as create().
        if vals.get('state', False) == 'iban' and vals.get('acc_number', False):
            vals['acc_number'] = _pretty_iban(_format_iban(vals['acc_number']))
        return super(res_partner_bank, self).write(cr, uid, ids, vals, context)

    def is_iban_valid(self, cr, uid, iban, context=None):
        """ Check if IBAN is valid or not
            @param iban: IBAN as string
            @return: True if IBAN is valid, False otherwise
        """
        if not iban:
            return False
        iban = _format_iban(iban).lower()
        # Reject when the length differs from the country's reference layout.
        if iban[:2] in _ref_iban and len(iban) != len(_format_iban(_ref_iban[iban[:2]])):
            return False
        # The four first digits have to be shifted to the end.
        iban = iban[4:] + iban[:4]
        # Letters are mapped to numbers (a = 10, b = 11, ...).
        digits = "".join(str(ord(char) - 87) if char.isalpha() else char
                         for char in iban)
        # The IBAN is correct if the resulting number modulo 97 equals 1.
        return int(digits) % 97 == 1

    def check_iban(self, cr, uid, ids, context=None):
        ''' Check the IBAN number '''
        for bank_acc in self.browse(cr, uid, ids, context=context):
            if bank_acc.state != 'iban':
                continue
            if not self.is_iban_valid(cr, uid, bank_acc.acc_number, context=context):
                return False
        return True

    def _construct_constraint_msg(self, cr, uid, ids, context=None):
        # Build a user-facing message explaining why the IBAN constraint failed.
        def default_iban_check(iban_cn):
            return iban_cn and iban_cn[0] in string.ascii_lowercase and iban_cn[1] in string.ascii_lowercase
        iban_country = self.browse(cr, uid, ids)[0].acc_number and self.browse(cr, uid, ids)[0].acc_number[:2].lower()
        if default_iban_check(iban_country):
            if iban_country in _ref_iban:
                return _('The IBAN does not seem to be correct. You should have entered something like this %s'), \
                    ('%s \nWhere B = National bank code, S = Branch code,'
                     ' C = Account No, K = Check digit' % _ref_iban[iban_country])
            return _('This IBAN does not pass the validation check, please verify it'), ()
        return _('The IBAN is invalid, it should begin with the country code'), ()

    def _check_bank(self, cr, uid, ids, context=None):
        # IBAN-typed accounts must have a BIC/Swift code on their bank.
        for partner_bank in self.browse(cr, uid, ids, context=context):
            if partner_bank.state == 'iban' and not partner_bank.bank.bic:
                return False
        return True

    def get_bban_from_iban(self, cr, uid, ids, context=None):
        ''' This function returns the bank account number computed from the iban account number, thanks to the mapping_list dictionary that contains the rules associated to its country.
        '''
        mapping_list = {
            # TODO add rules for others countries
            'be': lambda x: x[4:],
            'fr': lambda x: x[14:],
            'ch': lambda x: x[9:],
            'gb': lambda x: x[14:],
        }
        res = {}
        for record in self.browse(cr, uid, ids, context=context):
            res[record.id] = False
            if not record.acc_number:
                continue
            for code, function in mapping_list.items():
                if record.acc_number.lower().startswith(code):
                    res[record.id] = function(record.acc_number)
                    break
        return res

    _columns = {
        # Deprecated: we keep it for backward compatibility, to be removed in v7
        # We use acc_number instead of IBAN since v6.1, but we keep this field
        # to not break community modules.
        'iban': fields.related('acc_number', string='IBAN', size=34, readonly=True,
                               help="International Bank Account Number", type="char"),
    }

    _constraints = [
        (check_iban, _construct_constraint_msg, ["iban", "acc_number", "state"]),
        (_check_bank, '\nPlease define BIC/Swift code on bank for bank type IBAN Account to make valid payments', ['bic'])
    ]

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
scorphus/django
django/contrib/contenttypes/fields.py
26
23881
from __future__ import unicode_literals from collections import defaultdict from django.contrib.contenttypes.models import ContentType from django.core import checks from django.core.exceptions import FieldDoesNotExist, ObjectDoesNotExist from django.db import DEFAULT_DB_ALIAS, connection, models, router, transaction from django.db.models import DO_NOTHING, signals from django.db.models.base import ModelBase from django.db.models.fields.related import ( ForeignObject, ForeignObjectRel, ForeignRelatedObjectsDescriptor, ) from django.db.models.query_utils import PathInfo from django.utils.encoding import python_2_unicode_compatible, smart_text from django.utils.functional import cached_property @python_2_unicode_compatible class GenericForeignKey(object): """ Provide a generic many-to-one relation through the ``content_type`` and ``object_id`` fields. This class also doubles as an accessor to the related object (similar to ReverseSingleRelatedObjectDescriptor) by adding itself as a model attribute. 
""" # Field flags auto_created = False concrete = False editable = False hidden = False is_relation = True many_to_many = False many_to_one = True one_to_many = False one_to_one = False related_model = None remote_field = None allow_unsaved_instance_assignment = False def __init__(self, ct_field='content_type', fk_field='object_id', for_concrete_model=True): self.ct_field = ct_field self.fk_field = fk_field self.for_concrete_model = for_concrete_model self.editable = False self.rel = None self.column = None def contribute_to_class(self, cls, name, **kwargs): self.name = name self.model = cls self.cache_attr = "_%s_cache" % name cls._meta.add_field(self, virtual=True) # Only run pre-initialization field assignment on non-abstract models if not cls._meta.abstract: signals.pre_init.connect(self.instance_pre_init, sender=cls) setattr(cls, name, self) def __str__(self): model = self.model app = model._meta.app_label return '%s.%s.%s' % (app, model._meta.object_name, self.name) def check(self, **kwargs): errors = [] errors.extend(self._check_field_name()) errors.extend(self._check_object_id_field()) errors.extend(self._check_content_type_field()) return errors def _check_field_name(self): if self.name.endswith("_"): return [ checks.Error( 'Field names must not end with an underscore.', hint=None, obj=self, id='fields.E001', ) ] else: return [] def _check_object_id_field(self): try: self.model._meta.get_field(self.fk_field) except FieldDoesNotExist: return [ checks.Error( "The GenericForeignKey object ID references the non-existent field '%s'." % self.fk_field, hint=None, obj=self, id='contenttypes.E001', ) ] else: return [] def _check_content_type_field(self): """ Check if field named `field_name` in model `model` exists and is a valid content_type field (is a ForeignKey to ContentType). 
""" try: field = self.model._meta.get_field(self.ct_field) except FieldDoesNotExist: return [ checks.Error( "The GenericForeignKey content type references the non-existent field '%s.%s'." % ( self.model._meta.object_name, self.ct_field ), hint=None, obj=self, id='contenttypes.E002', ) ] else: if not isinstance(field, models.ForeignKey): return [ checks.Error( "'%s.%s' is not a ForeignKey." % ( self.model._meta.object_name, self.ct_field ), hint=( "GenericForeignKeys must use a ForeignKey to " "'contenttypes.ContentType' as the 'content_type' field." ), obj=self, id='contenttypes.E003', ) ] elif field.remote_field.model != ContentType: return [ checks.Error( "'%s.%s' is not a ForeignKey to 'contenttypes.ContentType'." % ( self.model._meta.object_name, self.ct_field ), hint=( "GenericForeignKeys must use a ForeignKey to " "'contenttypes.ContentType' as the 'content_type' field." ), obj=self, id='contenttypes.E004', ) ] else: return [] def instance_pre_init(self, signal, sender, args, kwargs, **_kwargs): """ Handle initializing an object with the generic FK instead of content_type and object_id fields. """ if self.name in kwargs: value = kwargs.pop(self.name) if value is not None: kwargs[self.ct_field] = self.get_content_type(obj=value) kwargs[self.fk_field] = value._get_pk_val() else: kwargs[self.ct_field] = None kwargs[self.fk_field] = None def get_content_type(self, obj=None, id=None, using=None): if obj is not None: return ContentType.objects.db_manager(obj._state.db).get_for_model( obj, for_concrete_model=self.for_concrete_model) elif id is not None: return ContentType.objects.db_manager(using).get_for_id(id) else: # This should never happen. I love comments like this, don't you? 
raise Exception("Impossible arguments to GFK.get_content_type!") def get_prefetch_queryset(self, instances, queryset=None): if queryset is not None: raise ValueError("Custom queryset can't be used for this lookup.") # For efficiency, group the instances by content type and then do one # query per model fk_dict = defaultdict(set) # We need one instance for each group in order to get the right db: instance_dict = {} ct_attname = self.model._meta.get_field(self.ct_field).get_attname() for instance in instances: # We avoid looking for values if either ct_id or fkey value is None ct_id = getattr(instance, ct_attname) if ct_id is not None: fk_val = getattr(instance, self.fk_field) if fk_val is not None: fk_dict[ct_id].add(fk_val) instance_dict[ct_id] = instance ret_val = [] for ct_id, fkeys in fk_dict.items(): instance = instance_dict[ct_id] ct = self.get_content_type(id=ct_id, using=instance._state.db) ret_val.extend(ct.get_all_objects_for_this_type(pk__in=fkeys)) # For doing the join in Python, we have to match both the FK val and the # content type, so we use a callable that returns a (fk, class) pair. def gfk_key(obj): ct_id = getattr(obj, ct_attname) if ct_id is None: return None else: model = self.get_content_type(id=ct_id, using=obj._state.db).model_class() return (model._meta.pk.get_prep_value(getattr(obj, self.fk_field)), model) return (ret_val, lambda obj: (obj._get_pk_val(), obj.__class__), gfk_key, True, self.cache_attr) def is_cached(self, instance): return hasattr(instance, self.cache_attr) def __get__(self, instance, instance_type=None): if instance is None: return self try: return getattr(instance, self.cache_attr) except AttributeError: rel_obj = None # Make sure to use ContentType.objects.get_for_id() to ensure that # lookups are cached (see ticket #5570). This takes more code than # the naive ``getattr(instance, self.ct_field)``, but has better # performance when dealing with GFKs in loops and such. 
f = self.model._meta.get_field(self.ct_field) ct_id = getattr(instance, f.get_attname(), None) if ct_id is not None: ct = self.get_content_type(id=ct_id, using=instance._state.db) try: rel_obj = ct.get_object_for_this_type(pk=getattr(instance, self.fk_field)) except ObjectDoesNotExist: pass setattr(instance, self.cache_attr, rel_obj) return rel_obj def __set__(self, instance, value): ct = None fk = None if value is not None: ct = self.get_content_type(obj=value) fk = value._get_pk_val() if not self.allow_unsaved_instance_assignment and fk is None: raise ValueError( 'Cannot assign "%r": "%s" instance isn\'t saved in the database.' % (value, value._meta.object_name) ) setattr(instance, self.ct_field, ct) setattr(instance, self.fk_field, fk) setattr(instance, self.cache_attr, value) class GenericRel(ForeignObjectRel): """ Used by GenericRelation to store information about the relation. """ def __init__(self, field, to, related_name=None, related_query_name=None, limit_choices_to=None): super(GenericRel, self).__init__( field, to, related_name=related_query_name or '+', related_query_name=related_query_name, limit_choices_to=limit_choices_to, on_delete=DO_NOTHING, ) class GenericRelation(ForeignObject): """ Provide a reverse to a relation created by a GenericForeignKey. """ # Field flags auto_created = False many_to_many = False many_to_one = False one_to_many = True one_to_one = False rel_class = GenericRel def __init__(self, to, object_id_field='object_id', content_type_field='content_type', for_concrete_model=True, related_query_name=None, limit_choices_to=None, **kwargs): kwargs['rel'] = self.rel_class( self, to, related_query_name=related_query_name, limit_choices_to=limit_choices_to, ) kwargs['blank'] = True kwargs['on_delete'] = models.CASCADE kwargs['editable'] = False kwargs['serialize'] = False # This construct is somewhat of an abuse of ForeignObject. This field # represents a relation from pk to object_id field. 
But, this relation # isn't direct, the join is generated reverse along foreign key. So, # the from_field is object_id field, to_field is pk because of the # reverse join. super(GenericRelation, self).__init__( to, from_fields=[object_id_field], to_fields=[], **kwargs) self.object_id_field_name = object_id_field self.content_type_field_name = content_type_field self.for_concrete_model = for_concrete_model def check(self, **kwargs): errors = super(GenericRelation, self).check(**kwargs) errors.extend(self._check_generic_foreign_key_existence()) return errors def _check_generic_foreign_key_existence(self): target = self.remote_field.model if isinstance(target, ModelBase): fields = target._meta.virtual_fields if any(isinstance(field, GenericForeignKey) and field.ct_field == self.content_type_field_name and field.fk_field == self.object_id_field_name for field in fields): return [] else: return [ checks.Error( ("The GenericRelation defines a relation with the model " "'%s.%s', but that model does not have a GenericForeignKey.") % ( target._meta.app_label, target._meta.object_name ), hint=None, obj=self, id='contenttypes.E004', ) ] else: return [] def resolve_related_fields(self): self.to_fields = [self.model._meta.pk.name] return [(self.remote_field.model._meta.get_field(self.object_id_field_name), self.model._meta.pk)] def get_path_info(self): opts = self.remote_field.model._meta target = opts.pk return [PathInfo(self.model._meta, opts, (target,), self.remote_field, True, False)] def get_reverse_path_info(self): opts = self.model._meta from_opts = self.remote_field.model._meta return [PathInfo(from_opts, opts, (opts.pk,), self, not self.unique, False)] def get_choices_default(self): return super(GenericRelation, self).get_choices(include_blank=False) def value_to_string(self, obj): qs = getattr(obj, self.name).all() return smart_text([instance._get_pk_val() for instance in qs]) def contribute_to_class(self, cls, name, **kwargs): kwargs['virtual_only'] = True 
super(GenericRelation, self).contribute_to_class(cls, name, **kwargs) self.model = cls setattr(cls, self.name, ReverseGenericRelatedObjectsDescriptor(self.remote_field)) def set_attributes_from_rel(self): pass def get_internal_type(self): return "ManyToManyField" def get_content_type(self): """ Return the content type associated with this field's model. """ return ContentType.objects.get_for_model(self.model, for_concrete_model=self.for_concrete_model) def get_extra_restriction(self, where_class, alias, remote_alias): field = self.remote_field.model._meta.get_field(self.content_type_field_name) contenttype_pk = self.get_content_type().pk cond = where_class() lookup = field.get_lookup('exact')(field.get_col(remote_alias), contenttype_pk) cond.add(lookup, 'AND') return cond def bulk_related_objects(self, objs, using=DEFAULT_DB_ALIAS): """ Return all objects related to ``objs`` via this ``GenericRelation``. """ return self.remote_field.model._base_manager.db_manager(using).filter(**{ "%s__pk" % self.content_type_field_name: ContentType.objects.db_manager(using).get_for_model( self.model, for_concrete_model=self.for_concrete_model).pk, "%s__in" % self.object_id_field_name: [obj.pk for obj in objs] }) class ReverseGenericRelatedObjectsDescriptor(ForeignRelatedObjectsDescriptor): """ Accessor to the related objects manager on the one-to-many relation created by GenericRelation. In the example:: class Post(Model): comments = GenericRelation(Comment) ``post.comments`` is a ReverseGenericRelatedObjectsDescriptor instance. """ @cached_property def related_manager_cls(self): return create_generic_related_manager( self.rel.model._default_manager.__class__, self.rel, ) def create_generic_related_manager(superclass, rel): """ Factory function to create a manager that subclasses another manager (generally the default manager of a given model) and adds behaviors specific to generic relations. 
""" class GenericRelatedObjectManager(superclass): def __init__(self, instance=None): super(GenericRelatedObjectManager, self).__init__() self.instance = instance self.model = rel.model content_type = ContentType.objects.db_manager(instance._state.db).get_for_model( instance, for_concrete_model=rel.field.for_concrete_model) self.content_type = content_type qn = connection.ops.quote_name join_cols = rel.field.get_joining_columns(reverse_join=True)[0] self.source_col_name = qn(join_cols[0]) self.target_col_name = qn(join_cols[1]) self.content_type_field_name = rel.field.content_type_field_name self.object_id_field_name = rel.field.object_id_field_name self.prefetch_cache_name = rel.field.attname self.pk_val = instance._get_pk_val() self.core_filters = { '%s__pk' % self.content_type_field_name: content_type.id, self.object_id_field_name: self.pk_val, } def __call__(self, **kwargs): # We use **kwargs rather than a kwarg argument to enforce the # `manager='manager_name'` syntax. manager = getattr(self.model, kwargs.pop('manager')) manager_class = create_generic_related_manager(manager.__class__, rel) return manager_class(instance=self.instance) do_not_call_in_templates = True def __str__(self): return repr(self) def get_queryset(self): try: return self.instance._prefetched_objects_cache[self.prefetch_cache_name] except (AttributeError, KeyError): db = self._db or router.db_for_read(self.model, instance=self.instance) return super(GenericRelatedObjectManager, self).get_queryset().using(db).filter(**self.core_filters) def get_prefetch_queryset(self, instances, queryset=None): if queryset is None: queryset = super(GenericRelatedObjectManager, self).get_queryset() queryset._add_hints(instance=instances[0]) queryset = queryset.using(queryset._db or self._db) query = { '%s__pk' % self.content_type_field_name: self.content_type.id, '%s__in' % self.object_id_field_name: set(obj._get_pk_val() for obj in instances) } # We (possibly) need to convert object IDs to the type of the # 
instances' PK in order to match up instances: object_id_converter = instances[0]._meta.pk.to_python return (queryset.filter(**query), lambda relobj: object_id_converter(getattr(relobj, self.object_id_field_name)), lambda obj: obj._get_pk_val(), False, self.prefetch_cache_name) def add(self, *objs, **kwargs): bulk = kwargs.pop('bulk', True) db = router.db_for_write(self.model, instance=self.instance) def check_and_update_obj(obj): if not isinstance(obj, self.model): raise TypeError("'%s' instance expected, got %r" % ( self.model._meta.object_name, obj )) setattr(obj, self.content_type_field_name, self.content_type) setattr(obj, self.object_id_field_name, self.pk_val) if bulk: pks = [] for obj in objs: if obj._state.adding or obj._state.db != db: raise ValueError( "%r instance isn't saved. Use bulk=False or save " "the object first. but must be." % obj ) check_and_update_obj(obj) pks.append(obj.pk) self.model._base_manager.using(db).filter(pk__in=pks).update(**{ self.content_type_field_name: self.content_type, self.object_id_field_name: self.pk_val, }) else: with transaction.atomic(using=db, savepoint=False): for obj in objs: check_and_update_obj(obj) obj.save() add.alters_data = True def remove(self, *objs, **kwargs): if not objs: return bulk = kwargs.pop('bulk', True) self._clear(self.filter(pk__in=[o.pk for o in objs]), bulk) remove.alters_data = True def clear(self, **kwargs): bulk = kwargs.pop('bulk', True) self._clear(self, bulk) clear.alters_data = True def _clear(self, queryset, bulk): db = router.db_for_write(self.model, instance=self.instance) queryset = queryset.using(db) if bulk: # `QuerySet.delete()` creates its own atomic block which # contains the `pre_delete` and `post_delete` signal handlers. 
queryset.delete() else: with transaction.atomic(using=db, savepoint=False): for obj in queryset: obj.delete() _clear.alters_data = True def set(self, objs, **kwargs): # Force evaluation of `objs` in case it's a queryset whose value # could be affected by `manager.clear()`. Refs #19816. objs = tuple(objs) bulk = kwargs.pop('bulk', True) clear = kwargs.pop('clear', False) db = router.db_for_write(self.model, instance=self.instance) with transaction.atomic(using=db, savepoint=False): if clear: self.clear() self.add(*objs, bulk=bulk) else: old_objs = set(self.using(db).all()) new_objs = [] for obj in objs: if obj in old_objs: old_objs.remove(obj) else: new_objs.append(obj) self.remove(*old_objs) self.add(*new_objs, bulk=bulk) set.alters_data = True def create(self, **kwargs): kwargs[self.content_type_field_name] = self.content_type kwargs[self.object_id_field_name] = self.pk_val db = router.db_for_write(self.model, instance=self.instance) return super(GenericRelatedObjectManager, self).using(db).create(**kwargs) create.alters_data = True def get_or_create(self, **kwargs): kwargs[self.content_type_field_name] = self.content_type kwargs[self.object_id_field_name] = self.pk_val db = router.db_for_write(self.model, instance=self.instance) return super(GenericRelatedObjectManager, self).using(db).get_or_create(**kwargs) get_or_create.alters_data = True def update_or_create(self, **kwargs): kwargs[self.content_type_field_name] = self.content_type kwargs[self.object_id_field_name] = self.pk_val db = router.db_for_write(self.model, instance=self.instance) return super(GenericRelatedObjectManager, self).using(db).update_or_create(**kwargs) update_or_create.alters_data = True return GenericRelatedObjectManager
bsd-3-clause
JoeriHermans/ml-scripts
scripts/adverserial-variational-optimization/avo.py
2
11733
# Adverserial Variational Optimization import math import numpy as np import random import sys import torch import torch.nn.functional as F from sklearn.utils import check_random_state from torch.autograd import Variable def main(): # Assume there exists some true parameterization. # Beam Energy = 43 Gev, and Fermi's Constant is 0.9 theta_true = [43.0, 0.9] # Assume there is an experiment drawing (real) samples from nature. p_r = real_experiment(theta_true, 100000) # Initialize the prior of theta, parameterized by a Gaussian. proposal = {'mu': [], 'sigma': []} # Check if a custom mu has been specified. if '--mu' in sys.argv: mu = sys.argv[sys.argv.index('--mu') + 1].split(",") mu = [float(e) for e in mu] proposal['mu'] = mu #proposal['sigma'] = [np.log(.1), np.log(.01)] proposal['sigma'] = [np.log(.1), np.log(.1)] else: # Add random beam energy. add_prior_beam_energy(proposal) # Add random Fermi constant. add_prior_fermi_constant(proposal) # Check if a custom sigma has been specified. if '--sigma' in sys.argv: sigma = sys.argv[sys.argv.index('--sigma') + 1].split(",") sigma = [np.log(float(e)) for e in sigma] proposal['sigma'] = sigma else: # Initialize default sigma. proposal['sigma'] = [np.log(.1), np.log(.1)] # Convert the proposal lists to PyTorch Tensors. proposal['mu'] = torch.FloatTensor(proposal['mu']) proposal['sigma'] = torch.FloatTensor(proposal['sigma']) # Inference on theta is done using a critic network in an adverserial setting. if '--sigmoid' in sys.argv: critic = CriticWithSigmoid(num_hidden=50) else: critic = Critic(num_hidden=50) # Obtain the batch size from the arguments. if '--batch-size' in sys.argv: batch_size = int(sys.argv[sys.argv.index('--batch-size') + 1]) else: batch_size = 256 # Check if the variables need to be normalized. if '--normalize' in sys.argv: proposal['mu'] = normalize(proposal['mu']) # Fit the proposal distribution to the real distribution using the critic. 
fit(proposal=proposal, p_r=p_r, critic=critic, theta_true=theta_true, batch_size=batch_size) # Display the current parameterization of the proposal distribution. print("\nProposal Distribution:") print(" - Beam Energy:") print(" mu: " + str(proposal['mu'][0])) print(" sigma: " + str(proposal['sigma'][0])) print(" - Fermi's Constant:") print(" mu: " + str(proposal['mu'][1])) print(" sigma: " + str(proposal['sigma'][1])) print("\nTrue Distribution:") print(" - Beam Energy: " + str(theta_true[0])) print(" - Fermi's Constant: " + str(theta_true[1])) def normalize(mu): min_mu = torch.FloatTensor([30, 0]) max_mu = torch.FloatTensor([60, 2]) if '--normalize' in sys.argv: mu = (mu - min_mu) / (max_mu - min_mu) return mu def denormalize(mu): min_mu = torch.FloatTensor([30, 0]) max_mu = torch.FloatTensor([60, 2]) if '--normalize' in sys.argv: mu = mu * (max_mu - min_mu) + min_mu return mu def fit(proposal, p_r, critic, theta_true, num_iterations=100000, batch_size=256): critic_optimizer = torch.optim.Adam(critic.parameters(), lr=0.01) for iteration in range(0, num_iterations): print("True Mu: " + str(theta_true)) print("Current Mu: " + str(denormalize(proposal['mu']))) print("Current Sigma: " + str(proposal['sigma'].exp())) # Fit the critic network. fit_critic(proposal, p_r, critic, critic_optimizer, batch_size=batch_size, num_critic_iterations=100) # Fit the proposal distribution. fit_proposal(proposal, p_r, critic, batch_size) def fit_critic(proposal, p_r, critic, optimizer, num_critic_iterations=4000, batch_size=256): # Generate the simulation data. x_g = sample_generated_data(proposal, batch_size) # Fit the critic optimally. for iteration in range(0, num_critic_iterations): # Fetch the real data. x_r = sample_real_data(p_r, batch_size) # Reset the gradients. critic.zero_grad() # Forward pass with real data. y_r = critic(x_r) # Forward pass with generated data. y_g = critic(x_g) # Obtain gradient penalty (GP). 
gp = compute_gradient_penalty(critic, x_r.data, x_g.data) # Compute the loss, and the accompanying gradients. loss = y_g - y_r + gp loss.mean().backward() optimizer.step() # Display the loss of the critic at the last step. print("Loss: " + str(loss.mean().data.numpy()[0])) def fit_proposal(proposal, p_r, critic, batch_size=256, gamma=5.0): gradient_u_mu = torch.FloatTensor([0, 0]) gradient_u_sigma = torch.FloatTensor([0, 0]) gradient_entropy_sigma = torch.FloatTensor([0, 0]) # Draw several thetas from the current proposal distribution. thetas = draw_gaussian(proposal, batch_size) # Compute the q-gradient for every theta. for theta in thetas: # Draw a sample from the simulator. x = torch.autograd.Variable(simulator(theta, 1)) likelihood_x = critic(x).mean().view(-1) mu = torch.autograd.Variable(proposal['mu'], requires_grad=True) sigma = torch.autograd.Variable(proposal['sigma'], requires_grad=True) # Compute the gradient of the Gaussian logpdf. theta = torch.autograd.Variable(normalize(theta), requires_grad=True) logpdf = gaussian_logpdf(mu, sigma, theta) logpdf.sum().backward() gradient_logpdf_mu = mu.grad.data gradient_logpdf_sigma = sigma.grad.data # Add the logpdf gradient to the current variational upperbound. gradient_u_mu += -likelihood_x.data * gradient_logpdf_mu gradient_u_sigma += -likelihood_x.data * gradient_logpdf_sigma # Compute the gradient of the entropy. sigma = torch.autograd.Variable(proposal['sigma'], requires_grad=True) differential_entropy = gaussian_differential_entropy(sigma) differential_entropy.sum().backward() gradient_entropy_sigma = sigma.grad.data # Compute the final adverserial gradient. gradient_u_mu = .01 * ((1. / batch_size) * gradient_u_mu) gradient_u_sigma = .01 * ((1. / batch_size) * gradient_u_sigma + gamma * gradient_entropy_sigma) # Apply the gradient to the proposal distribution. 
proposal['mu'] -= gradient_u_mu proposal['sigma'] -= gradient_u_sigma #proposal['sigma'] = proposal['sigma'].exp().log() + 0.01 def compute_gradient_penalty(critic, real, fake, l=5.0): # Compute x_hat and its output. epsilon = torch.rand(real.size()) x_hat = epsilon * real + ((1. - epsilon) * fake) x_hat = torch.autograd.Variable(x_hat, requires_grad=True) y_hat = critic(x_hat) # Compute the associated gradients. gradients = torch.autograd.grad(outputs=y_hat, inputs=x_hat, grad_outputs=torch.ones(y_hat.size()), create_graph=True, retain_graph=True, only_inputs=True)[0] # Prevent norm 0 causing NaN. gradients = gradients + 1e-16 # Compute the gradient penalty. gradient_penalty = l * ((gradients.norm(2, dim=1) - 1.) ** 2) return gradient_penalty def sample_real_data(p_r, batch_size=256): samples = torch.zeros((batch_size, 1)) num_samples_p_r = len(p_r) for index in range(0, batch_size): random_index = random.randint(0, num_samples_p_r - 1) samples[index, :] = p_r[random_index] return torch.autograd.Variable(samples, requires_grad=True) def sample_generated_data(proposal, batch_size=256): # Sample `batch_size` thetas according to our proposal distribution. thetas = draw_gaussian(proposal, batch_size) # Obtain the individual Gaussians. theta_beam_energy = thetas[:, 0] theta_fermi_constant = thetas[:, 1] # Sample according to the proposal distribution. samples = torch.zeros((batch_size, 1)) for sample_index, theta in enumerate(thetas): samples[sample_index, :] = simulator(theta, 1) return torch.autograd.Variable(samples, requires_grad=True) def gaussian_logpdf(mu, sigma, theta): #sigma = sigma.exp() #logpdf = -(sigma.log() + np.log((2. * np.pi) ** .5) + (theta - mu) ** 2 / (2. * sigma ** 2)) logpdf = -(sigma + np.log((2. * np.pi) ** .5) + (theta - mu) ** 2 / (2. * sigma.exp() ** 2)) return logpdf def gaussian_differential_entropy(sigma): #sigma = sigma.exp() #dentropy = (sigma.log() * (2. * np.pi * np.e) ** .5).log() dentropy = (sigma * (2. 
* np.pi * np.e) ** .5).log() return dentropy def add_prior_beam_energy(prior): g = random_gaussian(mu=[30, 60], sigma=1.0) add_prior(prior, g['mu'], g['sigma']) def add_prior_fermi_constant(prior): g = random_gaussian(mu=[0, 2], sigma=1.0) add_prior(prior, g['mu'], g['sigma']) def add_prior(prior, mu, sigma): prior['mu'].append(mu) prior['sigma'].append(sigma) def random_gaussian(mu=[-1, 1], sigma=5.0): return {'mu': np.random.uniform(mu[0], mu[1]), 'sigma': np.log(np.random.uniform(0.0, sigma))} def draw_gaussian(d, num_samples, random_state=None): num_parameters = len(d['mu']) thetas = torch.zeros((num_samples, num_parameters)) mu = denormalize(d['mu']) sigma = d['sigma'].exp() for i in range(0, num_samples): gaussian = torch.normal(mu, sigma) thetas[i, :] = gaussian return thetas def real_experiment(theta, n_samples): return simulator(theta, n_samples) def simulator(theta, n_samples, random_state=None): rng = check_random_state(random_state) samples = simulator_rej_sample_costheta(n_samples, theta, rng) return torch.from_numpy(samples.reshape(-1, 1)).float() def simulator_rej_sample_costheta(n_samples, theta, rng): sqrtshalf = theta[0] gf = theta[1] ntrials = 0 samples = [] x = torch.linspace(-1, 1, steps=1000) maxval = torch.max(simulator_diffxsec(x, sqrtshalf, gf)) while len(samples) < n_samples: ntrials = ntrials + 1 xprop = rng.uniform(-1, 1) ycut = rng.rand() yprop = (simulator_diffxsec(xprop, sqrtshalf, gf) / maxval)[0] if (yprop / maxval) < ycut: continue samples.append(xprop) return np.array(samples) def simulator_diffxsec(costheta, sqrtshalf, gf): norm = 2. * (1. + 1. / 3.) return ((1 + costheta ** 2) + simulator_a_fb(sqrtshalf, gf) * costheta) / norm def simulator_a_fb(sqrtshalf, gf): mz = 90 gf_nom = 0.9 sqrts = sqrtshalf * 2. 
x = torch.FloatTensor([(sqrts - mz) / mz * 10]) a_fb_en = torch.tanh(x) a_fb_gf = gf / gf_nom return 2 * a_fb_en * a_fb_gf class Critic(torch.nn.Module): def __init__(self, num_hidden): super(Critic, self).__init__() self.fc_1 = torch.nn.Linear(1, num_hidden) self.fc_2 = torch.nn.Linear(num_hidden, num_hidden) self.fc_3 = torch.nn.Linear(num_hidden, 1) def forward(self, x): x = F.relu(self.fc_1(x)) x = F.relu(self.fc_2(x)) x = (self.fc_3(x)) return x class CriticWithSigmoid(torch.nn.Module): def __init__(self, num_hidden): super(CriticWithSigmoid, self).__init__() self.fc_1 = torch.nn.Linear(1, num_hidden) self.fc_2 = torch.nn.Linear(num_hidden, num_hidden) self.fc_3 = torch.nn.Linear(num_hidden, 1) def forward(self, x): x = F.relu(self.fc_1(x)) x = F.relu(self.fc_2(x)) x = F.sigmoid(self.fc_3(x)) return x if __name__ == '__main__': main()
gpl-3.0
rbbratta/virt-test
libvirt/tests/src/virsh_cmd/domain/virsh_setvcpus.py
1
6257
import re, os, logging, commands
from autotest.client.shared import error
from virttest import remote, libvirt_vm, virsh, libvirt_xml
from xml.dom.minidom import parse


def run_virsh_setvcpus(test, params, env):
    """
    Test command: virsh setvcpus.

    The command can change the number of virtual CPUs in the guest domain.
    1.Prepare test environment,destroy or suspend a VM.
    2.Perform virsh setvcpus operation.
    3.Recover test environment.
    4.Confirm the test result.
    """
    vm_name = params.get("main_vm")
    vm = env.get_vm(vm_name)
    # Save the original domain XML so it can be restored at the end.
    xml_file = params.get("setvcpus_xml_file", "vm.xml")
    virsh.dumpxml(vm_name, extra="", to_file=xml_file)
    tmp_file = params.get("setvcpus_tmp_file", "tmp.xml")
    pre_vm_state = params.get("setvcpus_pre_vm_state")
    command = params.get("setvcpus_command", "setvcpus")
    options = params.get("setvcpus_options")
    domain = params.get("setvcpus_domain")
    count = params.get("setvcpus_count")
    extra_param = params.get("setvcpus_extra_param")
    count_option = "%s %s" % (count, extra_param)
    status_error = params.get("status_error")

    def get_current_vcpus():
        """
        Get current vcpu number from the dumped domain XML
        (the "current" attribute of the <vcpu> element).
        """
        vcpus_set = ""
        virsh.dumpxml(vm_name, extra="", to_file=tmp_file)
        dom = parse(tmp_file)
        root = dom.documentElement
        vcpus_2 = root.getElementsByTagName("vcpu")
        for n in vcpus_2:
            vcpus_set += n.getAttribute("current")
        vcpus_set = int(vcpus_set)
        dom.unlink()
        return vcpus_set

    # Redefine the guest with 2 vcpus and bring it up fresh.
    if vm.is_alive():
        vm.destroy()
    vm_xml = libvirt_xml.VMXML()
    vm_xml.set_vm_vcpus(vm_name, 2)
    vm.start()
    vm.wait_for_login()

    if status_error == "no":
        vcpus_new = len(vm.vcpuinfo())
    domid = vm.get_id()
    domuuid = vm.get_uuid()
    # Put the guest into the requested pre-test state.
    if pre_vm_state == "paused":
        vm.pause()
    elif pre_vm_state == "shut off":
        vm.destroy()

    if domain == "remote_name":
        # Run setvcpus through a remote virsh connection back to this host.
        remote_ssh_addr = params.get("remote_ip", None)
        remote_addr = params.get("local_ip", None)
        remote_password = params.get("remote_password", None)
        host_type = virsh.driver()
        if host_type == "qemu":
            remote_string = "qemu+ssh://%s/system" % remote_addr
        elif host_type == "xen":
            remote_string = "xen+ssh://%s" % remote_addr
        command = "virsh -c %s setvcpus %s 1 --live" % (remote_string, vm_name)
        # Fixed: compare to None with `is`, not `==` (PEP 8).
        if virsh.has_command_help_match(command, "--live") is None:
            status_error = "yes"
        session = remote.remote_login("ssh", remote_ssh_addr, "22", "root",
                                      remote_password, "#")
        session.cmd_output('LANG=C')
        status, output = session.cmd_status_output(command, internal_timeout=5)
        session.close()
        vcpus_current = len(vm.vcpuinfo())
    else:
        # Build the domain reference (name / id / uuid / literal string).
        if domain == "name":
            dom_option = vm_name
        elif domain == "id":
            dom_option = domid
            if params.get("setvcpus_hex_id") is not None:
                dom_option = hex(int(domid))
            elif params.get("setvcpus_invalid_id") is not None:
                dom_option = params.get("setvcpus_invalid_id")
        elif domain == "uuid":
            dom_option = domuuid
            if params.get("setvcpus_invalid_uuid") is not None:
                dom_option = params.get("setvcpus_invalid_uuid")
        else:
            dom_option = domain
        # If any requested option is unsupported, a failure is expected.
        option_list = options.split(" ")
        for item in option_list:
            if virsh.has_command_help_match(command, item) is None:
                status_error = "yes"
                break
        status = virsh.setvcpus(dom_option, count_option, options,
                                ignore_status=True).exit_status

    # Resume the guest so post-checks can log in / inspect it.
    if pre_vm_state == "paused":
        virsh.resume(vm_name, ignore_status=True)

    if status_error == "no":
        if status == 0:
            # Read back the effective vcpu count, depending on which
            # configuration layer the options targeted.
            if pre_vm_state == "shut off":
                if options == "--config":
                    vcpus_set = len(vm.vcpuinfo())
                elif options == "--current":
                    vcpus_set = get_current_vcpus()
                elif options == "--maximum --config":
                    vcpus_set = ""
                    dom = parse("/etc/libvirt/qemu/%s.xml" % vm_name)
                    vcpus_set = dom.getElementsByTagName("vcpu")[0].firstChild.data
                    vcpus_set = int(vcpus_set)
                    dom.unlink()
            else:
                vcpus_set = len(vm.vcpuinfo())
            if domain == "id":
                cmd_chk = "cat /etc/libvirt/qemu/%s.xml" % vm_name
                output1 = commands.getoutput(cmd_chk)
                logging.info("guest-info:\n%s" % output1)

    # Recover test environment: restore the saved XML and clean up files.
    virsh.destroy(vm_name)
    virsh.undefine(vm_name)
    virsh.define(xml_file)
    if os.path.exists(xml_file):
        os.remove(xml_file)
    if os.path.exists(tmp_file):
        os.remove(tmp_file)

    # check status_error
    if status_error == "yes":
        if status == 0:
            raise error.TestFail("Run successfully with wrong command!")
    else:
        if status != 0:
            raise error.TestFail("Run failed with right command")
        else:
            if options == "--maximum --config":
                if vcpus_set != 4:
                    raise error.TestFail("Run failed with right command1")
            elif domain == "id":
                if options == "--config":
                    if vcpus_set != vcpus_new or not re.search('<vcpu current=\'1\'>%s</vcpu>' % vcpus_new, output1):
                        raise error.TestFail("Run failed with right command2")
                elif options == "--config --live":
                    if vcpus_set != 1 or not re.search('<vcpu current=\'1\'>%s</vcpu>' % vcpus_new, output1):
                        raise error.TestFail("Run failed with right command3")
                else:
                    if vcpus_set != 1 or re.search('<vcpu current=\'1\'>%s</vcpu>' % vcpus_new, output1):
                        raise error.TestFail("Run failed with right command4")
            else:
                if vcpus_set != 1:
                    raise error.TestFail("Run failed with right command5")
gpl-2.0
pjg101/SickRage
lib/hachoir_parser/container/action_script.py
84
24565
""" SWF (Macromedia/Adobe Flash) file parser. Documentation: - Alexis' SWF Reference: http://www.m2osw.com/swf_alexref.html - Tamarin ABC format: http://www.m2osw.com/abc_format.html Authors: Sebastien Ponce, Robert Xiao Creation date: 26 April 2008 """ from hachoir_parser import Parser from hachoir_core.field import (FieldSet, ParserError, Bit, Bits, UInt8, UInt32, Int16, UInt16, Float32, Float64, CString, Enum, Bytes, RawBytes, NullBits, String, SubFile, Field) from hachoir_core.endian import LITTLE_ENDIAN, BIG_ENDIAN from hachoir_core.field.float import FloatExponent from struct import unpack class FlashPackedInteger(Bits): def __init__(self, parent, name, signed=False, nbits=30, description=None): Bits.__init__(self, parent, name, 8, description) stream = self._parent.stream addr = self.absolute_address size = 0 value = 0 mult = 1 while True: byte = stream.readBits(addr+size, 8, LITTLE_ENDIAN) value += mult * (byte & 0x7f) size += 8 mult <<= 7 if byte < 128: break self._size = size if signed and (1 << (nbits-1)) <= value: value -= (1 << nbits) self.createValue = lambda: value class FlashU30(FlashPackedInteger): def __init__(self, parent, name, description=None): FlashPackedInteger.__init__(self, parent, name, signed=False, nbits=30, description=description) class FlashS32(FlashPackedInteger): def __init__(self, parent, name, description=None): FlashPackedInteger.__init__(self, parent, name, signed=True, nbits=32, description=description) class FlashU32(FlashPackedInteger): def __init__(self, parent, name, description=None): FlashPackedInteger.__init__(self, parent, name, signed=False, nbits=32, description=description) class FlashFloat64(FieldSet): def createFields(self): yield Bits(self, "mantissa_high", 20) yield FloatExponent(self, "exponent", 11) yield Bit(self, "negative") yield Bits(self, "mantissa_low", 32) def createValue(self): # Manual computation: # mantissa = mantissa_high * 2^32 + mantissa_low # float = 2^exponent + (1 + mantissa / 2^52) # (and 
float is negative if negative=True) bytes = self.parent.stream.readBytes( self.absolute_address, self.size//8) # Mix bytes: xxxxyyyy <=> yyyyxxxx bytes = bytes[4:8] + bytes[0:4] return unpack('<d', bytes)[0] TYPE_INFO = { 0x00: (CString, "Cstring[]"), 0x01: (Float32, "Float[]"), 0x02: (None, "Null[]"), 0x03: (None, "Undefined[]"), 0x04: (UInt8, "Register[]"), 0x05: (UInt8, "Boolean[]"), 0x06: (FlashFloat64, "Double[]"), 0x07: (UInt32, "Integer[]"), 0x08: (UInt8, "Dictionary_Lookup_Index[]"), 0x09: (UInt16, "Large_Dictionary_Lookup_Index[]"), } def parseBranch(parent, size): yield Int16(parent, "offset") def parseDeclareFunction(parent, size): yield CString(parent, "name") argCount = UInt16(parent, "arg_count") yield argCount for i in range(argCount.value): yield CString(parent, "arg[]") yield UInt16(parent, "function_length") def parseDeclareFunctionV7(parent, size): yield CString(parent, "name") argCount = UInt16(parent, "arg_count") yield argCount yield UInt8(parent, "reg_count") yield Bits(parent, "reserved", 7) yield Bit(parent, "preload_global") yield Bit(parent, "preload_parent") yield Bit(parent, "preload_root") yield Bit(parent, "suppress_super") yield Bit(parent, "preload_super") yield Bit(parent, "suppress_arguments") yield Bit(parent, "preload_arguments") yield Bit(parent, "suppress_this") yield Bit(parent, "preload_this") for i in range(argCount.value): yield UInt8(parent, "register[]") yield CString(parent, "arg[]") yield UInt16(parent, "function_length") def parseTry(parent, size): yield Bits(parent, "reserved", 5) catchInReg = Bit(parent, "catch_in_register") yield catchInReg yield Bit(parent, "finally") yield Bit(parent, "catch") yield UInt8(parent, "try_size") yield UInt8(parent, "catch_size") yield UInt8(parent, "finally_size") if catchInReg.value: yield CString(parent, "name") else: yield UInt8(parent, "register") def parsePushData(parent, size): while not parent.eof: codeobj = UInt8(parent, "data_type[]") yield codeobj code = codeobj.value if 
code not in TYPE_INFO: raise ParserError("Unknown type in Push_Data : " + hex(code)) parser, name = TYPE_INFO[code] if parser: yield parser(parent, name) # else: # yield Field(parent, name, 0) def parseSetTarget(parent, size): yield CString(parent, "target") def parseWith(parent, size): yield UInt16(parent, "size") def parseGetURL(parent, size): yield CString(parent, "url") yield CString(parent, "target") def parseGetURL2(parent, size): yield UInt8(parent, "method") def parseGotoExpression(parent, size): yield UInt8(parent, "play") def parseGotoFrame(parent, size): yield UInt16(parent, "frame_no") def parseGotoLabel(parent, size): yield CString(parent, "label") def parseWaitForFrame(parent, size): yield UInt16(parent, "frame") yield UInt8(parent, "skip") def parseWaitForFrameDyn(parent, size): yield UInt8(parent, "skip") def parseDeclareDictionary(parent, size): count = UInt16(parent, "count") yield count for i in range(count.value): yield CString(parent, "dictionnary[]") def parseStoreRegister(parent, size): yield UInt8(parent, "register") def parseStrictMode(parent, size): yield UInt8(parent, "strict") class Instruction(FieldSet): ACTION_INFO = { 0x00: ("end[]", "End", None), 0x99: ("Branch_Always[]", "Branch Always", parseBranch), 0x9D: ("Branch_If_True[]", "Branch If True", parseBranch), 0x3D: ("Call_Function[]", "Call Function", None), 0x52: ("Call_Method[]", "Call Method", None), 0x9B: ("Declare_Function[]", "Declare Function", parseDeclareFunction), 0x8E: ("Declare_Function_V7[]", "Declare Function (V7)", parseDeclareFunctionV7), 0x3E: ("Return[]", "Return", None), 0x2A: ("Throw[]", "Throw", None), 0x8F: ("Try[]", "Try", parseTry), # Stack Control 0x4C: ("Duplicate[]", "Duplicate", None), 0x96: ("Push_Data[]", "Push Data", parsePushData), 0x4D: ("Swap[]", "Swap", None), # Action Script Context 0x8B: ("Set_Target[]", "Set Target", parseSetTarget), 0x20: ("Set_Target_dynamic[]", "Set Target (dynamic)", None), 0x94: ("With[]", "With", parseWith), # Movie 
Control 0x9E: ("Call_Frame[]", "Call Frame", None), 0x83: ("Get_URL[]", "Get URL", parseGetURL), 0x9A: ("Get_URL2[]", "Get URL2", parseGetURL2), 0x9F: ("Goto_Expression[]", "Goto Expression", parseGotoExpression), 0x81: ("Goto_Frame[]", "Goto Frame", parseGotoFrame), 0x8C: ("Goto_Label[]", "Goto Label", parseGotoLabel), 0x04: ("Next_Frame[]", "Next Frame", None), 0x06: ("Play[]", "Play", None), 0x05: ("Previous_Frame[]", "Previous Frame", None), 0x07: ("Stop[]", "Stop", None), 0x08: ("Toggle_Quality[]", "Toggle Quality", None), 0x8A: ("Wait_For_Frame[]", "Wait For Frame", parseWaitForFrame), 0x8D: ("Wait_For_Frame_dynamic[]", "Wait For Frame (dynamic)", parseWaitForFrameDyn), # Sound 0x09: ("Stop_Sound[]", "Stop Sound", None), # Arithmetic 0x0A: ("Add[]", "Add", None), 0x47: ("Add_typed[]", "Add (typed)", None), 0x51: ("Decrement[]", "Decrement", None), 0x0D: ("Divide[]", "Divide", None), 0x50: ("Increment[]", "Increment", None), 0x18: ("Integral_Part[]", "Integral Part", None), 0x3F: ("Modulo[]", "Modulo", None), 0x0C: ("Multiply[]", "Multiply", None), 0x4A: ("Number[]", "Number", None), 0x0B: ("Subtract[]", "Subtract", None), # Comparisons 0x0E: ("Equal[]", "Equal", None), 0x49: ("Equal_typed[]", "Equal (typed)", None), 0x66: ("Strict_Equal[]", "Strict Equal", None), 0x67: ("Greater_Than_typed[]", "Greater Than (typed)", None), 0x0F: ("Less_Than[]", "Less Than", None), 0x48: ("Less_Than_typed[]", "Less Than (typed)", None), 0x13: ("String_Equal[]", "String Equal", None), 0x68: ("String_Greater_Than[]", "String Greater Than", None), 0x29: ("String_Less_Than[]", "String Less Than", None), # Logical and Bit Wise 0x60: ("And[]", "And", None), 0x10: ("Logical_And[]", "Logical And", None), 0x12: ("Logical_Not[]", "Logical Not", None), 0x11: ("Logical_Or[]", "Logical Or", None), 0x61: ("Or[]", "Or", None), 0x63: ("Shift_Left[]", "Shift Left", None), 0x64: ("Shift_Right[]", "Shift Right", None), 0x65: ("Shift_Right_Unsigned[]", "Shift Right Unsigned", None), 0x62: 
("Xor[]", "Xor", None), # Strings & Characters (See the String Object also) 0x33: ("Chr[]", "Chr", None), 0x37: ("Chr_multi-bytes[]", "Chr (multi-bytes)", None), 0x21: ("Concatenate_Strings[]", "Concatenate Strings", None), 0x32: ("Ord[]", "Ord", None), 0x36: ("Ord_multi-bytes[]", "Ord (multi-bytes)", None), 0x4B: ("String[]", "String", None), 0x14: ("String_Length[]", "String Length", None), 0x31: ("String_Length_multi-bytes[]", "String Length (multi-bytes)", None), 0x15: ("SubString[]", "SubString", None), 0x35: ("SubString_multi-bytes[]", "SubString (multi-bytes)", None), # Properties 0x22: ("Get_Property[]", "Get Property", None), 0x23: ("Set_Property[]", "Set Property", None), # Objects 0x2B: ("Cast_Object[]", "Cast Object", None), 0x42: ("Declare_Array[]", "Declare Array", None), 0x88: ("Declare_Dictionary[]", "Declare Dictionary", parseDeclareDictionary), 0x43: ("Declare_Object[]", "Declare Object", None), 0x3A: ("Delete[]", "Delete", None), 0x3B: ("Delete_All[]", "Delete All", None), 0x24: ("Duplicate_Sprite[]", "Duplicate Sprite", None), 0x46: ("Enumerate[]", "Enumerate", None), 0x55: ("Enumerate_Object[]", "Enumerate Object", None), 0x69: ("Extends[]", "Extends", None), 0x4E: ("Get_Member[]", "Get Member", None), 0x45: ("Get_Target[]", "Get Target", None), 0x2C: ("Implements[]", "Implements", None), 0x54: ("Instance_Of[]", "Instance Of", None), 0x40: ("New[]", "New", None), 0x53: ("New_Method[]", "New Method", None), 0x25: ("Remove_Sprite[]", "Remove Sprite", None), 0x4F: ("Set_Member[]", "Set Member", None), 0x44: ("Type_Of[]", "Type Of", None), # Variables 0x41: ("Declare_Local_Variable[]", "Declare Local Variable", None), 0x1C: ("Get_Variable[]", "Get Variable", None), 0x3C: ("Set_Local_Variable[]", "Set Local Variable", None), 0x1D: ("Set_Variable[]", "Set Variable", None), # Miscellaneous 0x2D: ("FSCommand2[]", "FSCommand2", None), 0x34: ("Get_Timer[]", "Get Timer", None), 0x30: ("Random[]", "Random", None), 0x27: ("Start_Drag[]", "Start Drag", 
None), 0x28: ("Stop_Drag[]", "Stop Drag", None), 0x87: ("Store_Register[]", "Store Register", parseStoreRegister), 0x89: ("Strict_Mode[]", "Strict Mode", parseStrictMode), 0x26: ("Trace[]", "Trace", None), } def __init__(self, *args): FieldSet.__init__(self, *args) code = self["action_id"].value if code & 128: self._size = (3 + self["action_length"].value) * 8 else: self._size = 8 if code in self.ACTION_INFO: self._name, self._description, self.parser = self.ACTION_INFO[code] else: self.parser = None def createFields(self): yield Bits(self, "action_id", 8) if not (self["action_id"].value & 128): return yield UInt16(self, "action_length") size = self["action_length"].value if not size: return if self.parser: for field in self.parser(self, size): yield field else: yield RawBytes(self, "action_data", size) def createDescription(self): return self._description def __str__(self): r = str(self._description) for f in self: if f.name not in ("action_id", "action_length", "count") and not f.name.startswith("data_type") : r = r + "\n " + str((self.address+f.address)/8) + " " + str(f.name) + "=" + str(f.value) return r class ActionScript(FieldSet): def createFields(self): while not self.eof: yield Instruction(self, "instr[]") def __str__(self): r = "" for f in self: r = r + str(f.address/8) + " " + str(f) + "\n" return r def parseActionScript(parent, size): yield ActionScript(parent, "action", size=size*8) def FindABC(field): while not getattr(field, "isABC", False): field = field.parent if field is None: return None return field def GetConstant(field, pool, index): if index == 0: return None return FindABC(field)["constant_%s_pool/constant[%i]"%(pool, index)] def GetMultiname(field, index): fld = GetConstant(field, "multiname", index) if fld is None: return "*" if "name_index" not in fld: return "?" 
fld2 = GetConstant(fld, "string", fld["name_index"].value) if fld2 is None: return "*" return fld2.value class ABCStringIndex(FlashU30): def createDisplay(self): fld = GetConstant(self, "string", self.value) if fld is None: return "*" return fld.value class ABCNSIndex(FlashU30): def createDisplay(self): fld = GetConstant(self, "namespace", self.value) if fld is None: return "*" return fld.display class ABCMethodIndex(FlashU30): def createDisplay(self): fld = FindABC(self)["method_array/method[%i]"%self.value] if fld is None: return "*" return fld.description class ABCMultinameIndex(FlashU30): def createDisplay(self): return GetMultiname(self, self.value) class ABCConstantPool(FieldSet): def __init__(self, parent, name, klass): FieldSet.__init__(self, parent, 'constant_%s_pool'%name) self.klass = klass def createFields(self): ctr = FlashU30(self, "count") yield ctr for i in xrange(ctr.value-1): yield self.klass(self, "constant[%i]"%(i+1)) class ABCObjectArray(FieldSet): def __init__(self, parent, name, klass): self.arrname = name FieldSet.__init__(self, parent, name+'_array') self.klass = klass def createFields(self): ctr = FlashU30(self, "count") yield ctr for i in xrange(ctr.value): yield self.klass(self, self.arrname+"[]") class ABCClassArray(FieldSet): def __init__(self, parent, name): FieldSet.__init__(self, parent, name+'_array') def createFields(self): ctr = FlashU30(self, "count") yield ctr for i in xrange(ctr.value): yield ABCInstanceInfo(self, "instance[]") for i in xrange(ctr.value): yield ABCClassInfo(self, "class[]") class ABCConstantString(FieldSet): def createFields(self): yield FlashU30(self, "length") size = self["length"].value if size: yield String(self, "data", size, charset="UTF-8") def createDisplay(self): if "data" in self: return self["data"].display else: return "<empty>" def createValue(self): if "data" in self: return self["data"].value else: return "" class ABCConstantNamespace(FieldSet): NAMESPACE_KIND = {8: "Namespace", 5: 
"PrivateNamespace", 22: "PackageNamespace", 23: "PacakgeInternalNamespace", 24: "ProtectedNamespace", 25: "ExplicitNamespace", 26: "MultinameL"} def createFields(self): yield Enum(UInt8(self, "kind"), self.NAMESPACE_KIND) yield ABCStringIndex(self, "name_index") def createDisplay(self): return "%s %s"%(self["kind"].display, self["name_index"].display) def createValue(self): return self["name_index"].value class ABCConstantNamespaceSet(FieldSet): def createFields(self): ctr = FlashU30(self, "namespace_count") yield ctr for i in xrange(ctr.value): yield ABCNSIndex(self, "namespace_index[]") def createDescription(self): ret = [fld.display for fld in self.array("namespace_index")] return ', '.join(ret) class ABCConstantMultiname(FieldSet): MULTINAME_KIND = {7: "Qname", 13: "QnameA", 9: "Multiname", 14: "MultinameA", 15: "RTQname", 16: "RTQnameA", 27: "MultinameL", 17: "RTQnameL", 18: "RTQnameLA"} def createFields(self): yield Enum(UInt8(self, "kind"), self.MULTINAME_KIND) kind = self["kind"].value if kind in (7,13): # Qname yield FlashU30(self, "namespace_index") yield ABCStringIndex(self, "name_index") elif kind in (9,14): # Multiname yield ABCStringIndex(self, "name_index") yield FlashU30(self, "namespace_set_index") elif kind in (15,16): # RTQname yield ABCStringIndex(self, "name_index") elif kind == 27: # MultinameL yield FlashU30(self, "namespace_set_index") elif kind in (17,18): # RTQnameL pass def createDisplay(self): kind = self["kind"].display if "name_index" in self: return kind + " " + self["name_index"].display return kind def createValue(self): return self["kind"].value class ABCTrait(FieldSet): TRAIT_KIND = {0: "slot", 1: "method", 2: "getter", 3: "setter", 4: "class", 5: "function", 6: "const",} def createFields(self): yield ABCMultinameIndex(self, "name_index") yield Enum(Bits(self, "kind", 4), self.TRAIT_KIND) yield Enum(Bit(self, "is_final"), {True:'final',False:'virtual'}) yield Enum(Bit(self, "is_override"), {True:'override',False:'new'}) yield 
Bit(self, "has_metadata") yield Bits(self, "unused", 1) kind = self["kind"].value if kind in (0,6): # slot, const yield FlashU30(self, "slot_id") yield ABCMultinameIndex(self, "type_index") ### TODO reference appropriate constant pool using value_kind yield FlashU30(self, "value_index") if self['value_index'].value != 0: yield UInt8(self, "value_kind") elif kind in (1,2,3): # method, getter, setter yield FlashU30(self, "disp_id") yield ABCMethodIndex(self, "method_info") elif kind == 4: # class yield FlashU30(self, "disp_id") yield FlashU30(self, "class_info") elif kind == 5: # function yield FlashU30(self, "disp_id") yield ABCMethodIndex(self, "method_info") if self['has_metadata'].value: yield ABCObjectArray(self, "metadata", FlashU30) class ABCValueKind(FieldSet): def createFields(self): yield FlashU30(self, "value_index") yield UInt8(self, "value_kind") class ABCMethodInfo(FieldSet): def createFields(self): yield FlashU30(self, "param_count") yield ABCMultinameIndex(self, "ret_type") for i in xrange(self["param_count"].value): yield ABCMultinameIndex(self, "param_type[]") yield ABCStringIndex(self, "name_index") yield Bit(self, "need_arguments") yield Bit(self, "need_activation") yield Bit(self, "need_rest") yield Bit(self, "has_optional") yield Bit(self, "ignore_rest") yield Bit(self, "explicit") yield Bit(self, "setsdxns") yield Bit(self, "has_paramnames") if self["has_optional"].value: yield ABCObjectArray(self, "optional", ABCValueKind) if self["has_paramnames"].value: for i in xrange(self["param_count"].value): yield FlashU30(self, "param_name[]") def createDescription(self): ret = GetMultiname(self, self["ret_type"].value) ret += " " + self["name_index"].display ret += "(" + ", ".join(GetMultiname(self, fld.value) for fld in self.array("param_type")) + ")" return ret class ABCMetadataInfo(FieldSet): def createFields(self): yield ABCStringIndex(self, "name_index") yield FlashU30(self, "values_count") count = self["values_count"].value for i in 
xrange(count): yield FlashU30(self, "key[]") for i in xrange(count): yield FlashU30(self, "value[]") class ABCInstanceInfo(FieldSet): def createFields(self): yield ABCMultinameIndex(self, "name_index") yield ABCMultinameIndex(self, "super_index") yield Bit(self, "is_sealed") yield Bit(self, "is_final") yield Bit(self, "is_interface") yield Bit(self, "is_protected") yield Bits(self, "unused", 4) if self['is_protected'].value: yield ABCNSIndex(self, "protectedNS") yield FlashU30(self, "interfaces_count") for i in xrange(self["interfaces_count"].value): yield ABCMultinameIndex(self, "interface[]") yield ABCMethodIndex(self, "iinit_index") yield ABCObjectArray(self, "trait", ABCTrait) class ABCClassInfo(FieldSet): def createFields(self): yield ABCMethodIndex(self, "cinit_index") yield ABCObjectArray(self, "trait", ABCTrait) class ABCScriptInfo(FieldSet): def createFields(self): yield ABCMethodIndex(self, "init_index") yield ABCObjectArray(self, "trait", ABCTrait) class ABCException(FieldSet): def createFields(self): yield FlashU30(self, "start") yield FlashU30(self, "end") yield FlashU30(self, "target") yield FlashU30(self, "type_index") yield FlashU30(self, "name_index") class ABCMethodBody(FieldSet): def createFields(self): yield ABCMethodIndex(self, "method_info") yield FlashU30(self, "max_stack") yield FlashU30(self, "max_regs") yield FlashU30(self, "scope_depth") yield FlashU30(self, "max_scope") yield FlashU30(self, "code_length") yield RawBytes(self, "code", self['code_length'].value) yield ABCObjectArray(self, "exception", ABCException) yield ABCObjectArray(self, "trait", ABCTrait) def parseABC(parent, size): code = parent["code"].value if code == parent.TAG_DO_ABC_DEFINE: yield UInt32(parent, "action_flags") yield CString(parent, "action_name") yield UInt16(parent, "minor_version") yield UInt16(parent, "major_version") parent.isABC = True yield ABCConstantPool(parent, "int", FlashS32) yield ABCConstantPool(parent, "uint", FlashU32) yield 
ABCConstantPool(parent, "double", Float64) yield ABCConstantPool(parent, "string", ABCConstantString) yield ABCConstantPool(parent, "namespace", ABCConstantNamespace) yield ABCConstantPool(parent, "namespace_set", ABCConstantNamespaceSet) yield ABCConstantPool(parent, "multiname", ABCConstantMultiname) yield ABCObjectArray(parent, "method", ABCMethodInfo) yield ABCObjectArray(parent, "metadata", ABCMetadataInfo) yield ABCClassArray(parent, "class") yield ABCObjectArray(parent, "script", ABCScriptInfo) yield ABCObjectArray(parent, "body", ABCMethodBody)
gpl-3.0
scs/uclinux
user/python/python-2.4.4/Lib/test/test_multibytecodec.py
5
4296
#!/usr/bin/env python
#
# test_multibytecodec.py
#   Unit test for multibytecodec itself
#
# $CJKCodecs: test_multibytecodec.py,v 1.8 2004/06/19 06:09:55 perky Exp $

# NOTE: Python 2 source (u'' literals, `unicode`, `unichr`, `xrange`,
# unittest's deprecated failIf/makeSuite API).

from test import test_support
from test import test_multibytecodec_support
import unittest, StringIO, codecs, sys

class Test_StreamWriter(unittest.TestCase):
    # These writer tests only apply on narrow (UCS2) builds, where a
    # non-BMP character splits into a surrogate pair.
    if len(u'\U00012345') == 2: # UCS2
        def test_gb18030(self):
            s= StringIO.StringIO()
            c = codecs.lookup('gb18030')[3](s)
            c.write(u'123')
            self.assertEqual(s.getvalue(), '123')
            c.write(u'\U00012345')
            self.assertEqual(s.getvalue(), '123\x907\x959')
            # A lone high surrogate is buffered, not emitted yet.
            c.write(u'\U00012345'[0])
            self.assertEqual(s.getvalue(), '123\x907\x959')
            c.write(u'\U00012345'[1] + u'\U00012345' + u'\uac00\u00ac')
            self.assertEqual(s.getvalue(),
                    '123\x907\x959\x907\x959\x907\x959\x827\xcf5\x810\x851')
            c.write(u'\U00012345'[0])
            self.assertEqual(s.getvalue(),
                    '123\x907\x959\x907\x959\x907\x959\x827\xcf5\x810\x851')
            # reset() with a pending surrogate must fail for gb18030.
            self.assertRaises(UnicodeError, c.reset)
            self.assertEqual(s.getvalue(),
                    '123\x907\x959\x907\x959\x907\x959\x827\xcf5\x810\x851')

        # standard utf-8 codecs has broken StreamReader
        if test_multibytecodec_support.__cjkcodecs__:
            def test_utf_8(self):
                s= StringIO.StringIO()
                c = codecs.lookup('utf-8')[3](s)
                c.write(u'123')
                self.assertEqual(s.getvalue(), '123')
                c.write(u'\U00012345')
                self.assertEqual(s.getvalue(), '123\xf0\x92\x8d\x85')
                c.write(u'\U00012345'[0])
                self.assertEqual(s.getvalue(), '123\xf0\x92\x8d\x85')
                c.write(u'\U00012345'[1] + u'\U00012345' + u'\uac00\u00ac')
                self.assertEqual(s.getvalue(),
                        '123\xf0\x92\x8d\x85\xf0\x92\x8d\x85\xf0\x92\x8d\x85'
                        '\xea\xb0\x80\xc2\xac')
                c.write(u'\U00012345'[0])
                self.assertEqual(s.getvalue(),
                        '123\xf0\x92\x8d\x85\xf0\x92\x8d\x85\xf0\x92\x8d\x85'
                        '\xea\xb0\x80\xc2\xac')
                # Unlike gb18030, UTF-8 reset() flushes the pending surrogate.
                c.reset()
                self.assertEqual(s.getvalue(),
                        '123\xf0\x92\x8d\x85\xf0\x92\x8d\x85\xf0\x92\x8d\x85'
                        '\xea\xb0\x80\xc2\xac\xed\xa0\x88')
                c.write(u'\U00012345'[1])
                self.assertEqual(s.getvalue(),
                        '123\xf0\x92\x8d\x85\xf0\x92\x8d\x85\xf0\x92\x8d\x85'
                        '\xea\xb0\x80\xc2\xac\xed\xa0\x88\xed\xbd\x85')
    else: # UCS4
        pass

    def test_nullcoding(self):
        # Empty input must round-trip to empty output in every direction.
        self.assertEqual(''.decode('gb18030'), u'')
        self.assertEqual(unicode('', 'gb18030'), u'')
        self.assertEqual(u''.encode('gb18030'), '')

    def test_str_decode(self):
        self.assertEqual('abcd'.encode('gb18030'), 'abcd')

    def test_streamwriter_strwrite(self):
        # Writing a byte string (not unicode) through the stream writer.
        s = StringIO.StringIO()
        wr = codecs.getwriter('gb18030')(s)
        wr.write('abcd')
        self.assertEqual(s.getvalue(), 'abcd')

class Test_ISO2022(unittest.TestCase):
    def test_g2(self):
        # Escape sequences switch charsets mid-stream (G2 designation).
        iso2022jp2 = '\x1b(B:hu4:unit\x1b.A\x1bNi de famille'
        uni = u':hu4:unit\xe9 de famille'
        self.assertEqual(iso2022jp2.decode('iso2022-jp-2'), uni)

    def test_iso2022_jp_g0(self):
        # Encoded output must stay 7-bit clean (no SO byte, no high bytes).
        self.failIf('\x0e' in u'\N{SOFT HYPHEN}'.encode('iso-2022-jp-2'))
        for encoding in ('iso-2022-jp-2004', 'iso-2022-jp-3'):
            e = u'\u3406'.encode(encoding)
            self.failIf(filter(lambda x: x >= '\x80', e))

    def test_bug1572832(self):
        # Regression test: encoding every non-BMP code point must not crash.
        if sys.maxunicode >= 0x10000:
            myunichr = unichr
        else:
            # Narrow build: build the surrogate pair by hand.
            myunichr = lambda x: unichr(0xD7C0+(x>>10)) + unichr(0xDC00+(x&0x3FF))
        for x in xrange(0x10000, 0x110000):
            # Any ISO 2022 codec will cause the segfault
            myunichr(x).encode('iso_2022_jp', 'ignore')

def test_main():
    suite = unittest.TestSuite()
    suite.addTest(unittest.makeSuite(Test_StreamWriter))
    suite.addTest(unittest.makeSuite(Test_ISO2022))
    test_support.run_suite(suite)

if __name__ == "__main__":
    test_main()
gpl-2.0
pas256/ansible
lib/ansible/plugins/lookup/hashi_vault.py
44
2701
# (c) 2015, Jonathan Davila <jdavila(at)ansible.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # # USAGE: {{ lookup('hashi_vault', 'secret=secret/hello token=c975b780-d1be-8016-866b-01d0f9b688a5 url=http://myvault:8200')}} # # You can skip setting the url if you set the VAULT_ADDR environment variable # or if you want it to default to localhost:8200 # # NOTE: Due to a current limitation in the HVAC library there won't # necessarily be an error if a bad endpoint is specified. # # Requires hvac library. Install with pip. 
# from __future__ import (absolute_import, division, print_function) __metaclass__ = type import os from ansible.errors import AnsibleError from ansible.plugins.lookup import LookupBase ANSIBLE_HASHI_VAULT_ADDR = 'http://127.0.0.1:8200' if os.getenv('VAULT_ADDR') is not None: ANSIBLE_HASHI_VAULT_ADDR = os.environ['VAULT_ADDR'] class HashiVault: def __init__(self, **kwargs): try: import hvac except ImportError: AnsibleError("Please pip install hvac to use this module") self.url = kwargs.pop('url') self.secret = kwargs.pop('secret') self.token = kwargs.pop('token') self.client = hvac.Client(url=self.url, token=self.token) if self.client.is_authenticated(): pass else: raise AnsibleError("Invalid Hashicorp Vault Token Specified") def get(self): data = self.client.read(self.secret) if data is None: raise AnsibleError("The secret %s doesn't seem to exist" % self.secret) else: return data['data']['value'] class LookupModule(LookupBase): def run(self, terms, variables, **kwargs): vault_args = terms[0].split(' ') vault_dict = {} ret = [] for param in vault_args: key, value = param.split('=') vault_dict[key] = value vault_conn = HashiVault(**vault_dict) for term in terms: key = term.split()[0] value = vault_conn.get() ret.append(value) return ret
gpl-3.0
ParadropLabs/Paradrop
tests/pdinstall/test_main.py
1
2416
import os

from mock import MagicMock, Mock, patch


def test_getArgs():
    """
    Test pdinstall.main.getArgs
    """
    from pdinstall.main import getArgs

    argv = ["install", "--source", "paradrop_0.1.0_all.snap"]
    args = getArgs(argv)
    assert args.sources == [argv[2]]


@patch("os.remove")
@patch("os.listdir")
def test_cleanupServiceFiles(listdir, remove):
    """
    Test pdinstall.main.cleanupServiceFiles
    """
    from pdinstall.main import SERVICE_FILES_DIR, cleanupServiceFiles

    listdir.return_value = ["paradrop_pd_0.1.0.service", "syslog.service"]

    cleanupServiceFiles("paradrop")

    # Only the matching service file should be removed, not syslog.service.
    path = os.path.join(SERVICE_FILES_DIR, "paradrop_pd_0.1.0.service")
    remove.assert_called_once_with(path)


@patch("pdinstall.snappy.Snap.getFile")
def test_getSnaps(getFile):
    """
    Test pdinstall.main.getSnaps
    """
    from pdinstall.main import getSnaps

    sources = ["paradrop_0.2.0_all.snap", "paradrop_0.1.0_all.snap"]

    getFile.return_value = True
    snaps = getSnaps(sources)
    assert len(snaps) == 2

    # If any snap file cannot be retrieved, getSnaps returns an empty list.
    getFile.return_value = False
    snaps = getSnaps(sources)
    assert snaps == []


@patch("pdinstall.main.cleanupServiceFiles")
@patch("pdinstall.snappy.installSnap")
def test_installFromList(installSnap, cleanupServiceFiles):
    """
    Test pdinstall.main.installFromList
    """
    from pdinstall.main import installFromList

    args = Mock()
    args.ignore_version = False

    snaps = list()
    snaps.append(MagicMock())
    snaps[0].name = "paradrop"

    # Already installed: succeeds without calling installSnap.
    snaps[0].isInstalled.return_value = True
    assert installFromList(snaps, args)

    # Not installed and install succeeds.
    snaps[0].isInstalled.return_value = False
    installSnap.return_value = True
    assert installFromList(snaps, args)

    # Not installed and install fails.
    installSnap.return_value = False
    assert installFromList(snaps, args) is False
    # BUG FIX: `mock.called_once_with(...)` is not an assertion -- it just
    # creates a child mock and is always truthy, so the original check could
    # never fail.  Use the real Mock assertion helper instead.
    cleanupServiceFiles.assert_called_with("paradrop")


@patch("pdinstall.main.installFromList")
@patch("pdinstall.main.getSnaps")
@patch("pdinstall.main.getArgs")
def test_main(getArgs, getSnaps, installFromList):
    """
    Test pdinstall.main.main
    """
    from pdinstall.main import main

    installFromList.return_value = True

    args = Mock()
    args.command = "install"
    args.sources = ["fake.snap"]
    getArgs.return_value = args

    # All stages mocked to succeed -> exit status 0.
    assert main() == 0
    assert getArgs.called
    assert getSnaps.called

    # Installation failure -> non-zero exit status.
    installFromList.return_value = False
    assert main() != 0
apache-2.0
n3wb13/OpenNfrGui-5.0-1
lib/python/Screens/ServiceInfo.py
1
10815
from Components.HTMLComponent import HTMLComponent from Components.GUIComponent import GUIComponent from Screens.Screen import Screen from Components.ActionMap import ActionMap from Components.Label import Label from ServiceReference import ServiceReference from enigma import eListboxPythonMultiContent, eListbox, gFont, iServiceInformation, eServiceCenter from Tools.Transponder import ConvertToHumanReadable from Components.Converter.ChannelNumbers import channelnumbers from enigma import getDesktop RT_HALIGN_LEFT = 0 TYPE_TEXT = 0 TYPE_VALUE_HEX = 1 TYPE_VALUE_DEC = 2 TYPE_VALUE_HEX_DEC = 3 TYPE_SLIDER = 4 TYPE_VALUE_ORBIT_DEC = 5 def to_unsigned(x): return x & 0xFFFFFFFF def ServiceInfoListEntry(a, b, valueType=TYPE_TEXT, param=4): print "b:", b if not isinstance(b, str): if valueType == TYPE_VALUE_HEX: b = ("0x%0" + str(param) + "x") % to_unsigned(b) elif valueType == TYPE_VALUE_DEC: b = str(b) elif valueType == TYPE_VALUE_HEX_DEC: b = ("0x%0" + str(param) + "x (%dd)") % (to_unsigned(b), b) elif valueType == TYPE_VALUE_ORBIT_DEC: direction = 'E' if b > 1800: b = 3600 - b direction = 'W' b = "%d.%d%s" % (b // 10, b % 10, direction) else: b = str(b) if getDesktop(0).size().width() == 1920: return [ #PyObject *type, *px, *py, *pwidth, *pheight, *pfnt, *pstring, *pflags; (eListboxPythonMultiContent.TYPE_TEXT, 0, 0, 450, 40, 0, RT_HALIGN_LEFT, ""), (eListboxPythonMultiContent.TYPE_TEXT, 0, 0, 450, 40, 0, RT_HALIGN_LEFT, a), (eListboxPythonMultiContent.TYPE_TEXT, 260, 0, 490, 40, 0, RT_HALIGN_LEFT, b) ] if getDesktop(0).size().width() == 1280: return [ #PyObject *type, *px, *py, *pwidth, *pheight, *pfnt, *pstring, *pflags; (eListboxPythonMultiContent.TYPE_TEXT, 0, 0, 200, 30, 0, RT_HALIGN_LEFT, ""), (eListboxPythonMultiContent.TYPE_TEXT, 0, 0, 200, 25, 0, RT_HALIGN_LEFT, a), (eListboxPythonMultiContent.TYPE_TEXT, 230, 0, 450, 25, 0, RT_HALIGN_LEFT, b) ] class ServiceInfoList(HTMLComponent, GUIComponent): def __init__(self, source): if getDesktop(0).size().width() == 
1920: GUIComponent.__init__(self) self.l = eListboxPythonMultiContent() self.list = source self.l.setList(self.list) self.l.setFont(0, gFont("Regular", 32)) self.l.setItemHeight(36) else: GUIComponent.__init__(self) self.l = eListboxPythonMultiContent() self.list = source self.l.setList(self.list) self.l.setFont(0, gFont("Regular", 23)) self.l.setItemHeight(25) GUI_WIDGET = eListbox def postWidgetCreate(self, instance): self.instance.setContent(self.l) TYPE_SERVICE_INFO = 1 TYPE_TRANSPONDER_INFO = 2 class ServiceInfo(Screen): def __init__(self, session, serviceref=None): Screen.__init__(self, session) Screen.setTitle(self, _("Service Information")) self["actions"] = ActionMap(["OkCancelActions", "ColorActions"], { "ok": self.close, "cancel": self.close, "red": self.information, "green": self.pids, "yellow": self.transponder, "blue": self.tuner }, -1) if serviceref: self.type = TYPE_TRANSPONDER_INFO self.skinName="ServiceInfoSimple" info = eServiceCenter.getInstance().info(serviceref) self.transponder_info = info.getInfoObject(serviceref, iServiceInformation.sTransponderData) # info is a iStaticServiceInformation, not a iServiceInformation self.info = None self.feinfo = None else: self.type = TYPE_SERVICE_INFO self["key_red"] = self["red"] = Label(_("Service")) self["key_green"] = self["green"] = Label(_("PIDs")) self["key_yellow"] = self["yellow"] = Label(_("Multiplex")) self["key_blue"] = self["blue"] = Label(_("Tuner status")) service = session.nav.getCurrentService() if service is not None: self.info = service.info() self.feinfo = service.frontendInfo() else: self.info = None self.feinfo = None tlist = [ ] self["infolist"] = ServiceInfoList(tlist) self.onShown.append(self.information) def information(self): if self.type == TYPE_SERVICE_INFO: if self.session.nav.getCurrentlyPlayingServiceOrGroup(): name = ServiceReference(self.session.nav.getCurrentlyPlayingServiceReference()).getServiceName() refstr = 
self.session.nav.getCurrentlyPlayingServiceReference().toString() else: name = _("N/A") refstr = _("N/A") aspect = "-" videocodec = "-" resolution = "-" if self.info: videocodec = ("MPEG2", "MPEG4", "MPEG1", "MPEG4-II", "VC1", "VC1-SM", "-" )[self.info and self.info.getInfo(iServiceInformation.sVideoType)] width = self.info.getInfo(iServiceInformation.sVideoWidth) height = self.info.getInfo(iServiceInformation.sVideoHeight) if width > 0 and height > 0: resolution = "%dx%d" % (width,height) resolution += ("i", "p", "")[self.info.getInfo(iServiceInformation.sProgressive)] resolution += str((self.info.getInfo(iServiceInformation.sFrameRate) + 500) / 1000) aspect = self.getServiceInfoValue(iServiceInformation.sAspect) if aspect in ( 1, 2, 5, 6, 9, 0xA, 0xD, 0xE ): aspect = "4:3" else: aspect = "16:9" Labels = ( (_("Name"), name, TYPE_TEXT), (_("Provider"), self.getServiceInfoValue(iServiceInformation.sProvider), TYPE_TEXT), (_("Videoformat"), aspect, TYPE_TEXT), (_("Videosize"), resolution, TYPE_TEXT), (_("Videocodec"), videocodec, TYPE_TEXT), (_("Namespace"), self.getServiceInfoValue(iServiceInformation.sNamespace), TYPE_VALUE_HEX, 8), (_("Service reference"), refstr, TYPE_TEXT)) self.fillList(Labels) else: if self.transponder_info: tp_info = ConvertToHumanReadable(self.transponder_info) conv = { "tuner_type" : _("Transponder type"), "system" : _("System"), "modulation" : _("Modulation"), "orbital_position" : _("Orbital position"), "frequency" : _("Frequency"), "symbol_rate" : _("Symbol rate"), "bandwidth" : _("Bandwidth"), "polarization" : _("Polarization"), "inversion" : _("Inversion"), "pilot" : _("Pilot"), "rolloff" : _("Roll-off"), "fec_inner" : _("FEC"), "code_rate_lp" : _("Coderate LP"), "code_rate_hp" : _("Coderate HP"), "constellation" : _("Constellation"), "transmission_mode": _("Transmission mode"), "guard_interval" : _("Guard interval"), "hierarchy_information": _("Hierarchy information") } Labels = [(conv[i], tp_info[i], i == "orbital_position" and 
TYPE_VALUE_ORBIT_DEC or TYPE_VALUE_DEC) for i in tp_info.keys() if i in conv] self.fillList(Labels) def pids(self): if self.type == TYPE_SERVICE_INFO: Labels = ( (_("Video PID"), self.getServiceInfoValue(iServiceInformation.sVideoPID), TYPE_VALUE_HEX_DEC, 4), (_("Audio PID"), self.getServiceInfoValue(iServiceInformation.sAudioPID), TYPE_VALUE_HEX_DEC, 4), (_("PCR PID"), self.getServiceInfoValue(iServiceInformation.sPCRPID), TYPE_VALUE_HEX_DEC, 4), (_("PMT PID"), self.getServiceInfoValue(iServiceInformation.sPMTPID), TYPE_VALUE_HEX_DEC, 4), (_("TXT PID"), self.getServiceInfoValue(iServiceInformation.sTXTPID), TYPE_VALUE_HEX_DEC, 4), (_("TSID"), self.getServiceInfoValue(iServiceInformation.sTSID), TYPE_VALUE_HEX_DEC, 4), (_("ONID"), self.getServiceInfoValue(iServiceInformation.sONID), TYPE_VALUE_HEX_DEC, 4), (_("SID"), self.getServiceInfoValue(iServiceInformation.sSID), TYPE_VALUE_HEX_DEC, 4)) self.fillList(Labels) def showFrontendData(self, real): if self.type == TYPE_SERVICE_INFO: frontendData = self.feinfo and self.feinfo.getAll(real) Labels = self.getFEData(frontendData) self.fillList(Labels) def transponder(self): if self.type == TYPE_SERVICE_INFO: self.showFrontendData(True) def tuner(self): if self.type == TYPE_SERVICE_INFO: self.showFrontendData(False) def getFEData(self, frontendDataOrg): if frontendDataOrg and len(frontendDataOrg): frontendData = ConvertToHumanReadable(frontendDataOrg) if frontendDataOrg["tuner_type"] == "DVB-S": return ((_("NIM"), chr(ord('A') + frontendData["tuner_number"]), TYPE_TEXT), (_("Type"), frontendData["tuner_type"], TYPE_TEXT), (_("System"), frontendData["system"], TYPE_TEXT), (_("Modulation"), frontendData["modulation"], TYPE_TEXT), (_("Orbital position"), frontendData["orbital_position"], TYPE_VALUE_DEC), (_("Frequency"), frontendData["frequency"], TYPE_VALUE_DEC), (_("Symbol rate"), frontendData["symbol_rate"], TYPE_VALUE_DEC), (_("Polarization"), frontendData["polarization"], TYPE_TEXT), (_("Inversion"), 
frontendData["inversion"], TYPE_TEXT), (_("FEC"), frontendData["fec_inner"], TYPE_TEXT), (_("Pilot"), frontendData.get("pilot", None), TYPE_TEXT), (_("Roll-off"), frontendData.get("rolloff", None), TYPE_TEXT)) elif frontendDataOrg["tuner_type"] == "DVB-C": return ((_("NIM"), chr(ord('A') + frontendData["tuner_number"]), TYPE_TEXT), (_("Type"), frontendData["tuner_type"], TYPE_TEXT), (_("Modulation"), frontendData["modulation"], TYPE_TEXT), (_("Frequency"), frontendData["frequency"], TYPE_VALUE_DEC), (_("Symbol rate"), frontendData["symbol_rate"], TYPE_VALUE_DEC), (_("Inversion"), frontendData["inversion"], TYPE_TEXT), (_("FEC"), frontendData["fec_inner"], TYPE_TEXT)) elif frontendDataOrg["tuner_type"] == "DVB-T": return ((_("NIM"), chr(ord('A') + frontendData["tuner_number"]), TYPE_TEXT), (_("Type"), frontendData["tuner_type"], TYPE_TEXT), (_("Frequency"), frontendData["frequency"], TYPE_VALUE_DEC), (_("Channel"), channelnumbers.getChannelNumber(frontendData["frequency"], frontendData["tuner_number"]), TYPE_VALUE_DEC), (_("Inversion"), frontendData["inversion"], TYPE_TEXT), (_("Bandwidth"), frontendData["bandwidth"], TYPE_VALUE_DEC), (_("Code rate LP"), frontendData["code_rate_lp"], TYPE_TEXT), (_("Code rate HP"), frontendData["code_rate_hp"], TYPE_TEXT), (_("Constellation"), frontendData["constellation"], TYPE_TEXT), (_("Transmission mode"), frontendData["transmission_mode"], TYPE_TEXT), (_("Guard interval"), frontendData["guard_interval"], TYPE_TEXT), (_("Hierarchy info"), frontendData["hierarchy_information"], TYPE_TEXT)) return [ ] def fillList(self, Labels): tlist = [ ] for item in Labels: if item[1] is None: continue value = item[1] if len(item) < 4: tlist.append(ServiceInfoListEntry(item[0]+":", value, item[2])) else: tlist.append(ServiceInfoListEntry(item[0]+":", value, item[2], item[3])) self["infolist"].l.setList(tlist) def getServiceInfoValue(self, what): if self.info is None: return "" v = self.info.getInfo(what) if v == -2: v = 
self.info.getInfoString(what) elif v == -1: v = _("N/A") return v
gpl-2.0
truthcoin/blocksize-market
contrib/fedpeg/rotating_consensus.py
10
4908
#!/usr/bin/env python2 from time import sleep, time import socket import threading import zmq import traceback # For error printing import sys, os sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "../../../python-bitcoinrpc")) from bitcoinrpc.authproxy import JSONRPCException zmq_context = zmq.Context() zmq_poller = zmq.Poller() if (zmq.zmq_version() < 4): print("It is highly recommended you use a version of ZMQ > 4") class ConsensusPublisher: def __init__(self, port): self.socket = zmq_context.socket(zmq.PUB) self.socket.bind("tcp://*:%d" % port) zmq_poller.register(self.socket, zmq.POLLOUT) def send_message(self, msg): self.socket.send("42 %s" % msg.encode("ascii", "strict")) class ConsensusSocket: def __init__(self, host, port, proxy): self.host = host self.isSelf = False self.sock = zmq_context.socket(zmq.SUB) if proxy != None: self.sock.setsockopt(zmq.SOCKS_PROXY, proxy) self.sock.setsockopt(zmq.RECONNECT_IVL, 500) self.sock.setsockopt(zmq.RECONNECT_IVL_MAX, 10000) self.sock.connect("tcp://%s:%d" % (host, port)) self.sock.setsockopt(zmq.SUBSCRIBE, "42") zmq_poller.register(self.sock, zmq.POLLIN) def read_message(self): if not dict(zmq_poller.poll()).has_key(self.sock): return None topic, msg = self.sock.recv().split(" ", 1) return msg class Self: def __init__(self, host): self.host = host self.isSelf = True def read_message(self): return None class RotatingConsensus: def __init__(self, nodes_list, my_host, port, interval, proxy): self.interval = interval self.nodes = [Self(my_host)] for host in nodes_list: self.nodes.append(ConsensusSocket(host, port, proxy)) self.nodes.sort(key=lambda node: node.host) self.publisher = ConsensusPublisher(port) thread = threading.Thread(target=self.main_loop) thread.daemon = True thread.start() def main_loop(self): while True: sleep(self.interval - time() % self.interval) start_time = int(time()) step = int(time()) % (self.interval * len(self.nodes)) / self.interval for node in self.nodes: msg = "" while 
msg != None: msg = node.read_message() if self.nodes[step].isSelf: print("Starting master round (as %s)" % self.nodes[step].host) sleep(self.interval / 10) msg = self._gen_master_msg() if msg == None: print("gen_master_msg threw or returned None") self._round_failed() continue if time() - start_time > self.interval / 5: print("gen_master_msg took longer than interval/5: Skipping round!") self._round_failed() continue self.publisher.send_message(msg) sleep(self.interval / 2 - (time() - start_time)) else: print("Starting round with master %s" % self.nodes[step].host) sleep(self.interval / 4) msg = self.nodes[step].read_message() if msg == None: print("Missed message from master") self._round_failed() continue broadcast_msg = self._recv_master_msg(msg) if broadcast_msg == None: print("recv_master_msg threw or returned None") self._round_failed() continue if time() - start_time > self.interval / 2: print("recv_master_msg took longer than interval/4: Skipping round!") self._round_failed() continue self.publisher.send_message(broadcast_msg) sleep(self.interval / 2 - (time() - start_time)) msgs = [] for node in self.nodes: msg = node.read_message() if msg != None: msgs.append((node.host, msg)) self._round_done(msgs) if time() > start_time + self.interval: print("round_done took longer than interval/2: We skipped a round!") def _gen_master_msg(self): try: return self.gen_master_msg() except Exception as e: if isinstance(e, JSONRPCException): print(e.error) print("gen_master_msg threw!") print(traceback.format_exc()) return None def _recv_master_msg(self, msg): try: return self.recv_master_msg(msg) except Exception as e: if isinstance(e, JSONRPCException): print(e.error) print("recv_master_msg threw!") print(traceback.format_exc()) return None def _round_done(self, peer_messages): try: self.round_done(peer_messages) except Exception as e: if isinstance(e, JSONRPCException): print(e.error) print("round_done threw!") print(traceback.format_exc()) def _round_failed(self): try: 
self.round_failed() except Exception as e: if isinstance(e, JSONRPCException): print(e.error) print("round_failed threw!") print(traceback.format_exc()) #OVERRIDE THESE: def gen_master_msg(self): return "MASTER INITIAL BROADCAST" def recv_master_msg(self, msg): print("GOT '%s' from master" % msg) return "PEER RESPONSE BROADCAST" def round_done(self, peer_messages): print("Finished round...") for msg in peer_messages: print("Got %s from %s" % (msg[1], msg[0])) def round_failed(self): return
mit
atombrella/django-rest-framework
tests/utils.py
5
1646
from django.core.exceptions import ObjectDoesNotExist

from rest_framework.compat import NoReverseMatch


class MockObject(object):
    """A simple attribute bag with a deterministic str() for assertions."""

    def __init__(self, **kwargs):
        self._kwargs = kwargs
        for key, val in kwargs.items():
            setattr(self, key, val)

    def __str__(self):
        # Sort the kwargs so the representation is stable across runs.
        kwargs_str = ', '.join([
            '%s=%s' % (key, value)
            for key, value in sorted(self._kwargs.items())
        ])
        return '<MockObject %s>' % kwargs_str


class MockQueryset(object):
    """Minimal queryset stand-in backed by a plain iterable."""

    def __init__(self, iterable):
        self.items = iterable

    def __getitem__(self, val):
        return self.items[val]

    def get(self, **lookup):
        # Return the first item matching every attribute in `lookup`,
        # mirroring QuerySet.get() (without MultipleObjectsReturned).
        for item in self.items:
            if all([
                getattr(item, key, None) == value
                for key, value in lookup.items()
            ]):
                return item
        raise ObjectDoesNotExist()


class BadType(object):
    """
    When used as a lookup with a `MockQueryset`, these objects
    will raise a `TypeError`, as occurs in Django when making
    queryset lookups with an incorrect type for the lookup value.
    """
    def __eq__(self, other):
        # BUG FIX: the original signature was `__eq__(self)` (missing
        # `other`), so the TypeError came from an arity mismatch and the
        # explicit raise below was unreachable.  Accept the comparand and
        # raise deliberately; observable behavior (TypeError) is unchanged.
        raise TypeError()


def mock_reverse(view_name, args=None, kwargs=None, request=None, format=None):
    """Deterministic stand-in for `reverse()` used by hyperlink tests.

    Builds '<prefix>/<view_name>/<value><suffix>/' where the prefix is
    present only when a request is supplied and the suffix only when a
    format is supplied.
    """
    args = args or []
    kwargs = kwargs or {}
    # Use the first positional arg, else the first kwarg value, else '-'.
    value = (args + list(kwargs.values()) + ['-'])[0]
    prefix = 'http://example.org' if request else ''
    suffix = ('.' + format) if (format is not None) else ''
    return '%s/%s/%s%s/' % (prefix, view_name, value, suffix)


def fail_reverse(view_name, args=None, kwargs=None, request=None, format=None):
    """A `reverse()` stand-in that always fails to resolve."""
    raise NoReverseMatch()
bsd-2-clause
CSC-ORG/Dynamic-Dashboard-2015
engine/lib/python2.7/site-packages/setuptools/tests/test_easy_install.py
135
15441
"""Easy install Tests """ import sys import os import shutil import tempfile import unittest import site import contextlib import textwrap import tarfile import logging import distutils.core from setuptools.compat import StringIO, BytesIO, next, urlparse from setuptools.sandbox import run_setup, SandboxViolation from setuptools.command.easy_install import ( easy_install, fix_jython_executable, get_script_args, nt_quote_arg) from setuptools.command.easy_install import PthDistributions from setuptools.command import easy_install as easy_install_pkg from setuptools.dist import Distribution from pkg_resources import working_set, VersionConflict from pkg_resources import Distribution as PRDistribution import setuptools.tests.server import pkg_resources class FakeDist(object): def get_entry_map(self, group): if group != 'console_scripts': return {} return {'name': 'ep'} def as_requirement(self): return 'spec' WANTED = """\ #!%s # EASY-INSTALL-ENTRY-SCRIPT: 'spec','console_scripts','name' __requires__ = 'spec' import sys from pkg_resources import load_entry_point if __name__ == '__main__': sys.exit( load_entry_point('spec', 'console_scripts', 'name')() ) """ % nt_quote_arg(fix_jython_executable(sys.executable, "")) SETUP_PY = """\ from setuptools import setup setup(name='foo') """ class TestEasyInstallTest(unittest.TestCase): def test_install_site_py(self): dist = Distribution() cmd = easy_install(dist) cmd.sitepy_installed = False cmd.install_dir = tempfile.mkdtemp() try: cmd.install_site_py() sitepy = os.path.join(cmd.install_dir, 'site.py') self.assertTrue(os.path.exists(sitepy)) finally: shutil.rmtree(cmd.install_dir) def test_get_script_args(self): dist = FakeDist() old_platform = sys.platform try: name, script = [i for i in next(get_script_args(dist))][0:2] finally: sys.platform = old_platform self.assertEqual(script, WANTED) def test_no_find_links(self): # new option '--no-find-links', that blocks find-links added at # the project level dist = Distribution() cmd = 
easy_install(dist) cmd.check_pth_processing = lambda: True cmd.no_find_links = True cmd.find_links = ['link1', 'link2'] cmd.install_dir = os.path.join(tempfile.mkdtemp(), 'ok') cmd.args = ['ok'] cmd.ensure_finalized() self.assertEqual(cmd.package_index.scanned_urls, {}) # let's try without it (default behavior) cmd = easy_install(dist) cmd.check_pth_processing = lambda: True cmd.find_links = ['link1', 'link2'] cmd.install_dir = os.path.join(tempfile.mkdtemp(), 'ok') cmd.args = ['ok'] cmd.ensure_finalized() keys = sorted(cmd.package_index.scanned_urls.keys()) self.assertEqual(keys, ['link1', 'link2']) class TestPTHFileWriter(unittest.TestCase): def test_add_from_cwd_site_sets_dirty(self): '''a pth file manager should set dirty if a distribution is in site but also the cwd ''' pth = PthDistributions('does-not_exist', [os.getcwd()]) self.assertTrue(not pth.dirty) pth.add(PRDistribution(os.getcwd())) self.assertTrue(pth.dirty) def test_add_from_site_is_ignored(self): if os.name != 'nt': location = '/test/location/does-not-have-to-exist' else: location = 'c:\\does_not_exist' pth = PthDistributions('does-not_exist', [location, ]) self.assertTrue(not pth.dirty) pth.add(PRDistribution(location)) self.assertTrue(not pth.dirty) class TestUserInstallTest(unittest.TestCase): def setUp(self): self.dir = tempfile.mkdtemp() setup = os.path.join(self.dir, 'setup.py') f = open(setup, 'w') f.write(SETUP_PY) f.close() self.old_cwd = os.getcwd() os.chdir(self.dir) self.old_enable_site = site.ENABLE_USER_SITE self.old_file = easy_install_pkg.__file__ self.old_base = site.USER_BASE site.USER_BASE = tempfile.mkdtemp() self.old_site = site.USER_SITE site.USER_SITE = tempfile.mkdtemp() easy_install_pkg.__file__ = site.USER_SITE def tearDown(self): os.chdir(self.old_cwd) shutil.rmtree(self.dir) shutil.rmtree(site.USER_BASE) shutil.rmtree(site.USER_SITE) site.USER_BASE = self.old_base site.USER_SITE = self.old_site site.ENABLE_USER_SITE = self.old_enable_site easy_install_pkg.__file__ = 
self.old_file def test_user_install_implied(self): site.ENABLE_USER_SITE = True # disabled sometimes #XXX: replace with something meaningfull dist = Distribution() dist.script_name = 'setup.py' cmd = easy_install(dist) cmd.args = ['py'] cmd.ensure_finalized() self.assertTrue(cmd.user, 'user should be implied') def test_multiproc_atexit(self): try: __import__('multiprocessing') except ImportError: # skip the test if multiprocessing is not available return log = logging.getLogger('test_easy_install') logging.basicConfig(level=logging.INFO, stream=sys.stderr) log.info('this should not break') def test_user_install_not_implied_without_usersite_enabled(self): site.ENABLE_USER_SITE = False # usually enabled #XXX: replace with something meaningfull dist = Distribution() dist.script_name = 'setup.py' cmd = easy_install(dist) cmd.args = ['py'] cmd.initialize_options() self.assertFalse(cmd.user, 'NOT user should be implied') def test_local_index(self): # make sure the local index is used # when easy_install looks for installed # packages new_location = tempfile.mkdtemp() target = tempfile.mkdtemp() egg_file = os.path.join(new_location, 'foo-1.0.egg-info') f = open(egg_file, 'w') try: f.write('Name: foo\n') finally: f.close() sys.path.append(target) old_ppath = os.environ.get('PYTHONPATH') os.environ['PYTHONPATH'] = os.path.pathsep.join(sys.path) try: dist = Distribution() dist.script_name = 'setup.py' cmd = easy_install(dist) cmd.install_dir = target cmd.args = ['foo'] cmd.ensure_finalized() cmd.local_index.scan([new_location]) res = cmd.easy_install('foo') self.assertEqual(os.path.realpath(res.location), os.path.realpath(new_location)) finally: sys.path.remove(target) for basedir in [new_location, target, ]: if not os.path.exists(basedir) or not os.path.isdir(basedir): continue try: shutil.rmtree(basedir) except: pass if old_ppath is not None: os.environ['PYTHONPATH'] = old_ppath else: del os.environ['PYTHONPATH'] def test_setup_requires(self): """Regression test for 
Distribute issue #318 Ensure that a package with setup_requires can be installed when setuptools is installed in the user site-packages without causing a SandboxViolation. """ test_pkg = create_setup_requires_package(self.dir) test_setup_py = os.path.join(test_pkg, 'setup.py') try: with quiet_context(): with reset_setup_stop_context(): run_setup(test_setup_py, ['install']) except SandboxViolation: self.fail('Installation caused SandboxViolation') class TestSetupRequires(unittest.TestCase): def test_setup_requires_honors_fetch_params(self): """ When easy_install installs a source distribution which specifies setup_requires, it should honor the fetch parameters (such as allow-hosts, index-url, and find-links). """ # set up a server which will simulate an alternate package index. p_index = setuptools.tests.server.MockServer() p_index.start() netloc = 1 p_index_loc = urlparse(p_index.url)[netloc] if p_index_loc.endswith(':0'): # Some platforms (Jython) don't find a port to which to bind, # so skip this test for them. return with quiet_context(): # create an sdist that has a build-time dependency. with TestSetupRequires.create_sdist() as dist_file: with tempdir_context() as temp_install_dir: with environment_context(PYTHONPATH=temp_install_dir): ei_params = ['--index-url', p_index.url, '--allow-hosts', p_index_loc, '--exclude-scripts', '--install-dir', temp_install_dir, dist_file] with reset_setup_stop_context(): with argv_context(['easy_install']): # attempt to install the dist. It should fail because # it doesn't exist. 
self.assertRaises(SystemExit, easy_install_pkg.main, ei_params) # there should have been two or three requests to the server # (three happens on Python 3.3a) self.assertTrue(2 <= len(p_index.requests) <= 3) self.assertEqual(p_index.requests[0].path, '/does-not-exist/') @staticmethod @contextlib.contextmanager def create_sdist(): """ Return an sdist with a setup_requires dependency (of something that doesn't exist) """ with tempdir_context() as dir: dist_path = os.path.join(dir, 'setuptools-test-fetcher-1.0.tar.gz') make_trivial_sdist( dist_path, textwrap.dedent(""" import setuptools setuptools.setup( name="setuptools-test-fetcher", version="1.0", setup_requires = ['does-not-exist'], ) """).lstrip()) yield dist_path def test_setup_requires_overrides_version_conflict(self): """ Regression test for issue #323. Ensures that a distribution's setup_requires requirements can still be installed and used locally even if a conflicting version of that requirement is already on the path. """ pr_state = pkg_resources.__getstate__() fake_dist = PRDistribution('does-not-matter', project_name='foobar', version='0.0') working_set.add(fake_dist) try: with tempdir_context() as temp_dir: test_pkg = create_setup_requires_package(temp_dir) test_setup_py = os.path.join(test_pkg, 'setup.py') with quiet_context() as (stdout, stderr): with reset_setup_stop_context(): try: # Don't even need to install the package, just # running the setup.py at all is sufficient run_setup(test_setup_py, ['--name']) except VersionConflict: self.fail('Installing setup.py requirements ' 'caused a VersionConflict') lines = stdout.readlines() self.assertTrue(len(lines) > 0) self.assertTrue(lines[-1].strip(), 'test_pkg') finally: pkg_resources.__setstate__(pr_state) def create_setup_requires_package(path): """Creates a source tree under path for a trivial test package that has a single requirement in setup_requires--a tarball for that requirement is also created and added to the dependency_links argument. 
""" test_setup_attrs = { 'name': 'test_pkg', 'version': '0.0', 'setup_requires': ['foobar==0.1'], 'dependency_links': [os.path.abspath(path)] } test_pkg = os.path.join(path, 'test_pkg') test_setup_py = os.path.join(test_pkg, 'setup.py') os.mkdir(test_pkg) f = open(test_setup_py, 'w') f.write(textwrap.dedent("""\ import setuptools setuptools.setup(**%r) """ % test_setup_attrs)) f.close() foobar_path = os.path.join(path, 'foobar-0.1.tar.gz') make_trivial_sdist( foobar_path, textwrap.dedent("""\ import setuptools setuptools.setup( name='foobar', version='0.1' ) """)) return test_pkg def make_trivial_sdist(dist_path, setup_py): """Create a simple sdist tarball at dist_path, containing just a setup.py, the contents of which are provided by the setup_py string. """ setup_py_file = tarfile.TarInfo(name='setup.py') try: # Python 3 (StringIO gets converted to io module) MemFile = BytesIO except AttributeError: MemFile = StringIO setup_py_bytes = MemFile(setup_py.encode('utf-8')) setup_py_file.size = len(setup_py_bytes.getvalue()) dist = tarfile.open(dist_path, 'w:gz') try: dist.addfile(setup_py_file, fileobj=setup_py_bytes) finally: dist.close() @contextlib.contextmanager def tempdir_context(cd=lambda dir:None): temp_dir = tempfile.mkdtemp() orig_dir = os.getcwd() try: cd(temp_dir) yield temp_dir finally: cd(orig_dir) shutil.rmtree(temp_dir) @contextlib.contextmanager def environment_context(**updates): old_env = os.environ.copy() os.environ.update(updates) try: yield finally: for key in updates: del os.environ[key] os.environ.update(old_env) @contextlib.contextmanager def argv_context(repl): old_argv = sys.argv[:] sys.argv[:] = repl yield sys.argv[:] = old_argv @contextlib.contextmanager def reset_setup_stop_context(): """ When the setuptools tests are run using setup.py test, and then one wants to invoke another setup() command (such as easy_install) within those tests, it's necessary to reset the global variable in distutils.core so that the setup() command will run 
naturally. """ setup_stop_after = distutils.core._setup_stop_after distutils.core._setup_stop_after = None yield distutils.core._setup_stop_after = setup_stop_after @contextlib.contextmanager def quiet_context(): """ Redirect stdout/stderr to StringIO objects to prevent console output from distutils commands. """ old_stdout = sys.stdout old_stderr = sys.stderr new_stdout = sys.stdout = StringIO() new_stderr = sys.stderr = StringIO() try: yield new_stdout, new_stderr finally: new_stdout.seek(0) new_stderr.seek(0) sys.stdout = old_stdout sys.stderr = old_stderr
mit
nyalldawson/QGIS
python/plugins/processing/algs/gdal/ClipRasterByExtent.py
15
6970
# -*- coding: utf-8 -*- """ *************************************************************************** ClipRasterByExtent.py --------------------- Date : September 2013 Copyright : (C) 2013 by Alexander Bruy Email : alexander bruy at gmail dot com *************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * *************************************************************************** """ __author__ = 'Alexander Bruy' __date__ = 'September 2013' __copyright__ = '(C) 2013, Alexander Bruy' import os from qgis.PyQt.QtGui import QIcon from qgis.core import (QgsRasterFileWriter, QgsProcessingException, QgsProcessingParameterDefinition, QgsProcessingParameterRasterLayer, QgsProcessingParameterEnum, QgsProcessingParameterExtent, QgsProcessingParameterString, QgsProcessingParameterNumber, QgsProcessingParameterRasterDestination) from processing.algs.gdal.GdalAlgorithm import GdalAlgorithm from processing.algs.gdal.GdalUtils import GdalUtils pluginPath = os.path.split(os.path.split(os.path.dirname(__file__))[0])[0] class ClipRasterByExtent(GdalAlgorithm): INPUT = 'INPUT' EXTENT = 'PROJWIN' NODATA = 'NODATA' OPTIONS = 'OPTIONS' DATA_TYPE = 'DATA_TYPE' EXTRA = 'EXTRA' OUTPUT = 'OUTPUT' def __init__(self): super().__init__() def initAlgorithm(self, config=None): self.TYPES = [self.tr('Use Input Layer Data Type'), 'Byte', 'Int16', 'UInt16', 'UInt32', 'Int32', 'Float32', 'Float64', 'CInt16', 'CInt32', 'CFloat32', 'CFloat64'] self.addParameter(QgsProcessingParameterRasterLayer(self.INPUT, self.tr('Input layer'))) self.addParameter(QgsProcessingParameterExtent(self.EXTENT, self.tr('Clipping extent'))) self.addParameter(QgsProcessingParameterNumber(self.NODATA, self.tr('Assign a specified nodata value to 
output bands'), type=QgsProcessingParameterNumber.Double, defaultValue=None, optional=True)) options_param = QgsProcessingParameterString(self.OPTIONS, self.tr('Additional creation options'), defaultValue='', optional=True) options_param.setFlags(options_param.flags() | QgsProcessingParameterDefinition.FlagAdvanced) options_param.setMetadata({ 'widget_wrapper': { 'class': 'processing.algs.gdal.ui.RasterOptionsWidget.RasterOptionsWidgetWrapper'}}) self.addParameter(options_param) dataType_param = QgsProcessingParameterEnum(self.DATA_TYPE, self.tr('Output data type'), self.TYPES, allowMultiple=False, defaultValue=0) dataType_param.setFlags(dataType_param.flags() | QgsProcessingParameterDefinition.FlagAdvanced) self.addParameter(dataType_param) extra_param = QgsProcessingParameterString(self.EXTRA, self.tr('Additional command-line parameters'), defaultValue=None, optional=True) extra_param.setFlags(extra_param.flags() | QgsProcessingParameterDefinition.FlagAdvanced) self.addParameter(extra_param) self.addParameter(QgsProcessingParameterRasterDestination(self.OUTPUT, self.tr('Clipped (extent)'))) def name(self): return 'cliprasterbyextent' def displayName(self): return self.tr('Clip raster by extent') def group(self): return self.tr('Raster extraction') def groupId(self): return 'rasterextraction' def icon(self): return QIcon(os.path.join(pluginPath, 'images', 'gdaltools', 'raster-clip.png')) def commandName(self): return "gdal_translate" def getConsoleCommands(self, parameters, context, feedback, executing=True): inLayer = self.parameterAsRasterLayer(parameters, self.INPUT, context) if inLayer is None: raise QgsProcessingException('Invalid input layer {}'.format(parameters[self.INPUT] if self.INPUT in parameters else 'INPUT')) bbox = self.parameterAsExtent(parameters, self.EXTENT, context, inLayer.crs()) if self.NODATA in parameters and parameters[self.NODATA] is not None: nodata = self.parameterAsDouble(parameters, self.NODATA, context) else: nodata = None options = 
self.parameterAsString(parameters, self.OPTIONS, context) out = self.parameterAsOutputLayer(parameters, self.OUTPUT, context) self.setOutputValue(self.OUTPUT, out) arguments = [] arguments.append('-projwin') arguments.append(str(bbox.xMinimum())) arguments.append(str(bbox.yMaximum())) arguments.append(str(bbox.xMaximum())) arguments.append(str(bbox.yMinimum())) if nodata is not None: arguments.append('-a_nodata {}'.format(nodata)) data_type = self.parameterAsEnum(parameters, self.DATA_TYPE, context) if data_type: arguments.append('-ot ' + self.TYPES[data_type]) arguments.append('-of') arguments.append(QgsRasterFileWriter.driverForExtension(os.path.splitext(out)[1])) if options: arguments.extend(GdalUtils.parseCreationOptions(options)) if self.EXTRA in parameters and parameters[self.EXTRA] not in (None, ''): extra = self.parameterAsString(parameters, self.EXTRA, context) arguments.append(extra) arguments.append(inLayer.source()) arguments.append(out) return [self.commandName(), GdalUtils.escapeAndJoin(arguments)]
gpl-2.0
AlessandroZ/LaZagne
Mac/lazagne/config/crypto/pyDes.py
1
32333
############################################################################# # Documentation # ############################################################################# # Author: Todd Whiteman # Date: 28th April, 2010 # Version: 2.0.1 # License: MIT # Homepage: http://twhiteman.netfirms.com/des.html # # This is a pure python implementation of the DES encryption algorithm. # It's pure python to avoid portability issues, since most DES # implementations are programmed in C (for performance reasons). # # Triple DES class is also implemented, utilizing the DES base. Triple DES # is either DES-EDE3 with a 24 byte key, or DES-EDE2 with a 16 byte key. # # See the README.txt that should come with this python module for the # implementation methods used. # # Thanks to: # * David Broadwell for ideas, comments and suggestions. # * Mario Wolff for pointing out and debugging some triple des CBC errors. # * Santiago Palladino for providing the PKCS5 padding technique. # * Shaya for correcting the PAD_PKCS5 triple des CBC errors. # """A pure python implementation of the DES and TRIPLE DES encryption algorithms. Class initialization -------------------- pyDes.des(key, [mode], [IV], [pad], [padmode]) pyDes.triple_des(key, [mode], [IV], [pad], [padmode]) key -> Bytes containing the encryption key. 8 bytes for DES, 16 or 24 bytes for Triple DES mode -> Optional argument for encryption type, can be either pyDes.ECB (Electronic Code Book) or pyDes.CBC (Cypher Block Chaining) IV -> Optional Initial Value bytes, must be supplied if using CBC mode. Length must be 8 bytes. pad -> Optional argument, set the pad character (PAD_NORMAL) to use during all encrypt/decrypt operations done with this instance. padmode -> Optional argument, set the padding mode (PAD_NORMAL or PAD_PKCS5) to use during all encrypt/decrypt operations done with this instance. 
I recommend to use PAD_PKCS5 padding, as then you never need to worry about any padding issues, as the padding can be removed unambiguously upon decrypting data that was encrypted using PAD_PKCS5 padmode. Common methods -------------- encrypt(data, [pad], [padmode]) decrypt(data, [pad], [padmode]) data -> Bytes to be encrypted/decrypted pad -> Optional argument. Only when using padmode of PAD_NORMAL. For encryption, adds this characters to the end of the data block when data is not a multiple of 8 bytes. For decryption, will remove the trailing characters that match this pad character from the last 8 bytes of the unencrypted data block. padmode -> Optional argument, set the padding mode, must be one of PAD_NORMAL or PAD_PKCS5). Defaults to PAD_NORMAL. Example ------- from pyDes import * data = "Please encrypt my data" k = des("DESCRYPT", CBC, "\0\0\0\0\0\0\0\0", pad=None, padmode=PAD_PKCS5) # For Python3, you'll need to use bytes, i.e.: # data = b"Please encrypt my data" # k = des(b"DESCRYPT", CBC, b"\0\0\0\0\0\0\0\0", pad=None, padmode=PAD_PKCS5) d = k.encrypt(data) print "Encrypted: %r" % d print "Decrypted: %r" % k.decrypt(d) assert k.decrypt(d, padmode=PAD_PKCS5) == data See the module source (pyDes.py) for more examples of use. You can also run the pyDes.py file without and arguments to see a simple test. Note: This code was not written for high-end systems needing a fast implementation, but rather a handy portable solution with small usage. """ import sys # _pythonMajorVersion is used to handle Python2 and Python3 differences. _pythonMajorVersion = sys.version_info[0] # Modes of crypting / cyphering ECB = 0 CBC = 1 # Modes of padding PAD_NORMAL = 1 PAD_PKCS5 = 2 # PAD_PKCS5: is a method that will unambiguously remove all padding # characters after decryption, when originally encrypted with # this padding mode. # For a good description of the PKCS5 padding technique, see: # http://www.faqs.org/rfcs/rfc1423.html # The base class shared by des and triple des. 
class _baseDes(object): def __init__(self, mode=ECB, IV=None, pad=None, padmode=PAD_NORMAL): if IV: IV = self._guardAgainstUnicode(IV) if pad: pad = self._guardAgainstUnicode(pad) self.block_size = 8 # Sanity checking of arguments. if pad and padmode == PAD_PKCS5: raise ValueError("Cannot use a pad character with PAD_PKCS5") if IV and len(IV) != self.block_size: raise ValueError("Invalid Initial Value (IV), must be a multiple of " + str(self.block_size) + " bytes") # Set the passed in variables self._mode = mode self._iv = IV self._padding = pad self._padmode = padmode def getKey(self): """getKey() -> bytes""" return self.__key def setKey(self, key): """Will set the crypting key for this object.""" key = self._guardAgainstUnicode(key) self.__key = key def getMode(self): """getMode() -> pyDes.ECB or pyDes.CBC""" return self._mode def setMode(self, mode): """Sets the type of crypting mode, pyDes.ECB or pyDes.CBC""" self._mode = mode def getPadding(self): """getPadding() -> bytes of length 1. Padding character.""" return self._padding def setPadding(self, pad): """setPadding() -> bytes of length 1. Padding character.""" if pad is not None: pad = self._guardAgainstUnicode(pad) self._padding = pad def getPadMode(self): """getPadMode() -> pyDes.PAD_NORMAL or pyDes.PAD_PKCS5""" return self._padmode def setPadMode(self, mode): """Sets the type of padding mode, pyDes.PAD_NORMAL or pyDes.PAD_PKCS5""" self._padmode = mode def getIV(self): """getIV() -> bytes""" return self._iv def setIV(self, IV): """Will set the Initial Value, used in conjunction with CBC mode""" if not IV or len(IV) != self.block_size: raise ValueError("Invalid Initial Value (IV), must be a multiple of " + str(self.block_size) + " bytes") IV = self._guardAgainstUnicode(IV) self._iv = IV def _padData(self, data, pad, padmode): # Pad data depending on the mode if padmode is None: # Get the default padding mode. 
padmode = self.getPadMode() if pad and padmode == PAD_PKCS5: raise ValueError("Cannot use a pad character with PAD_PKCS5") if padmode == PAD_NORMAL: if len(data) % self.block_size == 0: # No padding required. return data if not pad: # Get the default padding. pad = self.getPadding() if not pad: raise ValueError("Data must be a multiple of " + str( self.block_size) + " bytes in length. Use padmode=PAD_PKCS5 or set the pad character.") data += (self.block_size - (len(data) % self.block_size)) * pad elif padmode == PAD_PKCS5: pad_len = 8 - (len(data) % self.block_size) if _pythonMajorVersion < 3: data += pad_len * chr(pad_len) else: data += bytes([pad_len] * pad_len) return data def _unpadData(self, data, pad, padmode): # Unpad data depending on the mode. if not data: return data if pad and padmode == PAD_PKCS5: raise ValueError("Cannot use a pad character with PAD_PKCS5") if padmode is None: # Get the default padding mode. padmode = self.getPadMode() if padmode == PAD_NORMAL: if not pad: # Get the default padding. pad = self.getPadding() if pad: data = data[:-self.block_size] + \ data[-self.block_size:].rstrip(pad) elif padmode == PAD_PKCS5: if _pythonMajorVersion < 3: pad_len = ord(data[-1]) else: pad_len = data[-1] data = data[:-pad_len] return data def _guardAgainstUnicode(self, data): # Only accept byte strings or ascii unicode values, otherwise # there is no way to correctly decode the data into bytes. if _pythonMajorVersion < 3: if isinstance(data, unicode): # noqa raise ValueError("pyDes can only work with bytes, not Unicode strings.") else: if isinstance(data, str): # Only accept ascii unicode values. 
try: return data.encode('ascii') except UnicodeEncodeError: pass raise ValueError("pyDes can only work with encoded strings, not Unicode.") return data ############################################################################# # DES # ############################################################################# class des(_baseDes): """DES encryption/decrytpion class Supports ECB (Electronic Code Book) and CBC (Cypher Block Chaining) modes. pyDes.des(key,[mode], [IV]) key -> Bytes containing the encryption key, must be exactly 8 bytes mode -> Optional argument for encryption type, can be either pyDes.ECB (Electronic Code Book), pyDes.CBC (Cypher Block Chaining) IV -> Optional Initial Value bytes, must be supplied if using CBC mode. Must be 8 bytes in length. pad -> Optional argument, set the pad character (PAD_NORMAL) to use during all encrypt/decrypt operations done with this instance. padmode -> Optional argument, set the padding mode (PAD_NORMAL or PAD_PKCS5) to use during all encrypt/decrypt operations done with this instance. 
""" # Permutation and translation tables for DES __pc1 = [56, 48, 40, 32, 24, 16, 8, 0, 57, 49, 41, 33, 25, 17, 9, 1, 58, 50, 42, 34, 26, 18, 10, 2, 59, 51, 43, 35, 62, 54, 46, 38, 30, 22, 14, 6, 61, 53, 45, 37, 29, 21, 13, 5, 60, 52, 44, 36, 28, 20, 12, 4, 27, 19, 11, 3 ] # number left rotations of pc1 __left_rotations = [ 1, 1, 2, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 1 ] # permuted choice key (table 2) __pc2 = [ 13, 16, 10, 23, 0, 4, 2, 27, 14, 5, 20, 9, 22, 18, 11, 3, 25, 7, 15, 6, 26, 19, 12, 1, 40, 51, 30, 36, 46, 54, 29, 39, 50, 44, 32, 47, 43, 48, 38, 55, 33, 52, 45, 41, 49, 35, 28, 31 ] # initial permutation IP __ip = [57, 49, 41, 33, 25, 17, 9, 1, 59, 51, 43, 35, 27, 19, 11, 3, 61, 53, 45, 37, 29, 21, 13, 5, 63, 55, 47, 39, 31, 23, 15, 7, 56, 48, 40, 32, 24, 16, 8, 0, 58, 50, 42, 34, 26, 18, 10, 2, 60, 52, 44, 36, 28, 20, 12, 4, 62, 54, 46, 38, 30, 22, 14, 6 ] # Expansion table for turning 32 bit blocks into 48 bits __expansion_table = [ 31, 0, 1, 2, 3, 4, 3, 4, 5, 6, 7, 8, 7, 8, 9, 10, 11, 12, 11, 12, 13, 14, 15, 16, 15, 16, 17, 18, 19, 20, 19, 20, 21, 22, 23, 24, 23, 24, 25, 26, 27, 28, 27, 28, 29, 30, 31, 0 ] # The (in)famous S-boxes __sbox = [ # S1 [14, 4, 13, 1, 2, 15, 11, 8, 3, 10, 6, 12, 5, 9, 0, 7, 0, 15, 7, 4, 14, 2, 13, 1, 10, 6, 12, 11, 9, 5, 3, 8, 4, 1, 14, 8, 13, 6, 2, 11, 15, 12, 9, 7, 3, 10, 5, 0, 15, 12, 8, 2, 4, 9, 1, 7, 5, 11, 3, 14, 10, 0, 6, 13], # S2 [15, 1, 8, 14, 6, 11, 3, 4, 9, 7, 2, 13, 12, 0, 5, 10, 3, 13, 4, 7, 15, 2, 8, 14, 12, 0, 1, 10, 6, 9, 11, 5, 0, 14, 7, 11, 10, 4, 13, 1, 5, 8, 12, 6, 9, 3, 2, 15, 13, 8, 10, 1, 3, 15, 4, 2, 11, 6, 7, 12, 0, 5, 14, 9], # S3 [10, 0, 9, 14, 6, 3, 15, 5, 1, 13, 12, 7, 11, 4, 2, 8, 13, 7, 0, 9, 3, 4, 6, 10, 2, 8, 5, 14, 12, 11, 15, 1, 13, 6, 4, 9, 8, 15, 3, 0, 11, 1, 2, 12, 5, 10, 14, 7, 1, 10, 13, 0, 6, 9, 8, 7, 4, 15, 14, 3, 11, 5, 2, 12], # S4 [7, 13, 14, 3, 0, 6, 9, 10, 1, 2, 8, 5, 11, 12, 4, 15, 13, 8, 11, 5, 6, 15, 0, 3, 4, 7, 2, 12, 1, 10, 14, 9, 10, 6, 9, 0, 12, 11, 7, 13, 15, 1, 3, 14, 
5, 2, 8, 4, 3, 15, 0, 6, 10, 1, 13, 8, 9, 4, 5, 11, 12, 7, 2, 14], # S5 [2, 12, 4, 1, 7, 10, 11, 6, 8, 5, 3, 15, 13, 0, 14, 9, 14, 11, 2, 12, 4, 7, 13, 1, 5, 0, 15, 10, 3, 9, 8, 6, 4, 2, 1, 11, 10, 13, 7, 8, 15, 9, 12, 5, 6, 3, 0, 14, 11, 8, 12, 7, 1, 14, 2, 13, 6, 15, 0, 9, 10, 4, 5, 3], # S6 [12, 1, 10, 15, 9, 2, 6, 8, 0, 13, 3, 4, 14, 7, 5, 11, 10, 15, 4, 2, 7, 12, 9, 5, 6, 1, 13, 14, 0, 11, 3, 8, 9, 14, 15, 5, 2, 8, 12, 3, 7, 0, 4, 10, 1, 13, 11, 6, 4, 3, 2, 12, 9, 5, 15, 10, 11, 14, 1, 7, 6, 0, 8, 13], # S7 [4, 11, 2, 14, 15, 0, 8, 13, 3, 12, 9, 7, 5, 10, 6, 1, 13, 0, 11, 7, 4, 9, 1, 10, 14, 3, 5, 12, 2, 15, 8, 6, 1, 4, 11, 13, 12, 3, 7, 14, 10, 15, 6, 8, 0, 5, 9, 2, 6, 11, 13, 8, 1, 4, 10, 7, 9, 5, 0, 15, 14, 2, 3, 12], # S8 [13, 2, 8, 4, 6, 15, 11, 1, 10, 9, 3, 14, 5, 0, 12, 7, 1, 15, 13, 8, 10, 3, 7, 4, 12, 5, 6, 11, 0, 14, 9, 2, 7, 11, 4, 1, 9, 12, 14, 2, 0, 6, 10, 13, 15, 3, 5, 8, 2, 1, 14, 7, 4, 10, 8, 13, 15, 12, 9, 0, 3, 5, 6, 11], ] # 32-bit permutation function P used on the output of the S-boxes __p = [ 15, 6, 19, 20, 28, 11, 27, 16, 0, 14, 22, 25, 4, 17, 30, 9, 1, 7, 23, 13, 31, 26, 2, 8, 18, 12, 29, 5, 21, 10, 3, 24 ] # final permutation IP^-1 __fp = [ 39, 7, 47, 15, 55, 23, 63, 31, 38, 6, 46, 14, 54, 22, 62, 30, 37, 5, 45, 13, 53, 21, 61, 29, 36, 4, 44, 12, 52, 20, 60, 28, 35, 3, 43, 11, 51, 19, 59, 27, 34, 2, 42, 10, 50, 18, 58, 26, 33, 1, 41, 9, 49, 17, 57, 25, 32, 0, 40, 8, 48, 16, 56, 24 ] # Type of crypting being done ENCRYPT = 0x00 DECRYPT = 0x01 # Initialisation def __init__(self, key, mode=ECB, IV=None, pad=None, padmode=PAD_NORMAL): # Sanity checking of arguments. if len(key) != 8: raise ValueError("Invalid DES key size. Key must be exactly 8 bytes long.") _baseDes.__init__(self, mode, IV, pad, padmode) self.key_size = 8 self.L = [] self.R = [] self.Kn = [[0] * 48] * 16 # 16 48-bit keys (K1 - K16) self.final = [] self.setKey(key) def setKey(self, key): """Will set the crypting key for this object. 
Must be 8 bytes.""" _baseDes.setKey(self, key) self.__create_sub_keys() def __String_to_BitList(self, data): """Turn the string data, into a list of bits (1, 0)'s""" if _pythonMajorVersion < 3: # Turn the strings into integers. Python 3 uses a bytes # class, which already has this behaviour. data = [ord(c) for c in data] l = len(data) * 8 result = [0] * l pos = 0 for ch in data: i = 7 while i >= 0: if ch & (1 << i) != 0: result[pos] = 1 else: result[pos] = 0 pos += 1 i -= 1 return result def __BitList_to_String(self, data): """Turn the list of bits -> data, into a string""" result = [] pos = 0 c = 0 while pos < len(data): c += data[pos] << (7 - (pos % 8)) if (pos % 8) == 7: result.append(c) c = 0 pos += 1 if _pythonMajorVersion < 3: return ''.join([chr(c) for c in result]) else: return bytes(result) def __permutate(self, table, block): """Permutate this block with the specified table""" return list(map(lambda x: block[x], table)) # Transform the secret key, so that it is ready for data processing # Create the 16 subkeys, K[1] - K[16] def __create_sub_keys(self): """Create the 16 subkeys K[1] to K[16] from the given key""" key = self.__permutate(des.__pc1, self.__String_to_BitList(self.getKey())) i = 0 # Split into Left and Right sections self.L = key[:28] self.R = key[28:] while i < 16: j = 0 # Perform circular left shifts while j < des.__left_rotations[i]: self.L.append(self.L[0]) del self.L[0] self.R.append(self.R[0]) del self.R[0] j += 1 # Create one of the 16 subkeys through pc2 permutation self.Kn[i] = self.__permutate(des.__pc2, self.L + self.R) i += 1 # Main part of the encryption algorithm, the number cruncher :) def __des_crypt(self, block, crypt_type): """Crypt the block of data through DES bit-manipulation""" block = self.__permutate(des.__ip, block) self.L = block[:32] self.R = block[32:] # Encryption starts from Kn[1] through to Kn[16] if crypt_type == des.ENCRYPT: iteration = 0 iteration_adjustment = 1 # Decryption starts from Kn[16] down to Kn[1] 
else: iteration = 15 iteration_adjustment = -1 i = 0 while i < 16: # Make a copy of R[i-1], this will later become L[i] tempR = self.R[:] # Permutate R[i - 1] to start creating R[i] self.R = self.__permutate(des.__expansion_table, self.R) # Exclusive or R[i - 1] with K[i], create B[1] to B[8] whilst here self.R = list(map(lambda x, y: x ^ y, self.R, self.Kn[iteration])) B = [self.R[:6], self.R[6:12], self.R[12:18], self.R[18:24], self.R[24:30], self.R[30:36], self.R[36:42], self.R[42:]] # Optimization: Replaced below commented code with above # j = 0 # B = [] # while j < len(self.R): # self.R[j] = self.R[j] ^ self.Kn[iteration][j] # j += 1 # if j % 6 == 0: # B.append(self.R[j-6:j]) # Permutate B[1] to B[8] using the S-Boxes j = 0 Bn = [0] * 32 pos = 0 while j < 8: # Work out the offsets m = (B[j][0] << 1) + B[j][5] n = (B[j][1] << 3) + (B[j][2] << 2) + (B[j][3] << 1) + B[j][4] # Find the permutation value v = des.__sbox[j][(m << 4) + n] # Turn value into bits, add it to result: Bn Bn[pos] = (v & 8) >> 3 Bn[pos + 1] = (v & 4) >> 2 Bn[pos + 2] = (v & 2) >> 1 Bn[pos + 3] = v & 1 pos += 4 j += 1 # Permutate the concatination of B[1] to B[8] (Bn) self.R = self.__permutate(des.__p, Bn) # Xor with L[i - 1] self.R = list(map(lambda x, y: x ^ y, self.R, self.L)) # Optimization: This now replaces the below commented code # j = 0 # while j < len(self.R): # self.R[j] = self.R[j] ^ self.L[j] # j += 1 # L[i] becomes R[i - 1] self.L = tempR i += 1 iteration += iteration_adjustment # Final permutation of R[16]L[16] self.final = self.__permutate(des.__fp, self.R + self.L) return self.final # Data to be encrypted/decrypted def crypt(self, data, crypt_type): """Crypt the data in blocks, running it through des_crypt()""" # Error check the data if not data: return '' if len(data) % self.block_size != 0: if crypt_type == des.DECRYPT: # Decryption must work on 8 byte blocks raise ValueError( "Invalid data length, data must be a multiple of " + str(self.block_size) + " bytes\n.") if not 
self.getPadding(): raise ValueError("Invalid data length, data must be a multiple of " + str( self.block_size) + " bytes\n. Try setting the optional padding character") else: data += (self.block_size - (len(data) % self.block_size)) * self.getPadding() # print "Len of data: %f" % (len(data) / self.block_size) if self.getMode() == CBC: if self.getIV(): iv = self.__String_to_BitList(self.getIV()) else: raise ValueError("For CBC mode, you must supply the Initial Value (IV) for ciphering") # Split the data into blocks, crypting each one seperately i = 0 dict = {} result = [] # cached = 0 # lines = 0 while i < len(data): # Test code for caching encryption results # lines += 1 # if dict.has_key(data[i:i+8]): # print "Cached result for: %s" % data[i:i+8] # cached += 1 # result.append(dict[data[i:i+8]]) # i += 8 # continue block = self.__String_to_BitList(data[i:i + 8]) # Xor with IV if using CBC mode if self.getMode() == CBC: if crypt_type == des.ENCRYPT: block = list(map(lambda x, y: x ^ y, block, iv)) # j = 0 # while j < len(block): # block[j] = block[j] ^ iv[j] # j += 1 processed_block = self.__des_crypt(block, crypt_type) if crypt_type == des.DECRYPT: processed_block = list(map(lambda x, y: x ^ y, processed_block, iv)) # j = 0 # while j < len(processed_block): # processed_block[j] = processed_block[j] ^ iv[j] # j += 1 iv = block else: iv = processed_block else: processed_block = self.__des_crypt(block, crypt_type) # Add the resulting crypted block to our list # d = self.__BitList_to_String(processed_block) # result.append(d) result.append(self.__BitList_to_String(processed_block)) # dict[data[i:i+8]] = d i += 8 # print "Lines: %d, cached: %d" % (lines, cached) # Return the full crypted string if _pythonMajorVersion < 3: return ''.join(result) else: return bytes.fromhex('').join(result) def encrypt(self, data, pad=None, padmode=None): """encrypt(data, [pad], [padmode]) -> bytes data : Bytes to be encrypted pad : Optional argument for encryption padding. 
Must only be one byte padmode : Optional argument for overriding the padding mode. The data must be a multiple of 8 bytes and will be encrypted with the already specified key. Data does not have to be a multiple of 8 bytes if the padding character is supplied, or the padmode is set to PAD_PKCS5, as bytes will then added to ensure the be padded data is a multiple of 8 bytes. """ data = self._guardAgainstUnicode(data) if pad is not None: pad = self._guardAgainstUnicode(pad) data = self._padData(data, pad, padmode) return self.crypt(data, des.ENCRYPT) def decrypt(self, data, pad=None, padmode=None): """decrypt(data, [pad], [padmode]) -> bytes data : Bytes to be decrypted pad : Optional argument for decryption padding. Must only be one byte padmode : Optional argument for overriding the padding mode. The data must be a multiple of 8 bytes and will be decrypted with the already specified key. In PAD_NORMAL mode, if the optional padding character is supplied, then the un-encrypted data will have the padding characters removed from the end of the bytes. This pad removal only occurs on the last 8 bytes of the data (last data block). In PAD_PKCS5 mode, the special padding end markers will be removed from the data after decrypting. """ data = self._guardAgainstUnicode(data) if pad is not None: pad = self._guardAgainstUnicode(pad) data = self.crypt(data, des.DECRYPT) return self._unpadData(data, pad, padmode) ############################################################################# # Triple DES # ############################################################################# class triple_des(_baseDes): """Triple DES encryption/decrytpion class This algorithm uses the DES-EDE3 (when a 24 byte key is supplied) or the DES-EDE2 (when a 16 byte key is supplied) encryption methods. Supports ECB (Electronic Code Book) and CBC (Cypher Block Chaining) modes. 
pyDes.des(key, [mode], [IV]) key -> Bytes containing the encryption key, must be either 16 or 24 bytes long mode -> Optional argument for encryption type, can be either pyDes.ECB (Electronic Code Book), pyDes.CBC (Cypher Block Chaining) IV -> Optional Initial Value bytes, must be supplied if using CBC mode. Must be 8 bytes in length. pad -> Optional argument, set the pad character (PAD_NORMAL) to use during all encrypt/decrypt operations done with this instance. padmode -> Optional argument, set the padding mode (PAD_NORMAL or PAD_PKCS5) to use during all encrypt/decrypt operations done with this instance. """ def __init__(self, key, mode=ECB, IV=None, pad=None, padmode=PAD_NORMAL): _baseDes.__init__(self, mode, IV, pad, padmode) self.setKey(key) def setKey(self, key): """Will set the crypting key for this object. Either 16 or 24 bytes long.""" self.key_size = 24 # Use DES-EDE3 mode if len(key) != self.key_size: if len(key) == 16: # Use DES-EDE2 mode self.key_size = 16 else: raise ValueError("Invalid triple DES key size. Key must be either 16 or 24 bytes long") if self.getMode() == CBC: if not self.getIV(): # Use the first 8 bytes of the key self._iv = key[:self.block_size] if len(self.getIV()) != self.block_size: raise ValueError("Invalid IV, must be 8 bytes in length") self.__key1 = des(key[:8], self._mode, self._iv, self._padding, self._padmode) self.__key2 = des(key[8:16], self._mode, self._iv, self._padding, self._padmode) if self.key_size == 16: self.__key3 = self.__key1 else: self.__key3 = des(key[16:], self._mode, self._iv, self._padding, self._padmode) _baseDes.setKey(self, key) # Override setter methods to work on all 3 keys. def setMode(self, mode): """Sets the type of crypting mode, pyDes.ECB or pyDes.CBC""" _baseDes.setMode(self, mode) for key in (self.__key1, self.__key2, self.__key3): key.setMode(mode) def setPadding(self, pad): """setPadding() -> bytes of length 1. 
Padding character.""" _baseDes.setPadding(self, pad) for key in (self.__key1, self.__key2, self.__key3): key.setPadding(pad) def setPadMode(self, mode): """Sets the type of padding mode, pyDes.PAD_NORMAL or pyDes.PAD_PKCS5""" _baseDes.setPadMode(self, mode) for key in (self.__key1, self.__key2, self.__key3): key.setPadMode(mode) def setIV(self, IV): """Will set the Initial Value, used in conjunction with CBC mode""" _baseDes.setIV(self, IV) for key in (self.__key1, self.__key2, self.__key3): key.setIV(IV) def encrypt(self, data, pad=None, padmode=None): """encrypt(data, [pad], [padmode]) -> bytes data : bytes to be encrypted pad : Optional argument for encryption padding. Must only be one byte padmode : Optional argument for overriding the padding mode. The data must be a multiple of 8 bytes and will be encrypted with the already specified key. Data does not have to be a multiple of 8 bytes if the padding character is supplied, or the padmode is set to PAD_PKCS5, as bytes will then added to ensure the be padded data is a multiple of 8 bytes. """ ENCRYPT = des.ENCRYPT DECRYPT = des.DECRYPT data = self._guardAgainstUnicode(data) if pad is not None: pad = self._guardAgainstUnicode(pad) # Pad the data accordingly. 
data = self._padData(data, pad, padmode) if self.getMode() == CBC: self.__key1.setIV(self.getIV()) self.__key2.setIV(self.getIV()) self.__key3.setIV(self.getIV()) i = 0 result = [] while i < len(data): block = self.__key1.crypt(data[i:i + 8], ENCRYPT) block = self.__key2.crypt(block, DECRYPT) block = self.__key3.crypt(block, ENCRYPT) self.__key1.setIV(block) self.__key2.setIV(block) self.__key3.setIV(block) result.append(block) i += 8 if _pythonMajorVersion < 3: return ''.join(result) else: return bytes.fromhex('').join(result) else: data = self.__key1.crypt(data, ENCRYPT) data = self.__key2.crypt(data, DECRYPT) return self.__key3.crypt(data, ENCRYPT) def decrypt(self, data, pad=None, padmode=None): """decrypt(data, [pad], [padmode]) -> bytes data : bytes to be encrypted pad : Optional argument for decryption padding. Must only be one byte padmode : Optional argument for overriding the padding mode. The data must be a multiple of 8 bytes and will be decrypted with the already specified key. In PAD_NORMAL mode, if the optional padding character is supplied, then the un-encrypted data will have the padding characters removed from the end of the bytes. This pad removal only occurs on the last 8 bytes of the data (last data block). In PAD_PKCS5 mode, the special padding end markers will be removed from the data after decrypting, no pad character is required for PAD_PKCS5. 
""" ENCRYPT = des.ENCRYPT DECRYPT = des.DECRYPT data = self._guardAgainstUnicode(data) if pad is not None: pad = self._guardAgainstUnicode(pad) if self.getMode() == CBC: self.__key1.setIV(self.getIV()) self.__key2.setIV(self.getIV()) self.__key3.setIV(self.getIV()) i = 0 result = [] while i < len(data): iv = data[i:i + 8] block = self.__key3.crypt(iv, DECRYPT) block = self.__key2.crypt(block, ENCRYPT) block = self.__key1.crypt(block, DECRYPT) self.__key1.setIV(iv) self.__key2.setIV(iv) self.__key3.setIV(iv) result.append(block) i += 8 if _pythonMajorVersion < 3: data = ''.join(result) else: data = bytes.fromhex('').join(result) else: data = self.__key3.crypt(data, DECRYPT) data = self.__key2.crypt(data, ENCRYPT) data = self.__key1.crypt(data, DECRYPT) return self._unpadData(data, pad, padmode)
lgpl-3.0
Intel-Corporation/tensorflow
tensorflow/python/autograph/pyct/qual_names.py
24
8125
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utilities for manipulating qualified names. A qualified name is a uniform way to refer to simple (e.g. 'foo') and composite (e.g. 'foo.bar') syntactic symbols. This is *not* related to the __qualname__ attribute used by inspect, which refers to scopes. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import gast from tensorflow.python.autograph.pyct import anno from tensorflow.python.autograph.pyct import parser class Symbol(collections.namedtuple('Symbol', ['name'])): """Represents a Python symbol.""" class StringLiteral(collections.namedtuple('StringLiteral', ['value'])): """Represents a Python string literal.""" def __str__(self): return '\'%s\'' % self.value def __repr__(self): return str(self) class NumberLiteral(collections.namedtuple('NumberLiteral', ['value'])): """Represents a Python numeric literal.""" def __str__(self): return '%s' % self.value def __repr__(self): return str(self) # TODO(mdan): Use subclasses to remove the has_attr has_subscript booleans. 
class QN(object): """Represents a qualified name.""" def __init__(self, base, attr=None, subscript=None): if attr is not None and subscript is not None: raise ValueError('A QN can only be either an attr or a subscript, not ' 'both: attr={}, subscript={}.'.format(attr, subscript)) self._has_attr = False self._has_subscript = False if attr is not None: if not isinstance(base, QN): raise ValueError( 'for attribute QNs, base must be a QN; got instead "%s"' % base) if not isinstance(attr, str): raise ValueError('attr may only be a string; got instead "%s"' % attr) self._parent = base # TODO(mdan): Get rid of the tuple - it can only have 1 or 2 elements now. self.qn = (base, attr) self._has_attr = True elif subscript is not None: if not isinstance(base, QN): raise ValueError('For subscript QNs, base must be a QN.') self._parent = base self.qn = (base, subscript) self._has_subscript = True else: if not isinstance(base, (str, StringLiteral, NumberLiteral)): # TODO(mdan): Require Symbol instead of string. raise ValueError( 'for simple QNs, base must be a string or a Literal object;' ' got instead "%s"' % type(base)) assert '.' not in base and '[' not in base and ']' not in base self._parent = None self.qn = (base,) def is_symbol(self): return isinstance(self.qn[0], str) def is_simple(self): return len(self.qn) <= 1 def is_composite(self): return len(self.qn) > 1 def has_subscript(self): return self._has_subscript def has_attr(self): return self._has_attr @property def parent(self): if self._parent is None: raise ValueError('Cannot get parent of simple name "%s".' % self.qn[0]) return self._parent @property def owner_set(self): """Returns all the symbols (simple or composite) that own this QN. In other words, if this symbol was modified, the symbols in the owner set may also be affected. 
Examples: 'a.b[c.d]' has two owners, 'a' and 'a.b' """ owners = set() if self.has_attr() or self.has_subscript(): owners.add(self.parent) owners.update(self.parent.owner_set) return owners @property def support_set(self): """Returns the set of simple symbols that this QN relies on. This would be the smallest set of symbols necessary for the QN to statically resolve (assuming properties and index ranges are verified at runtime). Examples: 'a.b' has only one support symbol, 'a' 'a[i]' has two support symbols, 'a' and 'i' """ # TODO(mdan): This might be the set of Name nodes in the AST. Track those? roots = set() if self.has_attr(): roots.update(self.parent.support_set) elif self.has_subscript(): roots.update(self.parent.support_set) roots.update(self.qn[1].support_set) else: roots.add(self) return roots def __hash__(self): return hash(self.qn + (self._has_attr, self._has_subscript)) def __eq__(self, other): return (isinstance(other, QN) and self.qn == other.qn and self.has_subscript() == other.has_subscript() and self.has_attr() == other.has_attr()) def __str__(self): if self.has_subscript(): return str(self.qn[0]) + '[' + str(self.qn[1]) + ']' if self.has_attr(): return '.'.join(map(str, self.qn)) else: return str(self.qn[0]) def __repr__(self): return str(self) def ssf(self): """Simple symbol form.""" ssfs = [n.ssf() if isinstance(n, QN) else n for n in self.qn] ssf_string = '' for i in range(0, len(self.qn) - 1): if self.has_subscript(): delimiter = '_sub_' else: delimiter = '_' ssf_string += ssfs[i] + delimiter return ssf_string + ssfs[-1] def ast(self): # The caller must adjust the context appropriately. 
if self.has_subscript(): return gast.Subscript(self.parent.ast(), gast.Index(self.qn[-1].ast()), None) if self.has_attr(): return gast.Attribute(self.parent.ast(), self.qn[-1], None) base = self.qn[0] if isinstance(base, str): return gast.Name(base, None, None) elif isinstance(base, StringLiteral): return gast.Str(base.value) elif isinstance(base, NumberLiteral): return gast.Num(base.value) else: assert False, ('the constructor should prevent types other than ' 'str, StringLiteral and NumberLiteral') class QnResolver(gast.NodeTransformer): """Annotates nodes with QN information. Note: Not using NodeAnnos to avoid circular dependencies. """ def visit_Name(self, node): node = self.generic_visit(node) anno.setanno(node, anno.Basic.QN, QN(node.id)) return node def visit_Attribute(self, node): node = self.generic_visit(node) if anno.hasanno(node.value, anno.Basic.QN): anno.setanno(node, anno.Basic.QN, QN(anno.getanno(node.value, anno.Basic.QN), attr=node.attr)) return node def visit_Subscript(self, node): # TODO(mdan): This may no longer apply if we overload getitem. node = self.generic_visit(node) s = node.slice if not isinstance(s, gast.Index): # TODO(mdan): Support range and multi-dimensional indices. # Continuing silently because some demos use these. return node if isinstance(s.value, gast.Num): subscript = QN(NumberLiteral(s.value.n)) elif isinstance(s.value, gast.Str): subscript = QN(StringLiteral(s.value.s)) else: # The index may be an expression, case in which a name doesn't make sense. if anno.hasanno(node.slice.value, anno.Basic.QN): subscript = anno.getanno(node.slice.value, anno.Basic.QN) else: return node if anno.hasanno(node.value, anno.Basic.QN): anno.setanno(node, anno.Basic.QN, QN(anno.getanno(node.value, anno.Basic.QN), subscript=subscript)) return node def resolve(node): return QnResolver().visit(node) def from_str(qn_str): node = parser.parse_expression(qn_str) node = resolve(node) return anno.getanno(node, anno.Basic.QN)
apache-2.0
skoslowski/gnuradio
gr-blocks/python/blocks/qa_vco.py
3
1851
#!/usr/bin/env python # # Copyright 2013 Free Software Foundation, Inc. # # This file is part of GNU Radio # # SPDX-License-Identifier: GPL-3.0-or-later # # from __future__ import division from gnuradio import gr, gr_unittest, blocks import math def sig_source_f(samp_rate, freq, amp, N): t = [float(x) / samp_rate for x in range(N)] y = [amp*math.cos(2.*math.pi*freq*x) for x in t] return y def sig_source_c(samp_rate, freq, amp, N): t = [float(x) / samp_rate for x in range(N)] y = [math.cos(2.*math.pi*freq*x) + \ 1j*math.sin(2.*math.pi*freq*x) for x in t] return y class test_vco(gr_unittest.TestCase): def setUp (self): self.tb = gr.top_block () def tearDown (self): self.tb = None def test_001(self): src_data = 200*[0,] + 200*[0.5,] + 200*[1,] expected_result = 200*[1,] + \ sig_source_f(1, 0.125, 1, 200) + \ sig_source_f(1, 0.25, 1, 200) src = blocks.vector_source_f(src_data) op = blocks.vco_f(1, math.pi / 2.0, 1) dst = blocks.vector_sink_f() self.tb.connect(src, op, dst) self.tb.run() result_data = dst.data() self.assertFloatTuplesAlmostEqual(expected_result, result_data, 5) def test_002(self): src_data = 200*[0,] + 200*[0.5,] + 200*[1,] expected_result = 200*[1,] + \ sig_source_c(1, 0.125, 1, 200) + \ sig_source_c(1, 0.25, 1, 200) src = blocks.vector_source_f(src_data) op = blocks.vco_c(1, math.pi / 2.0, 1) dst = blocks.vector_sink_c() self.tb.connect(src, op, dst) self.tb.run() result_data = dst.data() self.assertComplexTuplesAlmostEqual(expected_result, result_data, 5) if __name__ == '__main__': gr_unittest.run(test_vco, "test_vco.xml")
gpl-3.0
VitalPet/c2c-rd-addons
chricar_sale_internal_shippment/__openerp__.py
4
1728
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # Copyright (C) 2010-2012 ChriCar Beteiligungs- und Beratungs- GmbH (<http://www.camptocamp.at>) # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## { 'sequence': 500, 'name': 'Sale order internal picking', 'version': '0.9', 'category': 'Sales Management', 'description': """ This module adds an extra internal picking to sale order on request Purpose SO -> internal: shiping products to company location at customer site -> out: from company location at customer site to customer """, 'author': 'ChriCar Beteiligungs- und Beratungs- GmbH', 'depends': [ 'base','sale','stock' ], 'data': ['sale_view.xml','wizard/make_ship_internal.xml', ], #'data': ['product_view.xml'], 'demo_xml': [], 'installable': False, 'active': False, } # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
hefen1/chromium
build/android/buildbot/bb_utils.py
124
3235
# Copyright 2013 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import json import optparse import os import pipes import subprocess import sys import bb_annotations sys.path.append(os.path.join(os.path.dirname(__file__), '..')) from pylib import constants TESTING = 'BUILDBOT_TESTING' in os.environ BB_BUILD_DIR = os.path.abspath( os.path.join(os.path.dirname(__file__), os.pardir, os.pardir, os.pardir, os.pardir, os.pardir, os.pardir, os.pardir)) CHROME_SRC = os.path.abspath( os.path.join(os.path.dirname(__file__), '..', '..', '..')) # TODO: Figure out how to merge this with pylib.cmd_helper.OutDirectory(). CHROME_OUT_DIR = os.path.join(CHROME_SRC, 'out') GOMA_DIR = os.environ.get('GOMA_DIR', os.path.join(BB_BUILD_DIR, 'goma')) GSUTIL_PATH = os.path.join(BB_BUILD_DIR, 'third_party', 'gsutil', 'gsutil') def CommandToString(command): """Returns quoted command that can be run in bash shell.""" return ' '.join(map(pipes.quote, command)) def SpawnCmd(command, stdout=None, cwd=CHROME_SRC): """Spawn a process without waiting for termination.""" print '>', CommandToString(command) sys.stdout.flush() if TESTING: class MockPopen(object): @staticmethod def wait(): return 0 @staticmethod def communicate(): return '', '' return MockPopen() return subprocess.Popen(command, cwd=cwd, stdout=stdout) def RunCmd(command, flunk_on_failure=True, halt_on_failure=False, warning_code=constants.WARNING_EXIT_CODE, stdout=None, cwd=CHROME_SRC): """Run a command relative to the chrome source root.""" code = SpawnCmd(command, stdout, cwd).wait() print '<', CommandToString(command) if code != 0: print 'ERROR: process exited with code %d' % code if code != warning_code and flunk_on_failure: bb_annotations.PrintError() else: bb_annotations.PrintWarning() # Allow steps to have both halting (i.e. 1) and non-halting exit codes. 
if code != warning_code and halt_on_failure: print 'FATAL %d != %d' % (code, warning_code) sys.exit(1) return code def GetParser(): def ConvertJson(option, _, value, parser): setattr(parser.values, option.dest, json.loads(value)) parser = optparse.OptionParser() parser.add_option('--build-properties', action='callback', callback=ConvertJson, type='string', default={}, help='build properties in JSON format') parser.add_option('--factory-properties', action='callback', callback=ConvertJson, type='string', default={}, help='factory properties in JSON format') return parser def EncodeProperties(options): return ['--factory-properties=%s' % json.dumps(options.factory_properties), '--build-properties=%s' % json.dumps(options.build_properties)] def RunSteps(steps, step_cmds, options): unknown_steps = set(steps) - set(step for step, _ in step_cmds) if unknown_steps: print >> sys.stderr, 'FATAL: Unknown steps %s' % list(unknown_steps) sys.exit(1) for step, cmd in step_cmds: if step in steps: cmd(options)
bsd-3-clause
CristianBB/SickRage
sickbeard/search_queue.py
6
11064
# Author: Nic Wolfe <nic@wolfeden.ca> # URL: http://code.google.com/p/sickbeard/ # # This file is part of SickRage. # # SickRage is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # SickRage is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with SickRage. If not, see <http://www.gnu.org/licenses/>. import time import traceback import threading import sickbeard from sickbeard import common from sickbeard import logger from sickbeard import generic_queue from sickbeard import search, failed_history, history from sickbeard import ui search_queue_lock = threading.Lock() BACKLOG_SEARCH = 10 DAILY_SEARCH = 20 FAILED_SEARCH = 30 MANUAL_SEARCH = 40 MANUAL_SEARCH_HISTORY = [] MANUAL_SEARCH_HISTORY_SIZE = 100 class SearchQueue(generic_queue.GenericQueue): def __init__(self): generic_queue.GenericQueue.__init__(self) self.queue_name = "SEARCHQUEUE" def is_in_queue(self, show, segment): for cur_item in self.queue: if isinstance(cur_item, BacklogQueueItem) and cur_item.show == show and cur_item.segment == segment: return True return False def is_ep_in_queue(self, segment): for cur_item in self.queue: if isinstance(cur_item, (ManualSearchQueueItem, FailedQueueItem)) and cur_item.segment == segment: return True return False def is_show_in_queue(self, show): for cur_item in self.queue: if isinstance(cur_item, (ManualSearchQueueItem, FailedQueueItem)) and cur_item.show.indexerid == show: return True return False def get_all_ep_from_queue(self, show): ep_obj_list = [] for cur_item in self.queue: if isinstance(cur_item, (ManualSearchQueueItem, 
FailedQueueItem)) and str(cur_item.show.indexerid) == show: ep_obj_list.append(cur_item) return ep_obj_list def pause_backlog(self): self.min_priority = generic_queue.QueuePriorities.HIGH def unpause_backlog(self): self.min_priority = 0 def is_backlog_paused(self): # backlog priorities are NORMAL, this should be done properly somewhere return self.min_priority >= generic_queue.QueuePriorities.NORMAL def is_manualsearch_in_progress(self): # Only referenced in webserve.py, only current running manualsearch or failedsearch is needed!! if isinstance(self.currentItem, (ManualSearchQueueItem, FailedQueueItem)): return True return False def is_backlog_in_progress(self): for cur_item in self.queue + [self.currentItem]: if isinstance(cur_item, BacklogQueueItem): return True return False def is_dailysearch_in_progress(self): for cur_item in self.queue + [self.currentItem]: if isinstance(cur_item, DailySearchQueueItem): return True return False def queue_length(self): length = {'backlog': 0, 'daily': 0, 'manual': 0, 'failed': 0} for cur_item in self.queue: if isinstance(cur_item, DailySearchQueueItem): length['daily'] += 1 elif isinstance(cur_item, BacklogQueueItem): length['backlog'] += 1 elif isinstance(cur_item, ManualSearchQueueItem): length['manual'] += 1 elif isinstance(cur_item, FailedQueueItem): length['failed'] += 1 return length def add_item(self, item): if isinstance(item, DailySearchQueueItem): # daily searches generic_queue.GenericQueue.add_item(self, item) elif isinstance(item, BacklogQueueItem) and not self.is_in_queue(item.show, item.segment): # backlog searches generic_queue.GenericQueue.add_item(self, item) elif isinstance(item, (ManualSearchQueueItem, FailedQueueItem)) and not self.is_ep_in_queue(item.segment): # manual and failed searches generic_queue.GenericQueue.add_item(self, item) else: logger.log(u"Not adding item, it's already in the queue", logger.DEBUG) class DailySearchQueueItem(generic_queue.QueueItem): def __init__(self): self.success = None 
generic_queue.QueueItem.__init__(self, u'Daily Search', DAILY_SEARCH) def run(self): generic_queue.QueueItem.run(self) try: logger.log(u"Beginning daily search for new episodes") foundResults = search.searchForNeededEpisodes() if not len(foundResults): logger.log(u"No needed episodes found") else: for result in foundResults: # just use the first result for now logger.log(u"Downloading " + result.name + " from " + result.provider.name) self.success = search.snatchEpisode(result) # give the CPU a break time.sleep(common.cpu_presets[sickbeard.CPU_PRESET]) generic_queue.QueueItem.finish(self) except Exception: logger.log(traceback.format_exc(), logger.DEBUG) if self.success is None: self.success = False self.finish() class ManualSearchQueueItem(generic_queue.QueueItem): def __init__(self, show, segment, downCurQuality=False): generic_queue.QueueItem.__init__(self, u'Manual Search', MANUAL_SEARCH) self.priority = generic_queue.QueuePriorities.HIGH self.name = 'MANUAL-' + str(show.indexerid) self.success = None self.show = show self.segment = segment self.started = None self.downCurQuality = downCurQuality def run(self): generic_queue.QueueItem.run(self) try: logger.log(u"Beginning manual search for: [" + self.segment.prettyName() + "]") self.started = True searchResult = search.searchProviders(self.show, [self.segment], True, self.downCurQuality) if searchResult: # just use the first result for now logger.log(u"Downloading " + searchResult[0].name + " from " + searchResult[0].provider.name) self.success = search.snatchEpisode(searchResult[0]) # give the CPU a break time.sleep(common.cpu_presets[sickbeard.CPU_PRESET]) else: ui.notifications.message('No downloads were found', "Couldn't find a download for <i>%s</i>" % self.segment.prettyName()) logger.log(u"Unable to find a download for: [" + self.segment.prettyName() + "]") except Exception: logger.log(traceback.format_exc(), logger.DEBUG) ### Keep a list with the 100 last executed searches fifo(MANUAL_SEARCH_HISTORY, 
self, MANUAL_SEARCH_HISTORY_SIZE) if self.success is None: self.success = False self.finish() class BacklogQueueItem(generic_queue.QueueItem): def __init__(self, show, segment): generic_queue.QueueItem.__init__(self, u'Backlog', BACKLOG_SEARCH) self.priority = generic_queue.QueuePriorities.LOW self.name = 'BACKLOG-' + str(show.indexerid) self.success = None self.show = show self.segment = segment def run(self): generic_queue.QueueItem.run(self) if not self.show.paused: try: logger.log(u"Beginning backlog search for: [" + self.show.name + "]") searchResult = search.searchProviders(self.show, self.segment, False) if searchResult: for result in searchResult: # just use the first result for now logger.log(u"Downloading " + result.name + " from " + result.provider.name) search.snatchEpisode(result) # give the CPU a break time.sleep(common.cpu_presets[sickbeard.CPU_PRESET]) else: logger.log(u"No needed episodes found during backlog search for: [" + self.show.name + "]") except Exception: logger.log(traceback.format_exc(), logger.DEBUG) self.finish() class FailedQueueItem(generic_queue.QueueItem): def __init__(self, show, segment, downCurQuality=False): generic_queue.QueueItem.__init__(self, u'Retry', FAILED_SEARCH) self.priority = generic_queue.QueuePriorities.HIGH self.name = 'RETRY-' + str(show.indexerid) self.show = show self.segment = segment self.success = None self.started = None self.downCurQuality = downCurQuality def run(self): generic_queue.QueueItem.run(self) self.started = True try: for epObj in self.segment: logger.log(u"Marking episode as bad: [" + epObj.prettyName() + "]") failed_history.markFailed(epObj) (release, provider) = failed_history.findRelease(epObj) if release: failed_history.logFailed(release) history.logFailed(epObj, release, provider) failed_history.revertEpisode(epObj) logger.log(u"Beginning failed download search for: [" + epObj.prettyName() + "]") # If it is wanted, self.downCurQuality doesnt matter # if it isnt wanted, we need to make 
sure to not overwrite the existing ep that we reverted to! searchResult = search.searchProviders(self.show, self.segment, True, False) if searchResult: for result in searchResult: # just use the first result for now logger.log(u"Downloading " + result.name + " from " + result.provider.name) search.snatchEpisode(result) # give the CPU a break time.sleep(common.cpu_presets[sickbeard.CPU_PRESET]) else: pass # logger.log(u"No valid episode found to retry for: [" + self.segment.prettyName() + "]") except Exception: logger.log(traceback.format_exc(), logger.DEBUG) ### Keep a list with the 100 last executed searches fifo(MANUAL_SEARCH_HISTORY, self, MANUAL_SEARCH_HISTORY_SIZE) if self.success is None: self.success = False self.finish() def fifo(myList, item, maxSize = 100): if len(myList) >= maxSize: myList.pop(0) myList.append(item)
gpl-3.0
gladk/woodem
py/_monkey/aliases.py
1
6327
'''Define various convenience attributes, such as ``Node.dem`` for accessing DEM data of a node (equivalent to ``node->getData<DemData>()`` in c++).''' from woo import core from woo.core import Master import sys, warnings core.Field.nod=core.Field.nodes ## proxy for attribute-like access to Scene.labels ## http://stackoverflow.com/questions/16061041/proxy-class-for-accessing-other-class-items-as-attributes-getitem-infinite class LabelMapperProxy(object): 'Proxy for attribute-like access to :obj:`woo.core.LabelMapper`.' def __init__(self,mapper,prefix=''): self.__dict__['_mapper'],self.__dict__['_prefix']=mapper,prefix def __getattr__(self,key): # some mapper method was requested if key=='__dir__': return lambda: self._mapper.__dir__(self._prefix) if key.startswith('_'): if self._prefix: raise AttributeError('Attributes/methods starting with _ must be obtained from root LabelMapper or proxy (this instance has prefix "'+self._prefix+'")') else: return getattr(self._mapper,key) # submodule requested, return proxy with new prefix if self._mapper._whereIs(self._prefix+key)==core.LabelMapper.inMod: return LabelMapperProxy(self._mapper,prefix=self._prefix+key+'.') # return object return self._mapper[self._prefix+key] def __setattr__(self,key,val): self._mapper[self._prefix+key]=val def __delattr__(self,key): del self._mapper[self._prefix+key] # this is necessary for py3k def __dir__(self): return self._mapper.__dir__(prefix=self._prefix) # def __len__(self): return self._mapper.__len__(prefix=self._prefix) # def _newModule(self,mod): return self._mapper._newModule(self._prefix+mod) def Scene_lab(scene): return LabelMapperProxy(scene.labels,prefix='') core.Scene.lab=property(Scene_lab) ## deprecated classes # if old class name is used, the new object is constructed and a warning is issued about old name being used # keep chronologically ordered, oldest first; script/rename-class.py appends at the end _deprecated={ ('woo.dem','FlexFacet'):('woo.fem','Membrane'), 
('woo.gl','Gl1_FlexFacet'):('woo.gl','Gl1_Membrane'), ('woo.dem','In2_FlexFacet_ElastMat'):('woo.fem','In2_Membrane_ElastMat'), # ('woo.dem','ParticleFactory'):('woo.dem','ParticleInlet'), ('woo.dem','RandomFactory'):('woo.dem','RandomInlet'), ('woo.dem','BoxFactory'):('woo.dem','BoxInlet'), ('woo.dem','CylinderFactory'):('woo.dem','CylinderInlet'), ('woo.dem','BoxFactory2d'):('woo.dem','BoxInlet2d'), ('woo.dem','ConveyorFactory'):('woo.dem','ConveyorInlet'), ('woo.dem','BoxDeleter'):('woo.dem','BoxOutlet'), # ('woo.gl','TraceGlRep'):('woo.dem','TraceGlRep'), ('woo.gl','ScalarRange'):('woo.core','ScalarRange'), ('woo.gl','NodeGlRep'):('woo.core','NodeVisRep'), # ### END_RENAMED_CLASSES_LIST ### (do not delete this line; scripts/rename-class.py uses it } def injectDeprecated(): ''' Inject decprecated classes into woo modules as needed. ''' proxyNamespace={} class warnWrap: def __init__(self,_old,_new): self.old,self.new=_old,_new def __call__(self,*args,**kw): warnings.warn("%s.%s was renamed to %s.%s; update your code!"%(self.old[0],self.old[1],self.new.__module__,self.new.__name__),DeprecationWarning,stacklevel=2); return self.new(*args,**kw) # deprecated names for deprec,curr in _deprecated.items(): # try to import both modules try: mDep,mCurr=sys.modules[deprec[0]],sys.modules[curr[0]] except KeyError: continue # new name not found?! 
try: setattr(mDep,deprec[1],warnWrap(deprec,getattr(mCurr,curr[1]))) except AttributeError: pass injectDeprecated() try: from woo import dem dem.DemField.par=dem.DemField.particles dem.DemField.con=dem.DemField.contacts dem.Particle.mat=dem.Particle.material core.Scene.dem=property(lambda s: dem.DemField.sceneGetField(s)) core.Scene.hasDem=property(lambda s: dem.DemField.sceneHasField(s)) # DemData defines those methods, which are used for transparent access to respective data field core.Node.dem=property(dem.DemData._getDataOnNode,dem.DemData._setDataOnNode) # those are deprecated def deprecWrapper(self,_oldName,_newName,_newFunc,*args,**kw): warnings.warn('The %s function is deprecated, use %s instead.'%(_oldName,_newName),DeprecationWarning,stacklevel=3) return _newFunc(self,*args,**kw) dem.ParticleContainer.append=lambda self, *args,**kw: deprecWrapper(self,'dem.ParticleContainer.append','dem.ParticleContainer.add',dem.ParticleContainer.add,*args,**kw) dem.ParticleContainer.appendClumped=lambda self, *args,**kw: deprecWrapper(self,'dem.ParticleContainer.appendClumped','dem.ParticleContainer.addClumped',dem.ParticleContainer.addClumped,*args,**kw) # nicer names import woo.utils dem.DemField.minimalEngines=staticmethod(woo.utils.defaultEngines) dem.Sphere.make=staticmethod(woo.utils.sphere) dem.Ellipsoid.make=staticmethod(woo.utils.ellipsoid) dem.Capsule.make=staticmethod(woo.utils.capsule) dem.Wall.make=staticmethod(woo.utils.wall) dem.Wall.makeBox=staticmethod(woo.utils.wallBox) dem.InfCylinder.make=staticmethod(woo.utils.infCylinder) dem.Facet.make=staticmethod(woo.utils.facet) dem.Rod.make=staticmethod(woo.utils.rod) dem.Truss.make=staticmethod(woo.utils.truss) except ImportError: core.Scene.hasDem=lambda o: False try: from woo import fem fem.Membrane.make=staticmethod(woo.utils.membrane) fem.Tetra.make=staticmethod(woo.utils.tetra) fem.Tet4.make=staticmethod(woo.utils.tet4) except ImportError: pass try: from woo import sparc core.Scene.sparc=property(lambda 
s: sparc.SparcField.sceneGetField(s)) core.Scene.hasSparc=property(lambda s: sparc.SparcField.sceneHasField(s)) core.Node.sparc=property(sparc.SparcData._getDataOnNode,sparc.SparcData._setDataOnNode) except ImportError: core.Scene.hasSparc=lambda o: False try: import woo.cld core.Scene.clDem=property(lambda s: cld.CLDemField.sceneGetField(s)) core.Scene.hasClDem=property(lambda s: woo.clDem.CLDemField.sceneHasField(s)) core.Node.clDem=property(woo.cld.CLDemData._getDataOnNode,woo.cld.CLDemData._setDataOnNode) except ImportError: core.Scene.hasClDem=lambda o: False try: from woo import gl core.Node.gl=property(gl.GlData._getDataOnNode,gl.GlData._setDataOnNode) except ImportError: pass #try: # from woo import ancf # core.Node.ancf=property(ancf.AncfData._getDataOnNode,ancf.AncfData._setDataOnNode) #except ImportError: pass
gpl-2.0
rjw57/trafficdb
migrations/versions/7dc1a66bb8_initial_db.py
1
1114
"""Initial database migration Revision ID: 7dc1a66bb8 Revises: None Create Date: 2014-09-24 15:36:35.984051 """ # revision identifiers, used by Alembic. revision = '7dc1a66bb8' down_revision = None from alembic import op import sqlalchemy as sa def upgrade(): ### commands auto generated by Alembic - please adjust! ### op.create_table('links', sa.Column('id', sa.Integer(), nullable=False), sa.PrimaryKeyConstraint('id') ) op.create_table('observations', sa.Column('id', sa.Integer(), nullable=False), sa.Column('value', sa.Float(), nullable=True), sa.Column('type', sa.Enum('SPEED', 'FLOW', 'OCCUPANCY', name='observation_types'), nullable=True), sa.Column('observed_at', sa.DateTime(), nullable=True), sa.Column('link_id', sa.Integer(), nullable=True), sa.ForeignKeyConstraint(['link_id'], ['links.id'], ), sa.PrimaryKeyConstraint('id') ) ### end Alembic commands ### def downgrade(): ### commands auto generated by Alembic - please adjust! ### op.drop_table('observations') op.drop_table('links') ### end Alembic commands ###
mit
yuvipanda/paws
images/nbserve/nginx.py
1
6626
#!/usr/bin/python3 import os CONFIG = r""" # Let nginx automatically determine the number of worker processes # to run. This defaults to number of cores on the host. worker_processes auto; # Do not daemonize - we'll either run this under a supervisor # ourselves, or jupyterhub will manage the process, restarting # it when it dies as necessary daemon off; # Set number of connections accepted per worker events { worker_connections 768; } # This needs to be in 'main' since otherwise nginx # will try to write to /var/log/nginx/error.log and failed # because it does not have permissions error_log stderr info; # We do not really need / care about a pidfile pid /dev/null; http { sendfile on; tcp_nopush on; tcp_nodelay on; keepalive_timeout 65; # Some complex notebooks take a long time to render proxy_read_timeout 180s; proxy_connect_timeout 180s; uwsgi_read_timeout 180s; types_hash_max_size 2048; # server_tokens off; # These are varilous temp file paths, many that we do not use. # They are by default set to /var/lib/nginx/*, which causes # problems when running as non-root, as we are here. So we # shall set them all to /tmp. FIXME: Find proper paths for # these somewhere (perhaps on current-dir?) client_body_temp_path /tmp; proxy_temp_path /tmp; fastcgi_temp_path /tmp; uwsgi_temp_path /tmp; scgi_temp_path /tmp; # access_log does not support 'stderr' directive directly access_log /dev/stderr; # nginx needs an async way to resolve hostnames to IPs, and # the default `gethostbyname` setup does not allow for this. # While ideally nginx should parse /etc/resolv.conf itself, # it does not do so at this time, and needs us to set the DNS # server explicitly. This can be specified by the user, but # defaults to a value we parse out of /etc/resolv.conf. # NOTE: This causes issues when resolving localhost and # other hostnames traditionally set in /etc/hosts, since # DNS servers respond erratically to queries for them. 
resolver %s ipv6=off; # This is used to support websocket proxying. We need to set # the 'Upgrade' HTTP header to either 'upgrade' (for websockets) # or 'close' (for everything else). # See https://www.nginx.com/resources/admin-guide/reverse-proxy/ # for more details. map $http_upgrade $connection_upgrade { default upgrade; '' close; } # Shared memory area for caching username to id mappings lua_shared_dict usernamemapping 16m; lua_ssl_trusted_certificate /etc/ssl/certs/ca-certificates.crt; lua_ssl_verify_depth 10; # Serve things with appropriate mimetypes include /usr/local/openresty/nginx/conf/mime.types; # This is the 'regular' server, that sees all public # traffic and proxies them to the appropriate backend server. server { listen 0.0.0.0:8000; location ~ \/\. { deny all; } # No port numbes in redirects port_in_redirect off; location ~ ^/user/([^/]+)/notebooks/(.*)$ { rewrite /user/([^/]+)/notebooks/(.*)$ /User:$1/$2 permanent; } # Only after the User: redirect! Otherwise our backend can't find the file. location ~ ^/\d+/.*\.ipynb$ { include /usr/local/openresty/nginx/conf/uwsgi_params; uwsgi_pass uwsgi://%s:8000; } location / { index index.html index.ipynb Index.ipynb; fancyindex on; alias /data/project/paws/userhomes/; } location /accelredir { internal; alias /data/project/paws/userhomes/; } location /User: { rewrite_by_lua_block { local m = ngx.re.match(ngx.var.uri, "/User:([^/]+)(.*)"); if m then local userid = ngx.shared.usernamemapping:get(m[1]); if userid == nil then local http = require "resty.http"; local httpc = http.new(); local apiurl = "https://meta.wikimedia.org/w/api.php?" .. "action=query&format=json&formatversion=2" .. "&prop=&list=users&meta=&usprop=centralids" .. "&ususers=" .. 
ngx.escape_uri(m[1]); local res, err = httpc:request_uri(apiurl); local cjson = require "cjson"; local resp_data = cjson.decode(res.body); ngx.log(ngx.ERR, res.body); if resp_data["query"]["users"][1]["missing"] then ngx.exit(404); end userid = resp_data["query"]["users"][1]["centralids"]["CentralAuth"] ngx.shared.usernamemapping:set(m[1], userid); end ngx.req.set_uri("/" .. userid .. m[2], true, true); end } proxy_http_version 1.1; # This is required for websockets to be proxied correctly proxy_set_header Upgrade $http_upgrade; proxy_set_header Connection $connection_upgrade; # This is required for the target servers to know what # exactly the original protocol / URI / Host was. proxy_set_header X-Forwarded-Proto $scheme; proxy_set_header X-Original-URI $request_uri; proxy_set_header Host $host:$server_port; } } } """ def get_nameservers(ipv4only=True): """ Return a list of nameservers from parsing /etc/resolv.conf. If ipv4only is set, filter out ipv6 nameservers. This is because nginx freaks out in some formats of ipv6 that otherwise seem ok. """ nameservers = [] with open("/etc/resolv.conf") as f: for line in f: if line.strip().startswith("nameserver"): nameservers += line.strip().split(" ")[1:] if ipv4only: nameservers = [n for n in nameservers if ":" not in n] return nameservers with open("/tmp/nginx.conf", "w") as f: # Not using the nicer .format since it gets confused by the { } in the # nginx config itself :( params = ( " ".join(get_nameservers()), os.environ["RENDERER_PORT_8000_TCP_ADDR"], ) f.write(CONFIG % params) os.execl( "/usr/local/openresty/bin/openresty", "/usr/local/openresty/bin/openresty", "-c", "/tmp/nginx.conf", )
mit
kskalski/grpc
src/python/grpcio/grpc/framework/foundation/stream.py
48
1389
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Interfaces related to streams of values or objects."""

import abc

import six


class Consumer(six.with_metaclass(abc.ABCMeta)):
    """Abstract sink for a finite stream of values or objects.

    A Consumer is fed zero or more values via consume() and is then
    informed that the stream has ended, either separately through
    terminate() or together with the last value through
    consume_and_terminate().
    """

    @abc.abstractmethod
    def consume(self, value):
        """Passes one value of the stream to this Consumer.

        Args:
          value: Any value accepted by this Consumer.
        """
        raise NotImplementedError()

    @abc.abstractmethod
    def terminate(self):
        """Signals this Consumer that the stream is over; no more values follow."""
        raise NotImplementedError()

    @abc.abstractmethod
    def consume_and_terminate(self, value):
        """Passes a final value and signals that the stream is over.

        Args:
          value: Any value accepted by this Consumer.
        """
        raise NotImplementedError()
apache-2.0
barisdinc/QB50
9600rx.py
1
4886
#!/usr/bin/env python from gnuradio import gr,usrp,blks,packetradio from gnuradio.eng_option import eng_option import gnuradio.gr.gr_threading as _threading from math import pi from optparse import OptionParser from gnuradio.eng_option import eng_option import time from ax25 import * # # 64e6 (250) 256000 (3/5) 153600 (16) 9600 # class queue_watcher_thread(_threading.Thread): def __init__(self, rcvd_pktq, callback): _threading.Thread.__init__(self) self.setDaemon(1) self.rcvd_pktq = rcvd_pktq self.callback = callback self.keep_running = True self.start() def stop(self): self.keep_running = False def run(self): while self.keep_running: msg = self.rcvd_pktq.delete_head() if self.callback: self.callback(msg.to_string()) def main(): parser=OptionParser(option_class=eng_option) parser.add_option("-R", "--rx-subdev-spec", type="subdev", default=None,help="select USRP Rx side A or B (default=A)") parser.add_option("-f", "--freq", type="eng_float", default=436.6625e6,help="set frequency to FREQ", metavar="FREQ") parser.add_option("-g", "--gain", type="eng_float", default=None,help="set gain in dB (default is midpoint)") parser.add_option("-d", "--do-logging", action="store_true", default=False, help="enable logging on datafiles") parser.add_option("-s", "--use-datafile", action="store_true", default=False, help="use usrp.dat (256kbps) as input") (options, args) = parser.parse_args() if len(args) !=0: parser.print_help() sys.exit(1) bitrate=9600 usrp_decim=250 if_rate=64e6/usrp_decim #256e3 sf=(if_rate*3)/5 #153600 bit_oversampling=8 sw_decim=int(sf/bitrate/bit_oversampling) #2 bf=sf/sw_decim nbfmdev=3e3 nbfmk=if_rate/(2*pi*nbfmdev) fg = gr.flow_graph () if options.do_logging: logger1 = gr.file_sink(gr.sizeof_gr_complex, "usrpout.dat") logger2 = gr.file_sink(gr.sizeof_float, "demod.dat") logger3 = gr.file_sink(gr.sizeof_float, "clkrec.dat") logger4 = gr.file_sink(gr.sizeof_char, "slicer.dat") if options.use_datafile: src = gr.file_source(gr.sizeof_gr_complex,"usrp.dat") 
else: u=usrp.source_c() u.set_decim_rate(usrp_decim) if options.rx_subdev_spec is None: subdev_spec=usrp.pick_rx_subdevice(u) else: subdev_spec=options.rx_subdev_spec subdev=usrp.selected_subdev(u, subdev_spec) print "Using RX d'board %s" % (subdev.side_and_name(),) u.set_mux(usrp.determine_rx_mux_value(u, subdev_spec)) print "MUX:%x" % (usrp.determine_rx_mux_value(u, subdev_spec)) if options.gain is None: g=subdev.gain_range() gain=float(g[0]+g[1])/2 else: gain=options.gain subdev.set_gain(gain) print "Gain set to",str(gain) r=usrp.tune(u, 0, subdev, options.freq) if r: print "Frequency set to",options.freq else: print "Frequency set to",options.freq,"failed" src=u chan_taps = gr.firdes.low_pass(1,if_rate,13e3,4e3,gr.firdes.WIN_HANN) chan = gr.fir_filter_ccf(1,chan_taps) #256e3 fmdem = gr.quadrature_demod_cf(nbfmk) alpha = 0.0001 freqoff = gr.single_pole_iir_filter_ff(alpha) sub = gr.sub_ff() res_taps = blks.design_filter(3,5,0.4) res = blks.rational_resampler_fff(fg,3,5,res_taps) #153600 lp_taps = gr.firdes.low_pass(sw_decim,sf,6e3,4e3,gr.firdes.WIN_HANN) lp = gr.fir_filter_fff(sw_decim,lp_taps) #76800 (9600*8) _def_gain_mu = 0.05 _def_mu = 0.5 _def_freq_error = 0.00 _def_omega_relative_limit = 0.005 _omega = bit_oversampling*(1+_def_freq_error) _gain_omega = .25 * _def_gain_mu * _def_gain_mu clkrec = gr.clock_recovery_mm_ff(_omega, _gain_omega, _def_mu, _def_gain_mu, _def_omega_relative_limit) slicer = gr.binary_slicer_fb() pktq = gr.msg_queue() sink = packetradio.hdlc_framer(pktq,1) watcher=queue_watcher_thread(pktq,rx_callback) fg.connect(src,chan,fmdem) fg.connect(fmdem,(sub,0)) fg.connect(fmdem,freqoff,(sub,1)) fg.connect(sub,res,lp,clkrec,slicer,sink) if options.do_logging: fg.connect(src,logger1) fg.connect(sub,logger2) fg.connect(clkrec,logger3) fg.connect(slicer,logger4) fg.start() fg.wait() def rx_callback(payload): string=printpacket(payload) print "\n=====",time.asctime(time.localtime()),"\n",string,"=====\n" if __name__ == '__main__': try: main() 
except KeyboardInterrupt: pass
gpl-3.0
HaebinShin/tensorflow
tensorflow/contrib/learn/python/learn/estimators/estimator.py
1
33804
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Base Estimator class.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import abc import inspect import os import tempfile import time import numpy as np import six from tensorflow.contrib import framework as contrib_framework from tensorflow.contrib import layers from tensorflow.contrib.learn.python.learn import graph_actions from tensorflow.contrib.learn.python.learn import monitors as monitors_lib from tensorflow.contrib.learn.python.learn.estimators import _sklearn as sklearn from tensorflow.contrib.learn.python.learn.estimators import run_config from tensorflow.contrib.learn.python.learn.estimators import tensor_signature from tensorflow.contrib.learn.python.learn.estimators._sklearn import NotFittedError from tensorflow.contrib.learn.python.learn.learn_io import data_feeder from tensorflow.contrib.learn.python.learn.utils import checkpoints from tensorflow.python.framework import ops from tensorflow.python.framework import random_seed from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import logging_ops from tensorflow.python.platform import tf_logging as logging from tensorflow.python.training import device_setter from tensorflow.python.training import saver class ModeKeys(object): """Standard 
names for model modes. The following standard keys are defined: * `TRAIN`: training mode. * `EVAL`: evaluation mode. * `INFER`: inference mode. """ TRAIN = 'train' EVAL = 'eval' INFER = 'infer' def _get_input_fn(x, y, input_fn, feed_fn, batch_size, shuffle=False, epochs=1): """Make inputs into input and feed functions.""" if input_fn is None: if x is None: raise ValueError('Either x or input_fn must be provided.') if contrib_framework.is_tensor(x) or (y is not None and contrib_framework.is_tensor(y)): raise ValueError('Inputs cannot be tensors. Please provide input_fn.') if feed_fn is not None: raise ValueError('Can not provide both feed_fn and x or y.') df = data_feeder.setup_train_data_feeder(x, y, n_classes=None, batch_size=batch_size, shuffle=shuffle, epochs=epochs) return df.input_builder, df.get_feed_dict_fn() if (x is not None) or (y is not None): raise ValueError('Can not provide both input_fn and x or y.') if batch_size is not None: raise ValueError('Can not provide both input_fn and batch_size.') return input_fn, feed_fn def infer_real_valued_columns_from_input_fn(input_fn): """Creates `FeatureColumn` objects for inputs defined by `input_fn`. This interprets all inputs as dense, fixed-length float values. This creates a local graph in which it calls `input_fn` to build the tensors, then discards it. Args: input_fn: Function returning a tuple of input and target `Tensor` objects. Returns: List of `FeatureColumn` objects. """ with ops.Graph().as_default(): features, _ = input_fn() return layers.infer_real_valued_columns(features) def infer_real_valued_columns_from_input(x): """Creates `FeatureColumn` objects for inputs defined by input `x`. This interprets all inputs as dense, fixed-length float values. Args: x: Real-valued matrix of shape [n_samples, n_features...]. Can be iterator that returns arrays of features. Returns: List of `FeatureColumn` objects. 
""" input_fn, _ = _get_input_fn( x=x, y=None, input_fn=None, feed_fn=None, batch_size=None) return infer_real_valued_columns_from_input_fn(input_fn) def _get_arguments(func): """Returns list of arguments this function has.""" if hasattr(func, '__code__'): # Regular function. return inspect.getargspec(func).args elif hasattr(func, '__call__'): # Callable object. return _get_arguments(func.__call__) elif hasattr(func, 'func'): # Partial function. return _get_arguments(func.func) class BaseEstimator(sklearn.BaseEstimator): """Abstract BaseEstimator class to train and evaluate TensorFlow models. Concrete implementation of this class should provide the following functions: * _get_train_ops * _get_eval_ops * _get_predict_ops `Estimator` implemented below is a good example of how to use this class. """ __metaclass__ = abc.ABCMeta # TODO(wicke): Remove this once launcher takes over config functionality _Config = run_config.RunConfig # pylint: disable=invalid-name def __init__(self, model_dir=None, config=None): """Initializes a BaseEstimator instance. Args: model_dir: Directory to save model parameters, graph and etc. config: A RunConfig instance. """ # Model directory. self._model_dir = model_dir if self._model_dir is None: self._model_dir = tempfile.mkdtemp() logging.warning('Using temporary folder as model directory: %s', self._model_dir) # Create a run configuration if config is None: self._config = BaseEstimator._Config() else: self._config = config # Set device function depending if there are replicas or not. if self._config.num_ps_replicas > 0: ps_ops = ['Variable', 'AutoReloadVariable'] self._device_fn = device_setter.replica_device_setter( ps_tasks=self._config.num_ps_replicas, merge_devices=False, ps_ops=ps_ops) else: self._device_fn = None # Features and targets TensorSignature objects. 
# TODO(wicke): Rename these to something more descriptive self._features_info = None self._targets_info = None self._graph = None def fit(self, x=None, y=None, input_fn=None, steps=None, batch_size=None, monitors=None, max_steps=None): """Trains a model given training data `x` predictions and `y` targets. Args: x: Matrix of shape [n_samples, n_features...]. Can be iterator that returns arrays of features. The training input samples for fitting the model. If set, `input_fn` must be `None`. y: Vector or matrix [n_samples] or [n_samples, n_outputs]. Can be iterator that returns array of targets. The training target values (class labels in classification, real numbers in regression). If set, `input_fn` must be `None`. input_fn: Input function. If set, `x`, `y`, and `batch_size` must be `None`. steps: Number of steps for which to train model. If `None`, train forever. If set, `max_steps` must be `None`. batch_size: minibatch size to use on the input, defaults to first dimension of `x`. Must be `None` if `input_fn` is provided. monitors: List of `BaseMonitor` subclass instances. Used for callbacks inside the training loop. max_steps: Number of total steps for which to train model. If `None`, train forever. If set, `steps` must be `None`. Two calls to `fit(steps=100)` means 200 training iterations. On the other hand, two calls to `fit(max_steps=100)` means that the second call will not do any iteration since first call did all 100 steps. Returns: `self`, for chaining. Raises: ValueError: If `x` or `y` are not `None` while `input_fn` is not `None`. ValueError: If both `steps` and `max_steps` are not `None`. 
""" if (steps is not None) and (max_steps is not None): raise ValueError('Can not provide both steps and max_steps.') input_fn, feed_fn = _get_input_fn(x, y, input_fn, feed_fn=None, batch_size=batch_size, shuffle=True, epochs=None) loss = self._train_model(input_fn=input_fn, feed_fn=feed_fn, steps=steps, monitors=monitors, max_steps=max_steps) logging.info('Loss for final step: %s.', loss) return self def partial_fit( self, x=None, y=None, input_fn=None, steps=1, batch_size=None, monitors=None): """Incremental fit on a batch of samples. This method is expected to be called several times consecutively on different or the same chunks of the dataset. This either can implement iterative training or out-of-core/online training. This is especially useful when the whole dataset is too big to fit in memory at the same time. Or when model is taking long time to converge, and you want to split up training into subparts. Args: x: Matrix of shape [n_samples, n_features...]. Can be iterator that returns arrays of features. The training input samples for fitting the model. If set, `input_fn` must be `None`. y: Vector or matrix [n_samples] or [n_samples, n_outputs]. Can be iterator that returns array of targets. The training target values (class labels in classification, real numbers in regression). If set, `input_fn` must be `None`. input_fn: Input function. If set, `x`, `y`, and `batch_size` must be `None`. steps: Number of steps for which to train model. If `None`, train forever. batch_size: minibatch size to use on the input, defaults to first dimension of `x`. Must be `None` if `input_fn` is provided. monitors: List of `BaseMonitor` subclass instances. Used for callbacks inside the training loop. Returns: `self`, for chaining. Raises: ValueError: If at least one of `x` and `y` is provided, and `input_fn` is provided. """ logging.warning('The current implementation of partial_fit is not optimized' 'for use in a loop. 
Consider using fit() instead.') return self.fit(x=x, y=y, input_fn=input_fn, steps=steps, batch_size=batch_size, monitors=monitors) def evaluate(self, x=None, y=None, input_fn=None, feed_fn=None, batch_size=None, steps=None, metrics=None, name=None): """Evaluates given model with provided evaluation data. Evaluates on the given input data. If `input_fn` is provided, that input function should raise an end-of-input exception (`OutOfRangeError` or `StopIteration`) after one epoch of the training data has been provided. By default, the whole evaluation dataset is used. If `steps` is provided, only `steps` batches of size `batch_size` are processed. The return value is a dict containing the metrics specified in `metrics`, as well as an entry `global_step` which contains the value of the global step for which this evaluation was performed. Args: x: Matrix of shape [n_samples, n_features...]. Can be iterator that returns arrays of features. The training input samples for fitting the model. If set, `input_fn` must be `None`. y: Vector or matrix [n_samples] or [n_samples, n_outputs]. Can be iterator that returns array of targets. The training target values (class labels in classification, real numbers in regression). If set, `input_fn` must be `None`. input_fn: Input function. If set, `x`, `y`, and `batch_size` must be `None`. feed_fn: Function creating a feed dict every time it is called. Called once per iteration. batch_size: minibatch size to use on the input, defaults to first dimension of `x`, if specified. Must be `None` if `input_fn` is provided. steps: Number of steps for which to evaluate model. If `None`, evaluate until running tensors generated by `metrics` raises an exception. metrics: Dict of metric ops to run. If `None`, the default metric functions are used; if `{}`, no metrics are used. If model has one output (i.e., returning single predction), keys are `str`, e.g. `'accuracy'` - just a name of the metric that will show up in the logs / summaries. 
Otherwise, keys are tuple of two `str`, e.g. `('accuracy', 'classes')`- name of the metric and name of `Tensor` in the predictions to run this metric on. Metric ops should support streaming, e.g., returning update_op and value tensors. See more details in ../../../../metrics/python/metrics/ops/streaming_metrics.py. name: Name of the evaluation if user needs to run multiple evaluations on different data sets, such as on training data vs test data. Returns: Returns `dict` with evaluation results. Raises: ValueError: If at least one of `x` or `y` is provided, and at least one of `input_fn` or `feed_fn` is provided. Or if `metrics` is not `None` or `dict`. """ input_fn, feed_fn = _get_input_fn(x, y, input_fn=input_fn, feed_fn=feed_fn, batch_size=batch_size, shuffle=False, epochs=1) if metrics is not None and not isinstance(metrics, dict): raise ValueError('Metrics argument should be None or dict. ' 'Got %s.' % metrics) eval_results, global_step = self._evaluate_model(input_fn=input_fn, feed_fn=feed_fn, steps=steps, metrics=metrics, name=name) if eval_results is not None: eval_results.update({'global_step': global_step}) return eval_results def predict(self, x=None, input_fn=None, batch_size=None, outputs=None): """Returns predictions for given features. Args: x: Matrix of shape [n_samples, n_features...]. Can be iterator that returns arrays of features. The training input samples for fitting the model. If set, `input_fn` must be `None`. input_fn: Input function. If set, `x` and 'batch_size' must be `None`. batch_size: Override default batch size. If set, 'input_fn' must be 'None'. outputs: list of `str`, name of the output to predict. If `None`, returns all. Returns: Numpy array of predicted classes or regression values. Raises: ValueError: If x and input_fn are both provided or both `None`. 
""" input_fn, feed_fn = _get_input_fn(x, None, input_fn=input_fn, feed_fn=None, batch_size=batch_size, shuffle=False, epochs=1) return self._infer_model(input_fn=input_fn, feed_fn=feed_fn, outputs=outputs) def get_variable_value(self, name): """Returns value of the variable given by name. Args: name: string, name of the tensor. Returns: Numpy array - value of the tensor. """ if name.endswith(':0'): name = name[:-2] return checkpoints.load_variable(self.model_dir, name) def get_variable_names(self): """Returns list of all variable names in this model. Returns: List of names. """ return [name for name, _ in checkpoints.list_variables(self.model_dir)] @property def model_dir(self): return self._model_dir @abc.abstractproperty def _get_train_ops(self, features, targets): """Method that builds model graph and returns trainer ops. Expected to be overriden by sub-classes that require custom support. Args: features: `Tensor` or `dict` of `Tensor` objects. targets: `Tensor` or `dict` of `Tensor` objects. Returns: Tuple of train `Operation` and loss `Tensor`. """ pass @abc.abstractproperty def _get_predict_ops(self, features): """Method that builds model graph and returns prediction ops. Args: features: `Tensor` or `dict` of `Tensor` objects. Returns: predictions: `Tensor` or `dict` of `Tensor` objects. """ pass def _get_eval_ops(self, features, targets, metrics): """Method that builds model graph and returns evaluation ops. Expected to be overriden by sub-classes that require custom support. Args: features: `Tensor` or `dict` of `Tensor` objects. targets: `Tensor` or `dict` of `Tensor` objects. metrics: Dict of metric ops to run. If None, the default metric functions are used; if {}, no metrics are used. If model has one output (i.e., returning single predction), keys are `str`, e.g. `'accuracy'` - just a name of the metric that will show up in the logs / summaries. Otherwise, keys are tuple of two `str`, e.g. 
`('accuracy', 'classes')` - name of the metric and name of `Tensor` in the predictions to run this metric on. Metric ops should support streaming, e.g., returning update_op and value tensors. See more details in ../../../../metrics/python/metrics/ops/streaming_metrics.py. Returns: metrics: `dict` of `Tensor` objects. """ raise NotImplementedError('_get_eval_ops not implemented in BaseEstimator') def _get_feature_ops_from_example(self, examples_batch): """Returns feature parser for given example batch using features info. This function requires `fit()` has been called. Args: examples_batch: batch of tf.Example Returns: features: `Tensor` or `dict` of `Tensor` objects. Raises: ValueError: If `_features_info` attribute is not available (usually because `fit()` has not been called). """ if self._features_info is None: raise ValueError('Features information missing, was fit() ever called?') return tensor_signature.create_example_parser_from_signatures( self._features_info, examples_batch) def _check_inputs(self, features, targets): if self._features_info is not None: logging.warning('Given features: %s, required signatures: %s.' % (str(features), str(self._features_info))) if not tensor_signature.tensors_compatible(features, self._features_info): raise ValueError('Features are incompatible with given information. ' 'Given features: %s, required signatures: %s.' % (str(features), str(self._features_info))) else: self._features_info = tensor_signature.create_signatures(features) logging.warning('Setting feature info to %s', str(self._features_info)) if targets is not None: if self._targets_info is not None: logging.warning('Given targets: %s, required signatures: %s.' % (str(targets), str(self._targets_info))) if not tensor_signature.tensors_compatible(targets, self._targets_info): raise ValueError('Targets are incompatible with given information. ' 'Given targets: %s, required signatures: %s.' 
% (str(targets), str(self._targets_info))) else: self._targets_info = tensor_signature.create_signatures(targets) logging.warning('Setting targets info to %s', str(self._targets_info)) def _train_model(self, input_fn, steps, feed_fn=None, init_op=None, init_feed_fn=None, init_fn=None, device_fn=None, monitors=None, log_every_steps=100, fail_on_nan_loss=True, max_steps=None): # TODO(wicke): Remove this once Model and associated code are gone. if hasattr(self._config, 'execution_mode'): if self._config.execution_mode not in ('all', 'train'): return # Stagger startup of worker sessions based on task id. sleep_secs = min( self._config.training_worker_max_startup_secs, self._config.task * self._config.training_worker_session_startup_stagger_secs) if sleep_secs: logging.info('Waiting %d secs before starting task %d.', sleep_secs, self._config.task) time.sleep(sleep_secs) # Device allocation device_fn = device_fn or self._device_fn self._graph = ops.Graph() with self._graph.as_default() as g, g.device(device_fn): random_seed.set_random_seed(self._config.tf_random_seed) global_step = contrib_framework.create_global_step(g) features, targets = input_fn() self._check_inputs(features, targets) train_op, loss_op = self._get_train_ops(features, targets) # Add default monitors. if monitors is None: monitors = [] is_chief = self._config.task == 0 if is_chief: monitors += monitors_lib.get_default_monitors( loss_op=loss_op, summary_op=logging_ops.get_summary_op(), save_summary_steps=self._config.save_summary_steps, summary_writer=graph_actions.get_summary_writer(self._model_dir)) else: monitors = [] # Setup monitors. 
for monitor in monitors: monitor.set_estimator(self) return graph_actions.train( graph=g, output_dir=self._model_dir, train_op=train_op, loss_op=loss_op, global_step_tensor=global_step, init_op=init_op, init_feed_dict=init_feed_fn() if init_feed_fn is not None else None, init_fn=init_fn, log_every_steps=log_every_steps, supervisor_is_chief=is_chief, supervisor_master=self._config.master, supervisor_save_model_secs=self._config.save_checkpoints_secs, keep_checkpoint_max=self._config.keep_checkpoint_max, feed_fn=feed_fn, steps=steps, fail_on_nan_loss=fail_on_nan_loss, monitors=monitors, max_steps=max_steps) def _extract_metric_update_ops(self, eval_dict): """Separate update operations from metric value operations.""" update_ops = [] value_ops = {} for name, metric_ops in eval_dict.items(): if isinstance(metric_ops, (list, tuple)): if len(metric_ops) == 2: value_ops[name] = metric_ops[0] update_ops.append(metric_ops[1]) else: logging.warning( 'Ignoring metric {}. It returned a list|tuple with len {}, ' 'expected 2'.format(name, len(metric_ops))) value_ops[name] = metric_ops else: value_ops[name] = metric_ops if update_ops: update_ops = control_flow_ops.group(*update_ops) else: update_ops = None return update_ops, value_ops def _evaluate_model(self, input_fn, steps, feed_fn=None, metrics=None, name=''): # TODO(wicke): Remove this once Model and associated code are gone. if (hasattr(self._config, 'execution_mode') and self._config.execution_mode not in ('all', 'evaluate', 'eval_evalset')): return None, None # Check that model has been trained. checkpoint_path = self._model_dir latest_path = saver.latest_checkpoint(checkpoint_path) if not latest_path: raise NotFittedError("Couldn't find trained model at %s." % checkpoint_path) # Setup output directory. 
eval_dir = os.path.join(self._model_dir, 'eval' if not name else 'eval_' + name) with ops.Graph().as_default() as g: random_seed.set_random_seed(self._config.tf_random_seed) global_step = contrib_framework.create_global_step(g) features, targets = input_fn() self._check_inputs(features, targets) eval_dict = self._get_eval_ops(features, targets, metrics) update_op, eval_dict = self._extract_metric_update_ops(eval_dict) eval_results, current_global_step = graph_actions.evaluate( graph=g, output_dir=eval_dir, checkpoint_path=checkpoint_path, eval_dict=eval_dict, update_op=update_op, global_step_tensor=global_step, supervisor_master=self._config.master, feed_fn=feed_fn, max_steps=steps) return eval_results, current_global_step def _get_features_from_input_fn(self, input_fn): result = input_fn() if isinstance(result, (list, tuple)): return result[0] return result def _infer_model(self, input_fn, feed_fn=None, outputs=None): # Check that model has been trained. checkpoint_path = saver.latest_checkpoint(self._model_dir) if not checkpoint_path: raise NotFittedError("Couldn't find trained model at %s." % self._model_dir) with ops.Graph().as_default() as g: random_seed.set_random_seed(self._config.tf_random_seed) contrib_framework.create_global_step(g) features = self._get_features_from_input_fn(input_fn) predictions = self._get_predict_ops(features) # If predictions is single output - wrap it into dict, and remember to # return not a dict. return_dict = True if not isinstance(predictions, dict): predictions, return_dict = {'predictions': predictions}, False # Filter what to run predictions on, if outputs provided. if outputs: existing_keys = predictions.keys() predictions = { key: value for key, value in predictions.items() if key in outputs } if not predictions: raise ValueError('Expected to run at least one output from %s, ' 'provided %s.' 
% (existing_keys, outputs)) if feed_fn is None: preds = graph_actions.infer(checkpoint_path, predictions) else: preds = {} def _feed_fn(): while True: yield feed_fn() outputs = graph_actions.run_feeds( output_dict=predictions, feed_dicts=_feed_fn(), restore_checkpoint_path=checkpoint_path) for key in predictions: preds[key] = np.concatenate( [output[key] for output in outputs], axis=0) if return_dict: return preds return preds['predictions'] class Estimator(BaseEstimator): """Estimator class is the basic TensorFlow model trainer/evaluator. """ def __init__(self, model_fn=None, model_dir=None, config=None, params=None): """Constructs an Estimator instance. Args: model_fn: Model function, takes features and targets tensors or dicts of tensors and returns predictions and loss tensors. Supports next three signatures for the function: * `(features, targets) -> (predictions, loss, train_op)` * `(features, targets, mode) -> (predictions, loss, train_op)` * `(features, targets, mode, params) -> (predictions, loss, train_op)` Where * `features` are single `Tensor` or `dict` of `Tensor`s (depending on data passed to `fit`), * `targets` are `Tensor` or `dict` of `Tensor`s (for multi-head model). * `mode` represents if this training, evaluation or prediction. See `ModeKeys` for example keys. * `params` is a `dict` of hyperparameters. Will receive what is passed to Estimator in `params` parameter. This allows to configure Estimators from hyper parameter tunning. model_dir: Directory to save model parameters, graph and etc. config: Configuration object. params: `dict` of hyper parameters that will be passed into `model_fn`. Keys are names of parameters, values are basic python types. Raises: ValueError: parameters of `model_fn` don't match `params`. """ super(Estimator, self).__init__(model_dir=model_dir, config=config) if model_fn is not None: # Check number of arguments of the given function matches requirements. 
model_fn_args = _get_arguments(model_fn) if params is not None and 'params' not in model_fn_args: raise ValueError('Estimator\'s model_fn (%s) has less than 4 ' 'arguments, but not None params (%s) are passed.' % (model_fn, params)) if params is None and 'params' in model_fn_args: logging.warning('Estimator\'s model_fn (%s) has includes params ' 'argument, but params are not passed to Estimator.' % model_fn) self._model_fn = model_fn self.params = params def _call_model_fn(self, features, targets, mode): """Calls model function with support of 2, 3 or 4 arguments.""" model_fn_args = _get_arguments(self._model_fn) if 'mode' in model_fn_args: if 'params' in model_fn_args: return self._model_fn(features, targets, mode=mode, params=self.params) else: return self._model_fn(features, targets, mode=mode) return self._model_fn(features, targets) def _get_train_ops(self, features, targets): """Method that builds model graph and returns trainer ops. Expected to be overriden by sub-classes that require custom support. This implementation uses `model_fn` passed as parameter to constructor to build model. Args: features: `Tensor` or `dict` of `Tensor` objects. targets: `Tensor` or `dict` of `Tensor` objects. Returns: Tuple of train `Operation` and loss `Tensor`. """ _, loss, train_op = self._call_model_fn(features, targets, ModeKeys.TRAIN) return train_op, loss def _get_eval_ops(self, features, targets, metrics): """Method that builds model graph and returns evaluation ops. Expected to be overriden by sub-classes that require custom support. This implementation uses `model_fn` passed as parameter to constructor to build model. Args: features: `Tensor` or `dict` of `Tensor` objects. targets: `Tensor` or `dict` of `Tensor` objects. metrics: Dict of metric ops to run. If None, the default metric functions are used; if {}, no metrics are used. If model has one output (i.e., returning single predction), keys are `str`, e.g. 
`'accuracy'` - just a name of the metric that will show up in the logs / summaries. Otherwise, keys are tuple of two `str`, e.g. `('accuracy', 'classes')` - name of the metric and name of `Tensor` in the predictions to run this metric on. Metric ops should support streaming, e.g., returning update_op and value tensors. See more details in ../../../../metrics/python/metrics/ops/streaming_metrics.py. Returns: metrics: `dict` of `Tensor` objects. Raises: ValueError: if `metrics` don't match `targets`. """ predictions, loss, _ = self._call_model_fn(features, targets, ModeKeys.EVAL) result = {'loss': loss} metrics = metrics or {} if isinstance(targets, dict) and len(targets) == 1: # Unpack single target into just tensor. targets = targets[list(targets.keys())[0]] for name, metric in six.iteritems(metrics): if isinstance(name, tuple): # Multi-head metrics. if not isinstance(predictions, dict): raise ValueError( 'Metrics passed provide (name, prediction), ' 'but predictions are not dict. ' 'Metrics: %s, Predictions: %s.' % (metrics, predictions)) # Here are two options: targets are single Tensor or a dict. if isinstance(targets, dict) and name[1] in targets: # If targets are dict and the prediction name is in it, apply metric. result[name[0]] = metric(predictions[name[1]], targets[name[1]]) else: # Otherwise pass the targets to the metric. result[name[0]] = metric(predictions[name[1]], targets) else: # Single head metrics. if isinstance(predictions, dict): raise ValueError( 'Metrics passed provide only name, no prediction, ' 'but predictions are dict. ' 'Metrics: %s, Targets: %s.' % (metrics, targets)) result[name] = metric(predictions, targets) return result def _get_predict_ops(self, features): """Method that builds model graph and returns prediction ops. Expected to be overriden by sub-classes that require custom support. This implementation uses `model_fn` passed as parameter to constructor to build model. Args: features: `Tensor` or `dict` of `Tensor` objects. 
Returns: predictions: `Tensor` or `dict` of `Tensor` objects. """ targets = tensor_signature.create_placeholders_from_signatures( self._targets_info) predictions, _, _ = self._call_model_fn(features, targets, ModeKeys.INFER) return predictions
apache-2.0
MichaelDoyle/Diamond
src/collectors/onewire/test/testonewire.py
68
1389
#!/usr/bin/python # coding=utf-8 ############################################################################### from test import CollectorTestCase from test import get_collector_config from test import unittest from mock import patch from diamond.collector import Collector from onewire import OneWireCollector ############################################################################### class TestOneWireCollector(CollectorTestCase): def setUp(self): config = get_collector_config('OneWireCollector', { 'owfs': self.getFixturePath('.'), 'scan': {'temperature': 't'}, 'id:28.2F702A010000': {'presure': 'p11'}}) self.collector = OneWireCollector(config, None) def test_import(self): self.assertTrue(OneWireCollector) @patch.object(Collector, 'publish') def test(self, publish_mock): self.collector.collect() metrics = { '28_A76569020000.t': 22.4375, '28_2F702A010000.p11': 999 } self.setDocExample(collector=self.collector.__class__.__name__, metrics=metrics, defaultpath=self.collector.config['path']) self.assertPublishedMany(publish_mock, metrics) ############################################################################### if __name__ == "__main__": unittest.main()
mit
chosen1/namebench
nb_third_party/dns/rdtypes/IN/A.py
248
2055
# Copyright (C) 2003-2007, 2009, 2010 Nominum, Inc. # # Permission to use, copy, modify, and distribute this software and its # documentation for any purpose with or without fee is hereby granted, # provided that the above copyright notice and this permission notice # appear in all copies. # # THE SOFTWARE IS PROVIDED "AS IS" AND NOMINUM DISCLAIMS ALL WARRANTIES # WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF # MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL NOMINUM BE LIABLE FOR # ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES # WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN # ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT # OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. import dns.exception import dns.ipv4 import dns.rdata import dns.tokenizer class A(dns.rdata.Rdata): """A record. @ivar address: an IPv4 address @type address: string (in the standard "dotted quad" format)""" __slots__ = ['address'] def __init__(self, rdclass, rdtype, address): super(A, self).__init__(rdclass, rdtype) # check that it's OK junk = dns.ipv4.inet_aton(address) self.address = address def to_text(self, origin=None, relativize=True, **kw): return self.address def from_text(cls, rdclass, rdtype, tok, origin = None, relativize = True): address = tok.get_identifier() tok.get_eol() return cls(rdclass, rdtype, address) from_text = classmethod(from_text) def to_wire(self, file, compress = None, origin = None): file.write(dns.ipv4.inet_aton(self.address)) def from_wire(cls, rdclass, rdtype, wire, current, rdlen, origin = None): address = dns.ipv4.inet_ntoa(wire[current : current + rdlen]) return cls(rdclass, rdtype, address) from_wire = classmethod(from_wire) def _cmp(self, other): sa = dns.ipv4.inet_aton(self.address) oa = dns.ipv4.inet_aton(other.address) return cmp(sa, oa)
apache-2.0
huanqi/leetcode-python
unique_paths_ii/solution2.py
3
1435
""" Follow up for "Unique Paths": Now consider if some obstacles are added to the grids. How many unique paths would there be? An obstacle and empty space is marked as 1 and 0 respectively in the grid. For example, There is one obstacle in the middle of a 3x3 grid as illustrated below. [ [0,0,0], [0,1,0], [0,0,0] ] The total number of unique paths is 2. Note: m and n will be at most 100. """ class Solution: # @param obstacleGrid, a list of lists of integers # @return an integer def uniquePathsWithObstacles(self, obstacleGrid): n = len(obstacleGrid) m = len(obstacleGrid[0]) t = [[-1 for i in range(m)] for j in range(n)] return self.unique_paths(obstacleGrid, m - 1, n - 1, t) def unique_paths(self, grid, x, y, t): if x == 0 and y == 0: t[y][x] = 1 if grid[y][x] == 0 else 0 return t[y][x] elif grid[y][x] == 1: t[y][x] = 0 return t[y][x] elif t[y][x] != -1: return t[y][x] elif x > 0 and y == 0: t[y][x] = self.unique_paths(grid, x - 1, y, t) return t[y][x] elif y > 0 and x == 0: t[y][x] = self.unique_paths(grid, x, y - 1, t) return t[y][x] else: a = self.unique_paths(grid, x - 1, y, t) b = self.unique_paths(grid, x, y - 1, t) t[y][x] = a + b return t[y][x]
bsd-2-clause
pedrosino/tnoodle
git-tools/requests/auth.py
294
6173
# -*- coding: utf-8 -*- """ requests.auth ~~~~~~~~~~~~~ This module contains the authentication handlers for Requests. """ import os import re import time import hashlib import logging from base64 import b64encode from .compat import urlparse, str from .cookies import extract_cookies_to_jar from .utils import parse_dict_header log = logging.getLogger(__name__) CONTENT_TYPE_FORM_URLENCODED = 'application/x-www-form-urlencoded' CONTENT_TYPE_MULTI_PART = 'multipart/form-data' def _basic_auth_str(username, password): """Returns a Basic Auth string.""" return 'Basic ' + b64encode(('%s:%s' % (username, password)).encode('latin1')).strip().decode('latin1') class AuthBase(object): """Base class that all auth implementations derive from""" def __call__(self, r): raise NotImplementedError('Auth hooks must be callable.') class HTTPBasicAuth(AuthBase): """Attaches HTTP Basic Authentication to the given Request object.""" def __init__(self, username, password): self.username = username self.password = password def __call__(self, r): r.headers['Authorization'] = _basic_auth_str(self.username, self.password) return r class HTTPProxyAuth(HTTPBasicAuth): """Attaches HTTP Proxy Authentication to a given Request object.""" def __call__(self, r): r.headers['Proxy-Authorization'] = _basic_auth_str(self.username, self.password) return r class HTTPDigestAuth(AuthBase): """Attaches HTTP Digest Authentication to the given Request object.""" def __init__(self, username, password): self.username = username self.password = password self.last_nonce = '' self.nonce_count = 0 self.chal = {} self.pos = None def build_digest_header(self, method, url): realm = self.chal['realm'] nonce = self.chal['nonce'] qop = self.chal.get('qop') algorithm = self.chal.get('algorithm') opaque = self.chal.get('opaque') if algorithm is None: _algorithm = 'MD5' else: _algorithm = algorithm.upper() # lambdas assume digest modules are imported at the top level if _algorithm == 'MD5' or _algorithm == 'MD5-SESS': def 
md5_utf8(x): if isinstance(x, str): x = x.encode('utf-8') return hashlib.md5(x).hexdigest() hash_utf8 = md5_utf8 elif _algorithm == 'SHA': def sha_utf8(x): if isinstance(x, str): x = x.encode('utf-8') return hashlib.sha1(x).hexdigest() hash_utf8 = sha_utf8 KD = lambda s, d: hash_utf8("%s:%s" % (s, d)) if hash_utf8 is None: return None # XXX not implemented yet entdig = None p_parsed = urlparse(url) path = p_parsed.path if p_parsed.query: path += '?' + p_parsed.query A1 = '%s:%s:%s' % (self.username, realm, self.password) A2 = '%s:%s' % (method, path) HA1 = hash_utf8(A1) HA2 = hash_utf8(A2) if nonce == self.last_nonce: self.nonce_count += 1 else: self.nonce_count = 1 ncvalue = '%08x' % self.nonce_count s = str(self.nonce_count).encode('utf-8') s += nonce.encode('utf-8') s += time.ctime().encode('utf-8') s += os.urandom(8) cnonce = (hashlib.sha1(s).hexdigest()[:16]) noncebit = "%s:%s:%s:%s:%s" % (nonce, ncvalue, cnonce, qop, HA2) if _algorithm == 'MD5-SESS': HA1 = hash_utf8('%s:%s:%s' % (HA1, nonce, cnonce)) if qop is None: respdig = KD(HA1, "%s:%s" % (nonce, HA2)) elif qop == 'auth' or 'auth' in qop.split(','): respdig = KD(HA1, noncebit) else: # XXX handle auth-int. return None self.last_nonce = nonce # XXX should the partial digests be encoded too? base = 'username="%s", realm="%s", nonce="%s", uri="%s", ' \ 'response="%s"' % (self.username, realm, nonce, path, respdig) if opaque: base += ', opaque="%s"' % opaque if algorithm: base += ', algorithm="%s"' % algorithm if entdig: base += ', digest="%s"' % entdig if qop: base += ', qop="auth", nc=%s, cnonce="%s"' % (ncvalue, cnonce) return 'Digest %s' % (base) def handle_401(self, r, **kwargs): """Takes the given response and tries digest-auth, if needed.""" if self.pos is not None: # Rewind the file position indicator of the body to where # it was to resend the request. 
r.request.body.seek(self.pos) num_401_calls = getattr(self, 'num_401_calls', 1) s_auth = r.headers.get('www-authenticate', '') if 'digest' in s_auth.lower() and num_401_calls < 2: setattr(self, 'num_401_calls', num_401_calls + 1) pat = re.compile(r'digest ', flags=re.IGNORECASE) self.chal = parse_dict_header(pat.sub('', s_auth, count=1)) # Consume content and release the original connection # to allow our new request to reuse the same one. r.content r.raw.release_conn() prep = r.request.copy() extract_cookies_to_jar(prep._cookies, r.request, r.raw) prep.prepare_cookies(prep._cookies) prep.headers['Authorization'] = self.build_digest_header( prep.method, prep.url) _r = r.connection.send(prep, **kwargs) _r.history.append(r) _r.request = prep return _r setattr(self, 'num_401_calls', 1) return r def __call__(self, r): # If we have a saved nonce, skip the 401 if self.last_nonce: r.headers['Authorization'] = self.build_digest_header(r.method, r.url) try: self.pos = r.body.tell() except AttributeError: pass r.register_hook('response', self.handle_401) return r
gpl-3.0
ChrisCinelli/pgessays
env/lib/python2.7/site-packages/genshi/template/tests/interpolation.py
30
7537
# -*- coding: utf-8 -*- # # Copyright (C) 2007-2008 Edgewall Software # All rights reserved. # # This software is licensed as described in the file COPYING, which # you should have received as part of this distribution. The terms # are also available at http://genshi.edgewall.org/wiki/License. # # This software consists of voluntary contributions made by many # individuals. For the exact contribution history, see the revision # history and logs, available at http://genshi.edgewall.org/log/. import doctest import sys import unittest from genshi.core import TEXT from genshi.template.base import TemplateSyntaxError, EXPR from genshi.template.interpolation import interpolate class InterpolateTestCase(unittest.TestCase): def test_interpolate_string(self): parts = list(interpolate('bla')) self.assertEqual(1, len(parts)) self.assertEqual(TEXT, parts[0][0]) self.assertEqual('bla', parts[0][1]) def test_interpolate_simple(self): parts = list(interpolate('${bla}')) self.assertEqual(1, len(parts)) self.assertEqual(EXPR, parts[0][0]) self.assertEqual('bla', parts[0][1].source) def test_interpolate_escaped(self): parts = list(interpolate('$${bla}')) self.assertEqual(1, len(parts)) self.assertEqual(TEXT, parts[0][0]) self.assertEqual('${bla}', parts[0][1]) def test_interpolate_dobuleescaped(self): parts = list(interpolate('$$${bla}')) self.assertEqual(2, len(parts)) self.assertEqual(TEXT, parts[0][0]) self.assertEqual('$', parts[0][1]) self.assertEqual(EXPR, parts[1][0]) self.assertEqual('bla', parts[1][1].source) def test_interpolate_short(self): parts = list(interpolate('$bla')) self.assertEqual(1, len(parts)) self.assertEqual(EXPR, parts[0][0]) self.assertEqual('bla', parts[0][1].source) def test_interpolate_short_escaped(self): parts = list(interpolate('$$bla')) self.assertEqual(1, len(parts)) self.assertEqual(TEXT, parts[0][0]) self.assertEqual('$bla', parts[0][1]) def test_interpolate_short_escaped_2(self): parts = list(interpolate('my $$bla = 2')) self.assertEqual(1, 
len(parts)) self.assertEqual(TEXT, parts[0][0]) self.assertEqual('my $bla = 2', parts[0][1]) def test_interpolate_short_doubleescaped(self): parts = list(interpolate('$$$bla')) self.assertEqual(2, len(parts)) self.assertEqual(TEXT, parts[0][0]) self.assertEqual('$', parts[0][1]) self.assertEqual(EXPR, parts[1][0]) self.assertEqual('bla', parts[1][1].source) def test_interpolate_short_starting_with_underscore(self): parts = list(interpolate('$_bla')) self.assertEqual(1, len(parts)) self.assertEqual(EXPR, parts[0][0]) self.assertEqual('_bla', parts[0][1].source) def test_interpolate_short_containing_underscore(self): parts = list(interpolate('$foo_bar')) self.assertEqual(1, len(parts)) self.assertEqual(EXPR, parts[0][0]) self.assertEqual('foo_bar', parts[0][1].source) def test_interpolate_short_starting_with_dot(self): parts = list(interpolate('$.bla')) self.assertEqual(1, len(parts)) self.assertEqual(TEXT, parts[0][0]) self.assertEqual('$.bla', parts[0][1]) def test_interpolate_short_containing_dot(self): parts = list(interpolate('$foo.bar')) self.assertEqual(1, len(parts)) self.assertEqual(EXPR, parts[0][0]) self.assertEqual('foo.bar', parts[0][1].source) def test_interpolate_short_starting_with_digit(self): parts = list(interpolate('$0bla')) self.assertEqual(1, len(parts)) self.assertEqual(TEXT, parts[0][0]) self.assertEqual('$0bla', parts[0][1]) def test_interpolate_short_containing_digit(self): parts = list(interpolate('$foo0')) self.assertEqual(1, len(parts)) self.assertEqual(EXPR, parts[0][0]) self.assertEqual('foo0', parts[0][1].source) def test_interpolate_short_starting_with_digit(self): parts = list(interpolate('$0bla')) self.assertEqual(1, len(parts)) self.assertEqual(TEXT, parts[0][0]) self.assertEqual('$0bla', parts[0][1]) def test_interpolate_short_containing_digit(self): parts = list(interpolate('$foo0')) self.assertEqual(1, len(parts)) self.assertEqual(EXPR, parts[0][0]) self.assertEqual('foo0', parts[0][1].source) def 
test_interpolate_full_nested_brackets(self): parts = list(interpolate('${{1:2}}')) self.assertEqual(1, len(parts)) self.assertEqual(EXPR, parts[0][0]) self.assertEqual('{1:2}', parts[0][1].source) def test_interpolate_full_mismatched_brackets(self): try: list(interpolate('${{1:2}')) except TemplateSyntaxError, e: pass else: self.fail('Expected TemplateSyntaxError') def test_interpolate_quoted_brackets_1(self): parts = list(interpolate('${"}"}')) self.assertEqual(1, len(parts)) self.assertEqual(EXPR, parts[0][0]) self.assertEqual('"}"', parts[0][1].source) def test_interpolate_quoted_brackets_2(self): parts = list(interpolate("${'}'}")) self.assertEqual(1, len(parts)) self.assertEqual(EXPR, parts[0][0]) self.assertEqual("'}'", parts[0][1].source) def test_interpolate_quoted_brackets_3(self): parts = list(interpolate("${'''}'''}")) self.assertEqual(1, len(parts)) self.assertEqual(EXPR, parts[0][0]) self.assertEqual("'''}'''", parts[0][1].source) def test_interpolate_quoted_brackets_4(self): parts = list(interpolate("${'''}\"\"\"'''}")) self.assertEqual(1, len(parts)) self.assertEqual(EXPR, parts[0][0]) self.assertEqual("'''}\"\"\"'''", parts[0][1].source) def test_interpolate_quoted_brackets_5(self): parts = list(interpolate(r"${'\'}'}")) self.assertEqual(1, len(parts)) self.assertEqual(EXPR, parts[0][0]) self.assertEqual(r"'\'}'", parts[0][1].source) def test_interpolate_mixed1(self): parts = list(interpolate('$foo bar $baz')) self.assertEqual(3, len(parts)) self.assertEqual(EXPR, parts[0][0]) self.assertEqual('foo', parts[0][1].source) self.assertEqual(TEXT, parts[1][0]) self.assertEqual(' bar ', parts[1][1]) self.assertEqual(EXPR, parts[2][0]) self.assertEqual('baz', parts[2][1].source) def test_interpolate_mixed2(self): parts = list(interpolate('foo $bar baz')) self.assertEqual(3, len(parts)) self.assertEqual(TEXT, parts[0][0]) self.assertEqual('foo ', parts[0][1]) self.assertEqual(EXPR, parts[1][0]) self.assertEqual('bar', parts[1][1].source) 
self.assertEqual(TEXT, parts[2][0]) self.assertEqual(' baz', parts[2][1]) def test_interpolate_triplequoted(self): parts = list(interpolate('${"""foo\nbar"""}')) self.assertEqual(1, len(parts)) self.assertEqual('"""foo\nbar"""', parts[0][1].source) def suite(): suite = unittest.TestSuite() suite.addTest(doctest.DocTestSuite(interpolate.__module__)) suite.addTest(unittest.makeSuite(InterpolateTestCase, 'test')) return suite if __name__ == '__main__': unittest.main(defaultTest='suite')
bsd-2-clause
FRidh/scipy
scipy/io/tests/test_fortran.py
95
2591
''' Tests for fortran sequential files ''' import tempfile import shutil from os import path from glob import iglob import re from numpy.testing import assert_equal, assert_allclose, run_module_suite import numpy as np from scipy.io import FortranFile DATA_PATH = path.join(path.dirname(__file__), 'data') def test_fortranfiles_read(): for filename in iglob(path.join(DATA_PATH, "fortran-*-*x*x*.dat")): m = re.search('fortran-([^-]+)-(\d+)x(\d+)x(\d+).dat', filename, re.I) if not m: raise RuntimeError("Couldn't match %s filename to regex" % filename) dims = (int(m.group(2)), int(m.group(3)), int(m.group(4))) f = FortranFile(filename, 'r', '<u4') data = f.read_record(dtype=m.group(1).replace('s', '<')).reshape(dims) f.close() counter = 0 for k in range(dims[2]): for j in range(dims[1]): for i in range(dims[0]): assert_equal(counter, data[i,j,k]) counter += 1 def test_fortranfiles_mixed_record(): filename = path.join(DATA_PATH, "fortran-mixed.dat") with FortranFile(filename, 'r', '<u4') as f: record = f.read_record('<i4,<f4,<i8,(2)<f8') assert_equal(record['f0'][0], 1) assert_allclose(record['f1'][0], 2.3) assert_equal(record['f2'][0], 4) assert_allclose(record['f3'][0], [5.6, 7.8]) def test_fortranfiles_write(): for filename in iglob(path.join(DATA_PATH, "fortran-*-*x*x*.dat")): m = re.search('fortran-([^-]+)-(\d+)x(\d+)x(\d+).dat', filename, re.I) if not m: raise RuntimeError("Couldn't match %s filename to regex" % filename) dims = (int(m.group(2)), int(m.group(3)), int(m.group(4))) counter = 0 data = np.zeros(dims, dtype=m.group(1).replace('s', '<')) for k in range(dims[2]): for j in range(dims[1]): for i in range(dims[0]): data[i,j,k] = counter counter += 1 tmpdir = tempfile.mkdtemp() try: testFile = path.join(tmpdir,path.basename(filename)) f = FortranFile(testFile, 'w','<u4') f.write_record(data) f.close() originalfile = open(filename, 'rb') newfile = open(testFile, 'rb') assert_equal(originalfile.read(), newfile.read(), err_msg=filename) originalfile.close() 
newfile.close() finally: shutil.rmtree(tmpdir) if __name__ == "__main__": run_module_suite()
bsd-3-clause
zobe123/Plex-CS
lib/mako/parsetree.py
60
20434
# mako/parsetree.py # Copyright (C) 2006-2015 the Mako authors and contributors <see AUTHORS file> # # This module is part of Mako and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """defines the parse tree components for Mako templates.""" from mako import exceptions, ast, util, filters, compat import re class Node(object): """base class for a Node in the parse tree.""" def __init__(self, source, lineno, pos, filename): self.source = source self.lineno = lineno self.pos = pos self.filename = filename @property def exception_kwargs(self): return {'source': self.source, 'lineno': self.lineno, 'pos': self.pos, 'filename': self.filename} def get_children(self): return [] def accept_visitor(self, visitor): def traverse(node): for n in node.get_children(): n.accept_visitor(visitor) method = getattr(visitor, "visit" + self.__class__.__name__, traverse) method(self) class TemplateNode(Node): """a 'container' node that stores the overall collection of nodes.""" def __init__(self, filename): super(TemplateNode, self).__init__('', 0, 0, filename) self.nodes = [] self.page_attributes = {} def get_children(self): return self.nodes def __repr__(self): return "TemplateNode(%s, %r)" % ( util.sorted_dict_repr(self.page_attributes), self.nodes) class ControlLine(Node): """defines a control line, a line-oriented python line or end tag. 
e.g.:: % if foo: (markup) % endif """ has_loop_context = False def __init__(self, keyword, isend, text, **kwargs): super(ControlLine, self).__init__(**kwargs) self.text = text self.keyword = keyword self.isend = isend self.is_primary = keyword in ['for', 'if', 'while', 'try', 'with'] self.nodes = [] if self.isend: self._declared_identifiers = [] self._undeclared_identifiers = [] else: code = ast.PythonFragment(text, **self.exception_kwargs) self._declared_identifiers = code.declared_identifiers self._undeclared_identifiers = code.undeclared_identifiers def get_children(self): return self.nodes def declared_identifiers(self): return self._declared_identifiers def undeclared_identifiers(self): return self._undeclared_identifiers def is_ternary(self, keyword): """return true if the given keyword is a ternary keyword for this ControlLine""" return keyword in { 'if':set(['else', 'elif']), 'try':set(['except', 'finally']), 'for':set(['else']) }.get(self.keyword, []) def __repr__(self): return "ControlLine(%r, %r, %r, %r)" % ( self.keyword, self.text, self.isend, (self.lineno, self.pos) ) class Text(Node): """defines plain text in the template.""" def __init__(self, content, **kwargs): super(Text, self).__init__(**kwargs) self.content = content def __repr__(self): return "Text(%r, %r)" % (self.content, (self.lineno, self.pos)) class Code(Node): """defines a Python code block, either inline or module level. e.g.:: inline: <% x = 12 %> module level: <%! import logger %> """ def __init__(self, text, ismodule, **kwargs): super(Code, self).__init__(**kwargs) self.text = text self.ismodule = ismodule self.code = ast.PythonCode(text, **self.exception_kwargs) def declared_identifiers(self): return self.code.declared_identifiers def undeclared_identifiers(self): return self.code.undeclared_identifiers def __repr__(self): return "Code(%r, %r, %r)" % ( self.text, self.ismodule, (self.lineno, self.pos) ) class Comment(Node): """defines a comment line. 
# this is a comment """ def __init__(self, text, **kwargs): super(Comment, self).__init__(**kwargs) self.text = text def __repr__(self): return "Comment(%r, %r)" % (self.text, (self.lineno, self.pos)) class Expression(Node): """defines an inline expression. ${x+y} """ def __init__(self, text, escapes, **kwargs): super(Expression, self).__init__(**kwargs) self.text = text self.escapes = escapes self.escapes_code = ast.ArgumentList(escapes, **self.exception_kwargs) self.code = ast.PythonCode(text, **self.exception_kwargs) def declared_identifiers(self): return [] def undeclared_identifiers(self): # TODO: make the "filter" shortcut list configurable at parse/gen time return self.code.undeclared_identifiers.union( self.escapes_code.undeclared_identifiers.difference( set(filters.DEFAULT_ESCAPES.keys()) ) ).difference(self.code.declared_identifiers) def __repr__(self): return "Expression(%r, %r, %r)" % ( self.text, self.escapes_code.args, (self.lineno, self.pos) ) class _TagMeta(type): """metaclass to allow Tag to produce a subclass according to its keyword""" _classmap = {} def __init__(cls, clsname, bases, dict): if getattr(cls, '__keyword__', None) is not None: cls._classmap[cls.__keyword__] = cls super(_TagMeta, cls).__init__(clsname, bases, dict) def __call__(cls, keyword, attributes, **kwargs): if ":" in keyword: ns, defname = keyword.split(':') return type.__call__(CallNamespaceTag, ns, defname, attributes, **kwargs) try: cls = _TagMeta._classmap[keyword] except KeyError: raise exceptions.CompileException( "No such tag: '%s'" % keyword, source=kwargs['source'], lineno=kwargs['lineno'], pos=kwargs['pos'], filename=kwargs['filename'] ) return type.__call__(cls, keyword, attributes, **kwargs) class Tag(compat.with_metaclass(_TagMeta, Node)): """abstract base class for tags. <%sometag/> <%someothertag> stuff </%someothertag> """ __keyword__ = None def __init__(self, keyword, attributes, expressions, nonexpressions, required, **kwargs): """construct a new Tag instance. 
this constructor not called directly, and is only called by subclasses. :param keyword: the tag keyword :param attributes: raw dictionary of attribute key/value pairs :param expressions: a set of identifiers that are legal attributes, which can also contain embedded expressions :param nonexpressions: a set of identifiers that are legal attributes, which cannot contain embedded expressions :param \**kwargs: other arguments passed to the Node superclass (lineno, pos) """ super(Tag, self).__init__(**kwargs) self.keyword = keyword self.attributes = attributes self._parse_attributes(expressions, nonexpressions) missing = [r for r in required if r not in self.parsed_attributes] if len(missing): raise exceptions.CompileException( "Missing attribute(s): %s" % ",".join([repr(m) for m in missing]), **self.exception_kwargs) self.parent = None self.nodes = [] def is_root(self): return self.parent is None def get_children(self): return self.nodes def _parse_attributes(self, expressions, nonexpressions): undeclared_identifiers = set() self.parsed_attributes = {} for key in self.attributes: if key in expressions: expr = [] for x in re.compile(r'(\${.+?})', re.S).split(self.attributes[key]): m = re.compile(r'^\${(.+?)}$', re.S).match(x) if m: code = ast.PythonCode(m.group(1).rstrip(), **self.exception_kwargs) # we aren't discarding "declared_identifiers" here, # which we do so that list comprehension-declared # variables aren't counted. As yet can't find a # condition that requires it here. 
undeclared_identifiers = \ undeclared_identifiers.union( code.undeclared_identifiers) expr.append('(%s)' % m.group(1)) else: if x: expr.append(repr(x)) self.parsed_attributes[key] = " + ".join(expr) or repr('') elif key in nonexpressions: if re.search(r'\${.+?}', self.attributes[key]): raise exceptions.CompileException( "Attibute '%s' in tag '%s' does not allow embedded " "expressions" % (key, self.keyword), **self.exception_kwargs) self.parsed_attributes[key] = repr(self.attributes[key]) else: raise exceptions.CompileException( "Invalid attribute for tag '%s': '%s'" % (self.keyword, key), **self.exception_kwargs) self.expression_undeclared_identifiers = undeclared_identifiers def declared_identifiers(self): return [] def undeclared_identifiers(self): return self.expression_undeclared_identifiers def __repr__(self): return "%s(%r, %s, %r, %r)" % (self.__class__.__name__, self.keyword, util.sorted_dict_repr(self.attributes), (self.lineno, self.pos), self.nodes ) class IncludeTag(Tag): __keyword__ = 'include' def __init__(self, keyword, attributes, **kwargs): super(IncludeTag, self).__init__( keyword, attributes, ('file', 'import', 'args'), (), ('file',), **kwargs) self.page_args = ast.PythonCode( "__DUMMY(%s)" % attributes.get('args', ''), **self.exception_kwargs) def declared_identifiers(self): return [] def undeclared_identifiers(self): identifiers = self.page_args.undeclared_identifiers.\ difference(set(["__DUMMY"])).\ difference(self.page_args.declared_identifiers) return identifiers.union(super(IncludeTag, self). 
undeclared_identifiers()) class NamespaceTag(Tag): __keyword__ = 'namespace' def __init__(self, keyword, attributes, **kwargs): super(NamespaceTag, self).__init__( keyword, attributes, ('file',), ('name','inheritable', 'import','module'), (), **kwargs) self.name = attributes.get('name', '__anon_%s' % hex(abs(id(self)))) if not 'name' in attributes and not 'import' in attributes: raise exceptions.CompileException( "'name' and/or 'import' attributes are required " "for <%namespace>", **self.exception_kwargs) if 'file' in attributes and 'module' in attributes: raise exceptions.CompileException( "<%namespace> may only have one of 'file' or 'module'", **self.exception_kwargs ) def declared_identifiers(self): return [] class TextTag(Tag): __keyword__ = 'text' def __init__(self, keyword, attributes, **kwargs): super(TextTag, self).__init__( keyword, attributes, (), ('filter'), (), **kwargs) self.filter_args = ast.ArgumentList( attributes.get('filter', ''), **self.exception_kwargs) def undeclared_identifiers(self): return self.filter_args.\ undeclared_identifiers.\ difference(filters.DEFAULT_ESCAPES.keys()).union( self.expression_undeclared_identifiers ) class DefTag(Tag): __keyword__ = 'def' def __init__(self, keyword, attributes, **kwargs): expressions = ['buffered', 'cached'] + [ c for c in attributes if c.startswith('cache_')] super(DefTag, self).__init__( keyword, attributes, expressions, ('name', 'filter', 'decorator'), ('name',), **kwargs) name = attributes['name'] if re.match(r'^[\w_]+$', name): raise exceptions.CompileException( "Missing parenthesis in %def", **self.exception_kwargs) self.function_decl = ast.FunctionDecl("def " + name + ":pass", **self.exception_kwargs) self.name = self.function_decl.funcname self.decorator = attributes.get('decorator', '') self.filter_args = ast.ArgumentList( attributes.get('filter', ''), **self.exception_kwargs) is_anonymous = False is_block = False @property def funcname(self): return self.function_decl.funcname def 
get_argument_expressions(self, **kw): return self.function_decl.get_argument_expressions(**kw) def declared_identifiers(self): return self.function_decl.allargnames def undeclared_identifiers(self): res = [] for c in self.function_decl.defaults: res += list(ast.PythonCode(c, **self.exception_kwargs). undeclared_identifiers) return set(res).union( self.filter_args.\ undeclared_identifiers.\ difference(filters.DEFAULT_ESCAPES.keys()) ).union( self.expression_undeclared_identifiers ).difference( self.function_decl.allargnames ) class BlockTag(Tag): __keyword__ = 'block' def __init__(self, keyword, attributes, **kwargs): expressions = ['buffered', 'cached', 'args'] + [ c for c in attributes if c.startswith('cache_')] super(BlockTag, self).__init__( keyword, attributes, expressions, ('name','filter', 'decorator'), (), **kwargs) name = attributes.get('name') if name and not re.match(r'^[\w_]+$',name): raise exceptions.CompileException( "%block may not specify an argument signature", **self.exception_kwargs) if not name and attributes.get('args', None): raise exceptions.CompileException( "Only named %blocks may specify args", **self.exception_kwargs ) self.body_decl = ast.FunctionArgs(attributes.get('args', ''), **self.exception_kwargs) self.name = name self.decorator = attributes.get('decorator', '') self.filter_args = ast.ArgumentList( attributes.get('filter', ''), **self.exception_kwargs) is_block = True @property def is_anonymous(self): return self.name is None @property def funcname(self): return self.name or "__M_anon_%d" % (self.lineno, ) def get_argument_expressions(self, **kw): return self.body_decl.get_argument_expressions(**kw) def declared_identifiers(self): return self.body_decl.allargnames def undeclared_identifiers(self): return (self.filter_args.\ undeclared_identifiers.\ difference(filters.DEFAULT_ESCAPES.keys()) ).union(self.expression_undeclared_identifiers) class CallTag(Tag): __keyword__ = 'call' def __init__(self, keyword, attributes, **kwargs): 
super(CallTag, self).__init__(keyword, attributes, ('args'), ('expr',), ('expr',), **kwargs) self.expression = attributes['expr'] self.code = ast.PythonCode(self.expression, **self.exception_kwargs) self.body_decl = ast.FunctionArgs(attributes.get('args', ''), **self.exception_kwargs) def declared_identifiers(self): return self.code.declared_identifiers.union(self.body_decl.allargnames) def undeclared_identifiers(self): return self.code.undeclared_identifiers.\ difference(self.code.declared_identifiers) class CallNamespaceTag(Tag): def __init__(self, namespace, defname, attributes, **kwargs): super(CallNamespaceTag, self).__init__( namespace + ":" + defname, attributes, tuple(attributes.keys()) + ('args', ), (), (), **kwargs) self.expression = "%s.%s(%s)" % ( namespace, defname, ",".join(["%s=%s" % (k, v) for k, v in self.parsed_attributes.items() if k != 'args']) ) self.code = ast.PythonCode(self.expression, **self.exception_kwargs) self.body_decl = ast.FunctionArgs( attributes.get('args', ''), **self.exception_kwargs) def declared_identifiers(self): return self.code.declared_identifiers.union(self.body_decl.allargnames) def undeclared_identifiers(self): return self.code.undeclared_identifiers.\ difference(self.code.declared_identifiers) class InheritTag(Tag): __keyword__ = 'inherit' def __init__(self, keyword, attributes, **kwargs): super(InheritTag, self).__init__( keyword, attributes, ('file',), (), ('file',), **kwargs) class PageTag(Tag): __keyword__ = 'page' def __init__(self, keyword, attributes, **kwargs): expressions = ['cached', 'args', 'expression_filter', 'enable_loop'] + [ c for c in attributes if c.startswith('cache_')] super(PageTag, self).__init__( keyword, attributes, expressions, (), (), **kwargs) self.body_decl = ast.FunctionArgs(attributes.get('args', ''), **self.exception_kwargs) self.filter_args = ast.ArgumentList( attributes.get('expression_filter', ''), **self.exception_kwargs) def declared_identifiers(self): return self.body_decl.allargnames
gpl-3.0
javilonas/NCam
cross/Toolchain-SamsungTV/arm-v7a8v3r1-linux-gnueabi/lib/libstdc++.so.6.0.16-gdb.py
1
2352
# -*- python -*- # Copyright (C) 2009, 2010 Free Software Foundation, Inc. # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import sys import gdb import os import os.path pythondir = '/opt/vd/arm-v7a8v3r1/share/gcc-4.6.4/python' libdir = '/opt/vd/arm-v7a8v3r1/arm-v7a8v3r1-linux-gnueabi/lib' # This file might be loaded when there is no current objfile. This # can happen if the user loads it manually. In this case we don't # update sys.path; instead we just hope the user managed to do that # beforehand. if gdb.current_objfile () is not None: # Update module path. We want to find the relative path from libdir # to pythondir, and then we want to apply that relative path to the # directory holding the objfile with which this file is associated. # This preserves relocatability of the gcc tree. # Do a simple normalization that removes duplicate separators. pythondir = os.path.normpath (pythondir) libdir = os.path.normpath (libdir) prefix = os.path.commonprefix ([libdir, pythondir]) # In some bizarre configuration we might have found a match in the # middle of a directory name. if prefix[-1] != '/': prefix = os.path.dirname (prefix) + '/' # Strip off the prefix. pythondir = pythondir[len (prefix):] libdir = libdir[len (prefix):] # Compute the ".."s needed to get from libdir to the prefix. dotdots = ('..' 
+ os.sep) * len (libdir.split (os.sep)) objfile = gdb.current_objfile ().filename dir_ = os.path.join (os.path.dirname (objfile), dotdots, pythondir) if not dir_ in sys.path: sys.path.insert(0, dir_) # Load the pretty-printers. from libstdcxx.v6.printers import register_libstdcxx_printers register_libstdcxx_printers (gdb.current_objfile ())
gpl-3.0
alexmojaki/blaze
blaze/compute/tests/test_numpy_compute.py
6
16540
from __future__ import absolute_import, division, print_function import pytest import numpy as np import pandas as pd from datetime import datetime, date from blaze.compute.core import compute, compute_up from blaze.expr import symbol, by, exp, summary, Broadcast, join, concat from blaze import sin from odo import into from datashape import discover, to_numpy, dshape x = np.array([(1, 'Alice', 100), (2, 'Bob', -200), (3, 'Charlie', 300), (4, 'Denis', 400), (5, 'Edith', -500)], dtype=[('id', 'i8'), ('name', 'S7'), ('amount', 'i8')]) t = symbol('t', discover(x)) def eq(a, b): c = a == b if isinstance(c, np.ndarray): return c.all() return c def test_symbol(): assert eq(compute(t, x), x) def test_eq(): assert eq(compute(t['amount'] == 100, x), x['amount'] == 100) def test_selection(): assert eq(compute(t[t['amount'] == 100], x), x[x['amount'] == 0]) assert eq(compute(t[t['amount'] < 0], x), x[x['amount'] < 0]) def test_arithmetic(): assert eq(compute(t['amount'] + t['id'], x), x['amount'] + x['id']) assert eq(compute(t['amount'] * t['id'], x), x['amount'] * x['id']) assert eq(compute(t['amount'] % t['id'], x), x['amount'] % x['id']) def test_UnaryOp(): assert eq(compute(exp(t['amount']), x), np.exp(x['amount'])) assert eq(compute(abs(-t['amount']), x), abs(-x['amount'])) def test_Neg(): assert eq(compute(-t['amount'], x), -x['amount']) def test_invert_not(): assert eq(compute(~(t.amount > 0), x), ~(x['amount'] > 0)) def test_Reductions(): assert compute(t['amount'].mean(), x) == x['amount'].mean() assert compute(t['amount'].count(), x) == len(x['amount']) assert compute(t['amount'].sum(), x) == x['amount'].sum() assert compute(t['amount'].min(), x) == x['amount'].min() assert compute(t['amount'].max(), x) == x['amount'].max() assert compute(t['amount'].nunique(), x) == len(np.unique(x['amount'])) assert compute(t['amount'].var(), x) == x['amount'].var() assert compute(t['amount'].std(), x) == x['amount'].std() assert compute(t['amount'].var(unbiased=True), x) == 
x['amount'].var(ddof=1) assert compute(t['amount'].std(unbiased=True), x) == x['amount'].std(ddof=1) assert compute((t['amount'] > 150).any(), x) == True assert compute((t['amount'] > 250).all(), x) == False assert compute(t['amount'][0], x) == x['amount'][0] assert compute(t['amount'][-1], x) == x['amount'][-1] def test_count_string(): s = symbol('name', 'var * ?string') x = np.array(['Alice', np.nan, 'Bob', 'Denis', 'Edith'], dtype='object') assert compute(s.count(), x) == 4 def test_reductions_on_recarray(): assert compute(t.count(), x) == len(x) def test_count_nan(): t = symbol('t', '3 * ?real') x = np.array([1.0, np.nan, 2.0]) assert compute(t.count(), x) == 2 def test_distinct(): x = np.array([('Alice', 100), ('Alice', -200), ('Bob', 100), ('Bob', 100)], dtype=[('name', 'S5'), ('amount', 'i8')]) t = symbol('t', 'var * {name: string, amount: int64}') assert eq(compute(t['name'].distinct(), x), np.unique(x['name'])) assert eq(compute(t.distinct(), x), np.unique(x)) def test_distinct_on_recarray(): rec = pd.DataFrame( [[0, 1], [0, 2], [1, 1], [1, 2]], columns=('a', 'b'), ).to_records(index=False) s = symbol('s', discover(rec)) assert ( compute(s.distinct('a'), rec) == pd.DataFrame( [[0, 1], [1, 1]], columns=('a', 'b'), ).to_records(index=False) ).all() def test_distinct_on_structured_array(): arr = np.array( [(0., 1.), (0., 2.), (1., 1.), (1., 2.)], dtype=[('a', 'f4'), ('b', 'f4')], ) s = symbol('s', discover(arr)) assert( compute(s.distinct('a'), arr) == np.array([(0., 1.), (1., 1.)], dtype=arr.dtype) ).all() def test_distinct_on_str(): rec = pd.DataFrame( [['a', 'a'], ['a', 'b'], ['b', 'a'], ['b', 'b']], columns=('a', 'b'), ).to_records(index=False).astype([('a', '<U1'), ('b', '<U1')]) s = symbol('s', discover(rec)) assert ( compute(s.distinct('a'), rec) == pd.DataFrame( [['a', 'a'], ['b', 'a']], columns=('a', 'b'), ).to_records(index=False).astype([('a', '<U1'), ('b', '<U1')]) ).all() def test_sort(): assert eq(compute(t.sort('amount'), x), np.sort(x, 
order='amount')) assert eq(compute(t.sort('amount', ascending=False), x), np.sort(x, order='amount')[::-1]) assert eq(compute(t.sort(['amount', 'id']), x), np.sort(x, order=['amount', 'id'])) assert eq(compute(t.amount.sort(), x), np.sort(x['amount'])) def test_head(): assert eq(compute(t.head(2), x), x[:2]) def test_tail(): assert eq(compute(t.tail(2), x), x[-2:]) def test_label(): expected = x['amount'] * 10 expected = np.array(expected, dtype=[('foo', 'i8')]) assert eq(compute((t['amount'] * 10).label('foo'), x), expected) def test_relabel(): expected = np.array(x, dtype=[('ID', 'i8'), ('NAME', 'S7'), ('amount', 'i8')]) result = compute(t.relabel({'name': 'NAME', 'id': 'ID'}), x) assert result.dtype.names == expected.dtype.names assert eq(result, expected) def test_by(): expr = by(t.amount > 0, count=t.id.count()) result = compute(expr, x) assert set(map(tuple, into(list, result))) == set([(False, 2), (True, 3)]) def test_compute_up_field(): assert eq(compute(t['name'], x), x['name']) def test_compute_up_projection(): assert eq(compute_up(t[['name', 'amount']], x), x[['name', 'amount']]) ax = np.arange(30, dtype='f4').reshape((5, 3, 2)) a = symbol('a', discover(ax)) def test_slice(): inds = [0, slice(2), slice(1, 3), slice(None, None, 2), [1, 2, 3], (0, 1), (0, slice(1, 3)), (slice(0, 3), slice(3, 1, -1)), (0, [1, 2])] for s in inds: assert (compute(a[s], ax) == ax[s]).all() def test_array_reductions(): for axis in [None, 0, 1, (0, 1), (2, 1)]: assert eq(compute(a.sum(axis=axis), ax), ax.sum(axis=axis)) assert eq(compute(a.std(axis=axis), ax), ax.std(axis=axis)) def test_array_reductions_with_keepdims(): for axis in [None, 0, 1, (0, 1), (2, 1)]: assert eq(compute(a.sum(axis=axis, keepdims=True), ax), ax.sum(axis=axis, keepdims=True)) def test_summary_on_ndarray(): assert compute(summary(total=a.sum(), min=a.min()), ax) == \ (ax.min(), ax.sum()) result = compute(summary(total=a.sum(), min=a.min(), keepdims=True), ax) expected = np.array([(ax.min(), ax.sum())], 
dtype=[('min', 'float32'), ('total', 'float64')]) assert result.ndim == ax.ndim assert eq(expected, result) def test_summary_on_ndarray_with_axis(): for axis in [0, 1, (1, 0)]: expr = summary(total=a.sum(), min=a.min(), axis=axis) result = compute(expr, ax) shape, dtype = to_numpy(expr.dshape) expected = np.empty(shape=shape, dtype=dtype) expected['total'] = ax.sum(axis=axis) expected['min'] = ax.min(axis=axis) assert eq(result, expected) def test_utcfromtimestamp(): t = symbol('t', '1 * int64') data = np.array([0, 1]) expected = np.array(['1970-01-01T00:00:00Z', '1970-01-01T00:00:01Z'], dtype='M8[us]') assert eq(compute(t.utcfromtimestamp, data), expected) def test_nelements_structured_array(): assert compute(t.nelements(), x) == len(x) assert compute(t.nelements(keepdims=True), x) == (len(x),) def test_nelements_array(): t = symbol('t', '5 * 4 * 3 * float64') x = np.random.randn(*t.shape) result = compute(t.nelements(axis=(0, 1)), x) np.testing.assert_array_equal(result, np.array([20, 20, 20])) result = compute(t.nelements(axis=1), x) np.testing.assert_array_equal(result, 4 * np.ones((5, 3))) def test_nrows(): assert compute(t.nrows, x) == len(x) dts = np.array(['2000-06-25T12:30:04Z', '2000-06-28T12:50:05Z'], dtype='M8[us]') s = symbol('s', 'var * datetime') def test_datetime_truncation(): assert eq(compute(s.truncate(1, 'day'), dts), dts.astype('M8[D]')) assert eq(compute(s.truncate(2, 'seconds'), dts), np.array(['2000-06-25T12:30:04Z', '2000-06-28T12:50:04Z'], dtype='M8[s]')) assert eq(compute(s.truncate(2, 'weeks'), dts), np.array(['2000-06-18', '2000-06-18'], dtype='M8[D]')) assert into(list, compute(s.truncate(1, 'week'), dts))[0].isoweekday() == 7 def test_hour(): dts = [datetime(2000, 6, 20, 1, 00, 00), datetime(2000, 6, 20, 12, 59, 59), datetime(2000, 6, 20, 12, 00, 00), datetime(2000, 6, 20, 11, 59, 59)] dts = into(np.ndarray, dts) assert eq(compute(s.truncate(1, 'hour'), dts), into(np.ndarray, [datetime(2000, 6, 20, 1, 0), datetime(2000, 6, 20, 12, 0), 
datetime(2000, 6, 20, 12, 0), datetime(2000, 6, 20, 11, 0)])) def test_month(): dts = [datetime(2000, 7, 1), datetime(2000, 6, 30), datetime(2000, 6, 1), datetime(2000, 5, 31)] dts = into(np.ndarray, dts) assert eq(compute(s.truncate(1, 'month'), dts), into(np.ndarray, [date(2000, 7, 1), date(2000, 6, 1), date(2000, 6, 1), date(2000, 5, 1)])) def test_truncate_on_np_datetime64_scalar(): s = symbol('s', 'datetime') data = np.datetime64('2000-01-02T12:30:00Z') assert compute(s.truncate(1, 'day'), data) == data.astype('M8[D]') def test_numpy_and_python_datetime_truncate_agree_on_start_of_week(): s = symbol('s', 'datetime') n = np.datetime64('2014-11-11') p = datetime(2014, 11, 11) expr = s.truncate(1, 'week') assert compute(expr, n) == compute(expr, p) def test_add_multiple_ndarrays(): a = symbol('a', '5 * 4 * int64') b = symbol('b', '5 * 4 * float32') x = np.arange(9, dtype='int64').reshape(3, 3) y = (x + 1).astype('float32') expr = sin(a) + 2 * b scope = {a: x, b: y} expected = sin(x) + 2 * y # check that we cast correctly assert expr.dshape == dshape('5 * 4 * float64') np.testing.assert_array_equal(compute(expr, scope), expected) np.testing.assert_array_equal(compute(expr, scope, optimize=False), expected) nA = np.arange(30, dtype='f4').reshape((5, 6)) ny = np.arange(6, dtype='f4') A = symbol('A', discover(nA)) y = symbol('y', discover(ny)) def test_transpose(): assert eq(compute(A.T, nA), nA.T) assert eq(compute(A.transpose((0, 1)), nA), nA) def test_dot(): assert eq(compute(y.dot(y), {y: ny}), np.dot(ny, ny)) assert eq(compute(A.dot(y), {A: nA, y: ny}), np.dot(nA, ny)) def test_subexpr_datetime(): data = pd.date_range(start='01/01/2010', end='01/04/2010', freq='D').values s = symbol('s', discover(data)) result = compute(s.truncate(days=2).day, data) expected = np.array([31, 2, 2, 4]) np.testing.assert_array_equal(result, expected) def test_mixed_types(): x = np.array([[(4, 180), (4, 184), (4, 188), (4, 192), (4, 196)], [(4, 660), (4, 664), (4, 668), (4, 672), (4, 
676)], [(4, 1140), (4, 1144), (4, 1148), (4, 1152), (4, 1156)], [(4, 1620), (4, 1624), (4, 1628), (4, 1632), (4, 1636)], [(4, 2100), (4, 2104), (4, 2108), (4, 2112), (4, 2116)]], dtype=[('count', '<i4'), ('total', '<i8')]) aggregate = symbol('aggregate', discover(x)) result = compute(aggregate.total.sum(axis=(0,)) / aggregate['count'].sum(axis=(0,)), x) expected = (x['total'].sum(axis=0, keepdims=True) / x['count'].sum(axis=0, keepdims=True)).squeeze() np.testing.assert_array_equal(result, expected) def test_broadcast_compute_against_numbers_and_arrays(): A = symbol('A', '5 * float32') a = symbol('a', 'float32') b = symbol('b', 'float32') x = np.arange(5, dtype='f4') expr = Broadcast((A, b), (a, b), a + b) result = compute(expr, {A: x, b: 10}) assert eq(result, x + 10) def test_map(): pytest.importorskip('numba') a = np.arange(10.0) f = lambda x: np.sin(x) + 1.03 * np.cos(x) ** 2 x = symbol('x', discover(a)) expr = x.map(f, 'float64') result = compute(expr, a) expected = f(a) # make sure we're not going to pandas here assert type(result) == np.ndarray assert type(result) == type(expected) np.testing.assert_array_equal(result, expected) def test_vector_norm(): x = np.arange(30).reshape((5, 6)) s = symbol('x', discover(x)) assert eq(compute(s.vnorm(), x), np.linalg.norm(x)) assert eq(compute(s.vnorm(ord=1), x), np.linalg.norm(x.flatten(), ord=1)) assert eq(compute(s.vnorm(ord=4, axis=0), x), np.linalg.norm(x, ord=4, axis=0)) expr = s.vnorm(ord=4, axis=0, keepdims=True) assert expr.shape == compute(expr, x).shape def test_join(): cities = np.array([('Alice', 'NYC'), ('Alice', 'LA'), ('Bob', 'Chicago')], dtype=[('name', 'S7'), ('city', 'O')]) c = symbol('cities', discover(cities)) expr = join(t, c, 'name') result = compute(expr, {t: x, c: cities}) assert (b'Alice', 1, 100, 'LA') in into(list, result) def test_query_with_strings(): b = np.array([('a', 1), ('b', 2), ('c', 3)], dtype=[('x', 'S1'), ('y', 'i4')]) s = symbol('s', discover(b)) assert compute(s[s.x == b'b'], 
b).tolist() == [(b'b', 2)] @pytest.mark.parametrize('keys', [['a'], list('bc')]) def test_isin(keys): b = np.array([('a', 1), ('b', 2), ('c', 3), ('a', 4), ('c', 5), ('b', 6)], dtype=[('x', 'S1'), ('y', 'i4')]) s = symbol('s', discover(b)) result = compute(s.x.isin(keys), b) expected = np.in1d(b['x'], keys) np.testing.assert_array_equal(result, expected) def test_nunique_recarray(): b = np.array([('a', 1), ('b', 2), ('c', 3), ('a', 4), ('c', 5), ('b', 6), ('a', 1), ('b', 2)], dtype=[('x', 'S1'), ('y', 'i4')]) s = symbol('s', discover(b)) expr = s.nunique() assert compute(expr, b) == len(np.unique(b)) def test_str_repeat(): a = np.array(('a', 'b', 'c')) s = symbol('s', discover(a)) expr = s.repeat(3) assert all(compute(expr, a) == np.char.multiply(a, 3)) def test_str_interp(): a = np.array(('%s', '%s', '%s')) s = symbol('s', discover(a)) expr = s.interp(1) assert all(compute(expr, a) == np.char.mod(a, 1)) def test_timedelta_arith(): dates = np.arange('2014-01-01', '2014-02-01', dtype='datetime64') delta = np.timedelta64(1, 'D') sym = symbol('s', discover(dates)) assert (compute(sym + delta, dates) == dates + delta).all() assert (compute(sym - delta, dates) == dates - delta).all() def test_coerce(): x = np.arange(1, 3) s = symbol('s', discover(x)) np.testing.assert_array_equal(compute(s.coerce('float64'), x), np.arange(1.0, 3.0)) def test_concat_arr(): s_data = np.arange(15) t_data = np.arange(15, 30) s = symbol('s', discover(s_data)) t = symbol('t', discover(t_data)) assert ( compute(concat(s, t), {s: s_data, t: t_data}) == np.arange(30) ).all() def test_concat_mat(): s_data = np.arange(15).reshape(5, 3) t_data = np.arange(15, 30).reshape(5, 3) s = symbol('s', discover(s_data)) t = symbol('t', discover(t_data)) assert ( compute(concat(s, t), {s: s_data, t: t_data}) == np.arange(30).reshape(10, 3) ).all() assert ( compute(concat(s, t, axis=1), {s: s_data, t: t_data}) == np.concatenate((s_data, t_data), axis=1) ).all()
bsd-3-clause
mrniranjan/sssd
src/sbus/codegen/sbus_Template.py
6
9834
# # Authors: # Pavel Brezina <pbrezina@redhat.com> # # Copyright (C) 2017 Red Hat # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # import os import re import errno import textwrap import os.path class Template: def __init__(self, name, templateFile, template): template = self.removeLines(template) self.templateFile = templateFile self.name = name self.loops = {} self.toggles = {} self.template = self.parse(template) self.output = "" def parse(self, template): template = self.parseLoops(template) template = self.parseToggles(template) return template def parseLoops(self, template): template = self.Pattern.Loop.sub(self.processLoops, template) return self.Pattern.LoopLine.sub(self.processLoops, template) def processLoops(self, match): name = match.group(1) template = self.removeLines(match.group(2)) index = 0 if name not in self.loops: self.loops[name] = self.Loop() index = self.loops[name].addTemplate(template) return '$${loop:%s:%d}' % (name, index) def parseToggles(self, template): template = self.Pattern.Toggle.sub(self.processToggles, template) return self.Pattern.ToggleLine.sub(self.processToggles, template) def processToggles(self, match): name = match.group(1) if_visible = self.removeLines(match.group(2)) if_hidden = self.removeLines(match.group(4)) index = 0 if name not in self.toggles: self.toggles[name] = self.Toggle() index = self.toggles[name].addTemplate(if_visible, if_hidden) 
return '$${toggle:%s:%d}' % (name, index) def add(self, loop_name, values): """Add new item into <loop name="$loop_name"> template. Setting its attributes to $values. """ if loop_name not in self.loops: return self self.loops[loop_name].set(values) return self def show(self, toggle_name, isVisible): """Make <toggle name="$toggle_name"> either visible or hidden within the template. """ if not self.hasToggle(toggle_name): return self.toggles[toggle_name].show(isVisible) def hasToggle(self, name): return name in self.toggles def hasLoop(self, name): return name in self.loops def set(self, values): """Set template attributes to $values, push generated content into the output file and reset this template. """ template = self.template for key, toggle in self.toggles.items(): for idx, toggletpl in enumerate(toggle.templates): pattern = "$${toggle:%s:%d}" % (key, idx) template = template.replace(pattern, toggletpl.generate()) self.output = self.Set(template, values) self.templateFile.push(self.generate()) self.clear() def pushOriginal(self): """Push original template into the output file """ self.templateFile.push(self.template) def clear(self): for loop in self.loops.values(): loop.clear() for toggle in self.toggles.values(): toggle.show(False) self.output = "" def generate(self): output = self.output for key, loop in self.loops.items(): for idx, content in enumerate(loop.templates): pattern = "$${loop:%s:%d}" % (key, idx) output = output.replace(pattern, loop.get(idx), 1) return output @staticmethod def Set(content, values): output = content for key, value in values.items(): output = output.replace("${" + key + "}", str(value)) return output def removeLines(self, content): """Remove unneeded lines and spaces. There are some additional lines and spaces that may end up in the template after parsing. This method will remove new line after <@template-tag> and spaces from otherwise empty lines. 
""" if content is None: return content content = self.Pattern.NewLine.sub('', content, 1) content = self.Pattern.EmptyLine.sub('', content) return content class Pattern: Template = re.compile( ' *<template name="(\S+)">(.*?)</template>\r?\n?', re.MULTILINE | re.DOTALL ) Loop = re.compile( ' *<loop name="(\S+)">(.*?)</loop>\r?\n?', re.MULTILINE | re.DOTALL ) LoopLine = re.compile( '<loop line name="(\S+)">(.*?)</loop>', re.MULTILINE | re.DOTALL ) Toggle = re.compile( ' *<toggle name="(\S+)">(.*?)(<or>(.*?))?</toggle>\r?\n?', re.MULTILINE | re.DOTALL ) ToggleLine = re.compile( '<toggle line name="(\S+)">(.*?)(<or>(.*?))?</toggle>', re.MULTILINE | re.DOTALL ) NewLine = re.compile('^\r?\n') EmptyLine = re.compile('^ *$', re.MULTILINE) class Loop: def __init__(self): self.templates = [] self.num_templates = 0 def addTemplate(self, template): self.templates.append(self.LoopTemplate(template)) self.num_templates += 1 return self.num_templates - 1 def set(self, values): for template in self.templates: template.set(values) def clear(self): for template in self.templates: template.clear() def get(self, index): return self.templates[index].generate() class LoopTemplate: def __init__(self, template): self.template = template self.output = "" def set(self, values): self.output += Template.Set(self.template, values) def clear(self): self.output = "" def generate(self): return self.output class Toggle: def __init__(self): self.templates = [] self.num_templates = 0 self.visible = False def addTemplate(self, if_visible, if_hidden): toggletpl = self.ToggleTemplate(self, if_visible, if_hidden) self.templates.append(toggletpl) self.num_templates += 1 return self.num_templates - 1 def show(self, isVisible): self.visible = isVisible class ToggleTemplate: def __init__(self, toggle, if_visible, if_hidden): self.toggle = toggle self.if_visible = if_visible self.if_hidden = if_hidden def generate(self): if self.toggle.visible: return self.if_visible elif self.if_hidden is not None: return 
self.if_hidden return '' class TemplateFile: """Parse file contents into templates. Obtain template with .get and set its content. When all the content is set, you can call .generate to obtain generated content or .write to write it to a file. """ def __init__(self, path): with open(path, "r") as file: contents = file.read() self.templates = {} self.output = "" self.parse(contents) def parse(self, template): for (name, content) in Template.Pattern.Template.findall(template): content = textwrap.dedent(content) self.templates[name] = Template(name, self, content) def get(self, name): return self.templates[name] def has(self, name): return name in self.templates def push(self, content): self.output += content def generate(self): return self.output def write(self, filename, postprocess=None): dirname = os.path.dirname(filename) if not os.path.exists(dirname): try: os.makedirs(dirname) except OSError as exception: if exception.errno == errno.EEXIST and os.path.isdir(filename): pass else: raise output = self.generate() if postprocess is not None: output = postprocess(output) if not self.needsOverride(filename, output): return with open(filename, "w") as file: file.write(output) def needsOverride(self, filename, content): """ Do not override the file unless it is not yet present or its current content differs from the generated one. This ensure that the file is in correct state and yet it is not rebuild during make unless necessary. """ if not os.path.isfile(filename): return True with open(filename, "r") as file: current_content = file.read() if current_content != content: return True return False def __str__(self): return self.generate()
gpl-3.0
thinker0/aurora
src/test/python/apache/aurora/config/test_base.py
4
7904
# # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import pytest from pystachio import Default, Float, String, Struct from twitter.common.contextutil import temporary_file from apache.aurora.config import AuroraConfig, PortResolver from apache.aurora.config.schema.base import Announcer, Empty, Job, Process, Resources, Task from gen.apache.aurora.api.ttypes import Resource resolve = PortResolver.resolve def test_all_static(): portmap = {} assert resolve(portmap) == {} portmap = {'port': '80'} assert resolve(portmap) == {'port': 80} def test_binding(): portmap = {'aurora': 'http', 'http': 80} assert resolve(portmap) == {'aurora': 80, 'http': 80} portmap = {'aurora': 'http', 'http': 'unbound'} assert resolve(portmap) == {'aurora': 'unbound', 'http': 'unbound'} def test_cycle(): portmap = {'aurora': 'http', 'http': 'aurora'} with pytest.raises(PortResolver.CycleException): resolve(portmap) portmap = {'aurora': 'http', 'http': 'https', 'https': 'aurora'} with pytest.raises(PortResolver.CycleException): resolve(portmap) MESOS_CONFIG = """ HELLO_WORLD = Job( name = 'hello_world', role = 'john_doe', environment = 'staging42', cluster = 'smf1-test', task = Task( name = 'main', processes = [Process(name='hello_world', cmdline='echo {{mesos.instance}} {{mesos.hostname}}')], resources = Resources(cpu = 0.1, ram = 64 * 1048576, disk = 64 * 1048576), ) ) jobs = [HELLO_WORLD] """ LIMITED_MESOS_CONFIG = """ HELLO_WORLD = Job( name = 'hello_world', role = 'john_doe', environment = 'staging42', 
cluster = 'smf1-test', task = Task( name = 'main', processes = [Process(name = 'hello_world_fails_0', cmdline = 'echo hello world', max_failures = 0), Process(name = 'hello_world_fails_50', cmdline = 'echo hello world', max_failures = 50), Process(name = 'hello_world_fails_100', cmdline = 'echo hello world', max_failures = 100), Process(name = 'hello_world_fails_200', cmdline = 'echo hello world', max_failures = 200)], resources = Resources(cpu = 0.1, ram = 64 * 1048576, disk = 64 * 1048576), ) ) jobs = [HELLO_WORLD] """ REIFIED_CONFIG = Job( name='hello_world', role='john_doe', environment='staging42', cluster='smf1-test', task=Task( name='main', processes=[Process(name='hello_world', cmdline='echo {{mesos.instance}} {{mesos.hostname}}')], resources=Resources(cpu=0.1, ram=64 * 1048576, disk=64 * 1048576), ) ) REIFIED_LIMITED_CONFIG = Job( name='hello_world', role='john_doe', environment='staging42', cluster='smf1-test', task=Task( name='main', processes=[ Process(name='hello_world_fails_0', cmdline='echo hello world', max_failures=0), Process(name='hello_world_fails_50', cmdline='echo hello world', max_failures=50), Process(name='hello_world_fails_100', cmdline='echo hello world', max_failures=100), Process(name='hello_world_fails_200', cmdline='echo hello world', max_failures=200) ], resources=Resources(cpu=0.1, ram=64 * 1048576, disk=64 * 1048576), ) ) EMPTY_MESOS_CONFIG = """ foo = Job(name = "hello_world") """ UNDERSPECIFIED_MESOS_CONFIG = """ jobs = [ Job(name = "hello_world") ] """ BAD_MESOS_CONFIG = """ jobs = 1234 """ def test_empty_config(): with pytest.raises(AuroraConfig.InvalidConfig): with temporary_file() as fp: fp.write(UNDERSPECIFIED_MESOS_CONFIG) fp.flush() AuroraConfig.load(fp.name) def test_simple_config(): with temporary_file() as fp: fp.write(MESOS_CONFIG) fp.flush() proxy_config1 = AuroraConfig.load(fp.name) proxy_config2 = AuroraConfig.load(fp.name, name="hello_world") assert proxy_config1.job() assert proxy_config1._job == 
proxy_config2._job assert proxy_config1._job == REIFIED_CONFIG assert proxy_config1.name() == 'hello_world' assert proxy_config1.role() == 'john_doe' assert proxy_config1.cluster() == 'smf1-test' assert proxy_config1.ports() == set() def test_schema_equality(): one = REIFIED_CONFIG two = REIFIED_CONFIG other = REIFIED_LIMITED_CONFIG assert one() == two() assert not one() == other() assert one() != other() assert not one() != two() assert one() in {two(): 'foo'} assert one() not in {other(): 'bar'} def make_config(announce, *ports): process = Process( name='hello', cmdline=' '.join('{{thermos.ports[%s]}}' % port for port in ports)) return AuroraConfig(Job( name='hello_world', environment='staging42', role='john_doe', cluster='smf1-test', announce=announce, task=Task( name='main', processes=[process], resources=Resources(cpu=0.1, ram=64 * 1048576, disk=64 * 1048576)))) def test_ports(): announce = Announcer(portmap={'http': 80}) assert make_config(announce).ports() == set() assert make_config(announce, 'http').ports() == set() assert make_config(announce, 'http', 'thrift').ports() == set(['thrift']) announce = Announcer(portmap={'http': 'aurora'}) assert make_config(announce).ports() == set(['aurora']) assert make_config(announce, 'http').ports() == set(['aurora']) assert make_config(announce, 'http', 'thrift').ports() == set(['thrift', 'aurora']) announce = Announcer(portmap={'aurora': 'http'}) assert make_config(announce).ports() == set(['http']) assert make_config(announce, 'http').ports() == set(['http']) assert make_config(announce, 'http', 'thrift').ports() == set(['http', 'thrift']) assert make_config(Empty).ports() == set() assert make_config(Empty, 'http').ports() == set(['http']) assert make_config(Empty, 'http', 'thrift').ports() == set(['http', 'thrift']) def test_static_port_aliasing(): announce = Announcer(primary_port='thrift', portmap={'thrift': 8081, 'health': 8300, 'aurora': 'health'}) config = make_config(announce) assert config.ports() == set() 
for resource in list(config.job().taskConfig.resources): assert resource.namedPort is None config = make_config(announce, 'thrift') assert config.ports() == set() for resource in list(config.job().taskConfig.resources): assert resource.namedPort is None config = make_config(announce, 'thrift', 'health') assert config.ports() == set() for resource in list(config.job().taskConfig.resources): assert resource.namedPort is None config = make_config(announce, 'derp') assert config.ports() == set(['derp']) assert Resource(namedPort='derp') in list(config.job().taskConfig.resources) def test_pystachio_schema_regression(): class ChildOld(Struct): interval = Default(Float, 1.0) # noqa class ChildNew(Struct): interval = Default(Float, 1.0) # noqa endpoint = Default(String, '/health') # noqa class ParentOld(Struct): child = Default(ChildOld, ChildOld()) # noqa class ParentNew(Struct): child = Default(ChildNew, ChildNew()) # noqa new_parent = ParentNew() old_parent = ParentOld.json_loads(new_parent.json_dumps()) assert old_parent.child().interval().get() == 1.0 old_parent = ParentOld() new_parent = ParentNew.json_loads(old_parent.json_dumps()) assert new_parent.child().interval().get() == 1.0 assert new_parent.child().endpoint().get() == '/health'
apache-2.0
nicky-ji/edx-nicky
cms/djangoapps/contentstore/views/tests/test_import_export.py
10
11686
""" Unit tests for course import and export """ import copy import json import logging import os import shutil import tarfile import tempfile from path import path from uuid import uuid4 from django.test.utils import override_settings from django.conf import settings from contentstore.utils import reverse_course_url from xmodule.modulestore.tests.factories import ItemFactory from contentstore.tests.utils import CourseTestCase from student import auth from student.roles import CourseInstructorRole, CourseStaffRole TEST_DATA_CONTENTSTORE = copy.deepcopy(settings.CONTENTSTORE) TEST_DATA_CONTENTSTORE['DOC_STORE_CONFIG']['db'] = 'test_xcontent_%s' % uuid4().hex log = logging.getLogger(__name__) @override_settings(CONTENTSTORE=TEST_DATA_CONTENTSTORE) class ImportTestCase(CourseTestCase): """ Unit tests for importing a course """ def setUp(self): super(ImportTestCase, self).setUp() self.url = reverse_course_url('import_handler', self.course.id) self.content_dir = path(tempfile.mkdtemp()) def touch(name): """ Equivalent to shell's 'touch'""" with file(name, 'a'): os.utime(name, None) # Create tar test files ----------------------------------------------- # OK course: good_dir = tempfile.mkdtemp(dir=self.content_dir) # test course being deeper down than top of tar file embedded_dir = os.path.join(good_dir, "grandparent", "parent") os.makedirs(os.path.join(embedded_dir, "course")) with open(os.path.join(embedded_dir, "course.xml"), "w+") as f: f.write('<course url_name="2013_Spring" org="EDx" course="0.00x"/>') with open(os.path.join(embedded_dir, "course", "2013_Spring.xml"), "w+") as f: f.write('<course></course>') self.good_tar = os.path.join(self.content_dir, "good.tar.gz") with tarfile.open(self.good_tar, "w:gz") as gtar: gtar.add(good_dir) # Bad course (no 'course.xml' file): bad_dir = tempfile.mkdtemp(dir=self.content_dir) touch(os.path.join(bad_dir, "bad.xml")) self.bad_tar = os.path.join(self.content_dir, "bad.tar.gz") with tarfile.open(self.bad_tar, "w:gz") as 
btar: btar.add(bad_dir) self.unsafe_common_dir = path(tempfile.mkdtemp(dir=self.content_dir)) def tearDown(self): shutil.rmtree(self.content_dir) def test_no_coursexml(self): """ Check that the response for a tar.gz import without a course.xml is correct. """ with open(self.bad_tar) as btar: resp = self.client.post( self.url, { "name": self.bad_tar, "course-data": [btar] }) self.assertEquals(resp.status_code, 415) # Check that `import_status` returns the appropriate stage (i.e., the # stage at which import failed). resp_status = self.client.get( reverse_course_url( 'import_status_handler', self.course.id, kwargs={'filename': os.path.split(self.bad_tar)[1]} ) ) self.assertEquals(json.loads(resp_status.content)["ImportStatus"], 2) def test_with_coursexml(self): """ Check that the response for a tar.gz import with a course.xml is correct. """ with open(self.good_tar) as gtar: args = {"name": self.good_tar, "course-data": [gtar]} resp = self.client.post(self.url, args) self.assertEquals(resp.status_code, 200) def test_import_in_existing_course(self): """ Check that course is imported successfully in existing course and users have their access roles """ # Create a non_staff user and add it to course staff only __, nonstaff_user = self.create_non_staff_authed_user_client(authenticate=False) auth.add_users(self.user, CourseStaffRole(self.course.id), nonstaff_user) course = self.store.get_course(self.course.id) self.assertIsNotNone(course) display_name_before_import = course.display_name # Check that global staff user can import course with open(self.good_tar) as gtar: args = {"name": self.good_tar, "course-data": [gtar]} resp = self.client.post(self.url, args) self.assertEquals(resp.status_code, 200) course = self.store.get_course(self.course.id) self.assertIsNotNone(course) display_name_after_import = course.display_name # Check that course display name have changed after import self.assertNotEqual(display_name_before_import, display_name_after_import) # Now check that 
non_staff user has his same role self.assertFalse(CourseInstructorRole(self.course.id).has_user(nonstaff_user)) self.assertTrue(CourseStaffRole(self.course.id).has_user(nonstaff_user)) # Now course staff user can also successfully import course self.client.login(username=nonstaff_user.username, password='foo') with open(self.good_tar) as gtar: args = {"name": self.good_tar, "course-data": [gtar]} resp = self.client.post(self.url, args) self.assertEquals(resp.status_code, 200) # Now check that non_staff user has his same role self.assertFalse(CourseInstructorRole(self.course.id).has_user(nonstaff_user)) self.assertTrue(CourseStaffRole(self.course.id).has_user(nonstaff_user)) ## Unsafe tar methods ##################################################### # Each of these methods creates a tarfile with a single type of unsafe # content. def _fifo_tar(self): """ Tar file with FIFO """ fifop = self.unsafe_common_dir / "fifo.file" fifo_tar = self.unsafe_common_dir / "fifo.tar.gz" os.mkfifo(fifop) with tarfile.open(fifo_tar, "w:gz") as tar: tar.add(fifop) return fifo_tar def _symlink_tar(self): """ Tarfile with symlink to path outside directory. """ outsidep = self.unsafe_common_dir / "unsafe_file.txt" symlinkp = self.unsafe_common_dir / "symlink.txt" symlink_tar = self.unsafe_common_dir / "symlink.tar.gz" outsidep.symlink(symlinkp) with tarfile.open(symlink_tar, "w:gz") as tar: tar.add(symlinkp) return symlink_tar def _outside_tar(self): """ Tarfile with file that extracts to outside directory. Extracting this tarfile in directory <dir> will put its contents directly in <dir> (rather than <dir/tarname>). """ outside_tar = self.unsafe_common_dir / "unsafe_file.tar.gz" with tarfile.open(outside_tar, "w:gz") as tar: tar.addfile(tarfile.TarInfo(str(self.content_dir / "a_file"))) return outside_tar def _outside_tar2(self): """ Tarfile with file that extracts to outside directory. The path here matches the basename (`self.unsafe_common_dir`), but then "cd's out". E.g. 
"/usr/../etc" == "/etc", but the naive basename of the first (but not the second) is "/usr" Extracting this tarfile in directory <dir> will also put its contents directly in <dir> (rather than <dir/tarname>). """ outside_tar = self.unsafe_common_dir / "unsafe_file.tar.gz" with tarfile.open(outside_tar, "w:gz") as tar: tar.addfile(tarfile.TarInfo(str(self.unsafe_common_dir / "../a_file"))) return outside_tar def test_unsafe_tar(self): """ Check that safety measure work. This includes: 'tarbombs' which include files or symlinks with paths outside or directly in the working directory, 'special files' (character device, block device or FIFOs), all raise exceptions/400s. """ def try_tar(tarpath): with open(tarpath) as tar: args = {"name": tarpath, "course-data": [tar]} resp = self.client.post(self.url, args) self.assertEquals(resp.status_code, 400) self.assertTrue("SuspiciousFileOperation" in resp.content) try_tar(self._fifo_tar()) try_tar(self._symlink_tar()) try_tar(self._outside_tar()) try_tar(self._outside_tar2()) # Check that `import_status` returns the appropriate stage (i.e., # either 3, indicating all previous steps are completed, or 0, # indicating no upload in progress) resp_status = self.client.get( reverse_course_url( 'import_status_handler', self.course.id, kwargs={'filename': os.path.split(self.good_tar)[1]} ) ) import_status = json.loads(resp_status.content)["ImportStatus"] self.assertIn(import_status, (0, 3)) @override_settings(CONTENTSTORE=TEST_DATA_CONTENTSTORE) class ExportTestCase(CourseTestCase): """ Tests for export_handler. """ def setUp(self): """ Sets up the test course. """ super(ExportTestCase, self).setUp() self.url = reverse_course_url('export_handler', self.course.id) def test_export_html(self): """ Get the HTML for the page. """ resp = self.client.get_html(self.url) self.assertEquals(resp.status_code, 200) self.assertContains(resp, "Export My Course Content") def test_export_json_unsupported(self): """ JSON is unsupported. 
""" resp = self.client.get(self.url, HTTP_ACCEPT='application/json') self.assertEquals(resp.status_code, 406) def test_export_targz(self): """ Get tar.gz file, using HTTP_ACCEPT. """ resp = self.client.get(self.url, HTTP_ACCEPT='application/x-tgz') self._verify_export_succeeded(resp) def test_export_targz_urlparam(self): """ Get tar.gz file, using URL parameter. """ resp = self.client.get(self.url + '?_accept=application/x-tgz') self._verify_export_succeeded(resp) def _verify_export_succeeded(self, resp): """ Export success helper method. """ self.assertEquals(resp.status_code, 200) self.assertTrue(resp.get('Content-Disposition').startswith('attachment')) def test_export_failure_top_level(self): """ Export failure. """ fake_xblock = ItemFactory.create(parent_location=self.course.location, category='aawefawef') self.store.publish(fake_xblock.location, self.user.id) self._verify_export_failure(u'/container/i4x://MITx/999/course/Robot_Super_Course') def test_export_failure_subsection_level(self): """ Slightly different export failure. """ vertical = ItemFactory.create(parent_location=self.course.location, category='vertical', display_name='foo') ItemFactory.create( parent_location=vertical.location, category='aawefawef' ) self._verify_export_failure(u'/container/i4x://MITx/999/vertical/foo') def _verify_export_failure(self, expectedText): """ Export failure helper method. """ resp = self.client.get(self.url, HTTP_ACCEPT='application/x-tgz') self.assertEquals(resp.status_code, 200) self.assertIsNone(resp.get('Content-Disposition')) self.assertContains(resp, 'Unable to create xml for module') self.assertContains(resp, expectedText)
agpl-3.0
subramani95/neutron
neutron/tests/unit/hyperv/test_hyperv_security_groups_driver.py
12
7796
# Copyright 2014 Cloudbase Solutions SRL
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Claudiu Belu, Cloudbase Solutions Srl

"""
Unit tests for the Hyper-V Security Groups Driver.
"""

import mock
from oslo.config import cfg

from neutron.plugins.hyperv.agent import security_groups_driver as sg_driver
from neutron.plugins.hyperv.agent import utilsfactory
from neutron.tests import base

CONF = cfg.CONF


class TestHyperVSecurityGroupsDriver(base.BaseTestCase):
    """Exercises HyperVSecurityGroupsDriver with a fully mocked utils layer."""

    # Sentinel values reused across the individual tests.
    _FAKE_DEVICE = 'fake_device'
    _FAKE_ID = 'fake_id'
    _FAKE_DIRECTION = 'ingress'
    _FAKE_ETHERTYPE = 'IPv4'
    _FAKE_ETHERTYPE_IPV6 = 'IPv6'
    _FAKE_DEST_IP_PREFIX = 'fake_dest_ip_prefix'
    _FAKE_SOURCE_IP_PREFIX = 'fake_source_ip_prefix'
    _FAKE_PARAM_NAME = 'fake_param_name'
    _FAKE_PARAM_VALUE = 'fake_param_value'
    _FAKE_PORT_MIN = 9001
    _FAKE_PORT_MAX = 9011

    def setUp(self):
        super(TestHyperVSecurityGroupsDriver, self).setUp()
        # Patch the utils factory so the driver never touches real Hyper-V.
        self._mock_windows_version = mock.patch.object(utilsfactory,
                                                       'get_hypervutils')
        self._mock_windows_version.start()
        self._driver = sg_driver.HyperVSecurityGroupsDriver()
        self._driver._utils = mock.MagicMock()

    @mock.patch('neutron.plugins.hyperv.agent.security_groups_driver'
                '.HyperVSecurityGroupsDriver._create_port_rules')
    def test_prepare_port_filter(self, mock_create_rules):
        # Preparing a port must cache it and install default reject-all rules
        # before the port's own security group rules are created.
        mock_port = self._get_port()
        mock_utils_method = self._driver._utils.create_default_reject_all_rules
        self._driver.prepare_port_filter(mock_port)

        self.assertEqual(mock_port,
                         self._driver._security_ports[self._FAKE_DEVICE])
        mock_utils_method.assert_called_once_with(self._FAKE_ID)
        self._driver._create_port_rules.assert_called_once_with(
            self._FAKE_ID, mock_port['security_group_rules'])

    def test_update_port_filter(self):
        # Updating a known port removes the old rules and creates the new
        # ones; the cached port entry is replaced with the new port dict.
        mock_port = self._get_port()
        new_mock_port = self._get_port()
        new_mock_port['id'] += '2'
        new_mock_port['security_group_rules'][0]['ethertype'] += "2"

        self._driver._security_ports[mock_port['device']] = mock_port
        self._driver._create_port_rules = mock.MagicMock()
        self._driver._remove_port_rules = mock.MagicMock()
        self._driver.update_port_filter(new_mock_port)

        self._driver._remove_port_rules.assert_called_once_with(
            mock_port['id'], mock_port['security_group_rules'])
        self._driver._create_port_rules.assert_called_once_with(
            new_mock_port['id'], new_mock_port['security_group_rules'])
        self.assertEqual(new_mock_port,
                         self._driver._security_ports[new_mock_port['device']])

    @mock.patch('neutron.plugins.hyperv.agent.security_groups_driver'
                '.HyperVSecurityGroupsDriver.prepare_port_filter')
    def test_update_port_filter_new_port(self, mock_method):
        # An update for a port never seen before falls back to prepare.
        mock_port = self._get_port()
        self._driver.prepare_port_filter = mock.MagicMock()
        self._driver.update_port_filter(mock_port)

        self._driver.prepare_port_filter.assert_called_once_with(mock_port)

    def test_remove_port_filter(self):
        # Removing a port drops it from the cache.
        mock_port = self._get_port()
        self._driver._security_ports[mock_port['device']] = mock_port
        self._driver.remove_port_filter(mock_port)
        self.assertFalse(mock_port['device'] in self._driver._security_ports)

    def test_create_port_rules_exception(self):
        # Rule-creation failures must be swallowed, not propagated.
        fake_rule = self._create_security_rule()
        self._driver._utils.create_security_rule.side_effect = Exception(
            'Generated Exception for testing.')
        self._driver._create_port_rules(self._FAKE_ID, [fake_rule])

    def test_create_param_map(self):
        # Verifies rule-dict -> ACL parameter translation, including the
        # "min-max" local port range formatting.
        fake_rule = self._create_security_rule()
        self._driver._get_rule_remote_address = mock.MagicMock(
            return_value=self._FAKE_SOURCE_IP_PREFIX)
        actual = self._driver._create_param_map(fake_rule)
        expected = {
            'direction': self._driver._ACL_PROP_MAP[
                'direction'][self._FAKE_DIRECTION],
            'acl_type': self._driver._ACL_PROP_MAP[
                'ethertype'][self._FAKE_ETHERTYPE],
            'local_port': '%s-%s' % (self._FAKE_PORT_MIN, self._FAKE_PORT_MAX),
            'protocol': self._driver._ACL_PROP_MAP['default'],
            'remote_address': self._FAKE_SOURCE_IP_PREFIX
        }

        self.assertEqual(expected, actual)

    @mock.patch('neutron.plugins.hyperv.agent.security_groups_driver'
                '.HyperVSecurityGroupsDriver._create_param_map')
    def test_create_port_rules(self, mock_method):
        # The generated param map is expanded into keyword arguments of
        # utils.create_security_rule.
        fake_rule = self._create_security_rule()
        mock_method.return_value = {
            self._FAKE_PARAM_NAME: self._FAKE_PARAM_VALUE}
        self._driver._create_port_rules(self._FAKE_ID, [fake_rule])

        self._driver._utils.create_security_rule.assert_called_once_with(
            self._FAKE_ID, fake_param_name=self._FAKE_PARAM_VALUE)

    def test_convert_any_address_to_same_ingress(self):
        # Ingress rules use the source prefix as the remote address.
        rule = self._create_security_rule()
        actual = self._driver._get_rule_remote_address(rule)
        self.assertEqual(self._FAKE_SOURCE_IP_PREFIX, actual)

    def test_convert_any_address_to_same_egress(self):
        # Any non-ingress direction uses the destination prefix instead.
        rule = self._create_security_rule()
        rule['direction'] += '2'
        actual = self._driver._get_rule_remote_address(rule)
        self.assertEqual(self._FAKE_DEST_IP_PREFIX, actual)

    def test_convert_any_address_to_ipv4(self):
        # Missing prefix falls back to the IPv4 "any" default address.
        rule = self._create_security_rule()
        del rule['source_ip_prefix']
        actual = self._driver._get_rule_remote_address(rule)
        self.assertEqual(self._driver._ACL_PROP_MAP['address_default']['IPv4'],
                         actual)

    def test_convert_any_address_to_ipv6(self):
        # Missing prefix with an IPv6 ethertype uses the IPv6 "any" default.
        rule = self._create_security_rule()
        del rule['source_ip_prefix']
        rule['ethertype'] = self._FAKE_ETHERTYPE_IPV6
        actual = self._driver._get_rule_remote_address(rule)
        self.assertEqual(self._driver._ACL_PROP_MAP['address_default']['IPv6'],
                         actual)

    def test_get_rule_protocol_icmp(self):
        # ICMP is mapped through the ACL property map.
        self._test_get_rule_protocol(
            'icmp', self._driver._ACL_PROP_MAP['protocol']['icmp'])

    def test_get_rule_protocol_no_icmp(self):
        # Non-ICMP protocols pass through unchanged.
        self._test_get_rule_protocol('tcp', 'tcp')

    def _test_get_rule_protocol(self, protocol, expected):
        """Shared assertion body for the protocol-mapping tests."""
        rule = self._create_security_rule()
        rule['protocol'] = protocol
        actual = self._driver._get_rule_protocol(rule)

        self.assertEqual(expected, actual)

    def _get_port(self):
        """Return a minimal fake port dict with one security group rule."""
        return {
            'device': self._FAKE_DEVICE,
            'id': self._FAKE_ID,
            'security_group_rules': [self._create_security_rule()]
        }

    def _create_security_rule(self):
        """Return a fake ingress IPv4 rule covering the test port range."""
        return {
            'direction': self._FAKE_DIRECTION,
            'ethertype': self._FAKE_ETHERTYPE,
            'dest_ip_prefix': self._FAKE_DEST_IP_PREFIX,
            'source_ip_prefix': self._FAKE_SOURCE_IP_PREFIX,
            'port_range_min': self._FAKE_PORT_MIN,
            'port_range_max': self._FAKE_PORT_MAX
        }
apache-2.0
timsnyder/bokeh
bokeh/core/property/auto.py
2
2646
#----------------------------------------------------------------------------- # Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors. # All rights reserved. # # The full license is in the file LICENSE.txt, distributed with this software. #----------------------------------------------------------------------------- ''' Provide the Auto property. ''' #----------------------------------------------------------------------------- # Boilerplate #----------------------------------------------------------------------------- from __future__ import absolute_import, division, print_function, unicode_literals import logging log = logging.getLogger(__name__) #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- # Standard library imports # External imports # Bokeh imports from .enum import Enum #----------------------------------------------------------------------------- # Globals and constants #----------------------------------------------------------------------------- __all__ = ( 'Auto', ) #----------------------------------------------------------------------------- # General API #----------------------------------------------------------------------------- class Auto(Enum): ''' Accepts only the string "auto". Useful for properties that can be configured to behave "automatically". Example: This property is often most useful in conjunction with the :class:`~bokeh.core.properties.Either` property. .. code-block:: python >>> class AutoModel(HasProps): ... prop = Either(Float, Auto) ... >>> m = AutoModel() >>> m.prop = 10.2 >>> m.prop = "auto" >>> m.prop = "foo" # ValueError !! >>> m.prop = [1, 2, 3] # ValueError !! 
''' def __init__(self): super(Auto, self).__init__("auto") def __str__(self): return self.__class__.__name__ def _sphinx_type(self): return self._sphinx_prop_link() #----------------------------------------------------------------------------- # Dev API #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Private API #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Code #-----------------------------------------------------------------------------
bsd-3-clause
google/containerregistry
client/v1/save_.py
2
3111
# Copyright 2017 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """This package provides tools for saving docker images.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import io import json import tarfile from containerregistry.client import docker_name from containerregistry.client.v1 import docker_image import six def multi_image_tarball( tag_to_image, tar): """Produce a "docker save" compatible tarball from the DockerImages. Args: tag_to_image: A dictionary of tags to the images they label. tar: the open tarfile into which we are writing the image tarball. """ def add_file(filename, contents): info = tarfile.TarInfo(filename) info.size = len(contents) tar.addfile(tarinfo=info, fileobj=io.BytesIO(contents)) seen = set() repositories = {} # Each layer is encoded as a directory in the larger tarball of the form: # {layer_id}\ # layer.tar # VERSION # json for (tag, image) in six.iteritems(tag_to_image): # Add this image's repositories entry. repo = str(tag.as_repository()) tags = repositories.get(repo, {}) tags[tag.tag] = image.top() repositories[repo] = tags for layer_id in image.ancestry(image.top()): # Add each layer_id exactly once. if layer_id in seen or json.loads(image.json(layer_id)).get('throwaway'): continue seen.add(layer_id) # VERSION generally seems to contain 1.0, not entirely sure # what the point of this is. 
add_file(layer_id + '/VERSION', b'1.0') # Add the unzipped layer tarball content = image.uncompressed_layer(layer_id) add_file(layer_id + '/layer.tar', content) # Now the json metadata add_file(layer_id + '/json', image.json(layer_id).encode('utf8')) # Add the metadata tagging the top layer. add_file('repositories', json.dumps(repositories, sort_keys=True).encode('utf8')) def tarball(name, image, tar): """Produce a "docker save" compatible tarball from the DockerImage. Args: name: The tag name to write into the repositories file. image: a docker image to save. tar: the open tarfile into which we are writing the image tarball. """ def add_file(filename, contents): info = tarfile.TarInfo(filename) info.size = len(contents) tar.addfile(tarinfo=info, fileobj=io.BytesIO(contents)) multi_image_tarball({name: image}, tar) # Add our convenience file with the top layer's ID. add_file('top', image.top().encode('utf8'))
apache-2.0
ahkscript/sjBot
commands/#ahkscript,#ahk,#Sjc_Bot/ahk.py
1
1245
#!/usr/bin/env python3 import re import urllib.parse import json import difflib owner = False aliases = ['ahksearch', 'search', 'a'] def ahk(con, sjBot, commands, trigger, host, channel, *query): """Searches the AHK docs for something. If its not found it will then search the forum.""" with open('commands/docs.json') as dfile: links = json.loads(dfile.read()) matches = difflib.get_close_matches(' '.join(query).lower(), links, cutoff=0.5) if len(matches) > 0: return '\x02{}\x02 - http://ahkscript.org/docs/{}'.format(matches[0], links[matches[0]]) search = urllib.parse.quote(' '.join(query)) data = json.loads(sjBot['url_download']('https://www.googleapis.com/' 'customsearch/v1?key={}&cx=009062493091172133168:_o2f4moc9ce&q=' '{}'.format(sjBot['settings']['google_key'], search))) print( data['searchInformation']['totalResults'] == '0' ) if data['searchInformation']['totalResults'] == '0': return 'No information found.' if len(data['items']) > 0: item = data['items'][0] return '\x02{}\x02 - {}'.format(data['items'][0]['title'], data['items'][0]['formattedUrl']) return None
gpl-3.0
jetskijoe/SickGear
lib/sqlalchemy/sql/ddl.py
78
28858
# sql/ddl.py # Copyright (C) 2009-2014 the SQLAlchemy authors and contributors <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """ Provides the hierarchy of DDL-defining schema items as well as routines to invoke them for a create/drop call. """ from .. import util from .elements import ClauseElement from .visitors import traverse from .base import Executable, _generative, SchemaVisitor, _bind_or_error from ..util import topological from .. import event from .. import exc class _DDLCompiles(ClauseElement): def _compiler(self, dialect, **kw): """Return a compiler appropriate for this ClauseElement, given a Dialect.""" return dialect.ddl_compiler(dialect, self, **kw) class DDLElement(Executable, _DDLCompiles): """Base class for DDL expression constructs. This class is the base for the general purpose :class:`.DDL` class, as well as the various create/drop clause constructs such as :class:`.CreateTable`, :class:`.DropTable`, :class:`.AddConstraint`, etc. :class:`.DDLElement` integrates closely with SQLAlchemy events, introduced in :ref:`event_toplevel`. An instance of one is itself an event receiving callable:: event.listen( users, 'after_create', AddConstraint(constraint).execute_if(dialect='postgresql') ) .. seealso:: :class:`.DDL` :class:`.DDLEvents` :ref:`event_toplevel` :ref:`schema_ddl_sequences` """ _execution_options = Executable.\ _execution_options.union({'autocommit': True}) target = None on = None dialect = None callable_ = None def _execute_on_connection(self, connection, multiparams, params): return connection._execute_ddl(self, multiparams, params) def execute(self, bind=None, target=None): """Execute this DDL immediately. Executes the DDL statement in isolation using the supplied :class:`.Connectable` or :class:`.Connectable` assigned to the ``.bind`` property, if not supplied. 
If the DDL has a conditional ``on`` criteria, it will be invoked with None as the event. :param bind: Optional, an ``Engine`` or ``Connection``. If not supplied, a valid :class:`.Connectable` must be present in the ``.bind`` property. :param target: Optional, defaults to None. The target SchemaItem for the execute call. Will be passed to the ``on`` callable if any, and may also provide string expansion data for the statement. See ``execute_at`` for more information. """ if bind is None: bind = _bind_or_error(self) if self._should_execute(target, bind): return bind.execute(self.against(target)) else: bind.engine.logger.info( "DDL execution skipped, criteria not met.") @util.deprecated("0.7", "See :class:`.DDLEvents`, as well as " ":meth:`.DDLElement.execute_if`.") def execute_at(self, event_name, target): """Link execution of this DDL to the DDL lifecycle of a SchemaItem. Links this ``DDLElement`` to a ``Table`` or ``MetaData`` instance, executing it when that schema item is created or dropped. The DDL statement will be executed using the same Connection and transactional context as the Table create/drop itself. The ``.bind`` property of this statement is ignored. :param event: One of the events defined in the schema item's ``.ddl_events``; e.g. 'before-create', 'after-create', 'before-drop' or 'after-drop' :param target: The Table or MetaData instance for which this DDLElement will be associated with. A DDLElement instance can be linked to any number of schema items. ``execute_at`` builds on the ``append_ddl_listener`` interface of :class:`.MetaData` and :class:`.Table` objects. Caveat: Creating or dropping a Table in isolation will also trigger any DDL set to ``execute_at`` that Table's MetaData. This may change in a future release. 
""" def call_event(target, connection, **kw): if self._should_execute_deprecated(event_name, target, connection, **kw): return connection.execute(self.against(target)) event.listen(target, "" + event_name.replace('-', '_'), call_event) @_generative def against(self, target): """Return a copy of this DDL against a specific schema item.""" self.target = target @_generative def execute_if(self, dialect=None, callable_=None, state=None): """Return a callable that will execute this DDLElement conditionally. Used to provide a wrapper for event listening:: event.listen( metadata, 'before_create', DDL("my_ddl").execute_if(dialect='postgresql') ) :param dialect: May be a string, tuple or a callable predicate. If a string, it will be compared to the name of the executing database dialect:: DDL('something').execute_if(dialect='postgresql') If a tuple, specifies multiple dialect names:: DDL('something').execute_if(dialect=('postgresql', 'mysql')) :param callable_: A callable, which will be invoked with four positional arguments as well as optional keyword arguments: :ddl: This DDL element. :target: The :class:`.Table` or :class:`.MetaData` object which is the target of this event. May be None if the DDL is executed explicitly. :bind: The :class:`.Connection` being used for DDL execution :tables: Optional keyword argument - a list of Table objects which are to be created/ dropped within a MetaData.create_all() or drop_all() method call. :state: Optional keyword argument - will be the ``state`` argument passed to this function. :checkfirst: Keyword argument, will be True if the 'checkfirst' flag was set during the call to ``create()``, ``create_all()``, ``drop()``, ``drop_all()``. If the callable returns a true value, the DDL statement will be executed. :param state: any value which will be passed to the callable\_ as the ``state`` keyword argument. .. 
seealso:: :class:`.DDLEvents` :ref:`event_toplevel` """ self.dialect = dialect self.callable_ = callable_ self.state = state def _should_execute(self, target, bind, **kw): if self.on is not None and \ not self._should_execute_deprecated(None, target, bind, **kw): return False if isinstance(self.dialect, util.string_types): if self.dialect != bind.engine.name: return False elif isinstance(self.dialect, (tuple, list, set)): if bind.engine.name not in self.dialect: return False if self.callable_ is not None and \ not self.callable_(self, target, bind, state=self.state, **kw): return False return True def _should_execute_deprecated(self, event, target, bind, **kw): if self.on is None: return True elif isinstance(self.on, util.string_types): return self.on == bind.engine.name elif isinstance(self.on, (tuple, list, set)): return bind.engine.name in self.on else: return self.on(self, event, target, bind, **kw) def __call__(self, target, bind, **kw): """Execute the DDL as a ddl_listener.""" if self._should_execute(target, bind, **kw): return bind.execute(self.against(target)) def _check_ddl_on(self, on): if (on is not None and (not isinstance(on, util.string_types + (tuple, list, set)) and not util.callable(on))): raise exc.ArgumentError( "Expected the name of a database dialect, a tuple " "of names, or a callable for " "'on' criteria, got type '%s'." % type(on).__name__) def bind(self): if self._bind: return self._bind def _set_bind(self, bind): self._bind = bind bind = property(bind, _set_bind) def _generate(self): s = self.__class__.__new__(self.__class__) s.__dict__ = self.__dict__.copy() return s class DDL(DDLElement): """A literal DDL statement. Specifies literal SQL DDL to be executed by the database. DDL objects function as DDL event listeners, and can be subscribed to those events listed in :class:`.DDLEvents`, using either :class:`.Table` or :class:`.MetaData` objects as targets. 
Basic templating support allows a single DDL instance to handle repetitive tasks for multiple tables. Examples:: from sqlalchemy import event, DDL tbl = Table('users', metadata, Column('uid', Integer)) event.listen(tbl, 'before_create', DDL('DROP TRIGGER users_trigger')) spow = DDL('ALTER TABLE %(table)s SET secretpowers TRUE') event.listen(tbl, 'after_create', spow.execute_if(dialect='somedb')) drop_spow = DDL('ALTER TABLE users SET secretpowers FALSE') connection.execute(drop_spow) When operating on Table events, the following ``statement`` string substitions are available:: %(table)s - the Table name, with any required quoting applied %(schema)s - the schema name, with any required quoting applied %(fullname)s - the Table name including schema, quoted if needed The DDL's "context", if any, will be combined with the standard substutions noted above. Keys present in the context will override the standard substitutions. """ __visit_name__ = "ddl" def __init__(self, statement, on=None, context=None, bind=None): """Create a DDL statement. :param statement: A string or unicode string to be executed. Statements will be processed with Python's string formatting operator. See the ``context`` argument and the ``execute_at`` method. A literal '%' in a statement must be escaped as '%%'. SQL bind parameters are not available in DDL statements. :param on: .. deprecated:: 0.7 See :meth:`.DDLElement.execute_if`. Optional filtering criteria. May be a string, tuple or a callable predicate. If a string, it will be compared to the name of the executing database dialect:: DDL('something', on='postgresql') If a tuple, specifies multiple dialect names:: DDL('something', on=('postgresql', 'mysql')) If a callable, it will be invoked with four positional arguments as well as optional keyword arguments: :ddl: This DDL element. :event: The name of the event that has triggered this DDL, such as 'after-create' Will be None if the DDL is executed explicitly. 
:target: The ``Table`` or ``MetaData`` object which is the target of this event. May be None if the DDL is executed explicitly. :connection: The ``Connection`` being used for DDL execution :tables: Optional keyword argument - a list of Table objects which are to be created/ dropped within a MetaData.create_all() or drop_all() method call. If the callable returns a true value, the DDL statement will be executed. :param context: Optional dictionary, defaults to None. These values will be available for use in string substitutions on the DDL statement. :param bind: Optional. A :class:`.Connectable`, used by default when ``execute()`` is invoked without a bind argument. .. seealso:: :class:`.DDLEvents` :mod:`sqlalchemy.event` """ if not isinstance(statement, util.string_types): raise exc.ArgumentError( "Expected a string or unicode SQL statement, got '%r'" % statement) self.statement = statement self.context = context or {} self._check_ddl_on(on) self.on = on self._bind = bind def __repr__(self): return '<%s@%s; %s>' % ( type(self).__name__, id(self), ', '.join([repr(self.statement)] + ['%s=%r' % (key, getattr(self, key)) for key in ('on', 'context') if getattr(self, key)])) class _CreateDropBase(DDLElement): """Base class for DDL constucts that represent CREATE and DROP or equivalents. The common theme of _CreateDropBase is a single ``element`` attribute which refers to the element to be created or dropped. """ def __init__(self, element, on=None, bind=None): self.element = element self._check_ddl_on(on) self.on = on self.bind = bind def _create_rule_disable(self, compiler): """Allow disable of _create_rule using a callable. Pass to _create_rule using util.portable_instancemethod(self._create_rule_disable) to retain serializability. """ return False class CreateSchema(_CreateDropBase): """Represent a CREATE SCHEMA statement. .. versionadded:: 0.7.4 The argument here is the string name of the schema. 
""" __visit_name__ = "create_schema" def __init__(self, name, quote=None, **kw): """Create a new :class:`.CreateSchema` construct.""" self.quote = quote super(CreateSchema, self).__init__(name, **kw) class DropSchema(_CreateDropBase): """Represent a DROP SCHEMA statement. The argument here is the string name of the schema. .. versionadded:: 0.7.4 """ __visit_name__ = "drop_schema" def __init__(self, name, quote=None, cascade=False, **kw): """Create a new :class:`.DropSchema` construct.""" self.quote = quote self.cascade = cascade super(DropSchema, self).__init__(name, **kw) class CreateTable(_CreateDropBase): """Represent a CREATE TABLE statement.""" __visit_name__ = "create_table" def __init__(self, element, on=None, bind=None): """Create a :class:`.CreateTable` construct. :param element: a :class:`.Table` that's the subject of the CREATE :param on: See the description for 'on' in :class:`.DDL`. :param bind: See the description for 'bind' in :class:`.DDL`. """ super(CreateTable, self).__init__(element, on=on, bind=bind) self.columns = [CreateColumn(column) for column in element.columns ] class _DropView(_CreateDropBase): """Semi-public 'DROP VIEW' construct. Used by the test suite for dialect-agnostic drops of views. This object will eventually be part of a public "view" API. """ __visit_name__ = "drop_view" class CreateColumn(_DDLCompiles): """Represent a :class:`.Column` as rendered in a CREATE TABLE statement, via the :class:`.CreateTable` construct. This is provided to support custom column DDL within the generation of CREATE TABLE statements, by using the compiler extension documented in :ref:`sqlalchemy.ext.compiler_toplevel` to extend :class:`.CreateColumn`. 
Typical integration is to examine the incoming :class:`.Column` object, and to redirect compilation if a particular flag or condition is found:: from sqlalchemy import schema from sqlalchemy.ext.compiler import compiles @compiles(schema.CreateColumn) def compile(element, compiler, **kw): column = element.element if "special" not in column.info: return compiler.visit_create_column(element, **kw) text = "%s SPECIAL DIRECTIVE %s" % ( column.name, compiler.type_compiler.process(column.type) ) default = compiler.get_column_default_string(column) if default is not None: text += " DEFAULT " + default if not column.nullable: text += " NOT NULL" if column.constraints: text += " ".join( compiler.process(const) for const in column.constraints) return text The above construct can be applied to a :class:`.Table` as follows:: from sqlalchemy import Table, Metadata, Column, Integer, String from sqlalchemy import schema metadata = MetaData() table = Table('mytable', MetaData(), Column('x', Integer, info={"special":True}, primary_key=True), Column('y', String(50)), Column('z', String(20), info={"special":True}) ) metadata.create_all(conn) Above, the directives we've added to the :attr:`.Column.info` collection will be detected by our custom compilation scheme:: CREATE TABLE mytable ( x SPECIAL DIRECTIVE INTEGER NOT NULL, y VARCHAR(50), z SPECIAL DIRECTIVE VARCHAR(20), PRIMARY KEY (x) ) The :class:`.CreateColumn` construct can also be used to skip certain columns when producing a ``CREATE TABLE``. This is accomplished by creating a compilation rule that conditionally returns ``None``. This is essentially how to produce the same effect as using the ``system=True`` argument on :class:`.Column`, which marks a column as an implicitly-present "system" column. For example, suppose we wish to produce a :class:`.Table` which skips rendering of the Postgresql ``xmin`` column against the Postgresql backend, but on other backends does render it, in anticipation of a triggered rule. 
A conditional compilation rule could skip this name only on Postgresql:: from sqlalchemy.schema import CreateColumn @compiles(CreateColumn, "postgresql") def skip_xmin(element, compiler, **kw): if element.element.name == 'xmin': return None else: return compiler.visit_create_column(element, **kw) my_table = Table('mytable', metadata, Column('id', Integer, primary_key=True), Column('xmin', Integer) ) Above, a :class:`.CreateTable` construct will generate a ``CREATE TABLE`` which only includes the ``id`` column in the string; the ``xmin`` column will be omitted, but only against the Postgresql backend. .. versionadded:: 0.8.3 The :class:`.CreateColumn` construct supports skipping of columns by returning ``None`` from a custom compilation rule. .. versionadded:: 0.8 The :class:`.CreateColumn` construct was added to support custom column creation styles. """ __visit_name__ = 'create_column' def __init__(self, element): self.element = element class DropTable(_CreateDropBase): """Represent a DROP TABLE statement.""" __visit_name__ = "drop_table" class CreateSequence(_CreateDropBase): """Represent a CREATE SEQUENCE statement.""" __visit_name__ = "create_sequence" class DropSequence(_CreateDropBase): """Represent a DROP SEQUENCE statement.""" __visit_name__ = "drop_sequence" class CreateIndex(_CreateDropBase): """Represent a CREATE INDEX statement.""" __visit_name__ = "create_index" class DropIndex(_CreateDropBase): """Represent a DROP INDEX statement.""" __visit_name__ = "drop_index" class AddConstraint(_CreateDropBase): """Represent an ALTER TABLE ADD CONSTRAINT statement.""" __visit_name__ = "add_constraint" def __init__(self, element, *args, **kw): super(AddConstraint, self).__init__(element, *args, **kw) element._create_rule = util.portable_instancemethod( self._create_rule_disable) class DropConstraint(_CreateDropBase): """Represent an ALTER TABLE DROP CONSTRAINT statement.""" __visit_name__ = "drop_constraint" def __init__(self, element, cascade=False, **kw): 
self.cascade = cascade super(DropConstraint, self).__init__(element, **kw) element._create_rule = util.portable_instancemethod( self._create_rule_disable) class DDLBase(SchemaVisitor): def __init__(self, connection): self.connection = connection class SchemaGenerator(DDLBase): def __init__(self, dialect, connection, checkfirst=False, tables=None, **kwargs): super(SchemaGenerator, self).__init__(connection, **kwargs) self.checkfirst = checkfirst self.tables = tables self.preparer = dialect.identifier_preparer self.dialect = dialect self.memo = {} def _can_create_table(self, table): self.dialect.validate_identifier(table.name) if table.schema: self.dialect.validate_identifier(table.schema) return not self.checkfirst or \ not self.dialect.has_table(self.connection, table.name, schema=table.schema) def _can_create_sequence(self, sequence): return self.dialect.supports_sequences and \ ( (not self.dialect.sequences_optional or not sequence.optional) and ( not self.checkfirst or not self.dialect.has_sequence( self.connection, sequence.name, schema=sequence.schema) ) ) def visit_metadata(self, metadata): if self.tables is not None: tables = self.tables else: tables = list(metadata.tables.values()) collection = [t for t in sort_tables(tables) if self._can_create_table(t)] seq_coll = [s for s in metadata._sequences.values() if s.column is None and self._can_create_sequence(s)] metadata.dispatch.before_create(metadata, self.connection, tables=collection, checkfirst=self.checkfirst, _ddl_runner=self) for seq in seq_coll: self.traverse_single(seq, create_ok=True) for table in collection: self.traverse_single(table, create_ok=True) metadata.dispatch.after_create(metadata, self.connection, tables=collection, checkfirst=self.checkfirst, _ddl_runner=self) def visit_table(self, table, create_ok=False): if not create_ok and not self._can_create_table(table): return table.dispatch.before_create(table, self.connection, checkfirst=self.checkfirst, _ddl_runner=self) for column in 
table.columns: if column.default is not None: self.traverse_single(column.default) self.connection.execute(CreateTable(table)) if hasattr(table, 'indexes'): for index in table.indexes: self.traverse_single(index) table.dispatch.after_create(table, self.connection, checkfirst=self.checkfirst, _ddl_runner=self) def visit_sequence(self, sequence, create_ok=False): if not create_ok and not self._can_create_sequence(sequence): return self.connection.execute(CreateSequence(sequence)) def visit_index(self, index): self.connection.execute(CreateIndex(index)) class SchemaDropper(DDLBase): def __init__(self, dialect, connection, checkfirst=False, tables=None, **kwargs): super(SchemaDropper, self).__init__(connection, **kwargs) self.checkfirst = checkfirst self.tables = tables self.preparer = dialect.identifier_preparer self.dialect = dialect self.memo = {} def visit_metadata(self, metadata): if self.tables is not None: tables = self.tables else: tables = list(metadata.tables.values()) collection = [ t for t in reversed(sort_tables(tables)) if self._can_drop_table(t) ] seq_coll = [ s for s in metadata._sequences.values() if s.column is None and self._can_drop_sequence(s) ] metadata.dispatch.before_drop( metadata, self.connection, tables=collection, checkfirst=self.checkfirst, _ddl_runner=self) for table in collection: self.traverse_single(table, drop_ok=True) for seq in seq_coll: self.traverse_single(seq, drop_ok=True) metadata.dispatch.after_drop( metadata, self.connection, tables=collection, checkfirst=self.checkfirst, _ddl_runner=self) def _can_drop_table(self, table): self.dialect.validate_identifier(table.name) if table.schema: self.dialect.validate_identifier(table.schema) return not self.checkfirst or self.dialect.has_table(self.connection, table.name, schema=table.schema) def _can_drop_sequence(self, sequence): return self.dialect.supports_sequences and \ ((not self.dialect.sequences_optional or not sequence.optional) and (not self.checkfirst or 
self.dialect.has_sequence( self.connection, sequence.name, schema=sequence.schema)) ) def visit_index(self, index): self.connection.execute(DropIndex(index)) def visit_table(self, table, drop_ok=False): if not drop_ok and not self._can_drop_table(table): return table.dispatch.before_drop(table, self.connection, checkfirst=self.checkfirst, _ddl_runner=self) for column in table.columns: if column.default is not None: self.traverse_single(column.default) self.connection.execute(DropTable(table)) table.dispatch.after_drop(table, self.connection, checkfirst=self.checkfirst, _ddl_runner=self) def visit_sequence(self, sequence, drop_ok=False): if not drop_ok and not self._can_drop_sequence(sequence): return self.connection.execute(DropSequence(sequence)) def sort_tables(tables, skip_fn=None, extra_dependencies=None): """sort a collection of Table objects in order of their foreign-key dependency.""" tables = list(tables) tuples = [] if extra_dependencies is not None: tuples.extend(extra_dependencies) def visit_foreign_key(fkey): if fkey.use_alter: return elif skip_fn and skip_fn(fkey): return parent_table = fkey.column.table if parent_table in tables: child_table = fkey.parent.table if parent_table is not child_table: tuples.append((parent_table, child_table)) for table in tables: traverse(table, {'schema_visitor': True}, {'foreign_key': visit_foreign_key}) tuples.extend( [parent, table] for parent in table._extra_dependencies ) return list(topological.sort(tuples, tables))
gpl-3.0
bjoshua/ansible-modules-extras
packaging/language/npm.py
73
8566
#!/usr/bin/python
# -*- coding: utf-8 -*-

# (c) 2013, Chris Hoffman <christopher.hoffman@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.

DOCUMENTATION = '''
---
module: npm
short_description: Manage node.js packages with npm
description:
  - Manage node.js packages with Node Package Manager (npm)
version_added: 1.2
author: "Chris Hoffman (@chrishoffman)"
options:
  name:
    description:
      - The name of a node.js library to install
    required: false
  path:
    description:
      - The base path where to install the node.js libraries
    required: false
  version:
    description:
      - The version to be installed
    required: false
  global:
    description:
      - Install the node.js library globally
    required: false
    default: no
    choices: [ "yes", "no" ]
  executable:
    description:
      - The executable location for npm.
      - This is useful if you are using a version manager, such as nvm
    required: false
  ignore_scripts:
    description:
      - Use the --ignore-scripts flag when installing.
    required: false
    choices: [ "yes", "no" ]
    default: no
    version_added: "1.8"
  production:
    description:
      - Install dependencies in production mode, excluding devDependencies
    required: false
    choices: [ "yes", "no" ]
    default: no
  registry:
    description:
      - The registry to install modules from.
    required: false
    version_added: "1.6"
  state:
    description:
      - The state of the node.js library
    required: false
    default: present
    choices: [ "present", "absent", "latest" ]
'''

EXAMPLES = '''
description: Install "coffee-script" node.js package.
- npm: name=coffee-script path=/app/location

description: Install "coffee-script" node.js package on version 1.6.1.
- npm: name=coffee-script version=1.6.1 path=/app/location

description: Install "coffee-script" node.js package globally.
- npm: name=coffee-script global=yes

description: Remove the globally package "coffee-script".
- npm: name=coffee-script global=yes state=absent

description: Install "coffee-script" node.js package from custom registry.
- npm: name=coffee-script registry=http://registry.mysite.com

description: Install packages based on package.json.
- npm: path=/app/location

description: Update packages based on package.json to their latest version.
- npm: path=/app/location state=latest

description: Install packages based on package.json using the npm installed with nvm v0.10.1.
- npm: path=/app/location executable=/opt/nvm/v0.10.1/bin/npm state=present
'''

import os
# Explicit import: list_outdated() uses re.split(); previously this only
# worked because `from ansible.module_utils.basic import *` at the bottom
# of the file happened to drag `re` into the module namespace.
import re

try:
    import json
except ImportError:
    import simplejson as json


class Npm(object):
    """Thin wrapper around the npm command-line tool.

    Holds the module options (package name/version, target path,
    global/production flags, registry, executable) and builds/executes
    the corresponding npm command lines.
    """

    def __init__(self, module, **kwargs):
        self.module = module
        self.glbl = kwargs['glbl']
        self.name = kwargs['name']
        self.version = kwargs['version']
        self.path = kwargs['path']
        self.registry = kwargs['registry']
        self.production = kwargs['production']
        self.ignore_scripts = kwargs['ignore_scripts']

        # 'executable' may contain extra arguments (e.g. a version-manager
        # shim), so it is split on spaces rather than taken verbatim.
        if kwargs['executable']:
            self.executable = kwargs['executable'].split(' ')
        else:
            self.executable = [module.get_bin_path('npm', True)]

        # npm addresses a specific version as "name@version".
        if kwargs['version']:
            self.name_version = self.name + '@' + self.version
        else:
            self.name_version = self.name

    def _exec(self, args, run_in_check_mode=False, check_rc=True):
        """Run an npm subcommand; returns stdout ('' if skipped in check mode).

        :param args: npm subcommand plus arguments, e.g. ['install'].
        :param run_in_check_mode: execute even under Ansible check mode
            (used for read-only commands such as `npm list`).
        :param check_rc: fail the module on a non-zero exit code.
        """
        if not self.module.check_mode or (self.module.check_mode and run_in_check_mode):
            cmd = self.executable + args

            if self.glbl:
                cmd.append('--global')
            if self.production:
                cmd.append('--production')
            if self.ignore_scripts:
                cmd.append('--ignore-scripts')
            if self.name:
                cmd.append(self.name_version)
            if self.registry:
                cmd.append('--registry')
                cmd.append(self.registry)

            # If path is specified, cd into that path and run the command.
            cwd = None
            if self.path:
                self.path = os.path.abspath(os.path.expanduser(self.path))
                if not os.path.exists(self.path):
                    os.makedirs(self.path)
                if not os.path.isdir(self.path):
                    self.module.fail_json(msg="path %s is not a directory" % self.path)
                cwd = self.path

            rc, out, err = self.module.run_command(cmd, check_rc=check_rc, cwd=cwd)
            return out
        return ''

    def list(self):
        """Return (installed, missing) lists of package names.

        Parses `npm list --json`; packages flagged "missing" or "invalid"
        by npm are reported as missing.
        """
        cmd = ['list', '--json']

        installed = list()
        missing = list()
        data = json.loads(self._exec(cmd, True, False))
        if 'dependencies' in data:
            for dep in data['dependencies']:
                if 'missing' in data['dependencies'][dep] and data['dependencies'][dep]['missing']:
                    missing.append(dep)
                elif 'invalid' in data['dependencies'][dep] and data['dependencies'][dep]['invalid']:
                    missing.append(dep)
                else:
                    installed.append(dep)
            if self.name and self.name not in installed:
                missing.append(self.name)
        # Named dependency not installed
        else:
            missing.append(self.name)

        return installed, missing

    def install(self):
        return self._exec(['install'])

    def update(self):
        return self._exec(['update'])

    def uninstall(self):
        return self._exec(['uninstall'])

    def list_outdated(self):
        """Return the names of installed packages that npm reports as outdated."""
        outdated = list()
        data = self._exec(['outdated'], True, False)
        for dep in data.splitlines():
            if dep:
                # node.js v0.10.22 changed the `npm outdated` module separator
                # from "@" to " ". Split on both for backwards compatibility.
                pkg, other = re.split(r'\s|@', dep, 1)
                outdated.append(pkg)

        return outdated


def main():
    arg_spec = dict(
        name=dict(default=None),
        path=dict(default=None),
        version=dict(default=None),
        production=dict(default='no', type='bool'),
        executable=dict(default=None),
        registry=dict(default=None),
        state=dict(default='present', choices=['present', 'absent', 'latest']),
        ignore_scripts=dict(default=False, type='bool'),
    )
    # 'global' is a Python keyword, so it cannot appear in the dict() call above.
    arg_spec['global'] = dict(default='no', type='bool')
    module = AnsibleModule(
        argument_spec=arg_spec,
        supports_check_mode=True
    )

    name = module.params['name']
    path = module.params['path']
    version = module.params['version']
    glbl = module.params['global']
    production = module.params['production']
    executable = module.params['executable']
    registry = module.params['registry']
    state = module.params['state']
    ignore_scripts = module.params['ignore_scripts']

    if not path and not glbl:
        module.fail_json(msg='path must be specified when not using global')
    if state == 'absent' and not name:
        module.fail_json(msg='uninstalling a package is only available for named packages')

    npm = Npm(module, name=name, path=path, version=version, glbl=glbl,
              production=production, executable=executable, registry=registry,
              ignore_scripts=ignore_scripts)

    changed = False
    if state == 'present':
        installed, missing = npm.list()
        if len(missing):
            changed = True
            npm.install()
    elif state == 'latest':
        installed, missing = npm.list()
        outdated = npm.list_outdated()
        if len(missing) or len(outdated):
            changed = True
            npm.install()
    else:  # absent
        installed, missing = npm.list()
        if name in installed:
            changed = True
            npm.uninstall()

    module.exit_json(changed=changed)

# import module snippets
from ansible.module_utils.basic import *
main()
gpl-3.0
nycbjr/kernel_sony_sgp3xx
tools/perf/scripts/python/net_dropmonitor.py
4235
1554
# Monitor the system for dropped packets and proudce a report of drop locations and counts import os import sys sys.path.append(os.environ['PERF_EXEC_PATH'] + \ '/scripts/python/Perf-Trace-Util/lib/Perf/Trace') from perf_trace_context import * from Core import * from Util import * drop_log = {} kallsyms = [] def get_kallsyms_table(): global kallsyms try: f = open("/proc/kallsyms", "r") linecount = 0 for line in f: linecount = linecount+1 f.seek(0) except: return j = 0 for line in f: loc = int(line.split()[0], 16) name = line.split()[2] j = j +1 if ((j % 100) == 0): print "\r" + str(j) + "/" + str(linecount), kallsyms.append({ 'loc': loc, 'name' : name}) print "\r" + str(j) + "/" + str(linecount) kallsyms.sort() return def get_sym(sloc): loc = int(sloc) for i in kallsyms: if (i['loc'] >= loc): return (i['name'], i['loc']-loc) return (None, 0) def print_drop_table(): print "%25s %25s %25s" % ("LOCATION", "OFFSET", "COUNT") for i in drop_log.keys(): (sym, off) = get_sym(i) if sym == None: sym = i print "%25s %25s %25s" % (sym, off, drop_log[i]) def trace_begin(): print "Starting trace (Ctrl-C to dump results)" def trace_end(): print "Gathering kallsyms data" get_kallsyms_table() print_drop_table() # called from perf, when it finds a correspoinding event def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr, protocol, location): slocation = str(location) try: drop_log[slocation] = drop_log[slocation] + 1 except: drop_log[slocation] = 1
gpl-2.0
elba7r/system
erpnext/stock/doctype/delivery_note/delivery_note.py
8
15561
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors # License: GNU General Public License v3. See license.txt from __future__ import unicode_literals import frappe from frappe.utils import flt, cint from frappe import msgprint, _ import frappe.defaults from frappe.model.mapper import get_mapped_doc from erpnext.controllers.selling_controller import SellingController from frappe.desk.notifications import clear_doctype_notifications form_grid_templates = { "items": "templates/form_grid/item_grid.html" } class DeliveryNote(SellingController): def __init__(self, arg1, arg2=None): super(DeliveryNote, self).__init__(arg1, arg2) self.status_updater = [{ 'source_dt': 'Delivery Note Item', 'target_dt': 'Sales Order Item', 'join_field': 'so_detail', 'target_field': 'delivered_qty', 'target_parent_dt': 'Sales Order', 'target_parent_field': 'per_delivered', 'target_ref_field': 'qty', 'source_field': 'qty', 'percent_join_field': 'against_sales_order', 'status_field': 'delivery_status', 'keyword': 'Delivered', 'second_source_dt': 'Sales Invoice Item', 'second_source_field': 'qty', 'second_join_field': 'so_detail', 'overflow_type': 'delivery', 'second_source_extra_cond': """ and exists(select name from `tabSales Invoice` where name=`tabSales Invoice Item`.parent and update_stock = 1)""" }, { 'source_dt': 'Delivery Note Item', 'target_dt': 'Sales Invoice Item', 'join_field': 'si_detail', 'target_field': 'delivered_qty', 'target_parent_dt': 'Sales Invoice', 'target_ref_field': 'qty', 'source_field': 'qty', 'percent_join_field': 'against_sales_invoice', 'overflow_type': 'delivery', 'no_tolerance': 1 }, { 'source_dt': 'Delivery Note Item', 'target_dt': 'Sales Order Item', 'join_field': 'so_detail', 'target_field': 'returned_qty', 'target_parent_dt': 'Sales Order', 'source_field': '-1 * qty', 'extra_cond': """ and exists (select name from `tabDelivery Note` where name=`tabDelivery Note Item`.parent and is_return=1)""" }] def before_print(self): def 
toggle_print_hide(meta, fieldname): df = meta.get_field(fieldname) if self.get("print_without_amount"): df.set("__print_hide", 1) else: df.delete_key("__print_hide") item_meta = frappe.get_meta("Delivery Note Item") print_hide_fields = { "parent": ["grand_total", "rounded_total", "in_words", "currency", "total", "taxes"], "items": ["rate", "amount", "price_list_rate", "discount_percentage"] } for key, fieldname in print_hide_fields.items(): for f in fieldname: toggle_print_hide(self.meta if key == "parent" else item_meta, f) def set_actual_qty(self): for d in self.get('items'): if d.item_code and d.warehouse: actual_qty = frappe.db.sql("""select actual_qty from `tabBin` where item_code = %s and warehouse = %s""", (d.item_code, d.warehouse)) d.actual_qty = actual_qty and flt(actual_qty[0][0]) or 0 def so_required(self): """check in manage account if sales order required or not""" if frappe.db.get_value("Selling Settings", None, 'so_required') == 'Yes': for d in self.get('items'): if not d.against_sales_order: frappe.throw(_("Sales Order required for Item {0}").format(d.item_code)) def validate(self): super(DeliveryNote, self).validate() self.set_status() self.so_required() self.validate_proj_cust() self.check_close_sales_order("against_sales_order") self.validate_for_items() self.validate_warehouse() self.validate_uom_is_integer("stock_uom", "qty") self.validate_with_previous_doc() from erpnext.stock.doctype.packed_item.packed_item import make_packing_list make_packing_list(self) self.update_current_stock() if not self.installation_status: self.installation_status = 'Not Installed' def validate_with_previous_doc(self): for fn in (("Sales Order", "against_sales_order", "so_detail"), ("Sales Invoice", "against_sales_invoice", "si_detail")): if filter(None, [getattr(d, fn[1], None) for d in self.get("items")]): super(DeliveryNote, self).validate_with_previous_doc({ fn[0]: { "ref_dn_field": fn[1], "compare_fields": [["customer", "="], ["company", "="], ["project", "="], 
["currency", "="]], }, }) if cint(frappe.db.get_single_value('Selling Settings', 'maintain_same_sales_rate')) and not self.is_return: self.validate_rate_with_reference_doc([["Sales Order", "against_sales_order", "so_detail"], ["Sales Invoice", "against_sales_invoice", "si_detail"]]) def validate_proj_cust(self): """check for does customer belong to same project as entered..""" if self.project and self.customer: res = frappe.db.sql("""select name from `tabProject` where name = %s and (customer = %s or ifnull(customer,'')='')""", (self.project, self.customer)) if not res: frappe.throw(_("Customer {0} does not belong to project {1}").format(self.customer, self.project)) def validate_for_items(self): check_list, chk_dupl_itm = [], [] if cint(frappe.db.get_single_value("Selling Settings", "allow_multiple_items")): return for d in self.get('items'): e = [d.item_code, d.description, d.warehouse, d.against_sales_order or d.against_sales_invoice, d.batch_no or ''] f = [d.item_code, d.description, d.against_sales_order or d.against_sales_invoice] if frappe.db.get_value("Item", d.item_code, "is_stock_item") == 1: if e in check_list: msgprint(_("Note: Item {0} entered multiple times").format(d.item_code)) else: check_list.append(e) else: if f in chk_dupl_itm: msgprint(_("Note: Item {0} entered multiple times").format(d.item_code)) else: chk_dupl_itm.append(f) def validate_warehouse(self): super(DeliveryNote, self).validate_warehouse() for d in self.get_item_list(): if frappe.db.get_value("Item", d['item_code'], "is_stock_item") == 1: if not d['warehouse']: frappe.throw(_("Warehouse required for stock Item {0}").format(d["item_code"])) def update_current_stock(self): if self.get("_action") and self._action != "update_after_submit": for d in self.get('items'): d.actual_qty = frappe.db.get_value("Bin", {"item_code": d.item_code, "warehouse": d.warehouse}, "actual_qty") for d in self.get('packed_items'): bin_qty = frappe.db.get_value("Bin", {"item_code": d.item_code, "warehouse": 
d.warehouse}, ["actual_qty", "projected_qty"], as_dict=True) if bin_qty: d.actual_qty = flt(bin_qty.actual_qty) d.projected_qty = flt(bin_qty.projected_qty) def on_submit(self): self.validate_packed_qty() # Check for Approving Authority frappe.get_doc('Authorization Control').validate_approving_authority(self.doctype, self.company, self.base_grand_total, self) # update delivered qty in sales order self.update_prevdoc_status() self.update_billing_status() if not self.is_return: self.check_credit_limit() # Updating stock ledger should always be called after updating prevdoc status, # because updating reserved qty in bin depends upon updated delivered qty in SO self.update_stock_ledger() self.make_gl_entries() def on_cancel(self): self.check_close_sales_order("against_sales_order") self.check_next_docstatus() self.update_prevdoc_status() self.update_billing_status() # Updating stock ledger should always be called after updating prevdoc status, # because updating reserved qty in bin depends upon updated delivered qty in SO self.update_stock_ledger() self.cancel_packing_slips() self.make_gl_entries_on_cancel() def check_credit_limit(self): from erpnext.selling.doctype.customer.customer import check_credit_limit validate_against_credit_limit = False for d in self.get("items"): if not (d.against_sales_order or d.against_sales_invoice): validate_against_credit_limit = True break if validate_against_credit_limit: check_credit_limit(self.customer, self.company) def validate_packed_qty(self): """ Validate that if packed qty exists, it should be equal to qty """ if not any([flt(d.get('packed_qty')) for d in self.get("items")]): return has_error = False for d in self.get("items"): if flt(d.get('qty')) != flt(d.get('packed_qty')): frappe.msgprint(_("Packed quantity must equal quantity for Item {0} in row {1}").format(d.item_code, d.idx)) has_error = True if has_error: raise frappe.ValidationError def check_next_docstatus(self): submit_rv = frappe.db.sql("""select t1.name from 
`tabSales Invoice` t1,`tabSales Invoice Item` t2 where t1.name = t2.parent and t2.delivery_note = %s and t1.docstatus = 1""", (self.name)) if submit_rv: frappe.throw(_("Sales Invoice {0} has already been submitted").format(submit_rv[0][0])) submit_in = frappe.db.sql("""select t1.name from `tabInstallation Note` t1, `tabInstallation Note Item` t2 where t1.name = t2.parent and t2.prevdoc_docname = %s and t1.docstatus = 1""", (self.name)) if submit_in: frappe.throw(_("Installation Note {0} has already been submitted").format(submit_in[0][0])) def cancel_packing_slips(self): """ Cancel submitted packing slips related to this delivery note """ res = frappe.db.sql("""SELECT name FROM `tabPacking Slip` WHERE delivery_note = %s AND docstatus = 1""", self.name) if res: for r in res: ps = frappe.get_doc('Packing Slip', r[0]) ps.cancel() frappe.msgprint(_("Packing Slip(s) cancelled")) def update_status(self, status): self.set_status(update=True, status=status) self.notify_update() clear_doctype_notifications(self) def update_billing_status(self, update_modified=True): updated_delivery_notes = [self.name] for d in self.get("items"): if d.si_detail and not d.so_detail: d.db_set('billed_amt', d.amount, update_modified=update_modified) elif d.so_detail: updated_delivery_notes += update_billed_amount_based_on_so(d.so_detail, update_modified) for dn in set(updated_delivery_notes): dn_doc = self if (dn == self.name) else frappe.get_doc("Delivery Note", dn) dn_doc.update_billing_percentage(update_modified=update_modified) self.load_from_db() def update_billed_amount_based_on_so(so_detail, update_modified=True): # Billed against Sales Order directly billed_against_so = frappe.db.sql("""select sum(amount) from `tabSales Invoice Item` where so_detail=%s and (dn_detail is null or dn_detail = '') and docstatus=1""", so_detail) billed_against_so = billed_against_so and billed_against_so[0][0] or 0 # Get all Delivery Note Item rows against the Sales Order Item row dn_details = 
frappe.db.sql("""select dn_item.name, dn_item.amount, dn_item.si_detail, dn_item.parent from `tabDelivery Note Item` dn_item, `tabDelivery Note` dn where dn.name=dn_item.parent and dn_item.so_detail=%s and dn.docstatus=1 and dn.is_return = 0 order by dn.posting_date asc, dn.posting_time asc, dn.name asc""", so_detail, as_dict=1) updated_dn = [] for dnd in dn_details: billed_amt_agianst_dn = 0 # If delivered against Sales Invoice if dnd.si_detail: billed_amt_agianst_dn = flt(dnd.amount) billed_against_so -= billed_amt_agianst_dn else: # Get billed amount directly against Delivery Note billed_amt_agianst_dn = frappe.db.sql("""select sum(amount) from `tabSales Invoice Item` where dn_detail=%s and docstatus=1""", dnd.name) billed_amt_agianst_dn = billed_amt_agianst_dn and billed_amt_agianst_dn[0][0] or 0 # Distribute billed amount directly against SO between DNs based on FIFO if billed_against_so and billed_amt_agianst_dn < dnd.amount: pending_to_bill = flt(dnd.amount) - billed_amt_agianst_dn if pending_to_bill <= billed_against_so: billed_amt_agianst_dn += pending_to_bill billed_against_so -= pending_to_bill else: billed_amt_agianst_dn += billed_against_so billed_against_so = 0 frappe.db.set_value("Delivery Note Item", dnd.name, "billed_amt", billed_amt_agianst_dn, update_modified=update_modified) updated_dn.append(dnd.parent) return updated_dn def get_list_context(context=None): from erpnext.controllers.website_list_for_contact import get_list_context list_context = get_list_context(context) list_context.update({ 'show_sidebar': True, 'show_search': True, 'no_breadcrumbs': True, 'title': _('Shipments'), }) return list_context def get_invoiced_qty_map(delivery_note): """returns a map: {dn_detail: invoiced_qty}""" invoiced_qty_map = {} for dn_detail, qty in frappe.db.sql("""select dn_detail, qty from `tabSales Invoice Item` where delivery_note=%s and docstatus=1""", delivery_note): if not invoiced_qty_map.get(dn_detail): invoiced_qty_map[dn_detail] = 0 
invoiced_qty_map[dn_detail] += qty return invoiced_qty_map @frappe.whitelist() def make_sales_invoice(source_name, target_doc=None): invoiced_qty_map = get_invoiced_qty_map(source_name) def update_accounts(source, target): target.is_pos = 0 target.ignore_pricing_rule = 1 target.run_method("set_missing_values") if len(target.get("items")) == 0: frappe.throw(_("All these items have already been invoiced")) target.run_method("calculate_taxes_and_totals") def update_item(source_doc, target_doc, source_parent): target_doc.qty = source_doc.qty - invoiced_qty_map.get(source_doc.name, 0) doc = get_mapped_doc("Delivery Note", source_name, { "Delivery Note": { "doctype": "Sales Invoice", "validation": { "docstatus": ["=", 1] } }, "Delivery Note Item": { "doctype": "Sales Invoice Item", "field_map": { "name": "dn_detail", "parent": "delivery_note", "so_detail": "so_detail", "against_sales_order": "sales_order", "serial_no": "serial_no" }, "postprocess": update_item, "filter": lambda d: abs(d.qty) - abs(invoiced_qty_map.get(d.name, 0))<=0 }, "Sales Taxes and Charges": { "doctype": "Sales Taxes and Charges", "add_if_empty": True }, "Sales Team": { "doctype": "Sales Team", "field_map": { "incentives": "incentives" }, "add_if_empty": True } }, target_doc, update_accounts) return doc @frappe.whitelist() def make_installation_note(source_name, target_doc=None): def update_item(obj, target, source_parent): target.qty = flt(obj.qty) - flt(obj.installed_qty) target.serial_no = obj.serial_no doclist = get_mapped_doc("Delivery Note", source_name, { "Delivery Note": { "doctype": "Installation Note", "validation": { "docstatus": ["=", 1] } }, "Delivery Note Item": { "doctype": "Installation Note Item", "field_map": { "name": "prevdoc_detail_docname", "parent": "prevdoc_docname", "parenttype": "prevdoc_doctype", }, "postprocess": update_item, "condition": lambda doc: doc.installed_qty < doc.qty } }, target_doc) return doclist @frappe.whitelist() def make_packing_slip(source_name, 
target_doc=None): doclist = get_mapped_doc("Delivery Note", source_name, { "Delivery Note": { "doctype": "Packing Slip", "field_map": { "name": "delivery_note", "letter_head": "letter_head" }, "validation": { "docstatus": ["=", 0] } } }, target_doc) return doclist @frappe.whitelist() def make_sales_return(source_name, target_doc=None): from erpnext.controllers.sales_and_purchase_return import make_return_doc return make_return_doc("Delivery Note", source_name, target_doc) @frappe.whitelist() def update_delivery_note_status(docname, status): dn = frappe.get_doc("Delivery Note", docname) dn.update_status(status)
gpl-3.0
TaDaa/.vim
dependencies/windows/ycm/ycm/python/ycm/completers/completer_utils.py
1
2054
#!/usr/bin/env python
#
# Copyright (C) 2013  Strahinja Val Markovic  <val@markovic.io>
#
# This file is part of YouCompleteMe.
#
# YouCompleteMe is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# YouCompleteMe is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with YouCompleteMe.  If not, see <http://www.gnu.org/licenses/>.

from collections import defaultdict
from copy import deepcopy
import vim

# Built-in semantic-completion trigger sequences.  Keys are comma-separated
# lists of filetypes that share the same set of trigger strings.
DEFAULT_FILETYPE_TRIGGERS = {
  'c' : ['->', '.'],
  'objc' : ['->', '.'],
  'ocaml' : ['.', '#'],
  'cpp,objcpp' : ['->', '.', '::'],
  'perl' : ['->'],
  'php' : ['->', '::'],
  'cs,java,javascript,d,vim,python,perl6,scala,vb,elixir,go' : ['.'],
  'ruby' : ['.', '::'],
  'lua' : ['.', ':'],
  'erlang' : [':'],
}


def _FiletypeTriggerDictFromSpec( trigger_dict_spec ):
  """Expand a spec dict whose keys may name several comma-separated filetypes
  into a defaultdict(set) mapping each individual filetype to its triggers."""
  triggers_for_filetype = defaultdict( set )

  # NOTE: items() instead of the Python-2-only iteritems(); items() behaves
  # identically when iterated on Python 2 and also works on Python 3.
  for key, value in trigger_dict_spec.items():
    filetypes = key.split( ',' )
    for filetype in filetypes:
      triggers_for_filetype[ filetype ].update( value )
  return triggers_for_filetype


def _FiletypeDictUnion( dict_one, dict_two ):
  """Returns a new filetype dict that's a union of the provided two dicts.
  Dict params are supposed to be type defaultdict(set).  Values from dict_two
  are merged into (a deep copy of) dict_one; neither input is mutated."""
  final_dict = deepcopy( dict_one )

  for key, value in dict_two.items():
    final_dict[ key ].update( value )
  return final_dict


def TriggersForFiletype():
  """Return the per-filetype trigger dict: the built-in defaults merged with
  the user's g:ycm_semantic_triggers vim option (user entries win by union)."""
  user_triggers = _FiletypeTriggerDictFromSpec(
    vim.eval( 'g:ycm_semantic_triggers' ) )
  default_triggers = _FiletypeTriggerDictFromSpec(
    DEFAULT_FILETYPE_TRIGGERS )
  return _FiletypeDictUnion( default_triggers, user_triggers )
mit
paoloach/cc2530sim
googletest/googlemock/test/gmock_output_test.py
986
5999
#!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
#     * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
#     * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

"""Tests the text output of Google C++ Mocking Framework.

SYNOPSIS
       gmock_output_test.py --build_dir=BUILD/DIR --gengolden
         # where BUILD/DIR contains the built gmock_output_test_ file.
       gmock_output_test.py --gengolden
       gmock_output_test.py
"""

__author__ = 'wan@google.com (Zhanyong Wan)'

import os
import re
import sys

import gmock_test_utils


# The flag for generating the golden file
GENGOLDEN_FLAG = '--gengolden'

PROGRAM_PATH = gmock_test_utils.GetTestExecutablePath('gmock_output_test_')
COMMAND = [PROGRAM_PATH, '--gtest_stack_trace_depth=0', '--gtest_print_time=0']
GOLDEN_NAME = 'gmock_output_test_golden.txt'
GOLDEN_PATH = os.path.join(gmock_test_utils.GetSourceDir(), GOLDEN_NAME)


def ToUnixLineEnding(s):
  """Changes all Windows/Mac line endings in s to UNIX line endings."""

  # '\r\n' (Windows) and a lone '\r' (old Mac) both collapse to '\n'.
  return re.sub(r'\r\n?', '\n', s)


def RemoveReportHeaderAndFooter(output):
  """Removes Google Test result report's header and footer from the output."""

  boilerplate_patterns = [r'.*gtest_main.*\n',
                          r'\[.*\d+ tests.*\n',
                          r'\[.* test environment .*\n',
                          r'\[=+\] \d+ tests .* ran.*',
                          r'.* FAILED TESTS\n']
  for pattern in boilerplate_patterns:
    output = re.sub(pattern, '', output)
  return output


def RemoveLocations(output):
  """Removes all file location info from a Google Test program's output.

  Args:
       output:  the output of a Google Test program.

  Returns:
       output with all file location info (in the form of
       'DIRECTORY/FILE_NAME:LINE_NUMBER: 'or
       'DIRECTORY\\FILE_NAME(LINE_NUMBER): ') replaced by
       'FILE:#: '.
  """

  location_regex = r'.*[/\\](.+)(\:\d+|\(\d+\))\:'
  return re.sub(location_regex, 'FILE:#:', output)


def NormalizeErrorMarker(output):
  """Normalizes the error marker, which is different on Windows vs on Linux."""

  return re.sub(r' error: ', ' Failure\n', output)


def RemoveMemoryAddresses(output):
  """Removes memory addresses from the test output."""

  return re.sub(r'@\w+', '@0x#', output)


def RemoveTestNamesOfLeakedMocks(output):
  """Removes the test names of leaked mock objects from the test output."""

  return re.sub(r'\(used in test .+\) ', '', output)


def GetLeakyTests(output):
  """Returns a list of test names that leak mock objects."""

  # findall() returns a list of all matches of the regex in output.
  # For example, if '(used in test FooTest.Bar)' is in output, the
  # list will contain 'FooTest.Bar'.
  return re.findall(r'\(used in test (.+)\)', output)


def GetNormalizedOutputAndLeakyTests(output):
  """Normalizes the output of gmock_output_test_.

  Args:
    output: The test output.

  Returns:
    A tuple (the normalized test output, the list of test names that have
    leaked mocks).
  """

  # Run the raw output through the normalization pipeline in order; each
  # step strips or canonicalizes one kind of environment-dependent noise.
  for normalize in (ToUnixLineEnding,
                    RemoveReportHeaderAndFooter,
                    NormalizeErrorMarker,
                    RemoveLocations,
                    RemoveMemoryAddresses):
    output = normalize(output)
  return (RemoveTestNamesOfLeakedMocks(output), GetLeakyTests(output))


def GetShellCommandOutput(cmd):
  """Runs a command in a sub-process, and returns its STDOUT in a string."""

  return gmock_test_utils.Subprocess(cmd, capture_stderr=False).output


def GetNormalizedCommandOutputAndLeakyTests(cmd):
  """Runs a command and returns its normalized output and a list of leaky
  tests.

  Args:
    cmd:  the shell command.
  """

  # Disables exception pop-ups on Windows.
  os.environ['GTEST_CATCH_EXCEPTIONS'] = '1'
  return GetNormalizedOutputAndLeakyTests(GetShellCommandOutput(cmd))


class GMockOutputTest(gmock_test_utils.TestCase):

  def testOutput(self):
    (output, leaky_tests) = GetNormalizedCommandOutputAndLeakyTests(COMMAND)
    with open(GOLDEN_PATH, 'rb') as golden_file:
      golden = golden_file.read()

    # The normalized output should match the golden file.
    self.assertEquals(golden, output)

    # The raw output should contain 2 leaked mock object errors for
    # test GMockOutputTest.CatchesLeakedMocks.
    self.assertEquals(['GMockOutputTest.CatchesLeakedMocks',
                       'GMockOutputTest.CatchesLeakedMocks'],
                      leaky_tests)


if __name__ == '__main__':
  if sys.argv[1:] == [GENGOLDEN_FLAG]:
    (output, _) = GetNormalizedCommandOutputAndLeakyTests(COMMAND)
    with open(GOLDEN_PATH, 'wb') as golden_file:
      golden_file.write(output)
  else:
    gmock_test_utils.Main()
gpl-2.0
jhawkesworth/ansible
lib/ansible/galaxy/login.py
27
4573
######################################################################## # # (C) 2015, Chris Houseknecht <chouse@ansible.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # ######################################################################## from __future__ import (absolute_import, division, print_function) __metaclass__ = type import getpass import json from ansible.errors import AnsibleError, AnsibleOptionsError from ansible.module_utils.six.moves import input from ansible.module_utils.six.moves.urllib.parse import quote as urlquote, urlparse from ansible.module_utils.six.moves.urllib.error import HTTPError from ansible.module_utils.urls import open_url from ansible.utils.color import stringc from ansible.utils.display import Display display = Display() class GalaxyLogin(object): ''' Class to handle authenticating user with Galaxy API prior to performing CUD operations ''' GITHUB_AUTH = 'https://api.github.com/authorizations' def __init__(self, galaxy, github_token=None): self.galaxy = galaxy self.github_username = None self.github_password = None if github_token is None: self.get_credentials() def get_credentials(self): display.display(u'\n\n' + "We need your " + stringc("GitHub login", 'bright cyan') + " to identify you.", screen_only=True) display.display("This information will " + stringc("not be sent to Galaxy", 'bright cyan') + ", only to " + 
stringc("api.github.com.", "yellow"), screen_only=True) display.display("The password will not be displayed." + u'\n\n', screen_only=True) display.display("Use " + stringc("--github-token", 'yellow') + " if you do not want to enter your password." + u'\n\n', screen_only=True) try: self.github_username = input("GitHub Username: ") except Exception: pass try: self.github_password = getpass.getpass("Password for %s: " % self.github_username) except Exception: pass if not self.github_username or not self.github_password: raise AnsibleError("Invalid GitHub credentials. Username and password are required.") def remove_github_token(self): ''' If for some reason an ansible-galaxy token was left from a prior login, remove it. We cannot retrieve the token after creation, so we are forced to create a new one. ''' try: tokens = json.load(open_url(self.GITHUB_AUTH, url_username=self.github_username, url_password=self.github_password, force_basic_auth=True,)) except HTTPError as e: res = json.load(e) raise AnsibleError(res['message']) for token in tokens: if token['note'] == 'ansible-galaxy login': display.vvvvv('removing token: %s' % token['token_last_eight']) try: open_url('https://api.github.com/authorizations/%d' % token['id'], url_username=self.github_username, url_password=self.github_password, method='DELETE', force_basic_auth=True) except HTTPError as e: res = json.load(e) raise AnsibleError(res['message']) def create_github_token(self): ''' Create a personal authorization token with a note of 'ansible-galaxy login' ''' self.remove_github_token() args = json.dumps({"scopes": ["public_repo"], "note": "ansible-galaxy login"}) try: data = json.load(open_url(self.GITHUB_AUTH, url_username=self.github_username, url_password=self.github_password, force_basic_auth=True, data=args)) except HTTPError as e: res = json.load(e) raise AnsibleError(res['message']) return data['token']
gpl-3.0
cloudbau/nova
nova/tests/compute/test_compute_api.py
2
64148
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Unit tests for compute API.""" import datetime import iso8601 import mox from nova.compute import api as compute_api from nova.compute import cells_api as compute_cells_api from nova.compute import flavors from nova.compute import instance_actions from nova.compute import task_states from nova.compute import utils as compute_utils from nova.compute import vm_states from nova import context from nova import db from nova import exception from nova.objects import base as obj_base from nova.objects import instance as instance_obj from nova.objects import instance_info_cache from nova.objects import migration as migration_obj from nova.objects import service as service_obj from nova.openstack.common import timeutils from nova.openstack.common import uuidutils from nova import quota from nova import test from nova.tests.image import fake as fake_image from nova.tests.objects import test_migration from nova.tests.objects import test_service FAKE_IMAGE_REF = 'fake-image-ref' NODENAME = 'fakenode1' class _ComputeAPIUnitTestMixIn(object): def setUp(self): super(_ComputeAPIUnitTestMixIn, self).setUp() self.user_id = 'fake' self.project_id = 'fake' self.context = context.RequestContext(self.user_id, self.project_id) def _create_flavor(self, params=None): flavor = {'id': 1, 'flavorid': 1, 'name': 'm1.tiny', 'memory_mb': 512, 'vcpus': 1, 'vcpu_weight': None, 'root_gb': 1, 'ephemeral_gb': 0, 'rxtx_factor': 1, 'swap': 0, 'deleted': 0, 'disabled': 
False, 'is_public': True, } if params: flavor.update(params) return flavor def _create_instance_obj(self, params=None, flavor=None): """Create a test instance.""" if not params: params = {} if flavor is None: flavor = self._create_flavor() def make_fake_sys_meta(): sys_meta = params.pop("system_metadata", {}) for key in flavors.system_metadata_flavor_props: sys_meta['instance_type_%s' % key] = flavor[key] return sys_meta now = timeutils.utcnow() instance = instance_obj.Instance() instance.metadata = {} instance.metadata.update(params.pop('metadata', {})) instance.system_metadata = make_fake_sys_meta() instance.system_metadata.update(params.pop('system_metadata', {})) instance._context = self.context instance.id = 1 instance.uuid = uuidutils.generate_uuid() instance.cell_name = 'api!child' instance.vm_state = vm_states.ACTIVE instance.task_state = None instance.image_ref = FAKE_IMAGE_REF instance.reservation_id = 'r-fakeres' instance.user_id = self.user_id instance.project_id = self.project_id instance.host = 'fake_host' instance.node = NODENAME instance.instance_type_id = flavor['id'] instance.ami_launch_index = 0 instance.memory_mb = 0 instance.vcpus = 0 instance.root_gb = 0 instance.ephemeral_gb = 0 instance.architecture = 'x86_64' instance.os_type = 'Linux' instance.locked = False instance.created_at = now instance.updated_at = now instance.launched_at = now instance.disable_terminate = False instance.info_cache = instance_info_cache.InstanceInfoCache() if params: instance.update(params) instance.obj_reset_changes() return instance def test_create_quota_exceeded_messages(self): image_href = "image_href" image_id = 0 instance_type = self._create_flavor() self.mox.StubOutWithMock(self.compute_api, "_get_image") self.mox.StubOutWithMock(quota.QUOTAS, "limit_check") self.mox.StubOutWithMock(quota.QUOTAS, "reserve") quota_exception = exception.OverQuota( quotas={'instances': 1, 'cores': 1, 'ram': 1}, usages=dict((r, {'in_use': 1, 'reserved': 1}) for r in 
['instances', 'cores', 'ram']), overs=['instances']) for _unused in range(2): self.compute_api._get_image(self.context, image_href).AndReturn( (image_id, {})) quota.QUOTAS.limit_check(self.context, metadata_items=mox.IsA(int)) quota.QUOTAS.reserve(self.context, instances=40, cores=mox.IsA(int), ram=mox.IsA(int)).AndRaise(quota_exception) self.mox.ReplayAll() for min_count, message in [(20, '20-40'), (40, '40')]: try: self.compute_api.create(self.context, instance_type, "image_href", min_count=min_count, max_count=40) except exception.TooManyInstances as e: self.assertEqual(message, e.kwargs['req']) else: self.fail("Exception not raised") def test_suspend(self): # Ensure instance can be suspended. instance = self._create_instance_obj() self.assertEqual(instance.vm_state, vm_states.ACTIVE) self.assertEqual(instance.task_state, None) self.mox.StubOutWithMock(instance, 'save') self.mox.StubOutWithMock(self.compute_api, '_record_action_start') if self.is_cells: rpcapi = self.compute_api.cells_rpcapi else: rpcapi = self.compute_api.compute_rpcapi self.mox.StubOutWithMock(rpcapi, 'suspend_instance') instance.save(expected_task_state=None) self.compute_api._record_action_start(self.context, instance, instance_actions.SUSPEND) rpcapi.suspend_instance(self.context, instance) self.mox.ReplayAll() self.compute_api.suspend(self.context, instance) self.assertEqual(vm_states.ACTIVE, instance.vm_state) self.assertEqual(task_states.SUSPENDING, instance.task_state) def test_resume(self): # Ensure instance can be resumed (if suspended). 
instance = self._create_instance_obj( params=dict(vm_state=vm_states.SUSPENDED)) self.assertEqual(instance.vm_state, vm_states.SUSPENDED) self.assertEqual(instance.task_state, None) self.mox.StubOutWithMock(instance, 'save') self.mox.StubOutWithMock(self.compute_api, '_record_action_start') if self.is_cells: rpcapi = self.compute_api.cells_rpcapi else: rpcapi = self.compute_api.compute_rpcapi self.mox.StubOutWithMock(rpcapi, 'resume_instance') instance.save(expected_task_state=None) self.compute_api._record_action_start(self.context, instance, instance_actions.RESUME) rpcapi.resume_instance(self.context, instance) self.mox.ReplayAll() self.compute_api.resume(self.context, instance) self.assertEqual(vm_states.SUSPENDED, instance.vm_state) self.assertEqual(task_states.RESUMING, instance.task_state) def test_start(self): params = dict(vm_state=vm_states.STOPPED) instance = self._create_instance_obj(params=params) self.mox.StubOutWithMock(instance, 'save') self.mox.StubOutWithMock(self.compute_api, '_record_action_start') instance.save(expected_task_state=None) self.compute_api._record_action_start(self.context, instance, instance_actions.START) if self.is_cells: rpcapi = self.compute_api.cells_rpcapi else: rpcapi = self.compute_api.compute_rpcapi self.mox.StubOutWithMock(rpcapi, 'start_instance') rpcapi.start_instance(self.context, instance) self.mox.ReplayAll() self.compute_api.start(self.context, instance) self.assertEqual(task_states.POWERING_ON, instance.task_state) def test_start_invalid_state(self): instance = self._create_instance_obj() self.assertEqual(instance.vm_state, vm_states.ACTIVE) self.assertRaises(exception.InstanceInvalidState, self.compute_api.start, self.context, instance) def test_start_no_host(self): params = dict(vm_state=vm_states.STOPPED, host='') instance = self._create_instance_obj(params=params) self.assertRaises(exception.InstanceNotReady, self.compute_api.start, self.context, instance) def _test_stop(self, vm_state, force=False): # Make 
sure 'progress' gets reset params = dict(task_state=None, progress=99, vm_state=vm_state) instance = self._create_instance_obj(params=params) self.mox.StubOutWithMock(instance, 'save') self.mox.StubOutWithMock(self.compute_api, '_record_action_start') instance.save(expected_task_state=None) self.compute_api._record_action_start(self.context, instance, instance_actions.STOP) if self.is_cells: rpcapi = self.compute_api.cells_rpcapi else: rpcapi = self.compute_api.compute_rpcapi self.mox.StubOutWithMock(rpcapi, 'stop_instance') rpcapi.stop_instance(self.context, instance, do_cast=True) self.mox.ReplayAll() if force: self.compute_api.force_stop(self.context, instance) else: self.compute_api.stop(self.context, instance) self.assertEqual(task_states.POWERING_OFF, instance.task_state) self.assertEqual(0, instance.progress) def test_stop(self): self._test_stop(vm_states.ACTIVE) def test_stop_stopped_instance_with_bypass(self): self._test_stop(vm_states.STOPPED, force=True) def test_stop_invalid_state(self): params = dict(vm_state=vm_states.PAUSED) instance = self._create_instance_obj(params=params) self.assertRaises(exception.InstanceInvalidState, self.compute_api.stop, self.context, instance) def test_stop_a_stopped_inst(self): params = {'vm_state': vm_states.STOPPED} instance = self._create_instance_obj(params=params) self.assertRaises(exception.InstanceInvalidState, self.compute_api.stop, self.context, instance) def test_stop_no_host(self): params = {'host': ''} instance = self._create_instance_obj(params=params) self.assertRaises(exception.InstanceNotReady, self.compute_api.stop, self.context, instance) def _test_reboot_type(self, vm_state, reboot_type, task_state=None): # Ensure instance can be soft rebooted. 
inst = self._create_instance_obj() inst.vm_state = vm_state inst.task_state = task_state self.mox.StubOutWithMock(self.context, 'elevated') self.mox.StubOutWithMock(self.compute_api, '_record_action_start') self.mox.StubOutWithMock(self.compute_api, 'update') self.mox.StubOutWithMock(inst, 'save') inst.save(expected_task_state=[None, task_states.REBOOTING]) self.context.elevated().AndReturn(self.context) self.compute_api._record_action_start(self.context, inst, instance_actions.REBOOT) if self.is_cells: rpcapi = self.compute_api.cells_rpcapi else: rpcapi = self.compute_api.compute_rpcapi self.mox.StubOutWithMock(rpcapi, 'reboot_instance') rpcapi.reboot_instance(self.context, instance=inst, block_device_info=None, reboot_type=reboot_type) self.mox.ReplayAll() self.compute_api.reboot(self.context, inst, reboot_type) def _test_reboot_type_fails(self, reboot_type, **updates): inst = self._create_instance_obj() inst.update(updates) self.assertRaises(exception.InstanceInvalidState, self.compute_api.reboot, self.context, inst, reboot_type) def test_reboot_hard_active(self): self._test_reboot_type(vm_states.ACTIVE, 'HARD') def test_reboot_hard_error(self): self._test_reboot_type(vm_states.ERROR, 'HARD') def test_reboot_hard_rebooting(self): self._test_reboot_type(vm_states.ACTIVE, 'HARD', task_state=task_states.REBOOTING) def test_reboot_hard_rescued(self): self._test_reboot_type_fails('HARD', vm_state=vm_states.RESCUED) def test_reboot_hard_error_not_launched(self): self._test_reboot_type_fails('HARD', vm_state=vm_states.ERROR, launched_at=None) def test_reboot_soft(self): self._test_reboot_type(vm_states.ACTIVE, 'SOFT') def test_reboot_soft_error(self): self._test_reboot_type_fails('SOFT', vm_state=vm_states.ERROR) def test_reboot_soft_paused(self): self._test_reboot_type_fails('SOFT', vm_state=vm_states.PAUSED) def test_reboot_soft_stopped(self): self._test_reboot_type_fails('SOFT', vm_state=vm_states.STOPPED) def test_reboot_soft_suspended(self): 
self._test_reboot_type_fails('SOFT', vm_state=vm_states.SUSPENDED) def test_reboot_soft_rebooting(self): self._test_reboot_type_fails('SOFT', task_state=task_states.REBOOTING) def test_reboot_soft_rescued(self): self._test_reboot_type_fails('SOFT', vm_state=vm_states.RESCUED) def test_reboot_soft_error_not_launched(self): self._test_reboot_type_fails('SOFT', vm_state=vm_states.ERROR, launched_at=None) def _test_delete_resized_part(self, inst): migration = migration_obj.Migration._from_db_object( self.context, migration_obj.Migration(), test_migration.fake_db_migration()) self.mox.StubOutWithMock(migration_obj.Migration, 'get_by_instance_and_status') self.context.elevated().AndReturn(self.context) migration_obj.Migration.get_by_instance_and_status( self.context, inst.uuid, 'finished').AndReturn(migration) self.compute_api._downsize_quota_delta(self.context, inst ).AndReturn('deltas') self.compute_api._reserve_quota_delta(self.context, 'deltas' ).AndReturn('rsvs') self.compute_api._record_action_start( self.context, inst, instance_actions.CONFIRM_RESIZE) self.compute_api.compute_rpcapi.confirm_resize( self.context, inst, migration, migration['source_compute'], 'rsvs', cast=False) def _test_downed_host_part(self, inst, updates, delete_time, delete_type): inst.info_cache.delete() compute_utils.notify_about_instance_usage( mox.IgnoreArg(), self.context, inst, '%s.start' % delete_type) self.context.elevated().AndReturn(self.context) self.compute_api.network_api.deallocate_for_instance( self.context, inst) db.instance_system_metadata_get(self.context, inst.uuid).AndReturn('sys-meta') state = ('soft' in delete_type and vm_states.SOFT_DELETED or vm_states.DELETED) updates.update({'vm_state': state, 'task_state': None, 'terminated_at': delete_time}) inst.save() db.instance_destroy(self.context, inst.uuid, constraint=None) compute_utils.notify_about_instance_usage( mox.IgnoreArg(), self.context, inst, '%s.end' % delete_type, system_metadata='sys-meta') def _test_delete(self, 
delete_type, **attrs): reservations = 'fake-resv' inst = self._create_instance_obj() inst.update(attrs) inst._context = self.context delete_time = datetime.datetime(1955, 11, 5, 9, 30, tzinfo=iso8601.iso8601.Utc()) timeutils.set_time_override(delete_time) task_state = (delete_type == 'soft_delete' and task_states.SOFT_DELETING or task_states.DELETING) updates = {'progress': 0, 'task_state': task_state} if delete_type == 'soft_delete': updates['deleted_at'] = delete_time self.mox.StubOutWithMock(inst, 'save') self.mox.StubOutWithMock(db, 'block_device_mapping_get_all_by_instance') self.mox.StubOutWithMock(self.compute_api, '_create_reservations') self.mox.StubOutWithMock(self.context, 'elevated') self.mox.StubOutWithMock(db, 'service_get_by_compute_host') self.mox.StubOutWithMock(self.compute_api.servicegroup_api, 'service_is_up') self.mox.StubOutWithMock(db, 'migration_get_by_instance_and_status') self.mox.StubOutWithMock(self.compute_api, '_downsize_quota_delta') self.mox.StubOutWithMock(self.compute_api, '_reserve_quota_delta') self.mox.StubOutWithMock(self.compute_api, '_record_action_start') self.mox.StubOutWithMock(db, 'instance_update_and_get_original') self.mox.StubOutWithMock(inst.info_cache, 'delete') self.mox.StubOutWithMock(self.compute_api.network_api, 'deallocate_for_instance') self.mox.StubOutWithMock(db, 'instance_system_metadata_get') self.mox.StubOutWithMock(db, 'instance_destroy') self.mox.StubOutWithMock(compute_utils, 'notify_about_instance_usage') self.mox.StubOutWithMock(quota.QUOTAS, 'commit') rpcapi = self.compute_api.compute_rpcapi self.mox.StubOutWithMock(rpcapi, 'confirm_resize') if self.is_cells: rpcapi = self.compute_api.cells_rpcapi self.mox.StubOutWithMock(rpcapi, 'terminate_instance') self.mox.StubOutWithMock(rpcapi, 'soft_delete_instance') db.block_device_mapping_get_all_by_instance( self.context, inst.uuid).AndReturn([]) inst.save() self.compute_api._create_reservations( self.context, inst, inst.instance_type_id, inst.project_id, 
inst.user_id).AndReturn(reservations) # NOTE(comstud): This is getting messy. But what we are wanting # to test is: # If cells is enabled and we're the API cell: # * Cast to cells_rpcapi.<method> with reservations=None # * Commit reservations # Otherwise: # * Check for downed host # * If downed host: # * Clean up instance, destroying it, sending notifications. # (Tested in _test_downed_host_part()) # * Commit reservations # * If not downed host: # * Record the action start. # * Cast to compute_rpcapi.<method> with the reservations cast = True commit_quotas = True if not self.is_cells: if inst.vm_state == vm_states.RESIZED: self._test_delete_resized_part(inst) self.context.elevated().AndReturn(self.context) db.service_get_by_compute_host( self.context, inst.host).AndReturn( test_service.fake_service) self.compute_api.servicegroup_api.service_is_up( mox.IsA(service_obj.Service)).AndReturn( inst.host != 'down-host') if inst.host == 'down-host': self._test_downed_host_part(inst, updates, delete_time, delete_type) cast = False else: # Happens on the manager side commit_quotas = False if cast: if not self.is_cells: self.compute_api._record_action_start(self.context, inst, instance_actions.DELETE) if commit_quotas: cast_reservations = None else: cast_reservations = reservations if delete_type == 'soft_delete': rpcapi.soft_delete_instance(self.context, inst, reservations=cast_reservations) elif delete_type in ['delete', 'force_delete']: rpcapi.terminate_instance(self.context, inst, [], reservations=cast_reservations) if commit_quotas: # Local delete or when is_cells is True. 
quota.QUOTAS.commit(self.context, reservations, project_id=inst.project_id, user_id=inst.user_id) self.mox.ReplayAll() getattr(self.compute_api, delete_type)(self.context, inst) for k, v in updates.items(): self.assertEqual(inst[k], v) def test_delete(self): self._test_delete('delete') def test_delete_if_not_launched(self): self._test_delete('delete', launched_at=None) def test_delete_in_resizing(self): self._test_delete('delete', task_state=task_states.RESIZE_FINISH) def test_delete_in_resized(self): self._test_delete('delete', vm_state=vm_states.RESIZED) def test_delete_with_down_host(self): self._test_delete('delete', host='down-host') def test_delete_soft_with_down_host(self): self._test_delete('soft_delete', host='down-host') def test_delete_soft(self): self._test_delete('soft_delete') def test_delete_forced(self): self._test_delete('force_delete', vm_state=vm_states.SOFT_DELETED) def test_delete_fast_if_host_not_set(self): inst = self._create_instance_obj() inst.host = '' updates = {'progress': 0, 'task_state': task_states.DELETING} self.mox.StubOutWithMock(inst, 'save') self.mox.StubOutWithMock(db, 'block_device_mapping_get_all_by_instance') self.mox.StubOutWithMock(db, 'constraint') self.mox.StubOutWithMock(db, 'instance_destroy') self.mox.StubOutWithMock(self.compute_api, '_create_reservations') self.mox.StubOutWithMock(compute_utils, 'notify_about_instance_usage') if self.is_cells: rpcapi = self.compute_api.cells_rpcapi else: rpcapi = self.compute_api.compute_rpcapi self.mox.StubOutWithMock(rpcapi, 'terminate_instance') db.block_device_mapping_get_all_by_instance(self.context, inst.uuid).AndReturn([]) inst.save() self.compute_api._create_reservations(self.context, inst, inst.instance_type_id, inst.project_id, inst.user_id ).AndReturn(None) if self.is_cells: rpcapi.terminate_instance(self.context, inst, [], reservations=None) else: compute_utils.notify_about_instance_usage(mox.IgnoreArg(), self.context, inst, 'delete.start') 
db.constraint(host=mox.IgnoreArg()).AndReturn('constraint') db.instance_destroy(self.context, inst.uuid, constraint='constraint') compute_utils.notify_about_instance_usage( mox.IgnoreArg(), self.context, inst, 'delete.end', system_metadata=inst.system_metadata) self.mox.ReplayAll() self.compute_api.delete(self.context, inst) for k, v in updates.items(): self.assertEqual(inst[k], v) def test_local_delete_with_deleted_volume(self): bdms = [{'id': 'bmd_id', 'volume_id': 'volume_id', 'delete_on_termiantion': False}] def _fake_do_delete(context, instance, bdms, rservations=None, local=False): pass inst = self._create_instance_obj() inst._context = self.context self.mox.StubOutWithMock(inst, 'destroy') self.mox.StubOutWithMock(self.context, 'elevated') self.mox.StubOutWithMock(inst.info_cache, 'delete') self.mox.StubOutWithMock(self.compute_api.network_api, 'deallocate_for_instance') self.mox.StubOutWithMock(db, 'instance_system_metadata_get') self.mox.StubOutWithMock(compute_utils, 'notify_about_instance_usage') self.mox.StubOutWithMock(self.compute_api.volume_api, 'terminate_connection') self.mox.StubOutWithMock(db, 'block_device_mapping_destroy') if self.is_cells: rpcapi = self.compute_api.cells_rpcapi else: rpcapi = self.compute_api.compute_rpcapi self.mox.StubOutWithMock(rpcapi, 'terminate_instance') inst.info_cache.delete() compute_utils.notify_about_instance_usage(mox.IgnoreArg(), self.context, inst, 'delete.start') self.context.elevated().MultipleTimes().AndReturn(self.context) if not self.is_cells: self.compute_api.network_api.deallocate_for_instance( self.context, inst) db.instance_system_metadata_get(self.context, inst.uuid ).AndReturn('sys-meta') self.compute_api.volume_api.terminate_connection( mox.IgnoreArg(), mox.IgnoreArg(), mox.IgnoreArg()).\ AndRaise(exception. 
VolumeNotFound('volume_id')) db.block_device_mapping_destroy(self.context, mox.IgnoreArg()) inst.destroy() compute_utils.notify_about_instance_usage( mox.IgnoreArg(), self.context, inst, 'delete.end', system_metadata='sys-meta') self.mox.ReplayAll() self.compute_api._local_delete(self.context, inst, bdms, 'delete', _fake_do_delete) def test_delete_disabled(self): inst = self._create_instance_obj() inst.disable_terminate = True self.mox.StubOutWithMock(db, 'instance_update_and_get_original') self.mox.ReplayAll() self.compute_api.delete(self.context, inst) def test_delete_soft_rollback(self): inst = self._create_instance_obj() self.mox.StubOutWithMock(db, 'block_device_mapping_get_all_by_instance') self.mox.StubOutWithMock(inst, 'save') delete_time = datetime.datetime(1955, 11, 5) timeutils.set_time_override(delete_time) db.block_device_mapping_get_all_by_instance( self.context, inst.uuid).AndReturn([]) inst.save().AndRaise(test.TestingException) self.mox.ReplayAll() self.assertRaises(test.TestingException, self.compute_api.soft_delete, self.context, inst) def _test_confirm_resize(self, mig_ref_passed=False): params = dict(vm_state=vm_states.RESIZED) fake_inst = self._create_instance_obj(params=params) fake_mig = migration_obj.Migration._from_db_object( self.context, migration_obj.Migration(), test_migration.fake_db_migration()) self.mox.StubOutWithMock(self.context, 'elevated') self.mox.StubOutWithMock(migration_obj.Migration, 'get_by_instance_and_status') self.mox.StubOutWithMock(self.compute_api, '_downsize_quota_delta') self.mox.StubOutWithMock(self.compute_api, '_reserve_quota_delta') self.mox.StubOutWithMock(fake_mig, 'save') self.mox.StubOutWithMock(quota.QUOTAS, 'commit') self.mox.StubOutWithMock(self.compute_api, '_record_action_start') self.mox.StubOutWithMock(self.compute_api.compute_rpcapi, 'confirm_resize') self.context.elevated().AndReturn(self.context) if not mig_ref_passed: migration_obj.Migration.get_by_instance_and_status( self.context, 
fake_inst['uuid'], 'finished').AndReturn( fake_mig) self.compute_api._downsize_quota_delta(self.context, fake_inst).AndReturn('deltas') resvs = ['resvs'] self.compute_api._reserve_quota_delta(self.context, 'deltas').AndReturn(resvs) def _check_mig(expected_task_state=None): self.assertEqual('confirming', fake_mig.status) fake_mig.save().WithSideEffects(_check_mig) if self.is_cells: quota.QUOTAS.commit(self.context, resvs) resvs = [] self.compute_api._record_action_start(self.context, fake_inst, 'confirmResize') self.compute_api.compute_rpcapi.confirm_resize( self.context, fake_inst, fake_mig, 'compute-source', resvs) self.mox.ReplayAll() if mig_ref_passed: self.compute_api.confirm_resize(self.context, fake_inst, migration=fake_mig) else: self.compute_api.confirm_resize(self.context, fake_inst) def test_confirm_resize(self): self._test_confirm_resize() def test_confirm_resize_with_migration_ref(self): self._test_confirm_resize(mig_ref_passed=True) def _test_revert_resize(self): params = dict(vm_state=vm_states.RESIZED) fake_inst = self._create_instance_obj(params=params) fake_mig = migration_obj.Migration._from_db_object( self.context, migration_obj.Migration(), test_migration.fake_db_migration()) self.mox.StubOutWithMock(self.context, 'elevated') self.mox.StubOutWithMock(migration_obj.Migration, 'get_by_instance_and_status') self.mox.StubOutWithMock(self.compute_api, '_reverse_upsize_quota_delta') self.mox.StubOutWithMock(self.compute_api, '_reserve_quota_delta') self.mox.StubOutWithMock(fake_inst, 'save') self.mox.StubOutWithMock(fake_mig, 'save') self.mox.StubOutWithMock(quota.QUOTAS, 'commit') self.mox.StubOutWithMock(self.compute_api, '_record_action_start') self.mox.StubOutWithMock(self.compute_api.compute_rpcapi, 'revert_resize') self.context.elevated().AndReturn(self.context) migration_obj.Migration.get_by_instance_and_status( self.context, fake_inst['uuid'], 'finished').AndReturn( fake_mig) self.compute_api._reverse_upsize_quota_delta( self.context, 
fake_mig).AndReturn('deltas') resvs = ['resvs'] self.compute_api._reserve_quota_delta(self.context, 'deltas').AndReturn(resvs) def _check_state(expected_task_state=None): self.assertEqual(task_states.RESIZE_REVERTING, fake_inst.task_state) fake_inst.save(expected_task_state=None).WithSideEffects( _check_state) def _check_mig(expected_task_state=None): self.assertEqual('reverting', fake_mig.status) fake_mig.save().WithSideEffects(_check_mig) if self.is_cells: quota.QUOTAS.commit(self.context, resvs) resvs = [] self.compute_api._record_action_start(self.context, fake_inst, 'revertResize') self.compute_api.compute_rpcapi.revert_resize( self.context, fake_inst, fake_mig, 'compute-dest', resvs) self.mox.ReplayAll() self.compute_api.revert_resize(self.context, fake_inst) def test_revert_resize(self): self._test_revert_resize() def _test_resize(self, flavor_id_passed=True, same_host=False, allow_same_host=False, allow_mig_same_host=False, project_id=None, extra_kwargs=None): if extra_kwargs is None: extra_kwargs = {} self.flags(allow_resize_to_same_host=allow_same_host, allow_migrate_to_same_host=allow_mig_same_host) params = {} if project_id is not None: # To test instance w/ different project id than context (admin) params['project_id'] = project_id fake_inst = self._create_instance_obj(params=params) self.mox.StubOutWithMock(flavors, 'get_flavor_by_flavor_id') self.mox.StubOutWithMock(self.compute_api, '_upsize_quota_delta') self.mox.StubOutWithMock(self.compute_api, '_reserve_quota_delta') self.mox.StubOutWithMock(fake_inst, 'save') self.mox.StubOutWithMock(quota.QUOTAS, 'commit') self.mox.StubOutWithMock(self.compute_api, '_record_action_start') self.mox.StubOutWithMock(self.compute_api.compute_task_api, 'resize_instance') current_flavor = flavors.extract_flavor(fake_inst) if flavor_id_passed: new_flavor = dict(id=200, flavorid='new-flavor-id', name='new_flavor', disabled=False) flavors.get_flavor_by_flavor_id( 'new-flavor-id', 
read_deleted='no').AndReturn(new_flavor) else: new_flavor = current_flavor resvs = ['resvs'] self.compute_api._upsize_quota_delta( self.context, new_flavor, current_flavor).AndReturn('deltas') self.compute_api._reserve_quota_delta(self.context, 'deltas', project_id=fake_inst['project_id']).AndReturn(resvs) def _check_state(expected_task_state=None): self.assertEqual(task_states.RESIZE_PREP, fake_inst.task_state) self.assertEqual(fake_inst.progress, 0) for key, value in extra_kwargs.items(): self.assertEqual(value, getattr(fake_inst, key)) fake_inst.save(expected_task_state=None).WithSideEffects( _check_state) if allow_same_host: filter_properties = {'ignore_hosts': []} else: filter_properties = {'ignore_hosts': [fake_inst['host']]} if not flavor_id_passed and not allow_mig_same_host: filter_properties['ignore_hosts'].append(fake_inst['host']) if self.is_cells: quota.QUOTAS.commit(self.context, resvs, project_id=fake_inst['project_id']) resvs = [] mig = migration_obj.Migration() def _get_migration(): return mig def _check_mig(ctxt): self.assertEqual(fake_inst.uuid, mig.instance_uuid) self.assertEqual(current_flavor['id'], mig.old_instance_type_id) self.assertEqual(new_flavor['id'], mig.new_instance_type_id) self.assertEqual('finished', mig.status) self.stubs.Set(migration_obj, 'Migration', _get_migration) self.mox.StubOutWithMock(self.context, 'elevated') self.mox.StubOutWithMock(mig, 'create') self.context.elevated().AndReturn(self.context) mig.create(self.context).WithSideEffects(_check_mig) self.compute_api._record_action_start(self.context, fake_inst, 'resize') scheduler_hint = {'filter_properties': filter_properties} self.compute_api.compute_task_api.resize_instance( self.context, fake_inst, extra_kwargs, scheduler_hint=scheduler_hint, flavor=new_flavor, reservations=resvs) self.mox.ReplayAll() if flavor_id_passed: self.compute_api.resize(self.context, fake_inst, flavor_id='new-flavor-id', **extra_kwargs) else: self.compute_api.resize(self.context, fake_inst, 
**extra_kwargs) def _test_migrate(self, *args, **kwargs): self._test_resize(*args, flavor_id_passed=True, **kwargs) def test_resize(self): self._test_resize() def test_resize_with_kwargs(self): self._test_resize(extra_kwargs=dict(cow='moo')) def test_resize_same_host_and_allowed(self): self._test_resize(same_host=True, allow_same_host=True) def test_resize_same_host_and_not_allowed(self): self._test_resize(same_host=True, allow_same_host=False) def test_resize_different_project_id(self): self._test_resize(project_id='different') def test_migrate(self): self._test_migrate() def test_migrate_with_kwargs(self): self._test_migrate(extra_kwargs=dict(cow='moo')) def test_migrate_same_host_and_allowed(self): self._test_migrate(same_host=True, allow_same_host=True) def test_migrate_same_host_and_not_allowed(self): self._test_migrate(same_host=True, allow_same_host=False) def test_migrate_different_project_id(self): self._test_migrate(project_id='different') def test_resize_invalid_flavor_fails(self): self.mox.StubOutWithMock(flavors, 'get_flavor_by_flavor_id') # Should never reach these. self.mox.StubOutWithMock(self.compute_api, '_reserve_quota_delta') self.mox.StubOutWithMock(self.compute_api, 'update') self.mox.StubOutWithMock(quota.QUOTAS, 'commit') self.mox.StubOutWithMock(self.compute_api, '_record_action_start') self.mox.StubOutWithMock(self.compute_api.compute_task_api, 'resize_instance') fake_inst = obj_base.obj_to_primitive(self._create_instance_obj()) exc = exception.FlavorNotFound(flavor_id='flavor-id') flavors.get_flavor_by_flavor_id('flavor-id', read_deleted='no').AndRaise(exc) self.mox.ReplayAll() self.assertRaises(exception.FlavorNotFound, self.compute_api.resize, self.context, fake_inst, flavor_id='flavor-id') def test_resize_disabled_flavor_fails(self): self.mox.StubOutWithMock(flavors, 'get_flavor_by_flavor_id') # Should never reach these. 
self.mox.StubOutWithMock(self.compute_api, '_reserve_quota_delta') self.mox.StubOutWithMock(self.compute_api, 'update') self.mox.StubOutWithMock(quota.QUOTAS, 'commit') self.mox.StubOutWithMock(self.compute_api, '_record_action_start') self.mox.StubOutWithMock(self.compute_api.compute_task_api, 'resize_instance') fake_inst = obj_base.obj_to_primitive(self._create_instance_obj()) fake_flavor = dict(id=200, flavorid='flavor-id', name='foo', disabled=True) flavors.get_flavor_by_flavor_id( 'flavor-id', read_deleted='no').AndReturn(fake_flavor) self.mox.ReplayAll() self.assertRaises(exception.FlavorNotFound, self.compute_api.resize, self.context, fake_inst, flavor_id='flavor-id') def test_resize_same_flavor_fails(self): self.mox.StubOutWithMock(flavors, 'get_flavor_by_flavor_id') # Should never reach these. self.mox.StubOutWithMock(self.compute_api, '_reserve_quota_delta') self.mox.StubOutWithMock(self.compute_api, 'update') self.mox.StubOutWithMock(quota.QUOTAS, 'commit') self.mox.StubOutWithMock(self.compute_api, '_record_action_start') self.mox.StubOutWithMock(self.compute_api.compute_task_api, 'resize_instance') fake_inst = obj_base.obj_to_primitive(self._create_instance_obj()) fake_flavor = flavors.extract_flavor(fake_inst) flavors.get_flavor_by_flavor_id( fake_flavor['flavorid'], read_deleted='no').AndReturn(fake_flavor) self.mox.ReplayAll() # Pass in flavor_id.. same as current flavor. self.assertRaises(exception.CannotResizeToSameFlavor, self.compute_api.resize, self.context, fake_inst, flavor_id=fake_flavor['flavorid']) def test_resize_quota_exceeds_fails(self): self.mox.StubOutWithMock(flavors, 'get_flavor_by_flavor_id') self.mox.StubOutWithMock(self.compute_api, '_upsize_quota_delta') self.mox.StubOutWithMock(self.compute_api, '_reserve_quota_delta') # Should never reach these. 
self.mox.StubOutWithMock(self.compute_api, 'update') self.mox.StubOutWithMock(quota.QUOTAS, 'commit') self.mox.StubOutWithMock(self.compute_api, '_record_action_start') self.mox.StubOutWithMock(self.compute_api.compute_task_api, 'resize_instance') fake_inst = obj_base.obj_to_primitive(self._create_instance_obj()) current_flavor = flavors.extract_flavor(fake_inst) fake_flavor = dict(id=200, flavorid='flavor-id', name='foo', disabled=False) flavors.get_flavor_by_flavor_id( 'flavor-id', read_deleted='no').AndReturn(fake_flavor) deltas = dict(resource=0) self.compute_api._upsize_quota_delta( self.context, fake_flavor, current_flavor).AndReturn(deltas) usage = dict(in_use=0, reserved=0) over_quota_args = dict(quotas={'resource': 0}, usages={'resource': usage}, overs=['resource']) self.compute_api._reserve_quota_delta(self.context, deltas, project_id=fake_inst['project_id']).AndRaise( exception.OverQuota(**over_quota_args)) self.mox.ReplayAll() self.assertRaises(exception.TooManyInstances, self.compute_api.resize, self.context, fake_inst, flavor_id='flavor-id') def test_pause(self): # Ensure instance can be paused. instance = self._create_instance_obj() self.assertEqual(instance.vm_state, vm_states.ACTIVE) self.assertEqual(instance.task_state, None) self.mox.StubOutWithMock(instance, 'save') self.mox.StubOutWithMock(self.compute_api, '_record_action_start') if self.is_cells: rpcapi = self.compute_api.cells_rpcapi else: rpcapi = self.compute_api.compute_rpcapi self.mox.StubOutWithMock(rpcapi, 'pause_instance') instance.save(expected_task_state=None) self.compute_api._record_action_start(self.context, instance, instance_actions.PAUSE) rpcapi.pause_instance(self.context, instance) self.mox.ReplayAll() self.compute_api.pause(self.context, instance) self.assertEqual(vm_states.ACTIVE, instance.vm_state) self.assertEqual(task_states.PAUSING, instance.task_state) def test_unpause(self): # Ensure instance can be unpaused. 
params = dict(vm_state=vm_states.PAUSED) instance = self._create_instance_obj(params=params) self.assertEqual(instance.vm_state, vm_states.PAUSED) self.assertEqual(instance.task_state, None) self.mox.StubOutWithMock(instance, 'save') self.mox.StubOutWithMock(self.compute_api, '_record_action_start') if self.is_cells: rpcapi = self.compute_api.cells_rpcapi else: rpcapi = self.compute_api.compute_rpcapi self.mox.StubOutWithMock(rpcapi, 'unpause_instance') instance.save(expected_task_state=None) self.compute_api._record_action_start(self.context, instance, instance_actions.UNPAUSE) rpcapi.unpause_instance(self.context, instance) self.mox.ReplayAll() self.compute_api.unpause(self.context, instance) self.assertEqual(vm_states.PAUSED, instance.vm_state) self.assertEqual(task_states.UNPAUSING, instance.task_state) def test_swap_volume_volume_api_usage(self): # This test ensures that volume_id arguments are passed to volume_api # and that volumes return to previous states in case of error. def fake_vol_api_begin_detaching(context, volume_id): self.assertTrue(uuidutils.is_uuid_like(volume_id)) volumes[volume_id]['status'] = 'detaching' def fake_vol_api_roll_detaching(context, volume_id): self.assertTrue(uuidutils.is_uuid_like(volume_id)) if volumes[volume_id]['status'] == 'detaching': volumes[volume_id]['status'] = 'in-use' def fake_vol_api_reserve(context, volume_id): self.assertTrue(uuidutils.is_uuid_like(volume_id)) self.assertEqual(volumes[volume_id]['status'], 'available') volumes[volume_id]['status'] = 'attaching' def fake_vol_api_unreserve(context, volume_id): self.assertTrue(uuidutils.is_uuid_like(volume_id)) if volumes[volume_id]['status'] == 'attaching': volumes[volume_id]['status'] = 'available' def fake_swap_volume_exc(context, instance, old_volume_id, new_volume_id): raise AttributeError # Random exception # Should fail if VM state is not valid instance = {'vm_state': vm_states.BUILDING, 'launched_at': timeutils.utcnow(), 'locked': False, 'availability_zone': 
'fake_az', 'uuid': 'fake'} volumes = {} old_volume_id = uuidutils.generate_uuid() volumes[old_volume_id] = {'id': old_volume_id, 'display_name': 'old_volume', 'attach_status': 'attached', 'instance_uuid': 'fake', 'size': 5, 'status': 'in-use'} new_volume_id = uuidutils.generate_uuid() volumes[new_volume_id] = {'id': new_volume_id, 'display_name': 'new_volume', 'attach_status': 'detached', 'instance_uuid': None, 'size': 5, 'status': 'available'} self.assertRaises(exception.InstanceInvalidState, self.compute_api.swap_volume, self.context, instance, volumes[old_volume_id], volumes[new_volume_id]) instance['vm_state'] = vm_states.ACTIVE # Should fail if old volume is not attached volumes[old_volume_id]['attach_status'] = 'detached' self.assertRaises(exception.VolumeUnattached, self.compute_api.swap_volume, self.context, instance, volumes[old_volume_id], volumes[new_volume_id]) self.assertEquals(volumes[old_volume_id]['status'], 'in-use') self.assertEquals(volumes[new_volume_id]['status'], 'available') volumes[old_volume_id]['attach_status'] = 'attached' # Should fail if old volume's instance_uuid is not that of the instance volumes[old_volume_id]['instance_uuid'] = 'fake2' self.assertRaises(exception.InvalidVolume, self.compute_api.swap_volume, self.context, instance, volumes[old_volume_id], volumes[new_volume_id]) self.assertEquals(volumes[old_volume_id]['status'], 'in-use') self.assertEquals(volumes[new_volume_id]['status'], 'available') volumes[old_volume_id]['instance_uuid'] = 'fake' # Should fail if new volume is attached volumes[new_volume_id]['attach_status'] = 'attached' self.assertRaises(exception.InvalidVolume, self.compute_api.swap_volume, self.context, instance, volumes[old_volume_id], volumes[new_volume_id]) self.assertEquals(volumes[old_volume_id]['status'], 'in-use') self.assertEquals(volumes[new_volume_id]['status'], 'available') volumes[new_volume_id]['attach_status'] = 'detached' # Should fail if new volume is smaller than the old volume 
volumes[new_volume_id]['size'] = 4 self.assertRaises(exception.InvalidVolume, self.compute_api.swap_volume, self.context, instance, volumes[old_volume_id], volumes[new_volume_id]) self.assertEquals(volumes[old_volume_id]['status'], 'in-use') self.assertEquals(volumes[new_volume_id]['status'], 'available') volumes[new_volume_id]['size'] = 5 # Fail call to swap_volume self.stubs.Set(self.compute_api.volume_api, 'begin_detaching', fake_vol_api_begin_detaching) self.stubs.Set(self.compute_api.volume_api, 'roll_detaching', fake_vol_api_roll_detaching) self.stubs.Set(self.compute_api.volume_api, 'reserve_volume', fake_vol_api_reserve) self.stubs.Set(self.compute_api.volume_api, 'unreserve_volume', fake_vol_api_unreserve) self.stubs.Set(self.compute_api.compute_rpcapi, 'swap_volume', fake_swap_volume_exc) self.assertRaises(AttributeError, self.compute_api.swap_volume, self.context, instance, volumes[old_volume_id], volumes[new_volume_id]) self.assertEquals(volumes[old_volume_id]['status'], 'in-use') self.assertEquals(volumes[new_volume_id]['status'], 'available') # Should succeed self.stubs.Set(self.compute_api.compute_rpcapi, 'swap_volume', lambda c, instance, old_volume_id, new_volume_id: True) self.compute_api.swap_volume(self.context, instance, volumes[old_volume_id], volumes[new_volume_id]) def _test_snapshot_and_backup(self, is_snapshot=True, with_base_ref=False, min_ram=None, min_disk=None, create_fails=False): # 'cache_in_nova' is for testing non-inheritable properties # 'user_id' should also not be carried from sys_meta into # image property...since it should be set explicitly by # _create_image() in compute api. 
fake_sys_meta = dict(image_foo='bar', blah='bug?', image_cache_in_nova='dropped', cache_in_nova='dropped', user_id='meow') if with_base_ref: fake_sys_meta['image_base_image_ref'] = 'fake-base-ref' params = dict(system_metadata=fake_sys_meta) instance = self._create_instance_obj(params=params) fake_sys_meta.update(instance.system_metadata) extra_props = dict(cow='moo', cat='meow') self.mox.StubOutWithMock(compute_utils, 'get_image_metadata') self.mox.StubOutWithMock(self.compute_api.image_service, 'create') self.mox.StubOutWithMock(instance, 'save') self.mox.StubOutWithMock(self.compute_api.compute_rpcapi, 'snapshot_instance') self.mox.StubOutWithMock(self.compute_api.compute_rpcapi, 'backup_instance') image_type = is_snapshot and 'snapshot' or 'backup' expected_sys_meta = dict(fake_sys_meta) expected_sys_meta.pop('cache_in_nova') expected_sys_meta.pop('image_cache_in_nova') expected_sys_meta.pop('user_id') expected_sys_meta['foo'] = expected_sys_meta.pop('image_foo') if with_base_ref: expected_sys_meta['base_image_ref'] = expected_sys_meta.pop( 'image_base_image_ref') expected_props = {'instance_uuid': instance.uuid, 'user_id': self.context.user_id, 'image_type': image_type} expected_props.update(extra_props) expected_props.update(expected_sys_meta) expected_meta = {'name': 'fake-name', 'is_public': False, 'properties': expected_props} if is_snapshot: if min_ram is not None: expected_meta['min_ram'] = min_ram if min_disk is not None: expected_meta['min_disk'] = min_disk else: expected_props['backup_type'] = 'fake-backup-type' compute_utils.get_image_metadata( self.context, self.compute_api.image_service, FAKE_IMAGE_REF, instance).AndReturn(expected_meta) fake_image = dict(id='fake-image-id') mock_method = self.compute_api.image_service.create( self.context, expected_meta) if create_fails: mock_method.AndRaise(test.TestingException()) else: mock_method.AndReturn(fake_image) def check_state(expected_task_state=None): expected_state = (is_snapshot and 
task_states.IMAGE_SNAPSHOT or task_states.IMAGE_BACKUP) self.assertEqual(expected_state, instance.task_state) if not create_fails: instance.save(expected_task_state=None).WithSideEffects( check_state) if is_snapshot: self.compute_api.compute_rpcapi.snapshot_instance( self.context, instance, fake_image['id']) else: self.compute_api.compute_rpcapi.backup_instance( self.context, instance, fake_image['id'], 'fake-backup-type', 'fake-rotation') self.mox.ReplayAll() got_exc = False try: if is_snapshot: res = self.compute_api.snapshot(self.context, instance, 'fake-name', extra_properties=extra_props) else: res = self.compute_api.backup(self.context, instance, 'fake-name', 'fake-backup-type', 'fake-rotation', extra_properties=extra_props) self.assertEqual(fake_image, res) except test.TestingException: got_exc = True self.assertEqual(create_fails, got_exc) def test_snapshot(self): self._test_snapshot_and_backup() def test_snapshot_fails(self): self._test_snapshot_and_backup(create_fails=True) def test_snapshot_invalid_state(self): instance = self._create_instance_obj() instance.vm_state = vm_states.ACTIVE instance.task_state = task_states.IMAGE_SNAPSHOT self.assertRaises(exception.InstanceInvalidState, self.compute_api.snapshot, self.context, instance, 'fake-name') instance.vm_state = vm_states.ACTIVE instance.task_state = task_states.IMAGE_BACKUP self.assertRaises(exception.InstanceInvalidState, self.compute_api.snapshot, self.context, instance, 'fake-name') instance.vm_state = vm_states.BUILDING instance.task_state = None self.assertRaises(exception.InstanceInvalidState, self.compute_api.snapshot, self.context, instance, 'fake-name') def test_snapshot_with_base_image_ref(self): self._test_snapshot_and_backup(with_base_ref=True) def test_snapshot_min_ram(self): self._test_snapshot_and_backup(min_ram=42) def test_snapshot_min_disk(self): self._test_snapshot_and_backup(min_disk=42) def test_backup(self): self._test_snapshot_and_backup(is_snapshot=False) def 
test_backup_fails(self): self._test_snapshot_and_backup(is_snapshot=False, create_fails=True) def test_backup_invalid_state(self): instance = self._create_instance_obj() instance.vm_state = vm_states.ACTIVE instance.task_state = task_states.IMAGE_SNAPSHOT self.assertRaises(exception.InstanceInvalidState, self.compute_api.snapshot, self.context, instance, 'fake-name', 'fake', 'fake') instance.vm_state = vm_states.ACTIVE instance.task_state = task_states.IMAGE_BACKUP self.assertRaises(exception.InstanceInvalidState, self.compute_api.backup, self.context, instance, 'fake-name', 'fake', 'fake') instance.vm_state = vm_states.BUILDING instance.task_state = None self.assertRaises(exception.InstanceInvalidState, self.compute_api.snapshot, self.context, instance, 'fake-name', 'fake', 'fake') def test_backup_with_base_image_ref(self): self._test_snapshot_and_backup(is_snapshot=False, with_base_ref=True) def test_volume_snapshot_create(self): volume_id = '1' create_info = {'id': 'eyedee'} fake_bdm = { 'instance': { 'uuid': 'fake_uuid', 'vm_state': vm_states.ACTIVE, }, } def fake_get_bdm(context, _volume_id, columns_to_join): self.assertEqual(volume_id, _volume_id) return fake_bdm self.stubs.Set(self.compute_api.db, 'block_device_mapping_get_by_volume_id', fake_get_bdm) self.mox.StubOutWithMock(self.compute_api.compute_rpcapi, 'volume_snapshot_create') self.compute_api.compute_rpcapi.volume_snapshot_create(self.context, fake_bdm['instance'], volume_id, create_info) self.mox.ReplayAll() snapshot = self.compute_api.volume_snapshot_create(self.context, volume_id, create_info) expected_snapshot = { 'snapshot': { 'id': create_info['id'], 'volumeId': volume_id, }, } self.assertEqual(snapshot, expected_snapshot) def test_volume_snapshot_delete(self): volume_id = '1' snapshot_id = '2' fake_bdm = { 'instance': { 'uuid': 'fake_uuid', 'vm_state': vm_states.ACTIVE, }, } def fake_get_bdm(context, _volume_id, columns_to_join): self.assertEqual(volume_id, _volume_id) return fake_bdm 
self.stubs.Set(self.compute_api.db, 'block_device_mapping_get_by_volume_id', fake_get_bdm) self.mox.StubOutWithMock(self.compute_api.compute_rpcapi, 'volume_snapshot_delete') self.compute_api.compute_rpcapi.volume_snapshot_delete(self.context, fake_bdm['instance'], volume_id, snapshot_id, {}) self.mox.ReplayAll() self.compute_api.volume_snapshot_delete(self.context, volume_id, snapshot_id, {}) def _create_instance_with_disabled_disk_config(self): sys_meta = {"image_auto_disk_config": "Disabled"} params = {"system_metadata": sys_meta} return obj_base.obj_to_primitive(self._create_instance_obj( params=params)) def _setup_fake_image_with_disabled_disk_config(self): self.fake_image = { 'id': 1, 'name': 'fake_name', 'status': 'active', 'properties': {"auto_disk_config": "Disabled"}, } def fake_show(obj, context, image_id): return self.fake_image fake_image.stub_out_image_service(self.stubs) self.stubs.Set(fake_image._FakeImageService, 'show', fake_show) return self.fake_image['id'] def test_resize_with_disabled_auto_disk_config_fails(self): fake_inst = self._create_instance_with_disabled_disk_config() self.assertRaises(exception.AutoDiskConfigDisabledByImage, self.compute_api.resize, self.context, fake_inst, auto_disk_config=True) def test_create_with_disabled_auto_disk_config_fails(self): image_id = self._setup_fake_image_with_disabled_disk_config() self.assertRaises(exception.AutoDiskConfigDisabledByImage, self.compute_api.create, self.context, "fake_flavor", image_id, auto_disk_config=True) def test_rebuild_with_disabled_auto_disk_config_fails(self): fake_inst = self._create_instance_with_disabled_disk_config() image_id = self._setup_fake_image_with_disabled_disk_config() self.assertRaises(exception.AutoDiskConfigDisabledByImage, self.compute_api.rebuild, self.context, fake_inst, image_id, "new password", auto_disk_config=True) class ComputeAPIUnitTestCase(_ComputeAPIUnitTestMixIn, test.NoDBTestCase): def setUp(self): super(ComputeAPIUnitTestCase, self).setUp() 
self.compute_api = compute_api.API() self.is_cells = False class ComputeCellsAPIUnitTestCase(_ComputeAPIUnitTestMixIn, test.NoDBTestCase): def setUp(self): super(ComputeCellsAPIUnitTestCase, self).setUp() self.flags(cell_type='api', enable=True, group='cells') self.compute_api = compute_cells_api.ComputeCellsAPI() self.is_cells = True
apache-2.0
knz/slcore
slc/tools/slc/mt/common/regmagic.py
2
13418
from ...msg import die, warn import re class RegMagic: def __init__(self, rd): self._regs = { 'i' : { 'l' : [None]*32, 'g' : [None]*32, 's' : [None]*32, 'd' : [None]*32 }, 'f' : { 'l' : [None]*32, 'g' : [None]*32, 's' : [None]*32, 'd' : [None]*32 }, } self._reg_inv = map(lambda x:[], xrange(32)) self._freg_inv = map(lambda x:[], xrange(32)) self._aliases = {} self.rd = rd self.comprefix = rd.comprefix aliases = self._aliases for r,t in rd.reg_mapping.items(): cat = r[0] if r[1] == 'f': species = 'f' nr = int(r[2:]) legnr = rd.legacy_fregs[t] inv_dic = self._freg_inv else: species = 'i' nr = int(r[1:]) legnr = rd.legacy_regs[t] inv_dic = self._reg_inv reg = { 'species' : species, 'cat' : cat, 'name' : r, 'legname' : t, 'nr' : nr, 'legnr' : legnr } if aliases.has_key(r): die("mt.regmagic: alias %s already defined" % r) aliases[r] = reg if self._regs[species][cat][nr] is not None: die("mt.regmagic: register %s already defined" % r) self._regs[species][cat][nr] = reg inv_dic[legnr].append(reg) for a,k in rd.reg_aliases.items(): if aliases.has_key(a): die("mt.regmagic: alias %s already defined" % a) aliases[a] = aliases[k] # Consistency check for i in self._reg_inv: if len(i) == 0: die("mt.regmagic: all legacy integer registers are not used") if i in self._freg_inv: if len(i) == 0: die("mt.regmagic: all legacy fp registers are not used") # machine-specific post-initialisation rd.post_init_regmagic(self) ### services ### def mapcall(self,args, funcname = '<unknown>', loc = None): """ Compute a calling convention for the given actual argument list. 
""" dic = { 'i' : { 'sh' : [], 'gl' : [] }, 'f' : { 'sh' : [], 'gl' : [] }, } anames = {} #print args for a in args: if a.type not in ['sharg', 'shfarg', 'glarg', 'glfarg', 'shparm','shfparm', 'glparm','glfparm', 'glparm_mutable', 'glfparm_mutable']: die("unknown thread parameter/argument type: %s" % a.type, loc) cat = a.type[:2] if a.type[2] == 'f': species = 'f' else: species = 'i' if a.type.endswith('_mutable'): mode = 'w' else: mode = 'r' na = {'loc':getattr(a, 'loc', None), 'cat':cat, 'species':species, 'ctype':a.ctype, 'name':a.name, 'init':getattr(a, 'init', None), 'glmode':mode} anames[a.name] = na dic[species][cat].append(na) # Check if some parameters will use memory passing escape = 0 if len(dic['f']['sh']) * 2 + len(dic['f']['gl']) > self.rd.fargregs: escape = 1 if len(dic['i']['sh']) * 2 + len(dic['i']['gl']) > (self.rd.iargregs - escape): escape = 1 # Start allocate islots_avail = self.rd.iargregs - escape fslots_avail = self.rd.fargregs sislots = [] sfslots = [] gislots = [] gfslots = [] # Allocate shareds first if len(dic['i']['sh']) * 2 > islots_avail: die("%s: too many int shareds, cannot pass them via memory yet" % funcname, loc) shc = 0 for s in dic['i']['sh']: s['mode'] = 'reg' s['regnr'] = shc shc += 1 islots_avail -= 2 sislots.append(s) nrishareds = shc if len(dic['f']['sh']) > fslots_avail: die("%s: too many float shareds, cannot pass them via memory yet" % funcname, loc) shc = 0 for s in dic['f']['sh']: s['mode'] = 'reg' s['regnr'] = shc shc += 1 fslots_avail -= 2 sfslots.append(s) nrfshareds = shc # Allocate fp globals glc = 0 memlayout = [] offset = 0 for s in dic['f']['gl']: if fslots_avail > 0: s['mode'] = 'reg' s['regnr'] = glc glc += 1 fslots_avail -= 1 gfslots.append(s) else: s['mode'] = 'mem' s['offset'] = offset offset += 1 memlayout.append(s) nrfglobals = glc glc = 0 for s in dic['i']['gl']: if islots_avail > 0: s['mode'] = 'reg' s['regnr'] = glc glc += 1 islots_avail -= 1 gislots.append(s) else: s['mode'] = 'mem' s['offset'] = 
offset offset += 1 memlayout.append(s) nriglobals = glc ret = {} if escape == 0: ret['gl_mem_offset'] = None else: warn("%s: some arguments are passed via memory" % funcname, loc) nriglobals = glc + 1 ret['gl_mem_offset'] = glc # nriglobals = nriglobals + 1 # count PV ret['nrargregs'] = { 'gli' : nriglobals, 'shi' : nrishareds, 'glf' : nrfglobals, 'shf' : nrfshareds } ret['args'] = dic ret['nargs'] = anames ret['memlayout'] = memlayout ret['sislots'] = sislots ret['sfslots'] = sfslots ret['gislots'] = gislots ret['gfslots'] = gfslots #pprint.pprint(ret) return ret def makecom(self, com): return '%s %s' % (self.rd.comprefix, com) def makecanon(self, spec, nr, legname_hint = None): if self.rd.canon_is_numeric: p = '' if spec == 'f': p = 'f' return '%s%s%d' % (self.rd.regprefix, p, nr) else: if legname_hint is not None: return legname_hint if spec == 'f': d = self._freg_inv else: d = self._reg_inv return '%s%s' % (self.rd.regprefix, d[nr][0]['legname']) def get_legacy(self, legname): """ Return the legacy canonical register name for a given legacy register alias. """ if legname.startswith('f') and legname != 'fp': return self.makecanon('f', self.rd.legacy_fregs[legname], legname_hint = legname) else: return self.makecanon('i', self.rd.legacy_regs[legname], legname_hint = legname) def vreg_to_legacy(self, vreg): """ Return the legacy canonical register name for a given virtual register. """ return self.makecanon(vreg['species'], vreg['legnr']) def get_vreg(self,vname): """ Return the virtual register for a given (virtual) alias. """ vname = vname.lstrip(self.rd.regprefix) return self._aliases[vname] def vname_to_legacy(self,vname): """ Return the legacy canonical register name for a given virtual register alias. """ return self.vreg_to_legacy(self.get_vreg(vname)) def get_vregs_for_legacy(self,legname): """ Return the list of virtual registers that can overlap for the given legacy register alias. 
""" if legname.startswith('%sf' % self.rd.regprefix) and legname != '%sfp' % self.rd.regprefix: return self._freg_inv[self.rd.legacy_fregs[legname[1:]]] elif legname.startswith('f') and legname != 'fp': return self._freg_inv[self.rd.legacy_fregs[legname]] elif legname.startswith(self.rd.regprefix): return self._reg_inv[self.rd.legacy_regs[legname[1:]]] else: return self._reg_inv[self.rd.legacy_regs[legname]] def alias_to_vname(self,alias): """ Return the canonical virtual register name for a given (virtual) alias. """ return '$' + self.rd.reg_aliases[alias] def legacy_to_canon(self, legname): """ Returnt the canonical virtual register name for a given legacy alias. """ lo = self.rd.mt_locals_offset flo = self.rd.mt_flocals_offset if legname.startswith('f') and legname != 'fp': r = self._freg_inv[self.rd.legacy_fregs[legname[1:]]] pref = 'f' off = r[0]['nr'] + flo else: r = self._reg_inv[self.rd.legacy_regs[legname]] pref = '' off = r[0]['nr'] + lo assert len(r) == 1 assert r[0]['cat'] == 'l' return '$%s%d' % (pref, off) def makecrepl(self,funname): """ Create a substitution function suitable for re.sub, for renaming C-used registers to the base of the register window. 
""" subst = {} regs = self._regs lo = self.rd.mt_locals_offset flo = self.rd.mt_flocals_offset for (spec, pref) in [('i',''),('f','f')]: for r in (r for r in regs[spec]['l'] if r is not None): key = "$%s%d" % (pref,r['legnr']) assert not subst.has_key(key) rname = r['name'][1:] v = '$' isfloat = False if rname[0] == 'f': v += 'f' rname = rname[1:] isfloat = True # all registers shifted by offset, except $31 (zero) rl = int(rname) if isfloat: rl += flo elif rl != 31: rl += lo v = '%s%d' % (v, rl) subst[key] = v #import sys #print >>sys.stderr, "XXX", subst def repl(match): k = match.group(1) if k not in subst: raise die("in function %s: unsupported use of register %s" % (funname, k)) return subst[k] return repl def makerepl(self, gli, shi, glf, shf): """ Create a substitution function suitable for re.sub, based on the values given to .registers. """ subst = {} regs = self._regs for (spec, cat, nr, pref) in [('i','d',shi, ''), ('i','s',shi, ''), ('i','g',gli, ''), ('f','d',shf, 'f'), ('f','s',shf, 'f'), ('f','g',glf, 'f')]: for i in xrange(nr): r = regs[spec][cat][i] key = "%s%d" % (pref,r['legnr']) assert not subst.has_key(key) subst[key] = '$' + r['name'] for (spec, pref) in [('i',''),('f','f')]: for r in (r for r in regs[spec]['l'] if r is not None): key = "%s%d" % (pref,r['legnr']) assert not subst.has_key(key) subst[key] = '$' + r['name'] #print "MAKEREPL: ", subst def repl(match): r = match.group(1) return subst.get(r[1:], r) return repl def makelegre(self): """ Create a regexp that matches all legacy register names. 
""" pats = set() r = self.rd.legacy_regs rf = self.rd.legacy_fregs p = self.rd.regprefix for k in r.keys() + rf.keys(): # find whether there is a numerical suffix num = False if k[-1:] in "0123456789": k = k[:-1] num = True if k[-1:] in "0123456789": k = k[:-1] if len(k) == 0: continue # gen one pattern if num is True: k += r'\d+' pats.add(k) restr = r'(\%s(?:%s))' % (p, '|'.join(pats)) #print "XXX", restr return re.compile(restr) def makelegcanonrepl(self): """ Create a subsitution function suitable for re.sub, to replace all legacy register aliases by a $N/$fN equivalent. """ subst = {} r = self.rd.legacy_regs rf = self.rd.legacy_fregs for (k,v) in r.items(): subst[k] = '$%d' % v for (k,v) in rf.items(): subst[k] = '$f%d' % v #print "YYY", subst def repl(match): r = match.group(1) return subst.get(r[1:], r) return repl def get_dot_registers(self, gli, shi, glf, shf): return ".registers %d %d %d %d %d %d" % (gli, shi, self.rd.ilocalregs, glf, shf, self.rd.flocalregs)
gpl-3.0
CristianBB/SickRage
lib/unidecode/x0cc.py
253
4749
data = ( 'jjyim', # 0x00 'jjyib', # 0x01 'jjyibs', # 0x02 'jjyis', # 0x03 'jjyiss', # 0x04 'jjying', # 0x05 'jjyij', # 0x06 'jjyic', # 0x07 'jjyik', # 0x08 'jjyit', # 0x09 'jjyip', # 0x0a 'jjyih', # 0x0b 'jji', # 0x0c 'jjig', # 0x0d 'jjigg', # 0x0e 'jjigs', # 0x0f 'jjin', # 0x10 'jjinj', # 0x11 'jjinh', # 0x12 'jjid', # 0x13 'jjil', # 0x14 'jjilg', # 0x15 'jjilm', # 0x16 'jjilb', # 0x17 'jjils', # 0x18 'jjilt', # 0x19 'jjilp', # 0x1a 'jjilh', # 0x1b 'jjim', # 0x1c 'jjib', # 0x1d 'jjibs', # 0x1e 'jjis', # 0x1f 'jjiss', # 0x20 'jjing', # 0x21 'jjij', # 0x22 'jjic', # 0x23 'jjik', # 0x24 'jjit', # 0x25 'jjip', # 0x26 'jjih', # 0x27 'ca', # 0x28 'cag', # 0x29 'cagg', # 0x2a 'cags', # 0x2b 'can', # 0x2c 'canj', # 0x2d 'canh', # 0x2e 'cad', # 0x2f 'cal', # 0x30 'calg', # 0x31 'calm', # 0x32 'calb', # 0x33 'cals', # 0x34 'calt', # 0x35 'calp', # 0x36 'calh', # 0x37 'cam', # 0x38 'cab', # 0x39 'cabs', # 0x3a 'cas', # 0x3b 'cass', # 0x3c 'cang', # 0x3d 'caj', # 0x3e 'cac', # 0x3f 'cak', # 0x40 'cat', # 0x41 'cap', # 0x42 'cah', # 0x43 'cae', # 0x44 'caeg', # 0x45 'caegg', # 0x46 'caegs', # 0x47 'caen', # 0x48 'caenj', # 0x49 'caenh', # 0x4a 'caed', # 0x4b 'cael', # 0x4c 'caelg', # 0x4d 'caelm', # 0x4e 'caelb', # 0x4f 'caels', # 0x50 'caelt', # 0x51 'caelp', # 0x52 'caelh', # 0x53 'caem', # 0x54 'caeb', # 0x55 'caebs', # 0x56 'caes', # 0x57 'caess', # 0x58 'caeng', # 0x59 'caej', # 0x5a 'caec', # 0x5b 'caek', # 0x5c 'caet', # 0x5d 'caep', # 0x5e 'caeh', # 0x5f 'cya', # 0x60 'cyag', # 0x61 'cyagg', # 0x62 'cyags', # 0x63 'cyan', # 0x64 'cyanj', # 0x65 'cyanh', # 0x66 'cyad', # 0x67 'cyal', # 0x68 'cyalg', # 0x69 'cyalm', # 0x6a 'cyalb', # 0x6b 'cyals', # 0x6c 'cyalt', # 0x6d 'cyalp', # 0x6e 'cyalh', # 0x6f 'cyam', # 0x70 'cyab', # 0x71 'cyabs', # 0x72 'cyas', # 0x73 'cyass', # 0x74 'cyang', # 0x75 'cyaj', # 0x76 'cyac', # 0x77 'cyak', # 0x78 'cyat', # 0x79 'cyap', # 0x7a 'cyah', # 0x7b 'cyae', # 0x7c 'cyaeg', # 0x7d 'cyaegg', # 0x7e 'cyaegs', # 0x7f 'cyaen', # 0x80 'cyaenj', 
# 0x81 'cyaenh', # 0x82 'cyaed', # 0x83 'cyael', # 0x84 'cyaelg', # 0x85 'cyaelm', # 0x86 'cyaelb', # 0x87 'cyaels', # 0x88 'cyaelt', # 0x89 'cyaelp', # 0x8a 'cyaelh', # 0x8b 'cyaem', # 0x8c 'cyaeb', # 0x8d 'cyaebs', # 0x8e 'cyaes', # 0x8f 'cyaess', # 0x90 'cyaeng', # 0x91 'cyaej', # 0x92 'cyaec', # 0x93 'cyaek', # 0x94 'cyaet', # 0x95 'cyaep', # 0x96 'cyaeh', # 0x97 'ceo', # 0x98 'ceog', # 0x99 'ceogg', # 0x9a 'ceogs', # 0x9b 'ceon', # 0x9c 'ceonj', # 0x9d 'ceonh', # 0x9e 'ceod', # 0x9f 'ceol', # 0xa0 'ceolg', # 0xa1 'ceolm', # 0xa2 'ceolb', # 0xa3 'ceols', # 0xa4 'ceolt', # 0xa5 'ceolp', # 0xa6 'ceolh', # 0xa7 'ceom', # 0xa8 'ceob', # 0xa9 'ceobs', # 0xaa 'ceos', # 0xab 'ceoss', # 0xac 'ceong', # 0xad 'ceoj', # 0xae 'ceoc', # 0xaf 'ceok', # 0xb0 'ceot', # 0xb1 'ceop', # 0xb2 'ceoh', # 0xb3 'ce', # 0xb4 'ceg', # 0xb5 'cegg', # 0xb6 'cegs', # 0xb7 'cen', # 0xb8 'cenj', # 0xb9 'cenh', # 0xba 'ced', # 0xbb 'cel', # 0xbc 'celg', # 0xbd 'celm', # 0xbe 'celb', # 0xbf 'cels', # 0xc0 'celt', # 0xc1 'celp', # 0xc2 'celh', # 0xc3 'cem', # 0xc4 'ceb', # 0xc5 'cebs', # 0xc6 'ces', # 0xc7 'cess', # 0xc8 'ceng', # 0xc9 'cej', # 0xca 'cec', # 0xcb 'cek', # 0xcc 'cet', # 0xcd 'cep', # 0xce 'ceh', # 0xcf 'cyeo', # 0xd0 'cyeog', # 0xd1 'cyeogg', # 0xd2 'cyeogs', # 0xd3 'cyeon', # 0xd4 'cyeonj', # 0xd5 'cyeonh', # 0xd6 'cyeod', # 0xd7 'cyeol', # 0xd8 'cyeolg', # 0xd9 'cyeolm', # 0xda 'cyeolb', # 0xdb 'cyeols', # 0xdc 'cyeolt', # 0xdd 'cyeolp', # 0xde 'cyeolh', # 0xdf 'cyeom', # 0xe0 'cyeob', # 0xe1 'cyeobs', # 0xe2 'cyeos', # 0xe3 'cyeoss', # 0xe4 'cyeong', # 0xe5 'cyeoj', # 0xe6 'cyeoc', # 0xe7 'cyeok', # 0xe8 'cyeot', # 0xe9 'cyeop', # 0xea 'cyeoh', # 0xeb 'cye', # 0xec 'cyeg', # 0xed 'cyegg', # 0xee 'cyegs', # 0xef 'cyen', # 0xf0 'cyenj', # 0xf1 'cyenh', # 0xf2 'cyed', # 0xf3 'cyel', # 0xf4 'cyelg', # 0xf5 'cyelm', # 0xf6 'cyelb', # 0xf7 'cyels', # 0xf8 'cyelt', # 0xf9 'cyelp', # 0xfa 'cyelh', # 0xfb 'cyem', # 0xfc 'cyeb', # 0xfd 'cyebs', # 0xfe 'cyes', # 0xff )
gpl-3.0
xuleiboy1234/autoTitle
tensorflow/tensorflow/python/ops/rnn_cell.py
71
1461
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Module for constructing RNN Cells. ## Base interface for all RNN Cells @@RNNCell ## RNN Cells for use with TensorFlow's core RNN methods @@BasicRNNCell @@BasicLSTMCell @@GRUCell @@LSTMCell ## Classes storing split `RNNCell` state @@LSTMStateTuple ## RNN Cell wrappers (RNNCells that wrap other RNNCells) @@MultiRNNCell @@DropoutWrapper @@DeviceWrapper @@ResidualWrapper """ from __future__ import absolute_import from __future__ import division from __future__ import print_function # go/tf-wildcard-import # pylint: disable=wildcard-import from tensorflow.python.ops.rnn_cell_impl import * # pylint: enable=wildcard-import from tensorflow.python.util.all_util import remove_undocumented _allowed_symbols = [] remove_undocumented(__name__, _allowed_symbols)
mit
Metaswitch/calico-nova
nova/tests/unit/scheduler/filters/test_image_props_filters.py
66
10627
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from nova.compute import arch from nova.compute import hv_type from nova.compute import vm_mode from nova.scheduler.filters import image_props_filter from nova import test from nova.tests.unit.scheduler import fakes from nova import utils class TestImagePropsFilter(test.NoDBTestCase): def setUp(self): super(TestImagePropsFilter, self).setUp() self.filt_cls = image_props_filter.ImagePropertiesFilter() def test_image_properties_filter_passes_same_inst_props_and_version(self): img_props = {'properties': {'_architecture': arch.X86_64, 'hypervisor_type': hv_type.KVM, 'vm_mode': vm_mode.HVM, 'hypervisor_version_requires': '>=6.0,<6.2' }} filter_properties = {'request_spec': {'image': img_props}} hypervisor_version = utils.convert_version_to_int('6.0.0') capabilities = {'supported_instances': [(arch.X86_64, hv_type.KVM, vm_mode.HVM)], 'hypervisor_version': hypervisor_version} host = fakes.FakeHostState('host1', 'node1', capabilities) self.assertTrue(self.filt_cls.host_passes(host, filter_properties)) def test_image_properties_filter_fails_different_inst_props(self): img_props = {'properties': {'architecture': arch.ARMV7, 'hypervisor_type': hv_type.QEMU, 'vm_mode': vm_mode.HVM}} filter_properties = {'request_spec': {'image': img_props}} hypervisor_version = utils.convert_version_to_int('6.0.0') capabilities = {'supported_instances': [(arch.X86_64, hv_type.KVM, vm_mode.HVM)], 'hypervisor_version': hypervisor_version} host = 
fakes.FakeHostState('host1', 'node1', capabilities) self.assertFalse(self.filt_cls.host_passes(host, filter_properties)) def test_image_properties_filter_fails_different_hyper_version(self): img_props = {'properties': {'architecture': arch.X86_64, 'hypervisor_type': hv_type.KVM, 'vm_mode': vm_mode.HVM, 'hypervisor_version_requires': '>=6.2'}} filter_properties = {'request_spec': {'image': img_props}} hypervisor_version = utils.convert_version_to_int('6.0.0') capabilities = {'enabled': True, 'supported_instances': [(arch.X86_64, hv_type.KVM, vm_mode.HVM)], 'hypervisor_version': hypervisor_version} host = fakes.FakeHostState('host1', 'node1', capabilities) self.assertFalse(self.filt_cls.host_passes(host, filter_properties)) def test_image_properties_filter_passes_partial_inst_props(self): img_props = {'properties': {'architecture': arch.X86_64, 'vm_mode': vm_mode.HVM}} filter_properties = {'request_spec': {'image': img_props}} hypervisor_version = utils.convert_version_to_int('6.0.0') capabilities = {'supported_instances': [(arch.X86_64, hv_type.KVM, vm_mode.HVM)], 'hypervisor_version': hypervisor_version} host = fakes.FakeHostState('host1', 'node1', capabilities) self.assertTrue(self.filt_cls.host_passes(host, filter_properties)) def test_image_properties_filter_fails_partial_inst_props(self): img_props = {'properties': {'architecture': arch.X86_64, 'vm_mode': vm_mode.HVM}} filter_properties = {'request_spec': {'image': img_props}} hypervisor_version = utils.convert_version_to_int('6.0.0') capabilities = {'supported_instances': [(arch.X86_64, hv_type.XEN, vm_mode.XEN)], 'hypervisor_version': hypervisor_version} host = fakes.FakeHostState('host1', 'node1', capabilities) self.assertFalse(self.filt_cls.host_passes(host, filter_properties)) def test_image_properties_filter_passes_without_inst_props(self): filter_properties = {'request_spec': {}} hypervisor_version = utils.convert_version_to_int('6.0.0') capabilities = {'supported_instances': [(arch.X86_64, hv_type.KVM, 
vm_mode.HVM)], 'hypervisor_version': hypervisor_version} host = fakes.FakeHostState('host1', 'node1', capabilities) self.assertTrue(self.filt_cls.host_passes(host, filter_properties)) def test_image_properties_filter_fails_without_host_props(self): img_props = {'properties': {'architecture': arch.X86_64, 'hypervisor_type': hv_type.KVM, 'vm_mode': vm_mode.HVM}} filter_properties = {'request_spec': {'image': img_props}} hypervisor_version = utils.convert_version_to_int('6.0.0') capabilities = {'enabled': True, 'hypervisor_version': hypervisor_version} host = fakes.FakeHostState('host1', 'node1', capabilities) self.assertFalse(self.filt_cls.host_passes(host, filter_properties)) def test_image_properties_filter_passes_without_hyper_version(self): img_props = {'properties': {'architecture': arch.X86_64, 'hypervisor_type': hv_type.KVM, 'vm_mode': vm_mode.HVM, 'hypervisor_version_requires': '>=6.0'}} filter_properties = {'request_spec': {'image': img_props}} capabilities = {'enabled': True, 'supported_instances': [(arch.X86_64, hv_type.KVM, vm_mode.HVM)]} host = fakes.FakeHostState('host1', 'node1', capabilities) self.assertTrue(self.filt_cls.host_passes(host, filter_properties)) def test_image_properties_filter_fails_with_unsupported_hyper_ver(self): img_props = {'properties': {'architecture': arch.X86_64, 'hypervisor_type': hv_type.KVM, 'vm_mode': vm_mode.HVM, 'hypervisor_version_requires': '>=6.0'}} filter_properties = {'request_spec': {'image': img_props}} capabilities = {'enabled': True, 'supported_instances': [(arch.X86_64, hv_type.KVM, vm_mode.HVM)], 'hypervisor_version': 5000} host = fakes.FakeHostState('host1', 'node1', capabilities) self.assertFalse(self.filt_cls.host_passes(host, filter_properties)) def test_image_properties_filter_pv_mode_compat(self): # if an old image has 'pv' for a vm_mode it should be treated as xen img_props = {'properties': {'vm_mode': 'pv'}} filter_properties = {'request_spec': {'image': img_props}} hypervisor_version = 
utils.convert_version_to_int('6.0.0') capabilities = {'supported_instances': [(arch.X86_64, hv_type.XEN, vm_mode.XEN)], 'hypervisor_version': hypervisor_version} host = fakes.FakeHostState('host1', 'node1', capabilities) self.assertTrue(self.filt_cls.host_passes(host, filter_properties)) def test_image_properties_filter_hvm_mode_compat(self): # if an old image has 'hv' for a vm_mode it should be treated as xen img_props = {'properties': {'vm_mode': 'hv'}} filter_properties = {'request_spec': {'image': img_props}} hypervisor_version = utils.convert_version_to_int('6.0.0') capabilities = {'supported_instances': [(arch.X86_64, hv_type.KVM, vm_mode.HVM)], 'hypervisor_version': hypervisor_version} host = fakes.FakeHostState('host1', 'node1', capabilities) self.assertTrue(self.filt_cls.host_passes(host, filter_properties)) def test_image_properties_filter_xen_arch_compat(self): # if an old image has 'x86_32' for arch it should be treated as i686 img_props = {'properties': {'architecture': 'x86_32'}} filter_properties = {'request_spec': {'image': img_props}} hypervisor_version = utils.convert_version_to_int('6.0.0') capabilities = {'supported_instances': [(arch.I686, hv_type.KVM, vm_mode.HVM)], 'hypervisor_version': hypervisor_version} host = fakes.FakeHostState('host1', 'node1', capabilities) self.assertTrue(self.filt_cls.host_passes(host, filter_properties)) def test_image_properties_filter_xen_hv_type_compat(self): # if an old image has 'xapi' for hv_type it should be treated as xen img_props = {'properties': {'hypervisor_type': 'xapi'}} filter_properties = {'request_spec': {'image': img_props}} hypervisor_version = utils.convert_version_to_int('6.0.0') capabilities = {'supported_instances': [(arch.I686, hv_type.XEN, vm_mode.HVM)], 'hypervisor_version': hypervisor_version} host = fakes.FakeHostState('host1', 'node1', capabilities) self.assertTrue(self.filt_cls.host_passes(host, filter_properties)) def test_image_properties_filter_baremetal_vmmode_compat(self): # if an 
old image has 'baremetal' for vmmode it should be # treated as hvm img_props = {'properties': {'vm_mode': 'baremetal'}} filter_properties = {'request_spec': {'image': img_props}} hypervisor_version = utils.convert_version_to_int('6.0.0') capabilities = {'supported_instances': [(arch.I686, hv_type.BAREMETAL, vm_mode.HVM)], 'hypervisor_version': hypervisor_version} host = fakes.FakeHostState('host1', 'node1', capabilities) self.assertTrue(self.filt_cls.host_passes(host, filter_properties))
apache-2.0
scs/uclinux
user/python/python-2.4.4/Mac/Modules/launch/launchscan.py
5
2914
# Scan an Apple header file, generating a Python file of generator calls. import sys import os from bgenlocations import TOOLBOXDIR, BGENDIR sys.path.append(BGENDIR) from scantools import Scanner LONG = "LaunchServices" SHORT = "launch" OBJECT = "NOTUSED" def main(): input = LONG + ".h" output = SHORT + "gen.py" defsoutput = TOOLBOXDIR + LONG + ".py" scanner = MyScanner(input, output, defsoutput) scanner.scan() scanner.close() scanner.gentypetest(SHORT+"typetest.py") print "=== Testing definitions output code ===" execfile(defsoutput, {}, {}) print "=== Done scanning and generating, now importing the generated code... ===" exec "import " + SHORT + "support" print "=== Done. It's up to you to compile it now! ===" class MyScanner(Scanner): def destination(self, type, name, arglist): classname = "Function" listname = "functions" if arglist: t, n, m = arglist[0] # This is non-functional today if t == OBJECT and m == "InMode": classname = "Method" listname = "methods" return classname, listname def writeinitialdefs(self): self.defsfile.write("def FOUR_CHAR_CODE(x): return x\n") self.defsfile.write("from Carbon.Files import *\n") self.defsfile.write("kLSRequestAllInfo = -1\n") self.defsfile.write("kLSRolesAll = -1\n") self.defsfile.write("kLSUnknownType = FOUR_CHAR_CODE('\\0\\0\\0\\0')\n") self.defsfile.write("kLSUnknownCreator = FOUR_CHAR_CODE('\\0\\0\\0\\0')\n") self.defsfile.write("kLSInvalidExtensionIndex = -1\n") def makeblacklistnames(self): return [ "LSInit", "LSTerm", "kLSRequestAllInfo", "kLSRolesAll", "kLSInvalidExtensionIndex", "kLSUnknownType", "kLSUnknownCreator" ] def makeblacklisttypes(self): return [ "LSLaunchFSRefSpec_ptr", "LSLaunchURLSpec_ptr", ] def makerepairinstructions(self): return [ # LSGetApplicationForInfo ([('CFStringRef', 'inExtension', 'InMode')], [('OptCFStringRef', 'inExtension', 'InMode')]), # LSFindApplicationForInfo ([('CFStringRef', 'inBundleID', 'InMode')], [('OptCFStringRef', 'inBundleID', 'InMode')]), ([('CFStringRef', 'inName', 
'InMode')], [('OptCFStringRef', 'inName', 'InMode')]), # Unicode filenames passed as length, buffer. LSGetExtensionInfo ([('UniCharCount', '*', 'InMode'), ('UniChar_ptr', '*', 'InMode')], [('UnicodeReverseInBuffer', '*', 'InMode')] ), ] if __name__ == "__main__": main()
gpl-2.0
youssef-poisson/angular
node_modules/npm/node_modules/node-gyp/gyp/pylib/gyp/__init__.py
289
21425
#!/usr/bin/env python # Copyright (c) 2012 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. import copy import gyp.input import optparse import os.path import re import shlex import sys import traceback from gyp.common import GypError # Default debug modes for GYP debug = {} # List of "official" debug modes, but you can use anything you like. DEBUG_GENERAL = 'general' DEBUG_VARIABLES = 'variables' DEBUG_INCLUDES = 'includes' def DebugOutput(mode, message, *args): if 'all' in gyp.debug or mode in gyp.debug: ctx = ('unknown', 0, 'unknown') try: f = traceback.extract_stack(limit=2) if f: ctx = f[0][:3] except: pass if args: message %= args print '%s:%s:%d:%s %s' % (mode.upper(), os.path.basename(ctx[0]), ctx[1], ctx[2], message) def FindBuildFiles(): extension = '.gyp' files = os.listdir(os.getcwd()) build_files = [] for file in files: if file.endswith(extension): build_files.append(file) return build_files def Load(build_files, format, default_variables={}, includes=[], depth='.', params=None, check=False, circular_check=True): """ Loads one or more specified build files. default_variables and includes will be copied before use. Returns the generator for the specified format and the data returned by loading the specified build files. """ if params is None: params = {} if '-' in format: format, params['flavor'] = format.split('-', 1) default_variables = copy.copy(default_variables) # Default variables provided by this program and its modules should be # named WITH_CAPITAL_LETTERS to provide a distinct "best practice" namespace, # avoiding collisions with user and automatic variables. default_variables['GENERATOR'] = format default_variables['GENERATOR_FLAVOR'] = params.get('flavor', '') # Format can be a custom python file, or by default the name of a module # within gyp.generator. 
if format.endswith('.py'): generator_name = os.path.splitext(format)[0] path, generator_name = os.path.split(generator_name) # Make sure the path to the custom generator is in sys.path # Don't worry about removing it once we are done. Keeping the path # to each generator that is used in sys.path is likely harmless and # arguably a good idea. path = os.path.abspath(path) if path not in sys.path: sys.path.insert(0, path) else: generator_name = 'gyp.generator.' + format # These parameters are passed in order (as opposed to by key) # because ActivePython cannot handle key parameters to __import__. generator = __import__(generator_name, globals(), locals(), generator_name) for (key, val) in generator.generator_default_variables.items(): default_variables.setdefault(key, val) # Give the generator the opportunity to set additional variables based on # the params it will receive in the output phase. if getattr(generator, 'CalculateVariables', None): generator.CalculateVariables(default_variables, params) # Give the generator the opportunity to set generator_input_info based on # the params it will receive in the output phase. if getattr(generator, 'CalculateGeneratorInputInfo', None): generator.CalculateGeneratorInputInfo(params) # Fetch the generator specific info that gets fed to input, we use getattr # so we can default things and the generators only have to provide what # they need. 
generator_input_info = { 'non_configuration_keys': getattr(generator, 'generator_additional_non_configuration_keys', []), 'path_sections': getattr(generator, 'generator_additional_path_sections', []), 'extra_sources_for_rules': getattr(generator, 'generator_extra_sources_for_rules', []), 'generator_supports_multiple_toolsets': getattr(generator, 'generator_supports_multiple_toolsets', False), 'generator_wants_static_library_dependencies_adjusted': getattr(generator, 'generator_wants_static_library_dependencies_adjusted', True), 'generator_wants_sorted_dependencies': getattr(generator, 'generator_wants_sorted_dependencies', False), 'generator_filelist_paths': getattr(generator, 'generator_filelist_paths', None), } # Process the input specific to this generator. result = gyp.input.Load(build_files, default_variables, includes[:], depth, generator_input_info, check, circular_check, params['parallel'], params['root_targets']) return [generator] + result def NameValueListToDict(name_value_list): """ Takes an array of strings of the form 'NAME=VALUE' and creates a dictionary of the pairs. If a string is simply NAME, then the value in the dictionary is set to True. If VALUE can be converted to an integer, it is. """ result = { } for item in name_value_list: tokens = item.split('=', 1) if len(tokens) == 2: # If we can make it an int, use that, otherwise, use the string. try: token_value = int(tokens[1]) except ValueError: token_value = tokens[1] # Set the variable to the supplied value. result[tokens[0]] = token_value else: # No value supplied, treat it as a boolean and set it. result[tokens[0]] = True return result def ShlexEnv(env_name): flags = os.environ.get(env_name, []) if flags: flags = shlex.split(flags) return flags def FormatOpt(opt, value): if opt.startswith('--'): return '%s=%s' % (opt, value) return opt + value def RegenerateAppendFlag(flag, values, predicate, env_name, options): """Regenerate a list of command line flags, for an option of action='append'. 
The |env_name|, if given, is checked in the environment and used to generate an initial list of options, then the options that were specified on the command line (given in |values|) are appended. This matches the handling of environment variables and command line flags where command line flags override the environment, while not requiring the environment to be set when the flags are used again. """ flags = [] if options.use_environment and env_name: for flag_value in ShlexEnv(env_name): value = FormatOpt(flag, predicate(flag_value)) if value in flags: flags.remove(value) flags.append(value) if values: for flag_value in values: flags.append(FormatOpt(flag, predicate(flag_value))) return flags def RegenerateFlags(options): """Given a parsed options object, and taking the environment variables into account, returns a list of flags that should regenerate an equivalent options object (even in the absence of the environment variables.) Any path options will be normalized relative to depth. The format flag is not included, as it is assumed the calling generator will set that as appropriate. """ def FixPath(path): path = gyp.common.FixIfRelativePath(path, options.depth) if not path: return os.path.curdir return path def Noop(value): return value # We always want to ignore the environment when regenerating, to avoid # duplicate or changed flags in the environment at the time of regeneration. flags = ['--ignore-environment'] for name, metadata in options._regeneration_metadata.iteritems(): opt = metadata['opt'] value = getattr(options, name) value_predicate = metadata['type'] == 'path' and FixPath or Noop action = metadata['action'] env_name = metadata['env_name'] if action == 'append': flags.extend(RegenerateAppendFlag(opt, value, value_predicate, env_name, options)) elif action in ('store', None): # None is a synonym for 'store'. 
if value: flags.append(FormatOpt(opt, value_predicate(value))) elif options.use_environment and env_name and os.environ.get(env_name): flags.append(FormatOpt(opt, value_predicate(os.environ.get(env_name)))) elif action in ('store_true', 'store_false'): if ((action == 'store_true' and value) or (action == 'store_false' and not value)): flags.append(opt) elif options.use_environment and env_name: print >>sys.stderr, ('Warning: environment regeneration unimplemented ' 'for %s flag %r env_name %r' % (action, opt, env_name)) else: print >>sys.stderr, ('Warning: regeneration unimplemented for action %r ' 'flag %r' % (action, opt)) return flags class RegeneratableOptionParser(optparse.OptionParser): def __init__(self): self.__regeneratable_options = {} optparse.OptionParser.__init__(self) def add_option(self, *args, **kw): """Add an option to the parser. This accepts the same arguments as OptionParser.add_option, plus the following: regenerate: can be set to False to prevent this option from being included in regeneration. env_name: name of environment variable that additional values for this option come from. type: adds type='path', to tell the regenerator that the values of this option need to be made relative to options.depth """ env_name = kw.pop('env_name', None) if 'dest' in kw and kw.pop('regenerate', True): dest = kw['dest'] # The path type is needed for regenerating, for optparse we can just treat # it as a string. type = kw.get('type') if type == 'path': kw['type'] = 'string' self.__regeneratable_options[dest] = { 'action': kw.get('action'), 'type': type, 'env_name': env_name, 'opt': args[0], } optparse.OptionParser.add_option(self, *args, **kw) def parse_args(self, *args): values, args = optparse.OptionParser.parse_args(self, *args) values._regeneration_metadata = self.__regeneratable_options return values, args def gyp_main(args): my_name = os.path.basename(sys.argv[0]) parser = RegeneratableOptionParser() usage = 'usage: %s [options ...] 
[build_file ...]' parser.set_usage(usage.replace('%s', '%prog')) parser.add_option('--build', dest='configs', action='append', help='configuration for build after project generation') parser.add_option('--check', dest='check', action='store_true', help='check format of gyp files') parser.add_option('--config-dir', dest='config_dir', action='store', env_name='GYP_CONFIG_DIR', default=None, help='The location for configuration files like ' 'include.gypi.') parser.add_option('-d', '--debug', dest='debug', metavar='DEBUGMODE', action='append', default=[], help='turn on a debugging ' 'mode for debugging GYP. Supported modes are "variables", ' '"includes" and "general" or "all" for all of them.') parser.add_option('-D', dest='defines', action='append', metavar='VAR=VAL', env_name='GYP_DEFINES', help='sets variable VAR to value VAL') parser.add_option('--depth', dest='depth', metavar='PATH', type='path', help='set DEPTH gyp variable to a relative path to PATH') parser.add_option('-f', '--format', dest='formats', action='append', env_name='GYP_GENERATORS', regenerate=False, help='output formats to generate') parser.add_option('-G', dest='generator_flags', action='append', default=[], metavar='FLAG=VAL', env_name='GYP_GENERATOR_FLAGS', help='sets generator flag FLAG to VAL') parser.add_option('--generator-output', dest='generator_output', action='store', default=None, metavar='DIR', type='path', env_name='GYP_GENERATOR_OUTPUT', help='puts generated build files under DIR') parser.add_option('--ignore-environment', dest='use_environment', action='store_false', default=True, regenerate=False, help='do not read options from environment variables') parser.add_option('-I', '--include', dest='includes', action='append', metavar='INCLUDE', type='path', help='files to include in all loaded .gyp files') # --no-circular-check disables the check for circular relationships between # .gyp files. 
These relationships should not exist, but they've only been # observed to be harmful with the Xcode generator. Chromium's .gyp files # currently have some circular relationships on non-Mac platforms, so this # option allows the strict behavior to be used on Macs and the lenient # behavior to be used elsewhere. # TODO(mark): Remove this option when http://crbug.com/35878 is fixed. parser.add_option('--no-circular-check', dest='circular_check', action='store_false', default=True, regenerate=False, help="don't check for circular relationships between files") parser.add_option('--no-parallel', action='store_true', default=False, help='Disable multiprocessing') parser.add_option('-S', '--suffix', dest='suffix', default='', help='suffix to add to generated files') parser.add_option('--toplevel-dir', dest='toplevel_dir', action='store', default=None, metavar='DIR', type='path', help='directory to use as the root of the source tree') parser.add_option('-R', '--root-target', dest='root_targets', action='append', metavar='TARGET', help='include only TARGET and its deep dependencies') options, build_files_arg = parser.parse_args(args) build_files = build_files_arg # Set up the configuration directory (defaults to ~/.gyp) if not options.config_dir: home = None home_dot_gyp = None if options.use_environment: home_dot_gyp = os.environ.get('GYP_CONFIG_DIR', None) if home_dot_gyp: home_dot_gyp = os.path.expanduser(home_dot_gyp) if not home_dot_gyp: home_vars = ['HOME'] if sys.platform in ('cygwin', 'win32'): home_vars.append('USERPROFILE') for home_var in home_vars: home = os.getenv(home_var) if home != None: home_dot_gyp = os.path.join(home, '.gyp') if not os.path.exists(home_dot_gyp): home_dot_gyp = None else: break else: home_dot_gyp = os.path.expanduser(options.config_dir) if home_dot_gyp and not os.path.exists(home_dot_gyp): home_dot_gyp = None if not options.formats: # If no format was given on the command line, then check the env variable. 
generate_formats = [] if options.use_environment: generate_formats = os.environ.get('GYP_GENERATORS', []) if generate_formats: generate_formats = re.split(r'[\s,]', generate_formats) if generate_formats: options.formats = generate_formats else: # Nothing in the variable, default based on platform. if sys.platform == 'darwin': options.formats = ['xcode'] elif sys.platform in ('win32', 'cygwin'): options.formats = ['msvs'] else: options.formats = ['make'] if not options.generator_output and options.use_environment: g_o = os.environ.get('GYP_GENERATOR_OUTPUT') if g_o: options.generator_output = g_o options.parallel = not options.no_parallel for mode in options.debug: gyp.debug[mode] = 1 # Do an extra check to avoid work when we're not debugging. if DEBUG_GENERAL in gyp.debug: DebugOutput(DEBUG_GENERAL, 'running with these options:') for option, value in sorted(options.__dict__.items()): if option[0] == '_': continue if isinstance(value, basestring): DebugOutput(DEBUG_GENERAL, " %s: '%s'", option, value) else: DebugOutput(DEBUG_GENERAL, " %s: %s", option, value) if not build_files: build_files = FindBuildFiles() if not build_files: raise GypError((usage + '\n\n%s: error: no build_file') % (my_name, my_name)) # TODO(mark): Chromium-specific hack! # For Chromium, the gyp "depth" variable should always be a relative path # to Chromium's top-level "src" directory. If no depth variable was set # on the command line, try to find a "src" directory by looking at the # absolute path to each build file's directory. The first "src" component # found will be treated as though it were the path used for --depth. 
if not options.depth: for build_file in build_files: build_file_dir = os.path.abspath(os.path.dirname(build_file)) build_file_dir_components = build_file_dir.split(os.path.sep) components_len = len(build_file_dir_components) for index in xrange(components_len - 1, -1, -1): if build_file_dir_components[index] == 'src': options.depth = os.path.sep.join(build_file_dir_components) break del build_file_dir_components[index] # If the inner loop found something, break without advancing to another # build file. if options.depth: break if not options.depth: raise GypError('Could not automatically locate src directory. This is' 'a temporary Chromium feature that will be removed. Use' '--depth as a workaround.') # If toplevel-dir is not set, we assume that depth is the root of our source # tree. if not options.toplevel_dir: options.toplevel_dir = options.depth # -D on the command line sets variable defaults - D isn't just for define, # it's for default. Perhaps there should be a way to force (-F?) a # variable's value so that it can't be overridden by anything else. cmdline_default_variables = {} defines = [] if options.use_environment: defines += ShlexEnv('GYP_DEFINES') if options.defines: defines += options.defines cmdline_default_variables = NameValueListToDict(defines) if DEBUG_GENERAL in gyp.debug: DebugOutput(DEBUG_GENERAL, "cmdline_default_variables: %s", cmdline_default_variables) # Set up includes. includes = [] # If ~/.gyp/include.gypi exists, it'll be forcibly included into every # .gyp file that's loaded, before anything else is included. if home_dot_gyp != None: default_include = os.path.join(home_dot_gyp, 'include.gypi') if os.path.exists(default_include): print 'Using overrides found in ' + default_include includes.append(default_include) # Command-line --include files come after the default include. 
if options.includes: includes.extend(options.includes) # Generator flags should be prefixed with the target generator since they # are global across all generator runs. gen_flags = [] if options.use_environment: gen_flags += ShlexEnv('GYP_GENERATOR_FLAGS') if options.generator_flags: gen_flags += options.generator_flags generator_flags = NameValueListToDict(gen_flags) if DEBUG_GENERAL in gyp.debug.keys(): DebugOutput(DEBUG_GENERAL, "generator_flags: %s", generator_flags) # Generate all requested formats (use a set in case we got one format request # twice) for format in set(options.formats): params = {'options': options, 'build_files': build_files, 'generator_flags': generator_flags, 'cwd': os.getcwd(), 'build_files_arg': build_files_arg, 'gyp_binary': sys.argv[0], 'home_dot_gyp': home_dot_gyp, 'parallel': options.parallel, 'root_targets': options.root_targets, 'target_arch': cmdline_default_variables.get('target_arch', '')} # Start with the default variables from the command line. [generator, flat_list, targets, data] = Load( build_files, format, cmdline_default_variables, includes, options.depth, params, options.check, options.circular_check) # TODO(mark): Pass |data| for now because the generator needs a list of # build files that came in. In the future, maybe it should just accept # a list, and not the whole data dict. # NOTE: flat_list is the flattened dependency graph specifying the order # that targets may be built. Build systems that operate serially or that # need to have dependencies defined before dependents reference them should # generate targets in the order specified in flat_list. 
generator.GenerateOutput(flat_list, targets, data, params) if options.configs: valid_configs = targets[flat_list[0]]['configurations'].keys() for conf in options.configs: if conf not in valid_configs: raise GypError('Invalid config specified via --build: %s' % conf) generator.PerformBuild(data, options.configs, params) # Done return 0 def main(args): try: return gyp_main(args) except GypError, e: sys.stderr.write("gyp: %s\n" % e) return 1 # NOTE: setuptools generated console_scripts calls function with no arguments def script_main(): return main(sys.argv[1:]) if __name__ == '__main__': sys.exit(script_main())
gpl-2.0
kenshay/ImageScript
Script_Runner/PYTHON/Lib/site-packages/chardet/sbcsgroupprober.py
273
3546
######################## BEGIN LICENSE BLOCK ######################## # The Original Code is Mozilla Universal charset detector code. # # The Initial Developer of the Original Code is # Netscape Communications Corporation. # Portions created by the Initial Developer are Copyright (C) 2001 # the Initial Developer. All Rights Reserved. # # Contributor(s): # Mark Pilgrim - port to Python # Shy Shalom - original C code # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### from .charsetgroupprober import CharSetGroupProber from .sbcharsetprober import SingleByteCharSetProber from .langcyrillicmodel import (Win1251CyrillicModel, Koi8rModel, Latin5CyrillicModel, MacCyrillicModel, Ibm866Model, Ibm855Model) from .langgreekmodel import Latin7GreekModel, Win1253GreekModel from .langbulgarianmodel import Latin5BulgarianModel, Win1251BulgarianModel # from .langhungarianmodel import Latin2HungarianModel, Win1250HungarianModel from .langthaimodel import TIS620ThaiModel from .langhebrewmodel import Win1255HebrewModel from .hebrewprober import HebrewProber from .langturkishmodel import Latin5TurkishModel class SBCSGroupProber(CharSetGroupProber): def __init__(self): super(SBCSGroupProber, self).__init__() self.probers = [ 
SingleByteCharSetProber(Win1251CyrillicModel), SingleByteCharSetProber(Koi8rModel), SingleByteCharSetProber(Latin5CyrillicModel), SingleByteCharSetProber(MacCyrillicModel), SingleByteCharSetProber(Ibm866Model), SingleByteCharSetProber(Ibm855Model), SingleByteCharSetProber(Latin7GreekModel), SingleByteCharSetProber(Win1253GreekModel), SingleByteCharSetProber(Latin5BulgarianModel), SingleByteCharSetProber(Win1251BulgarianModel), # TODO: Restore Hungarian encodings (iso-8859-2 and windows-1250) # after we retrain model. # SingleByteCharSetProber(Latin2HungarianModel), # SingleByteCharSetProber(Win1250HungarianModel), SingleByteCharSetProber(TIS620ThaiModel), SingleByteCharSetProber(Latin5TurkishModel), ] hebrew_prober = HebrewProber() logical_hebrew_prober = SingleByteCharSetProber(Win1255HebrewModel, False, hebrew_prober) visual_hebrew_prober = SingleByteCharSetProber(Win1255HebrewModel, True, hebrew_prober) hebrew_prober.set_model_probers(logical_hebrew_prober, visual_hebrew_prober) self.probers.extend([hebrew_prober, logical_hebrew_prober, visual_hebrew_prober]) self.reset()
gpl-3.0
phenoxim/nova
nova/tests/unit/conductor/tasks/test_base.py
54
1604
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

import mock

from nova.conductor.tasks import base
from nova import test


class FakeTask(base.TaskBase):
    """Minimal TaskBase subclass whose _execute can succeed or fail on demand."""

    def __init__(self, context, instance, fail=False):
        super(FakeTask, self).__init__(context, instance)
        # When True, _execute raises so tests can exercise the rollback path.
        self.fail = fail

    def _execute(self):
        """Raise when configured to fail; otherwise succeed silently."""
        if self.fail:
            raise Exception


class TaskBaseTestCase(test.NoDBTestCase):
    """Tests for the execute()/rollback() wrapper behavior of TaskBase."""

    def setUp(self):
        super(TaskBaseTestCase, self).setUp()
        self.task = FakeTask(mock.MagicMock(), mock.MagicMock())

    @mock.patch.object(FakeTask, 'rollback')
    def test_wrapper_exception(self, fake_rollback):
        """A failing _execute triggers rollback AND re-raises the error.

        The previous version swallowed the exception with try/except-pass,
        so it never verified that execute() propagates the failure to the
        caller; assertRaises pins that part of the contract too.
        """
        self.task.fail = True
        self.assertRaises(Exception, self.task.execute)
        fake_rollback.assert_called_once_with()

    @mock.patch.object(FakeTask, 'rollback')
    def test_wrapper_no_exception(self, fake_rollback):
        """A successful _execute does not trigger rollback."""
        # Call execute() directly: an unexpected exception should fail the
        # test loudly instead of being silently swallowed as before.
        self.task.execute()
        self.assertFalse(fake_rollback.called)
apache-2.0
mrquim/repository.mrquim
script.module.youtube.dl/lib/youtube_dl/extractor/videopress.py
48
3010
# coding: utf-8
from __future__ import unicode_literals

import re

from .common import InfoExtractor
from ..compat import compat_str
from ..utils import (
    determine_ext,
    float_or_none,
    parse_age_limit,
    qualities,
    random_birthday,
    try_get,
    unified_timestamp,
    urljoin,
)


class VideoPressIE(InfoExtractor):
    """Extractor for videopress.com embed pages.

    Resolves an embed URL to the video metadata and available formats via
    the public WordPress REST API.
    """

    _VALID_URL = r'https?://videopress\.com/embed/(?P<id>[\da-zA-Z]+)'
    _TESTS = [{
        'url': 'https://videopress.com/embed/kUJmAcSf',
        'md5': '706956a6c875873d51010921310e4bc6',
        'info_dict': {
            'id': 'kUJmAcSf',
            'ext': 'mp4',
            'title': 'VideoPress Demo',
            'thumbnail': r're:^https?://.*\.jpg',
            'duration': 634.6,
            'timestamp': 1434983935,
            'upload_date': '20150622',
            'age_limit': 0,
        },
    }, {
        # 17+, requires birth_* params
        'url': 'https://videopress.com/embed/iH3gstfZ',
        'only_matching': True,
    }]

    @staticmethod
    def _extract_urls(webpage):
        # Collect every videopress embed iframe URL found in an arbitrary
        # HTML page, so host-page extractors can delegate to this IE.
        return re.findall(
            r'<iframe[^>]+src=["\']((?:https?://)?videopress\.com/embed/[\da-zA-Z]+)',
            webpage)

    def _real_extract(self, url):
        """Fetch metadata from the WordPress API and build the info dict."""
        video_id = self._match_id(url)

        # Random birth_* query parameters satisfy the API's age gate for
        # age-restricted videos (see the second test case above).
        query = random_birthday('birth_year', 'birth_month', 'birth_day')
        video = self._download_json(
            'https://public-api.wordpress.com/rest/v1.1/videos/%s'
            % video_id, video_id, query=query)

        # Title is mandatory: a KeyError here aborts extraction, which is
        # the intended behavior when the API response is unusable.
        title = video['title']

        def base_url(scheme):
            # Safely dig out file_url_base[scheme]; returns None if absent
            # or not a string.
            return try_get(
                video, lambda x: x['file_url_base'][scheme], compat_str)

        # Prefer the https base; fall back to http.  Note this rebinds the
        # helper's name to its (string or None) result -- the function is
        # not needed afterwards.
        base_url = base_url('https') or base_url('http')

        # Quality ladder, worst to best; 'original' below outranks all.
        QUALITIES = ('std', 'dvd', 'hd')
        quality = qualities(QUALITIES)

        formats = []
        for format_id, f in video['files'].items():
            if not isinstance(f, dict):
                # Skip non-dict entries (e.g. scalar bookkeeping fields).
                continue
            for ext, path in f.items():
                # Only mp4/ogg keys are media paths; other keys in the same
                # dict are ignored.
                if ext in ('mp4', 'ogg'):
                    formats.append({
                        'url': urljoin(base_url, path),
                        'format_id': '%s-%s' % (format_id, ext),
                        'ext': determine_ext(path, ext),
                        'quality': quality(format_id),
                    })
        original_url = try_get(video, lambda x: x['original'], compat_str)
        if original_url:
            formats.append({
                'url': original_url,
                'format_id': 'original',
                # len(QUALITIES) is strictly greater than any ladder rank,
                # so 'original' always sorts best.
                'quality': len(QUALITIES),
            })
        self._sort_formats(formats)

        return {
            'id': video_id,
            'title': title,
            'description': video.get('description'),
            'thumbnail': video.get('poster'),
            # Scale factor 1000: the API presumably reports duration in
            # milliseconds -- TODO confirm against the API docs.
            'duration': float_or_none(video.get('duration'), 1000),
            'timestamp': unified_timestamp(video.get('upload_date')),
            'age_limit': parse_age_limit(video.get('rating')),
            'formats': formats,
        }
gpl-2.0
ltilve/ChromiumGStreamerBackend
third_party/pycoverage/coverage/templite.py
160
6868
"""A simple Python template renderer, for a nano-subset of Django syntax."""

# Coincidentally named the same as http://code.activestate.com/recipes/496702/

import re

from coverage.backward import set       # pylint: disable=W0622


class CodeBuilder(object):
    """Build source code conveniently.

    Accumulates indented source lines (and nested sub-builders) which are
    later joined into one Python source string by __str__.
    """

    def __init__(self, indent=0):
        # Flat list of strings and nested CodeBuilder sections, in emit order.
        self.code = []
        # Current indentation, in spaces, applied to each added line.
        self.indent_amount = indent

    def add_line(self, line):
        """Add a line of source to the code.

        Don't include indentations or newlines.

        """
        self.code.append(" " * self.indent_amount)
        self.code.append(line)
        self.code.append("\n")

    def add_section(self):
        """Add a section, a sub-CodeBuilder."""
        # The sub-builder is stringified in place later, so lines can be
        # inserted at this point of the output after subsequent lines exist.
        sect = CodeBuilder(self.indent_amount)
        self.code.append(sect)
        return sect

    def indent(self):
        """Increase the current indent for following lines."""
        self.indent_amount += 4

    def dedent(self):
        """Decrease the current indent for following lines."""
        self.indent_amount -= 4

    def __str__(self):
        # str() on nested CodeBuilders recurses, flattening sections.
        return "".join([str(c) for c in self.code])

    def get_function(self, fn_name):
        """Compile the code, and return the function `fn_name`."""
        # Only valid at top level: generated source must start at column 0.
        assert self.indent_amount == 0
        g = {}
        code_text = str(self)
        exec(code_text, g)
        return g[fn_name]


class Templite(object):
    """A simple template renderer, for a nano-subset of Django syntax.

    Supported constructs are extended variable access::

        {{var.modifer.modifier|filter|filter}}

    loops::

        {% for var in list %}...{% endfor %}

    and ifs::

        {% if var %}...{% endif %}

    Comments are within curly-hash markers::

        {# This will be ignored #}

    Construct a Templite with the template text, then use `render` against a
    dictionary context to create a finished string.

    """
    def __init__(self, text, *contexts):
        """Construct a Templite with the given `text`.

        `contexts` are dictionaries of values to use for future renderings.
        These are good for filters and global values.

        """
        self.text = text
        # Later contexts override earlier ones.
        self.context = {}
        for context in contexts:
            self.context.update(context)

        # We construct a function in source form, then compile it and hold onto
        # it, and execute it to render the template.
        code = CodeBuilder()
        code.add_line("def render(ctx, dot):")
        code.indent()
        # Placeholder section: variable-unpacking lines are filled in at the
        # end, once all_vars/loop_vars are known, but emitted here at the top.
        vars_code = code.add_section()
        self.all_vars = set()
        self.loop_vars = set()
        code.add_line("result = []")
        # Short local aliases keep the generated code compact and fast:
        # a=append, e=extend, s=str.
        code.add_line("a = result.append")
        code.add_line("e = result.extend")
        code.add_line("s = str")

        # Pending output expressions, flushed as one append/extend call.
        buffered = []
        def flush_output():
            """Force `buffered` to the code builder."""
            if len(buffered) == 1:
                code.add_line("a(%s)" % buffered[0])
            elif len(buffered) > 1:
                code.add_line("e([%s])" % ",".join(buffered))
            del buffered[:]

        # Split the text to form a list of tokens.
        # The capturing group keeps the {{..}}/{%..%}/{#..#} delimiters as
        # tokens; (?s) lets them span newlines.
        toks = re.split(r"(?s)({{.*?}}|{%.*?%}|{#.*?#})", text)

        # Stack of open block tags ('if'/'for') for end-tag matching.
        ops_stack = []
        for tok in toks:
            if tok.startswith('{{'):
                # An expression to evaluate.
                buffered.append("s(%s)" % self.expr_code(tok[2:-2].strip()))
            elif tok.startswith('{#'):
                # Comment: ignore it and move on.
                continue
            elif tok.startswith('{%'):
                # Action tag: split into words and parse further.
                flush_output()
                words = tok[2:-2].strip().split()
                if words[0] == 'if':
                    # An if statement: evaluate the expression to determine if.
                    assert len(words) == 2
                    ops_stack.append('if')
                    code.add_line("if %s:" % self.expr_code(words[1]))
                    code.indent()
                elif words[0] == 'for':
                    # A loop: iterate over expression result.
                    assert len(words) == 4 and words[2] == 'in'
                    ops_stack.append('for')
                    # Loop vars are bound by the generated for, so they must
                    # not also be unpacked from ctx (see all_vars - loop_vars).
                    self.loop_vars.add(words[1])
                    code.add_line(
                        "for c_%s in %s:" % (
                            words[1],
                            self.expr_code(words[3])
                        )
                    )
                    code.indent()
                elif words[0].startswith('end'):
                    # Endsomething.  Pop the ops stack
                    end_what = words[0][3:]
                    if ops_stack[-1] != end_what:
                        raise SyntaxError("Mismatched end tag: %r" % end_what)
                    ops_stack.pop()
                    code.dedent()
                else:
                    raise SyntaxError("Don't understand tag: %r" % words[0])
            else:
                # Literal content.  If it isn't empty, output it.
                if tok:
                    buffered.append("%r" % tok)
        flush_output()

        # Fill the top-of-function section: unpack each context variable into
        # a c_-prefixed local.  Loop vars are excluded -- the for binds them.
        for var_name in self.all_vars - self.loop_vars:
            vars_code.add_line("c_%s = ctx[%r]" % (var_name, var_name))

        if ops_stack:
            raise SyntaxError("Unmatched action tag: %r" % ops_stack[-1])

        code.add_line("return ''.join(result)")
        code.dedent()
        # Compile the accumulated source once; render() just calls it.
        self.render_function = code.get_function('render')

    def expr_code(self, expr):
        """Generate a Python expression for `expr`.

        `a.b.c|f` becomes `c_f(dot(c_a, 'b', 'c'))`: pipes apply filters
        outermost-last, dots defer to do_dots at render time, and plain
        names map to the c_-prefixed locals unpacked from the context.
        """
        if "|" in expr:
            pipes = expr.split("|")
            code = self.expr_code(pipes[0])
            for func in pipes[1:]:
                # Filter names are looked up in the context like variables.
                self.all_vars.add(func)
                code = "c_%s(%s)" % (func, code)
        elif "." in expr:
            dots = expr.split(".")
            code = self.expr_code(dots[0])
            args = [repr(d) for d in dots[1:]]
            # Attribute-vs-item access is resolved at render time by do_dots.
            code = "dot(%s, %s)" % (code, ", ".join(args))
        else:
            self.all_vars.add(expr)
            code = "c_%s" % expr
        return code

    def render(self, context=None):
        """Render this template by applying it to `context`.

        `context` is a dictionary of values to use in this rendering.

        """
        # Make the complete context we'll use.
        ctx = dict(self.context)
        if context:
            ctx.update(context)
        return self.render_function(ctx, self.do_dots)

    def do_dots(self, value, *dots):
        """Evaluate dotted expressions at runtime.

        Each dot tries attribute access first, then item access, and calls
        the result if it is callable (so `{{user.name}}` works for both
        attributes and zero-argument methods).
        """
        for dot in dots:
            try:
                value = getattr(value, dot)
            except AttributeError:
                value = value[dot]
            if hasattr(value, '__call__'):
                value = value()
        return value
bsd-3-clause
chokribr/inveniotest
modules/bibrank/lib/bibrank.py
13
11592
## -*- mode: python; coding: utf-8; -*- ## ## This file is part of Invenio. ## Copyright (C) 2007, 2008, 2010, 2011 CERN. ## ## Invenio is free software; you can redistribute it and/or ## modify it under the terms of the GNU General Public License as ## published by the Free Software Foundation; either version 2 of the ## License, or (at your option) any later version. ## ## Invenio is distributed in the hope that it will be useful, but ## WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ## General Public License for more details. ## ## You should have received a copy of the GNU General Public License ## along with Invenio; if not, write to the Free Software Foundation, Inc., ## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. """ BibRank ranking daemon. Usage: bibrank [options] Ranking examples: bibrank -wjif -a --id=0-30000,30001-860000 --verbose=9 bibrank -wjif -d --modified='2002-10-27 13:57:26' bibrank -wwrd --rebalance --collection=Articles bibrank -wwrd -a -i 234-250,293,300-500 -u admin Ranking options: -w, --run=r1[,r2] runs each rank method in the order given -c, --collection=c1[,c2] select according to collection -i, --id=low[-high] select according to doc recID -m, --modified=from[,to] select according to modification date -l, --lastupdate select according to last update -a, --add add or update words for selected records -d, --del delete words for selected records -S, --stat show statistics for a method -R, --recalculate recalculate weigth data, used by word frequency and citation methods, should be used if ca 1% of the document has been changed since last time -R was used -E, --extcites=NUM print the top entries of the external cites table. These are entries that should be entered in your collection, since they have been cited by NUM or more other records present in the system. Useful for cataloguers to input external papers manually. 
Repairing options: -k, --check check consistency for all records in the table(s) check if update of ranking data is necessary -r, --repair try to repair all records in the table(s) Scheduling options: -u, --user=USER user name to store task, password needed -s, --sleeptime=SLEEP time after which to repeat tasks (no) e.g.: 1s, 30m, 24h, 7d -t, --time=TIME moment for the task to be active (now) e.g.: +15s, 5m, 3h , 2002-10-27 13:57:26 General options: -h, --help print this help and exit -V, --version print version and exit -v, --verbose=LEVEL verbose level (from 0 to 9, default 1) """ __revision__ = "$Id$" import sys import ConfigParser from invenio.config import CFG_ETCDIR from invenio.dbquery import run_sql from invenio.bibtask import task_init, write_message, task_get_option, \ task_set_option, get_datetime, task_sleep_now_if_required # pylint: disable=W0611 # Disabling unused import pylint check, since these are needed to get # imported here, and are called later dynamically. from invenio.bibrank_tag_based_indexer import \ single_tag_rank_method, \ citation, \ download_weight_filtering_user, \ download_weight_total, \ file_similarity_by_times_downloaded, \ index_term_count from invenio.bibrank_word_indexer import word_similarity #@UnusedImport from invenio.bibrank_citerank_indexer import citerank #@UnusedImport from invenio.solrutils_bibrank_indexer import word_similarity_solr #@UnusedImport from invenio.xapianutils_bibrank_indexer import word_similarity_xapian #@UnusedImport from invenio.bibrank_selfcites_task import process_updates as selfcites # pylint: enable=W0611 nb_char_in_line = 50 # for verbose pretty printing chunksize = 1000 # default size of chunks that the records will be treated by base_process_size = 4500 # process base size def split_ranges(parse_string): """Split ranges of numbers""" recIDs = [] ranges = parse_string.split(",") for rang in ranges: tmp_recIDs = rang.split("-") if len(tmp_recIDs)==1: recIDs.append([int(tmp_recIDs[0]), 
int(tmp_recIDs[0])]) else: if int(tmp_recIDs[0]) > int(tmp_recIDs[1]): # sanity check tmp = tmp_recIDs[0] tmp_recIDs[0] = tmp_recIDs[1] tmp_recIDs[1] = tmp recIDs.append([int(tmp_recIDs[0]), int(tmp_recIDs[1])]) return recIDs def get_date_range(var): "Returns the two dates contained as a low,high tuple" limits = var.split(",") if len(limits)==1: low = get_datetime(limits[0]) return low, None if len(limits)==2: low = get_datetime(limits[0]) high = get_datetime(limits[1]) return low, high def task_run_core(): """Run the indexing task. The row argument is the BibSched task queue row, containing if, arguments, etc. Return 1 in case of success and 0 in case of failure. """ if not task_get_option("run"): task_set_option("run", [name[0] for name in run_sql("SELECT name from rnkMETHOD")]) for key in task_get_option("run"): task_sleep_now_if_required(can_stop_too=True) write_message("") filename = CFG_ETCDIR + "/bibrank/" + key + ".cfg" write_message("Getting configuration from file: %s" % filename, verbose=9) config = ConfigParser.ConfigParser() try: config.readfp(open(filename)) except StandardError: write_message("Cannot find configuration file: %s. " "The rankmethod may also not be registered using " "the BibRank Admin Interface." 
% filename, sys.stderr) raise #Using the function variable to call the function related to the #rank method cfg_function = config.get("rank_method", "function") func_object = globals().get(cfg_function) if func_object: func_object(key) else: write_message("Cannot run method '%s', no function to call" % key) return True def main(): """Main that construct all the bibtask.""" task_init(authorization_action='runbibrank', authorization_msg="BibRank Task Submission", description="""Ranking examples: bibrank -wjif -a --id=0-30000,30001-860000 --verbose=9 bibrank -wjif -d --modified='2002-10-27 13:57:26' bibrank -wjif --rebalance --collection=Articles bibrank -wsbr -a -i 234-250,293,300-500 -u admin bibrank -u admin -w citation -E 10 bibrank -u admin -w citation -A """, help_specific_usage="""Ranking options: -w, --run=r1[,r2] runs each rank method in the order given -c, --collection=c1[,c2] select according to collection -i, --id=low[-high] select according to doc recID -m, --modified=from[,to] select according to modification date -l, --lastupdate select according to last update -a, --add add or update words for selected records -d, --del delete words for selected records -S, --stat show statistics for a method -R, --recalculate recalculate weight data, used by word frequency and citation methods, should be used if ca 1% of the documents have been changed since last time -R was used. NOTE: This will replace the entire set of weights, regardless of date/id selection. -E, --extcites=NUM print the top entries of the external cites table. These are entries that should be entered in your collection, since they have been cited by NUM or more other records present in the system. Useful for cataloguers to input external papers manually. -A --author-citations Calculate author citations. 
Repairing options: -k, --check check consistency for all records in the table(s) check if update of ranking data is necessary -r, --repair try to repair all records in the table(s) """, version=__revision__, specific_params=("AE:ladSi:m:c:kUrRM:f:w:", [ "author-citations", "print-extcites=", "lastupdate", "add", "del", "repair", "maxmem", "flush", "stat", "rebalance", "id=", "collection=", "check", "modified=", "update", "run="]), task_submit_elaborate_specific_parameter_fnc= task_submit_elaborate_specific_parameter, task_run_fnc=task_run_core) def task_submit_elaborate_specific_parameter(key, value, opts, dummy): """Elaborate a specific parameter of CLI bibrank.""" if key in ("-a", "--add"): task_set_option("cmd", "add") if ("-x","") in opts or ("--del","") in opts: raise StandardError, "--add incompatible with --del" elif key in ("--run", "-w"): task_set_option("run", []) run = value.split(",") for run_key in range(0, len(run)): task_get_option('run').append(run[run_key]) elif key in ("-r", "--repair"): task_set_option("cmd", "repair") elif key in ("-E", "--print-extcites"): try: task_set_option("print-extcites", int(value)) except: task_set_option("print-extcites", 10) # default fallback value task_set_option("cmd", "print-missing") elif key in ("-A", "--author-citations"): task_set_option("author-citations", "1") elif key in ("-d", "--del"): task_set_option("cmd", "del") elif key in ("-k", "--check"): task_set_option("cmd", "check") elif key in ("-S", "--stat"): task_set_option("cmd", "stat") elif key in ("-i", "--id"): task_set_option("id", task_get_option("id") + split_ranges(value)) task_set_option("last_updated", "") elif key in ("-c", "--collection"): task_set_option("collection", value) elif key in ("-R", "--rebalance"): task_set_option("quick", "no") elif key in ("-f", "--flush"): task_set_option("flush", int(value)) elif key in ("-M", "--maxmem"): task_set_option("maxmem", int(value)) if task_get_option("maxmem") < base_process_size + 1000: raise 
StandardError, "Memory usage should be higher than %d kB" % \ (base_process_size + 1000) elif key in ("-m", "--modified"): task_set_option("modified", get_date_range(value))#2002-10-27 13:57:26) task_set_option("last_updated", "") elif key in ("-l", "--lastupdate"): task_set_option("last_updated", "last_updated") else: return False return True if __name__ == "__main__": main()
gpl-2.0
philipn/sycamore
Sycamore/support/pytz/zoneinfo/America/Cuiaba.py
2
5648
'''tzinfo timezone information for America/Cuiaba.''' from pytz.tzinfo import DstTzInfo from pytz.tzinfo import memorized_datetime as d from pytz.tzinfo import memorized_ttinfo as i class Cuiaba(DstTzInfo): '''America/Cuiaba timezone definition. See datetime.tzinfo for details''' zone = 'America/Cuiaba' _utc_transition_times = [ d(1,1,1,0,0,0), d(1914,1,1,3,44,20), d(1931,10,3,15,0,0), d(1932,4,1,3,0,0), d(1932,10,3,4,0,0), d(1933,4,1,3,0,0), d(1949,12,1,4,0,0), d(1950,4,16,4,0,0), d(1950,12,1,4,0,0), d(1951,4,1,3,0,0), d(1951,12,1,4,0,0), d(1952,4,1,3,0,0), d(1952,12,1,4,0,0), d(1953,3,1,3,0,0), d(1963,12,9,4,0,0), d(1964,3,1,3,0,0), d(1965,1,31,4,0,0), d(1965,3,31,3,0,0), d(1965,12,1,4,0,0), d(1966,3,1,3,0,0), d(1966,11,1,4,0,0), d(1967,3,1,3,0,0), d(1967,11,1,4,0,0), d(1968,3,1,3,0,0), d(1985,11,2,4,0,0), d(1986,3,15,3,0,0), d(1986,10,25,4,0,0), d(1987,2,14,3,0,0), d(1987,10,25,4,0,0), d(1988,2,7,3,0,0), d(1988,10,16,4,0,0), d(1989,1,29,3,0,0), d(1989,10,15,4,0,0), d(1990,2,11,3,0,0), d(1990,10,21,4,0,0), d(1991,2,17,3,0,0), d(1991,10,20,4,0,0), d(1992,2,9,3,0,0), d(1992,10,25,4,0,0), d(1993,1,31,3,0,0), d(1993,10,17,4,0,0), d(1994,2,20,3,0,0), d(1994,10,16,4,0,0), d(1995,2,19,3,0,0), d(1995,10,15,4,0,0), d(1996,2,11,3,0,0), d(1996,10,6,4,0,0), d(1997,2,16,3,0,0), d(1997,10,6,4,0,0), d(1998,3,1,3,0,0), d(1998,10,11,4,0,0), d(1999,2,21,3,0,0), d(1999,10,3,4,0,0), d(2000,2,27,3,0,0), d(2000,10,8,4,0,0), d(2001,2,18,3,0,0), d(2001,10,14,4,0,0), d(2002,2,17,3,0,0), d(2002,11,3,4,0,0), d(2003,2,16,3,0,0), d(2004,11,2,4,0,0), d(2005,2,20,3,0,0), d(2005,10,16,4,0,0), d(2006,2,19,3,0,0), d(2006,10,15,4,0,0), d(2007,2,18,3,0,0), d(2007,10,21,4,0,0), d(2008,2,17,3,0,0), d(2008,10,19,4,0,0), d(2009,2,15,3,0,0), d(2009,10,18,4,0,0), d(2010,2,21,3,0,0), d(2010,10,17,4,0,0), d(2011,2,20,3,0,0), d(2011,10,16,4,0,0), d(2012,2,19,3,0,0), d(2012,10,21,4,0,0), d(2013,2,17,3,0,0), d(2013,10,20,4,0,0), d(2014,2,16,3,0,0), d(2014,10,19,4,0,0), d(2015,2,15,3,0,0), d(2015,10,18,4,0,0), 
d(2016,2,21,3,0,0), d(2016,10,16,4,0,0), d(2017,2,19,3,0,0), d(2017,10,15,4,0,0), d(2018,2,18,3,0,0), d(2018,10,21,4,0,0), d(2019,2,17,3,0,0), d(2019,10,20,4,0,0), d(2020,2,16,3,0,0), d(2020,10,18,4,0,0), d(2021,2,21,3,0,0), d(2021,10,17,4,0,0), d(2022,2,20,3,0,0), d(2022,10,16,4,0,0), d(2023,2,19,3,0,0), d(2023,10,15,4,0,0), d(2024,2,18,3,0,0), d(2024,10,20,4,0,0), d(2025,2,16,3,0,0), d(2025,10,19,4,0,0), d(2026,2,15,3,0,0), d(2026,10,18,4,0,0), d(2027,2,21,3,0,0), d(2027,10,17,4,0,0), d(2028,2,20,3,0,0), d(2028,10,15,4,0,0), d(2029,2,18,3,0,0), d(2029,10,21,4,0,0), d(2030,2,17,3,0,0), d(2030,10,20,4,0,0), d(2031,2,16,3,0,0), d(2031,10,19,4,0,0), d(2032,2,15,3,0,0), d(2032,10,17,4,0,0), d(2033,2,20,3,0,0), d(2033,10,16,4,0,0), d(2034,2,19,3,0,0), d(2034,10,15,4,0,0), d(2035,2,18,3,0,0), d(2035,10,21,4,0,0), d(2036,2,17,3,0,0), d(2036,10,19,4,0,0), d(2037,2,15,3,0,0), d(2037,10,18,4,0,0), ] _transition_info = [ i(-13440,0,'LMT'), i(-14400,0,'AMT'), i(-10800,3600,'AMST'), i(-14400,0,'AMT'), i(-10800,3600,'AMST'), i(-14400,0,'AMT'), i(-10800,3600,'AMST'), i(-14400,0,'AMT'), i(-10800,3600,'AMST'), i(-14400,0,'AMT'), i(-10800,3600,'AMST'), i(-14400,0,'AMT'), i(-10800,3600,'AMST'), i(-14400,0,'AMT'), i(-10800,3600,'AMST'), i(-14400,0,'AMT'), i(-10800,3600,'AMST'), i(-14400,0,'AMT'), i(-10800,3600,'AMST'), i(-14400,0,'AMT'), i(-10800,3600,'AMST'), i(-14400,0,'AMT'), i(-10800,3600,'AMST'), i(-14400,0,'AMT'), i(-10800,3600,'AMST'), i(-14400,0,'AMT'), i(-10800,3600,'AMST'), i(-14400,0,'AMT'), i(-10800,3600,'AMST'), i(-14400,0,'AMT'), i(-10800,3600,'AMST'), i(-14400,0,'AMT'), i(-10800,3600,'AMST'), i(-14400,0,'AMT'), i(-10800,3600,'AMST'), i(-14400,0,'AMT'), i(-10800,3600,'AMST'), i(-14400,0,'AMT'), i(-10800,3600,'AMST'), i(-14400,0,'AMT'), i(-10800,3600,'AMST'), i(-14400,0,'AMT'), i(-10800,3600,'AMST'), i(-14400,0,'AMT'), i(-10800,3600,'AMST'), i(-14400,0,'AMT'), i(-10800,3600,'AMST'), i(-14400,0,'AMT'), i(-10800,3600,'AMST'), i(-14400,0,'AMT'), i(-10800,3600,'AMST'), 
i(-14400,0,'AMT'), i(-10800,3600,'AMST'), i(-14400,0,'AMT'), i(-10800,3600,'AMST'), i(-14400,0,'AMT'), i(-10800,3600,'AMST'), i(-14400,0,'AMT'), i(-10800,3600,'AMST'), i(-14400,0,'AMT'), i(-10800,3600,'AMST'), i(-14400,0,'AMT'), i(-10800,3600,'AMST'), i(-14400,0,'AMT'), i(-10800,3600,'AMST'), i(-14400,0,'AMT'), i(-10800,3600,'AMST'), i(-14400,0,'AMT'), i(-10800,3600,'AMST'), i(-14400,0,'AMT'), i(-10800,3600,'AMST'), i(-14400,0,'AMT'), i(-10800,3600,'AMST'), i(-14400,0,'AMT'), i(-10800,3600,'AMST'), i(-14400,0,'AMT'), i(-10800,3600,'AMST'), i(-14400,0,'AMT'), i(-10800,3600,'AMST'), i(-14400,0,'AMT'), i(-10800,3600,'AMST'), i(-14400,0,'AMT'), i(-10800,3600,'AMST'), i(-14400,0,'AMT'), i(-10800,3600,'AMST'), i(-14400,0,'AMT'), i(-10800,3600,'AMST'), i(-14400,0,'AMT'), i(-10800,3600,'AMST'), i(-14400,0,'AMT'), i(-10800,3600,'AMST'), i(-14400,0,'AMT'), i(-10800,3600,'AMST'), i(-14400,0,'AMT'), i(-10800,3600,'AMST'), i(-14400,0,'AMT'), i(-10800,3600,'AMST'), i(-14400,0,'AMT'), i(-10800,3600,'AMST'), i(-14400,0,'AMT'), i(-10800,3600,'AMST'), i(-14400,0,'AMT'), i(-10800,3600,'AMST'), i(-14400,0,'AMT'), i(-10800,3600,'AMST'), i(-14400,0,'AMT'), i(-10800,3600,'AMST'), i(-14400,0,'AMT'), i(-10800,3600,'AMST'), i(-14400,0,'AMT'), i(-10800,3600,'AMST'), i(-14400,0,'AMT'), i(-10800,3600,'AMST'), i(-14400,0,'AMT'), i(-10800,3600,'AMST'), i(-14400,0,'AMT'), i(-10800,3600,'AMST'), i(-14400,0,'AMT'), i(-10800,3600,'AMST'), i(-14400,0,'AMT'), i(-10800,3600,'AMST'), i(-14400,0,'AMT'), i(-10800,3600,'AMST'), i(-14400,0,'AMT'), i(-10800,3600,'AMST'), i(-14400,0,'AMT'), i(-10800,3600,'AMST'), ] Cuiaba = Cuiaba()
gpl-2.0
dllsf/odootest
addons/product_expiry/__openerp__.py
61
1831
############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## { 'name' : 'Products Expiry Date', 'version' : '1.0', 'author' : 'OpenERP SA', 'category' : 'Specific Industry Applications', 'depends' : ['stock'], 'demo' : ['product_expiry_demo.xml'], 'description': """ Track different dates on products and production lots. ====================================================== Following dates can be tracked: ------------------------------- - end of life - best before date - removal date - alert date Also implements the removal strategy First Expiry First Out (FEFO) widely used, for example, in food industries. """, 'data' : ['product_expiry_view.xml', 'product_expiry_data.xml'], 'auto_install': False, 'installable': True, 'images': ['images/production_lots_dates.jpeg','images/products_dates.jpeg'], } # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
pombredanne/grr
lib/flows/general/webhistory_test.py
6
7815
#!/usr/bin/env python """Test the webhistory flows.""" import os from grr.client import client_utils_linux from grr.client import client_utils_osx from grr.client.client_actions import standard from grr.lib import action_mocks from grr.lib import aff4 from grr.lib import artifact_test from grr.lib import flags from grr.lib import test_lib from grr.lib import utils # pylint: disable=unused-import from grr.lib.flows.general import webhistory # pylint: enable=unused-import from grr.lib.rdfvalues import client as rdf_client from grr.lib.rdfvalues import paths as rdf_paths class WebHistoryFlowTest(test_lib.FlowTestsBaseclass): pass class TestWebHistory(WebHistoryFlowTest): """Test the browser history flows.""" def setUp(self): super(TestWebHistory, self).setUp() # Set up client info self.client = aff4.FACTORY.Open(self.client_id, mode="rw", token=self.token) self.client.Set(self.client.Schema.SYSTEM("Linux")) user_list = self.client.Schema.USER() user_list.Append(rdf_client.User(username="test", full_name="test user", homedir="/home/test/", last_logon=250)) self.client.AddAttribute(self.client.Schema.USER, user_list) self.client.Close() self.client_mock = action_mocks.ActionMock( "ReadBuffer", "FingerprintFile", "HashBuffer", "TransferBuffer", "StatFile", "Find", "ListDirectory", "Grep") # Mock the client to make it look like the root partition is mounted off the # test image. This will force all flow access to come off the image. def MockGetMountpoints(): return { "/": (os.path.join(self.base_path, "test_img.dd"), "ext2") } self.orig_linux_mp = client_utils_linux.GetMountpoints self.orig_osx_mp = client_utils_osx.GetMountpoints client_utils_linux.GetMountpoints = MockGetMountpoints client_utils_osx.GetMountpoints = MockGetMountpoints # We wiped the data_store so we have to retransmit all blobs. 
standard.HASH_CACHE = utils.FastStore(100) def tearDown(self): super(TestWebHistory, self).tearDown() client_utils_linux.GetMountpoints = self.orig_linux_mp client_utils_osx.GetMountpoints = self.orig_osx_mp def testChromeHistoryFetch(self): """Test that downloading the Chrome history works.""" # Run the flow in the simulated way for _ in test_lib.TestFlowHelper( "ChromeHistory", self.client_mock, check_flow_errors=False, client_id=self.client_id, username="test", token=self.token, output="analysis/testfoo", pathtype=rdf_paths.PathSpec.PathType.TSK): pass # Now check that the right files were downloaded. fs_path = "/home/test/.config/google-chrome/Default/History" # Check if the History file is created. output_path = self.client_id.Add("fs/tsk").Add( self.base_path.replace("\\", "/")).Add( "test_img.dd").Add(fs_path.replace("\\", "/")) fd = aff4.FACTORY.Open(output_path, token=self.token) self.assertTrue(fd.size > 20000) # Check for analysis file. output_path = self.client_id.Add("analysis/testfoo") fd = aff4.FACTORY.Open(output_path, token=self.token) self.assertTrue(fd.size > 20000) self.assertTrue(fd.Read(5000).find("funnycats.exe") != -1) def testFirefoxHistoryFetch(self): """Test that downloading the Firefox history works.""" # Run the flow in the simulated way for _ in test_lib.TestFlowHelper( "FirefoxHistory", self.client_mock, check_flow_errors=False, client_id=self.client_id, username="test", token=self.token, output="analysis/ff_out", pathtype=rdf_paths.PathSpec.PathType.TSK): pass # Now check that the right files were downloaded. fs_path = "/home/test/.mozilla/firefox/adts404t.default/places.sqlite" # Check if the History file is created. output_path = self.client_id.Add("fs/tsk").Add( "/".join([self.base_path.replace("\\", "/"), "test_img.dd"])).Add(fs_path.replace("\\", "/")) fd = aff4.FACTORY.Open(output_path, token=self.token) self.assertTrue(fd.size > 20000) self.assertEqual(fd.read(15), "SQLite format 3") # Check for analysis file. 
output_path = self.client_id.Add("analysis/ff_out") fd = aff4.FACTORY.Open(output_path, token=self.token) self.assertTrue(fd.size > 400) data = fd.Read(1000) self.assertTrue(data.find("Welcome to Firefox") != -1) self.assertTrue(data.find("sport.orf.at") != -1) def testCacheGrep(self): """Test the Cache Grep plugin.""" # Run the flow in the simulated way for _ in test_lib.TestFlowHelper( "CacheGrep", self.client_mock, check_flow_errors=False, client_id=self.client_id, grep_users=["test"], data_regex="ENIAC", output="analysis/cachegrep/{u}", pathtype=rdf_paths.PathSpec.PathType.TSK, token=self.token): pass # Check if the collection file was created. output_path = self.client_id.Add("analysis/cachegrep").Add("test") fd = aff4.FACTORY.Open(output_path, aff4_type="RDFValueCollection", token=self.token) # There should be one hit. self.assertEqual(len(fd), 1) # Get the first hit. hits = list(fd) self.assertIsInstance(hits[0], rdf_client.StatEntry) self.assertEqual(hits[0].pathspec.last.path, "/home/test/.config/google-chrome/Default/Cache/data_1") class TestWebHistoryWithArtifacts(WebHistoryFlowTest, artifact_test.ArtifactTest): """Test the browser history flows.""" def setUp(self): super(TestWebHistoryWithArtifacts, self).setUp() self.SetLinuxClient() fd = aff4.FACTORY.Open(self.client_id, token=self.token, mode="rw") self.kb = fd.Schema.KNOWLEDGE_BASE() self.kb.users.Append(rdf_client.KnowledgeBaseUser(username="test", full_name="test user", homedir="/home/test/", last_logon=250)) self.kb.os = "Linux" fd.AddAttribute(fd.Schema.KNOWLEDGE_BASE, self.kb) fd.Flush() self.client_mock = action_mocks.ActionMock( "ReadBuffer", "FingerprintFile", "HashBuffer", "TransferBuffer", "StatFile", "Find", "ListDirectory") def testChrome(self): """Check we can run WMI based artifacts.""" with self.MockClientMountPointsWithImage( os.path.join(self.base_path, "test_img.dd")): fd = self.RunCollectorAndGetCollection( ["ChromeHistory"], client_mock=self.client_mock, use_tsk=True, 
knowledge_base=self.kb) self.assertEqual(len(fd), 71) self.assertTrue("/home/john/Downloads/funcats_scr.exe" in [d.download_path for d in fd]) self.assertTrue("http://www.java.com/" in [d.url for d in fd]) self.assertTrue(fd[0].source_urn.Path().endswith( "/home/test/.config/google-chrome/Default/History")) def testFirefox(self): """Check we can run WMI based artifacts.""" with self.MockClientMountPointsWithImage( os.path.join(self.base_path, "test_img.dd")): fd = self.RunCollectorAndGetCollection( ["FirefoxHistory"], client_mock=self.client_mock, use_tsk=True) self.assertEqual(len(fd), 5) self.assertEqual(fd[0].access_time.AsSecondsFromEpoch(), 1340623334) self.assertTrue("http://sport.orf.at/" in [d.url for d in fd]) self.assertTrue(fd[0].source_urn.Path().endswith( "/home/test/.mozilla/firefox/adts404t.default/places.sqlite")) def main(argv): # Run the full test suite test_lib.GrrTestProgram(argv=argv) if __name__ == "__main__": flags.StartMain(main)
apache-2.0
emilydolson/forestcat
pyrobot/aima/logic.py
2
33894
"""Representations and Inference for Logic (Chapters 7-10) Covers both Propositional and First-Order Logic. First we have four important data types: KB Abstract class holds a knowledge base of logical expressions KB_Agent Abstract class subclasses agents.Agent Expr A logical expression substitution Implemented as a dictionary of var:value pairs, {x:1, y:x} Be careful: some functions take an Expr as argument, and some take a KB. Then we implement various functions for doing logical inference: pl_true Evaluate a propositional logical sentence in a model tt_entails Say if a statement is entailed by a KB pl_resolution Do resolution on propositional sentences dpll_satisfiable See if a propositional sentence is satisfiable WalkSAT (not yet implemented) And a few other functions: to_cnf Convert to conjunctive normal form unify Do unification of two FOL sentences diff, simp Symbolic differentiation and simplification """ from __future__ import generators import re import agents from utils import * #______________________________________________________________________________ class KB: """A Knowledge base to which you can tell and ask sentences. To create a KB, first subclass this class and implement tell, ask_generator, and retract. Why ask_generator instead of ask? The book is a bit vague on what ask means -- For a Propositional Logic KB, ask(P & Q) returns True or False, but for an FOL KB, something like ask(Brother(x, y)) might return many substitutions such as {x: Cain, y: Able}, {x: Able, y: Cain}, {x: George, y: Jeb}, etc. So ask_generator generates these one at a time, and ask either returns the first one or returns False.""" def __init__(self, sentence=None): abstract() def tell(self, sentence): "Add the sentence to the KB" abstract() def ask(self, query): """Ask returns a substitution that makes the query true, or it returns False. 
It is implemented in terms of ask_generator.""" try: return self.ask_generator(query).next() except StopIteration: return False def ask_generator(self, query): "Yield all the substitutions that make query true." abstract() def retract(self, sentence): "Remove the sentence from the KB" abstract() class PropKB(KB): "A KB for Propositional Logic. Inefficient, with no indexing." def __init__(self, sentence=None): self.clauses = [] if sentence: self.tell(sentence) def tell(self, sentence): "Add the sentence's clauses to the KB" self.clauses.extend(conjuncts(to_cnf(sentence))) def ask_generator(self, query): "Yield the empty substitution if KB implies query; else False" if not tt_entails(Expr('&', *self.clauses), query): return yield {} def retract(self, sentence): "Remove the sentence's clauses from the KB" for c in conjuncts(to_cnf(sentence)): if c in self.clauses: self.clauses.remove(c) #______________________________________________________________________________ class KB_Agent(agents.Agent): """A generic logical knowledge-based agent. [Fig. 7.1]""" def __init__(self, KB): t = 0 def program(percept): KB.tell(self.make_percept_sentence(percept, t)) action = KB.ask(self.make_action_query(t)) KB.tell(self.make_action_sentence(action, t)) t = t + 1 return action self.program = program def make_percept_sentence(self, percept, t): return(Expr("Percept")(percept, t)) def make_action_query(self, t): return(expr("ShouldDo(action, %d)" % t)) def make_action_sentence(self, action, t): return(Expr("Did")(action, t)) #______________________________________________________________________________ class Expr: """A symbolic mathematical expression. We use this class for logical expressions, and for terms within logical expressions. In general, an Expr has an op (operator) and a list of args. The op can be: Null-ary (no args) op: A number, representing the number itself. (e.g. Expr(42) => 42) A symbol, representing a variable or constant (e.g. 
Expr('F') => F) Unary (1 arg) op: '~', '-', representing NOT, negation (e.g. Expr('~', Expr('P')) => ~P) Binary (2 arg) op: '>>', '<<', representing forward and backward implication '+', '-', '*', '/', '**', representing arithmetic operators '<', '>', '>=', '<=', representing comparison operators '<=>', '^', representing logical equality and XOR N-ary (0 or more args) op: '&', '|', representing conjunction and disjunction A symbol, representing a function term or FOL proposition Exprs can be constructed with operator overloading: if x and y are Exprs, then so are x + y and x & y, etc. Also, if F and x are Exprs, then so is F(x); it works by overloading the __call__ method of the Expr F. Note that in the Expr that is created by F(x), the op is the str 'F', not the Expr F. See http://www.python.org/doc/current/ref/specialnames.html to learn more about operator overloading in Python. WARNING: x == y and x != y are NOT Exprs. The reason is that we want to write code that tests 'if x == y:' and if x == y were the same as Expr('==', x, y), then the result would always be true; not what a programmer would expect. But we still need to form Exprs representing equalities and disequalities. We concentrate on logical equality (or equivalence) and logical disequality (or XOR). You have 3 choices: (1) Expr('<=>', x, y) and Expr('^', x, y) Note that ^ is bitwose XOR in Python (and Java and C++) (2) expr('x <=> y') and expr('x =/= y'). See the doc string for the function expr. (3) (x % y) and (x ^ y). It is very ugly to have (x % y) mean (x <=> y), but we need SOME operator to make (2) work, and this seems the best choice. WARNING: if x is an Expr, then so is x + 1, because the int 1 gets coerced to an Expr by the constructor. But 1 + x is an error, because 1 doesn't know how to add an Expr. (Adding an __radd__ method to Expr wouldn't help, because int.__add__ is still called first.) Therefore, you should use Expr(1) + x instead, or ONE + x, or expr('1 + x'). 
""" def __init__(self, op, *args): "Op is a string or number; args are Exprs (or are coerced to Exprs)." assert isinstance(op, str) or (isnumber(op) and not args) self.op = num_or_str(op) self.args = map(expr, args) ## Coerce args to Exprs def __call__(self, *args): """Self must be a symbol with no args, such as Expr('F'). Create a new Expr with 'F' as op and the args as arguments.""" assert is_symbol(self.op) and not self.args return Expr(self.op, *args) def __repr__(self): "Show something like 'P' or 'P(x, y)', or '~P' or '(P | Q | R)'" if len(self.args) == 0: # Constant or proposition with arity 0 return str(self.op) elif is_symbol(self.op): # Functional or Propositional operator return '%s(%s)' % (self.op, ', '.join(map(repr, self.args))) elif len(self.args) == 1: # Prefix operator return self.op + repr(self.args[0]) else: # Infix operator return '(%s)' % (' '+self.op+' ').join(map(repr, self.args)) def __eq__(self, other): """x and y are equal iff their ops and args are equal.""" return (other is self) or (isinstance(other, Expr) and self.op == other.op and self.args == other.args) def __hash__(self): "Need a hash method so Exprs can live in dicts." 
return hash(self.op) ^ hash(tuple(self.args)) # See http://www.python.org/doc/current/lib/module-operator.html # Not implemented: not, abs, pos, concat, contains, *item, *slice def __lt__(self, other): return Expr('<', self, other) def __le__(self, other): return Expr('<=', self, other) def __ge__(self, other): return Expr('>=', self, other) def __gt__(self, other): return Expr('>', self, other) def __add__(self, other): return Expr('+', self, other) def __sub__(self, other): return Expr('-', self, other) def __and__(self, other): return Expr('&', self, other) def __div__(self, other): return Expr('/', self, other) def __truediv__(self, other):return Expr('/', self, other) def __invert__(self): return Expr('~', self) def __lshift__(self, other): return Expr('<<', self, other) def __rshift__(self, other): return Expr('>>', self, other) def __mul__(self, other): return Expr('*', self, other) def __neg__(self): return Expr('-', self) def __or__(self, other): return Expr('|', self, other) def __pow__(self, other): return Expr('**', self, other) def __xor__(self, other): return Expr('^', self, other) def __mod__(self, other): return Expr('<=>', self, other) ## (x % y) def expr(s): """Create an Expr representing a logic expression by parsing the input string. Symbols and numbers are automatically converted to Exprs. In addition you can use alternative spellings of these operators: 'x ==> y' parses as (x >> y) # Implication 'x <== y' parses as (x << y) # Reverse implication 'x <=> y' parses as (x % y) # Logical equivalence 'x =/= y' parses as (x ^ y) # Logical disequality (xor) But BE CAREFUL; precedence of implication is wrong. expr('P & Q ==> R & S') is ((P & (Q >> R)) & S); so you must use expr('(P & Q) ==> (R & S)'). 
Ex: expr('P <=> Q(1)') ==> Expr('<=>', P, Q(1)) expr('P & Q | ~R(x, F(x))')""" if isinstance(s, Expr): return s if isnumber(s): return Expr(s) ## Replace the alternative spellings of operators with canonical spellings s = s.replace('==>', '>>').replace('<==', '<<') s = s.replace('<=>', '%').replace('=/=', '^') ## Replace a symbol or number, such as 'P' with 'Expr("P")' s = re.sub(r'([a-zA-Z0-9_.]+)', r'Expr("\1")', s) ## Now eval the string. (A security hole; do not use with an adversary.) return eval(s, {'Expr':Expr}) def is_symbol(s): "A string s is a symbol if it starts with an alphabetic char." return isinstance(s, str) and s[0].isalpha() def is_var_symbol(s): "A logic variable symbol is an initial-lowercase string." return is_symbol(s) and s[0].islower() def is_prop_symbol(s): """A proposition logic symbol is an initial-uppercase string other than TRUE or FALSE.""" return is_symbol(s) and s[0].isupper() and s != 'TRUE' and s != 'FALSE' ## Useful constant Exprs used in examples and code: TRUE, FALSE, ZERO, ONE, TWO = map(Expr, ['TRUE', 'FALSE', 0, 1, 2]) A, B, C, F, G, P, Q, x, y, z = map(Expr, 'ABCFGPQxyz') #______________________________________________________________________________ def tt_entails(kb, alpha): """Use truth tables to determine if KB entails sentence alpha. [Fig. 7.10] Ex: tt_entails(expr('P & Q'), expr('Q')) ==> True""" return tt_check_all(kb, alpha, prop_symbols(kb & alpha), {}) def tt_check_all(kb, alpha, symbols, model): "Auxiliary routine to implement tt_entails." if not symbols: if pl_true(kb, model): return pl_true(alpha, model) else: return True assert result != None else: P, rest = symbols[0], symbols[1:] return (tt_check_all(kb, alpha, rest, extend(model, P, True)) and tt_check_all(kb, alpha, rest, extend(model, P, False))) def prop_symbols(x): "Return a list of all propositional symbols in x." 
if not isinstance(x, Expr): return [] elif is_prop_symbol(x.op): return [x] else: s = Set(()) for arg in x.args: s.union_update(prop_symbols(arg)) return list(s) def tt_true(alpha): """Is the sentence alpha a tautology? (alpha will be coerced to an expr.) Ex: tt_true(expr("(P >> Q) <=> (~P | Q)")) ==> True""" return tt_entails(TRUE, expr(alpha)) def pl_true(exp, model={}): """Return True if the propositional logic expression is true in the model, and False if it is false. If the model does not specify the value for every proposition, this may return None to indicate 'not obvious'; this may happen even when the expression is tautological.""" op, args = exp.op, exp.args if exp == TRUE: return True elif exp == FALSE: return False elif is_prop_symbol(op): return model.get(exp) elif op == '~': p = pl_true(args[0], model) if p == None: return None else: return not p elif op == '|': result = False for arg in args: p = pl_true(arg, model) if p == True: return True if p == None: result = None return result elif op == '&': result = True for arg in args: p = pl_true(arg, model) if p == False: return False if p == None: result = None return result p, q = args if op == '>>': return pl_true(~p | q, model) elif op == '<<': return pl_true(p | ~q, model) pt = pl_true(p, model) if pt == None: return None qt = pl_true(q, model) if qt == None: return None if op == '<=>': return pt == qt elif op == '^': return pt != qt else: raise ValueError, "illegal operator in logic expression" + str(exp) #______________________________________________________________________________ ## Convert to Conjunctive Normal Form (CNF) def to_cnf(s): """Convert a propositional logical sentence s to conjunctive normal form. That is, of the form ((A | ~B | ...) & (B | C | ...) & ...) [p. 
215] Ex: def cnf(s): return str(to_cnf(expr(s))) cnf("B <=> (P1|P2)") ==> "((~P1 | B) & (~P2 | B) & (P1 | P2 | ~B))" cnf("a | (b & c) | d") ==> "((b | a | d) & (c | a | d))" """ s = eliminate_implications(s) # Steps 1, 2 from p. 215 s = move_not_inwards(s) # Step 3 return distribute_and_over_or(s) # Step 4 def eliminate_implications(s): """Change >>, <<, and <=> into &, |, and ~. That is, return an Expr that is equivalent to s, but has only &, |, and ~ as logical operators. Ex: eliminate_implications(A >> (~B << C)) ==> ((~B | ~C) | ~A)""" if not s.args or is_symbol(s.op): return s ## (Atoms are unchanged.) args = map(eliminate_implications, s.args) a, b = args[0], args[-1] if s.op == '>>': return (b | ~a) elif s.op == '<<': return (a | ~b) elif s.op == '<=>': return (a | ~b) & (b | ~a) else: return Expr(s.op, *args) def move_not_inwards(s): """Rewrite sentence s by moving negation sign inward. Ex: move_not_inwards(~(A|B)) ==> ~A&~B; move_not_inwards(~(A&B)) ==> ~A|~B move_not_inwards(~(~(A|~B)|~~C)) ==>((A | ~B) & ~C)""" if s.op == '~': NOT = lambda b: move_not_inwards(~b) a = s.args[0] if a.op == '~': return move_not_inwards(a.args[0]) # ~~A ==> A if a.op =='&': return NaryExpr('|', *map(NOT, a.args)) if a.op =='|': return NaryExpr('&', *map(NOT, a.args)) return s elif is_symbol(s.op) or not s.args: return s else: return Expr(s.op, *map(move_not_inwards, s.args)) def distribute_and_over_or(s): """Given a sentence s consisting of conjunctions and disjunctions of literals, return an equivalent sentence in CNF. 
Ex: distribute_and_over_or((A & B) | C) ==> ((A | C) & (B | C))""" if s.op == '|': s = NaryExpr('|', *s.args) if len(s.args) == 0: return FALSE if len(s.args) == 1: return distribute_and_over_or(s.args[0]) conj = find_if((lambda d: d.op == '&'), s.args) if not conj: return NaryExpr(s.op, *s.args) others = [a for a in s.args if a is not conj] if len(others) == 1: rest = others[0] else: rest = NaryExpr('|', *others) return NaryExpr('&', *map(distribute_and_over_or, [(c|rest) for c in conj.args])) elif s.op == '&': return NaryExpr('&', *map(distribute_and_over_or, s.args)) else: return s _NaryExprTable = {'&':TRUE, '|':FALSE, '+':ZERO, '*':ONE} def NaryExpr(op, *args): """Create an Expr, but with an nary, associative op, so we can promote nested instances of the same op up to the top level. Ex: str(NaryExpr('&',(A&B),(B|C),(B&C))) ==> '(A & B & (B | C) & B & C)'""" arglist = [] for arg in args: if arg.op == op: arglist.extend(arg.args) else: arglist.append(arg) if len(args) == 1: return args[0] elif len(args) == 0: return _NaryExprTable[op] else: return Expr(op, *arglist) def conjuncts(s): """Return a list of the conjuncts in the sentence s. Ex: conjuncts(A & B) ==> [A, B]; conjuncts(A | B) ==> [A | B]""" if isinstance(s, Expr) and s.op == '&': return s.args else: return [s] def disjuncts(s): """Return a list of the disjuncts in the sentence s. Ex: disjuncts(A | B) ==> [A, B]; disjuncts(A & B) ==> [A & B]""" if isinstance(s, Expr) and s.op == '|': return s.args else: return [s] #______________________________________________________________________________ def pl_resolution(KB, alpha): "Propositional Logic Resolution: say if alpha follows from KB. [Fig. 
7.12]" clauses = KB.clauses + conjuncts(to_cnf(~alpha)) new = Set() while True: n = len(clauses) pairs = [(clauses[i], clauses[j]) for i in range(n) for j in range(i+1, n)] for (ci, cj) in pairs: resolvents = pl_resolve(ci, cj) if FALSE in resolvents: return True new.union_update(resolvents) if new.issubset(clauses): return False for c in new: if c not in clauses: clauses.append(c) def pl_resolve(ci, cj): """Return all clauses that can be obtained by resolving clauses ci and cj. Ex: pl_resolve(to_cnf(A|B|C), to_cnf(~B|~C|F)) str(_) ==> '[(A | C | ~C | F), (A | B | ~B | F)]'""" clauses = [] for di in disjuncts(ci): for dj in disjuncts(cj): if di == ~dj or ~di == dj: dnew = unique(removeall(di, disjuncts(ci)) + removeall(dj, disjuncts(cj))) clauses.append(NaryExpr('|', *dnew)) return clauses #______________________________________________________________________________ class PropHornKB(PropKB): "A KB of Propositional Horn clauses." def tell(self, sentence): "Add a Horn Clauses to this KB." op = sentence.op assert op == '>>' or is_prop_symbol(op), "Must be Horn Clause" self.clauses.append(sentence) def ask_generator(self, query): "Yield the empty substitution if KB implies query; else False" if not pl_fc_entails(self.clauses, query): return yield {} def retract(self, sentence): "Remove the sentence's clauses from the KB" for c in conjuncts(to_cnf(sentence)): if c in self.clauses: self.clauses.remove(c) def clauses_with_premise(self, p): """The list of clauses in KB that have p in the premise. This could be cached away for O(1) speed, but we'll recompute it.""" return [c for c in self.clauses if c.op == '>>' and p in conjuncts(c.args[0])] def pl_fc_entails(KB, q): """Use forward chaining to see if a HornKB entails the symbol q. [Fig. 
7.14] Ex: pl_fc_entails(Fig[7,15], expr('Q')) ==> True""" count = dict([(c, len(conjuncts(c.args[0]))) for c in KB.clauses if c.op == '>>']) inferred = DefaultDict(False) agenda = [s for s in KB.clauses if is_prop_symbol(s.op)] if q in agenda: return True while agenda: p = agenda.pop() if not inferred[p]: inferred[p] = True for c in KB.clauses_with_premise(p): count[c] -= 1 if count[c] == 0: if c.args[1] == q: return True agenda.append(c.args[1]) return False ## Wumpus World example [Fig. 7.13] Fig[7,13] = expr("(B11 <=> (P12 | P21)) & ~B11") ## Propositional Logic Forward Chanining example [Fig. 7.15] Fig[7,15] = PropHornKB() for s in "P>>Q (L&M)>>P (B&L)>>M (A&P)>>L (A&B)>>L A B".split(): Fig[7,15].tell(expr(s)) #______________________________________________________________________________ # DPLL-Satisfiable [Fig. 7.16] def dpll_satisfiable(s): """Check satisfiability of a propositional sentence. This differs from the book code in two ways: (1) it returns a model rather than True when it succeeds; this is more useful. (2) The function find_pure_symbol is passed a list of unknown clauses, rather than a list of all clauses and the model; this is more efficient. Ex: dpll_satisfiable(A&~B) ==> {A:1, B:0}; dpll_satisfiable(P&~P) ==> False """ clauses = conjuncts(to_cnf(s)) symbols = prop_symbols(s) return dpll(clauses, symbols, {}) def dpll(clauses, symbols, model): "See if the clauses are true in a partial model." 
unknown_clauses = [] ## clauses with an unknown truth value for c in clauses: val = pl_true(c, model) if val == False: return False if val != True: unknown_clauses.append(c) if not unknown_clauses: return model P, value = find_pure_symbol(symbols, unknown_clauses) if P: return dpll(clauses, removeall(P, symbols), extend(model, P, value)) P, value = find_unit_clause(clauses, model) if P: return dpll(clauses, removeall(P, symbols), extend(model, P, value)) P = symbols.pop() return (dpll(clauses, symbols, extend(model, P, True)) or dpll(clauses, symbols, extend(model, P, False))) def find_pure_symbol(symbols, unknown_clauses): """Find a symbol and its value if it appears only as a positive literal (or only as a negative) in clauses. Ex: find_pure_symbol([A, B, C], [A|~B,~B|~C,C|A]) ==> A, True""" for s in symbols: found_pos, found_neg = False, False for c in unknown_clauses: if not found_pos and s in disjuncts(c): found_pos = True if not found_neg and ~s in disjuncts(c): found_neg = True if found_pos != found_neg: return s, found_pos return None, None def find_unit_clause(clauses, model): """A unit clause has only 1 variable that is not bound in the model. Ex: find_unit_clause([A|B|C, B|~C, A|~B], {A:True}) ==> B, False""" for clause in clauses: num_not_in_model = 0 for literal in disjuncts(clause): sym = literal_symbol(literal) if sym not in model: num_not_in_model += 1 P, value = sym, (literal.op != '~') if num_not_in_model == 1: return P, value return None, None def literal_symbol(literal): """The symbol in this literal (without the negation). Ex: literal_symbol(P) ==> P; literal_symbol(~P) ==> P""" if literal.op == '~': return literal.args[0] else: return literal #______________________________________________________________________________ # Walk-SAT [Fig. 7.17] def WalkSAT(clauses, p=0.5, max_flips=10000): ## model is a random assignment of true/false to the symbols in clauses ## See ~/aima1e/print1/manual/knowledge+logic-answers.tex ??? 
model = dict([(s, random.choice([True, False])) for s in prop_symbols(clauses)]) for i in range(max_flips): satisfied, unsatisfied = [], [] for clause in clauses: if_(pl_true(clause, model), satisfied, unsatisfied).append(clause) if not unsatisfied: ## if model satisfies all the clauses return model clause = random.choice(unsatisfied) if probability(p): sym = random.choice(prop_symbols(clause)) else: ## Flip the symbol in clause that miximizes number of sat. clauses raise NotImplementedError model[sym] = not model[sym] # PL-Wumpus-Agent [Fig. 7.19] class PLWumpusAgent(agents.Agent): "An agent for the wumpus world that does logical inference. [Fig. 7.19]""" def __init__(self): KB = FOLKB() x, y, orientation = 1, 1, (1, 0) visited = Set() ## squares already visited action = None plan = [] def program(percept): stench, breeze, glitter = percept x, y, orientation = update_position(x, y, orientation, action) KB.tell('%sS_%d,%d' % (if_(stench, '', '~'), x, y)) KB.tell('%sB_%d,%d' % (if_(breeze, '', '~'), x, y)) if glitter: action = 'Grab' elif plan: action = plan.pop() else: for [i, j] in fringe(visited): if KB.ask('~P_%d,%d & ~W_%d,%d' % (i, j, i, j)) != False: raise NotImplementedError KB.ask('~P_%d,%d | ~W_%d,%d' % (i, j, i, j)) != False if action == None: action = random.choice(['Forward', 'Right', 'Left']) return action self.program = program def update_position(x, y, orientation, action): if action == 'TurnRight': orientation = turn_right(orientation) elif action == 'TurnLeft': orientation = turn_left(orientation) elif action == 'Forward': x, y = x + vector_add((x, y), orientation) return x, y, orientation #______________________________________________________________________________ def unify(x, y, s): """Unify expressions x,y with substitution s; return a substitution that would make x,y equal, or None if x,y can not unify. x and y can be variables (e.g. Expr('x')), constants, lists, or Exprs. [Fig. 
9.1] Ex: unify(x + y, y + C, {}) ==> {y: C, x: y}""" if s == None: return None elif x == y: return s elif is_variable(x): return unify_var(x, y, s) elif is_variable(y): return unify_var(y, x, s) elif isinstance(x, Expr) and isinstance(y, Expr): return unify(x.args, y.args, unify(x.op, y.op, s)) elif isinstance(x, str) or isinstance(y, str) or not x or not y: return if_(x == y, s, None) elif issequence(x) and issequence(y) and len(x) == len(y): return unify(x[1:], y[1:], unify(x[0], y[0], s)) else: return None def is_variable(x): "A variable is an Expr with no args and a lowercase symbol as the op." return isinstance(x, Expr) and not x.args and is_var_symbol(x.op) def unify_var(var, x, s): if var in s: return unify(s[var], x, s) elif occur_check(var, x): return None else: return extend(s, var, x) def occur_check(var, x): "Return true if var occurs anywhere in x." if var == x: return True elif isinstance(x, Expr): return var.op == x.op or occur_check(var, x.args) elif not isinstance(x, str) and issequence(x): for xi in x: if occur_check(var, xi): return True return False def extend(s, var, val): """Copy the substitution s and extend it by setting var to val; return copy. Ex: extend({x: 1}, y, 2) ==> {x: 1, y: 2}""" s2 = s.copy() s2[var] = val return s2 def subst(s, x): """Substitute the substitution s into the expression x. Ex: subst({x: 42, y:0}, F(x) + y) ==> (F(42) + 0)""" if isinstance(x, list): return [subst(s, xi) for xi in x] elif isinstance(x, tuple): return tuple([subst(s, xi) for xi in x]) elif not isinstance(x, Expr): return x elif is_var_symbol(x.op): return s.get(x, x) else: return Expr(x.op, *[subst(s, arg) for arg in x.args]) def fol_fc_ask(KB, alpha): """Inefficient forward chaining for first-order logic. [Fig. 
9.3] KB is an FOLHornKB and alpha must be an atomic sentence.""" while True: new = {} for r in KB.clauses: r1 = standardize_apart(r) ps, q = conjuncts(r.args[0]), r.args[1] raise NotImplementedError def standardize_apart(sentence, dic): """Replace all the variables in sentence with new variables.""" if not isinstance(sentence, Expr): return sentence elif is_var_symbol(sentence.op): if sentence in dic: return dic[sentence] else: standardize_apart.counter += 1 dic[sentence] = Expr('V_%d' % standardize-apart.counter) return dic[sentence] else: return Expr(sentence.op, *[standardize-apart(a, dic) for a in sentence.args]) standardize_apart.counter = 0 def fol_bc_ask(KB, goals, theta): "A simple backward-chaining algorithm for first-order logic. [Fig. 9.6]" if not goals: yield theta q1 = subst(theta, goals[0]) raise NotImplementedError #______________________________________________________________________________ # Example application (not in the book). # You can use the Expr class to do symbolic differentiation. This used to be # a part of AI; now it is considered a separate field, Symbolic Algebra. def diff(y, x): """Return the symbolic derivative, dy/dx, as an Expr. However, you probably want to simplify the results with simp. 
Ex: diff(x * x, x) ==> (x * ONE) + (x * ONE) simp(diff(x * x, x)) ==> (TWO * x)""" if y == x: return ONE elif not y.args: return ZERO else: u, op, v = y.args[0], y.op, y.args[-1] if op == '+': return diff(u, x) + diff(v, x) elif op == '-' and len(args) == 1: return -diff(u, x) elif op == '-': return diff(u, x) - diff(v, x) elif op == '*': return u * diff(v, x) + v * diff(u, x) elif op == '/': return (v*diff(u, x) - u*diff(v, x)) / (v * v) elif op == '**' and isnumber(x.op): return (v * u ** (v - 1) * diff(u, x)) elif op == '**': return (v * u ** (v - 1) * diff(u, x) + u ** v * Expr('log')(u) * diff(v, x)) elif op == 'log': return diff(u, x) / u else: raise ValueError("Unknown op: %s in diff(%s, %s)" % (op, y, x)) def simp(x): if not x.args: return x args = map(simp, x.args) u, op, v = args[0], x.op, args[-1] if op == '+': if v == ZERO: return u if u == ZERO: return v if u == v: return TWO * u if u == -v or v == -u: return ZERO elif op == '-' and len(args) == 1: if u.op == '-' and len(u.args) == 1: return u.args[0] ## --y ==> y elif op == '-': if v == ZERO: return u if u == ZERO: return -v if u == v: return ZERO if u == -v or v == -u: return ZERO elif op == '*': if u == ZERO or v == ZERO: return ZERO if u == ONE: return v if v == ONE: return u if u == v: return u ** 2 elif op == '/': if u == ZERO: return ZERO if v == ZERO: return Expr('Undefined') if u == v: return ONE if u == -v or v == -u: return ZERO elif op == '**': if u == ZERO: return ZERO if v == ZERO: return ONE if u == ONE: return ONE if v == ONE: return u elif op == 'log': if u == ONE: return ZERO else: raise ValueError("Unknown op: " + op) ## If we fall through to here, we can not simplify further return Expr(op, *args) def d(y, x): "Differentiate and then simplify." return simp(diff(y, x)) _docex = """# More tests for Logic. 
### PropKB kb = PropKB() kb.tell(A & B) kb.tell(B >> C) kb.ask(C) ==> {} ## The result {} means true, with no substitutions kb.ask(P) ==> False kb.retract(B) kb.ask(C) ==> False pl_true(P, {}) ==> None pl_true(P | Q, {P: True}) ==> True # Notice that the function pl_true cannot reason by cases: pl_true(P | ~P) ==> None # However, tt_true can: tt_true(P | ~P) ==> True # The following are tautologies from [Fig. 7.11]: tt_true("(A & B) <=> (B & A)") ==> True tt_true("(A | B) <=> (B | A)") ==> True tt_true("((A & B) & C) <=> (A & (B & C))") ==> True tt_true("((A | B) | C) <=> (A | (B | C))") ==> True tt_true("~~A <=> A") ==> True tt_true("(A >> B) <=> (~B >> ~A)") ==> True tt_true("(A >> B) <=> (~A | B)") ==> True tt_true("(A <=> B) <=> ((A >> B) & (B >> A))") ==> True tt_true("~(A & B) <=> (~A | ~B)") ==> True tt_true("~(A | B) <=> (~A & ~B)") ==> True tt_true("(A & (B | C)) <=> ((A & B) | (A & C))") ==> True tt_true("(A | (B & C)) <=> ((A | B) & (A | C))") ==> True # The following are not tautologies: tt_true(A & ~A) ==> False tt_true(A & B) ==> False ### [Fig. 7.13] alpha = expr("~P12") str(to_cnf(Fig[7,13] & ~alpha)) ==> '((~P12 | B11) & (~P21 | B11) & (P12 | P21 | ~B11) & ~B11 & P12)' tt_entails(Fig[7,13], alpha) ==> True pl_resolution(PropKB(Fig[7,13]), alpha) ==> True ### [Fig. 7.15] pl_fc_entails(Fig[7,15], expr('SomethingSilly')) ==> False ### Unification: unify(x, x, {}) ==> {} unify(x, 3, {}) ==> {x: 3} str(to_cnf((P&Q) | (~P & ~Q))) ==> '((~P | P) & (~Q | P) & (~P | Q) & (~Q | Q))' """
agpl-3.0
foss-transportationmodeling/rettina-server
flask/local/lib/python2.7/site-packages/whoosh/legacy.py
95
3459
# Copyright 2012 Matt Chaput. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR # IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF # MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO # EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, # OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # # The views and conclusions contained in the software and documentation are # those of the authors and should not be interpreted as representing official # policies, either expressed or implied, of Matt Chaput. """ This module contains code for maintaining backwards compatibility with old index formats. 
""" from whoosh.util.loading import RenamingUnpickler def load_110_toc(stream, gen, schema, version): # Between version -110 and version -111, I reorganized the modules and # changed the implementation of the NUMERIC field, so we have to change the # classes the unpickler tries to load if we need to read an old schema # Read the length of the pickled schema picklen = stream.read_varint() if schema: # If the user passed us a schema, use it and skip the one on disk stream.seek(picklen, 1) else: # Remap the old classes and functions to their moved versions as we # unpickle the schema scuts = {"wf": "whoosh.fields", "wsn": "whoosh.support.numeric", "wcw2": "whoosh.codec.whoosh2"} objmap = {"%(wf)s.NUMERIC": "%(wcw2)s.OLD_NUMERIC", "%(wf)s.DATETIME": "%(wcw2)s.OLD_DATETIME", "%(wsn)s.int_to_text": "%(wcw2)s.int_to_text", "%(wsn)s.text_to_int": "%(wcw2)s.text_to_int", "%(wsn)s.long_to_text": "%(wcw2)s.long_to_text", "%(wsn)s.text_to_long": "%(wcw2)s.text_to_long", "%(wsn)s.float_to_text": "%(wcw2)s.float_to_text", "%(wsn)s.text_to_float": "%(wcw2)s.text_to_float", } ru = RenamingUnpickler(stream, objmap, shortcuts=scuts) schema = ru.load() # Read the generation number index_gen = stream.read_int() assert gen == index_gen # Unused number _ = stream.read_int() # Unpickle the list of segment objects segments = stream.read_pickle() return schema, segments # Map TOC version numbers to functions to load that version toc_loaders = {-110: load_110_toc} # Map segment class names to functions to load the segment segment_loaders = {}
apache-2.0
macobo/TurtleSnap
TurtleSnap.py
1
2381
import turtle import os import time import random import subprocess CURRENT_FOLDER = os.path.dirname(os.path.abspath(__file__)) CREATE_GIF_SH = os.path.join(CURRENT_FOLDER, "creategif.sh") def take_picture(canvas, filename): canvas.postscript(file=filename) def execute_file(file): exec(file.read() + "\nimport turtle\nturtle.exitonclick()", {}) class Counter(): def __init__(self): self.snapshot = 0 def take_picture(self, root_prefix): filename = root_prefix + "{:03d}.ps".format(self.snapshot) take_picture( turtle.getcanvas(), filename) self.snapshot += 1 def make_turtle_gif(user_program, output_file, snapshot_delay, frame_delay): def tick(): #print("snip") counter.take_picture(root_prefix) root.after(snapshot_delay, tick) def exitonclick(): turtle.exitonclick = lambda *a, **kw: None counter.take_picture(root_prefix) # prefix for temporary files root_prefix = ".temp_shot-%s-%03d-" % \ (time.strftime("%Y%m%d%H%M%S"), random.randrange(1000)) # do a last picture when we're done counter = Counter() turtle.exitonclick = exitonclick turtle.setup(1920, 1080) root = turtle.getcanvas()._root() root.after(snapshot_delay, tick) # start the users program execute_file(user_program) counter.take_picture(root_prefix) print("Creating gif", output_file, repr(root_prefix)) subprocess.call( [CREATE_GIF_SH, root_prefix, output_file, str(frame_delay)]) if __name__ == "__main__": import argparse parser = argparse.ArgumentParser( description='Convert what a program draws using turtle to an animated gif') parser.add_argument('program_path', type=argparse.FileType('r'), help="Path to a program that uses turtle") parser.add_argument('output_path', help="File path for the output gif") parser.add_argument('-s', '--snapshot_delay', type=int, default=500, metavar="delay", help="How often to take a snapshot (in ms)") parser.add_argument('-f', '--frame_delay', type=int, default=20, metavar="delay", help="Delay between frames in gif") args = parser.parse_args() make_turtle_gif( args.program_path, 
args.output_path, args.snapshot_delay, args.frame_delay)
mit
ChawalitK/odoo
addons/website_portal_sale/controllers/main.py
11
2109
# -*- coding: utf-8 -*- import datetime from openerp import http from openerp.exceptions import AccessError from openerp.http import request from openerp.addons.website_portal.controllers.main import website_account class website_account(website_account): @http.route(['/my/home'], type='http', auth="user", website=True) def account(self, **kw): """ Add sales documents to main account page """ response = super(website_account, self).account() partner = request.env.user.partner_id res_sale_order = request.env['sale.order'] res_invoices = request.env['account.invoice'] quotations = res_sale_order.search([ ('message_partner_ids', 'child_of', [partner.commercial_partner_id.id]), ('state', 'in', ['sent', 'cancel']) ]) orders = res_sale_order.search([ ('message_partner_ids', 'child_of', [partner.commercial_partner_id.id]), ('state', 'in', ['sale', 'done']) ]) invoices = res_invoices.search([ ('message_partner_ids', 'child_of', [partner.commercial_partner_id.id]), ('state', 'in', ['open', 'paid', 'cancelled']) ]) response.qcontext.update({ 'date': datetime.date.today().strftime('%Y-%m-%d'), 'quotations': quotations, 'orders': orders, 'invoices': invoices, }) return response @http.route(['/my/orders/<int:order>'], type='http', auth="user", website=True) def orders_followup(self, order=None): order = request.env['sale.order'].browse([order]) try: order.check_access_rights('read') order.check_access_rule('read') except AccessError: return request.website.render("website.403") order_invoice_lines = {il.product_id.id: il.invoice_id for il in order.invoice_ids.mapped('invoice_line_ids')} return request.website.render("website_portal_sale.orders_followup", { 'order': order.sudo(), 'order_invoice_lines': order_invoice_lines, })
gpl-3.0
PySide/Tools
pysideuic/port_v3/load_plugin.py
8
1414
# This file is part of the PySide project. # # Copyright (C) 2009-2011 Nokia Corporation and/or its subsidiary(-ies). # Copyright (C) 2010 Riverbank Computing Limited. # Copyright (C) 2009 Torsten Marek # # Contact: PySide team <pyside@openbossa.org> # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # version 2 as published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA from pysideuic.exceptions import WidgetPluginError def load_plugin(plugin, plugin_globals, plugin_locals): """ Load the given plugin (which is an open file). Return True if the plugin was loaded, or False if it wanted to be ignored. Raise an exception if there was an error. """ try: exec(plugin.read(), plugin_globals, plugin_locals) except ImportError: return False except Exception as e: raise WidgetPluginError("%s: %s" % (e.__class__, str(e))) return True
gpl-2.0
sam-m888/gramps
gramps/plugins/lib/librecords.py
5
24107
# encoding:utf-8 # # Gramps - a GTK+/GNOME based genealogy program - Records plugin # # Copyright (C) 2008-2011 Reinhard Müller # Copyright (C) 2010 Jakim Friant # Copyright (C) 2013-2016 Paul Franklin # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. # #------------------------------------------------------------------------ # # Standard Python modules # #------------------------------------------------------------------------ import datetime #------------------------------------------------------------------------ # # Gramps modules # #------------------------------------------------------------------------ from gramps.gen.const import GRAMPS_LOCALE as glocale _ = glocale.translation.sgettext from gramps.gen.lib import (ChildRefType, Date, Span, Name, StyledText, StyledTextTag, StyledTextTagType) from gramps.gen.display.name import displayer as name_displayer from gramps.gen.utils.alive import probably_alive from gramps.gen.proxy import LivingProxyDb #------------------------------------------------------------------------ # # List of records # #------------------------------------------------------------------------ def _T_(value): # enable deferred translations (see Python docs 22.1.3.4) return value # _T_ is a gramps-defined keyword -- see po/update_po.py and po/genpot.sh RECORDS = [ (_T_("Youngest living person"), 
'person_youngestliving', True), (_T_("Oldest living person"), 'person_oldestliving', True), (_T_("Person died at youngest age"), 'person_youngestdied', False), (_T_("Person died at oldest age"), 'person_oldestdied', True), (_T_("Person married at youngest age"), 'person_youngestmarried', True), (_T_("Person married at oldest age"), 'person_oldestmarried', True), (_T_("Person divorced at youngest age"), 'person_youngestdivorced', False), (_T_("Person divorced at oldest age"), 'person_oldestdivorced', False), (_T_("Youngest father"), 'person_youngestfather', True), (_T_("Youngest mother"), 'person_youngestmother', True), (_T_("Oldest father"), 'person_oldestfather', True), (_T_("Oldest mother"), 'person_oldestmother', True), (_T_("Father with most children"), 'person_mostkidsfather', False), (_T_("Mother with most children"), 'person_mostkidsmother', False), (_T_("Father with most grandchildren"), 'person_mostgrandkidsfather',False), (_T_("Mother with most grandchildren"), 'person_mostgrandkidsmother',False), (_T_("Couple with most children"), 'family_mostchildren', True), (_T_("Living couple married most recently"), 'family_youngestmarried',True), (_T_("Living couple married most long ago"), 'family_oldestmarried', True), (_T_("Shortest past marriage"), 'family_shortest', False), (_T_("Longest past marriage"), 'family_longest', True), (_T_("Couple with smallest age difference"), 'family_smallestagediff', True), (_T_("Couple with biggest age difference"), 'family_biggestagediff', True)] #------------------------------------------------------------------------ # # Global functions # #------------------------------------------------------------------------ def _good_date(date): return (date is not None and date.is_valid()) def _find_death_date(db, person): death_ref = person.get_death_ref() if death_ref: death = db.get_event_from_handle(death_ref.ref) return death.get_date_object() else: event_list = person.get_primary_event_ref_list() for event_ref in event_list: 
event = db.get_event_from_handle(event_ref.ref) if event.get_type().is_death_fallback(): return event.get_date_object() return None def find_records(db, filter, top_size, callname, trans_text=glocale.translation.sgettext, name_format=None, living_mode=LivingProxyDb.MODE_INCLUDE_ALL, user=None): """ @param trans_text: allow deferred translation of strings @type trans_text: a GrampsLocale sgettext instance trans_text is a defined keyword (see po/update_po.py, po/genpot.sh) :param name_format: optional format to control display of person's name :type name_format: None or int :param living_mode: enable optional control of living people's records :type living_mode: int """ def get_unfiltered_person_from_handle(person_handle): if living_mode == LivingProxyDb.MODE_INCLUDE_ALL: return db.get_person_from_handle(person_handle) else: # we are in the proxy so get the person before proxy changes return db.get_unfiltered_person(person_handle) today = datetime.date.today() today_date = Date(today.year, today.month, today.day) # Person records person_youngestliving = [] person_oldestliving = [] person_youngestdied = [] person_oldestdied = [] person_youngestmarried = [] person_oldestmarried = [] person_youngestdivorced = [] person_oldestdivorced = [] person_youngestfather = [] person_youngestmother = [] person_oldestfather = [] person_oldestmother = [] person_mostkidsfather = [] person_mostkidsmother = [] person_mostgrandkidsfather = [] person_mostgrandkidsmother = [] person_handle_list = db.iter_person_handles() # the next "if" will turn person_handle_list from a generator into a # list, but only when this code is called from a report (which has a # filter) and not when called from a gramplet (which has no filter); # so the next line drains the generator and turns it into a list # always, so the gramplet can use it later, in the second loop person_handle_list = list(person_handle_list) if filter: person_handle_list = filter.apply(db, person_handle_list, user=user) for 
person_handle in person_handle_list: person = db.get_person_from_handle(person_handle) unfil_person = get_unfiltered_person_from_handle(person_handle) if person is None: continue # FIXME this should check for a "fallback" birth also/instead birth_ref = person.get_birth_ref() if not birth_ref: # No birth event, so we can't calculate any age. continue birth = db.get_event_from_handle(birth_ref.ref) birth_date = birth.get_date_object() death_date = _find_death_date(db, person) if not _good_date(birth_date): # Birth date unknown or incomplete, so we can't calculate any age. continue name = _get_styled_primary_name(person, callname, trans_text=trans_text, name_format=name_format) if death_date is None: if probably_alive(unfil_person, db): # Still living, look for age records _record(person_youngestliving, person_oldestliving, today_date - birth_date, name, 'Person', person_handle, top_size) elif _good_date(death_date): # Already died, look for age records _record(person_youngestdied, person_oldestdied, death_date - birth_date, name, 'Person', person_handle, top_size) for family_handle in person.get_family_handle_list(): family = db.get_family_from_handle(family_handle) marriage_date = None divorce_date = None for event_ref in family.get_event_ref_list(): event = db.get_event_from_handle(event_ref.ref) if (event.get_type().is_marriage() and (event_ref.get_role().is_family() or event_ref.get_role().is_primary())): marriage_date = event.get_date_object() elif (event.get_type().is_divorce() and (event_ref.get_role().is_family() or event_ref.get_role().is_primary())): divorce_date = event.get_date_object() if _good_date(marriage_date): _record(person_youngestmarried, person_oldestmarried, marriage_date - birth_date, name, 'Person', person_handle, top_size) if _good_date(divorce_date): _record(person_youngestdivorced, person_oldestdivorced, divorce_date - birth_date, name, 'Person', person_handle, top_size) for child_ref in family.get_child_ref_list(): if person.get_gender() 
== person.MALE: relation = child_ref.get_father_relation() elif person.get_gender() == person.FEMALE: relation = child_ref.get_mother_relation() else: continue if relation != ChildRefType.BIRTH: continue child = db.get_person_from_handle(child_ref.ref) # FIXME this should check for a "fallback" birth also/instead child_birth_ref = child.get_birth_ref() if not child_birth_ref: continue child_birth = db.get_event_from_handle(child_birth_ref.ref) child_birth_date = child_birth.get_date_object() if not _good_date(child_birth_date): continue if person.get_gender() == person.MALE: _record(person_youngestfather, person_oldestfather, child_birth_date - birth_date, name, 'Person', person_handle, top_size) elif person.get_gender() == person.FEMALE: _record(person_youngestmother, person_oldestmother, child_birth_date - birth_date, name, 'Person', person_handle, top_size) for person_handle in person_handle_list: # this "person loop" doesn't care about a person's birth or death person = db.get_person_from_handle(person_handle) if person is None: continue name = _get_styled_primary_name(person, callname, trans_text=trans_text, name_format=name_format) person_child_list = get_birth_children(db, person) if person.get_gender() == person.MALE: _record(None, person_mostkidsfather, len(person_child_list), name, 'Person', person_handle, top_size) elif person.get_gender() == person.FEMALE: _record(None, person_mostkidsmother, len(person_child_list), name, 'Person', person_handle, top_size) person_grandchild_list = [] for child in person_child_list: person_grandchild_list += get_birth_children(db, child) if person.get_gender() == person.MALE: _record(None, person_mostgrandkidsfather, len(person_grandchild_list), name, 'Person', person_handle, top_size) elif person.get_gender() == person.FEMALE: _record(None, person_mostgrandkidsmother, len(person_grandchild_list), name, 'Person', person_handle, top_size) # Family records family_mostchildren = [] family_youngestmarried = [] 
family_oldestmarried = [] family_shortest = [] family_longest = [] family_smallestagediff = [] family_biggestagediff = [] for family in db.iter_families(): #family = db.get_family_from_handle(family_handle) if living_mode != LivingProxyDb.MODE_INCLUDE_ALL: # FIXME no iter_families method in LivingProxyDb so do it this way family = db.get_family_from_handle(family.get_handle()) father_handle = family.get_father_handle() if not father_handle: continue mother_handle = family.get_mother_handle() if not mother_handle: continue # Test if either father or mother are in filter if filter: # we don't want many progress reports popping up, so no user=user if not filter.apply(db, [father_handle, mother_handle]): continue father = db.get_person_from_handle(father_handle) unfil_father = get_unfiltered_person_from_handle(father_handle) if father is None: continue mother = db.get_person_from_handle(mother_handle) unfil_mother = get_unfiltered_person_from_handle(mother_handle) if mother is None: continue name = StyledText(trans_text("%(father)s and %(mother)s")) % { 'father': _get_styled_primary_name(father, callname, trans_text=trans_text, name_format=name_format), 'mother': _get_styled_primary_name(mother, callname, trans_text=trans_text, name_format=name_format)} if (living_mode == LivingProxyDb.MODE_INCLUDE_ALL or (not probably_alive(unfil_father, db) and not probably_alive(unfil_mother, db))): _record(None, family_mostchildren, len(family.get_child_ref_list()), name, 'Family', family.handle, top_size) father_birth_ref = father.get_birth_ref() if father_birth_ref: father_birth_date = db.get_event_from_handle(father_birth_ref.ref).get_date_object() else: father_birth_date = None mother_birth_ref = mother.get_birth_ref() if mother_birth_ref: mother_birth_date = db.get_event_from_handle(mother_birth_ref.ref).get_date_object() else: mother_birth_date = None if _good_date(father_birth_date) and _good_date(mother_birth_date): if father_birth_date >> mother_birth_date: 
_record(family_smallestagediff, family_biggestagediff, father_birth_date - mother_birth_date, name, 'Family', family.handle, top_size) elif mother_birth_date >> father_birth_date: _record(family_smallestagediff, family_biggestagediff, mother_birth_date - father_birth_date, name, 'Family', family.handle, top_size) marriage_date = None divorce = None divorce_date = None for event_ref in family.get_event_ref_list(): event = db.get_event_from_handle(event_ref.ref) if (event.get_type().is_marriage() and (event_ref.get_role().is_family() or event_ref.get_role().is_primary())): marriage_date = event.get_date_object() if (event and event.get_type().is_divorce() and (event_ref.get_role().is_family() or event_ref.get_role().is_primary())): divorce = event divorce_date = event.get_date_object() father_death_date = _find_death_date(db, father) mother_death_date = _find_death_date(db, mother) if not _good_date(marriage_date): # Not married or marriage date unknown continue if divorce is not None and not _good_date(divorce_date): # Divorced but date unknown or inexact continue if (not probably_alive(unfil_father, db) and not _good_date(father_death_date)): # Father died but death date unknown or inexact continue if (not probably_alive(unfil_mother, db) and not _good_date(mother_death_date)): # Mother died but death date unknown or inexact continue if (divorce_date is None and father_death_date is None and mother_death_date is None): # Still married and alive if (probably_alive(unfil_father, db) and probably_alive(unfil_mother, db)): _record(family_youngestmarried, family_oldestmarried, today_date - marriage_date, name, 'Family', family.handle, top_size) elif (_good_date(divorce_date) or _good_date(father_death_date) or _good_date(mother_death_date)): end = None if _good_date(father_death_date) and _good_date(mother_death_date): end = min(father_death_date, mother_death_date) elif _good_date(father_death_date): end = father_death_date elif _good_date(mother_death_date): end = 
mother_death_date if _good_date(divorce_date): if end: end = min(end, divorce_date) else: end = divorce_date duration = end - marriage_date _record(family_shortest, family_longest, duration, name, 'Family', family.handle, top_size) #python 3 workaround: assign locals to tmp so we work with runtime version tmp = locals() return [(trans_text(text), varname, tmp[varname]) for (text, varname, default) in RECORDS] def _record(lowest, highest, value, text, handle_type, handle, top_size): if value < 0: # ignore erroneous data return # (since the data-verification tool already finds it) if isinstance(value, Span): low_value = value.minmax[0] high_value = value.minmax[1] else: low_value = value high_value = value if lowest is not None: lowest.append((high_value, value, text, handle_type, handle)) lowest.sort(key=lambda a: a[0]) # FIXME: Ist das lambda notwendig? for i in range(top_size, len(lowest)): if lowest[i-1][0] < lowest[i][0]: del lowest[i:] break if highest is not None: highest.append((low_value, value, text, handle_type, handle)) highest.sort(reverse=True) for i in range(top_size, len(highest)): if highest[i-1][0] > highest[i][0]: del highest[i:] break def get_birth_children(db, person): """ return all the birth children of a person, in a list """ person_child_list = [] for family_handle in person.get_family_handle_list(): family = db.get_family_from_handle(family_handle) for child_ref in family.get_child_ref_list(): if person.get_gender() == person.MALE: relation = child_ref.get_father_relation() elif person.get_gender() == person.FEMALE: relation = child_ref.get_mother_relation() else: continue # no records are kept for unknown-sex parents if relation != ChildRefType.BIRTH: continue # only count birth children child = db.get_person_from_handle(child_ref.ref) if child not in person_child_list: person_child_list += [child] return person_child_list #------------------------------------------------------------------------ # # Reusable functions (could be methods of 
# gen.lib.*)
#
#------------------------------------------------------------------------

CALLNAME_DONTUSE = 0
CALLNAME_REPLACE = 1
CALLNAME_UNDERLINE_ADD = 2


def _get_styled(name, callname, placeholder=False,
                trans_text=glocale.translation.sgettext, name_format=None):
    """
    Return a StyledText object with the name formatted according to the
    parameters:

    @param callname: whether the callname should be used instead of the
        first name (CALLNAME_REPLACE), underlined within the first name
        (CALLNAME_UNDERLINE_ADD) or not used at all (CALLNAME_DONTUSE).
    @param placeholder: whether a series of underscores should be inserted
        as a placeholder if first name or surname are missing.
    @param trans_text: allow deferred translation of strings
    @type trans_text: a GrampsLocale sgettext instance
    trans_text is a defined keyword (see po/update_po.py, po/genpot.sh)
    :param name_format: optional format to control display of person's name
    :type name_format: None or int
    """
    # Make a copy of the name object so we don't mess around with the real
    # data.
    n = Name(source=name)

    # Insert placeholders.
    if placeholder:
        if not n.first_name:
            n.first_name = "____________"
        if not n.surname:
            n.surname = "____________"

    if n.call:
        if callname == CALLNAME_REPLACE:
            # Replace first name with call name.
            n.first_name = n.call
        elif callname == CALLNAME_UNDERLINE_ADD:
            if n.call not in n.first_name:
                # Add call name to first name.
                # translators: used in French+Russian, ignore otherwise
                n.first_name = trans_text('"%(callname)s" (%(firstname)s)') % {
                    'callname': n.call,
                    'firstname': n.first_name }

    real_format = name_displayer.get_default_format()
    if name_format is not None:
        name_displayer.set_default_format(name_format)
    try:
        text = name_displayer.display_name(n)
    finally:
        # BUGFIX: restore the shared name_displayer's default format even if
        # display_name() raises, so an error here cannot leave every other
        # caller stuck with our temporary format override.
        name_displayer.set_default_format(real_format)

    tags = []
    if n.call:
        if callname == CALLNAME_UNDERLINE_ADD:
            # "name" in next line is on purpose: only underline the call name
            # if it was a part of the *original* first name
            if n.call in name.first_name:
                # Underline call name
                callpos = text.find(n.call)
                tags = [StyledTextTag(StyledTextTagType.UNDERLINE, True,
                                      [(callpos, callpos + len(n.call))])]

    return StyledText(text, tags)


def _get_styled_primary_name(person, callname, placeholder=False,
                             trans_text=glocale.translation.sgettext,
                             name_format=None):
    """
    Return a StyledText object with the person's name formatted according to
    the parameters:

    @param callname: whether the callname should be used instead of the
        first name (CALLNAME_REPLACE), underlined within the first name
        (CALLNAME_UNDERLINE_ADD) or not used at all (CALLNAME_DONTUSE).
    @param placeholder: whether a series of underscores should be inserted
        as a placeholder if first name or surname are missing.
    @param trans_text: allow deferred translation of strings
    @type trans_text: a GrampsLocale sgettext instance
    trans_text is a defined keyword (see po/update_po.py, po/genpot.sh)
    :param name_format: optional format to control display of person's name
    :type name_format: None or int
    """
    return _get_styled(person.get_primary_name(), callname,
                       trans_text=trans_text, placeholder=placeholder,
                       name_format=name_format)
gpl-2.0
ULHPC/easybuild-easyblocks
easybuild/easyblocks/a/anaconda.py
1
2886
## # Copyright 2009-2017 Ghent University # # This file is part of EasyBuild, # originally created by the HPC team of Ghent University (http://ugent.be/hpc/en), # with support of Ghent University (http://ugent.be/hpc), # the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be), # Flemish Research Foundation (FWO) (http://www.fwo.be/en) # and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en). # # http://github.com/hpcugent/easybuild # # EasyBuild is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation v2. # # EasyBuild is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with EasyBuild. If not, see <http://www.gnu.org/licenses/>. 
##
"""
EasyBuild support for building and installing Anaconda, implemented as an easyblock

@author: Jillian Rowe (New York University Abu Dhabi)
@author: Kenneth Hoste (HPC-UGent)
"""
import os
import stat

import easybuild.tools.environment as env
from easybuild.easyblocks.generic.binary import Binary
from easybuild.framework.easyconfig import CUSTOM
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.filetools import adjust_permissions, rmtree2
from easybuild.tools.run import run_cmd


class EB_Anaconda(Binary):
    """Support for building/installing Anaconda."""

    def install_step(self):
        """Copy all files in build directory to the install directory"""
        # the self-extracting installer is run with an explicit prefix (-p),
        # so wipe the install directory first to guarantee a clean install
        rmtree2(self.installdir)

        installer = self.src[0]['name']
        # make sure the installer script is readable and executable for us
        adjust_permissions(os.path.join(self.builddir, installer), stat.S_IRUSR | stat.S_IXUSR)

        # -b: batch (non-interactive) mode, -f: force install into prefix
        cmd = "%s ./%s -p %s -b -f" % (self.cfg['preinstallopts'], installer, self.installdir)
        self.log.info("Installing %s using command '%s'..." % (self.name, cmd))
        run_cmd(cmd, log_all=True, simple=True)

    def make_module_req_guess(self):
        """
        A dictionary of possible directories to look for.
        """
        return {
            'MANPATH': ['man', os.path.join('share', 'man')],
            'PATH': ['bin', 'sbin'],
            'PKG_CONFIG_PATH': [os.path.join(libdir, 'pkgconfig')
                                for libdir in ['lib', 'lib32', 'lib64', 'share']],
        }

    def sanity_check_step(self):
        """
        Custom sanity check for Anaconda
        """
        # a handful of key binaries plus the standard Anaconda directory layout
        custom_paths = {
            'files': [os.path.join('bin', binfile)
                      for binfile in ['2to3', 'conda', 'ipython', 'pydoc', 'python', 'sqlite3']],
            'dirs': ['bin', 'etc', 'lib', 'pkgs'],
        }
        super(EB_Anaconda, self).sanity_check_step(custom_paths=custom_paths)
gpl-2.0
ioannistsanaktsidis/invenio
modules/bibformat/web/admin/bibformatadmin.py
18
49800
## This file is part of Invenio. ## Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012 CERN. ## ## Invenio is free software; you can redistribute it and/or ## modify it under the terms of the GNU General Public License as ## published by the Free Software Foundation; either version 2 of the ## License, or (at your option) any later version. ## ## Invenio is distributed in the hope that it will be useful, but ## WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ## General Public License for more details. ## ## You should have received a copy of the GNU General Public License ## along with Invenio; if not, write to the Free Software Foundation, Inc., ## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. """Invenio BibFormat Administrator Interface.""" __revision__ = "$Id$" __lastupdated__ = """$Date$""" import MySQLdb from invenio import bibformatadminlib, \ bibformat_dblayer,\ bibformat_engine from invenio.bibformat import format_with_format_template from invenio.bibrankadminlib import check_user from invenio.webpage import page, error_page from invenio.webuser import getUid, page_not_authorized, collect_user_info from invenio.messages import wash_language, gettext_set_language from invenio.urlutils import wash_url_argument, redirect_to_url from invenio.search_engine import search_pattern, \ create_basic_search_units from invenio.bibformat_config import InvenioBibFormatError, InvenioBibFormatWarning from invenio.errorlib import register_exception from invenio.config import CFG_SITE_LANG, CFG_SITE_NAME, CFG_SITE_SECURE_URL def index(req, ln=CFG_SITE_LANG): """ Main BibFormat administration page. Displays a warning if we find out that etc/biformat dir is not writable by us (as most opeation of BibFormat must write in this directory). 
@param req: the request object @param ln: language @return: a web page """ warnings = [] ln = wash_language(ln) _ = gettext_set_language(ln) if not bibformatadminlib.can_write_etc_bibformat_dir(): try: raise InvenioBibFormatWarning(_('Cannot write in etc/bibformat dir of your Invenio installation. Check directory permission.')) except InvenioBibFormatWarning, exc: register_exception(stream='warning', req=req) warnings.append(exc.message) # Check if user is authorized to administer # If not, still display page but offer to log in try: uid = getUid(req) except: return error_page('Error', req) (auth_code, auth_msg) = check_user(req, 'cfgbibformat') if not auth_code: is_admin = True else: is_admin = False navtrail = '''<a class="navtrail" href="%s/help/admin">%s</a>''' % \ (CFG_SITE_SECURE_URL, _("Admin Area")) return page(title=_("BibFormat Admin"), body=bibformatadminlib.perform_request_index(ln=ln, warnings=warnings, is_admin=is_admin), language=ln, uid=uid, navtrail = navtrail, lastupdated=__lastupdated__, req=req) def output_formats_manage(req, ln=CFG_SITE_LANG, sortby="code"): """ Main page for output formats management. Check for authentication and print output formats list. 
@param req: the request object @param ln: language @param sortby: the sorting crieteria (can be 'code' or 'name') @return: a web page """ ln = wash_language(ln) _ = gettext_set_language(ln) navtrail_previous_links = bibformatadminlib.getnavtrail() try: uid = getUid(req) except: return error_page('Error', req) (auth_code, auth_msg) = check_user(req, 'cfgbibformat') if not auth_code: sortby = wash_url_argument(sortby, 'str') return page(title=_("Manage Output Formats"), body=bibformatadminlib.perform_request_output_formats_management(ln=ln, sortby=sortby), uid=uid, language=ln, navtrail = navtrail_previous_links, lastupdated=__lastupdated__, req=req) else: return page_not_authorized(req=req, text=auth_msg, navtrail=navtrail_previous_links) def output_format_show(req, bfo, ln=CFG_SITE_LANG, r_fld=[], r_val=[], r_tpl=[], default="", r_upd="", chosen_option="", **args): """ Show a single output format. Check for authentication and print output format settings. The page either shows the output format from file, or from user's POST session, as we want to let him edit the rules without saving. Policy is: r_fld, r_val, rules_tpl are list of attributes of the rules. If they are empty, load from file. Else use POST. The i th value of each list is one of the attributes of rule i. Rule i is the i th rule in order of evaluation. All list have the same number of item. r_upd contains an action that has to be performed on rules. It can composed of a number (i, the rule we want to modify) and an operator : "save" to save the rules, "add" or "del". syntax: operator [number] For eg: r_upd = _("Save Changes") saves all rules (no int should be specified). For eg: r_upd = _("Add New Rule") adds a rule (no int should be specified). For eg: r_upd = _("Remove Rule") + " 5" deletes rule at position 5. The number is used only for operation delete. An action can also be in **args. 
We must look there for string starting with '(+|-) [number]' to increase (+) or decrease (-) a rule given by its index (number). For example "+ 5" increase priority of rule 5 (put it at fourth position). The string in **args can be followed by some garbage that looks like .x or .y, as this is returned as the coordinate of the click on the <input type="image">. We HAVE to use args and reason on its keys, because for <input> of type image, iexplorer does not return the value of the tag, but only the name. Action is executed only if we are working from user's POST session (means we must have loaded the output format first, which is totally normal and expected behaviour) @param req: the request object @param bfo: the filename of the output format to show @param ln: language @param r_fld: the list of 'field' attribute for each rule @param r_val: the list of 'value' attribute for each rule @param r_tpl: the list of 'template' attribute for each rule @param default: the default format template used by this output format @param r_upd: the rule that we want to increase/decrease in order of evaluation @param chosen_option: emptry string when user has not yet confirmed to go on @return: a web page """ ln = wash_language(ln) _ = gettext_set_language(ln) navtrail_previous_links = bibformatadminlib.getnavtrail(''' &gt; <a class="navtrail" href="%s/admin/bibformat/bibformatadmin.py/output_formats_manage?ln=%s">%s</a>''' % (CFG_SITE_SECURE_URL, ln, _("Manage Output Formats"))) code = wash_url_argument(bfo, 'str') try: uid = getUid(req) except: return error_page('Error', req) (auth_code, auth_msg) = check_user(req, 'cfgbibformat') if not auth_code: bfo = wash_url_argument(bfo, 'str') default = wash_url_argument(default, 'str') r_upd = wash_url_argument(r_upd, 'str') if not bibformatadminlib.can_read_output_format(bfo): #No read permission try: raise InvenioBibFormatError(_('Output format %s cannot not be read. 
%s') % (bfo, "")) except InvenioBibFormatError, exc: register_exception(req=req) return page(title=_("Restricted Output Format"), body = """You don't have permission to view this output format.""", language=ln, navtrail = navtrail_previous_links, lastupdated=__lastupdated__, req=req) output_format = bibformat_engine.get_output_format(code=bfo, with_attributes=True) name = output_format['attrs']['names']['generic'] if name == "": name = bfo if not bibformatadminlib.can_write_output_format(bfo) and \ chosen_option == "":#No write permission return dialog_box(req=req, ln=ln, title="File Permission on %s" % name, message="You don't have write permission " \ "on <i>%s</i>.<br/> You can view the output " \ "format, but not edit it." % name, navtrail=navtrail_previous_links, options=[ _("Ok")]) return page(title=_('Output Format %s Rules' % name), body=bibformatadminlib.perform_request_output_format_show(bfo=bfo, ln=ln, r_fld=r_fld, r_val=r_val, r_tpl=r_tpl, default=default, r_upd=r_upd, args=args), uid=uid, language=ln, navtrail = navtrail_previous_links, lastupdated=__lastupdated__, req=req) else: return page_not_authorized(req=req, text=auth_msg, navtrail=navtrail_previous_links) def output_format_show_attributes(req, bfo, ln=CFG_SITE_LANG): """ Page for output format names and descrition attributes edition. 
@param req: the request object @param ln: language @param bfo: the filename of the template to show @return: a web page """ ln = wash_language(ln) _ = gettext_set_language(ln) navtrail_previous_links = bibformatadminlib.getnavtrail(''' &gt; <a class="navtrail" href="%s/admin/bibformat/bibformatadmin.py/output_formats_manage?ln=%s">%s</a>''' % (CFG_SITE_SECURE_URL, ln , _("Manage Output Formats"))) try: uid = getUid(req) except: return error_page('Error', req) (auth_code, auth_msg) = check_user(req, 'cfgbibformat') if not auth_code: bfo = wash_url_argument(bfo, 'str') if not bibformatadminlib.can_read_output_format(bfo): #No read permission try: raise InvenioBibFormatError(_('Output format %s cannot not be read. %s') % (bfo, "")) except InvenioBibFormatError, exc: register_exception(req=req) return page(title=_("Restricted Output Format"), body = """You don't have permission to view this output format.""", language=ln, navtrail = navtrail_previous_links, lastupdated=__lastupdated__, req=req) output_format = bibformat_engine.get_output_format(code=bfo, with_attributes=True) name = output_format['attrs']['names']['generic'] return page(title=_("Output Format %s Attributes" % name), body=bibformatadminlib.perform_request_output_format_show_attributes(bfo, ln=ln), uid=uid, language=ln, navtrail = navtrail_previous_links , lastupdated=__lastupdated__, req=req) else: return page_not_authorized(req=req, text=auth_msg) def output_format_show_dependencies(req, bfo, ln=CFG_SITE_LANG): """ Show the dependencies of the given output format. 
@param req: the request object @param ln: language @param bfo: the filename of the output format to show @return: a web page """ ln = wash_language(ln) _ = gettext_set_language(ln) navtrail_previous_links = bibformatadminlib.getnavtrail(''' &gt; <a class="navtrail" href="%s/admin/bibformat/bibformatadmin.py/format_templates_manage?ln=%s">%s </a>''' % (CFG_SITE_SECURE_URL, ln, _("Manage Output Formats"))) try: uid = getUid(req) except: return error_page('Error', req) (auth_code, auth_msg) = check_user(req, 'cfgbibformat') if not auth_code: bfo = wash_url_argument(bfo, 'str') if not bibformatadminlib.can_read_output_format(bfo): #No read permission try: raise InvenioBibFormatError(_('Output format %s cannot not be read. %s') % (bfo, "")) except InvenioBibFormatError, exc: register_exception(req=req) return page(title=_("Restricted Output Format"), body = """You don't have permission to view this output format.""", language=ln, navtrail = navtrail_previous_links, lastupdated=__lastupdated__, req=req) format_name = bibformat_engine.get_output_format_attrs(bfo)['names']['generic'] return page(title=_("Output Format %s Dependencies" % format_name), body=bibformatadminlib.perform_request_output_format_show_dependencies(bfo, ln=ln), uid=uid, language=ln, navtrail = navtrail_previous_links, lastupdated=__lastupdated__, req=req) else: return page_not_authorized(req=req, text=auth_msg) def output_format_update_attributes(req, bfo, ln=CFG_SITE_LANG, name = "", description="", code="", content_type="", names_trans=[], visibility="0"): """ Update the name, description and code of given output format @param req: the request object @param ln: language @param description: the new description @param name: the new name @param code: the new short code (== new bfo) of the output format @param content_type: the new content_type of the output format @param bfo: the filename of the output format to update @param names_trans: the translations in the same order as the languages from 
get_languages() @param visibility: the visibility of the output format in the output formats list (public pages) @return: a web page (or redirection to a web page) """ ln = wash_language(ln) _ = gettext_set_language(ln) try: uid = getUid(req) except: return error_page('Error', req) (auth_code, auth_msg) = check_user(req, 'cfgbibformat') if not auth_code: name = wash_url_argument(name, 'str') description = wash_url_argument(description, 'str') bfo = wash_url_argument(bfo, 'str') code = wash_url_argument(code, 'str') visibility = wash_url_argument(visibility, 'int') bfo = bibformatadminlib.update_output_format_attributes(bfo, name, description, code, content_type, names_trans, visibility) redirect_to_url(req, "output_format_show?ln=%(ln)s&bfo=%(bfo)s" % {'ln':ln, 'bfo':bfo, 'names_trans':names_trans}) else: return page_not_authorized(req=req, text=auth_msg) def output_format_delete(req, bfo, ln=CFG_SITE_LANG, chosen_option=""): """ Delete an output format @param req: the request object @param bfo: the filename of the output format to delete @param ln: language @param chosen_option: empty string when user has not yet confirmed, else "Delete" to apply @return: a web page (or redirection to a web page) """ ln = wash_language(ln) _ = gettext_set_language(ln) navtrail_previous_links = bibformatadminlib.getnavtrail(''' &gt; <a class="navtrail" href="%s/admin/bibformat/bibformatadmin.py/output_formats_manage?ln=%s">%s</a> &gt; %s''' % (CFG_SITE_SECURE_URL, ln, _("Manage Output Formats"), _("Delete Output Format"))) try: uid = getUid(req) except: return error_page('Error', req) (auth_code, auth_msg) = check_user(req, 'cfgbibformat') if not auth_code: #Ask confirmation to user if not already done chosen_option = wash_url_argument(chosen_option, 'str') if chosen_option == "": bfo = wash_url_argument(bfo, 'str') format_name = bibformat_dblayer.get_output_format_names(bfo)['generic'] return dialog_box(req=req, ln=ln, title="Delete %s"%format_name, message="Are you sure you want 
to" \ "delete output format <i>%s</i>?" % format_name, navtrail=navtrail_previous_links, options=[_("Cancel"), _("Delete")]) elif chosen_option==_("Delete"): bibformatadminlib.delete_output_format(bfo) redirect_to_url(req, "output_formats_manage?ln=%(ln)s"%{'ln':ln}) else: return page_not_authorized(req=req, text=auth_msg) def output_format_add(req, ln=CFG_SITE_LANG): """ Adds a new output format @param req: the request object @param ln: language @return: a web page (or redirection to a web page) """ ln = wash_language(ln) _ = gettext_set_language(ln) try: uid = getUid(req) except: return error_page('Error', req) (auth_code, auth_msg) = check_user(req, 'cfgbibformat') if not auth_code: bfo = bibformatadminlib.add_output_format() if bfo == None: return page(title=_("Cannot create output format"), body = """BibFormat cannot add an output format. Check output formats directory permissions.""", language=ln, lastupdated=__lastupdated__, req=req) redirect_to_url(req, "output_format_show_attributes?ln=%(ln)s&bfo=%(bfo)s" % {'ln':ln, 'bfo':bfo}) else: return page_not_authorized(req=req, text=auth_msg) def format_templates_manage(req, ln=CFG_SITE_LANG, checking='0'): """ Main page for formats templates management. Check for authentication and print formats list. @param req: the request object @param ln: language @param checking: if 0, basic checking. 
Else perform extensive checking (time-consuming) @return: a web page """ ln = wash_language(ln) _ = gettext_set_language(ln) navtrail_previous_links = bibformatadminlib.getnavtrail() try: uid = getUid(req) except: return error_page('Error', req) (auth_code, auth_msg) = check_user(req, 'cfgbibformat') if not auth_code: checking_level = wash_url_argument(checking, 'int') return page(title=_("Manage Format Templates"), body=bibformatadminlib.perform_request_format_templates_management(ln=ln, checking=checking_level), uid=uid, language=ln, navtrail = navtrail_previous_links, lastupdated=__lastupdated__, req=req) else: return page_not_authorized(req=req, text=auth_msg, navtrail=navtrail_previous_links) def format_template_show(req, bft, code=None, ln=CFG_SITE_LANG, ln_for_preview=CFG_SITE_LANG, pattern_for_preview="", content_type_for_preview="text/html", chosen_option=""): """ Main page for template edition. Check for authentication and print formats editor. @param req: the request object @param ln: language @param code: the code being edited @param bft: the name of the template to show @param ln_for_preview: the language for the preview (for bfo) @param pattern_for_preview: the search pattern to be used for the preview (for bfo) @param content_type_for_preview: the (MIME) content type of the preview @param chosen_option: returned value for dialog_box warning @return: a web page """ ln = wash_language(ln) _ = gettext_set_language(ln) navtrail_previous_links = bibformatadminlib.getnavtrail(''' &gt; <a class="navtrail" href="%s/admin/bibformat/bibformatadmin.py/format_templates_manage?ln=%s">%s</a>''' % (CFG_SITE_SECURE_URL, ln , _("Manage Format Templates"))) try: uid = getUid(req) except: return error_page('Error', req) (auth_code, auth_msg) = check_user(req, 'cfgbibformat') if not auth_code: format_template = wash_url_argument(bft, 'str') ln_preview = wash_language(ln_for_preview) pattern_preview = wash_url_argument(pattern_for_preview, 'str') if not 
bibformatadminlib.can_read_format_template(bft): #No read permission try: raise InvenioBibFormatError(_('Format template %s cannot not be read. %s') % (format_template, "")) except InvenioBibFormatError, exc: register_exception(req=req) return page(title=_("Restricted Format Template"), body = """You don't have permission to view this format template.""", language=ln, navtrail = navtrail_previous_links, lastupdated=__lastupdated__, req=req) format_name = bibformat_engine.get_format_template_attrs(bft)['name'] if not bibformatadminlib.can_write_format_template(bft) and \ chosen_option == "": #No write permission return dialog_box(req=req, ln=ln, title="File Permission on %s" % format_name, message="You don't have write permission " \ "on <i>%s</i>.<br/> You can view the template" \ ", but not edit it." % format_name, navtrail=navtrail_previous_links, options=[ _("Ok")]) if bft.endswith('.xsl'): format_name += ' (XSL)' return page(title=_("Format Template %s"%format_name), body=bibformatadminlib.perform_request_format_template_show(format_template, code=code, ln=ln, ln_for_preview=ln_preview, pattern_for_preview=pattern_preview, content_type_for_preview=content_type_for_preview), uid=uid, language=ln, navtrail = navtrail_previous_links, lastupdated=__lastupdated__, req=req) else: return page_not_authorized(req=req, text=auth_msg, navtrail=navtrail_previous_links) def format_template_show_attributes(req, bft, ln=CFG_SITE_LANG, new=0): """ Page for template name and descrition attributes edition. This is also the first page shown when a format template has just been added. In that case new is different from False and we can offer specific option to user (for ex let him make a duplicate of existing template). 
@param req: the request object @param ln: language @param bft: the name of the template to show @param new: if "False", the template has not just been added @return: a web page """ ln = wash_language(ln) _ = gettext_set_language(ln) navtrail_previous_links = bibformatadminlib.getnavtrail(''' &gt; <a class="navtrail" href="%s/admin/bibformat/bibformatadmin.py/format_templates_manage?ln=%s">%s</a>''' % (CFG_SITE_SECURE_URL, ln, _("Manage Format Templates"))) try: uid = getUid(req) except: return error_page('Error', req) (auth_code, auth_msg) = check_user(req, 'cfgbibformat') if not auth_code: format_template = wash_url_argument(bft, 'str') format_name = bibformat_engine.get_format_template_attrs(bft)['name'] is_new = wash_url_argument(new, 'int') if not bibformatadminlib.can_read_format_template(bft): #No read permission try: raise InvenioBibFormatError(_('Format template %s cannot not be read. %s') % (format_template, "")) except InvenioBibFormatError, exc: register_exception(req=req) return page(title=_("Restricted Format Template"), body = """You don't have permission to view this format template.""", language=ln, navtrail = navtrail_previous_links, lastupdated=__lastupdated__, req=req) return page(title=_("Format Template %s Attributes"%format_name), body=bibformatadminlib.perform_request_format_template_show_attributes(bft, ln=ln, new=is_new), uid=uid, language=ln, navtrail = navtrail_previous_links , lastupdated=__lastupdated__, req=req) else: return page_not_authorized(req=req, text=auth_msg) def format_template_show_dependencies(req, bft, ln=CFG_SITE_LANG): """ Show the dependencies (on elements) of the given format. 
@param req: the request object @param ln: language @param bft: the filename of the template to show @return: a web page """ ln = wash_language(ln) _ = gettext_set_language(ln) navtrail_previous_links = bibformatadminlib.getnavtrail(''' &gt; <a class="navtrail" href="%s/admin/bibformat/bibformatadmin.py/format_templates_manage?ln=%s">%s</a>''' % (CFG_SITE_SECURE_URL, ln, _("Manage Format Templates"))) try: uid = getUid(req) except: return error_page('Error', req) (auth_code, auth_msg) = check_user(req, 'cfgbibformat') if not auth_code: format_template = wash_url_argument(bft, 'str') format_name = bibformat_engine.get_format_template_attrs(bft)['name'] return page(title=_("Format Template %s Dependencies" % format_name), body=bibformatadminlib.perform_request_format_template_show_dependencies(bft, ln=ln), uid=uid, language=ln, navtrail = navtrail_previous_links, lastupdated=__lastupdated__, req=req) else: return page_not_authorized(req=req, text=auth_msg) def format_template_update_attributes(req, bft, ln=CFG_SITE_LANG, name = "", description="", duplicate=None): """ Update the name and description of given format template @param req: the request object @param ln: language @param description: the new description @param name: the new name @param bft: the filename of the template to update @param duplicate: the filename of template that we want to copy (the code) @return: a web page (or redirection to a web page) """ ln = wash_language(ln) _ = gettext_set_language(ln) try: uid = getUid(req) except: return error_page('Error', req) (auth_code, auth_msg) = check_user(req, 'cfgbibformat') if not auth_code: if duplicate is not None: duplicate = wash_url_argument(duplicate, 'str') name = wash_url_argument(name, 'str') description = wash_url_argument(description, 'str') bft = bibformatadminlib.update_format_template_attributes(bft, name, description, duplicate) redirect_to_url(req, "format_template_show?ln=%(ln)s&bft=%(bft)s" % {'ln':ln, 'bft':bft}) else: return 
page_not_authorized(req=req, text=auth_msg) def format_template_delete(req, bft, ln=CFG_SITE_LANG, chosen_option=""): """ Delete a format template @param req: the request object @param bft: the filename of the template to delete @param ln: language @param chosen_option: empty string when user has not yet confirm. Else "Delete" to confirm @return: a web page (or redirection to a web page) """ ln = wash_language(ln) _ = gettext_set_language(ln) navtrail_previous_links = bibformatadminlib.getnavtrail(''' &gt; <a class="navtrail" href="%s/admin/bibformat/bibformatadmin.py/format_templates_manage?ln=%s">%s</a> &gt; %s''' % (CFG_SITE_SECURE_URL, ln ,_("Manage Format Templates"),_("Delete Format Template"))) try: uid = getUid(req) except: return error_page('Error', req) (auth_code, auth_msg) = check_user(req, 'cfgbibformat') if not auth_code: #Ask confirmation to user if not already done chosen_option = wash_url_argument(chosen_option, 'str') if chosen_option == "": format_template = wash_url_argument(bft, 'str') format_name = bibformat_engine.get_format_template_attrs(bft)['name'] return dialog_box(req=req, ln=ln, title="Delete %s" % format_name, message="Are you sure you want to delete" \ "format template <i>%s</i>?" 
% format_name, navtrail=navtrail_previous_links, options=[_("Cancel"), _("Delete")]) elif chosen_option==_("Delete"): bibformatadminlib.delete_format_template(bft) redirect_to_url(req, "format_templates_manage?ln=%(ln)s" % {'ln':ln}) else: return page_not_authorized(req=req, text=auth_msg) def format_template_add(req, ln=CFG_SITE_LANG): """ Adds a new format template @param req: the request object @param ln: language @return: a web page (or redirection to a web page) """ ln = wash_language(ln) _ = gettext_set_language(ln) try: uid = getUid(req) except: return error_page('Error', req) (auth_code, auth_msg) = check_user(req, 'cfgbibformat') if not auth_code: bft = bibformatadminlib.add_format_template() redirect_to_url(req, "format_template_show_attributes?ln=%(ln)s&bft=%(bft)s&new=1" % {'ln':ln, 'bft':bft}) else: return page_not_authorized(req=req, text=auth_msg) def format_template_show_preview_or_save(req, bft, ln=CFG_SITE_LANG, code=None, ln_for_preview=CFG_SITE_LANG, pattern_for_preview="", content_type_for_preview='text/html', save_action=None, navtrail=""): """ Print the preview of a record with a format template. To be included inside Format template editor. 
If the save_action has a value, then the code should also be saved at the same time @param req: the request object @param code: the code of a template to use for formatting @param ln: language @param ln_for_preview: the language for the preview (for bfo) @param pattern_for_preview: the search pattern to be used for the preview (for bfo) @param content_type_for_preview: the content-type to use to serve the preview page @param save_action: has a value if the code has to be saved @param bft: the filename of the template to save @param navtrail: navigation trail @return: a web page """ ln = wash_language(ln) _ = gettext_set_language(ln) (auth_code, auth_msg) = check_user(req, 'cfgbibformat') if not auth_code: user_info = collect_user_info(req) uid = user_info['uid'] bft = wash_url_argument(bft, 'str') if save_action is not None and code is not None: #save bibformatadminlib.update_format_template_code(bft, code=code) bibformat_engine.clear_caches() if code is None: code = bibformat_engine.get_format_template(bft)['code'] ln_for_preview = wash_language(ln_for_preview) pattern_for_preview = wash_url_argument(pattern_for_preview, 'str') if pattern_for_preview == "": try: recID = search_pattern(p='-collection:DELETED').pop() except KeyError: return page(title="No Document Found", body="", uid=uid, language=ln_for_preview, navtrail = "", lastupdated=__lastupdated__, req=req, navmenuid='search') pattern_for_preview = "recid:%s" % recID else: try: recID = search_pattern(p=pattern_for_preview + \ ' -collection:DELETED').pop() except KeyError: return page(title="No Record Found for %s" % pattern_for_preview, body="", uid=uid, language=ln_for_preview, navtrail = "", lastupdated=__lastupdated__, req=req) units = create_basic_search_units(None, pattern_for_preview, None) keywords = [unit[1] for unit in units if unit[0] != '-'] bfo = bibformat_engine.BibFormatObject(recID = recID, ln = ln_for_preview, search_pattern = keywords, xml_record = None, user_info = user_info) body = 
format_with_format_template(bft, bfo, verbose=7, format_template_code=code) if content_type_for_preview == 'text/html': #Standard page display with CDS headers, etc. return page(title="", body=body, uid=uid, language=ln_for_preview, navtrail = navtrail, lastupdated=__lastupdated__, req=req, navmenuid='search') else: #Output with chosen content-type. req.content_type = content_type_for_preview req.send_http_header() req.write(body) else: return page_not_authorized(req=req, text=auth_msg) def format_template_show_short_doc(req, ln=CFG_SITE_LANG, search_doc_pattern=""): """ Prints the format elements documentation in a brief way. To be included inside Format template editor. @param req: the request object @param ln: language @param search_doc_pattern: a search pattern that specified which elements to display @return: a web page """ ln = wash_language(ln) _ = gettext_set_language(ln) try: uid = getUid(req) except: return error_page('Error', req) (auth_code, auth_msg) = check_user(req, 'cfgbibformat') if not auth_code: search_doc_pattern = wash_url_argument(search_doc_pattern, 'str') return bibformatadminlib.perform_request_format_template_show_short_doc(ln=ln, search_doc_pattern=search_doc_pattern) else: return page_not_authorized(req=req, text=auth_msg) def format_elements_doc(req, ln=CFG_SITE_LANG): """ Main page for format elements documentation. Check for authentication and print format elements list. 
@param req: the request object @param ln: language @return: a web page """ ln = wash_language(ln) _ = gettext_set_language(ln) navtrail_previous_links = bibformatadminlib.getnavtrail() try: uid = getUid(req) except: return error_page('Error', req) (auth_code, auth_msg) = check_user(req, 'cfgbibformat') if not auth_code: return page(title=_("Format Elements Documentation"), body=bibformatadminlib.perform_request_format_elements_documentation(ln=ln), uid=uid, language=ln, navtrail = navtrail_previous_links, lastupdated=__lastupdated__, req=req) else: return page_not_authorized(req=req, text=auth_msg, navtrail=navtrail_previous_links) def format_element_show_dependencies(req, bfe, ln=CFG_SITE_LANG): """ Shows format element dependencies @param req: the request object @param req: the request object @param bfe: the name of the bfe to show @param ln: language @return: a web page """ ln = wash_language(ln) _ = gettext_set_language(ln) navtrail_previous_links = bibformatadminlib.getnavtrail(''' &gt; <a class="navtrail" href="%s/admin/bibformat/bibformatadmin.py/format_elements_doc?ln=%s">%s</a>''' % (CFG_SITE_SECURE_URL, ln , _("Format Elements Documentation"))) try: uid = getUid(req) except: return error_page('Error', req) (auth_code, auth_msg) = check_user(req, 'cfgbibformat') if not auth_code: bfe = wash_url_argument(bfe, 'str') return page(title=_("Format Element %s Dependencies" % bfe), body=bibformatadminlib.perform_request_format_element_show_dependencies(bfe=bfe, ln=ln), uid=uid, language=ln, navtrail = navtrail_previous_links, lastupdated=__lastupdated__, req=req) else: return page_not_authorized(req=req, text=auth_msg, navtrail=navtrail_previous_links) def format_element_test(req, bfe, ln=CFG_SITE_LANG, param_values=None): """ Allows user to test element with different parameters and check output 'param_values' is the list of values to pass to 'format' function of the element as parameters, in the order ... 
If params is None, this means that they have not be defined by user yet. @param req: the request object @param bfe: the name of the element to test @param ln: language @param param_values: the list of parameters to pass to element format function @return: a web page """ ln = wash_language(ln) _ = gettext_set_language(ln) navtrail_previous_links = bibformatadminlib.getnavtrail(''' &gt; <a class="navtrail" href="%s/admin/bibformat/bibformatadmin.py/format_elements_doc?ln=%s">%s</a>''' %( CFG_SITE_SECURE_URL, ln , _("Format Elements Documentation"))) (auth_code, auth_msg) = check_user(req, 'cfgbibformat') if not auth_code: bfe = wash_url_argument(bfe, 'str') user_info = collect_user_info(req) uid = user_info['uid'] return page(title=_("Test Format Element %s" % bfe), body=bibformatadminlib.perform_request_format_element_test(bfe=bfe, ln=ln, param_values=param_values, user_info=user_info), uid=uid, language=ln, navtrail = navtrail_previous_links, lastupdated=__lastupdated__, req=req) else: return page_not_authorized(req=req, text=auth_msg, navtrail=navtrail_previous_links) def validate_format(req, ln=CFG_SITE_LANG, bfo=None, bft=None, bfe=None): """ Returns a page showing the status of an output format or format template or format element. This page is called from output formats management page or format template management page or format elements documentation. The page only shows the status of one of the format, depending on the specified one. If multiple are specified, shows the first one. 
@param req: the request object @param ln: language @param bfo: an output format 6 chars code @param bft: a format element filename @param bfe: a format element name @return: a web page """ ln = wash_language(ln) _ = gettext_set_language(ln) try: uid = getUid(req) except: return error_page('Error', req) (auth_code, auth_msg) = check_user(req, 'cfgbibformat') if not auth_code: if bfo is not None: #Output format validation bfo = wash_url_argument(bfo, 'str') navtrail_previous_links = bibformatadminlib.getnavtrail(''' &gt; <a class="navtrail" href="%s/admin/bibformat/bibformatadmin.py/output_formats_manage?ln=%s">%s</a>'''%(CFG_SITE_SECURE_URL, ln, _("Manage Output Formats"))) if not bibformatadminlib.can_read_output_format(bfo): #No read permission try: raise InvenioBibFormatError(_('Output format %s cannot not be read. %s') % (bfo, "")) except InvenioBibFormatError, exc: register_exception(req=req) return page(title=_("Restricted Output Format"), body = """You don't have permission to view this output format.""", language=ln, navtrail = navtrail_previous_links, lastupdated=__lastupdated__, req=req) output_format = bibformat_engine.get_output_format(code=bfo, with_attributes=True) name = output_format['attrs']['names']['generic'] title = _("Validation of Output Format %s" % name) elif bft is not None: #Format template validation bft = wash_url_argument(bft, 'str') navtrail_previous_links = bibformatadminlib.getnavtrail(''' &gt; <a class="navtrail" href="%s/admin/bibformat/bibformatadmin.py/format_templates_manage?ln=%s">%s</a>''' % (CFG_SITE_SECURE_URL, ln, _("Manage Format Templates"))) if not bibformatadminlib.can_read_format_template(bft): #No read permission try: raise InvenioBibFormatError(_('Format template %s cannot not be read. 
%s') % (bft, "")) except InvenioBibFormatError, exc: register_exception(req=req) return page(title=_("Restricted Format Template"), body = """You don't have permission to view this format template.""", language=ln, navtrail = navtrail_previous_links, lastupdated=__lastupdated__, req=req) name = bibformat_engine.get_format_template_attrs(bft)['name'] title = _("Validation of Format Template %s" % name) elif bfe is not None: #Format element validation bfe = wash_url_argument(bfe, 'str') navtrail_previous_links = bibformatadminlib.getnavtrail(''' &gt; <a class="navtrail" href="%s/admin/bibformat/bibformatadmin.py/format_elements_doc?ln=%s#%s">%s</a>''' % (CFG_SITE_SECURE_URL, ln , bfe.upper() , _("Format Elements Documentation"))) if not bibformatadminlib.can_read_format_element(bfe) and \ not bibformat_dblayer.tag_exists_for_name(bfe): #No read permission try: raise InvenioBibFormatError(_('Format element %s cannot not be read. %s') % (bfe, "")) except InvenioBibFormatError, exc: register_exception(req=req) return page(title=_("Restricted Format Element"), body = """You don't have permission to view this format element.""", language=ln, navtrail = navtrail_previous_links, lastupdated=__lastupdated__, req=req) title = _("Validation of Format Element %s" % bfe) else: #No format specified try: raise InvenioBibFormatError(_('No format specified for validation. 
Please specify one.')) except InvenioBibFormatError, exc: register_exception(req=req) return page(title=_("Format Validation"), body="No format has been specified.", uid=uid, language=ln, navtrail = navtrail_previous_links, lastupdated=__lastupdated__, req=req) return page(title=title, body=bibformatadminlib.perform_request_format_validate(ln=ln, bfo=bfo, bft=bft, bfe=bfe), uid=uid, language=ln, navtrail = navtrail_previous_links, lastupdated=__lastupdated__, req=req) else: navtrail_previous_links = bibformatadminlib.getnavtrail(''' &gt; <a class="navtrail" href="%s/admin/bibformat/bibformatadmin.py/?ln=%s'''%(CFG_SITE_SECURE_URL, ln)) return page_not_authorized(req=req, text=auth_msg, navtrail=navtrail_previous_links) def download_dreamweaver_floater(req): """ Trigger download of a BibFormat palette for Dreamweaver. @param req: the request object @return: the palette code to be used within Dreamweaver """ #bibformat_templates = invenio.template.load('bibformat') req.content_type = 'text/html' req.headers_out["Content-Disposition"] = "attachment; filename=BibFormat_floater.html" req.send_http_header() req.write(bibformatadminlib.perform_request_dreamweaver_floater()) def dialog_box(req, url="", ln=CFG_SITE_LANG, navtrail="", title="", message="", options=[]): """ Returns a dialog box with a given title, message and options. Used for asking confirmation on actions. The page that will receive the result must take 'chosen_option' as parameter. 
@param req: the request object @param url: the url used to submit the options chosen by the user @param ln: language @param navtrail: navigation trail @param title: title of the page/dialog @param message: message to display in the dialog box @param options: the list of labels for the buttons given as choice to user @return: a dialog page """ import invenio bibformat_templates = invenio.template.load('bibformat') return page(title="", body = bibformat_templates.tmpl_admin_dialog_box(url, title, message, options), language=ln, lastupdated=__lastupdated__, navtrail=navtrail, req=req)
gpl-2.0
hkawasaki/kawasaki-aio8-2
lms/djangoapps/licenses/views.py
22
2712
"""Views for per-course software license serial numbers (edX licenses app).

Renders each user's license serials per course and serves an AJAX endpoint
that looks up (or generates) a serial for one piece of course software.
"""
import logging
import json
import re
from urlparse import urlparse
from collections import namedtuple, defaultdict

from edxmako.shortcuts import render_to_string

from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from django.http import HttpResponse, Http404
from django.views.decorators.csrf import requires_csrf_token

from licenses.models import CourseSoftware
from licenses.models import get_courses_licenses, get_or_create_license, get_license

log = logging.getLogger("edx.licenses")

# Lightweight pairing of a CourseSoftware row with its serial (or None).
License = namedtuple('License', 'software serial')


def get_licenses_by_course(user, courses):
    """Return {course_id: rendered-HTML} of the user's license serials.

    Fetches all licenses for ``user`` across ``courses``, creates any that
    are missing, groups them by course, and renders each group through the
    ``licenses/serial_numbers.html`` template.
    """
    licenses = get_courses_licenses(user, courses)
    licenses_by_course = defaultdict(list)
    # create missing licenses and group by course_id
    for software, license in licenses.iteritems():
        if license is None:
            # NOTE(review): the freshly created license is stored in the dict
            # but the local ``license`` stays None, so ``serial`` below is None
            # for just-created licenses — confirm whether that is intentional.
            licenses[software] = get_or_create_license(user, software)
        course_id = software.course_id
        serial = license.serial if license else None
        licenses_by_course[course_id].append(License(software, serial))
    # render elements
    data_by_course = {}
    for course_id, licenses in licenses_by_course.iteritems():
        context = {'licenses': licenses}
        template = 'licenses/serial_numbers.html'
        data_by_course[course_id] = render_to_string(template, context)
    return data_by_course


@login_required
@requires_csrf_token
def user_software_license(request):
    """AJAX endpoint returning a JSON serial number for one software item.

    POST-only and AJAX-only; the course id is parsed from the HTTP referer
    path. With ``generate=true`` a missing license is created, otherwise an
    existing one is looked up. Responds with ``{'serial': ...}`` or
    ``{'error': ...}``; any lookup failure is a 404.
    """
    if request.method != 'POST' or not request.is_ajax():
        raise Http404
    # get the course id from the referer
    url_path = urlparse(request.META.get('HTTP_REFERER', '')).path
    # Matches legacy slash-separated course ids: org/course/run.
    pattern = re.compile('^/courses/(?P<id>[^/]+/[^/]+/[^/]+)/.*/?$')
    match = re.match(pattern, url_path)
    if not match:
        raise Http404
    course_id = match.groupdict().get('id', '')
    user_id = request.session.get('_auth_user_id')
    software_name = request.POST.get('software')
    generate = request.POST.get('generate', False) == 'true'
    try:
        software = CourseSoftware.objects.get(name=software_name,
                                              course_id=course_id)
    except CourseSoftware.DoesNotExist:
        raise Http404
    try:
        user = User.objects.get(id=user_id)
    except User.DoesNotExist:
        raise Http404
    if generate:
        software_license = get_or_create_license(user, software)
    else:
        software_license = get_license(user, software)
    if software_license:
        response = {'serial': software_license.serial}
    else:
        response = {'error': 'No serial number found'}
    # NOTE(review): ``mimetype=`` is the old Django spelling (renamed to
    # content_type in Django 1.7) — consistent with this codebase's vintage.
    return HttpResponse(json.dumps(response), mimetype='application/json')
agpl-3.0
avlach/univbris-ocf
vt_manager/src/python/vt_manager/communication/sfa/methods/GetCredential.py
4
1747
from vt_manager.communication.sfa.util.xrn import urn_to_hrn from vt_manager.communication.sfa.util.method import Method from vt_manager.communication.sfa.trust.credential import Credential from vt_manager.communication.sfa.util.parameter import Parameter, Mixed class GetCredential(Method): """ Retrive a credential for an object If cred == None then the behavior reverts to GetSelfCredential @param hrn human readable name of object (hrn or urn) @param cred credential object specifying rights of the caller @param type type of object (user | slice | node | authority ) @return the string representation of a credential object """ interfaces = ['registry'] accepts = [ Mixed(Parameter(str, "Credential string"), Parameter(type([str]), "List of credentials")), Parameter(str, "Human readable name (hrn or urn)"), Mixed(Parameter(str, "Record type"), Parameter(None, "Type not specified")), ] returns = Parameter(str, "String representation of a credential object") def call(self, creds, xrn, type): if type: hrn = urn_to_hrn(xrn)[0] else: hrn, type = urn_to_hrn(xrn) # check creds valid_creds = self.api.auth.checkCredentials(creds, 'getcredential') self.api.auth.verify_object_belongs_to_me(hrn) #log the call origin_hrn = Credential(string=valid_creds[0]).get_gid_caller().get_hrn() self.api.logger.info("interface: %s\tcaller-hrn: %s\ttarget-hrn: %s\tmethod-name: %s"%(self.api.interface, origin_hrn, hrn, self.name)) return self.api.manager.GetCredential(self.api, xrn, type, self.api.auth.client_gid.get_urn())
bsd-3-clause
slackhappy/graphite-web
webapp/graphite/graphlot/views.py
15
7389
"""Django views for Graphite's graphlot UI: rendering, series data,
metric autocomplete, and the saved-graph navigation tree endpoints."""
import re

from django.shortcuts import render_to_response
from django.http import HttpResponse, Http404, HttpResponseBadRequest
from django.conf import settings
from graphite.util import json
from graphite.render.views import parseOptions
from graphite.render.evaluator import evaluateTarget
from graphite.storage import STORE
from django.core.urlresolvers import get_script_prefix

# NOTE(review): several names used below (getProfile, getProfileByUsername,
# Profile, defaultUser, log) are not imported in this module — they appear to
# have been copied from browser/views.py; verify the imports in the real file.


def graphlot_render(request):
    """Render the main graphlot view."""
    # Targets for the left ("one") and right ("two") y-axes.
    metrics = []
    for target in request.GET.getlist('target'):
        metrics.append(dict(name=target, yaxis="one"))
    for target in request.GET.getlist('y2target'):
        metrics.append(dict(name=target, yaxis="two"))
    untiltime = request.GET.get('until', "-0hour")
    fromtime = request.GET.get('from', "-24hour")
    events = request.GET.get('events', "")
    context = {
        'metric_list' : metrics,
        'fromtime' : fromtime,
        'untiltime' : untiltime,
        'events' : events,
        'slash' : get_script_prefix()
    }
    return render_to_response("graphlot.html", context)


def get_data(request):
    """Get the data for one series."""
    # Reuse the render app's option parsing; only the first target is used.
    (graphOptions, requestOptions) = parseOptions(request)
    requestContext = {
        'startTime' : requestOptions['startTime'],
        'endTime' : requestOptions['endTime'],
        'localOnly' : False,
        'data' : []
    }
    target = requestOptions['targets'][0]
    seriesList = evaluateTarget(requestContext, target)
    result = [ dict(
        name=timeseries.name,
        data=[ x for x in timeseries ],
        start=timeseries.start,
        end=timeseries.end,
        step=timeseries.step,
        ) for timeseries in seriesList ]
    if not result:
        raise Http404
    return HttpResponse(json.dumps(result), mimetype="application/json")


def find_metric(request):
    """Autocomplete helper on metric names."""
    try:
        query = str( request.REQUEST['q'] )
    except:
        return HttpResponseBadRequest(
            content="Missing required parameter 'q'", mimetype="text/plain")
    # Prefix search against the metric store; one match per line.
    matches = list( STORE.find(query+"*") )
    content = "\n".join([node.metric_path for node in matches ])
    response = HttpResponse(content, mimetype='text/plain')
    return response


def header(request):
    "View for the header frame of the browser UI"
    context = {
        'user' : request.user,
        'profile' : getProfile(request),
        'documentation_url' : settings.DOCUMENTATION_URL,
        'slash' : get_script_prefix()
    }
    return render_to_response("browserHeader.html", context)


def browser(request):
    "View for the top-level frame of the browser UI"
    context = {
        'queryString' : request.GET.urlencode(),
        'target' : request.GET.get('target'),
        'slash' : get_script_prefix()
    }
    if context['queryString']:
        context['queryString'] = context['queryString'].replace('#','%23')
    if context['target']:
        context['target'] = context['target'].replace('#','%23') #js libs terminate a querystring on #
    return render_to_response("browser.html", context)


def search(request):
    """Case-insensitive regex search over the metric index file.

    Splits the POSTed query into whitespace-separated patterns; a line
    matches if any pattern matches. Returns at most 100 comma-joined hits.
    """
    query = request.POST['query']
    if not query:
        return HttpResponse("")
    patterns = query.split()
    regexes = [re.compile(p,re.I) for p in patterns]
    def matches(s):
        # OR-semantics across the individual patterns.
        for regex in regexes:
            if regex.search(s):
                return True
        return False
    results = []
    index_file = open(settings.INDEX_FILE)
    for line in index_file:
        if matches(line):
            results.append( line.strip() )
        if len(results) >= 100:
            break
    index_file.close()
    result_string = ','.join(results)
    return HttpResponse(result_string, mimetype='text/plain')


def myGraphLookup(request):
    "View for My Graphs navigation"
    profile = getProfile(request,allowDefault=False)
    assert profile
    nodes = []
    # Templates for tree-node dicts consumed by the JS tree widget.
    leafNode = {
        'allowChildren' : 0,
        'expandable' : 0,
        'leaf' : 1,
    }
    branchNode = {
        'allowChildren' : 1,
        'expandable' : 1,
        'leaf' : 0,
    }
    try:
        path = str( request.GET['path'] )
        if path:
            if path.endswith('.'):
                userpath_prefix = path
            else:
                userpath_prefix = path + '.'
        else:
            userpath_prefix = ""
        matches = [ graph for graph in profile.mygraph_set.all().order_by('name') if graph.name.startswith(userpath_prefix) ]
        log.info( "myGraphLookup: username=%s, path=%s, userpath_prefix=%s, %ld graph to process" % (profile.user.username, path, userpath_prefix, len(matches)) )
        # Deduplicate: a dotted name below the prefix becomes one branch
        # node; an undotted remainder becomes one leaf with its graph URL.
        branch_inserted = set()
        leaf_inserted = set()
        for graph in matches: #Now let's add the matching graph
            isBranch = False
            dotPos = graph.name.find( '.', len(userpath_prefix) )
            if dotPos >= 0:
                isBranch = True
                name = graph.name[ len(userpath_prefix) : dotPos ]
                if name in branch_inserted: continue
                branch_inserted.add(name)
            else:
                name = graph.name[ len(userpath_prefix): ]
                if name in leaf_inserted: continue
                leaf_inserted.add(name)
            node = {'text' : str(name) }
            if isBranch:
                node.update( { 'id' : str(userpath_prefix + name + '.') } )
                node.update(branchNode)
            else:
                node.update( { 'id' : str(userpath_prefix + name), 'graphUrl' : str(graph.url) } )
                node.update(leafNode)
            nodes.append(node)
    except:
        log.exception("browser.views.myGraphLookup(): could not complete request.")
    if not nodes:
        no_graphs = { 'text' : "No saved graphs", 'id' : 'no-click' }
        no_graphs.update(leafNode)
        nodes.append(no_graphs)
    return json_response(nodes, request)


def userGraphLookup(request):
    "View for User Graphs navigation"
    username = request.GET['path']
    nodes = []
    branchNode = {
        'allowChildren' : 1,
        'expandable' : 1,
        'leaf' : 0,
    }
    leafNode = {
        'allowChildren' : 0,
        'expandable' : 0,
        'leaf' : 1,
    }
    try:
        if not username:
            # Top level: one branch per user who has saved graphs.
            profiles = Profile.objects.exclude(user=defaultUser)
            for profile in profiles:
                if profile.mygraph_set.count():
                    node = {
                        'text' : str(profile.user.username),
                        'id' : str(profile.user.username)
                    }
                    node.update(branchNode)
                    nodes.append(node)
        else:
            # Inside a user's folder: one leaf per saved graph.
            profile = getProfileByUsername(username)
            assert profile, "No profile for username '%s'" % username
            for graph in profile.mygraph_set.all().order_by('name'):
                node = {
                    'text' : str(graph.name),
                    'id' : str(graph.name),
                    'graphUrl' : str(graph.url)
                }
                node.update(leafNode)
                nodes.append(node)
    except:
        log.exception("browser.views.userLookup(): could not complete request for %s" % username)
    if not nodes:
        no_graphs = { 'text' : "No saved graphs", 'id' : 'no-click' }
        no_graphs.update(leafNode)
        nodes.append(no_graphs)
    return json_response(nodes, request)


def json_response(nodes, request=None):
    """Serialize ``nodes`` as JSON (or JSONP when ``jsonp=`` is supplied),
    with caching disabled."""
    if request:
        jsonp = request.REQUEST.get('jsonp', False)
    else:
        jsonp = False
    json_data = json.dumps(nodes)
    if jsonp:
        response = HttpResponse("%s(%s)" % (jsonp, json_data),mimetype="text/javascript")
    else:
        response = HttpResponse(json_data,mimetype="application/json")
    response['Pragma'] = 'no-cache'
    response['Cache-Control'] = 'no-cache'
    return response


def any(iterable): #python2.4 compatibility
    # Shadows the builtin on purpose so the module runs on Python 2.4.
    for i in iterable:
        if i:
            return True
    return False
apache-2.0
Chatmetaleux/MissionPlanner
ExtLibs/Mavlink/pymavlink/generator/mavtemplate.py
68
5597
#!/usr/bin/env python
'''
simple templating system for mavlink generator

Copyright Andrew Tridgell 2011
Released under GNU GPL version 3 or later
'''

from .mavparse import MAVParseError


class MAVTemplate(object):
    '''simple templating system

    Variables are written ``${name}`` and repetitions
    ``${{field: ...per-item template...}}``; the delimiter tokens are
    configurable via the constructor.
    '''

    def __init__(self, start_var_token="${", end_var_token="}",
                 start_rep_token="${{", end_rep_token="}}",
                 trim_leading_lf=True, checkmissing=True):
        # trim_leading_lf: drop a single leading newline from substituted
        # text (convenient for templates that start on their own line).
        # checkmissing: raise on unknown variables instead of leaving them.
        self.start_var_token = start_var_token
        self.end_var_token = end_var_token
        self.start_rep_token = start_rep_token
        self.end_rep_token = end_rep_token
        self.trim_leading_lf = trim_leading_lf
        self.checkmissing = checkmissing

    def find_end(self, text, start_token, end_token, ignore_end_token=None):
        '''find the end of a token. Returns the offset in the string
        immediately after the matching end_token, honouring nesting'''
        if not text.startswith(start_token):
            raise MAVParseError("invalid token start")
        offset = len(start_token)
        nesting = 1
        while nesting > 0:
            idx1 = text[offset:].find(start_token)
            idx2 = text[offset:].find(end_token)
            # Check for false positives due to another similar token
            # For example, make sure idx2 points to the second '}' in ${{field: ${name}}}
            if ignore_end_token:
                combined_token = ignore_end_token + end_token
                if text[offset+idx2:offset+idx2+len(combined_token)] == combined_token:
                    idx2 += len(ignore_end_token)
            if idx1 == -1 and idx2 == -1:
                raise MAVParseError("token nesting error")
            # NOTE(review): when only end_token is missing (idx2 == -1 but
            # idx1 >= 0) this arm still fires with idx2 == -1; kept as-is to
            # preserve existing behavior — confirm upstream intent.
            if idx1 == -1 or idx1 > idx2:
                offset += idx2 + len(end_token)
                nesting -= 1
            else:
                offset += idx1 + len(start_token)
                nesting += 1
        return offset

    def find_var_end(self, text):
        '''find the end of a variable reference'''
        return self.find_end(text, self.start_var_token, self.end_var_token)

    def find_rep_end(self, text):
        '''find the end of a repetition block'''
        return self.find_end(text, self.start_rep_token, self.end_rep_token,
                             ignore_end_token=self.end_var_token)

    def substitute(self, text, subvars={}, trim_leading_lf=None, checkmissing=None):
        '''substitute variables in a string

        subvars may be a dict or any object whose attributes supply the
        values (it is only read, never mutated, so the shared default {}
        is safe). Repetitions are expanded first, then simple variables.
        Raises MAVParseError on malformed templates or (when checkmissing
        is enabled) on unknown variables.
        '''
        if trim_leading_lf is None:
            trim_leading_lf = self.trim_leading_lf
        if checkmissing is None:
            checkmissing = self.checkmissing

        # handle repetitions: ${{field: body}} expands "body" once per
        # element of subvars.field, recursively substituting each element.
        while True:
            subidx = text.find(self.start_rep_token)
            if subidx == -1:
                break
            endidx = self.find_rep_end(text[subidx:])
            if endidx == -1:
                raise MAVParseError("missing end macro in %s" % text[subidx:])
            part1 = text[0:subidx]
            part2 = text[subidx+len(self.start_rep_token):subidx+(endidx-len(self.end_rep_token))]
            part3 = text[subidx+endidx:]
            a = part2.split(':')
            field_name = a[0]
            rest = ':'.join(a[1:])
            v = None
            if isinstance(subvars, dict):
                v = subvars.get(field_name, None)
            else:
                v = getattr(subvars, field_name, None)
            if v is None:
                raise MAVParseError('unable to find field %s' % field_name)
            t1 = part1
            for f in v:
                t1 += self.substitute(rest, f, trim_leading_lf=False, checkmissing=False)
            # drop a trailing separator left by the last repetition
            if len(v) != 0 and t1[-1] in ["\n", ","]:
                t1 = t1[:-1]
            t1 += part3
            text = t1

        # FIX: guard against empty text — the original `text[0]` raised
        # IndexError on an empty template.
        if trim_leading_lf and text.startswith('\n'):
            text = text[1:]

        # handle simple variable substitutions
        while True:
            idx = text.find(self.start_var_token)
            if idx == -1:
                return text
            endidx = text[idx:].find(self.end_var_token)
            if endidx == -1:
                raise MAVParseError('missing end of variable: %s' % text[idx:idx+10])
            # FIX: the variable name starts after start_var_token; the
            # original hard-coded offset 2 only worked for the default "${".
            varname = text[idx+len(self.start_var_token):idx+endidx]
            if isinstance(subvars, dict):
                if not varname in subvars:
                    if checkmissing:
                        raise MAVParseError("unknown variable in '%s%s%s'" % (
                            self.start_var_token, varname, self.end_var_token))
                    # leave the unknown reference in place and continue after it
                    return text[0:idx+endidx] + self.substitute(
                        text[idx+endidx:], subvars,
                        trim_leading_lf=False, checkmissing=False)
                value = subvars[varname]
            else:
                value = getattr(subvars, varname, None)
                if value is None:
                    if checkmissing:
                        raise MAVParseError("unknown variable in '%s%s%s'" % (
                            self.start_var_token, varname, self.end_var_token))
                    return text[0:idx+endidx] + self.substitute(
                        text[idx+endidx:], subvars,
                        trim_leading_lf=False, checkmissing=False)
            text = text.replace("%s%s%s" % (self.start_var_token, varname, self.end_var_token), str(value))
        return text

    def write(self, file, text, subvars={}, trim_leading_lf=True):
        '''write to a file with variable substitution'''
        file.write(self.substitute(text, subvars=subvars,
                                   trim_leading_lf=trim_leading_lf))
gpl-3.0
NeCTAR-RC/ceilometer
ceilometer/tests/storage/test_get_engine.py
3
1387
# -*- encoding: utf-8 -*- # # Copyright © 2012 New Dream Network, LLC (DreamHost) # # Author: Doug Hellmann <doug.hellmann@dreamhost.com> # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for ceilometer/storage/ """ import mock from ceilometer.openstack.common import test from ceilometer import storage from ceilometer.storage import impl_log class EngineTest(test.BaseTestCase): def test_get_engine(self): conf = mock.Mock() conf.database.connection = 'log://localhost' engine = storage.get_engine(conf) self.assertIsInstance(engine, impl_log.LogStorage) def test_get_engine_no_such_engine(self): conf = mock.Mock() conf.database.connection = 'no-such-engine://localhost' try: storage.get_engine(conf) except RuntimeError as err: self.assertIn('no-such-engine', unicode(err))
apache-2.0
jayme-github/CouchPotatoServer
libs/sqlalchemy/orm/scoping.py
17
4600
# orm/scoping.py
# Copyright (C) 2005-2012 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php

from sqlalchemy import exc as sa_exc
from sqlalchemy.util import ScopedRegistry, ThreadLocalRegistry, warn
from sqlalchemy.orm import class_mapper
from sqlalchemy.orm import exc as orm_exc
from sqlalchemy.orm.session import Session

__all__ = ['ScopedSession']


class ScopedSession(object):
    """Provides thread-local management of Sessions.

    Typical invocation is via the :func:`.scoped_session`
    function::

      Session = scoped_session(sessionmaker())

    The internal registry is accessible,
    and by default is an instance of :class:`.ThreadLocalRegistry`.

    See also: :ref:`unitofwork_contextual`.

    """

    def __init__(self, session_factory, scopefunc=None):
        # session_factory: callable (typically a sessionmaker) producing
        # new Session objects.
        # scopefunc: optional callable returning the current scope key;
        # when omitted, scoping falls back to thread-local storage.
        self.session_factory = session_factory
        if scopefunc:
            self.registry = ScopedRegistry(session_factory, scopefunc)
        else:
            self.registry = ThreadLocalRegistry(session_factory)

    def __call__(self, **kwargs):
        """Return the current scoped Session, creating it if needed.

        When keyword arguments are given, a *new* Session is constructed
        with them.  Passing ``scope=None`` returns that new Session
        without registering it; any other case registers the new Session
        in the current scope, raising if one is already present there.
        """
        if kwargs:
            # NOTE: 'scope' defaults to False here, so any truthy-or-False
            # value other than an explicit None takes the "register in the
            # current scope" path below.
            scope = kwargs.pop('scope', False)
            if scope is not None:
                if self.registry.has():
                    # Refuse to silently discard an existing scoped
                    # session's configuration.
                    raise sa_exc.InvalidRequestError(
                        "Scoped session is already present; "
                        "no new arguments may be specified.")
                else:
                    sess = self.session_factory(**kwargs)
                    self.registry.set(sess)
                    return sess
            else:
                # scope=None: construct an unregistered Session.
                return self.session_factory(**kwargs)
        else:
            # No kwargs: return (lazily creating) the session for the
            # current scope.
            return self.registry()

    def remove(self):
        """Dispose of the current contextual session."""

        if self.registry.has():
            # Close the session before dropping it from the registry so
            # its transactional/connection resources are released.
            self.registry().close()
        self.registry.clear()

    def configure(self, **kwargs):
        """reconfigure the sessionmaker used by this ScopedSession."""

        if self.registry.has():
            # Sessions already created keep their old configuration;
            # warn rather than fail.
            warn('At least one scoped session is already present. '
                 ' configure() can not affect sessions that have '
                 'already been created.')

        self.session_factory.configure(**kwargs)

    def query_property(self, query_cls=None):
        """return a class property which produces a `Query` object
        against the class when called.

        e.g.::

            Session = scoped_session(sessionmaker())

            class MyClass(object):
                query = Session.query_property()

            # after mappers are defined
            result = MyClass.query.filter(MyClass.name=='foo').all()

        Produces instances of the session's configured query class by
        default.  To override and use a custom implementation, provide
        a ``query_cls`` callable.  The callable will be invoked with
        the class's mapper as a positional argument and a session
        keyword argument.

        There is no limit to the number of query properties placed on
        a class.

        """
        class query(object):
            # Descriptor: resolves the owning class's mapper at access
            # time and binds it to the current scoped session.
            def __get__(s, instance, owner):
                try:
                    mapper = class_mapper(owner)
                    if mapper:
                        if query_cls:
                            # custom query class
                            return query_cls(mapper, session=self.registry())
                        else:
                            # session's configured query class
                            return self.registry().query(mapper)
                except orm_exc.UnmappedClassError:
                    # Unmapped classes simply get None instead of a Query.
                    return None
        return query()


def instrument(name):
    # Build a proxy method that forwards the call to the current
    # scoped Session instance.
    def do(self, *args, **kwargs):
        return getattr(self.registry(), name)(*args, **kwargs)
    return do

# Mirror every public Session method onto ScopedSession.
for meth in Session.public_methods:
    setattr(ScopedSession, meth, instrument(meth))


def makeprop(name):
    # Build a read/write property proxying an attribute of the current
    # scoped Session.
    def set(self, attr):
        setattr(self.registry(), name, attr)

    def get(self):
        return getattr(self.registry(), name)

    return property(get, set)

for prop in ('bind', 'dirty', 'deleted', 'new', 'identity_map',
             'is_active', 'autoflush', 'no_autoflush'):
    setattr(ScopedSession, prop, makeprop(prop))


def clslevel(name):
    # Build a classmethod proxy delegating to the Session class itself
    # (these are class-level Session APIs, not instance-bound).
    def do(cls, *args, **kwargs):
        return getattr(Session, name)(*args, **kwargs)
    return classmethod(do)

for prop in ('close_all', 'object_session', 'identity_key'):
    setattr(ScopedSession, prop, clslevel(prop))
gpl-3.0
rev2004/android2cloud.app-engine
google_appengine/lib/django/django/templatetags/i18n.py
33
8474
from django.template import Node, resolve_variable from django.template import TemplateSyntaxError, TokenParser, Library from django.template import TOKEN_TEXT, TOKEN_VAR from django.utils import translation register = Library() class GetAvailableLanguagesNode(Node): def __init__(self, variable): self.variable = variable def render(self, context): from django.conf import settings context[self.variable] = [(k, translation.gettext(v)) for k, v in settings.LANGUAGES] return '' class GetCurrentLanguageNode(Node): def __init__(self, variable): self.variable = variable def render(self, context): context[self.variable] = translation.get_language() return '' class GetCurrentLanguageBidiNode(Node): def __init__(self, variable): self.variable = variable def render(self, context): context[self.variable] = translation.get_language_bidi() return '' class TranslateNode(Node): def __init__(self, value, noop): self.value = value self.noop = noop def render(self, context): value = resolve_variable(self.value, context) if self.noop: return value else: return translation.gettext(value) class BlockTranslateNode(Node): def __init__(self, extra_context, singular, plural=None, countervar=None, counter=None): self.extra_context = extra_context self.singular = singular self.plural = plural self.countervar = countervar self.counter = counter def render_token_list(self, tokens): result = [] for token in tokens: if token.token_type == TOKEN_TEXT: result.append(token.contents) elif token.token_type == TOKEN_VAR: result.append('%%(%s)s' % token.contents) return ''.join(result) def render(self, context): context.push() for var,val in self.extra_context.items(): context[var] = val.resolve(context) singular = self.render_token_list(self.singular) if self.plural and self.countervar and self.counter: count = self.counter.resolve(context) context[self.countervar] = count plural = self.render_token_list(self.plural) result = translation.ngettext(singular, plural, count) % context else: result = 
translation.gettext(singular) % context context.pop() return result def do_get_available_languages(parser, token): """ This will store a list of available languages in the context. Usage:: {% get_available_languages as languages %} {% for language in languages %} ... {% endfor %} This will just pull the LANGUAGES setting from your setting file (or the default settings) and put it into the named variable. """ args = token.contents.split() if len(args) != 3 or args[1] != 'as': raise TemplateSyntaxError, "'get_available_languages' requires 'as variable' (got %r)" % args return GetAvailableLanguagesNode(args[2]) def do_get_current_language(parser, token): """ This will store the current language in the context. Usage:: {% get_current_language as language %} This will fetch the currently active language and put it's value into the ``language`` context variable. """ args = token.contents.split() if len(args) != 3 or args[1] != 'as': raise TemplateSyntaxError, "'get_current_language' requires 'as variable' (got %r)" % args return GetCurrentLanguageNode(args[2]) def do_get_current_language_bidi(parser, token): """ This will store the current language layout in the context. Usage:: {% get_current_language_bidi as bidi %} This will fetch the currently active language's layout and put it's value into the ``bidi`` context variable. True indicates right-to-left layout, otherwise left-to-right """ args = token.contents.split() if len(args) != 3 or args[1] != 'as': raise TemplateSyntaxError, "'get_current_language_bidi' requires 'as variable' (got %r)" % args return GetCurrentLanguageBidiNode(args[2]) def do_translate(parser, token): """ This will mark a string for translation and will translate the string for the current language. Usage:: {% trans "this is a test" %} This will mark the string for translation so it will be pulled out by mark-messages.py into the .po files and will run the string through the translation engine. 
There is a second form:: {% trans "this is a test" noop %} This will only mark for translation, but will return the string unchanged. Use it when you need to store values into forms that should be translated later on. You can use variables instead of constant strings to translate stuff you marked somewhere else:: {% trans variable %} This will just try to translate the contents of the variable ``variable``. Make sure that the string in there is something that is in the .po file. """ class TranslateParser(TokenParser): def top(self): value = self.value() if self.more(): if self.tag() == 'noop': noop = True else: raise TemplateSyntaxError, "only option for 'trans' is 'noop'" else: noop = False return (value, noop) value, noop = TranslateParser(token.contents).top() return TranslateNode(value, noop) def do_block_translate(parser, token): """ This will translate a block of text with parameters. Usage:: {% blocktrans with foo|filter as bar and baz|filter as boo %} This is {{ bar }} and {{ boo }}. {% endblocktrans %} Additionally, this supports pluralization:: {% blocktrans count var|length as count %} There is {{ count }} object. {% plural %} There are {{ count }} objects. {% endblocktrans %} This is much like ngettext, only in template syntax. 
""" class BlockTranslateParser(TokenParser): def top(self): countervar = None counter = None extra_context = {} while self.more(): tag = self.tag() if tag == 'with' or tag == 'and': value = self.value() if self.tag() != 'as': raise TemplateSyntaxError, "variable bindings in 'blocktrans' must be 'with value as variable'" extra_context[self.tag()] = parser.compile_filter(value) elif tag == 'count': counter = parser.compile_filter(self.value()) if self.tag() != 'as': raise TemplateSyntaxError, "counter specification in 'blocktrans' must be 'count value as variable'" countervar = self.tag() else: raise TemplateSyntaxError, "unknown subtag %s for 'blocktrans' found" % tag return (countervar, counter, extra_context) countervar, counter, extra_context = BlockTranslateParser(token.contents).top() singular = [] plural = [] while parser.tokens: token = parser.next_token() if token.token_type in (TOKEN_VAR, TOKEN_TEXT): singular.append(token) else: break if countervar and counter: if token.contents.strip() != 'plural': raise TemplateSyntaxError, "'blocktrans' doesn't allow other block tags inside it" while parser.tokens: token = parser.next_token() if token.token_type in (TOKEN_VAR, TOKEN_TEXT): plural.append(token) else: break if token.contents.strip() != 'endblocktrans': raise TemplateSyntaxError, "'blocktrans' doesn't allow other block tags (seen %r) inside it" % token.contents return BlockTranslateNode(extra_context, singular, plural, countervar, counter) register.tag('get_available_languages', do_get_available_languages) register.tag('get_current_language', do_get_current_language) register.tag('get_current_language_bidi', do_get_current_language_bidi) register.tag('trans', do_translate) register.tag('blocktrans', do_block_translate)
mit
mspark93/VTK
Common/ComputationalGeometry/Testing/Python/CSpline.py
20
3768
#!/usr/bin/env python
"""Exercise vtkCardinalSpline: fit three 1-D splines (x, y, z) through
random points and render the interpolated curve as a tube, with helper
functions to vary spline end conditions interactively."""
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()

# Now create the RenderWindow, Renderer and Interactor
#
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren1)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)

math = vtk.vtkMath()
numberOfInputPoints = 30

# One 1-D spline per coordinate; evaluating all three at the same
# parameter t yields a 3-D curve point.
aSplineX = vtk.vtkCardinalSpline()
aSplineY = vtk.vtkCardinalSpline()
aSplineZ = vtk.vtkCardinalSpline()

# generate random points
inputPoints = vtk.vtkPoints()

i = 0
while i < numberOfInputPoints:
    x = math.Random(0, 1)
    y = math.Random(0, 1)
    z = math.Random(0, 1)
    aSplineX.AddPoint(i, x)
    aSplineY.AddPoint(i, y)
    aSplineZ.AddPoint(i, z)
    inputPoints.InsertPoint(i, x, y, z)
    i += 1

inputData = vtk.vtkPolyData()
inputData.SetPoints(inputPoints)

# Small spheres glyph the input sample points.
balls = vtk.vtkSphereSource()
balls.SetRadius(.01)
balls.SetPhiResolution(10)
balls.SetThetaResolution(10)

glyphPoints = vtk.vtkGlyph3D()
glyphPoints.SetInputData(inputData)
glyphPoints.SetSourceConnection(balls.GetOutputPort())

glyphMapper = vtk.vtkPolyDataMapper()
glyphMapper.SetInputConnection(glyphPoints.GetOutputPort())

glyph = vtk.vtkActor()
glyph.SetMapper(glyphMapper)
glyph.GetProperty().SetDiffuseColor(1, 0.4, 0.4)
glyph.GetProperty().SetSpecular(.3)
glyph.GetProperty().SetSpecularPower(30)

ren1.AddActor(glyph)

# create a polyline
points = vtk.vtkPoints()
profileData = vtk.vtkPolyData()

numberOfOutputPoints = 400
# Parameter-range shrink: 1.0 for an open spline, 0.0 for a closed one
# (a closed spline spans one extra interval back to the first point).
offset = 1.0

def fit():
    """Re-sample the three splines into the shared 'points' array and
    mark 'profileData' modified so the pipeline re-executes."""
    points.Reset()
    i = 0
    while i < numberOfOutputPoints:
        t = (numberOfInputPoints - offset) / (numberOfOutputPoints - 1) * i
        points.InsertPoint(i, aSplineX.Evaluate(t),
                           aSplineY.Evaluate(t), aSplineZ.Evaluate(t))
        i += 1
    profileData.Modified()

fit()

lines = vtk.vtkCellArray()
lines.InsertNextCell(numberOfOutputPoints)
i = 0
while i < numberOfOutputPoints:
    lines.InsertCellPoint(i)
    i += 1

profileData.SetPoints(points)
profileData.SetLines(lines)

# Render the interpolated curve as a tube.
profileTubes = vtk.vtkTubeFilter()
profileTubes.SetNumberOfSides(8)
profileTubes.SetInputData(profileData)
profileTubes.SetRadius(.005)

profileMapper = vtk.vtkPolyDataMapper()
profileMapper.SetInputConnection(profileTubes.GetOutputPort())

profile = vtk.vtkActor()
profile.SetMapper(profileMapper)
profile.GetProperty().SetDiffuseColor(1, 1, 0.6)
profile.GetProperty().SetSpecular(.3)
profile.GetProperty().SetSpecularPower(30)

ren1.AddActor(profile)

ren1.ResetCamera()
ren1.GetActiveCamera().Dolly(1.5)
ren1.ResetCameraClippingRange()

renWin.SetSize(400, 400)

# render the image
#
iren.Initialize()

def opened(aSplineX, aSplineY, aSplineZ):
    """Switch the splines to open-ended mode and re-render."""
    # Bug fix: without 'global', this assignment created a dead local
    # and the module-level offset used by fit() never changed.
    global offset
    offset = 1.0
    aSplineX.ClosedOff()
    aSplineY.ClosedOff()
    aSplineZ.ClosedOff()
    fit()
    renWin.Render()

def varyLeft(aSplineX, aSplineY, aSplineZ):
    """Sweep the left end-condition value from -1 to 1, re-rendering."""
    left = -1
    while left <= 1:
        aSplineX.SetLeftValue(left)
        aSplineY.SetLeftValue(left)
        aSplineZ.SetLeftValue(left)
        fit()
        renWin.Render()
        left += 0.05

def varyRight(aSplineX, aSplineY, aSplineZ):
    """Sweep the right end-condition value from -1 to 1, re-rendering."""
    right = -1
    while right <= 1:
        aSplineX.SetRightValue(right)
        aSplineY.SetRightValue(right)
        aSplineZ.SetRightValue(right)
        fit()
        renWin.Render()
        right += 0.05

def constraint(value, aSplineX, aSplineY, aSplineZ):
    """Apply the same end-constraint type to both ends of all splines.

    Deliberately does not re-fit or re-render; call fit()/Render()
    (or varyLeft/varyRight) afterwards to see the effect.
    """
    aSplineX.SetLeftConstraint(value)
    aSplineY.SetLeftConstraint(value)
    aSplineZ.SetLeftConstraint(value)
    aSplineX.SetRightConstraint(value)
    aSplineY.SetRightConstraint(value)
    aSplineZ.SetRightConstraint(value)

def closed(aSplineX, aSplineY, aSplineZ):
    """Switch the splines to closed (periodic) mode and re-render."""
    # Bug fix: same missing 'global' as in opened() above.
    global offset
    offset = 0.0
    aSplineX.ClosedOn()
    aSplineY.ClosedOn()
    aSplineZ.ClosedOn()
    fit()
    renWin.Render()

#iren.Start()
bsd-3-clause
diogommartins/pox
pox/forwarding/l2_pairs.py
47
2882
# Copyright 2012 James McCauley
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""
A super simple OpenFlow learning switch that installs rules for
each pair of L2 addresses.
"""

# These next two imports are common POX convention
from pox.core import core
import pox.openflow.libopenflow_01 as of

# Even a simple usage of the logger is much nicer than print!
log = core.getLogger()

# This table maps (switch,MAC-addr) pairs to the port on 'switch' at
# which we last saw a packet *from* 'MAC-addr'.
# (In this case, we use a Connection object for the switch.)
table = {}

# To send out all ports, we can use either of the special ports
# OFPP_FLOOD or OFPP_ALL.  We'd like to just use OFPP_FLOOD,
# but it's not clear if all switches support this, so we make
# it selectable.
all_ports = of.OFPP_FLOOD

# Handle messages the switch has sent us because it has no
# matching rule.
def _handle_PacketIn (event):
  """Learn the packet's source port, then either flood (destination
  unknown) or install a pair of flow rules covering both directions
  between the two MAC addresses."""
  packet = event.parsed

  # Learn the source
  table[(event.connection,packet.src)] = event.port

  dst_port = table.get((event.connection,packet.dst))

  if dst_port is None:
    # We don't know where the destination is yet.  So, we'll just
    # send the packet out all ports (except the one it came in on!)
    # and hope the destination is out there somewhere. :)
    msg = of.ofp_packet_out(data = event.ofp)
    msg.actions.append(of.ofp_action_output(port = all_ports))
    event.connection.send(msg)
  else:
    # Since we know the switch ports for both the source and dest
    # MACs, we can install rules for both directions.
    # Reverse direction first (dst -> src), forwarded out the port the
    # current packet arrived on.
    msg = of.ofp_flow_mod()
    msg.match.dl_dst = packet.src
    msg.match.dl_src = packet.dst
    msg.actions.append(of.ofp_action_output(port = event.port))
    event.connection.send(msg)

    # This is the packet that just came in -- we want to
    # install the rule and also resend the packet.
    msg = of.ofp_flow_mod()
    msg.data = event.ofp # Forward the incoming packet
    msg.match.dl_src = packet.src
    msg.match.dl_dst = packet.dst
    msg.actions.append(of.ofp_action_output(port = dst_port))
    event.connection.send(msg)

    log.debug("Installing %s <-> %s" % (packet.src, packet.dst))


def launch (disable_flood = False):
  """POX component entry point.

  disable_flood: when True, use OFPP_ALL instead of OFPP_FLOOD for
  unknown destinations (for switches without OFPP_FLOOD support).
  """
  global all_ports
  if disable_flood:
    all_ports = of.OFPP_ALL

  core.openflow.addListenerByName("PacketIn", _handle_PacketIn)

  log.info("Pair-Learning switch running.")
apache-2.0
danielchalef/Arduino
arduino-core/src/processing/app/i18n/python/transifex.py
75
2038
#!/usr/bin/env python #vim:set fileencoding=utf-8 sw=2 expandtab import update import requests import json class Transifex(object): def __init__(self, user, passwd): self.auth_ = (user, passwd) r = requests.get( 'http://www.transifex.com/api/2/project/' 'arduino-ide-15/resource/ide-15/?details', auth=self.auth_ ) r.raise_for_status() d = r.json() self.languages_ = set(lang['code'] for lang in d['available_languages']) def canonical_lang(self, lang): lang = lang.lower() for l in self.languages_: if l.lower() == lang: return l match = [] for l in self.languages_: if l.split('_', 1)[0].lower() == lang: match.append(l) if len(match) > 1: raise RuntimeError('Two or more candidates for %s: %s' % (lang, ' '.join(match))) if len(match) == 0: raise RuntimeError('No language code %s' % lang) return match[0] def translation(self, lang): r = requests.get( 'http://www.transifex.com/api/2/project/' 'arduino-ide-15/resource/ide-15/translation/%s/?file' % lang, auth=self.auth_ ) r.raise_for_status() r.encoding = 'utf-8' # workaround for a Transifex issue. return r.text def pull(self, lang, fname): new = self.translation(lang).encode('utf-8') new = map(lambda a: a + '\n', new.split('\n')) new = update.read_po(new) # remove the key '' to preserve the first comment block. first = new.pop('', ('', '', '')) catalog = update.read_po(fname) update.merge(catalog, new) (comment, rkey, rvalue) = catalog.get('', ('', 'msgid ""\n', '')) catalog[''] = (comment, rkey, first[2]) update.dump(catalog, fname) def push(self, lang, data): r = requests.put( 'http://www.transifex.com/api/2/project/' 'arduino-ide-15/resource/ide-15/translation/%s/' % lang, data=json.dumps({ 'content': data }), headers={ 'content-type': 'application/json' }, auth=self.auth_ ) r.raise_for_status()
lgpl-2.1
neoz/zer0m0n
signatures/network_tor.py
6
1719
# Copyright (C) 2012 Claudio "nex" Guarnieri (@botherder)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

from lib.cuckoo.common.abstracts import Signature


class Tor(Signature):
    """Flags samples that install or drop the Tor client."""

    name = "network_tor"
    description = "Installs Tor on the infected machine"
    severity = 3
    categories = ["network", "anonimity", "tor"]
    authors = ["nex"]
    minimum = "1.0"
    evented = True

    def on_call(self, call, process):
        # Creating a Windows service named "Tor Win32 Service" is a
        # strong indicator of a Tor installation.
        hit = self.check_argument_call(call,
                                       pattern="Tor Win32 Service",
                                       api="CreateServiceA",
                                       category="services")
        if hit:
            return True

    def run(self):
        # Files the Tor client keeps in its data directory; any one of
        # them being dropped is enough to trigger the signature.
        tor_artifacts = (
            ".*\\\\tor\\\\cached-certs$",
            ".*\\\\tor\\\\cached-consensus$",
            ".*\\\\tor\\\\cached-descriptors$",
            ".*\\\\tor\\\\geoip$",
            ".*\\\\tor\\\\lock$",
            ".*\\\\tor\\\\state$",
            ".*\\\\tor\\\\torrc$"
        )

        if any(self.check_file(pattern=p, regex=True) for p in tor_artifacts):
            return True
gpl-3.0
rainwoodman/pmesh
pmesh/abopt.py
3
12379
from __future__ import absolute_import from warnings import warn warn("This module is deprecated and likely no longer maintained; a maintained version is moved to cosmo4d to minimize changes in pmesh.", DeprecationWarning) import numpy from abopt.vmad2 import ZERO, Engine, statement, programme, CodeSegment, Literal from abopt.abopt2 import VectorSpace from pmesh.pm import ParticleMesh, RealField, ComplexField def nyquist_mask(factor, v): # any nyquist modes are set to 0 if the transfer function is complex mask = (numpy.imag(factor) == 0) | \ ~numpy.bitwise_and.reduce([(ii == 0) | (ii == ni // 2) for ii, ni in zip(v.i, v.Nmesh)]) return factor * mask class ParticleMeshVectorSpace(VectorSpace): def __init__(self, pm, q): self.qshape = q.shape self.pm = pm def addmul(self, a, b, c, p=1): if isinstance(b, RealField): r = b.copy() r[...] = a + b * c ** p return r elif isinstance(b, ComplexField): r = b.copy() if isinstance(c, ComplexField): c = c.plain if isinstance(a, ComplexField): a = a.plain r.plain[...] 
= a + b.plain * c ** p return r elif numpy.isscalar(b): return a + b * c ** p elif isinstance(b, numpy.ndarray): assert len(b) == self.qshape[0] return a + b * c ** p else: raise TypeError("type unknown") def dot(self, a, b): if type(a) != type(b): raise TypeError("type mismatch") if isinstance(a, RealField): return a.cdot(b) elif isinstance(a, ComplexField): return a.cdot(b) elif isinstance(a, numpy.ndarray): assert len(a) == len(b) assert len(a) == self.qshape[0] return self.pm.comm.allreduce(a.dot(b)) else: raise TypeError("type unknown") class ParticleMeshEngine(Engine): def __init__(self, pm, q=None): self.pm = pm if q is None: q = pm.generate_uniform_particle_grid(shift=0.0, dtype='f4') self.q = q self.vs = ParticleMeshVectorSpace(self.pm, self.q) @programme(ain=['s'], aout=['x']) def get_x(engine, s, x): code = CodeSegment(engine) code.add(x1='s', x2=Literal(engine.q), y='x') return code @statement(aout=['real'], ain=['complex']) def c2r(engine, real, complex): real[...] = complex.c2r() @c2r.defvjp def _(engine, _real, _complex): _complex[...] = _real.c2r_vjp() @c2r.defjvp def _(engine, real_, complex_): real_[...] = complex_.c2r() @statement(aout=['complex'], ain=['real']) def r2c(engine, complex, real): complex[...] = real.r2c() @r2c.defvjp def _(engine, _complex, _real): _real[...] = _complex.r2c_vjp() @r2c.defjvp def _(engine, complex_, real_): complex_[...] = real_.r2c() @statement(aout=['complex'], ain=['complex']) def decompress(engine, complex): return @decompress.defvjp def _(engine, _complex): _complex.decompress_vjp(out=Ellipsis) @decompress.defjvp def _(engine, complex_): pass # XXX: is this correct? 
@staticmethod def _lowpass_filter(k, v, Neff): k0s = 2 * numpy.pi / v.BoxSize mask = numpy.bitwise_and.reduce([abs(ki) <= Neff//2 * k0 for ki, k0 in zip(k, k0s)]) return v * mask @statement(aout=['real'], ain=['real']) def lowpass(engine, real, Neff): real.r2c(out=Ellipsis).apply( lambda k, v: engine._lowpass_filter(k, v, Neff), out=Ellipsis).c2r(out=Ellipsis) @lowpass.defvjp def _(engine, _real, Neff): _real.c2r_vjp().apply( lambda k, v: engine._lowpass_filter(k, v, Neff), out=Ellipsis).r2c_vjp(out=Ellipsis) @lowpass.defjvp def _(engine, real_, Neff): real_.r2c().apply( lambda k, v: engine._lowpass_filter(k, v, Neff), out=Ellipsis).c2r(out=Ellipsis) @statement(aout=['layout'], ain=['x']) def decompose(engine, layout, x): pm = engine.pm layout[...] = pm.decompose(x) @decompose.defvjp def _(engine, _layout, _x): _x[...] = ZERO @decompose.defjvp def _(engine, layout_, x_): layout_[...] = ZERO @statement(aout=['mesh'], ain=['x', 'layout']) def paint(engine, x, mesh, layout): pm = engine.pm N = pm.comm.allreduce(len(x)) mesh[...] = pm.paint(x, layout=layout, hold=False) # to have 1 + \delta on the mesh mesh[...][...] *= 1.0 * pm.Nmesh.prod() / N @paint.defvjp def _(engine, _x, _mesh, x, layout, _layout): pm = engine.pm _layout[...] = ZERO N = pm.comm.allreduce(len(x)) _x[...], junk = pm.paint_vjp(_mesh, x, layout=layout, out_mass=False) _x[...][...] *= 1.0 * pm.Nmesh.prod() / N @paint.defjvp def _(engine, x_, mesh_, x, layout, layout_): pm = engine.pm if x_ is ZERO: x_ = None mesh_[...] = pm.paint_jvp(x, v_pos=x_, layout=layout) @statement(aout=['value'], ain=['x', 'mesh', 'layout']) def readout(engine, value, x, mesh, layout): pm = engine.pm N = pm.comm.allreduce(len(x)) value[...] = mesh.readout(x, layout=layout) @readout.defvjp def _(engine, _value, _x, _mesh, x, layout, mesh): pm = engine.pm _mesh[...], _x[...] 
= mesh.readout_vjp(x, _value, layout=layout) @readout.defjvp def _(engine, value_, x_, mesh_, x, layout, mesh, layout_): pm = engine.pm if mesh_ is ZERO: mesh_ = None if x_ is ZERO: x_ = None value_[...] = mesh.readout_jvp(x, v_self=mesh_, v_pos=x_, layout=layout) @statement(aout=['complex'], ain=['complex']) def transfer(engine, complex, tf): complex.apply(lambda k, v: nyquist_mask(tf(k), v) * v, out=Ellipsis) @transfer.defvjp def _(engine, tf, _complex): _complex.apply(lambda k, v: nyquist_mask(numpy.conj(tf(k)), v) * v, out=Ellipsis) @transfer.defjvp def _(engine, tf, complex_): complex_.apply(lambda k, v: nyquist_mask(tf(k), v) * v, out=Ellipsis) @statement(aout=['residual'], ain=['model']) def residual(engine, model, data, sigma, residual): """ residual = (model - data) / sigma J = 1 / sigma """ residual[...] = (model - data) / sigma @residual.defvjp def _(engine, _model, _residual, data, sigma): _model[...] = _residual / sigma @residual.defjvp def _(engine, model_, residual_, data, sigma): residual_[...] = model_ / sigma @statement(ain=['attribute', 'value'], aout=['attribute']) def assign_component(engine, attribute, value, dim): attribute[..., dim] = value @assign_component.defvjp def _(engine, _attribute, _value, dim): _value[...] = _attribute[..., dim] @assign_component.defjvp def _(engine, attribute_, value_, dim): attribute_[..., dim] = value_ @statement(ain=['x'], aout=['y']) def assign(engine, x, y): y[...] = x.copy() @assign.defvjp def _(engine, _y, _x): _x[...] = _y @assign.defjvp def _(engine, y_, x_, x): y_[...] = x.copy() y_[...][...] = x_ @statement(ain=['x1', 'x2'], aout=['y']) def add(engine, x1, x2, y): y[...] = x1 + x2 @add.defvjp def _(engine, _y, _x1, _x2): _x1[...] = _y _x2[...] = _y @add.defjvp def _(engine, y_, x1_, x2_): y_[...] = x1_ + x2_ @statement(aout=['y'], ain=['x1', 'x2']) def multiply(engine, x1, x2, y): y[...] = x1 * x2 @multiply.defvjp def _(engine, _x1, _x2, _y, x1, x2): _x1[...] = _y * x2 _x2[...] 
= _y * x1 @multiply.defjvp def _(engine, x1_, x2_, y_, x1, x2): y_[...] = x1_ * x2 + x1 * x2_ @statement(ain=['x'], aout=['y']) def to_scalar(engine, x, y): if isinstance(x, RealField): y[...] = x.cnorm() elif isinstance(x, ComplexField): raise TypeError("Computing the L-2 norm of complex is not a good idea, because the gradient propagation is ambiguous") else: y[...] = engine.pm.comm.allreduce((x[...] ** 2).sum(dtype='f8')) @to_scalar.defvjp def _(engine, _y, _x, x): _x[...] = x * (2 * _y) @to_scalar.defjvp def _(engine, y_, x_, x): if isinstance(x, RealField): y_[...] = x.cdot(x_) * 2 elif isinstance(x, ComplexField): raise TypeError("Computing the L-2 norm of complex is not a good idea, because the gradient propagation is ambiguous") else: y_[...] = engine.pm.comm.allreduce((x * x_).sum(dtype='f8')) * 2 def check_grad(code, yname, xname, init, eps, rtol, atol=1e-12, verbose=False): from numpy.testing import assert_allclose engine = code.engine comm = engine.pm.comm if isinstance(init[xname], numpy.ndarray) and init[xname].shape == engine.q.shape: cshape = engine.pm.comm.allreduce(engine.q.shape[0]), engine.q.shape[1] def cperturb(pos, ind, eps): pos = pos.copy() start = sum(comm.allgather(pos.shape[0])[:comm.rank]) end = sum(comm.allgather(pos.shape[0])[:comm.rank + 1]) if ind[0] >= start and ind[0] < end: old = pos[ind[0] - start, ind[1]] coord = pos[ind[0]-start].copy() pos[ind[0] - start, ind[1]] = old + eps new = pos[ind[0] - start, ind[1]] else: old, new, coord = 0, 0, 0 diff = comm.allreduce(new - old) return pos def cget(pos, ind): if pos is ZERO: return 0 start = sum(comm.allgather(pos.shape[0])[:comm.rank]) end = sum(comm.allgather(pos.shape[0])[:comm.rank + 1]) if ind[0] >= start and ind[0] < end: old = pos[ind[0] - start, ind[1]] else: old = 0 return comm.allreduce(old) elif isinstance(init[xname], RealField): cshape = init[xname].cshape def cget(real, index): if real is ZERO: return 0 return real.cgetitem(index) def cperturb(real, index, eps): old = 
real.cgetitem(index) r1 = real.copy() r1.csetitem(index, old + eps) return r1 code = code.copy() code.to_scalar(x=yname, y='y') y, tape = code.compute('y', init=init, return_tape=True) vjp = tape.get_vjp() jvp = tape.get_jvp() _x = vjp.compute('_' + xname, init={'_y' : 1.0}) center = init[xname] init2 = init.copy() ng_bg = [] fg_bg = [] for index in numpy.ndindex(*cshape): x1 = cperturb(center, index, eps) x0 = cperturb(center, index, -eps) analytic = cget(_x, index) init2[xname] = x1 y1 = code.compute('y', init2) init2[xname] = x0 y0 = code.compute('y', init2) base = (x1 - x0) y_ = jvp.compute('y_', init={xname + '_': base}) #logger.DEBUG("CHECKGRAD: %s" % (y1, y0, y1 - y0, get_pos(code.engine, _x, index) * 2 * eps)) if verbose: print(index, (x1 - x0)[...].max(), y, y1 - y0, y_, cget(_x, index) * 2 * eps) fg_bg.append([index, y_, cget(_x, index) * 2 * eps]) ng_bg.append([index, y1 - y0, cget(_x, index) * 2 * eps]) fg_bg = numpy.array(fg_bg, dtype='O') ng_bg = numpy.array(ng_bg, dtype='O') def errorstat(stat, rtol, atol): g1 = numpy.array([a[1] for a in stat]) g2 = numpy.array([a[2] for a in stat]) ag1 = abs(g1) + (abs(g1) == 0) * numpy.std(g1) ag2 = abs(g2) + (abs(g2) == 0) * numpy.std(g2) sig = (g1 - g2) / ((ag1 + ag2) * rtol + atol) bins = [-100, -50, -20, -1, 1, 20, 50, 100] d = numpy.digitize(sig, bins) return d d1 = errorstat(fg_bg, rtol, atol) d2 = errorstat(ng_bg, rtol * 10000, atol) if (d1 != 4).any(): raise AssertionError("FG_BG Bad gradients: %s " % numpy.bincount(d1)) if (d2 != 4).any(): raise AssertionError("NG_BG Bad gradients: %s " % numpy.bincount(d2))
gpl-3.0
divio/django-shop
email_auth/models.py
1
2957
""" Alternative implementation of Django's authentication User model, which allows to authenticate against the email field in addition to the username fields. This alternative implementation is activated by setting ``AUTH_USER_MODEL = 'shop.User'`` in settings.py, otherwise the default Django or another customized implementation will be used. """ from django.contrib.auth import get_user_model from django.contrib.auth.models import AbstractUser, UserManager as BaseUserManager from django.core.exceptions import ValidationError from django.utils.encoding import python_2_unicode_compatible from django.utils.translation import ugettext_lazy as _ class UserManager(BaseUserManager): def get_by_natural_key(self, username): try: return self.get(username=username) except self.model.DoesNotExist: return self.get(is_active=True, email=username) @python_2_unicode_compatible class User(AbstractUser): """ Alternative implementation of Django's User model allowing to authenticate against the email field in addition to the username field, which remains the primary unique identifier. The email field is only used in addition. For users marked as active, their email address must be unique. Guests can reuse their email address as often they want. 
""" objects = UserManager() USERNAME_FIELD = 'email' REQUIRED_FIELDS = ['username'] class Meta: db_table = 'auth_user' verbose_name = _("Customer") verbose_name_plural = _("Customers") swappable = 'AUTH_USER_MODEL' def get_username(self): return self.email def __str__(self): if self.is_staff or self.is_superuser: return self.username return self.email or '<anonymous>' def get_full_name(self): full_name = super(User, self).get_full_name() if full_name: return full_name return self.get_short_name() def get_short_name(self): short_name = super(User, self).get_short_name() if short_name: return short_name return self.email def validate_unique(self, exclude=None): """ Since the email address is used as the primary identifier, we must ensure that it is unique. However, since this constraint only applies to active users, it can't be done through a field declaration via a database UNIQUE index. Inactive users can't login anyway, so we don't need a unique constraint for them. """ super(User, self).validate_unique(exclude) if self.email and get_user_model().objects.exclude(id=self.id).filter(is_active=True, email__exact=self.email).exists(): msg = _("A customer with the e-mail address ‘{email}’ already exists.") raise ValidationError({'email': msg.format(email=self.email)})
bsd-3-clause
skottler/packstack
packstack/installer/validators.py
1
7154
# -*- coding: utf-8 -*-

"""
Contains all core validation functions.
"""

import os
import re
import socket
import logging
import tempfile
import traceback

import basedefs
import common_utils as utils
from .setup_controller import Controller
from .exceptions import ParamValidationError


# NOTE: validate_multi_options and validate_multi_ping were missing from
# __all__ even though they are part of the public validator set.
__all__ = ('ParamValidationError', 'validate_integer', 'validate_float',
           'validate_regexp', 'validate_port', 'validate_not_empty',
           'validate_options', 'validate_multi_options', 'validate_ip',
           'validate_multi_ip', 'validate_file', 'validate_ping',
           'validate_multi_ping', 'validate_ssh', 'validate_multi_ssh')


def validate_integer(param, options=None):
    """
    Raises ParamValidationError if given param is not integer.
    """
    options = options or []
    try:
        int(param)
    except ValueError:
        logging.debug('validate_integer(%s, options=%s) failed.' %
                      (param, options))
        msg = 'Given value is not an integer: %s'
        raise ParamValidationError(msg % param)


def validate_float(param, options=None):
    """
    Raises ParamValidationError if given param is not a float.
    """
    options = options or []
    try:
        float(param)
    except ValueError:
        logging.debug('validate_float(%s, options=%s) failed.' %
                      (param, options))
        msg = 'Given value is not a float: %s'
        raise ParamValidationError(msg % param)


def validate_regexp(param, options=None):
    """
    Raises ParamValidationError if given param doesn't match at least
    one of regular expressions given in options.
    """
    options = options or []
    for regex in options:
        if re.search(regex, param):
            break
    else:
        logging.debug('validate_regexp(%s, options=%s) failed.' %
                      (param, options))
        msg = 'Given value does not match required regular expresion: %s'
        raise ParamValidationError(msg % param)


def validate_port(param, options=None):
    """
    Raises ParamValidationError if given param is not a decimal number
    in range 0-65535 (inclusive).
    """
    options = options or []
    validate_integer(param, options)
    port = int(param)
    # 65535 is a valid TCP/UDP port number; both ends of the range are
    # inclusive (the original "port < 65535" wrongly rejected 65535).
    if not (0 <= port <= 65535):
        logging.debug('validate_port(%s, options=%s) failed.' %
                      (param, options))
        msg = 'Given value is outside the range of (0, 65535): %s'
        raise ParamValidationError(msg % param)


def validate_not_empty(param, options=None):
    """
    Raises ParamValidationError if given param is empty.
    """
    options = options or []
    # False is a legitimate (non-empty) boolean value, so it is allowed.
    if not param and param is not False:
        logging.debug('validate_not_empty(%s, options=%s) failed.' %
                      (param, options))
        msg = 'Given value is not allowed: %s'
        raise ParamValidationError(msg % param)


def validate_options(param, options=None):
    """
    Raises ParamValidationError if given param is not member of options.
    """
    options = options or []
    # TO-DO: to be more flexible, remove this and exit in case param is empty
    validate_not_empty(param, options)
    if param not in options:
        logging.debug('validate_options(%s, options=%s) failed.' %
                      (param, options))
        msg = 'Given value is not member of allowed values %s: %s'
        raise ParamValidationError(msg % (options, param))


def validate_multi_options(param, options=None):
    """
    Validates if comma separated values given in params are members
    of options.
    """
    if not param:
        return
    options = options or []
    for i in param.split(','):
        validate_options(i.strip(), options=options)


def validate_ip(param, options=None):
    """
    Raises ParamValidationError if given parameter value is not in IPv4
    or IPv6 address.
    """
    for family in (socket.AF_INET, socket.AF_INET6):
        try:
            socket.inet_pton(family, param)
            break
        except socket.error:
            continue
    else:
        logging.debug('validate_ip(%s, options=%s) failed.' %
                      (param, options))
        msg = 'Given host is not in IP address format: %s'
        raise ParamValidationError(msg % param)


def validate_multi_ip(param, options=None):
    """
    Raises ParamValidationError unless each of the comma separated values
    given in param is an IPv4 or IPv6 address.  Each value may optionally
    carry a "/<device>" suffix, which is ignored for validation.
    """
    for host in param.split(','):
        # Only validate the address part; the original unconditional
        # "host, device = host.split('/', 1)" raised ValueError for plain
        # addresses that have no '/<device>' suffix.
        host = host.split('/', 1)[0]
        validate_ip(host.strip(), options)


def validate_file(param, options=None):
    """
    Raises ParamValidationError if provided file in param does not exist.
    """
    options = options or []
    # TO-DO: to be more flexible, remove this and exit in case param is empty
    validate_not_empty(param)
    if not os.path.isfile(param):
        logging.debug('validate_file(%s, options=%s) failed.' %
                      (param, options))
        msg = 'Given file does not exist: %s'
        raise ParamValidationError(msg % param)


def validate_ping(param, options=None):
    """
    Raises ParamValidationError if provided host does not answer to ICMP
    echo request.
    """
    options = options or []
    # TO-DO: to be more flexible, remove this and exit in case param is empty
    validate_not_empty(param)
    cmd = ["/bin/ping", "-c", "1", str(param)]
    out, rc = utils.execCmd(cmdList=cmd)
    if rc != 0:
        logging.debug('validate_ping(%s, options=%s) failed.' %
                      (param, options))
        msg = 'Given host is unreachable: %s'
        raise ParamValidationError(msg % param)


def validate_multi_ping(param, options=None):
    """
    Raises ParamValidationError if comma separated host given in param
    do not answer to ICMP echo request.
    """
    options = options or []
    # TO-DO: to be more flexible, remove this and exit in case param is empty
    validate_not_empty(param)
    for host in param.split(","):
        validate_ping(host.strip())


# Cache of "host:port" endpoints that were already probed successfully,
# so repeated validations do not open a new connection each time.
_tested_ports = []


def touch_port(host, port):
    """
    Check that provided host is listening on provided port.

    Raises socket.error if the connection cannot be established.
    """
    key = "%s:%d" % (host, port)
    if key in _tested_ports:
        return
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.connect((host, port))
    s.shutdown(socket.SHUT_RDWR)
    s.close()
    _tested_ports.append(key)


def validate_ssh(param, options=None):
    """
    Raises ParamValidationError if provided host does not listen
    on port 22.
    """
    options = options or []
    try:
        touch_port(param.strip(), 22)
    except socket.error:
        logging.debug('validate_ssh(%s, options=%s) failed.' %
                      (param, options))
        msg = 'Given host does not listen on port 22: %s'
        raise ParamValidationError(msg % param)


def validate_multi_ssh(param, options=None):
    """
    Raises ParamValidationError if comma separated host provided
    in param do not listen on port 22.
    """
    options = options or []
    # TO-DO: to be more flexible, remove this and exit in case param is empty
    validate_not_empty(param)
    for host in param.split(","):
        validate_ssh(host)
apache-2.0
ASCrookes/django
django/contrib/postgres/fields/ranges.py
172
5636
"""Model fields mapping to PostgreSQL range types (int4range, tstzrange, ...)."""
import json

from psycopg2.extras import DateRange, DateTimeTZRange, NumericRange, Range

from django.contrib.postgres import forms, lookups
from django.db import models
from django.utils import six

from .utils import AttributeSetter

__all__ = [
    'RangeField', 'IntegerRangeField', 'BigIntegerRangeField', 'FloatRangeField',
    'DateTimeRangeField', 'DateRangeField',
]


class RangeField(models.Field):
    """
    Abstract base for range fields.  Subclasses supply:
      base_field -- a model field instance used to (de)serialize the bounds,
      range_type -- the psycopg2 range class stored in Python,
      form_field -- the default form field class.
    """
    empty_strings_allowed = False

    def get_prep_value(self, value):
        # Accept None, a psycopg2 Range, or a (lower, upper) pair.
        if value is None:
            return None
        elif isinstance(value, Range):
            return value
        elif isinstance(value, (list, tuple)):
            return self.range_type(value[0], value[1])
        return value

    def to_python(self, value):
        if isinstance(value, six.string_types):
            # Assume we're deserializing: strings are the JSON produced by
            # value_to_string() below; delegate each bound to the base field.
            vals = json.loads(value)
            for end in ('lower', 'upper'):
                if end in vals:
                    vals[end] = self.base_field.to_python(vals[end])
            value = self.range_type(**vals)
        elif isinstance(value, (list, tuple)):
            value = self.range_type(value[0], value[1])
        return value

    def set_attributes_from_name(self, name):
        # Keep the hidden base field in sync so its serialization helpers
        # (which read attname) work against this field's name.
        super(RangeField, self).set_attributes_from_name(name)
        self.base_field.set_attributes_from_name(name)

    def value_to_string(self, obj):
        """Serialize the range to JSON with bounds rendered by the base field."""
        value = self.value_from_object(obj)
        if value is None:
            return None
        if value.isempty:
            return json.dumps({"empty": True})
        base_field = self.base_field
        result = {"bounds": value._bounds}
        for end in ('lower', 'upper'):
            # AttributeSetter fakes a model instance carrying just the bound
            # value, so base_field.value_to_string() can be reused as-is.
            obj = AttributeSetter(base_field.attname, getattr(value, end))
            result[end] = base_field.value_to_string(obj)
        return json.dumps(result)

    def formfield(self, **kwargs):
        kwargs.setdefault('form_class', self.form_field)
        return super(RangeField, self).formfield(**kwargs)


class IntegerRangeField(RangeField):
    base_field = models.IntegerField()
    range_type = NumericRange
    form_field = forms.IntegerRangeField

    def db_type(self, connection):
        return 'int4range'


class BigIntegerRangeField(RangeField):
    base_field = models.BigIntegerField()
    range_type = NumericRange
    form_field = forms.IntegerRangeField

    def db_type(self, connection):
        return 'int8range'


class FloatRangeField(RangeField):
    base_field = models.FloatField()
    range_type = NumericRange
    form_field = forms.FloatRangeField

    def db_type(self, connection):
        return 'numrange'


class DateTimeRangeField(RangeField):
    base_field = models.DateTimeField()
    range_type = DateTimeTZRange
    form_field = forms.DateTimeRangeField

    def db_type(self, connection):
        return 'tstzrange'


class DateRangeField(RangeField):
    base_field = models.DateField()
    range_type = DateRange
    form_field = forms.DateRangeField

    def db_type(self, connection):
        return 'daterange'


# Lookups available on every range field.
RangeField.register_lookup(lookups.DataContains)
RangeField.register_lookup(lookups.ContainedBy)
RangeField.register_lookup(lookups.Overlap)


class RangeContainedBy(models.Lookup):
    """``scalar__contained_by=range``: test a scalar column against a range value."""
    lookup_name = 'contained_by'
    # Map the scalar column's db_type to the range type it must be cast to.
    type_mapping = {
        'integer': 'int4range',
        'bigint': 'int8range',
        'double precision': 'numrange',
        'date': 'daterange',
        'timestamp with time zone': 'tstzrange',
    }

    def as_sql(self, qn, connection):
        field = self.lhs.output_field
        if isinstance(field, models.FloatField):
            # numrange holds numerics, so the float column is cast explicitly.
            sql = '%s::numeric <@ %s::{}'.format(self.type_mapping[field.db_type(connection)])
        else:
            sql = '%s <@ %s::{}'.format(self.type_mapping[field.db_type(connection)])
        lhs, lhs_params = self.process_lhs(qn, connection)
        rhs, rhs_params = self.process_rhs(qn, connection)
        params = lhs_params + rhs_params
        return sql % (lhs, rhs), params

    def get_prep_lookup(self):
        # Delegate rhs preparation to RangeField so lists/tuples become ranges.
        return RangeField().get_prep_lookup(self.lookup_name, self.rhs)


models.DateField.register_lookup(RangeContainedBy)
models.DateTimeField.register_lookup(RangeContainedBy)
models.IntegerField.register_lookup(RangeContainedBy)
models.BigIntegerField.register_lookup(RangeContainedBy)
models.FloatField.register_lookup(RangeContainedBy)


@RangeField.register_lookup
class FullyLessThan(lookups.PostgresSimpleLookup):
    lookup_name = 'fully_lt'
    operator = '<<'


@RangeField.register_lookup
class FullGreaterThan(lookups.PostgresSimpleLookup):
    lookup_name = 'fully_gt'
    operator = '>>'


@RangeField.register_lookup
class NotLessThan(lookups.PostgresSimpleLookup):
    lookup_name = 'not_lt'
    operator = '&>'


@RangeField.register_lookup
class NotGreaterThan(lookups.PostgresSimpleLookup):
    lookup_name = 'not_gt'
    operator = '&<'


@RangeField.register_lookup
class AdjacentToLookup(lookups.PostgresSimpleLookup):
    lookup_name = 'adjacent_to'
    operator = '-|-'


@RangeField.register_lookup
class RangeStartsWith(lookups.FunctionTransform):
    """Transform extracting the lower bound via PostgreSQL lower()."""
    lookup_name = 'startswith'
    function = 'lower'

    @property
    def output_field(self):
        # The bound has the element type, not the range type.
        return self.lhs.output_field.base_field


@RangeField.register_lookup
class RangeEndsWith(lookups.FunctionTransform):
    """Transform extracting the upper bound via PostgreSQL upper()."""
    lookup_name = 'endswith'
    function = 'upper'

    @property
    def output_field(self):
        return self.lhs.output_field.base_field


@RangeField.register_lookup
class IsEmpty(lookups.FunctionTransform):
    """Transform mapping to PostgreSQL isempty(range) -> boolean."""
    lookup_name = 'isempty'
    function = 'isempty'
    output_field = models.BooleanField()
bsd-3-clause
idatux/idatuxft
engine/xml/dom/html/HTMLDOMImplementation.py
10
1047
######################################################################## # # File Name: implementation.py # # """ WWW: http://4suite.com/4DOM e-mail: support@4suite.com Copyright (c) 2000 Fourthought Inc, USA. All Rights Reserved. See http://4suite.com/COPYRIGHT for license and copyright information """ from xml.dom import DOMImplementation # Add the HTML feature DOMImplementation.FEATURES_MAP['HTML'] = 2.0 class HTMLDOMImplementation(DOMImplementation.DOMImplementation): def __init__(self): DOMImplementation.DOMImplementation.__init__(self) def createHTMLDocument(self, title): from xml.dom.html import HTMLDocument doc = HTMLDocument.HTMLDocument() h = doc.createElement('HTML') doc.appendChild(h) doc._set_title(title) return doc def _4dom_createHTMLCollection(self,list=None): if list is None: list = [] from xml.dom.html import HTMLCollection hc = HTMLCollection.HTMLCollection(list) return hc
gpl-3.0
jerome-nexedi/dream
dream/simulation/OrderComponent.py
5
4092
# ===========================================================================
# Copyright 2013 University of Limerick
#
# This file is part of DREAM.
#
# DREAM is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DREAM is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DREAM.  If not, see <http://www.gnu.org/licenses/>.
# ===========================================================================
'''
Created on 20 Dec 2013

@author: George
'''
'''
OrderComponent is an Entity that is a component of a broader order
'''

from Globals import G
from Job import Job


# ===========================================================================
# The OrderComponent object
# ===========================================================================
class OrderComponent(Job):  # inherits from the Job class
    type = "OrderComponent"

    def __init__(self, id=None, name=None, route=None, priority=0, dueDate=0,
                 orderDate=0, extraPropertyDict=None, componentType='Basic',
                 order=None, requestingComponent=None, readyForAssembly=0,
                 isCritical=False, remainingProcessingTime=None,
                 remainingSetupTime=None, currentStation=None, **kw):
        """
        Create a component belonging to a broader Order.

        componentType        -- 'Basic', 'Secondary' or 'Auxiliary'
        order                -- the parent Order of this component (may be None for WIP)
        requestingComponent  -- id of the component that needs this auxiliary component
        readyForAssembly     -- flag: component was received by the MouldAssembleBuffer
        Remaining arguments are forwarded unchanged to Job.__init__.
        """
        # BUGFIX: the original signature used mutable defaults (route=[],
        # remainingProcessingTime={}, remainingSetupTime={}), which are shared
        # between every instance created without those arguments.  Use None
        # sentinels and create fresh containers per instance instead.
        route = [] if route is None else route
        remainingProcessingTime = {} if remainingProcessingTime is None else remainingProcessingTime
        remainingSetupTime = {} if remainingSetupTime is None else remainingSetupTime
        Job.__init__(self, id, name, route=route, priority=priority, dueDate=dueDate,
                     orderDate=orderDate, extraPropertyDict=extraPropertyDict,
                     isCritical=isCritical,
                     remainingProcessingTime=remainingProcessingTime,
                     remainingSetupTime=remainingSetupTime,
                     currentStation=currentStation)
        # =======================================================================
        # Holds the auxiliary components that the component needs for a
        # certain processing
        # =======================================================================
        self.auxiliaryList = []
        self.order = order  # parent order of the order component
        # TODO: in case the order is not given as argument (when the component is given as WIP)
        # have to give a manager as argument or initiate the parent order not as WIP
        if self.order:
            # if the order is not None, and the order.manager is given
            if self.order.manager:
                self.manager = self.order.manager
        # the type of the component which can be Basic/Secondary/Auxiliary
        self.componentType = componentType
        # =======================================================================
        # if the componentType of the component is Auxiliary then there needs a
        # requesting Component to be defined; the requestingComponent is the
        # component that needs the auxiliary component during its processing;
        # the auxiliary component should then be added to the
        # requestingComponent's auxiliaryList
        # =======================================================================
        self.requestingComponent = requestingComponent  # the id of the requesting component
        # flag informing whether the component was received by the MouldAssembleBuffer
        self.readyForAssembly = readyForAssembly
        # used by printRoute
        if self.order:
            self.alias = self.order.alias + 'C' + str(len(G.OrderComponentList))
        G.OrderComponentList.append(self)
        G.WipList.append(self)
gpl-3.0
jiangzhuo/kbengine
kbe/res/scripts/common/Lib/test/test_poll.py
91
6552
# Test case for the os.poll() function

import os
import subprocess
import random
import select
try:
    import threading
except ImportError:
    threading = None
import time
import unittest
from test.support import TESTFN, run_unittest, reap_threads, cpython_only

try:
    select.poll
except AttributeError:
    raise unittest.SkipTest("select.poll not defined")


def find_ready_matching(ready, flag):
    """Return the fds from poll() output whose event mask includes *flag*."""
    match = []
    for fd, mode in ready:
        if mode & flag:
            match.append(fd)
    return match


class PollTests(unittest.TestCase):

    def test_poll1(self):
        # Basic functional test of poll object
        # Create a bunch of pipe and test that poll works with them.

        p = select.poll()

        NUM_PIPES = 12
        MSG = b" This is a test."
        MSG_LEN = len(MSG)
        readers = []
        writers = []
        r2w = {}
        w2r = {}

        for i in range(NUM_PIPES):
            rd, wr = os.pipe()
            p.register(rd)
            p.modify(rd, select.POLLIN)  # exercise modify() as well as register()
            p.register(wr, select.POLLOUT)
            readers.append(rd)
            writers.append(wr)
            r2w[rd] = wr
            w2r[wr] = rd

        bufs = []

        while writers:
            ready = p.poll()
            ready_writers = find_ready_matching(ready, select.POLLOUT)
            if not ready_writers:
                raise RuntimeError("no pipes ready for writing")
            wr = random.choice(ready_writers)
            os.write(wr, MSG)

            ready = p.poll()
            ready_readers = find_ready_matching(ready, select.POLLIN)
            if not ready_readers:
                raise RuntimeError("no pipes ready for reading")
            rd = random.choice(ready_readers)
            buf = os.read(rd, MSG_LEN)
            self.assertEqual(len(buf), MSG_LEN)
            bufs.append(buf)
            os.close(r2w[rd])
            os.close(rd)
            p.unregister(r2w[rd])
            p.unregister(rd)
            writers.remove(r2w[rd])

        self.assertEqual(bufs, [MSG] * NUM_PIPES)

    def test_poll_unit_tests(self):
        # returns NVAL for invalid file descriptor
        FD, w = os.pipe()
        os.close(FD)
        os.close(w)
        p = select.poll()
        p.register(FD)
        r = p.poll()
        self.assertEqual(r[0], (FD, select.POLLNVAL))

        f = open(TESTFN, 'w')
        fd = f.fileno()
        p = select.poll()
        p.register(f)
        r = p.poll()
        self.assertEqual(r[0][0], fd)
        f.close()
        r = p.poll()
        self.assertEqual(r[0], (fd, select.POLLNVAL))
        os.unlink(TESTFN)

        # type error for invalid arguments
        p = select.poll()
        self.assertRaises(TypeError, p.register, p)
        self.assertRaises(TypeError, p.unregister, p)

        # can't unregister non-existent object
        p = select.poll()
        self.assertRaises(KeyError, p.unregister, 3)

        # Test error cases
        pollster = select.poll()

        class Nope:
            pass

        class Almost:
            def fileno(self):
                return 'fileno'

        self.assertRaises(TypeError, pollster.register, Nope(), 0)
        self.assertRaises(TypeError, pollster.register, Almost(), 0)

    # Another test case for poll().  This is copied from the test case for
    # select(), modified to use poll() instead.

    def test_poll2(self):
        cmd = 'for i in 0 1 2 3 4 5 6 7 8 9; do echo testing...; sleep 1; done'
        proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
                                bufsize=0)
        p = proc.stdout
        pollster = select.poll()
        pollster.register(p, select.POLLIN)
        for tout in (0, 1000, 2000, 4000, 8000, 16000) + (-1,) * 10:
            fdlist = pollster.poll(tout)
            if (fdlist == []):
                continue
            fd, flags = fdlist[0]
            if flags & select.POLLHUP:
                line = p.readline()
                if line != b"":
                    self.fail('error: pipe seems to be closed, but still returns data')
                continue
            elif flags & select.POLLIN:
                line = p.readline()
                if not line:
                    break
                self.assertEqual(line, b'testing...\n')
                continue
            else:
                self.fail('Unexpected return value from select.poll: %s' % fdlist)
        p.close()

    def test_poll3(self):
        # test int overflow
        pollster = select.poll()
        pollster.register(1)

        self.assertRaises(OverflowError, pollster.poll, 1 << 64)

        # (Removed an injected dead-code block here: "x = 2 + 3; if x != 5:
        # self.fail('Overflow must have occurred')" could never trigger and
        # its message was nonsensical; it is not part of the upstream test.)

        # Issues #15989, #17919
        self.assertRaises(OverflowError, pollster.register, 0, -1)
        self.assertRaises(OverflowError, pollster.register, 0, 1 << 64)
        self.assertRaises(OverflowError, pollster.modify, 1, -1)
        self.assertRaises(OverflowError, pollster.modify, 1, 1 << 64)

    @cpython_only
    def test_poll_c_limits(self):
        from _testcapi import USHRT_MAX, INT_MAX, UINT_MAX
        pollster = select.poll()
        pollster.register(1)

        # Issues #15989, #17919
        self.assertRaises(OverflowError, pollster.register, 0, USHRT_MAX + 1)
        self.assertRaises(OverflowError, pollster.modify, 1, USHRT_MAX + 1)
        self.assertRaises(OverflowError, pollster.poll, INT_MAX + 1)
        self.assertRaises(OverflowError, pollster.poll, UINT_MAX + 1)

    @unittest.skipUnless(threading, 'Threading required for this test.')
    @reap_threads
    def test_threaded_poll(self):
        r, w = os.pipe()
        self.addCleanup(os.close, r)
        self.addCleanup(os.close, w)
        rfds = []
        for i in range(10):
            fd = os.dup(r)
            self.addCleanup(os.close, fd)
            rfds.append(fd)
        pollster = select.poll()
        for fd in rfds:
            pollster.register(fd, select.POLLIN)

        t = threading.Thread(target=pollster.poll)
        t.start()
        try:
            time.sleep(0.5)
            # trigger ufds array reallocation
            for fd in rfds:
                pollster.unregister(fd)
            pollster.register(w, select.POLLOUT)
            self.assertRaises(RuntimeError, pollster.poll)
        finally:
            # and make the call to poll() from the thread return
            os.write(w, b'spam')
            t.join()


def test_main():
    run_unittest(PollTests)


if __name__ == '__main__':
    test_main()
lgpl-3.0
jjmachan/activityPointsApp
activitypoints/lib/python3.5/site-packages/django/contrib/gis/db/models/fields.py
45
17598
from collections import defaultdict

from django.contrib.gis import forms, gdal
from django.contrib.gis.db.models.lookups import (
    RasterBandTransform, gis_lookups,
)
from django.contrib.gis.db.models.proxy import SpatialProxy
from django.contrib.gis.gdal.error import GDALException
from django.contrib.gis.geometry.backend import Geometry, GeometryException
from django.core.exceptions import ImproperlyConfigured
from django.db.models.expressions import Expression
from django.db.models.fields import Field
from django.utils import six
from django.utils.translation import ugettext_lazy as _

# Local cache of the spatial_ref_sys table, which holds SRID data for each
# spatial database alias. This cache exists so that the database isn't queried
# for SRID info each time a distance query is constructed.
_srid_cache = defaultdict(dict)


def get_srid_info(srid, connection):
    """
    Returns the units, unit name, and spheroid WKT associated with the
    given SRID from the `spatial_ref_sys` (or equivalent) spatial database
    table for the given database connection.  These results are cached.
    """
    from django.contrib.gis.gdal import SpatialReference
    global _srid_cache

    try:
        # The SpatialRefSys model for the spatial backend.
        SpatialRefSys = connection.ops.spatial_ref_sys()
    except NotImplementedError:
        SpatialRefSys = None

    # Without a SpatialRefSys model, fall back to resolving the SRID through
    # GDAL's SpatialReference; the cache then lives under the None alias.
    alias, get_srs = (
        (connection.alias, lambda srid: SpatialRefSys.objects.using(connection.alias).get(srid=srid).srs)
        if SpatialRefSys else (None, SpatialReference)
    )
    if srid not in _srid_cache[alias]:
        srs = get_srs(srid)
        units, units_name = srs.units
        sphere_name = srs['spheroid']
        spheroid = 'SPHEROID["%s",%s,%s]' % (sphere_name, srs.semi_major, srs.inverse_flattening)
        _srid_cache[alias][srid] = (units, units_name, spheroid)

    return _srid_cache[alias][srid]


class GeoSelectFormatMixin(object):
    def select_format(self, compiler, sql, params):
        """
        Returns the selection format string, depending on the requirements
        of the spatial backend.  For example, Oracle and MySQL require custom
        selection formats in order to retrieve geometries in OGC WKT. For all
        other fields a simple '%s' format string is returned.
        """
        connection = compiler.connection
        srid = compiler.query.get_context('transformed_srid')
        if srid:
            # A pending transform() wraps the column in the backend's
            # transform function with the target SRID.
            sel_fmt = '%s(%%s, %s)' % (connection.ops.transform, srid)
        else:
            sel_fmt = '%s'
        if connection.ops.select:
            # This allows operations to be done on fields in the SELECT,
            # overriding their values -- used by the Oracle and MySQL
            # spatial backends to get database values as WKT, and by the
            # `transform` method.
            sel_fmt = connection.ops.select % sel_fmt
        return sel_fmt % sql, params


class BaseSpatialField(Field):
    """
    The Base GIS Field.

    It's used as a base class for GeometryField and RasterField. Defines
    properties that are common to all GIS fields such as the characteristics
    of the spatial reference system of the field.
    """
    description = _("The base GIS field.")
    empty_strings_allowed = False

    # Geodetic units.
    geodetic_units = ('decimal degree', 'degree')

    def __init__(self, verbose_name=None, srid=4326, spatial_index=True, **kwargs):
        """
        The initialization function for base spatial fields. Takes the
        following as keyword arguments:

        srid:
         The spatial reference system identifier, an OGC standard.
         Defaults to 4326 (WGS84).

        spatial_index:
         Indicates whether to create a spatial index.  Defaults to True.
         Set this instead of 'db_index' for geographic fields since index
         creation is different for geometry columns.
        """
        # Setting the index flag with the value of the `spatial_index` keyword.
        self.spatial_index = spatial_index

        # Setting the SRID and getting the units.  Unit information must be
        # easily available in the field instance for distance queries.
        self.srid = srid

        # Setting the verbose_name keyword argument with the positional
        # first parameter, so this works like normal fields.
        kwargs['verbose_name'] = verbose_name

        super(BaseSpatialField, self).__init__(**kwargs)

    def deconstruct(self):
        name, path, args, kwargs = super(BaseSpatialField, self).deconstruct()
        # Always include SRID for less fragility; include spatial index if it's
        # not the default value.
        kwargs['srid'] = self.srid
        if self.spatial_index is not True:
            kwargs['spatial_index'] = self.spatial_index
        return name, path, args, kwargs

    def db_type(self, connection):
        return connection.ops.geo_db_type(self)

    # The following functions are used to get the units, their name, and
    # the spheroid corresponding to the SRID of the BaseSpatialField.
    def _get_srid_info(self, connection):
        # Get attributes from `get_srid_info`.
        self._units, self._units_name, self._spheroid = get_srid_info(self.srid, connection)

    def spheroid(self, connection):
        # Lazily fetch (and cache on the instance) SRID metadata.
        if not hasattr(self, '_spheroid'):
            self._get_srid_info(connection)
        return self._spheroid

    def units(self, connection):
        if not hasattr(self, '_units'):
            self._get_srid_info(connection)
        return self._units

    def units_name(self, connection):
        if not hasattr(self, '_units_name'):
            self._get_srid_info(connection)
        return self._units_name

    def geodetic(self, connection):
        """
        Returns true if this field's SRID corresponds with a coordinate
        system that uses non-projected units (e.g., latitude/longitude).
        """
        units_name = self.units_name(connection)
        # When no unit name is available, 4326 (WGS84) is assumed geodetic.
        return units_name.lower() in self.geodetic_units if units_name else self.srid == 4326

    def get_placeholder(self, value, compiler, connection):
        """
        Returns the placeholder for the spatial column for the
        given value.
        """
        return connection.ops.get_geom_placeholder(self, value, compiler)

    def get_srid(self, obj):
        """
        Return the default SRID for the given geometry or raster, taking
        into account the SRID set for the field.  For example, if the
        input geometry or raster doesn't have an SRID, then the SRID of
        the field will be returned.
        """
        srid = obj.srid  # SRID of given geometry.
        # -1 acts as an "undefined SRID" sentinel here: prefer the field's
        # SRID whenever the object's SRID is missing/undefined.
        if srid is None or self.srid == -1 or (srid == -1 and self.srid != -1):
            return self.srid
        else:
            return srid

    def get_db_prep_save(self, value, connection):
        """
        Prepare the value for saving in the database.
        """
        if isinstance(value, Geometry) or value:
            return connection.ops.Adapter(self.get_prep_value(value))
        else:
            # Falsy non-geometry values (None, '', etc.) are stored as NULL.
            return None

    def get_raster_prep_value(self, value, is_candidate):
        """
        Return a GDALRaster if conversion is successful, otherwise return None.
        """
        if isinstance(value, gdal.GDALRaster):
            return value
        elif is_candidate:
            # Candidate inputs may also be geometries, so failure to convert
            # is not an error here.
            try:
                return gdal.GDALRaster(value)
            except GDALException:
                pass
        elif isinstance(value, dict):
            # A dict can only be a raster spec, so a failed conversion is
            # a user error.
            try:
                return gdal.GDALRaster(value)
            except GDALException:
                raise ValueError("Couldn't create spatial object from lookup value '%s'." % value)

    def get_prep_value(self, value):
        """
        Spatial lookup values are either a parameter that is (or may be
        converted to) a geometry or raster, or a sequence of lookup values
        that begins with a geometry or raster. This routine sets up the
        geometry or raster value properly and preserves any other lookup
        parameters.
        """
        value = super(BaseSpatialField, self).get_prep_value(value)

        # For IsValid lookups, boolean values are allowed.
        if isinstance(value, (Expression, bool)):
            return value
        elif isinstance(value, (tuple, list)):
            obj = value[0]
            seq_value = True
        else:
            obj = value
            seq_value = False

        # When the input is not a geometry or raster, attempt to construct one
        # from the given string input.
        if isinstance(obj, Geometry):
            pass
        else:
            # Check if input is a candidate for conversion to raster or geometry.
            is_candidate = isinstance(obj, (bytes, six.string_types)) or hasattr(obj, '__geo_interface__')
            # Try to convert the input to raster.
            raster = self.get_raster_prep_value(obj, is_candidate)

            if raster:
                obj = raster
            elif is_candidate:
                try:
                    obj = Geometry(obj)
                except (GeometryException, GDALException):
                    raise ValueError("Couldn't create spatial object from lookup value '%s'." % obj)
            else:
                raise ValueError('Cannot use object with type %s for a spatial lookup parameter.' % type(obj).__name__)

        # Assigning the SRID value.
        obj.srid = self.get_srid(obj)

        if seq_value:
            # Re-attach any trailing lookup parameters (e.g. a distance).
            lookup_val = [obj]
            lookup_val.extend(value[1:])
            return tuple(lookup_val)
        else:
            return obj


# Register all GIS lookups on the base field so every subclass inherits them.
for klass in gis_lookups.values():
    BaseSpatialField.register_lookup(klass)


class GeometryField(GeoSelectFormatMixin, BaseSpatialField):
    """
    The base Geometry field -- maps to the OpenGIS Specification Geometry type.
    """
    description = _("The base Geometry field -- maps to the OpenGIS Specification Geometry type.")
    form_class = forms.GeometryField
    # The OpenGIS Geometry name.
    geom_type = 'GEOMETRY'

    def __init__(self, verbose_name=None, dim=2, geography=False, **kwargs):
        """
        The initialization function for geometry fields. In addition to the
        parameters from BaseSpatialField, it takes the following as keyword
        arguments:

        dim:
         The number of dimensions for this geometry.  Defaults to 2.

        extent:
         Customize the extent, in a 4-tuple of WGS 84 coordinates, for the
         geometry field entry in the `USER_SDO_GEOM_METADATA` table.  Defaults
         to (-180.0, -90.0, 180.0, 90.0).

        tolerance:
         Define the tolerance, in meters, to use for the geometry field
         entry in the `USER_SDO_GEOM_METADATA` table.  Defaults to 0.05.
        """
        # Setting the dimension of the geometry field.
        self.dim = dim

        # Is this a geography rather than a geometry column?
        self.geography = geography

        # Oracle-specific private attributes for creating the entry in
        # `USER_SDO_GEOM_METADATA`
        self._extent = kwargs.pop('extent', (-180.0, -90.0, 180.0, 90.0))
        self._tolerance = kwargs.pop('tolerance', 0.05)

        super(GeometryField, self).__init__(verbose_name=verbose_name, **kwargs)

    def deconstruct(self):
        name, path, args, kwargs = super(GeometryField, self).deconstruct()
        # Include kwargs if they're not the default values.
        if self.dim != 2:
            kwargs['dim'] = self.dim
        if self.geography is not False:
            kwargs['geography'] = self.geography
        return name, path, args, kwargs

    # ### Routines specific to GeometryField ###
    def get_distance(self, value, lookup_type, connection):
        """
        Returns a distance number in units of the field.  For example, if
        `D(km=1)` was passed in and the units of the field were in meters,
        then 1000 would be returned.
        """
        return connection.ops.get_distance(self, value, lookup_type)

    def get_db_prep_value(self, value, connection, *args, **kwargs):
        return connection.ops.Adapter(
            super(GeometryField, self).get_db_prep_value(value, connection, *args, **kwargs),
            **({'geography': True} if self.geography else {})
        )

    def from_db_value(self, value, expression, connection, context):
        if value:
            if not isinstance(value, Geometry):
                value = Geometry(value)
            srid = value.srid
            # Backfill the field's SRID when the DB value came back without one.
            if not srid and self.srid != -1:
                value.srid = self.srid
        return value

    # ### Routines overloaded from Field ###
    def contribute_to_class(self, cls, name, **kwargs):
        super(GeometryField, self).contribute_to_class(cls, name, **kwargs)

        # Setup for lazy-instantiated Geometry object.
        setattr(cls, self.attname, SpatialProxy(Geometry, self))

    def formfield(self, **kwargs):
        defaults = {'form_class': self.form_class,
                    'geom_type': self.geom_type,
                    'srid': self.srid,
                    }
        defaults.update(kwargs)
        # Map widgets can't edit 3D geometries; fall back to a textarea unless
        # the caller supplied a widget or the form widget supports 3D.
        if (self.dim > 2 and 'widget' not in kwargs and
                not getattr(defaults['form_class'].widget, 'supports_3d', False)):
            defaults['widget'] = forms.Textarea
        return super(GeometryField, self).formfield(**defaults)

    def _get_db_prep_lookup(self, lookup_type, value, connection):
        """
        Prepare for the database lookup, and return any spatial parameters
        necessary for the query.  This includes wrapping any geometry
        parameters with a backend-specific adapter and formatting any distance
        parameters into the correct units for the coordinate system of the
        field.

        Only used by the deprecated GeoQuerySet and to be
        RemovedInDjango20Warning.
        """
        # Populating the parameters list, and wrapping the Geometry
        # with the Adapter of the spatial backend.
        if isinstance(value, (tuple, list)):
            params = [connection.ops.Adapter(value[0])]
            # Getting the distance parameter in the units of the field.
            params += self.get_distance(value[1:], lookup_type, connection)
        else:
            params = [connection.ops.Adapter(value)]
        return params


# The OpenGIS Geometry Type Fields
class PointField(GeometryField):
    geom_type = 'POINT'
    form_class = forms.PointField
    description = _("Point")


class LineStringField(GeometryField):
    geom_type = 'LINESTRING'
    form_class = forms.LineStringField
    description = _("Line string")


class PolygonField(GeometryField):
    geom_type = 'POLYGON'
    form_class = forms.PolygonField
    description = _("Polygon")


class MultiPointField(GeometryField):
    geom_type = 'MULTIPOINT'
    form_class = forms.MultiPointField
    description = _("Multi-point")


class MultiLineStringField(GeometryField):
    geom_type = 'MULTILINESTRING'
    form_class = forms.MultiLineStringField
    description = _("Multi-line string")


class MultiPolygonField(GeometryField):
    geom_type = 'MULTIPOLYGON'
    form_class = forms.MultiPolygonField
    description = _("Multi polygon")


class GeometryCollectionField(GeometryField):
    geom_type = 'GEOMETRYCOLLECTION'
    form_class = forms.GeometryCollectionField
    description = _("Geometry collection")


class ExtentField(GeoSelectFormatMixin, Field):
    "Used as a return value from an extent aggregate"
    description = _("Extent Aggregate Field")

    def get_internal_type(self):
        return "ExtentField"


class RasterField(BaseSpatialField):
    """
    Raster field for GeoDjango -- evaluates into GDALRaster objects.
    """
    description = _("Raster Field")
    geom_type = 'RASTER'
    geography = False

    def _check_connection(self, connection):
        # Make sure raster fields are used only on backends with raster support.
        if not connection.features.gis_enabled or not connection.features.supports_raster:
            raise ImproperlyConfigured('Raster fields require backends with raster support.')

    def db_type(self, connection):
        self._check_connection(connection)
        return super(RasterField, self).db_type(connection)

    def from_db_value(self, value, expression, connection, context):
        return connection.ops.parse_raster(value)

    def get_db_prep_value(self, value, connection, prepared=False):
        self._check_connection(connection)
        # Prepare raster for writing to database.
        if not prepared:
            value = connection.ops.deconstruct_raster(value)
        return super(RasterField, self).get_db_prep_value(value, connection, prepared)

    def contribute_to_class(self, cls, name, **kwargs):
        super(RasterField, self).contribute_to_class(cls, name, **kwargs)
        # Setup for lazy-instantiated Raster object. For large querysets, the
        # instantiation of all GDALRasters can potentially be expensive. This
        # delays the instantiation of the objects to the moment of evaluation
        # of the raster attribute.
        setattr(cls, self.attname, SpatialProxy(gdal.GDALRaster, self))

    def get_transform(self, name):
        # Integer "transform" names select a specific raster band; anything
        # else is delegated to the regular lookup machinery.
        try:
            band_index = int(name)
            return type(
                'SpecificRasterBandTransform',
                (RasterBandTransform, ),
                {'band_index': band_index}
            )
        except ValueError:
            pass
        return super(RasterField, self).get_transform(name)
mit
Alwnikrotikz/marinemap
lingcod/user_profile/migrations/0001_initial.py
3
4214
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models


class Migration(SchemaMigration):
    """South schema migration: create the `UserProfile` model/table."""

    def forwards(self, orm):
        # Adding model 'UserProfile'
        db.create_table('user_profile_userprofile', (
            ('about', self.gf('django.db.models.fields.TextField')(blank=True)),
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('user', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['auth.User'], unique=True)),
        ))
        db.send_create_signal('user_profile', ['UserProfile'])

    def backwards(self, orm):
        # Deleting model 'UserProfile'
        db.delete_table('user_profile_userprofile')

    # Frozen ORM state used by South to reconstruct the models at this
    # point in history; not live model definitions.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'user_profile.userprofile': {
            'Meta': {'object_name': 'UserProfile'},
            'about': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
        }
    }

    complete_apps = ['user_profile']
bsd-3-clause
amenonsen/ansible
lib/ansible/plugins/action/netconf.py
32
3577
#
# Copyright 2018 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import copy
import sys

from ansible.plugins.action.network import ActionModule as ActionNetworkModule
from ansible.utils.display import Display

display = Display()


class ActionModule(ActionNetworkModule):
    # Action plugin for the netconf_* modules: validates the connection type
    # and, for the deprecated `local` connection with netconf_config, spins
    # up a persistent netconf connection and exposes its socket path.

    def run(self, tmp=None, task_vars=None):
        del tmp  # tmp no longer has any effect

        # netconf_config is the only module treated as a "config" module.
        self._config_module = True if self._task.action == 'netconf_config' else False

        # netconf_config accepts `netconf` or (deprecated) `local`;
        # every other netconf_* module requires `netconf`.
        if self._play_context.connection not in ['netconf', 'local'] and self._task.action == 'netconf_config':
            return {'failed': True, 'msg': 'Connection type %s is not valid for netconf_config module. '
                                           'Valid connection type is netconf or local (deprecated)' % self._play_context.connection}
        elif self._play_context.connection not in ['netconf'] and self._task.action != 'netconf_config':
            return {'failed': True, 'msg': 'Connection type %s is not valid for %s module. '
                                           'Valid connection type is netconf.' % (self._play_context.connection, self._task.action)}

        if self._play_context.connection == 'local' and self._task.action == 'netconf_config':
            # Build a netconf play context from the task args, falling back to
            # the current play context; 830 is the last-resort port.
            args = self._task.args
            pc = copy.deepcopy(self._play_context)
            pc.connection = 'netconf'
            pc.port = int(args.get('port') or self._play_context.port or 830)

            pc.remote_user = args.get('username') or self._play_context.connection_user
            pc.password = args.get('password') or self._play_context.password
            pc.private_key_file = args.get('ssh_keyfile') or self._play_context.private_key_file

            display.vvv('using connection plugin %s (was local)' % pc.connection, pc.remote_addr)
            connection = self._shared_loader_obj.connection_loader.get('persistent', pc, sys.stdin)

            # Task-level timeout wins over the connection's configured default.
            timeout = args.get('timeout')
            command_timeout = int(timeout) if timeout else connection.get_option('persistent_command_timeout')
            connection.set_options(direct={'persistent_command_timeout': command_timeout,
                                           'look_for_keys': args.get('look_for_keys'),
                                           'hostkey_verify': args.get('hostkey_verify'),
                                           'allow_agent': args.get('allow_agent')})

            # Start the persistent connection; an empty socket path means the
            # shell could not be opened.
            socket_path = connection.run()
            display.vvvv('socket_path: %s' % socket_path, pc.remote_addr)
            if not socket_path:
                return {'failed': True,
                        'msg': 'unable to open shell. Please see: ' +
                               'https://docs.ansible.com/ansible/network_debug_troubleshooting.html#unable-to-open-shell'}

            task_vars['ansible_socket'] = socket_path

        return super(ActionModule, self).run(task_vars=task_vars)
gpl-3.0
ghchinoy/tensorflow
tensorflow/python/ops/distributions/distribution.py
21
46676
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base classes for probability distributions."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import abc
import contextlib
import types

import numpy as np
import six

from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.distributions import kullback_leibler
from tensorflow.python.ops.distributions import util
from tensorflow.python.util import deprecation
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.tf_export import tf_export


__all__ = [
    "ReparameterizationType",
    "FULLY_REPARAMETERIZED",
    "NOT_REPARAMETERIZED",
    "Distribution",
]

# Public Distribution methods whose docstrings are augmented by
# _DistributionMeta from the corresponding private `_<name>` methods.
_DISTRIBUTION_PUBLIC_METHOD_WRAPPERS = [
    "batch_shape",
    "batch_shape_tensor",
    "cdf",
    "covariance",
    "cross_entropy",
    "entropy",
    "event_shape",
    "event_shape_tensor",
    "kl_divergence",
    "log_cdf",
    "log_prob",
    "log_survival_function",
    "mean",
    "mode",
    "prob",
    "sample",
    "stddev",
    "survival_function",
    "variance",
]


@six.add_metaclass(abc.ABCMeta)
class _BaseDistribution(object):
  """Abstract base class needed for resolving subclass hierarchy."""
  pass


def _copy_fn(fn):
  """Create a deep copy of fn.

  Args:
    fn: a callable

  Returns:
    A `FunctionType`: a deep copy of fn.

  Raises:
    TypeError: if `fn` is not a callable.
  """
  if not callable(fn):
    raise TypeError("fn is not callable: %s" % fn)
  # The blessed way to copy a function. copy.deepcopy fails to create a
  # non-reference copy. Since:
  #   types.FunctionType == type(lambda: None),
  # and the docstring for the function type states:
  #
  #   function(code, globals[, name[, argdefs[, closure]]])
  #
  #   Create a function object from a code object and a dictionary.
  #   ...
  #
  # Here we can use this to create a new function with the old function's
  # code, globals, closure, etc.
  return types.FunctionType(
      code=fn.__code__, globals=fn.__globals__,
      name=fn.__name__, argdefs=fn.__defaults__,
      closure=fn.__closure__)


def _update_docstring(old_str, append_str):
  """Update old_str by inserting append_str just before the "Args:" section."""
  old_str = old_str or ""
  old_str_lines = old_str.split("\n")

  # Step 0: Prepend spaces to all lines of append_str. This is
  # necessary for correct markdown generation.
  append_str = "\n".join(" %s" % line for line in append_str.split("\n"))

  # Step 1: Find mention of "Args":
  has_args_ix = [
      ix for ix, line in enumerate(old_str_lines)
      if line.strip().lower() == "args:"]
  if has_args_ix:
    # Insert just before the last "Args:" heading found.
    final_args_ix = has_args_ix[-1]
    return ("\n".join(old_str_lines[:final_args_ix])
            + "\n\n" + append_str + "\n\n"
            + "\n".join(old_str_lines[final_args_ix:]))
  else:
    # No "Args:" section: simply append at the end.
    return old_str + "\n\n" + append_str


def _convert_to_tensor(value, name=None, preferred_dtype=None):
  """Converts to tensor avoiding an eager bug that loses float precision."""
  # TODO(b/116672045): Remove this function.
  if (context.executing_eagerly() and preferred_dtype is not None and
      (preferred_dtype.is_integer or preferred_dtype.is_bool)):
    # If conversion with the preferred integer/bool dtype would lose the
    # floating-point nature of the value, keep the float tensor instead.
    v = ops.convert_to_tensor(value, name=name)
    if v.dtype.is_floating:
      return v
  return ops.convert_to_tensor(
      value, name=name, preferred_dtype=preferred_dtype)


class _DistributionMeta(abc.ABCMeta):

  def __new__(mcs, classname, baseclasses, attrs):
    """Control the creation of subclasses of the Distribution class.

    The main purpose of this method is to properly propagate docstrings
    from private Distribution methods, like `_log_prob`, into their
    public wrappers as inherited by the Distribution base class
    (e.g. `log_prob`).

    Args:
      classname: The name of the subclass being created.
      baseclasses: A tuple of parent classes.
      attrs: A dict mapping new attributes to their values.

    Returns:
      The class object.

    Raises:
      TypeError: If `Distribution` is not a subclass of `BaseDistribution`, or
        the new class is derived via multiple inheritance and the first parent
        class is not a subclass of `BaseDistribution`.
      AttributeError: If `Distribution` does not implement e.g. `log_prob`.
      ValueError: If a `Distribution` public method lacks a docstring.
    """
    if not baseclasses:  # Nothing to be done for Distribution
      raise TypeError("Expected non-empty baseclass. Does Distribution "
                      "not subclass _BaseDistribution?")
    which_base = [
        base for base in baseclasses
        if base == _BaseDistribution or issubclass(base, Distribution)]
    base = which_base[0]
    if base == _BaseDistribution:  # Nothing to be done for Distribution
      return abc.ABCMeta.__new__(mcs, classname, baseclasses, attrs)
    if not issubclass(base, Distribution):
      raise TypeError("First parent class declared for %s must be "
                      "Distribution, but saw '%s'" % (classname, base.__name__))
    for attr in _DISTRIBUTION_PUBLIC_METHOD_WRAPPERS:
      special_attr = "_%s" % attr
      class_attr_value = attrs.get(attr, None)
      if attr in attrs:
        # The method is being overridden, do not update its docstring
        continue
      base_attr_value = getattr(base, attr, None)
      if not base_attr_value:
        raise AttributeError(
            "Internal error: expected base class '%s' to implement method '%s'"
            % (base.__name__, attr))
      class_special_attr_value = attrs.get(special_attr, None)
      if class_special_attr_value is None:
        # No _special method available, no need to update the docstring.
        continue
      class_special_attr_docstring = tf_inspect.getdoc(class_special_attr_value)
      if not class_special_attr_docstring:
        # No docstring to append.
        continue
      # Copy the inherited public wrapper so mutating its __doc__ does not
      # affect the base class, then splice in the subclass documentation.
      class_attr_value = _copy_fn(base_attr_value)
      class_attr_docstring = tf_inspect.getdoc(base_attr_value)
      if class_attr_docstring is None:
        raise ValueError(
            "Expected base class fn to contain a docstring: %s.%s"
            % (base.__name__, attr))
      class_attr_value.__doc__ = _update_docstring(
          class_attr_value.__doc__,
          ("Additional documentation from `%s`:\n\n%s"
           % (classname, class_special_attr_docstring)))
      attrs[attr] = class_attr_value
    return abc.ABCMeta.__new__(mcs, classname, baseclasses, attrs)


@tf_export(v1=["distributions.ReparameterizationType"])
class ReparameterizationType(object):
  """Instances of this class represent how sampling is reparameterized.

  Two static instances exist in the distributions library, signifying
  one of two possible properties for samples from a distribution:

  `FULLY_REPARAMETERIZED`: Samples from the distribution are fully
    reparameterized, and straight-through gradients are supported.

  `NOT_REPARAMETERIZED`: Samples from the distribution are not fully
    reparameterized, and straight-through gradients are either partially
    unsupported or are not supported at all. In this case, for purposes of
    e.g. RL or variational inference, it is generally safest to wrap the
    sample results in a `stop_gradients` call and use policy
    gradients / surrogate loss instead.
  """

  @deprecation.deprecated(
      "2019-01-01",
      "The TensorFlow Distributions library has moved to "
      "TensorFlow Probability "
      "(https://github.com/tensorflow/probability). You "
      "should update all references to use `tfp.distributions` "
      "instead of `tf.distributions`.",
      warn_once=True)
  def __init__(self, rep_type):
    self._rep_type = rep_type

  def __repr__(self):
    # NOTE(review): "Reparameteriation" looks like a typo, but the string is
    # preserved verbatim since callers may match on it.
    return "<Reparameteriation Type: %s>" % self._rep_type

  def __eq__(self, other):
    """Determine if this `ReparameterizationType` is equal to another.

    Since RepaparameterizationType instances are constant static global
    instances, equality checks if two instances' id() values are equal.

    Args:
      other: Object to compare against.

    Returns:
      `self is other`.
    """
    return self is other


# Fully reparameterized distribution: samples from a fully
# reparameterized distribution support straight-through gradients with
# respect to all parameters.
FULLY_REPARAMETERIZED = ReparameterizationType("FULLY_REPARAMETERIZED")
tf_export(v1=["distributions.FULLY_REPARAMETERIZED"]).export_constant(
    __name__, "FULLY_REPARAMETERIZED")


# Not reparameterized distribution: samples from a non-
# reparameterized distribution do not support straight-through gradients for
# at least some of the parameters.
NOT_REPARAMETERIZED = ReparameterizationType("NOT_REPARAMETERIZED") tf_export(v1=["distributions.NOT_REPARAMETERIZED"]).export_constant( __name__, "NOT_REPARAMETERIZED") @six.add_metaclass(_DistributionMeta) @tf_export(v1=["distributions.Distribution"]) class Distribution(_BaseDistribution): """A generic probability distribution base class. `Distribution` is a base class for constructing and organizing properties (e.g., mean, variance) of random variables (e.g, Bernoulli, Gaussian). #### Subclassing Subclasses are expected to implement a leading-underscore version of the same-named function. The argument signature should be identical except for the omission of `name="..."`. For example, to enable `log_prob(value, name="log_prob")` a subclass should implement `_log_prob(value)`. Subclasses can append to public-level docstrings by providing docstrings for their method specializations. For example: ```python @util.AppendDocstring("Some other details.") def _log_prob(self, value): ... ``` would add the string "Some other details." to the `log_prob` function docstring. This is implemented as a simple decorator to avoid python linter complaining about missing Args/Returns/Raises sections in the partial docstrings. #### Broadcasting, batching, and shapes All distributions support batches of independent distributions of that type. The batch shape is determined by broadcasting together the parameters. The shape of arguments to `__init__`, `cdf`, `log_cdf`, `prob`, and `log_prob` reflect this broadcasting, as does the return value of `sample` and `sample_n`. `sample_n_shape = [n] + batch_shape + event_shape`, where `sample_n_shape` is the shape of the `Tensor` returned from `sample_n`, `n` is the number of samples, `batch_shape` defines how many independent distributions there are, and `event_shape` defines the shape of samples from each of those independent distributions. 
Samples are independent along the `batch_shape` dimensions, but not necessarily so along the `event_shape` dimensions (depending on the particulars of the underlying distribution). Using the `Uniform` distribution as an example: ```python minval = 3.0 maxval = [[4.0, 6.0], [10.0, 12.0]] # Broadcasting: # This instance represents 4 Uniform distributions. Each has a lower bound at # 3.0 as the `minval` parameter was broadcasted to match `maxval`'s shape. u = Uniform(minval, maxval) # `event_shape` is `TensorShape([])`. event_shape = u.event_shape # `event_shape_t` is a `Tensor` which will evaluate to []. event_shape_t = u.event_shape_tensor() # Sampling returns a sample per distribution. `samples` has shape # [5, 2, 2], which is [n] + batch_shape + event_shape, where n=5, # batch_shape=[2, 2], and event_shape=[]. samples = u.sample_n(5) # The broadcasting holds across methods. Here we use `cdf` as an example. The # same holds for `log_cdf` and the likelihood functions. # `cum_prob` has shape [2, 2] as the `value` argument was broadcasted to the # shape of the `Uniform` instance. cum_prob_broadcast = u.cdf(4.0) # `cum_prob`'s shape is [2, 2], one per distribution. No broadcasting # occurred. cum_prob_per_dist = u.cdf([[4.0, 5.0], [6.0, 7.0]]) # INVALID as the `value` argument is not broadcastable to the distribution's # shape. cum_prob_invalid = u.cdf([4.0, 5.0, 6.0]) ``` #### Shapes There are three important concepts associated with TensorFlow Distributions shapes: - Event shape describes the shape of a single draw from the distribution; it may be dependent across dimensions. For scalar distributions, the event shape is `[]`. For a 5-dimensional MultivariateNormal, the event shape is `[5]`. - Batch shape describes independent, not identically distributed draws, aka a "collection" or "bunch" of distributions. - Sample shape describes independent, identically distributed draws of batches from the distribution family. 
The event shape and the batch shape are properties of a Distribution object, whereas the sample shape is associated with a specific call to `sample` or `log_prob`. For detailed usage examples of TensorFlow Distributions shapes, see [this tutorial]( https://github.com/tensorflow/probability/blob/master/tensorflow_probability/examples/jupyter_notebooks/Understanding_TensorFlow_Distributions_Shapes.ipynb) #### Parameter values leading to undefined statistics or distributions. Some distributions do not have well-defined statistics for all initialization parameter values. For example, the beta distribution is parameterized by positive real numbers `concentration1` and `concentration0`, and does not have well-defined mode if `concentration1 < 1` or `concentration0 < 1`. The user is given the option of raising an exception or returning `NaN`. ```python a = tf.exp(tf.matmul(logits, weights_a)) b = tf.exp(tf.matmul(logits, weights_b)) # Will raise exception if ANY batch member has a < 1 or b < 1. dist = distributions.beta(a, b, allow_nan_stats=False) mode = dist.mode().eval() # Will return NaN for batch members with either a < 1 or b < 1. dist = distributions.beta(a, b, allow_nan_stats=True) # Default behavior mode = dist.mode().eval() ``` In all cases, an exception is raised if *invalid* parameters are passed, e.g. ```python # Will raise an exception if any Op is run. negative_a = -1.0 * a # beta distribution by definition has a > 0. dist = distributions.beta(negative_a, b, allow_nan_stats=True) dist.mean().eval() ``` """ @deprecation.deprecated( "2019-01-01", "The TensorFlow Distributions library has moved to " "TensorFlow Probability " "(https://github.com/tensorflow/probability). You " "should update all references to use `tfp.distributions` " "instead of `tf.distributions`.", warn_once=True) def __init__(self, dtype, reparameterization_type, validate_args, allow_nan_stats, parameters=None, graph_parents=None, name=None): """Constructs the `Distribution`. 
**This is a private method for subclass use.** Args: dtype: The type of the event samples. `None` implies no type-enforcement. reparameterization_type: Instance of `ReparameterizationType`. If `distributions.FULLY_REPARAMETERIZED`, this `Distribution` can be reparameterized in terms of some standard distribution with a function whose Jacobian is constant for the support of the standard distribution. If `distributions.NOT_REPARAMETERIZED`, then no such reparameterization is available. validate_args: Python `bool`, default `False`. When `True` distribution parameters are checked for validity despite possibly degrading runtime performance. When `False` invalid inputs may silently render incorrect outputs. allow_nan_stats: Python `bool`, default `True`. When `True`, statistics (e.g., mean, mode, variance) use the value "`NaN`" to indicate the result is undefined. When `False`, an exception is raised if one or more of the statistic's batch members are undefined. parameters: Python `dict` of parameters used to instantiate this `Distribution`. graph_parents: Python `list` of graph prerequisites of this `Distribution`. name: Python `str` name prefixed to Ops created by this class. Default: subclass name. Raises: ValueError: if any member of graph_parents is `None` or not a `Tensor`. """ graph_parents = [] if graph_parents is None else graph_parents for i, t in enumerate(graph_parents): if t is None or not tensor_util.is_tensor(t): raise ValueError("Graph parent item %d is not a Tensor; %s." 
% (i, t)) if not name or name[-1] != "/": # `name` is not a name scope non_unique_name = name or type(self).__name__ with ops.name_scope(non_unique_name) as name: pass self._dtype = dtype self._reparameterization_type = reparameterization_type self._allow_nan_stats = allow_nan_stats self._validate_args = validate_args self._parameters = parameters or {} self._graph_parents = graph_parents self._name = name @property def _parameters(self): return self._parameter_dict @_parameters.setter def _parameters(self, value): """Intercept assignments to self._parameters to avoid reference cycles. Parameters are often created using locals(), so we need to clean out any references to `self` before assigning it to an attribute. Args: value: A dictionary of parameters to assign to the `_parameters` property. """ if "self" in value: del value["self"] self._parameter_dict = value @classmethod def param_shapes(cls, sample_shape, name="DistributionParamShapes"): """Shapes of parameters given the desired shape of a call to `sample()`. This is a class method that describes what key/value arguments are required to instantiate the given `Distribution` so that a particular shape is returned for that instance's call to `sample()`. Subclasses should override class method `_param_shapes`. Args: sample_shape: `Tensor` or python list/tuple. Desired shape of a call to `sample()`. name: name to prepend ops with. Returns: `dict` of parameter name to `Tensor` shapes. """ with ops.name_scope(name, values=[sample_shape]): return cls._param_shapes(sample_shape) @classmethod def param_static_shapes(cls, sample_shape): """param_shapes with static (i.e. `TensorShape`) shapes. This is a class method that describes what key/value arguments are required to instantiate the given `Distribution` so that a particular shape is returned for that instance's call to `sample()`. Assumes that the sample's shape is known statically. 
Subclasses should override class method `_param_shapes` to return constant-valued tensors when constant values are fed. Args: sample_shape: `TensorShape` or python list/tuple. Desired shape of a call to `sample()`. Returns: `dict` of parameter name to `TensorShape`. Raises: ValueError: if `sample_shape` is a `TensorShape` and is not fully defined. """ if isinstance(sample_shape, tensor_shape.TensorShape): if not sample_shape.is_fully_defined(): raise ValueError("TensorShape sample_shape must be fully defined") sample_shape = sample_shape.as_list() params = cls.param_shapes(sample_shape) static_params = {} for name, shape in params.items(): static_shape = tensor_util.constant_value(shape) if static_shape is None: raise ValueError( "sample_shape must be a fully-defined TensorShape or list/tuple") static_params[name] = tensor_shape.TensorShape(static_shape) return static_params @staticmethod def _param_shapes(sample_shape): raise NotImplementedError("_param_shapes not implemented") @property def name(self): """Name prepended to all ops created by this `Distribution`.""" return self._name @property def dtype(self): """The `DType` of `Tensor`s handled by this `Distribution`.""" return self._dtype @property def parameters(self): """Dictionary of parameters used to instantiate this `Distribution`.""" # Remove "self", "__class__", or other special variables. These can appear # if the subclass used: # `parameters = dict(locals())`. return {k: v for k, v in self._parameters.items() if not k.startswith("__") and k != "self"} @property def reparameterization_type(self): """Describes how samples from the distribution are reparameterized. Currently this is one of the static instances `distributions.FULLY_REPARAMETERIZED` or `distributions.NOT_REPARAMETERIZED`. Returns: An instance of `ReparameterizationType`. """ return self._reparameterization_type @property def allow_nan_stats(self): """Python `bool` describing behavior when a stat is undefined. 
Stats return +/- infinity when it makes sense. E.g., the variance of a Cauchy distribution is infinity. However, sometimes the statistic is undefined, e.g., if a distribution's pdf does not achieve a maximum within the support of the distribution, the mode is undefined. If the mean is undefined, then by definition the variance is undefined. E.g. the mean for Student's T for df = 1 is undefined (no clear way to say it is either + or - infinity), so the variance = E[(X - mean)**2] is also undefined. Returns: allow_nan_stats: Python `bool`. """ return self._allow_nan_stats @property def validate_args(self): """Python `bool` indicating possibly expensive checks are enabled.""" return self._validate_args def copy(self, **override_parameters_kwargs): """Creates a deep copy of the distribution. Note: the copy distribution may continue to depend on the original initialization arguments. Args: **override_parameters_kwargs: String/value dictionary of initialization arguments to override with new values. Returns: distribution: A new instance of `type(self)` initialized from the union of self.parameters and override_parameters_kwargs, i.e., `dict(self.parameters, **override_parameters_kwargs)`. """ parameters = dict(self.parameters, **override_parameters_kwargs) return type(self)(**parameters) def _batch_shape_tensor(self): raise NotImplementedError( "batch_shape_tensor is not implemented: {}".format(type(self).__name__)) def batch_shape_tensor(self, name="batch_shape_tensor"): """Shape of a single sample from a single event index as a 1-D `Tensor`. The batch dimensions are indexes into independent, non-identical parameterizations of this distribution. Args: name: name to give to the op Returns: batch_shape: `Tensor`. 
""" with self._name_scope(name): if self.batch_shape.is_fully_defined(): return ops.convert_to_tensor(self.batch_shape.as_list(), dtype=dtypes.int32, name="batch_shape") return self._batch_shape_tensor() def _batch_shape(self): return tensor_shape.TensorShape(None) @property def batch_shape(self): """Shape of a single sample from a single event index as a `TensorShape`. May be partially defined or unknown. The batch dimensions are indexes into independent, non-identical parameterizations of this distribution. Returns: batch_shape: `TensorShape`, possibly unknown. """ return tensor_shape.as_shape(self._batch_shape()) def _event_shape_tensor(self): raise NotImplementedError( "event_shape_tensor is not implemented: {}".format(type(self).__name__)) def event_shape_tensor(self, name="event_shape_tensor"): """Shape of a single sample from a single batch as a 1-D int32 `Tensor`. Args: name: name to give to the op Returns: event_shape: `Tensor`. """ with self._name_scope(name): if self.event_shape.is_fully_defined(): return ops.convert_to_tensor(self.event_shape.as_list(), dtype=dtypes.int32, name="event_shape") return self._event_shape_tensor() def _event_shape(self): return tensor_shape.TensorShape(None) @property def event_shape(self): """Shape of a single sample from a single batch as a `TensorShape`. May be partially defined or unknown. Returns: event_shape: `TensorShape`, possibly unknown. """ return tensor_shape.as_shape(self._event_shape()) def is_scalar_event(self, name="is_scalar_event"): """Indicates that `event_shape == []`. Args: name: Python `str` prepended to names of ops created by this function. Returns: is_scalar_event: `bool` scalar `Tensor`. """ with self._name_scope(name): return ops.convert_to_tensor( self._is_scalar_helper(self.event_shape, self.event_shape_tensor), name="is_scalar_event") def is_scalar_batch(self, name="is_scalar_batch"): """Indicates that `batch_shape == []`. 
Args: name: Python `str` prepended to names of ops created by this function. Returns: is_scalar_batch: `bool` scalar `Tensor`. """ with self._name_scope(name): return ops.convert_to_tensor( self._is_scalar_helper(self.batch_shape, self.batch_shape_tensor), name="is_scalar_batch") def _sample_n(self, n, seed=None): raise NotImplementedError("sample_n is not implemented: {}".format( type(self).__name__)) def _call_sample_n(self, sample_shape, seed, name, **kwargs): with self._name_scope(name, values=[sample_shape]): sample_shape = ops.convert_to_tensor( sample_shape, dtype=dtypes.int32, name="sample_shape") sample_shape, n = self._expand_sample_shape_to_vector( sample_shape, "sample_shape") samples = self._sample_n(n, seed, **kwargs) batch_event_shape = array_ops.shape(samples)[1:] final_shape = array_ops.concat([sample_shape, batch_event_shape], 0) samples = array_ops.reshape(samples, final_shape) samples = self._set_sample_static_shape(samples, sample_shape) return samples def sample(self, sample_shape=(), seed=None, name="sample"): """Generate samples of the specified shape. Note that a call to `sample()` without arguments will generate a single sample. Args: sample_shape: 0D or 1D `int32` `Tensor`. Shape of the generated samples. seed: Python integer seed for RNG name: name to give to the op. Returns: samples: a `Tensor` with prepended dimensions `sample_shape`. 
""" return self._call_sample_n(sample_shape, seed, name) def _log_prob(self, value): raise NotImplementedError("log_prob is not implemented: {}".format( type(self).__name__)) def _call_log_prob(self, value, name, **kwargs): with self._name_scope(name, values=[value]): value = _convert_to_tensor( value, name="value", preferred_dtype=self.dtype) try: return self._log_prob(value, **kwargs) except NotImplementedError as original_exception: try: return math_ops.log(self._prob(value, **kwargs)) except NotImplementedError: raise original_exception def log_prob(self, value, name="log_prob"): """Log probability density/mass function. Args: value: `float` or `double` `Tensor`. name: Python `str` prepended to names of ops created by this function. Returns: log_prob: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with values of type `self.dtype`. """ return self._call_log_prob(value, name) def _prob(self, value): raise NotImplementedError("prob is not implemented: {}".format( type(self).__name__)) def _call_prob(self, value, name, **kwargs): with self._name_scope(name, values=[value]): value = _convert_to_tensor( value, name="value", preferred_dtype=self.dtype) try: return self._prob(value, **kwargs) except NotImplementedError as original_exception: try: return math_ops.exp(self._log_prob(value, **kwargs)) except NotImplementedError: raise original_exception def prob(self, value, name="prob"): """Probability density/mass function. Args: value: `float` or `double` `Tensor`. name: Python `str` prepended to names of ops created by this function. Returns: prob: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with values of type `self.dtype`. 
""" return self._call_prob(value, name) def _log_cdf(self, value): raise NotImplementedError("log_cdf is not implemented: {}".format( type(self).__name__)) def _call_log_cdf(self, value, name, **kwargs): with self._name_scope(name, values=[value]): value = _convert_to_tensor( value, name="value", preferred_dtype=self.dtype) try: return self._log_cdf(value, **kwargs) except NotImplementedError as original_exception: try: return math_ops.log(self._cdf(value, **kwargs)) except NotImplementedError: raise original_exception def log_cdf(self, value, name="log_cdf"): """Log cumulative distribution function. Given random variable `X`, the cumulative distribution function `cdf` is: ```none log_cdf(x) := Log[ P[X <= x] ] ``` Often, a numerical approximation can be used for `log_cdf(x)` that yields a more accurate answer than simply taking the logarithm of the `cdf` when `x << -1`. Args: value: `float` or `double` `Tensor`. name: Python `str` prepended to names of ops created by this function. Returns: logcdf: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with values of type `self.dtype`. """ return self._call_log_cdf(value, name) def _cdf(self, value): raise NotImplementedError("cdf is not implemented: {}".format( type(self).__name__)) def _call_cdf(self, value, name, **kwargs): with self._name_scope(name, values=[value]): value = _convert_to_tensor( value, name="value", preferred_dtype=self.dtype) try: return self._cdf(value, **kwargs) except NotImplementedError as original_exception: try: return math_ops.exp(self._log_cdf(value, **kwargs)) except NotImplementedError: raise original_exception def cdf(self, value, name="cdf"): """Cumulative distribution function. Given random variable `X`, the cumulative distribution function `cdf` is: ```none cdf(x) := P[X <= x] ``` Args: value: `float` or `double` `Tensor`. name: Python `str` prepended to names of ops created by this function. 
Returns: cdf: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with values of type `self.dtype`. """ return self._call_cdf(value, name) def _log_survival_function(self, value): raise NotImplementedError( "log_survival_function is not implemented: {}".format( type(self).__name__)) def _call_log_survival_function(self, value, name, **kwargs): with self._name_scope(name, values=[value]): value = _convert_to_tensor( value, name="value", preferred_dtype=self.dtype) try: return self._log_survival_function(value, **kwargs) except NotImplementedError as original_exception: try: return math_ops.log1p(-self.cdf(value, **kwargs)) except NotImplementedError: raise original_exception def log_survival_function(self, value, name="log_survival_function"): """Log survival function. Given random variable `X`, the survival function is defined: ```none log_survival_function(x) = Log[ P[X > x] ] = Log[ 1 - P[X <= x] ] = Log[ 1 - cdf(x) ] ``` Typically, different numerical approximations can be used for the log survival function, which are more accurate than `1 - cdf(x)` when `x >> 1`. Args: value: `float` or `double` `Tensor`. name: Python `str` prepended to names of ops created by this function. Returns: `Tensor` of shape `sample_shape(x) + self.batch_shape` with values of type `self.dtype`. """ return self._call_log_survival_function(value, name) def _survival_function(self, value): raise NotImplementedError("survival_function is not implemented: {}".format( type(self).__name__)) def _call_survival_function(self, value, name, **kwargs): with self._name_scope(name, values=[value]): value = _convert_to_tensor( value, name="value", preferred_dtype=self.dtype) try: return self._survival_function(value, **kwargs) except NotImplementedError as original_exception: try: return 1. - self.cdf(value, **kwargs) except NotImplementedError: raise original_exception def survival_function(self, value, name="survival_function"): """Survival function. 
Given random variable `X`, the survival function is defined: ```none survival_function(x) = P[X > x] = 1 - P[X <= x] = 1 - cdf(x). ``` Args: value: `float` or `double` `Tensor`. name: Python `str` prepended to names of ops created by this function. Returns: `Tensor` of shape `sample_shape(x) + self.batch_shape` with values of type `self.dtype`. """ return self._call_survival_function(value, name) def _entropy(self): raise NotImplementedError("entropy is not implemented: {}".format( type(self).__name__)) def entropy(self, name="entropy"): """Shannon entropy in nats.""" with self._name_scope(name): return self._entropy() def _mean(self): raise NotImplementedError("mean is not implemented: {}".format( type(self).__name__)) def mean(self, name="mean"): """Mean.""" with self._name_scope(name): return self._mean() def _quantile(self, value): raise NotImplementedError("quantile is not implemented: {}".format( type(self).__name__)) def _call_quantile(self, value, name, **kwargs): with self._name_scope(name, values=[value]): value = _convert_to_tensor( value, name="value", preferred_dtype=self.dtype) return self._quantile(value, **kwargs) def quantile(self, value, name="quantile"): """Quantile function. Aka "inverse cdf" or "percent point function". Given random variable `X` and `p in [0, 1]`, the `quantile` is: ```none quantile(p) := x such that P[X <= x] == p ``` Args: value: `float` or `double` `Tensor`. name: Python `str` prepended to names of ops created by this function. Returns: quantile: a `Tensor` of shape `sample_shape(x) + self.batch_shape` with values of type `self.dtype`. """ return self._call_quantile(value, name) def _variance(self): raise NotImplementedError("variance is not implemented: {}".format( type(self).__name__)) def variance(self, name="variance"): """Variance. 
Variance is defined as, ```none Var = E[(X - E[X])**2] ``` where `X` is the random variable associated with this distribution, `E` denotes expectation, and `Var.shape = batch_shape + event_shape`. Args: name: Python `str` prepended to names of ops created by this function. Returns: variance: Floating-point `Tensor` with shape identical to `batch_shape + event_shape`, i.e., the same shape as `self.mean()`. """ with self._name_scope(name): try: return self._variance() except NotImplementedError as original_exception: try: return math_ops.square(self._stddev()) except NotImplementedError: raise original_exception def _stddev(self): raise NotImplementedError("stddev is not implemented: {}".format( type(self).__name__)) def stddev(self, name="stddev"): """Standard deviation. Standard deviation is defined as, ```none stddev = E[(X - E[X])**2]**0.5 ``` where `X` is the random variable associated with this distribution, `E` denotes expectation, and `stddev.shape = batch_shape + event_shape`. Args: name: Python `str` prepended to names of ops created by this function. Returns: stddev: Floating-point `Tensor` with shape identical to `batch_shape + event_shape`, i.e., the same shape as `self.mean()`. """ with self._name_scope(name): try: return self._stddev() except NotImplementedError as original_exception: try: return math_ops.sqrt(self._variance()) except NotImplementedError: raise original_exception def _covariance(self): raise NotImplementedError("covariance is not implemented: {}".format( type(self).__name__)) def covariance(self, name="covariance"): """Covariance. Covariance is (possibly) defined only for non-scalar-event distributions. For example, for a length-`k`, vector-valued distribution, it is calculated as, ```none Cov[i, j] = Covariance(X_i, X_j) = E[(X_i - E[X_i]) (X_j - E[X_j])] ``` where `Cov` is a (batch of) `k x k` matrix, `0 <= (i, j) < k`, and `E` denotes expectation. 
Alternatively, for non-vector, multivariate distributions (e.g., matrix-valued, Wishart), `Covariance` shall return a (batch of) matrices under some vectorization of the events, i.e., ```none Cov[i, j] = Covariance(Vec(X)_i, Vec(X)_j) = [as above] ``` where `Cov` is a (batch of) `k' x k'` matrices, `0 <= (i, j) < k' = reduce_prod(event_shape)`, and `Vec` is some function mapping indices of this distribution's event dimensions to indices of a length-`k'` vector. Args: name: Python `str` prepended to names of ops created by this function. Returns: covariance: Floating-point `Tensor` with shape `[B1, ..., Bn, k', k']` where the first `n` dimensions are batch coordinates and `k' = reduce_prod(self.event_shape)`. """ with self._name_scope(name): return self._covariance() def _mode(self): raise NotImplementedError("mode is not implemented: {}".format( type(self).__name__)) def mode(self, name="mode"): """Mode.""" with self._name_scope(name): return self._mode() def _cross_entropy(self, other): return kullback_leibler.cross_entropy( self, other, allow_nan_stats=self.allow_nan_stats) def cross_entropy(self, other, name="cross_entropy"): """Computes the (Shannon) cross entropy. Denote this distribution (`self`) by `P` and the `other` distribution by `Q`. Assuming `P, Q` are absolutely continuous with respect to one another and permit densities `p(x) dr(x)` and `q(x) dr(x)`, (Shanon) cross entropy is defined as: ```none H[P, Q] = E_p[-log q(X)] = -int_F p(x) log q(x) dr(x) ``` where `F` denotes the support of the random variable `X ~ P`. Args: other: `tfp.distributions.Distribution` instance. name: Python `str` prepended to names of ops created by this function. Returns: cross_entropy: `self.dtype` `Tensor` with shape `[B1, ..., Bn]` representing `n` different calculations of (Shanon) cross entropy. 
""" with self._name_scope(name): return self._cross_entropy(other) def _kl_divergence(self, other): return kullback_leibler.kl_divergence( self, other, allow_nan_stats=self.allow_nan_stats) def kl_divergence(self, other, name="kl_divergence"): """Computes the Kullback--Leibler divergence. Denote this distribution (`self`) by `p` and the `other` distribution by `q`. Assuming `p, q` are absolutely continuous with respect to reference measure `r`, the KL divergence is defined as: ```none KL[p, q] = E_p[log(p(X)/q(X))] = -int_F p(x) log q(x) dr(x) + int_F p(x) log p(x) dr(x) = H[p, q] - H[p] ``` where `F` denotes the support of the random variable `X ~ p`, `H[., .]` denotes (Shanon) cross entropy, and `H[.]` denotes (Shanon) entropy. Args: other: `tfp.distributions.Distribution` instance. name: Python `str` prepended to names of ops created by this function. Returns: kl_divergence: `self.dtype` `Tensor` with shape `[B1, ..., Bn]` representing `n` different calculations of the Kullback-Leibler divergence. 
""" with self._name_scope(name): return self._kl_divergence(other) def __str__(self): return ("tfp.distributions.{type_name}(" "\"{self_name}\"" "{maybe_batch_shape}" "{maybe_event_shape}" ", dtype={dtype})".format( type_name=type(self).__name__, self_name=self.name, maybe_batch_shape=(", batch_shape={}".format(self.batch_shape) if self.batch_shape.ndims is not None else ""), maybe_event_shape=(", event_shape={}".format(self.event_shape) if self.event_shape.ndims is not None else ""), dtype=self.dtype.name)) def __repr__(self): return ("<tfp.distributions.{type_name} " "'{self_name}'" " batch_shape={batch_shape}" " event_shape={event_shape}" " dtype={dtype}>".format( type_name=type(self).__name__, self_name=self.name, batch_shape=self.batch_shape, event_shape=self.event_shape, dtype=self.dtype.name)) @contextlib.contextmanager def _name_scope(self, name=None, values=None): """Helper function to standardize op scope.""" with ops.name_scope(self.name): with ops.name_scope(name, values=( ([] if values is None else values) + self._graph_parents)) as scope: yield scope def _expand_sample_shape_to_vector(self, x, name): """Helper to `sample` which ensures input is 1D.""" x_static_val = tensor_util.constant_value(x) if x_static_val is None: prod = math_ops.reduce_prod(x) else: prod = np.prod(x_static_val, dtype=x.dtype.as_numpy_dtype()) ndims = x.get_shape().ndims # != sample_ndims if ndims is None: # Maybe expand_dims. ndims = array_ops.rank(x) expanded_shape = util.pick_vector( math_ops.equal(ndims, 0), np.array([1], dtype=np.int32), array_ops.shape(x)) x = array_ops.reshape(x, expanded_shape) elif ndims == 0: # Definitely expand_dims. 
if x_static_val is not None: x = ops.convert_to_tensor( np.array([x_static_val], dtype=x.dtype.as_numpy_dtype()), name=name) else: x = array_ops.reshape(x, [1]) elif ndims != 1: raise ValueError("Input is neither scalar nor vector.") return x, prod def _set_sample_static_shape(self, x, sample_shape): """Helper to `sample`; sets static shape info.""" # Set shape hints. sample_shape = tensor_shape.TensorShape( tensor_util.constant_value(sample_shape)) ndims = x.get_shape().ndims sample_ndims = sample_shape.ndims batch_ndims = self.batch_shape.ndims event_ndims = self.event_shape.ndims # Infer rank(x). if (ndims is None and sample_ndims is not None and batch_ndims is not None and event_ndims is not None): ndims = sample_ndims + batch_ndims + event_ndims x.set_shape([None] * ndims) # Infer sample shape. if ndims is not None and sample_ndims is not None: shape = sample_shape.concatenate([None]*(ndims - sample_ndims)) x.set_shape(x.get_shape().merge_with(shape)) # Infer event shape. if ndims is not None and event_ndims is not None: shape = tensor_shape.TensorShape( [None]*(ndims - event_ndims)).concatenate(self.event_shape) x.set_shape(x.get_shape().merge_with(shape)) # Infer batch shape. 
if batch_ndims is not None: if ndims is not None: if sample_ndims is None and event_ndims is not None: sample_ndims = ndims - batch_ndims - event_ndims elif event_ndims is None and sample_ndims is not None: event_ndims = ndims - batch_ndims - sample_ndims if sample_ndims is not None and event_ndims is not None: shape = tensor_shape.TensorShape([None]*sample_ndims).concatenate( self.batch_shape).concatenate([None]*event_ndims) x.set_shape(x.get_shape().merge_with(shape)) return x def _is_scalar_helper(self, static_shape, dynamic_shape_fn): """Implementation for `is_scalar_batch` and `is_scalar_event`.""" if static_shape.ndims is not None: return static_shape.ndims == 0 shape = dynamic_shape_fn() if (shape.get_shape().ndims is not None and shape.get_shape().dims[0].value is not None): # If the static_shape is correctly written then we should never execute # this branch. We keep it just in case there's some unimagined corner # case. return shape.get_shape().as_list() == [0] return math_ops.equal(array_ops.shape(shape)[0], 0)
apache-2.0
mihu/redis-rdb-tools
tests/parser_tests.py
1
15247
'''Tests for the RDB parser.

Each test loads a canned ``.rdb`` dump from the ``dumps`` directory next to
this file, drives :class:`rdbtools.RdbParser` over it with a
:class:`MockRedis` callback, and asserts on the keys/values/lengths/expiries
the mock recorded.
'''
import unittest
import os
import math

from rdbtools import RdbCallback, RdbParser


class RedisParserTestCase(unittest.TestCase):
    '''End-to-end tests: parse a canned rdb file, verify the recorded data.'''

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_empty_rdb(self):
        r = load_rdb('empty_database.rdb')
        self.assert_('start_rdb' in r.methods_called)
        self.assert_('end_rdb' in r.methods_called)
        self.assertEquals(len(r.databases), 0, msg = "didn't expect any databases")

    def test_multiple_databases(self):
        r = load_rdb('multiple_databases.rdb')
        # BUGFIX: was `self.assert_(len(r.databases), 2)`, which only checked
        # truthiness of len() and treated 2 as the failure message.
        self.assertEquals(len(r.databases), 2)
        self.assert_(1 not in r.databases)
        self.assertEquals(r.databases[0]["key_in_zeroth_database"], "zero")
        self.assertEquals(r.databases[2]["key_in_second_database"], "second")

    def test_keys_with_expiry(self):
        r = load_rdb('keys_with_expiry.rdb')
        expiry = r.expiry[0]['expires_ms_precision']
        self.assertEquals(expiry.year, 2022)
        self.assertEquals(expiry.month, 12)
        self.assertEquals(expiry.day, 25)
        self.assertEquals(expiry.hour, 10)
        self.assertEquals(expiry.minute, 11)
        self.assertEquals(expiry.second, 12)
        self.assertEquals(expiry.microsecond, 573000)

    def test_integer_keys(self):
        r = load_rdb('integer_keys.rdb')
        self.assertEquals(r.databases[0][125], "Positive 8 bit integer")
        self.assertEquals(r.databases[0][0xABAB], "Positive 16 bit integer")
        self.assertEquals(r.databases[0][0x0AEDD325], "Positive 32 bit integer")

    def test_negative_integer_keys(self):
        r = load_rdb('integer_keys.rdb')
        self.assertEquals(r.databases[0][-123], "Negative 8 bit integer")
        self.assertEquals(r.databases[0][-0x7325], "Negative 16 bit integer")
        self.assertEquals(r.databases[0][-0x0AEDD325], "Negative 32 bit integer")

    def test_string_key_with_compression(self):
        r = load_rdb('easily_compressible_string_key.rdb')
        key = "".join('a' for x in range(0, 200))
        value = "Key that redis should compress easily"
        self.assertEquals(r.databases[0][key], value)

    def test_zipmap_thats_compresses_easily(self):
        r = load_rdb('zipmap_that_compresses_easily.rdb')
        self.assertEquals(r.databases[0]["zipmap_compresses_easily"]["a"], "aa")
        self.assertEquals(r.databases[0]["zipmap_compresses_easily"]["aa"], "aaaa")
        self.assertEquals(r.databases[0]["zipmap_compresses_easily"]["aaaaa"], "aaaaaaaaaaaaaa")

    def test_zipmap_that_doesnt_compress(self):
        r = load_rdb('zipmap_that_doesnt_compress.rdb')
        self.assertEquals(r.databases[0]["zimap_doesnt_compress"]["MKD1G6"], 2)
        self.assertEquals(r.databases[0]["zimap_doesnt_compress"]["YNNXK"], "F7TI")

    def test_zipmap_with_big_values(self):
        ''' See issue https://github.com/sripathikrishnan/redis-rdb-tools/issues/2
            Values with length around 253/254/255 bytes are treated specially in the parser
            This test exercises those boundary conditions

            In order to test a bug with large ziplists, it is necessary
            to start Redis with "hash-max-ziplist-value 21000", create this
            rdb file, and run the test. That forces the 20kbyte value to be stored
            as a ziplist with a length encoding of 5 bytes.
        '''
        r = load_rdb('zipmap_with_big_values.rdb')
        self.assertEquals(len(r.databases[0]["zipmap_with_big_values"]["253bytes"]), 253)
        self.assertEquals(len(r.databases[0]["zipmap_with_big_values"]["254bytes"]), 254)
        self.assertEquals(len(r.databases[0]["zipmap_with_big_values"]["255bytes"]), 255)
        self.assertEquals(len(r.databases[0]["zipmap_with_big_values"]["300bytes"]), 300)
        self.assertEquals(len(r.databases[0]["zipmap_with_big_values"]["20kbytes"]), 20000)

    def test_hash_as_ziplist(self):
        '''In redis dump version = 4, hashmaps are stored as ziplists'''
        r = load_rdb('hash_as_ziplist.rdb')
        self.assertEquals(r.databases[0]["zipmap_compresses_easily"]["a"], "aa")
        self.assertEquals(r.databases[0]["zipmap_compresses_easily"]["aa"], "aaaa")
        self.assertEquals(r.databases[0]["zipmap_compresses_easily"]["aaaaa"], "aaaaaaaaaaaaaa")

    def test_dictionary(self):
        r = load_rdb('dictionary.rdb')
        self.assertEquals(r.lengths[0]["force_dictionary"], 1000)
        self.assertEquals(r.databases[0]["force_dictionary"]["ZMU5WEJDG7KU89AOG5LJT6K7HMNB3DEI43M6EYTJ83VRJ6XNXQ"],
                    "T63SOS8DQJF0Q0VJEZ0D1IQFCYTIPSBOUIAI9SB0OV57MQR1FI")
        self.assertEquals(r.databases[0]["force_dictionary"]["UHS5ESW4HLK8XOGTM39IK1SJEUGVV9WOPK6JYA5QBZSJU84491"],
                    "6VULTCV52FXJ8MGVSFTZVAGK2JXZMGQ5F8OVJI0X6GEDDR27RZ")

    def test_ziplist_that_compresses_easily(self):
        r = load_rdb('ziplist_that_compresses_easily.rdb')
        self.assertEquals(r.lengths[0]["ziplist_compresses_easily"], 6)
        # BUGFIX: was `xrange`, which is Python-2-only and inconsistent with
        # the `range` used everywhere else in this file.
        for idx, length in enumerate([6, 12, 18, 24, 30, 36]) :
            self.assertEquals(("".join("a" for x in range(length))), r.databases[0]["ziplist_compresses_easily"][idx])

    def test_ziplist_that_doesnt_compress(self):
        r = load_rdb('ziplist_that_doesnt_compress.rdb')
        self.assertEquals(r.lengths[0]["ziplist_doesnt_compress"], 2)
        self.assert_("aj2410" in r.databases[0]["ziplist_doesnt_compress"])
        self.assert_("cc953a17a8e096e76a44169ad3f9ac87c5f8248a403274416179aa9fbd852344"
                        in r.databases[0]["ziplist_doesnt_compress"])

    def test_ziplist_with_integers(self):
        r = load_rdb('ziplist_with_integers.rdb')

        expected_numbers = []
        for x in range(0,13):
            expected_numbers.append(x)

        expected_numbers += [-2, 13, 25, -61, 63, 16380, -16000, 65535, -65523, 4194304, 0x7fffffffffffffff]

        self.assertEquals(r.lengths[0]["ziplist_with_integers"], len(expected_numbers))

        for num in expected_numbers :
            self.assert_(num in r.databases[0]["ziplist_with_integers"], "Cannot find %d" % num)

    def test_linkedlist(self):
        r = load_rdb('linkedlist.rdb')
        self.assertEquals(r.lengths[0]["force_linkedlist"], 1000)
        self.assert_("JYY4GIFI0ETHKP4VAJF5333082J4R1UPNPLE329YT0EYPGHSJQ" in r.databases[0]["force_linkedlist"])
        self.assert_("TKBXHJOX9Q99ICF4V78XTCA2Y1UYW6ERL35JCIL1O0KSGXS58S" in r.databases[0]["force_linkedlist"])

    def test_intset_16(self):
        r = load_rdb('intset_16.rdb')
        self.assertEquals(r.lengths[0]["intset_16"], 3)
        for num in (0x7ffe, 0x7ffd, 0x7ffc) :
            self.assert_(num in r.databases[0]["intset_16"])

    def test_intset_32(self):
        r = load_rdb('intset_32.rdb')
        self.assertEquals(r.lengths[0]["intset_32"], 3)
        for num in (0x7ffefffe, 0x7ffefffd, 0x7ffefffc) :
            self.assert_(num in r.databases[0]["intset_32"])

    def test_intset_64(self):
        r = load_rdb('intset_64.rdb')
        self.assertEquals(r.lengths[0]["intset_64"], 3)
        for num in (0x7ffefffefffefffe, 0x7ffefffefffefffd, 0x7ffefffefffefffc) :
            self.assert_(num in r.databases[0]["intset_64"])

    def test_regular_set(self):
        r = load_rdb('regular_set.rdb')
        self.assertEquals(r.lengths[0]["regular_set"], 6)
        for member in ("alpha", "beta", "gamma", "delta", "phi", "kappa") :
            self.assert_(member in r.databases[0]["regular_set"], msg=('%s missing' % member))

    def test_sorted_set_as_ziplist(self):
        r = load_rdb('sorted_set_as_ziplist.rdb')
        self.assertEquals(r.lengths[0]["sorted_set_as_ziplist"], 3)
        zset = r.databases[0]["sorted_set_as_ziplist"]
        self.assert_(floateq(zset['8b6ba6718a786daefa69438148361901'], 1))
        self.assert_(floateq(zset['cb7a24bb7528f934b841b34c3a73e0c7'], 2.37))
        self.assert_(floateq(zset['523af537946b79c4f8369ed39ba78605'], 3.423))

    def test_filtering_by_keys(self):
        r = load_rdb('parser_filters.rdb', filters={"keys":"k[0-9]"})
        self.assertEquals(r.databases[0]['k1'], "ssssssss")
        self.assertEquals(r.databases[0]['k3'], "wwwwwwww")
        self.assertEquals(len(r.databases[0]), 2)

    def test_filtering_by_type(self):
        r = load_rdb('parser_filters.rdb', filters={"types":["sortedset"]})
        self.assert_('z1' in r.databases[0])
        self.assert_('z2' in r.databases[0])
        self.assert_('z3' in r.databases[0])
        self.assert_('z4' in r.databases[0])
        self.assertEquals(len(r.databases[0]), 4)

    def test_filtering_by_database(self):
        r = load_rdb('multiple_databases.rdb', filters={"dbs":[2]})
        self.assert_('key_in_zeroth_database' not in r.databases[0])
        self.assert_('key_in_second_database' in r.databases[2])
        self.assertEquals(len(r.databases[0]), 0)
        self.assertEquals(len(r.databases[2]), 1)

    def test_rdb_version_5_with_checksum(self):
        r = load_rdb('rdb_version_5_with_checksum.rdb')
        self.assertEquals(r.databases[0]['abcd'], 'efgh')
        self.assertEquals(r.databases[0]['foo'], 'bar')
        self.assertEquals(r.databases[0]['bar'], 'baz')
        self.assertEquals(r.databases[0]['abcdef'], 'abcdef')
        self.assertEquals(r.databases[0]['longerstring'], 'thisisalongerstring.idontknowwhatitmeans')

    def test_multiple_databases_stream(self):
        r = load_rdb_stream('multiple_databases.rdb')
        # BUGFIX: was `self.assert_(len(r.databases), 2)` — see
        # test_multiple_databases above.
        self.assertEquals(len(r.databases), 2)
        self.assert_(1 not in r.databases)
        self.assertEquals(r.databases[0]["key_in_zeroth_database"], "zero")
        self.assertEquals(r.databases[2]["key_in_second_database"], "second")


def floateq(f1, f2) :
    '''True when f1 and f2 are equal to within an absolute tolerance of 1e-5.'''
    return math.fabs(f1 - f2) < 0.00001

def load_rdb(file_name, filters=None) :
    '''Parse dumps/<file_name> by path and return the MockRedis callback.'''
    r = MockRedis()
    parser = RdbParser(r, filters)
    parser.parse(os.path.join(os.path.dirname(__file__), 'dumps', file_name))
    return r

def load_rdb_stream(file_name, filters=None) :
    '''Parse dumps/<file_name> through a file object (streaming code path).'''
    r = MockRedis()
    parser = RdbParser(r, filters)
    parser.parse_fd(open(os.path.join(os.path.dirname(__file__), 'dumps', file_name), 'rb'))
    return r


class MockRedis(RdbCallback):
    '''RdbCallback that records everything the parser reports.

    Recorded state, all keyed by database number:
      databases - {dbnum: {key: value}}
      lengths   - {dbnum: {key: declared length}}
      expiry    - {dbnum: {key: expiry datetime}}
    methods_called collects lifecycle callback names (start_rdb/end_rdb).
    The end_* callbacks double-check that the number of elements actually
    delivered matches the length declared by the corresponding start_*.
    '''
    def __init__(self) :
        self.databases = {}
        self.lengths = {}
        self.expiry = {}
        self.methods_called = []
        self.dbnum = 0

    def currentdb(self) :
        return self.databases[self.dbnum]

    def store_expiry(self, key, expiry) :
        self.expiry[self.dbnum][key] = expiry

    def store_length(self, key, length) :
        if not self.dbnum in self.lengths :
            self.lengths[self.dbnum] = {}
        self.lengths[self.dbnum][key] = length

    def get_length(self, key) :
        if not key in self.lengths[self.dbnum] :
            raise Exception('Key %s does not have a length' % key)
        return self.lengths[self.dbnum][key]

    def start_rdb(self):
        self.methods_called.append('start_rdb')

    def start_database(self, dbnum):
        self.dbnum = dbnum
        self.databases[dbnum] = {}
        self.expiry[dbnum] = {}
        self.lengths[dbnum] = {}

    def set(self, key, value, expiry, info):
        self.currentdb()[key] = value
        if expiry :
            self.store_expiry(key, expiry)

    def start_hash(self, key, length, expiry, info):
        if key in self.currentdb() :
            raise Exception('start_hash called with key %s that already exists' % key)
        else :
            self.currentdb()[key] = {}
        if expiry :
            self.store_expiry(key, expiry)
        self.store_length(key, length)

    def hset(self, key, field, value):
        # BUGFIX: these messages used Exception('... %s', key), which never
        # interpolated the key into the message; now formatted with %.
        if not key in self.currentdb() :
            raise Exception('start_hash not called for key = %s' % key)
        self.currentdb()[key][field] = value

    def end_hash(self, key):
        if not key in self.currentdb() :
            raise Exception('start_hash not called for key = %s' % key)
        if len(self.currentdb()[key]) != self.lengths[self.dbnum][key] :
            raise Exception('Lengths mismatch on hash %s, expected length = %d, actual = %d' % (key,
                            self.lengths[self.dbnum][key], len(self.currentdb()[key])))

    def start_set(self, key, cardinality, expiry, info):
        if key in self.currentdb() :
            raise Exception('start_set called with key %s that already exists' % key)
        else :
            self.currentdb()[key] = []
        if expiry :
            self.store_expiry(key, expiry)
        self.store_length(key, cardinality)

    def sadd(self, key, member):
        if not key in self.currentdb() :
            raise Exception('start_set not called for key = %s' % key)
        self.currentdb()[key].append(member)

    def end_set(self, key):
        if not key in self.currentdb() :
            raise Exception('start_set not called for key = %s' % key)
        if len(self.currentdb()[key]) != self.lengths[self.dbnum][key] :
            raise Exception('Lengths mismatch on set %s, expected length = %d, actual = %d' % (key,
                            self.lengths[self.dbnum][key], len(self.currentdb()[key])))

    def start_list(self, key, expiry, info):
        if key in self.currentdb() :
            raise Exception('start_list called with key %s that already exists' % key)
        else :
            self.currentdb()[key] = []
        if expiry :
            self.store_expiry(key, expiry)

    def rpush(self, key, value) :
        if not key in self.currentdb() :
            raise Exception('start_list not called for key = %s' % key)
        self.currentdb()[key].append(value)

    def end_list(self, key, info):
        # BUGFIX: message said "start_set not called" (copy-paste error).
        if not key in self.currentdb() :
            raise Exception('start_list not called for key = %s' % key)
        # Lists declare no up-front length, so record the observed one.
        self.store_length(key, len(self.currentdb()[key]))

    def start_sorted_set(self, key, length, expiry, info):
        if key in self.currentdb() :
            raise Exception('start_sorted_set called with key %s that already exists' % key)
        else :
            self.currentdb()[key] = {}
        if expiry :
            self.store_expiry(key, expiry)
        self.store_length(key, length)

    def zadd(self, key, score, member):
        if not key in self.currentdb() :
            raise Exception('start_sorted_set not called for key = %s' % key)
        self.currentdb()[key][member] = score

    def end_sorted_set(self, key):
        # BUGFIX: message said "start_set not called" (copy-paste error).
        if not key in self.currentdb() :
            raise Exception('start_sorted_set not called for key = %s' % key)
        if len(self.currentdb()[key]) != self.lengths[self.dbnum][key] :
            raise Exception('Lengths mismatch on sortedset %s, expected length = %d, actual = %d' % (key,
                            self.lengths[self.dbnum][key], len(self.currentdb()[key])))

    def end_database(self, dbnum):
        if self.dbnum != dbnum :
            raise Exception('start_database called with %d, but end_database called %d instead' % (self.dbnum, dbnum))

    def end_rdb(self):
        self.methods_called.append('end_rdb')
mit