# -*- test-case-name: nevow.test.test_testutil -*-
# Copyright (c) 2004-2010 Divmod.
# See LICENSE for details.
import os, sys, signal
from subprocess import PIPE, Popen
from zope.interface import implements
try:
import subunit
except ImportError:
subunit = None
from twisted.python.log import msg
from twisted.trial.unittest import TestCase as TrialTestCase
from twisted.python.components import Componentized
from twisted.internet import defer
from twisted.web import http
from twisted.protocols.basic import LineReceiver
from formless import iformless
from nevow import inevow, context, athena, loaders, tags, appserver
from nevow.jsutil import findJavascriptInterpreter, generateTestScript
class FakeChannel:
def __init__(self, site):
self.site = site
class FakeSite:
pass
class FakeSession(Componentized):
implements(inevow.ISession)
def __init__(self, avatar):
Componentized.__init__(self)
self.avatar = avatar
self.uid = 12345
def getLoggedInRoot(self):
return self.avatar
fs = FakeSession(None)
class FakeRequest(Componentized):
"""
Implementation of L{inevow.IRequest} which is convenient to use in unit
tests.
@ivar lastModified: The value passed to L{setLastModified} or C{None} if
that method has not been called.
@type accumulator: C{str}
@ivar accumulator: The bytes written to the response body.
@type deferred: L{Deferred}
@ivar deferred: The deferred which represents rendering of the response
to this request. This is basically an implementation detail of
L{NevowRequest}. Application code should probably never use this.
@ivar _appRootURL: C{None} or the object passed to L{rememberRootURL}.
"""
implements(inevow.IRequest)
fields = None
failure = None
context = None
redirected_to = None
lastModified = None
content = ""
method = 'GET'
code = http.OK
deferred = None
accumulator = ''
_appRootURL = None
def __init__(self, headers=None, args=None, avatar=None,
uri='/', currentSegments=None, cookies=None,
user="", password="", isSecure=False):
"""
Create a FakeRequest instance.
@param headers: dict of request headers
@param args: dict of args
@param avatar: avatar to pass to the FakeSession instance
@param uri: request URI
@param currentSegments: list of segments that have "already been located"
@param cookies: dict of cookies
@param user: username (like in http auth)
@param password: password (like in http auth)
@param isSecure: whether this request represents an HTTPS url
"""
Componentized.__init__(self)
self.uri = uri
if not uri.startswith('/'):
raise ValueError('uri must be relative with absolute path')
self.path = uri
self.prepath = []
postpath = uri.split('?')[0]
assert postpath.startswith('/')
self.postpath = postpath[1:].split('/')
if currentSegments is not None:
for seg in currentSegments:
assert seg == self.postpath[0]
self.prepath.append(self.postpath.pop(0))
else:
self.prepath.append('')
self.headers = {}
self.args = args or {}
self.sess = FakeSession(avatar)
self.site = FakeSite()
self.received_headers = {}
if headers:
for k, v in headers.iteritems():
self.received_headers[k.lower()] = v
if cookies is not None:
self.cookies = cookies
else:
self.cookies = {}
self.user = user
self.password = password
self.secure = isSecure
self.deferred = defer.Deferred()
def URLPath(self):
from nevow import url
return url.URL.fromString('')
def getSession(self):
return self.sess
def registerProducer(self, producer, streaming):
"""
Synchronously cause the given producer to produce all of its data.
This will not work with push producers. Do not use it with them.
"""
keepGoing = [None]
self.unregisterProducer = keepGoing.pop
while keepGoing:
producer.resumeProducing()
del self.unregisterProducer
    # Legacy read-only alias for C{accumulator}; older tests inspect
    # C{request.v} to read the response body.
    def v():
        def get(self):
            return self.accumulator
        return get,
    v = property(*v())
def write(self, bytes):
"""
Accumulate the given bytes as part of the response body.
@type bytes: C{str}
"""
self.accumulator += bytes
finished = False
def finishRequest(self, success):
self.finished = True
def finish(self):
self.deferred.callback('')
def getHeader(self, key):
return self.received_headers.get(key.lower())
def setHeader(self, key, val):
self.headers[key.lower()] = val
def redirect(self, url):
self.redirected_to = url
def processingFailed(self, f):
self.failure = f
def setResponseCode(self, code):
self.code = code
def setLastModified(self, when):
self.lastModified = when
def prePathURL(self):
"""
The absolute URL up until the last handled segment of this request.
@rtype: C{str}.
"""
return 'http://%s/%s' % (self.getHeader('host') or 'localhost',
'/'.join(self.prepath))
def getClientIP(self):
return '127.0.0.1'
def addCookie(self, k, v, expires=None, domain=None, path=None, max_age=None, comment=None, secure=None):
"""
Set a cookie for use in subsequent requests.
"""
self.cookies[k] = v
def getCookie(self, k):
"""
Fetch a cookie previously set.
"""
return self.cookies.get(k)
def getUser(self):
"""
Returns the HTTP auth username.
"""
return self.user
def getPassword(self):
"""
Returns the HTTP auth password.
"""
return self.password
def getRootURL(self):
"""
Return the previously remembered URL.
"""
return self._appRootURL
def rememberRootURL(self, url=None):
"""
For compatibility with appserver.NevowRequest.
"""
if url is None:
raise NotImplementedError(
"Default URL remembering logic is not implemented.")
self._appRootURL = url
def isSecure(self):
"""
Returns whether this is an HTTPS request or not.
"""
return self.secure
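# A minimal usage sketch (not part of the original module; kept as a comment so
# nothing executes at import time): drive FakeRequest directly and inspect the
# accumulated response body.
#
#     req = FakeRequest(headers={'Host': 'example.com'}, uri='/page')
#     req.write('hello ')
#     req.write('world')
#     assert req.accumulator == 'hello world'
#     assert req.prePathURL() == 'http://example.com/'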
class TestCase(TrialTestCase):
hasBools = (sys.version_info >= (2,3))
_assertions = 0
# This should be migrated to Twisted.
def failUnlessSubstring(self, containee, container, msg=None):
self._assertions += 1
if container.find(containee) == -1:
self.fail(msg or "%r not in %r" % (containee, container))
def failIfSubstring(self, containee, container, msg=None):
self._assertions += 1
if container.find(containee) != -1:
self.fail(msg or "%r in %r" % (containee, container))
assertSubstring = failUnlessSubstring
assertNotSubstring = failIfSubstring
def assertNotIdentical(self, first, second, msg=None):
self._assertions += 1
if first is second:
self.fail(msg or '%r is %r' % (first, second))
def failIfIn(self, containee, container, msg=None):
self._assertions += 1
if containee in container:
self.fail(msg or "%r in %r" % (containee, container))
def assertApproximates(self, first, second, tolerance, msg=None):
self._assertions += 1
if abs(first - second) > tolerance:
self.fail(msg or "%s ~== %s" % (first, second))
if not hasattr(TrialTestCase, 'mktemp'):
def mktemp(self):
import tempfile
return tempfile.mktemp()
TestCase.mktemp = mktemp
class AccumulatingFakeRequest(FakeRequest):
"""
I am a fake IRequest that is also a stub implementation of
IFormDefaults.
This class is named I{accumulating} for historical reasons only. You
probably want to ignore this and use L{FakeRequest} instead.
"""
implements(iformless.IFormDefaults)
def __init__(self, *a, **kw):
FakeRequest.__init__(self, *a, **kw)
self.d = defer.Deferred()
def getDefault(self, key, context):
return ''
def remember(self, object, interface):
pass
class FragmentWrapper(athena.LivePage):
"""
I wrap myself around an Athena fragment, providing a minimal amount of html
scaffolding in addition to an L{athena.LivePage}.
"""
docFactory = loaders.stan(
tags.html[
tags.body[
tags.directive('fragment')]])
def __init__(self, f):
super(FragmentWrapper, self).__init__()
self.f = f
def render_fragment(self, ctx, data):
self.f.setFragmentParent(self)
return self.f
def renderLivePage(res, topLevelContext=context.WebContext,
reqFactory=FakeRequest):
"""
Render the given LivePage resource, performing LivePage-specific cleanup.
Return a Deferred which fires when it has rendered.
"""
D = renderPage(res, topLevelContext, reqFactory)
return D.addCallback(lambda x: (res._messageDeliverer.close(), x)[1])
def renderPage(res, topLevelContext=context.WebContext,
reqFactory=FakeRequest):
"""
Render the given resource. Return a Deferred which fires when it has
rendered.
"""
req = reqFactory()
ctx = topLevelContext(tag=res)
ctx.remember(req, inevow.IRequest)
render = appserver.NevowRequest(None, True).gotPageContext
result = render(ctx)
result.addCallback(lambda x: req.accumulator)
return result
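# Usage sketch (an assumption, not taken from the original module): render a
# hypothetical rend.Page subclass inside a TestCase and assert on the
# accumulated markup.
#
#     from nevow import rend
#
#     class _HelloPage(rend.Page):
#         docFactory = loaders.stan(tags.html[tags.body['hello']])
#
#     def test_render(self):
#         d = renderPage(_HelloPage())
#         return d.addCallback(
#             lambda body: self.assertSubstring('hello', body))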
class NotSupported(Exception):
"""
Raised by L{JavaScriptTestCase} if the installation lacks a certain
required feature.
"""
class TestProtocolLineReceiverServer(LineReceiver):
"""
Subunit protocol which is also a Twisted LineReceiver so that it
includes line buffering logic.
"""
delimiter = '\n'
def __init__(self, proto):
self.proto = proto
def lineReceived(self, line):
"""
Forward the line on to the subunit protocol's lineReceived method,
which expects it to be newline terminated.
"""
self.proto.lineReceived(line + '\n')
class JavaScriptTestCase(TrialTestCase):
def __init__(self, methodName='runTest'):
TrialTestCase.__init__(self, methodName)
self.testMethod = getattr(self, methodName)
def findJavascriptInterpreter(self):
"""
@see: L{nevow.jsutil.findJavascriptInterpreter}
"""
return findJavascriptInterpreter()
def checkDependencies(self):
"""
Check that all the dependencies of the test are satisfied.
@raise NotSupported: If any one of the dependencies is not satisfied.
"""
js = self.findJavascriptInterpreter()
if js is None:
raise NotSupported("Could not find JavaScript interpreter")
if subunit is None:
raise NotSupported("Could not import 'subunit'")
        for name in ['WEXITSTATUS', 'WIFSIGNALED', 'WTERMSIG']:
if getattr(os, name, None) is None:
raise NotSupported("os.%s unavailable" % (name,))
def _writeToTemp(self, contents):
fname = self.mktemp()
fd = file(fname, 'w')
try:
fd.write(contents)
finally:
fd.close()
return fname
def createSource(self, testModule):
"""
Return a string of JavaScript source code which, when executed, will
run the JavaScript unit tests defined in the given module.
@type testModule: C{str}
@param testModule: The JavaScript module name which contains the
tests to run.
@rtype: C{str}
"""
js = """
// import Divmod.UnitTest
// import %(module)s
Divmod.UnitTest.runRemote(Divmod.UnitTest.loadFromModule(%(module)s));
""" % {'module': testModule}
return js
def makeScript(self, testModule):
"""
Write JavaScript source for executing the JavaScript unit tests in
the given JavaScript module to a file and return the name of that
file.
@type testModule: C{str}
@param testModule: The JavaScript module name which contains the
tests to run.
@rtype: C{str}
"""
jsfile = self._writeToTemp(self.createSource(testModule))
scriptFile = self._writeToTemp(generateTestScript(jsfile))
return scriptFile
def _runWithSigchild(self, f, *a, **kw):
"""
Run the given function with an alternative SIGCHLD handler.
"""
oldHandler = signal.signal(signal.SIGCHLD, signal.SIG_DFL)
try:
return f(*a, **kw)
finally:
signal.signal(signal.SIGCHLD, oldHandler)
def run(self, result):
try:
self.checkDependencies()
except NotSupported, e:
result.startTest(self)
result.addSkip(self, str(e))
result.stopTest(self)
return
js = self.findJavascriptInterpreter()
try:
script = self.makeScript(self.testMethod())
except KeyError:
result.addError(self, sys.exc_info())
return
server = subunit.TestProtocolServer(result)
protocol = TestProtocolLineReceiverServer(server)
# What this *SHOULD BE*
# spawnProcess(protocol, js, (script,))
# return protocol.someDisconnectCallback()
        # However, run cannot return a Deferred, so instead it does this:
def run():
argv = [js, script]
msg("Running JavaScript interpreter, argv = %r" % (argv,))
child = Popen(argv, stdout=PIPE)
while True:
bytes = child.stdout.read(4096)
if bytes:
protocol.dataReceived(bytes)
else:
break
returnCode = child.wait()
if returnCode < 0:
result.addError(
self,
(Exception,
"JavaScript interpreter exited due to signal %d" % (-returnCode,),
None))
elif returnCode:
result.addError(
self,
(Exception,
"JavaScript interpreter had error exit: %d" % (returnCode,),
None))
self._runWithSigchild(run)
def setJavascriptInterpreterOrSkip(testCase):
"""
    If we're unable to find a JavaScript interpreter (currently we only look
    for smjs or js), set the C{skip} attribute on C{testCase}.  Otherwise
    assign the path to the interpreter executable to
    C{testCase.javascriptInterpreter}.
"""
script = findJavascriptInterpreter()
if script is None:
testCase.skip = "No JavaScript interpreter available."
else:
testCase.javascriptInterpreter = script
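# Usage sketch (assumption): typically applied to a test case class at module
# scope so Trial skips the whole class when no JavaScript interpreter is
# installed.
#
#     class RenderTests(TestCase):
#         def test_something(self):
#             ...
#     setJavascriptInterpreterOrSkip(RenderTests)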
class CSSModuleTestMixin:
"""
    Mixin for L{unittest.TestCase} subclasses which test Athena's CSS module
    functionality.
"""
def _makeCSSRegistry(self):
"""
Make a CSS registry with some modules in it.
"""
def makeModule(contents=None):
fname = self.mktemp()
f = file(fname, 'w')
if contents is not None:
f.write(contents)
f.close()
return fname
return athena.CSSRegistry(
{u'TestCSSModuleDependencies': makeModule(),
u'TestCSSModuleDependencies.Dependor': makeModule(
'// import TestCSSModuleDependencies.Dependee\n'),
u'TestCSSModuleDependencies.Dependee': makeModule()})
"""
DeepMind Control Suite Wrapper directly sourced from:
https://github.com/denisyarats/dmc2gym
MIT License
Copyright (c) 2020 Denis Yarats
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from gym import core, spaces
try:
from dm_env import specs
except ImportError:
specs = None
try:
from dm_control import suite
except ImportError:
suite = None
import numpy as np
def _spec_to_box(spec):
def extract_min_max(s):
assert s.dtype == np.float64 or s.dtype == np.float32
        dim = int(np.prod(s.shape))
if type(s) == specs.Array:
bound = np.inf * np.ones(dim, dtype=np.float32)
return -bound, bound
elif type(s) == specs.BoundedArray:
zeros = np.zeros(dim, dtype=np.float32)
return s.minimum + zeros, s.maximum + zeros
mins, maxs = [], []
for s in spec:
mn, mx = extract_min_max(s)
mins.append(mn)
maxs.append(mx)
low = np.concatenate(mins, axis=0)
high = np.concatenate(maxs, axis=0)
assert low.shape == high.shape
return spaces.Box(low, high, dtype=np.float32)
def _flatten_obs(obs):
obs_pieces = []
for v in obs.values():
flat = np.array([v]) if np.isscalar(v) else v.ravel()
obs_pieces.append(flat)
return np.concatenate(obs_pieces, axis=0)
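# Worked sketch (assumption): _flatten_obs concatenates the values of a dm_env
# observation dict into a single 1-D array, e.g.
#
#     import collections
#     obs = collections.OrderedDict([('position', np.array([0.1, 0.2])),
#                                    ('velocity', np.array([0.3]))])
#     _flatten_obs(obs)  # -> array([0.1, 0.2, 0.3])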
class DMCEnv(core.Env):
def __init__(self,
domain_name,
task_name,
task_kwargs=None,
visualize_reward=False,
from_pixels=False,
height=64,
width=64,
camera_id=0,
frame_skip=2,
environment_kwargs=None,
channels_first=True,
preprocess=True):
self._from_pixels = from_pixels
self._height = height
self._width = width
self._camera_id = camera_id
self._frame_skip = frame_skip
self._channels_first = channels_first
self.preprocess = preprocess
if specs is None:
raise RuntimeError((
"The `specs` module from `dm_env` was not imported. Make sure "
"`dm_env` is installed and visible in the current python "
"environment."))
if suite is None:
            raise RuntimeError(
                ("The `suite` module from `dm_control` was not imported. Make "
                 "sure `dm_control` is installed and visible in the current "
                 "python environment."))
# create task
self._env = suite.load(
domain_name=domain_name,
task_name=task_name,
task_kwargs=task_kwargs,
visualize_reward=visualize_reward,
environment_kwargs=environment_kwargs)
# true and normalized action spaces
self._true_action_space = _spec_to_box([self._env.action_spec()])
self._norm_action_space = spaces.Box(
low=-1.0,
high=1.0,
shape=self._true_action_space.shape,
dtype=np.float32)
# create observation space
if from_pixels:
shape = [3, height,
width] if channels_first else [height, width, 3]
self._observation_space = spaces.Box(
low=0, high=255, shape=shape, dtype=np.uint8)
if preprocess:
self._observation_space = spaces.Box(
low=-0.5, high=0.5, shape=shape, dtype=np.float32)
else:
self._observation_space = _spec_to_box(
self._env.observation_spec().values())
self._state_space = _spec_to_box(self._env.observation_spec().values())
self.current_state = None
def __getattr__(self, name):
return getattr(self._env, name)
def _get_obs(self, time_step):
if self._from_pixels:
obs = self.render(
height=self._height,
width=self._width,
camera_id=self._camera_id)
if self._channels_first:
obs = obs.transpose(2, 0, 1).copy()
if self.preprocess:
obs = obs / 255.0 - 0.5
else:
obs = _flatten_obs(time_step.observation)
return obs
def _convert_action(self, action):
action = action.astype(np.float64)
true_delta = self._true_action_space.high - self._true_action_space.low
norm_delta = self._norm_action_space.high - self._norm_action_space.low
action = (action - self._norm_action_space.low) / norm_delta
action = action * true_delta + self._true_action_space.low
action = action.astype(np.float32)
return action
@property
def observation_space(self):
return self._observation_space
@property
def state_space(self):
return self._state_space
@property
def action_space(self):
return self._norm_action_space
def step(self, action):
assert self._norm_action_space.contains(action)
action = self._convert_action(action)
assert self._true_action_space.contains(action)
reward = 0
extra = {"internal_state": self._env.physics.get_state().copy()}
for _ in range(self._frame_skip):
time_step = self._env.step(action)
reward += time_step.reward or 0
done = time_step.last()
if done:
break
obs = self._get_obs(time_step)
self.current_state = _flatten_obs(time_step.observation)
extra["discount"] = time_step.discount
return obs, reward, done, extra
def reset(self):
time_step = self._env.reset()
self.current_state = _flatten_obs(time_step.observation)
obs = self._get_obs(time_step)
return obs
def render(self, mode="rgb_array", height=None, width=None, camera_id=0):
assert mode == "rgb_array", "only support for rgb_array mode"
height = height or self._height
width = width or self._width
camera_id = camera_id or self._camera_id
return self._env.physics.render(
height=height, width=width, camera_id=camera_id)
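# Usage sketch (assumes dm_control and a working MuJoCo installation; the
# domain/task names are standard suite examples, not specific to this wrapper):
#
#     env = DMCEnv(domain_name="cartpole", task_name="swingup",
#                  from_pixels=False, frame_skip=2)
#     obs = env.reset()
#     action = env.action_space.sample()      # normalized to [-1, 1]
#     obs, reward, done, info = env.step(action)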
# -*- test-case-name: foolscap.test.test_pb -*-
import re
if False:
import sys
from twisted.python import log
log.startLogging(sys.stderr)
from twisted.python import failure, reflect
from twisted.internet import defer
from twisted.internet.interfaces import IAddress
from twisted.trial import unittest
from foolscap import referenceable
from foolscap.tokens import BananaError, Violation, INT, STRING, OPEN
from foolscap.tokens import BananaFailure
from foolscap import broker, call
from foolscap.constraint import IConstraint
from foolscap.logging import log
from foolscap.api import Tub
from foolscap.util import allocate_tcp_port
from foolscap.test.common import HelperTarget, TargetMixin, \
Target, TargetWithoutInterfaces, MakeTubsMixin
from foolscap.eventual import fireEventually, flushEventualQueue
class TestRequest(call.PendingRequest):
def __init__(self, reqID, rref=None):
self.answers = []
call.PendingRequest.__init__(self, reqID, rref, None, None)
def complete(self, res):
self.answers.append((True, res))
def fail(self, why):
self.answers.append((False, why))
class NullTransport:
def write(self, data):
pass
def loseConnection(self, why=None):
pass
class TestReferenceUnslicer(unittest.TestCase):
# OPEN(reference), INT(refid), [STR(interfacename), INT(version)]... CLOSE
def setUp(self):
self.broker = broker.Broker(None)
self.broker.transport = NullTransport()
self.broker.connectionMade()
def tearDown(self):
return flushEventualQueue()
def newUnslicer(self):
unslicer = referenceable.ReferenceUnslicer()
unslicer.broker = self.broker
unslicer.opener = self.broker.rootUnslicer
return unslicer
def testReject(self):
u = self.newUnslicer()
self.failUnlessRaises(BananaError, u.checkToken, STRING, 10)
u = self.newUnslicer()
self.failUnlessRaises(BananaError, u.checkToken, OPEN, 0)
def testNoInterfaces(self):
u = self.newUnslicer()
u.checkToken(INT, 0)
u.receiveChild(12)
rr1,rr1d = u.receiveClose()
self.failUnless(rr1d is None)
rr2 = self.broker.getTrackerForYourReference(12).getRef()
self.failUnless(rr2)
self.failUnless(isinstance(rr2, referenceable.RemoteReference))
self.failUnlessEqual(rr2.tracker.broker, self.broker)
self.failUnlessEqual(rr2.tracker.clid, 12)
self.failUnlessEqual(rr2.tracker.interfaceName, None)
def testInterfaces(self):
u = self.newUnslicer()
u.checkToken(INT, 0)
u.receiveChild(12)
u.receiveChild("IBar")
rr1,rr1d = u.receiveClose()
self.failUnless(rr1d is None)
rr2 = self.broker.getTrackerForYourReference(12).getRef()
self.failUnless(rr2)
self.failUnlessIdentical(rr1, rr2)
self.failUnless(isinstance(rr2, referenceable.RemoteReference))
self.failUnlessEqual(rr2.tracker.broker, self.broker)
self.failUnlessEqual(rr2.tracker.clid, 12)
self.failUnlessEqual(rr2.tracker.interfaceName, "IBar")
class TestAnswer(unittest.TestCase):
# OPEN(answer), INT(reqID), [answer], CLOSE
def setUp(self):
self.broker = broker.Broker(None)
self.broker.transport = NullTransport()
self.broker.connectionMade()
def tearDown(self):
return flushEventualQueue()
def newUnslicer(self):
unslicer = call.AnswerUnslicer()
unslicer.broker = self.broker
unslicer.opener = self.broker.rootUnslicer
unslicer.protocol = self.broker
return unslicer
def testAccept1(self):
req = TestRequest(12)
self.broker.addRequest(req)
u = self.newUnslicer()
u.start(0)
u.checkToken(INT, 0)
u.receiveChild(12) # causes broker.getRequest
u.checkToken(STRING, 8)
u.receiveChild("results")
self.failIf(req.answers)
u.receiveClose() # causes broker.gotAnswer
self.failUnlessEqual(req.answers, [(True, "results")])
def testAccept2(self):
req = TestRequest(12)
req.setConstraint(IConstraint(str))
self.broker.addRequest(req)
u = self.newUnslicer()
u.start(0)
u.checkToken(INT, 0)
u.receiveChild(12) # causes broker.getRequest
u.checkToken(STRING, 15)
u.receiveChild("results")
self.failIf(req.answers)
u.receiveClose() # causes broker.gotAnswer
self.failUnlessEqual(req.answers, [(True, "results")])
def testReject1(self):
# answer a non-existent request
req = TestRequest(12)
self.broker.addRequest(req)
u = self.newUnslicer()
u.checkToken(INT, 0)
self.failUnlessRaises(Violation, u.receiveChild, 13)
def testReject2(self):
# answer a request with a result that violates the constraint
req = TestRequest(12)
req.setConstraint(IConstraint(int))
self.broker.addRequest(req)
u = self.newUnslicer()
u.checkToken(INT, 0)
u.receiveChild(12)
self.failUnlessRaises(Violation, u.checkToken, STRING, 42)
# this does not yet errback the request
self.failIf(req.answers)
# it gets errbacked when banana reports the violation
v = Violation("icky")
v.setLocation("here")
u.reportViolation(BananaFailure(v))
self.failUnlessEqual(len(req.answers), 1)
err = req.answers[0]
self.failIf(err[0])
f = err[1]
self.failUnless(f.check(Violation))
class TestReferenceable(TargetMixin, unittest.TestCase):
# test how a Referenceable gets transformed into a RemoteReference as it
# crosses the wire, then verify that it gets transformed back into the
# original Referenceable when it comes back. Also test how shared
# references to the same object are handled.
def setUp(self):
TargetMixin.setUp(self)
self.setupBrokers()
if 0:
print
self.callingBroker.doLog = "TX"
self.targetBroker.doLog = " rx"
def send(self, arg):
rr, target = self.setupTarget(HelperTarget())
d = rr.callRemote("set", obj=arg)
d.addCallback(self.failUnless)
d.addCallback(lambda res: target.obj)
return d
def send2(self, arg1, arg2):
rr, target = self.setupTarget(HelperTarget())
d = rr.callRemote("set2", obj1=arg1, obj2=arg2)
d.addCallback(self.failUnless)
d.addCallback(lambda res: (target.obj1, target.obj2))
return d
def echo(self, arg):
rr, target = self.setupTarget(HelperTarget())
d = rr.callRemote("echo", obj=arg)
return d
def testRef1(self):
# Referenceables turn into RemoteReferences
r = Target()
d = self.send(r)
d.addCallback(self._testRef1_1, r)
return d
def _testRef1_1(self, res, r):
self.failUnless(isinstance(res, referenceable.RemoteReference))
rref = res
self.failUnless(isinstance(rref.getPeer(), broker.LoopbackAddress))
self.failUnlessEqual(rref.isConnected(), True)
self.failUnlessEqual(rref.getLocationHints(), []) # loopback
self.failUnlessEqual(rref.getSturdyRef().getURL(), None)
# keepalives are disabled
self.failUnlessEqual(rref.getDataLastReceivedAt(), None)
t = rref.tracker
self.failUnlessEqual(t.broker, self.targetBroker)
self.failUnless(type(t.clid) is int)
self.failUnless(self.callingBroker.getMyReferenceByCLID(t.clid) is r)
self.failUnlessEqual(t.interfaceName, 'RIMyTarget')
def testRef2(self):
# sending a Referenceable over the wire multiple times should result
# in equivalent RemoteReferences
r = Target()
d = self.send(r)
d.addCallback(self._testRef2_1, r)
return d
def _testRef2_1(self, res1, r):
d = self.send(r)
d.addCallback(self._testRef2_2, res1)
return d
def _testRef2_2(self, res2, res1):
self.failUnless(res1 == res2)
self.failUnless(res1 is res2) # newpb does this, oldpb didn't
def testRef3(self):
# sending the same Referenceable in multiple arguments should result
# in equivalent RRs
r = Target()
d = self.send2(r, r)
d.addCallback(self._testRef3_1)
return d
def _testRef3_1(self, (res1, res2)):
self.failUnless(res1 == res2)
self.failUnless(res1 is res2)
def testRef4(self):
# sending the same Referenceable in multiple calls will result in
# equivalent RRs
r = Target()
rr, target = self.setupTarget(HelperTarget())
d = rr.callRemote("set", obj=r)
d.addCallback(self._testRef4_1, rr, r, target)
return d
def _testRef4_1(self, res, rr, r, target):
res1 = target.obj
d = rr.callRemote("set", obj=r)
d.addCallback(self._testRef4_2, target, res1)
return d
def _testRef4_2(self, res, target, res1):
res2 = target.obj
self.failUnless(res1 == res2)
self.failUnless(res1 is res2)
def testRef5(self):
# those RemoteReferences can be used to invoke methods on the sender.
# 'r' lives on side A. The anonymous target lives on side B. From
# side A we invoke B.set(r), and we get the matching RemoteReference
# 'rr' which lives on side B. Then we use 'rr' to invoke r.getName
# from side A.
r = Target()
r.name = "ernie"
d = self.send(r)
d.addCallback(lambda rr: rr.callRemote("getName"))
d.addCallback(self.failUnlessEqual, "ernie")
return d
def testRef6(self):
# Referenceables survive round-trips
r = Target()
d = self.echo(r)
d.addCallback(self.failUnlessIdentical, r)
return d
## def NOTtestRemoteRef1(self):
## # known URLRemoteReferences turn into Referenceables
## root = Target()
## rr, target = self.setupTarget(HelperTarget())
## self.targetBroker.factory = pb.PBServerFactory(root)
## urlRRef = self.callingBroker.remoteReferenceForName("", [])
## # urlRRef points at root
## d = rr.callRemote("set", obj=urlRRef)
## self.failUnless(dr(d))
## self.failUnlessIdentical(target.obj, root)
## def NOTtestRemoteRef2(self):
## # unknown URLRemoteReferences are errors
## root = Target()
## rr, target = self.setupTarget(HelperTarget())
## self.targetBroker.factory = pb.PBServerFactory(root)
## urlRRef = self.callingBroker.remoteReferenceForName("bogus", [])
## # urlRRef points at nothing
## d = rr.callRemote("set", obj=urlRRef)
## f = de(d)
## #print f
## #self.failUnlessEqual(f.type, tokens.Violation)
## self.failUnlessEqual(type(f.value), str)
## self.failUnless(f.value.find("unknown clid 'bogus'") != -1)
def testArgs1(self):
# sending the same non-Referenceable object in multiple calls results
# in distinct objects, because the serialization scope is bounded by
# each method call
r = [1,2]
rr, target = self.setupTarget(HelperTarget())
d = rr.callRemote("set", obj=r)
d.addCallback(self._testArgs1_1, rr, r, target)
# TODO: also make sure the original list goes out of scope once the
# method call has finished, to guard against a leaky
# reference-tracking implementation.
return d
def _testArgs1_1(self, res, rr, r, target):
res1 = target.obj
d = rr.callRemote("set", obj=r)
d.addCallback(self._testArgs1_2, target, res1)
return d
def _testArgs1_2(self, res, target, res1):
res2 = target.obj
self.failUnless(res1 == res2)
self.failIf(res1 is res2)
def testArgs2(self):
# but sending them as multiple arguments of the *same* method call
# results in identical objects
r = [1,2]
rr, target = self.setupTarget(HelperTarget())
d = rr.callRemote("set2", obj1=r, obj2=r)
d.addCallback(self._testArgs2_1, rr, target)
return d
def _testArgs2_1(self, res, rr, target):
self.failUnlessIdentical(target.obj1, target.obj2)
def testAnswer1(self):
# also, shared objects in a return value should be shared
r = [1,2]
rr, target = self.setupTarget(HelperTarget())
target.obj = (r,r)
d = rr.callRemote("get")
d.addCallback(lambda res: self.failUnlessIdentical(res[0], res[1]))
return d
def testAnswer2(self):
# but objects returned by separate method calls should be distinct
rr, target = self.setupTarget(HelperTarget())
r = [1,2]
target.obj = r
d = rr.callRemote("get")
d.addCallback(self._testAnswer2_1, rr, target)
return d
def _testAnswer2_1(self, res1, rr, target):
d = rr.callRemote("get")
d.addCallback(self._testAnswer2_2, res1)
return d
def _testAnswer2_2(self, res2, res1):
self.failUnless(res1 == res2)
self.failIf(res1 is res2)
class TestFactory(unittest.TestCase):
def setUp(self):
self.client = None
self.server = None
def gotReference(self, ref):
self.client = ref
def tearDown(self):
if self.client:
self.client.broker.transport.loseConnection()
if self.server:
d = self.server.stopListening()
else:
d = defer.succeed(None)
d.addCallback(flushEventualQueue)
return d
class TestCallable(MakeTubsMixin, unittest.TestCase):
def setUp(self):
self.tubA, self.tubB = self.makeTubs(2)
self._log_observers_to_remove = []
def addLogObserver(self, observer):
log.theLogger.addObserver(observer)
self._log_observers_to_remove.append(observer)
def tearDown(self):
for lo in self._log_observers_to_remove:
log.theLogger.removeObserver(lo)
d = defer.DeferredList([s.stopService() for s in self.services])
d.addCallback(flushEventualQueue)
return d
def testWrongSwiss(self):
target = Target()
url = self.tubB.registerReference(target)
badurl = url + "_wrong"
swiss = url[url.rindex("/")+1:]
d = self.tubA.getReference(badurl)
def _check(f):
self.failIf(swiss in str(f), "swissnum revealed")
self.failUnless(swiss[:2] in str(f), "swissnum hint not given")
d.addErrback(_check)
return d
def testGetSturdyRef(self):
target = Target()
url = self.tubB.registerReference(target)
d = self.tubA.getReference(url)
def _check(rref):
sr = rref.getSturdyRef()
self.failUnlessEqual(sr.getURL(), url)
peer = rref.getPeer()
self.failUnless(IAddress.providedBy(peer))
self.failUnlessEqual(peer.type, "TCP")
self.failUnlessEqual(peer.host, "127.0.0.1")
self.failUnlessEqual(rref.getRemoteTubID(), self.tubB.getTubID())
self.failUnlessEqual(rref.isConnected(), True)
self.failUnlessEqual(rref.getLocationHints(),
['127.0.0.1:%d' % self.tub_ports[1]])
d.addCallback(_check)
return d
def testLogLocalFailure(self):
self.tubB.setOption("logLocalFailures", True)
target = Target()
logs = []
self.addLogObserver(logs.append)
url = self.tubB.registerReference(target)
d = self.tubA.getReference(url)
d.addCallback(lambda rref: rref.callRemote("fail"))
# this will cause some text to be logged with log.msg. TODO: capture
# this text and look at it more closely.
def _check(res):
self.failUnless(isinstance(res, failure.Failure))
res.trap(ValueError)
messages = [log.format_message(e) for e in logs]
failures = [e['failure'] for e in logs if "failure" in e]
text = "\n".join(messages)
msg = ("an inbound callRemote that we [%s] executed (on behalf of "
"someone else, TubID %s) failed\n"
% (self.tubB.getShortTubID(), self.tubA.getShortTubID()))
self.failUnless(msg in text,
"msg '%s' not in text '%s'" % (msg, text))
self.failUnless("\n reqID=2, rref=<foolscap.test.common.Target object at "
in text)
self.failUnless(", methname=RIMyTarget.fail\n" in text)
self.failUnless("\n args=[]\n" in text)
self.failUnless("\n kwargs={}\n" in text)
self.failUnless("\n the LOCAL failure was:" in text)
self.failUnlessEqual(len(failures), 1)
f = failures[0]
self.failUnless(isinstance(f, failure.Failure))
self.failUnless(isinstance(f, call.CopiedFailure))
self.failUnless("Traceback (most recent call last):\n"
in str(f))
self.failUnless("\nexceptions.ValueError: you asked me to fail\n"
in str(f))
d.addBoth(_check)
return d
testLogLocalFailure.timeout = 2
def testLogRemoteFailure(self):
self.tubA.setOption("logRemoteFailures", True)
target = Target()
logs = []
self.addLogObserver(logs.append)
url = self.tubB.registerReference(target)
d = self.tubA.getReference(url)
d.addCallback(lambda rref: rref.callRemote("fail"))
# this will cause some text to be logged with log.msg. Capture this
# text and look at it more closely. Log events are sent through an
# eventual-send, so we need the fireEventually() call to give the
# event a chance to be put into the list.
d.addBoth(fireEventually)
def _check(res):
self.failUnless(isinstance(res, failure.Failure))
res.trap(ValueError)
messages = [log.format_message(e) for e in logs]
failures = [e['failure'] for e in logs if "failure" in e]
text = "\n".join(messages)
msg = ("an outbound callRemote (that we [%s] sent to someone "
"else [%s]) failed on the far end\n"
% (self.tubA.getShortTubID(), self.tubB.getShortTubID()))
self.failUnless(msg in text)
self.failUnless("\n reqID=2, rref=<RemoteReference at "
in text)
self.failUnless((" [%s]>, methname=RIMyTarget.fail\n" % url)
in text)
#self.failUnless("\n args=[]\n" in text) # TODO: log these too
#self.failUnless("\n kwargs={}\n" in text)
self.failUnlessEqual(len(failures), 1)
f = failures[0]
self.failUnless("Traceback (most recent call last):\n"
in str(f))
self.failUnless("\nexceptions.ValueError: you asked me to fail\n"
in str(f))
d.addBoth(_check)
return d
testLogRemoteFailure.timeout = 2
def testBoundMethod(self):
target = Target()
meth_url = self.tubB.registerReference(target.remote_add)
d = self.tubA.getReference(meth_url)
d.addCallback(self._testBoundMethod_1)
return d
testBoundMethod.timeout = 5
def _testBoundMethod_1(self, ref):
self.failUnless(isinstance(ref, referenceable.RemoteMethodReference))
#self.failUnlessEqual(ref.getSchemaName(),
# RIMyTarget.__remote_name__ + "/remote_add")
d = ref.callRemote(a=1, b=2)
d.addCallback(lambda res: self.failUnlessEqual(res, 3))
return d
def testFunction(self):
l = []
# we need a keyword arg here
def append(what):
l.append(what)
func_url = self.tubB.registerReference(append)
d = self.tubA.getReference(func_url)
d.addCallback(self._testFunction_1, l)
return d
testFunction.timeout = 5
def _testFunction_1(self, ref, l):
self.failUnless(isinstance(ref, referenceable.RemoteMethodReference))
d = ref.callRemote(what=12)
d.addCallback(lambda res: self.failUnlessEqual(l, [12]))
return d
class TestService(unittest.TestCase):
def setUp(self):
self.services = [Tub()]
self.services[0].startService()
def tearDown(self):
d = defer.DeferredList([s.stopService() for s in self.services])
d.addCallback(flushEventualQueue)
return d
def testRegister(self):
s = self.services[0]
portnum = allocate_tcp_port()
s.listenOn("tcp:%d:interface=127.0.0.1" % portnum)
s.setLocation("127.0.0.1:%d" % portnum)
t1 = Target()
public_url = s.registerReference(t1, "target")
self.failUnless(public_url.startswith("pb://"))
self.failUnless(public_url.endswith("@127.0.0.1:%d/target" % portnum))
self.failUnlessEqual(s.registerReference(t1, "target"), public_url)
self.failUnlessIdentical(s.getReferenceForURL(public_url), t1)
t2 = Target()
private_url = s.registerReference(t2)
self.failUnlessEqual(s.registerReference(t2), private_url)
self.failUnlessIdentical(s.getReferenceForURL(private_url), t2)
s.unregisterURL(public_url)
self.failUnlessRaises(KeyError, s.getReferenceForURL, public_url)
s.unregisterReference(t2)
self.failUnlessRaises(KeyError, s.getReferenceForURL, private_url)
# TODO: check what happens when you register the same referenceable
# under multiple URLs
def getRef(self, target):
self.services.append(Tub())
s1 = self.services[0]
s2 = self.services[1]
s2.startService()
portnum = allocate_tcp_port()
s1.listenOn("tcp:%d:interface=127.0.0.1" % portnum)
s1.setLocation("127.0.0.1:%d" % portnum)
public_url = s1.registerReference(target, "target")
self.public_url = public_url
d = s2.getReference(public_url)
return d
def testConnect1(self):
t1 = TargetWithoutInterfaces()
d = self.getRef(t1)
d.addCallback(lambda ref: ref.callRemote('add', a=2, b=3))
d.addCallback(self._testConnect1, t1)
return d
testConnect1.timeout = 5
def _testConnect1(self, res, t1):
self.failUnlessEqual(t1.calls, [(2,3)])
self.failUnlessEqual(res, 5)
def testConnect2(self):
t1 = Target()
d = self.getRef(t1)
d.addCallback(lambda ref: ref.callRemote('add', a=2, b=3))
d.addCallback(self._testConnect2, t1)
return d
testConnect2.timeout = 5
def _testConnect2(self, res, t1):
self.failUnlessEqual(t1.calls, [(2,3)])
self.failUnlessEqual(res, 5)
def testConnect3(self):
# test that we can get the reference multiple times
t1 = Target()
d = self.getRef(t1)
d.addCallback(lambda ref: ref.callRemote('add', a=2, b=3))
def _check(res):
self.failUnlessEqual(t1.calls, [(2,3)])
self.failUnlessEqual(res, 5)
t1.calls = []
d.addCallback(_check)
d.addCallback(lambda res:
self.services[1].getReference(self.public_url))
d.addCallback(lambda ref: ref.callRemote('add', a=5, b=6))
def _check2(res):
self.failUnlessEqual(t1.calls, [(5,6)])
self.failUnlessEqual(res, 11)
d.addCallback(_check2)
return d
testConnect3.timeout = 5
def TODO_testStatic(self):
# make sure we can register static data too, at least hashable ones
t1 = (1,2,3)
d = self.getRef(t1)
d.addCallback(lambda ref: self.failUnlessEqual(ref, (1,2,3)))
return d
#testStatic.timeout = 2
def testBadMethod(self):
t1 = Target()
d = self.getRef(t1)
d.addCallback(lambda ref: ref.callRemote('missing', a=2, b=3))
d.addCallbacks(self._testBadMethod_cb, self._testBadMethod_eb)
return d
testBadMethod.timeout = 5
def _testBadMethod_cb(self, res):
self.fail("method wasn't supposed to work")
def _testBadMethod_eb(self, f):
#self.failUnlessEqual(f.type, 'foolscap.tokens.Violation')
self.failUnlessEqual(f.type, Violation)
self.failUnless(re.search(r'RIMyTarget\(.*\) does not offer missing',
str(f)))
def testBadMethod2(self):
t1 = TargetWithoutInterfaces()
d = self.getRef(t1)
d.addCallback(lambda ref: ref.callRemote('missing', a=2, b=3))
d.addCallbacks(self._testBadMethod_cb, self._testBadMethod2_eb)
return d
testBadMethod2.timeout = 5
def _testBadMethod2_eb(self, f):
self.failUnlessEqual(reflect.qual(f.type), 'exceptions.AttributeError')
self.failUnlessSubstring("TargetWithoutInterfaces", f.value)
self.failUnlessSubstring(" has no attribute 'remote_missing'", f.value)
# TODO:
# when the Violation is remote, it is reported in a CopiedFailure, which
# means f.type is a string. When it is local, it is reported in a Failure,
# and f.type is the tokens.Violation class. I'm not sure how I feel about
# these being different.
# TODO: tests to port from oldpb suite
# testTooManyRefs: sending pb.MAX_BROKER_REFS across the wire should die
# testFactoryCopy?
# tests which aren't relevant right now but which might be once we port the
# corresponding functionality:
#
# testObserve, testCache (pb.Cacheable)
# testViewPoint
# testPublishable (spread.publish??)
# SpreadUtilTestCase (spread.util)
# NewCredTestCase
# tests which aren't relevant and aren't like to ever be
#
# PagingTestCase
# ConnectionTestCase (oldcred)
# NSPTestCase
import collections
import networkx
import numpy
import os
import pandas
import pysam
import re
import scipy.stats
from grocsvs import step
from grocsvs import structuralvariants
from grocsvs import utilities
from grocsvs.stages import assembly
from grocsvs.stages import cluster_svs
from grocsvs.stages import call_readclouds
BAM_CMATCH = 0
BAM_CINS = 1
BAM_CDEL = 2
BAM_CREF_SKIP = 3
BAM_CSOFT_CLIP = 4
BAM_CHARD_CLIP = 5
class WalkAssembliesStep(step.StepChunk):
@staticmethod
def get_steps(options):
yield WalkAssembliesStep(options)
def __init__(self, options):
self.options = options
def __str__(self):
return ".".join([self.__class__.__name__])
def outpaths(self, final):
directory = self.results_dir if final \
else self.working_dir
walk_assemblies = "walk_assemblies.tsv"
graphs = "walk_assemblies.graphs"
paths = {
"walk_assemblies": os.path.join(directory, walk_assemblies),
"graphs": os.path.join(directory, graphs)
}
return paths
def run(self):
edges_path = cluster_svs.ClusterSVsStep(self.options).outpaths(final=True)["edges"]
clusters = pandas.read_table(edges_path)
clusters["chromx"] = clusters["chromx"].astype("string")
clusters["chromy"] = clusters["chromy"].astype("string")
assembled = []
utilities.ensure_dir(self.outpaths(final=False)["graphs"])
for cluster_number, cluster in clusters.groupby("cluster"):
self.logger.log(cluster_number)
try:
cur_assembled = self.walk(cluster_number, cluster)
assembled.append(cur_assembled)
except IOError:
print "not found", cluster_number
# TODO: deal with empty list
# TODO: normalize coordinates according to reference.compare_chroms()
assembled = pandas.concat(assembled, ignore_index=True)
assembled["x"] = assembled["x"].astype(int)
assembled["y"] = assembled["y"].astype(int)
print self.options.reference.chroms
print assembled["chromx"].unique()
print assembled["chromy"].unique()
outpath = self.outpaths(final=False)["walk_assemblies"]
assembled.to_csv(outpath, sep="\t", index=False)
def walk(self, event_name, cluster):
assembly_directory = assembly.AssemblyStep(self.options, event_name)\
.outpaths(final=True)["assembly_dir"]
bam, contigs = self.get_contigs(assembly_directory)
cluster = cluster.loc[cluster["kind"]=="breakpoint"]
cluster = cluster.sort_values("p")
chains = set()
for i, event in cluster.iterrows():
if event["x"] < 0 or event["y"] < 0:
continue
# print "starting from:\n", event
chain = get_chain(bam, event["chromx"], event["x"], contigs)
# print "chain:", chain
chains.update(chain)
# print "starting from:\n", event
chain = get_chain(bam, event["chromy"], event["y"], contigs)
# print "chain:", chain
chains.update(chain)
# self.analyze_chains(chains, event_name)
cur_edges = pandas.DataFrame(
list(chains), columns=["chromx", "x", "orientationx", "chromy", "y", "orientationy", "contig"])
cur_edges = cur_edges.loc[(cur_edges["chromx"].isin(self.options.reference.chroms)) &
(cur_edges["chromy"].isin(self.options.reference.chroms))]
return cur_edges
def _barcodes_for_breakpoint(self, chromx, x, orientationx,
chromy, y, orientationy, dist1, dist2):
# TODO: refactor to re-use same version as cluster_svs
fragsx, fragsy, merged = structuralvariants.get_supporting_fragments_new(
self.options, self.sample, self.dataset,
chromx, x, chromy, y,
orientationx+orientationy, dist1, dist2)
bcx = set(fragsx["bc"])
bcy = set(fragsy["bc"])
common_barcodes = bcx.intersection(bcy)
if len(common_barcodes) < 1:
return None
return common_barcodes
def _compare_breakpoint_pair_pairs(self, breakpoint1, breakpoint2, good_bc_count, dist1, dist2):
chrom1x, pos1x, orientation1x, chrom1y, pos1y, orientation1y, _ = breakpoint1
chrom2x, pos2x, orientation2x, chrom2y, pos2y, orientation2y, _ = breakpoint2
# TODO: refactor to re-use same version as cluster_svs
barcodes1 = self._barcodes_for_breakpoint(
chrom1x, pos1x, orientation1x, chrom1y, pos1y, orientation1y, dist1, dist2)
barcodes2 = self._barcodes_for_breakpoint(
chrom2x, pos2x, orientation2x, chrom2y, pos2y, orientation2y, dist1, dist2)
if barcodes1 is None or barcodes2 is None:
return None
total_barcodes = barcodes1.union(barcodes2)
common_barcodes = barcodes1.intersection(barcodes2)
contingency_table = numpy.array([[len(common_barcodes), len(barcodes1-barcodes2)],
[len(barcodes2-barcodes1), good_bc_count-len(total_barcodes)]])
p = scipy.stats.fisher_exact(contingency_table, alternative="greater")[1]
return len(common_barcodes), len(total_barcodes), p
def get_contigs(self, assembly_directory):
bam_path = os.path.join(assembly_directory, "contigs.sorted.bam")
names_to_reads = collections.defaultdict(list)
self.logger.log(bam_path)
bam = pysam.AlignmentFile(bam_path)
for read in bam.fetch():
if read.reference_length > 45:
names_to_reads[read.query_name].append(read)
return bam, names_to_reads
# def analyze_chains(self, chains, event_name):
# dist1 = -500
# dist2 = 5000
# good_bc_count = utilities.get_good_bc_count(self)
# full_graph = networkx.Graph()
# barcode_supported_graph = networkx.Graph()
# for chain in chains:
# for breakpoint in chain:
# chromx, x, orientationx, chromy, y, orientationy, _ = breakpoint
# nodex = get_node_label(chromx, x, orientationx)
# nodey = get_node_label(chromy, y, orientationy)
# if not full_graph.has_edge(nodex, nodey):
# quant = quantify_breakpoint(
# chromx, x,
# chromy, y,
# orientationx+orientationy,
# self.options, self.sample, self.dataset,
# good_bc_count, dist1, dist2)
# if quant is None: continue
# cur_common_counts, cur_total_counts, p = quant
# print breakpoint, cur_common_counts, cur_total_counts, p
# ratio = cur_common_counts/float(cur_total_counts)
# label = "{}/{}={:.2g};{:.2g}".format(
# int(cur_common_counts),
# int(cur_total_counts),
# ratio,
# p)
# if ratio > 0.08 and p < 1e-4 and cur_total_counts > 10:
# barcode_supported_graph.add_edge(nodex, nodey, label=label)
# full_graph.add_edge(nodex, nodey, label=label)
# for j, breakpoint1 in enumerate(chain[:-1]):
# breakpoint2 = chain[j+1]
# quant = self._compare_breakpoint_pair_pairs(
# breakpoint1, breakpoint2, good_bc_count, dist1, dist2)
# if quant is None: continue
# common_counts, total_counts, p = quant
# ratio = common_counts/float(total_counts)
# node1y = get_node_label(*breakpoint1[3:6])
# node2x = get_node_label(*breakpoint2[:3])
# label = "[{}/{}={:.2g};{:.2g}]".format(
# int(common_counts),
# int(total_counts),
# ratio,
# p)
# if ratio > 0.08 and p < 1e-4 and total_counts > 10:
# barcode_supported_graph.add_edge(node1y, node2x, label=label, style="dashed")
# full_graph.add_edge(node1y, node2x, label=label, style="dashed")
# print ":: ALL:", full_graph.edges(data=True)
# print ":: SUPPORTED:", barcode_supported_graph.edges(data=True)
# outdir = self.outpaths(final=False)["graphs"]
# barcode_supported_dot = networkx.nx_agraph.to_agraph(barcode_supported_graph)
# barcode_supported_dot.draw("{}/barcode_supported.{}.pdf".format(outdir, event_name), prog="dot")
# full_dot = networkx.nx_agraph.to_agraph(full_graph)
# full_dot.draw("{}/full_graph.{}.pdf".format(outdir, event_name), prog="dot")
# def breakend_from_label(node):
# if node.startswith("]"):
# return (node[1:].split(":")[0], int(node.split(":")[1].replace(",","")), "+")
# elif node.endswith("["):
# return (node.split(":")[0], int(node[:-1].split(":")[1].replace(",","")), "-")
# from rpy2.robjects import r
# r.pdf("{}/raw{}.pdf".format(outdir, event_name))
# for component in networkx.connected_components(barcode_supported_graph):
# subgraph = barcode_supported_graph.subgraph(component)
# ends = [node for node,degree in subgraph.degree_iter() if degree==1]
# breakends = [node for node in list(networkx.shortest_simple_paths(subgraph, ends[0], ends[1]))[0]]
# breakends = [breakend_from_label(node) for node in breakends]
# breakends = breakends[:-1:2] + breakends[-1:]
# # print ")"*100, breakends
# plot_frags(breakends, self.options, self.sample, self.dataset)
# r["dev.off"]()
def sort_by_ref_pos(q1, r1, q2, r2, l=None):
    # `l` (the total query length) is accepted so the function can be called as
    # sort_by_ref_pos(*get_query_positions(read)); it is not used here.
if r1 < r2:
return q1, r1, q2, r2
else:
return q2, r2, q1, r1
def get_query_positions(read):
cigar = read.cigar
qstart = 0
rstart = read.pos
if cigar[0][0] in [BAM_CSOFT_CLIP, BAM_CHARD_CLIP]:
qstart += cigar[0][1]
qend = 0
rend = read.pos
for op, length in cigar:
if op in [BAM_CSOFT_CLIP, BAM_CHARD_CLIP, BAM_CINS]:
qend += length
elif op == BAM_CMATCH:
qend += length
rend += length
elif op in [BAM_CDEL, BAM_CREF_SKIP]:
rend += length
total_length = qend
if cigar[-1][0] in [BAM_CSOFT_CLIP, BAM_CHARD_CLIP]:
qend -= cigar[-1][1]
if read.is_reverse:
qstart, rstart, qend, rend = total_length-qend, rend, total_length-qstart, rstart
return qstart, rstart, qend, rend, total_length
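# Worked sketch (assumption): for a forward-strand read at reference position
# 100 with CIGAR 10S40M5S, get_query_positions returns
#     (qstart=10, rstart=100, qend=50, rend=140, total_length=55)
# i.e. the aligned block covers query bases [10, 50) and reference [100, 140).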
def is_clipped(read, min_clip_length=40):
q1, r1, q2, r2, l = get_query_positions(read)
x = ""
# print read.query_name, q1, q2, l
if q1 > min_clip_length:
if r2 > r1:
x += ">"
else:
x += "<"
if q2 < l-min_clip_length:
if r2 > r1:
x += "<"
else:
x += ">"
return x, min(r1,r2), max(r1,r2)
def covered_positions(start, end, reads):
length = end-start
positions = [False]*length
for read in reads:
for pos in read.get_reference_positions():
pos = pos-start
if 0 <= pos < length:
positions[pos] = True
return positions
def walk_local(bam, chrom, pos, direction, offset=4000, window_size=10000):
reads = set()
if direction == "-":
offset = -offset
window_size = -window_size
cur_start = pos
cur_end = pos + window_size
# cur_start, cur_end = sorted([cur_start, cur_end])
while True:
if min(cur_start, cur_end) < abs(offset)*2:
return []
cur_reads = list(bam.fetch(chrom, min(cur_start, cur_end), max(cur_start, cur_end)))
if len(cur_reads) == 0 or max(read.reference_length for read in cur_reads) < 40:
break
reads.update(cur_reads)
cur_start += offset
cur_end += offset
if len(reads) == 0:
return []
reads = sorted(reads, key=lambda x: x.pos)
if direction == "+":
start_index = 0
end_index = len(reads)-1
increment = 1
elif direction == "-":
start_index = len(reads)-1
end_index = 0
increment = -1
positions = set()
for read in reads:
positions.update(read.get_reference_positions())
positions = numpy.array(sorted(positions))
# print len(numpy.diff(positions))
# print numpy.where(numpy.diff(positions)>abs(offset))[0]
# print numpy.split(positions, numpy.where(numpy.diff(positions)>abs(offset))[0]+1)
split = numpy.split(positions, numpy.where(numpy.diff(positions)>abs(offset))[0]+1)
# for s in split:
# print " ", direction, s[0], s[-1]
if direction == "+":
positions = split[0]
else:
positions = split[-1]
# print "POSITIONS:", positions[0], positions[-1]
filtered_reads = [read for read in reads if (read.reference_end>=positions[0] and
read.reference_start<=positions[-1])]
if direction == "-":
filtered_reads = sorted(filtered_reads, key=lambda x: x.reference_start)
elif direction == "+":
filtered_reads = sorted(filtered_reads, key=lambda x: x.reference_end)
return filtered_reads
def are_matched(read1, orientation1, read2, orientation2):
if reads_overlap(read1, read2):
# TODO: should figure out if we can improve the assemblies
# so that we can detect small events
return False, None, None
qstart1, rstart1, qend1, rend1 = sort_by_ref_pos(*get_query_positions(read1))
qstart2, rstart2, qend2, rend2 = sort_by_ref_pos(*get_query_positions(read2))
# print ":: {} {:,} {} {:,}|| {} {:,} {} {:,}".format(qstart1, rstart1, qend1, rend1, qstart2, rstart2, qend2, rend2)
switched = False
if (qstart1+qend1) > (qstart2+qend2):
# print "switch"
switched = True
qstart1, rstart1, qend1, rend1, qstart2, rstart2, qend2, rend2 = qstart2, rstart2, qend2, rend2, qstart1, rstart1, qend1, rend1
orientation1, orientation2 = orientation2, orientation1
if orientation1 == "+":
qadj1 = qend1
radj1 = rend1
elif orientation1 == "-":
qadj1 = qstart1
radj1 = rstart1
if orientation2 == "+":
qadj2 = qend2
radj2 = rend2
elif orientation2 == "-":
qadj2 = qstart2
radj2 = rstart2
# print qadj1, qadj2
if switched:
radj1, radj2 = radj2, radj1
if abs(qadj1 - qadj2) < 20:
return True, radj1, radj2
return False, None, None
def reads_overlap(read1, read2):
if read1.reference_id != read2.reference_id:
return False
if read1.reference_end < read2.reference_start:
return False
if read2.reference_end < read1.reference_start:
return False
return True
def _get_chain(bam, chrom, pos, direction, reads_by_name, previous=None):
# direction is "-" if we're walking from the right to the left, ie looking
# for a "-" orientation breakpoint; and "+" if we're walking from left to right
# print "\n** _get_chain", chrom, pos, direction
reads = walk_local(bam, chrom, pos, direction)
if len(reads) == 0:
return []
if direction == "+":
read = reads[-1]
clipping = is_clipped(read)
if "<" in clipping[0]:
end = clipping[2]
else:
return []
elif direction == "-":
read = reads[0]
clipping = is_clipped(read)
if ">" in clipping[0]:
end = clipping[1]
else:
return []
# print "last read:", read.reference_start, read.reference_end
for other_read in reads_by_name[read.query_name]:
if other_read == read: continue
is_match = False
for other_orientation in "+-":
match = are_matched(read, direction, other_read, other_orientation)
if match[0]:
is_match = True
other_end = match[2]
break
if is_match:
next_chrom = other_read.reference_name
next_start = (other_read.pos+other_read.reference_end)/2
next_orientation = {"+":"-", "-":"+"}[other_orientation]
# if reads_overlap(read, other_read):
# return []
bad_chrom = re.search(r"gl|hap|un|_", next_chrom, re.IGNORECASE)
# print ">>>>>>>>>>>", chrom, end, direction, next_chrom, next_start, next_orientation
if bad_chrom is not None:
return []
if previous is None:
previous = set()
if (next_chrom, next_start, next_orientation) in previous:
return []
previous.add((next_chrom, next_start, next_orientation))
next_chain = _get_chain(bam, next_chrom, next_start, next_orientation,
reads_by_name, previous)
chain = [(chrom, end, direction, next_chrom, other_end, other_orientation, read.query_name)]
if next_chain is not None:
chain.extend(next_chain)
return chain
return []
def get_chain(bam, chrom, pos, reads_by_name):
chain = []
# print "orientation:", "-"*30
left_chain = _get_chain(bam, chrom, pos+5000, "-", reads_by_name)
for link in left_chain[::-1]:
chain.append(tuple(list(link[3:6])+list(link[:3])+[link[6]]))
# print "orientation:", "+"*30
right_chain = _get_chain(bam, chrom, max(0, pos-5000), "+", reads_by_name)
if right_chain is not None:
chain.extend(right_chain)
return chain
# def visualize_chain(chain):
# graph = networkx.Graph()
# prev = None
# for link in chain:
# chromx, posx, orientationx = link[:3]
# chromy, posy, orientationy = link[3:6]
# graph.add_edge(
# get_node_label(chromx, posx, orientationx),
# get_node_label(chromy, posy, orientationy)
# )
def get_node_label(chrom, position, orientation):
if orientation == "+":
return "]{}:{:,}".format(chrom, int(position))
else:
return "{}:{:,}[".format(chrom, int(position))
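# Example sketch: the bracket encodes the breakend orientation, e.g.
#     get_node_label("chr1", 1000000, "+")  ->  "]chr1:1,000,000"
#     get_node_label("chr1", 1000000, "-")  ->  "chr1:1,000,000["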
import os
import yaml
import tempfile
from mock import MagicMock, patch
from cloudify.exceptions import CommandExecutionException
from ... import env
from ...config import config
from .test_base import CliCommandTest
from .constants import BLUEPRINTS_DIR, SAMPLE_BLUEPRINT_PATH, \
SAMPLE_ARCHIVE_PATH
class BlueprintsTest(CliCommandTest):
def setUp(self):
super(BlueprintsTest, self).setUp()
self.use_manager()
def test_blueprints_list(self):
self.client.blueprints.list = MagicMock(return_value=[])
self.invoke('blueprints list')
@patch('cloudify_cli.table.generate')
def test_blueprints_list_with_values(self, table_generate_mock):
self.client.blueprints.list = MagicMock(
return_value=[
{'description': '12345678901234567890123'},
{'description': 'abcdefg'}
]
)
self.invoke('blueprints list')
table_generate_mock.assert_called_with(
[
'id',
'description',
'main_file_name',
'created_at',
'updated_at'
],
data=[{'description': '123456789012345678..'},
{'description': 'abcdefg'}]
)
def test_blueprints_delete(self):
self.client.blueprints.delete = MagicMock()
self.invoke('blueprints delete a-blueprint-id')
def test_blueprints_download(self):
self.client.blueprints.download = MagicMock(return_value='test')
outcome = self.invoke('blueprints download a-blueprint-id')
self.assertIn('Blueprint downloaded as test', outcome.logs)
@patch('cloudify_cli.table.generate', autospec=True)
@patch('cloudify_cli.table.log', autospec=True)
def test_blueprints_get(self, *args):
self.client.blueprints.get = MagicMock()
self.client.deployments.list = MagicMock()
self.invoke('blueprints get a-blueprint-id')
def test_blueprints_upload(self):
self.client.blueprints.upload = MagicMock()
self.invoke(
'blueprints upload {0}'.format(SAMPLE_BLUEPRINT_PATH))
def test_blueprints_upload_invalid(self):
self.client.blueprints.upload = MagicMock()
self.invoke(
'cfy blueprints upload {0}/bad_blueprint/blueprint.yaml '
'-b my_blueprint_id'.format(BLUEPRINTS_DIR))
def test_blueprints_upload_invalid_validate(self):
self.client.blueprints.upload = MagicMock()
self.invoke(
'cfy blueprints upload {0}/bad_blueprint/blueprint.yaml '
'-b my_blueprint_id --validate'.format(BLUEPRINTS_DIR),
err_str_segment='Failed to validate blueprint'
)
def test_blueprints_upload_archive(self):
self.client.blueprints.upload = MagicMock()
self.invoke(
'cfy blueprints upload {0} '
'-b my_blueprint_id --blueprint-filename blueprint.yaml'
.format(SAMPLE_ARCHIVE_PATH))
def test_blueprints_upload_unsupported_archive_type(self):
self.client.blueprints.upload = MagicMock()
# passing in a directory instead of a valid archive type
self.invoke(
'cfy blueprints upload {0}/helloworld -b my_blueprint_id'.format(
BLUEPRINTS_DIR),
'You must provide either a path to a local file')
def test_blueprints_upload_archive_bad_file_path(self):
self.client.blueprints.upload = MagicMock()
self.invoke(
'cfy blueprints upload {0}/helloworld.tar.gz -n blah'
.format(BLUEPRINTS_DIR),
err_str_segment="You must provide either a path to a local file")
def test_blueprints_upload_archive_no_filename(self):
# TODO: The error message here should be different - something to
# do with the filename provided being incorrect
self.client.blueprints.upload = MagicMock()
self.invoke(
'cfy blueprints upload {0}/helloworld.tar.gz -b my_blueprint_id'
.format(BLUEPRINTS_DIR),
err_str_segment="You must provide either a path to a local file")
def test_blueprints_upload_from_url(self):
self.client.blueprints.publish_archive = MagicMock()
self.invoke(
'cfy blueprints upload https://aaa.com/maste.tar.gz -n b.yaml '
'-b blueprint3')
def test_blueprint_validate(self):
self.invoke(
'cfy blueprints validate {0}'.format(
SAMPLE_BLUEPRINT_PATH))
def test_blueprint_validate_definitions_version_false(self):
with open(config.CLOUDIFY_CONFIG_PATH) as f:
conf = yaml.safe_load(f.read())
with open(config.CLOUDIFY_CONFIG_PATH, 'w') as f:
conf['validate_definitions_version'] = False
f.write(yaml.safe_dump(conf))
self.invoke(
'cfy blueprints validate '
'{0}/local/blueprint_validate_definitions_version.yaml'
.format(BLUEPRINTS_DIR))
def test_blueprint_validate_definitions_version_true(self):
self.invoke(
'cfy blueprints validate '
'{0}/local/blueprint_validate_definitions_version.yaml'
.format(BLUEPRINTS_DIR),
err_str_segment='Failed to validate blueprint description'
)
def test_validate_bad_blueprint(self):
self.invoke(
'cfy blueprints validate {0}/bad_blueprint/blueprint.yaml'
.format(BLUEPRINTS_DIR),
err_str_segment='Failed to validate blueprint')
def test_blueprint_inputs(self):
blueprint_id = 'a-blueprint-id'
name = 'test_input'
type = 'string'
description = 'Test input.'
blueprint = {
'plan': {
'inputs': {
name: {
'type': type,
'description': description
# field 'default' intentionally omitted
}
}
}
}
assert_equal = self.assertEqual
class RestClientMock(object):
class BlueprintsClientMock(object):
def __init__(self, blueprint_id, blueprint):
self.blueprint_id = blueprint_id
self.blueprint = blueprint
def get(self, blueprint_id):
assert_equal(blueprint_id, self.blueprint_id)
return self.blueprint
def __init__(self, blueprint_id, blueprint):
self.blueprints = self.BlueprintsClientMock(blueprint_id,
blueprint)
def get_rest_client_mock(*args, **kwargs):
return RestClientMock(blueprint_id, blueprint)
def table_mock(fields, data, *args, **kwargs):
self.assertEqual(len(data), 1)
input = data[0]
self.assertIn('name', input)
self.assertIn('type', input)
self.assertIn('default', input)
self.assertIn('description', input)
self.assertEqual(input['name'], name)
self.assertEqual(input['type'], type)
self.assertEqual(input['default'], '-')
self.assertEqual(input['description'], description)
with patch('cloudify_cli.env.get_rest_client',
get_rest_client_mock),\
patch('cloudify_cli.table.generate', table_mock):
self.invoke('cfy blueprints inputs {0}'.format(blueprint_id))
def test_create_requirements(self):
local_dir = os.path.join(BLUEPRINTS_DIR, 'local')
blueprint_path = os.path.join(local_dir, 'blueprint_with_plugins.yaml')
expected_requirements = {
'http://localhost/plugin.zip',
os.path.join(local_dir, 'plugins', 'local_plugin'),
'http://localhost/host_plugin.zip'
}
tmp_requirements_path = os.path.join(
env.CLOUDIFY_WORKDIR, 'requirements.txt')
self.invoke('cfy blueprints create-requirements {0} -o {1}'
.format(blueprint_path, tmp_requirements_path))
with open(tmp_requirements_path, 'r') as f:
actual_requirements = set(f.read().split())
self.assertEqual(actual_requirements, expected_requirements)
def test_create_requirements_existing_output_file(self):
blueprint_path = '{0}/local/blueprint_with_plugins.yaml'\
.format(BLUEPRINTS_DIR)
file_path = tempfile.mktemp()
with open(file_path, 'w') as f:
f.write('')
self.invoke(
'cfy blueprints create-requirements {0} -o {1}'
.format(blueprint_path, file_path),
err_str_segment='Path {0} already exists'
.format(file_path)
)
def test_create_requirements_output_to_screen(self):
local_dir = os.path.join(BLUEPRINTS_DIR, 'local')
blueprint_path = os.path.join(local_dir, 'blueprint_with_plugins.yaml')
expected_requirements = {
'http://localhost/plugin.zip',
os.path.join(local_dir, 'plugins', 'local_plugin'),
'http://localhost/host_plugin.zip'
}
output = self.invoke('cfy blueprints create-requirements {0}'
.format(blueprint_path)).logs.split('\n')
for requirement in expected_requirements:
self.assertIn(requirement, output)
def test_install_plugins(self):
self.invoke('cfy use local')
blueprint_path = os.path.join(
BLUEPRINTS_DIR,
'local',
'blueprint_with_plugins.yaml'
)
output = self.invoke(
'cfy blueprints install-plugins {0}'.format(blueprint_path),
err_str_segment='Invalid requirement',
exception=CommandExecutionException
)
self.assertIn('pip install -r', output.exception.command)
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Curses-Based Command-Line Interface of TensorFlow Debugger (tfdbg)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import curses
from curses import textpad
import signal
import sys
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.debug.cli import command_parser
from tensorflow.python.debug.cli import debugger_cli_common
from tensorflow.python.debug.cli import tensor_format
class CursesUI(object):
"""Curses-based Command-line UI.
In this class, the methods with the prefix "_screen_" are the methods that
interact with the actual terminal using the curses library.
"""
CLI_PROMPT = "tfdbg> "
CLI_EXIT_COMMANDS = ["exit", "quit"]
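  # 7 is Ctrl-G (ASCII BEL), which curses.textpad.Textbox.edit() recognizes as
  # its end-of-edit key.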
CLI_TERMINATOR_KEY = 7 # Terminator key for input text box.
CLI_TAB_KEY = ord("\t")
REGEX_SEARCH_PREFIX = "/"
TENSOR_INDICES_NAVIGATION_PREFIX = "@"
ERROR_MESSAGE_PREFIX = "ERROR: "
  # Possible Enter keys. 343 is the curses key code for the num-pad Enter key
  # when num lock is off.
CLI_CR_KEYS = [ord("\n"), ord("\r"), 343]
_SCROLL_REFRESH = "refresh"
_SCROLL_UP = "up"
_SCROLL_DOWN = "down"
_SCROLL_HOME = "home"
_SCROLL_END = "end"
_SCROLL_TO_LINE_INDEX = "scroll_to_line_index"
_FOREGROUND_COLORS = {
"white": curses.COLOR_WHITE,
"red": curses.COLOR_RED,
"green": curses.COLOR_GREEN,
"yellow": curses.COLOR_YELLOW,
"blue": curses.COLOR_BLUE,
"magenta": curses.COLOR_MAGENTA,
"black": curses.COLOR_BLACK,
}
_BACKGROUND_COLORS = {
"white": curses.COLOR_WHITE,
"black": curses.COLOR_BLACK,
}
# Font attribute for search and highlighting.
_SEARCH_HIGHLIGHT_FONT_ATTR = "black_on_white"
_ARRAY_INDICES_COLOR_PAIR = "black_on_white"
_ERROR_TOAST_COLOR_PAIR = "red_on_white"
_STATUS_BAR_COLOR_PAIR = "black_on_white"
def __init__(self):
self._screen_init()
self._screen_refresh_size()
# TODO(cais): Error out if the size of the screen is too small.
# Initialize some UI component size and locations.
self._init_layout()
self._command_handler_registry = (
debugger_cli_common.CommandHandlerRegistry())
# Create tab completion registry and register the empty-str (top-level)
# tab-completion context with it.
self._tab_completion_registry = debugger_cli_common.TabCompletionRegistry()
# Create top-level tab-completion context and register the exit and help
# commands.
self._tab_completion_registry.register_tab_comp_context(
[""], self.CLI_EXIT_COMMANDS +
[debugger_cli_common.CommandHandlerRegistry.HELP_COMMAND] +
debugger_cli_common.CommandHandlerRegistry.HELP_COMMAND_ALIASES)
self._command_history_store = debugger_cli_common.CommandHistory()
    # Active list of command history, used in history navigation.
    # _command_history_store holds all the history commands the CLI has
    # received, up to a size limit. _active_command_history is the history
    # currently being navigated in, e.g., using the Up/Down keys. The latter
    # can be different from the former during prefixed or regex-based history
    # navigation, e.g., when the user enters the beginning of a command and
    # hits Up.
self._active_command_history = []
# Pointer to the current position in the history sequence.
# 0 means it is a new command being keyed in.
self._command_pointer = 0
self._command_history_limit = 100
self._pending_command = ""
# State related to screen output.
self._output_pad = None
self._output_pad_row = 0
self._output_array_pointer_indices = None
self._curr_unwrapped_output = None
self._curr_wrapped_output = None
# Register signal handler for SIGINT.
signal.signal(signal.SIGINT, self._interrupt_handler)
def _init_layout(self):
"""Initialize the layout of UI components.
Initialize the location and size of UI components such as command textbox
and output region according to the terminal size.
"""
# NamedTuple for rectangular locations on screen
self.rectangle = collections.namedtuple("rectangle",
"top left bottom right")
# Height of command text box
self._command_textbox_height = 2
self._title_row = 0
    # Top row index of the output pad.
    # A "pad" is a curses object that holds lines of text and is not limited to
    # the screen size. It can be rendered on the screen partially, with scroll
    # parameters specified.
self._output_top_row = 1
# Number of rows that the output pad has.
self._output_num_rows = (
self._max_y - self._output_top_row - self._command_textbox_height - 1)
    # Row index of the scroll information line, taking into account the
    # zero-based row indexing and the command textbox area below the scroll
    # information row.
self._output_scroll_row = self._max_y - 1 - self._command_textbox_height
    # Top and bottom rows of the tab-completion candidates display area.
self._candidates_top_row = self._output_scroll_row - 4
self._candidates_bottom_row = self._output_scroll_row - 1
# Maximum number of lines the candidates display can have.
self._candidates_max_lines = int(self._output_num_rows / 2)
self.max_output_lines = 10000
# Regex search state.
self._curr_search_regex = None
self._unwrapped_regex_match_lines = []
# Size of view port on screen, which is always smaller or equal to the
# screen size.
self._output_pad_screen_height = self._output_num_rows - 1
self._output_pad_screen_width = self._max_x - 1
self._output_pad_screen_location = self.rectangle(
top=self._output_top_row,
left=0,
bottom=self._output_top_row + self._output_num_rows,
right=self._output_pad_screen_width)
def _screen_init(self):
"""Screen initialization.
    Creates the curses stdscr and initializes the color pairs for display.
"""
self._stdscr = curses.initscr()
self._command_window = None
# Prepare color pairs.
curses.start_color()
self._color_pairs = {}
color_index = 0
for fg_color in self._FOREGROUND_COLORS:
for bg_color in self._BACKGROUND_COLORS:
color_index += 1
curses.init_pair(color_index, self._FOREGROUND_COLORS[fg_color],
self._BACKGROUND_COLORS[bg_color])
color_name = fg_color
if bg_color != "black":
color_name += "_on_" + bg_color
self._color_pairs[color_name] = curses.color_pair(color_index)
# A_BOLD is not really a "color". But place it here for convenience.
self._color_pairs["bold"] = curses.A_BOLD
# Default color pair to use when a specified color pair does not exist.
self._default_color_pair = self._color_pairs["white"]
def _screen_launch(self):
"""Launch the curses screen."""
curses.noecho()
curses.cbreak()
self._stdscr.keypad(1)
self._screen_create_command_window()
def _screen_create_command_window(self):
"""Create command window according to screen size."""
if self._command_window:
del self._command_window
self._command_window = curses.newwin(
self._command_textbox_height, self._max_x - len(self.CLI_PROMPT),
self._max_y - self._command_textbox_height, len(self.CLI_PROMPT))
def _screen_refresh(self):
self._stdscr.refresh()
def _screen_terminate(self):
"""Terminate the curses screen."""
self._stdscr.keypad(0)
curses.nocbreak()
curses.echo()
curses.endwin()
# Remove SIGINT handler.
signal.signal(signal.SIGINT, signal.SIG_DFL)
def run_ui(self, init_command=None, title=None, title_color=None):
"""Run the Curses CLI.
Args:
init_command: (str) Optional command to run on CLI start up.
title: (str) Optional title to display in the CLI.
title_color: (str) Optional color of the title, e.g., "yellow".
Returns:
An exit token of arbitrary type. Can be None.
"""
self._screen_launch()
# Optional initial command.
if init_command is not None:
self._dispatch_command(init_command)
if title is not None:
self._title(title, title_color=title_color)
# CLI main loop.
exit_token = self._ui_loop()
self._screen_terminate()
return exit_token
def register_command_handler(self,
prefix,
handler,
help_info,
prefix_aliases=None):
"""A wrapper around CommandHandlerRegistry.register_command_handler().
In addition to calling the wrapped register_command_handler() method, this
method also registers the top-level tab-completion context based on the
command prefixes and their aliases.
See the doc string of the wrapped method for more details on the args.
Args:
prefix: (str) command prefix.
handler: (callable) command handler.
help_info: (str) help information.
prefix_aliases: (list of str) aliases of the command prefix.
"""
self._command_handler_registry.register_command_handler(
prefix, handler, help_info, prefix_aliases=prefix_aliases)
self._tab_completion_registry.extend_comp_items("", [prefix])
if prefix_aliases:
self._tab_completion_registry.extend_comp_items("", prefix_aliases)
def register_tab_comp_context(self, *args, **kwargs):
"""Wrapper around TabCompletionRegistry.register_tab_comp_context()."""
self._tab_completion_registry.register_tab_comp_context(*args, **kwargs)
def set_help_intro(self, help_intro):
"""Set an introductory message to the help output of the command registry.
Args:
      help_intro: (list of str) Text lines appended to the beginning of the
        output of the command "help", as introductory information.
"""
self._command_handler_registry.set_help_intro(help_intro=help_intro)
def get_help(self):
return self._command_handler_registry.get_help()
def _screen_create_command_textbox(self, existing_command):
"""Create command textbox on screen.
Args:
existing_command: (str) A command string to put in the textbox right
after its creation.
"""
# Display the tfdbg prompt.
self._stdscr.addstr(self._max_y - self._command_textbox_height, 0,
self.CLI_PROMPT, curses.A_BOLD)
self._stdscr.refresh()
self._command_window.clear()
# Command text box.
self._command_textbox = textpad.Textbox(
self._command_window, insert_mode=True)
# Enter existing command.
self._auto_key_in(existing_command)
def _ui_loop(self):
"""Command-line UI loop.
Returns:
An exit token of arbitrary type. The token can be None.
"""
while True:
# Enter history command if pointer is in history (> 0):
if self._command_pointer > 0:
existing_command = self._active_command_history[-self._command_pointer]
else:
existing_command = self._pending_command
self._screen_create_command_textbox(existing_command)
command, terminator, pending_command_changed = self._get_user_command()
if terminator in self.CLI_CR_KEYS:
exit_token = self._dispatch_command(command)
if exit_token is not None:
return exit_token
elif terminator == self.CLI_TAB_KEY:
tab_completed = self._tab_complete(command)
self._pending_command = tab_completed
        self._command_pointer = 0
elif pending_command_changed:
self._pending_command = command
return
def _get_user_command(self):
"""Get user command from UI.
Returns:
command: (str) The user-entered command.
      terminator: (int) The key code of the terminator key for the command.
If command is a normal command entered with the Enter key, the value
will be the key itself. If this is a tab completion call (using the
Tab key), the value will reflect that as well.
pending_command_changed: (bool) If the pending command has changed.
Used during command history navigation.
"""
# First, reset textbox state variables.
self._textbox_curr_terminator = None
self._textbox_pending_command_changed = False
command = self._screen_get_user_command()
command = self._strip_terminator(command)
return (command, self._textbox_curr_terminator,
self._textbox_pending_command_changed)
def _screen_get_user_command(self):
return self._command_textbox.edit(validate=self._on_textbox_keypress)
def _strip_terminator(self, command):
for v in self.CLI_CR_KEYS:
if v < 256:
command = command.replace(chr(v), "")
return command.strip()
def _screen_refresh_size(self):
self._max_y, self._max_x = self._stdscr.getmaxyx()
def _dispatch_command(self, command):
"""Dispatch user command.
Args:
command: (str) Command to dispatch.
Returns:
An exit token object. None value means that the UI loop should not exit.
A non-None value means the UI loop should exit.
"""
if command in self.CLI_EXIT_COMMANDS:
# Explicit user command-triggered exit: EXPLICIT_USER_EXIT as the exit
# token.
return debugger_cli_common.EXPLICIT_USER_EXIT
if command:
self._command_history_store.add_command(command)
if (command.startswith(self.REGEX_SEARCH_PREFIX) and
self._curr_unwrapped_output):
if len(command) > len(self.REGEX_SEARCH_PREFIX):
# Command is like "/regex". Perform regex search.
regex = command[len(self.REGEX_SEARCH_PREFIX):]
self._curr_search_regex = regex
self._display_output(self._curr_unwrapped_output, highlight_regex=regex)
elif self._unwrapped_regex_match_lines:
# Command is "/". Continue scrolling down matching lines.
self._display_output(
self._curr_unwrapped_output,
is_refresh=True,
highlight_regex=self._curr_search_regex)
self._command_pointer = 0
self._pending_command = ""
return
elif command.startswith(self.TENSOR_INDICES_NAVIGATION_PREFIX):
indices_str = command[1:].strip()
if indices_str:
try:
indices = command_parser.parse_indices(indices_str)
omitted, line_index = tensor_format.locate_tensor_element(
self._curr_wrapped_output, indices)
if not omitted:
self._scroll_output(
self._SCROLL_TO_LINE_INDEX, line_index=line_index)
except Exception as e: # pylint: disable=broad-except
self._error_toast(str(e))
else:
self._error_toast("Empty indices.")
return
prefix, args = self._parse_command(command)
if not prefix:
# Empty command: take no action. Should not exit.
return
screen_info = {"cols": self._max_x}
exit_token = None
if self._command_handler_registry.is_registered(prefix):
try:
screen_output = self._command_handler_registry.dispatch_command(
prefix, args, screen_info=screen_info)
except debugger_cli_common.CommandLineExit as e:
exit_token = e.exit_token
else:
screen_output = debugger_cli_common.RichTextLines([
self.ERROR_MESSAGE_PREFIX + "Invalid command prefix \"%s\"" % prefix
])
# Clear active command history. Until next up/down history navigation
# occurs, it will stay empty.
self._active_command_history = []
if exit_token is not None:
return exit_token
self._display_output(screen_output)
self._command_pointer = 0
self._pending_command = ""
def _parse_command(self, command):
"""Parse a command string into prefix and arguments.
Args:
command: (str) Command string to be parsed.
Returns:
prefix: (str) The command prefix.
args: (list of str) The command arguments (i.e., not including the
prefix).
"""
command = command.strip()
if not command:
return "", []
command_items = command_parser.parse_command(command)
return command_items[0], command_items[1:]
def _screen_gather_textbox_str(self):
"""Gather the text string in the command text box.
Returns:
(str) the current text string in the command textbox, excluding any
return keys.
"""
txt = self._command_textbox.gather()
return txt.strip()
def _on_textbox_keypress(self, x):
"""Text box key validator: Callback of key strokes.
Handles a user's keypress in the input text box. Translates certain keys to
terminator keys for the textbox to allow its edit() method to return.
Also handles special key-triggered events such as PgUp/PgDown scrolling of
the screen output.
Args:
x: (int) Key code.
Returns:
(int) A translated key code. In most cases, this is identical to the
input x. However, if x is a Return key, the return value will be
CLI_TERMINATOR_KEY, so that the text box's edit() method can return.
Raises:
TypeError: If the input x is not of type int.
"""
if not isinstance(x, int):
raise TypeError("Key validator expected type int, received type %s" %
type(x))
if x in self.CLI_CR_KEYS:
# Make Enter key the terminator
self._textbox_curr_terminator = x
return self.CLI_TERMINATOR_KEY
elif x == self.CLI_TAB_KEY:
self._textbox_curr_terminator = self.CLI_TAB_KEY
return self.CLI_TERMINATOR_KEY
elif x == curses.KEY_PPAGE:
self._scroll_output(self._SCROLL_UP)
return x
elif x == curses.KEY_NPAGE:
self._scroll_output(self._SCROLL_DOWN)
return x
elif x == curses.KEY_HOME:
self._scroll_output(self._SCROLL_HOME)
return x
elif x == curses.KEY_END:
self._scroll_output(self._SCROLL_END)
return x
elif x in [curses.KEY_UP, curses.KEY_DOWN]:
# Command history navigation.
if not self._active_command_history:
hist_prefix = self._screen_gather_textbox_str()
self._active_command_history = (
self._command_history_store.lookup_prefix(
hist_prefix, self._command_history_limit))
if self._active_command_history:
if x == curses.KEY_UP:
if self._command_pointer < len(self._active_command_history):
self._command_pointer += 1
elif x == curses.KEY_DOWN:
if self._command_pointer > 0:
self._command_pointer -= 1
else:
self._command_pointer = 0
self._textbox_curr_terminator = x
# Force return from the textbox edit(), so that the textbox can be
# redrawn with a history command entered.
return self.CLI_TERMINATOR_KEY
elif x == curses.KEY_RESIZE:
# Respond to terminal resize.
self._screen_refresh_size()
self._init_layout()
self._screen_create_command_window()
if self._curr_unwrapped_output is not None:
# Force render screen output again, under new screen size.
self._output_pad = self._display_output(
self._curr_unwrapped_output, is_refresh=True)
# Force return from the textbox edit(), so that the textbox can be
# redrawn.
return self.CLI_TERMINATOR_KEY
else:
# Mark the pending command as modified.
self._textbox_pending_command_changed = True
# Invalidate active command history.
self._command_pointer = 0
self._active_command_history = []
return x
def _title(self, title, title_color=None):
"""Display title.
Args:
title: (str) The title to display.
title_color: (str) Color of the title, e.g., "yellow".
"""
# Pad input title str with "-" and space characters to make it pretty.
self._title_line = "--- %s " % title
if len(self._title_line) < self._max_x:
self._title_line += "-" * (self._max_x - len(self._title_line))
self._screen_draw_text_line(
self._title_row, self._title_line, color=title_color)
def _auto_key_in(self, command):
"""Automatically key in a command to the command Textbox.
Args:
command: The command, as a string.
"""
for c in command:
self._command_textbox.do_command(ord(c))
def _screen_draw_text_line(self, row, line, attr=curses.A_NORMAL, color=None):
"""Render a line of text on the screen.
Args:
row: (int) Row index.
line: (str) The line content.
attr: curses font attribute.
color: (str) font foreground color name.
Raises:
TypeError: If row is not of type int.
"""
if not isinstance(row, int):
raise TypeError("Invalid type in row")
if len(line) > self._max_x:
line = line[:self._max_x]
if color is None:
self._stdscr.addstr(row, 0, line, attr)
else:
self._stdscr.addstr(row, 0, line, self._color_pairs[color])
self._screen_refresh()
def _screen_new_output_pad(self, rows, cols):
"""Generate a new pad on the screen.
Args:
rows: (int) Number of rows the pad will have: not limited to screen size.
cols: (int) Number of columns the pad will have: not limited to screen
size.
Returns:
A curses textpad object.
"""
return curses.newpad(rows, cols)
def _screen_display_output(self, output):
"""Actually render text output on the screen.
    Wraps the lines according to screen width. Pads lines below according to
screen height so that the user can scroll the output to a state where
the last non-empty line is on the top of the screen. Then renders the
lines on the screen.
Args:
output: (RichTextLines) text lines to display on the screen. These lines
may have widths exceeding the screen width. This method will take care
of the wrapping.
Returns:
(List of int) A list of line indices, in the wrapped output, where there
are regex matches.
"""
# Wrap the output lines according to screen width.
self._curr_wrapped_output, wrapped_line_indices = (
debugger_cli_common.wrap_rich_text_lines(output, self._max_x - 1))
# Append lines to curr_wrapped_output so that the user can scroll to a
# state where the last text line is on the top of the output area.
self._curr_wrapped_output.lines.extend([""] * (self._output_num_rows - 1))
# Limit number of lines displayed to avoid curses overflow problems.
if self._curr_wrapped_output.num_lines() > self.max_output_lines:
self._curr_wrapped_output = self._curr_wrapped_output.slice(
0, self.max_output_lines)
self._curr_wrapped_output.lines.append("Output cut off at %d lines!" %
self.max_output_lines)
self._curr_wrapped_output.font_attr_segs[self.max_output_lines] = [
(0, len(output.lines[-1]), "magenta")
]
(self._output_pad, self._output_pad_height,
self._output_pad_width) = self._display_lines(self._curr_wrapped_output,
self._output_num_rows)
# The indices of lines with regex matches (if any) need to be mapped to
# indices of wrapped lines.
return [
wrapped_line_indices[line]
for line in self._unwrapped_regex_match_lines
]
def _display_output(self, output, is_refresh=False, highlight_regex=None):
"""Display text output in a scrollable text pad.
This method does some preprocessing on the text lines, render them on the
screen and scroll to the appropriate line. These are done according to regex
highlighting requests (if any), scroll-to-next-match requests (if any),
and screen refresh requests (if any).
    TODO(cais): Separate these unrelated requests to increase clarity and
      maintainability.
Args:
output: A RichTextLines object that is the screen output text.
is_refresh: (bool) Is this a refreshing display with existing output.
highlight_regex: (str) Optional string representing the regex used to
search and highlight in the current screen output.
"""
if highlight_regex:
try:
output = debugger_cli_common.regex_find(
output, highlight_regex, font_attr=self._SEARCH_HIGHLIGHT_FONT_ATTR)
except ValueError as e:
self._error_toast(str(e))
return
if not is_refresh:
# Perform new regex search on the current output.
self._unwrapped_regex_match_lines = output.annotations[
debugger_cli_common.REGEX_MATCH_LINES_KEY]
else:
# Continue scrolling down.
self._output_pad_row += 1
else:
self._curr_unwrapped_output = output
self._unwrapped_regex_match_lines = []
# Display output on the screen.
wrapped_regex_match_lines = self._screen_display_output(output)
# Now that the text lines are displayed on the screen scroll to the
# appropriate line according to previous scrolling state and regex search
# and highlighting state.
if highlight_regex:
next_match_line = -1
for match_line in wrapped_regex_match_lines:
if match_line >= self._output_pad_row:
next_match_line = match_line
break
if next_match_line >= 0:
self._scroll_output(
self._SCROLL_TO_LINE_INDEX, line_index=next_match_line)
else:
# Regex search found no match >= current line number. Display message
# stating as such.
self._toast("Pattern not found", color=self._ERROR_TOAST_COLOR_PAIR)
elif is_refresh:
self._scroll_output(self._SCROLL_REFRESH)
else:
self._output_pad_row = 0
self._scroll_output(self._SCROLL_HOME)
def _display_lines(self, output, min_num_rows):
"""Display RichTextLines object on screen.
Args:
output: A RichTextLines object.
min_num_rows: (int) Minimum number of output rows.
Returns:
1) The text pad object used to display the text.
2) (int) number of rows of the text pad, which may exceed screen size.
3) (int) number of columns of the text pad.
Raises:
ValueError: If input argument "output" is invalid.
"""
if not isinstance(output, debugger_cli_common.RichTextLines):
raise ValueError(
"Output is required to be an instance of RichTextLines, but is not.")
self._screen_refresh()
# Number of rows the output area will have.
rows = max(min_num_rows, len(output.lines))
# Size of the output pad, which may exceed screen size and require
# scrolling.
cols = self._max_x - 1
# Create new output pad.
pad = self._screen_new_output_pad(rows, cols)
for i in xrange(len(output.lines)):
if i in output.font_attr_segs:
self._screen_add_line_to_output_pad(
pad, i, output.lines[i], color_segments=output.font_attr_segs[i])
else:
self._screen_add_line_to_output_pad(pad, i, output.lines[i])
return pad, rows, cols
def _screen_add_line_to_output_pad(self, pad, row, txt, color_segments=None):
"""Render a line in a text pad.
Assumes: segments in color_segments are sorted in ascending order of the
beginning index.
    Note: Gaps between the segments are allowed and will be filled in with the
      default color.
Args:
pad: The text pad to render the line in.
row: Row index, as an int.
txt: The text to be displayed on the specified row, as a str.
color_segments: A list of 3-tuples. Each tuple represents the beginning
and the end of a color segment, in the form of a right-open interval:
[start, end). The last element of the tuple is a color string, e.g.,
"red".
    Raises:
TypeError: If color_segments is not of type list.
"""
if not color_segments:
pad.addstr(row, 0, txt, self._default_color_pair)
return
if not isinstance(color_segments, list):
raise TypeError("Input color_segments needs to be a list, but is not.")
all_segments = []
all_color_pairs = []
# Process the beginning.
if color_segments[0][0] == 0:
pass
else:
all_segments.append((0, color_segments[0][0]))
all_color_pairs.append(self._default_color_pair)
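    # Pair each segment with the start of the following one; the appended
    # sentinel (len(txt), None, None) stands in for "end of line" so any gap
    # after the last segment is also filled with the default color.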
for (curr_start, curr_end, curr_color), (next_start, _, _) in zip(
color_segments, color_segments[1:] + [(len(txt), None, None)]):
all_segments.append((curr_start, curr_end))
all_color_pairs.append(
self._color_pairs.get(curr_color, self._default_color_pair))
if curr_end < next_start:
# Fill in the gap with the default color.
all_segments.append((curr_end, next_start))
all_color_pairs.append(self._default_color_pair)
# Finally, draw all the segments.
for segment, color_pair in zip(all_segments, all_color_pairs):
pad.addstr(row, segment[0], txt[segment[0]:segment[1]], color_pair)
def _screen_scroll_output_pad(self, pad, viewport_top, viewport_left,
screen_location_top, screen_location_left,
screen_location_bottom, screen_location_right):
pad.refresh(viewport_top, viewport_left, screen_location_top,
screen_location_left, screen_location_bottom,
screen_location_right)
def _scroll_output(self, direction, line_index=None):
"""Scroll the output pad.
Args:
direction: _SCROLL_REFRESH, _SCROLL_UP, _SCROLL_DOWN, _SCROLL_HOME or
_SCROLL_END, _SCROLL_TO_LINE_INDEX
line_index: (int) Specifies the zero-based line index to scroll to.
Applicable only if direction is _SCROLL_TO_LINE_INDEX.
Raises:
ValueError: On invalid scroll direction.
TypeError: If line_index is not int and direction is
_SCROLL_TO_LINE_INDEX.
"""
if not self._output_pad:
# No output pad is present. Do nothing.
return
if direction == self._SCROLL_REFRESH:
pass
elif direction == self._SCROLL_UP:
# Scroll up
if self._output_pad_row - 1 >= 0:
self._output_pad_row -= 1
elif direction == self._SCROLL_DOWN:
# Scroll down
if self._output_pad_row + 1 < (
self._output_pad_height - self._output_pad_screen_height):
self._output_pad_row += 1
elif direction == self._SCROLL_HOME:
# Scroll to top
self._output_pad_row = 0
elif direction == self._SCROLL_END:
# Scroll to bottom
self._output_pad_row = (
self._output_pad_height - self._output_pad_screen_height - 1)
elif direction == self._SCROLL_TO_LINE_INDEX:
if not isinstance(line_index, int):
raise TypeError("Invalid line_index type (%s) under mode %s" %
(type(line_index), self._SCROLL_TO_LINE_INDEX))
self._output_pad_row = line_index
else:
raise ValueError("Unsupported scroll mode: %s" % direction)
# Actually scroll the output pad: refresh with new location.
self._screen_scroll_output_pad(self._output_pad, self._output_pad_row, 0,
self._output_pad_screen_location.top,
self._output_pad_screen_location.left,
self._output_pad_screen_location.bottom,
self._output_pad_screen_location.right)
if self._output_pad_height > self._output_pad_screen_height + 1:
# Display information about the scrolling of tall screen output.
self._scroll_info = "--- Scroll: %.2f%% " % (100.0 * (min(
1.0,
float(self._output_pad_row) /
(self._output_pad_height - self._output_pad_screen_height - 1))))
self._output_array_pointer_indices = self._show_array_indices()
# Add array indices information to scroll message.
if self._output_array_pointer_indices:
if self._output_array_pointer_indices[0]:
self._scroll_info += self._format_indices(
self._output_array_pointer_indices[0])
self._scroll_info += "-"
if self._output_array_pointer_indices[-1]:
self._scroll_info += self._format_indices(
self._output_array_pointer_indices[-1])
self._scroll_info += " "
if len(self._scroll_info) < self._max_x:
self._scroll_info += "-" * (self._max_x - len(self._scroll_info))
self._screen_draw_text_line(
self._output_scroll_row,
self._scroll_info,
color=self._STATUS_BAR_COLOR_PAIR)
else:
# Screen output is not tall enough to cause scrolling.
self._scroll_info = "-" * self._max_x
self._screen_draw_text_line(
self._output_scroll_row,
self._scroll_info,
color=self._STATUS_BAR_COLOR_PAIR)
def _format_indices(self, indices):
# Remove the spaces to make it compact.
return repr(indices).replace(" ", "")
def _show_array_indices(self):
"""Show array indices for the lines at the top and bottom of the output.
For the top line and bottom line of the output display area, show the
element indices of the array being displayed.
Returns:
      If either the top or the bottom row has any matching array indices,
a dict from line index (0 being the top of the display area, -1
being the bottom of the display area) to array element indices. For
example:
{0: [0, 0], -1: [10, 0]}
Otherwise, None.
"""
indices_top = self._show_array_index_at_line(0)
bottom_line_index = (self._output_pad_screen_location.bottom -
self._output_pad_screen_location.top - 1)
indices_bottom = self._show_array_index_at_line(bottom_line_index)
if indices_top or indices_bottom:
return {0: indices_top, -1: indices_bottom}
else:
return None
def _show_array_index_at_line(self, line_index):
"""Show array indices for the specified line in the display area.
Uses the line number to array indices map in the annotations field of the
RichTextLines object being displayed.
If the displayed RichTextLines object does not contain such a mapping,
will do nothing.
Args:
line_index: (int) 0-based line index from the top of the display area.
        For example, if line_index == 0, this method will display the array
indices for the line currently at the top of the display area.
Returns:
(list) The array indices at the specified line, if available. None, if
not available.
"""
# Examine whether the index information is available for the specified line
# number.
pointer = self._output_pad_row + line_index
if pointer in self._curr_wrapped_output.annotations:
indices = self._curr_wrapped_output.annotations[pointer]["i0"]
array_indices_str = self._format_indices(indices)
array_indices_info = "@" + array_indices_str
self._toast(
array_indices_info,
color=self._ARRAY_INDICES_COLOR_PAIR,
line_index=self._output_pad_screen_location.top + line_index)
return indices
else:
return None
def _tab_complete(self, command_str):
"""Perform tab completion.
Obtains tab completion candidates.
If there are no candidates, return command_str and take no other actions.
If there are candidates, display the candidates on screen and return
command_str + (common prefix of the candidates).
Args:
command_str: (str) The str in the command input textbox when Tab key is
hit.
Returns:
(str) Completed string. Could be the same as command_str if no completion
candidate is available. If candidate(s) are available, return command_str
appended by the common prefix of the candidates.
"""
command_str = command_str.lstrip()
if not command_str:
# Empty (top-level) context.
context = ""
prefix = ""
items = []
else:
items = command_str.split(" ")
if len(items) == 1:
# Single word: top-level context.
context = ""
prefix = items[0]
else:
# Multiple words.
context = items[0]
prefix = items[-1]
candidates, common_prefix = self._tab_completion_registry.get_completions(
context, prefix)
if candidates and len(candidates) > 1:
self._display_candidates(candidates)
else:
# In the case of len(candidates) == 1, the single completion will be
# entered to the textbox automatically. So there is no need to show any
# candidates.
self._display_candidates([])
if common_prefix:
# Common prefix is not None and non-empty. The completed string will
# incorporate the common prefix.
return " ".join(items[:-1] + [common_prefix])
else:
return " ".join(items)
def _display_candidates(self, candidates):
"""Show candidates (e.g., tab-completion candidates) on multiple lines.
Args:
candidates: (list of str) candidates.
"""
if self._curr_unwrapped_output:
# Force refresh screen output.
self._scroll_output(self._SCROLL_REFRESH)
if not candidates:
return
candidates_prefix = "Candidates: "
candidates_line = candidates_prefix + " ".join(candidates)
candidates_output = debugger_cli_common.RichTextLines(
candidates_line,
font_attr_segs={
0: [(len(candidates_prefix), len(candidates_line), "yellow")]
})
candidates_output, _ = debugger_cli_common.wrap_rich_text_lines(
candidates_output, self._max_x - 2)
# Calculate how many lines the candidate text should occupy. Limit it to
# a maximum value.
candidates_num_rows = min(
len(candidates_output.lines), self._candidates_max_lines)
self._candidates_top_row = (
self._candidates_bottom_row - candidates_num_rows + 1)
# Render the candidate text on screen.
pad, _, _ = self._display_lines(candidates_output, 0)
self._screen_scroll_output_pad(
pad, 0, 0, self._candidates_top_row, 0,
self._candidates_top_row + candidates_num_rows - 1, self._max_x - 1)
def _toast(self, message, color=None, line_index=None):
"""Display a one-line message on the screen.
By default, the toast is displayed in the line right above the scroll bar.
But the line location can be overridden with the line_index arg.
Args:
message: (str) the message to display.
color: (str) optional color attribute for the message.
line_index: (int) line index.
"""
pad, _, _ = self._display_lines(
debugger_cli_common.RichTextLines(
message,
font_attr_segs={0: [(0, len(message), color or "white")]}),
0)
right_end = min(len(message), self._max_x - 1)
if line_index is None:
line_index = self._output_scroll_row - 1
self._screen_scroll_output_pad(pad, 0, 0, line_index, 0, line_index,
right_end)
def _error_toast(self, message):
"""Display a one-line error message on screen.
Args:
message: The error message, without the preceding "ERROR: " substring.
"""
self._toast(
self.ERROR_MESSAGE_PREFIX + message, color=self._ERROR_TOAST_COLOR_PAIR)
def _interrupt_handler(self, signal_num, frame):
_ = signal_num # Unused.
_ = frame # Unused.
self._screen_terminate()
print("\ntfdbg: caught SIGINT; calling sys.exit(1).", file=sys.stderr)
sys.exit(1)
| |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2010 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Management class for Storage-related functions (attach, detach, etc).
"""
from nova import exception
from nova.openstack.common import log as logging
from nova.virt.xenapi import vm_utils
from nova.virt.xenapi import volume_utils
LOG = logging.getLogger(__name__)
class VolumeOps(object):
"""
Management class for Volume-related tasks
"""
def __init__(self, session):
self._session = session
def create_volume_for_sm(self, volume, sr_uuid):
LOG.debug("Creating volume for Storage Manager")
sm_vol_rec = {}
try:
sr_ref = self._session.call_xenapi("SR.get_by_uuid", sr_uuid)
except self._session.XenAPI.Failure, exc:
LOG.exception(exc)
raise volume_utils.StorageError(_('Unable to get SR using uuid'))
        # Create VDI
label = 'vol-' + volume['id']
desc = 'xensm volume for ' + volume['id']
# size presented to xenapi is in bytes, while euca api is in GB
vdi_size = volume['size'] * 1024 * 1024 * 1024
vdi_ref = vm_utils.create_vdi(self._session, sr_ref,
None, label, desc,
vdi_size, False)
vdi_rec = self._session.call_xenapi("VDI.get_record", vdi_ref)
sm_vol_rec['vdi_uuid'] = vdi_rec['uuid']
return sm_vol_rec
def delete_volume_for_sm(self, vdi_uuid):
vdi_ref = self._session.call_xenapi("VDI.get_by_uuid", vdi_uuid)
if vdi_ref is None:
raise exception.NovaException(_('Could not find VDI ref'))
vm_utils.destroy_vdi(self._session, vdi_ref)
def create_sr(self, label, params):
LOG.debug(_("Creating SR %s") % label)
sr_ref = volume_utils.create_sr(self._session, label, params)
if sr_ref is None:
raise exception.NovaException(_('Could not create SR'))
sr_rec = self._session.call_xenapi("SR.get_record", sr_ref)
if sr_rec is None:
raise exception.NovaException(_('Could not retrieve SR record'))
return sr_rec['uuid']
# Checks if sr has already been introduced to this host
def introduce_sr(self, sr_uuid, label, params):
LOG.debug(_("Introducing SR %s") % label)
sr_ref = volume_utils.find_sr_by_uuid(self._session, sr_uuid)
if sr_ref:
LOG.debug(_('SR found in xapi database. No need to introduce'))
return sr_ref
sr_ref = volume_utils.introduce_sr(self._session, sr_uuid, label,
params)
if sr_ref is None:
raise exception.NovaException(_('Could not introduce SR'))
return sr_ref
def is_sr_on_host(self, sr_uuid):
LOG.debug(_('Checking for SR %s') % sr_uuid)
sr_ref = volume_utils.find_sr_by_uuid(self._session, sr_uuid)
if sr_ref:
return True
return False
    # Forgets the SR, provided it has been introduced to this host
def forget_sr(self, sr_uuid):
sr_ref = volume_utils.find_sr_by_uuid(self._session, sr_uuid)
if sr_ref is None:
            LOG.info(_('SR %s not found in the xapi database') % sr_uuid)
return
try:
volume_utils.forget_sr(self._session, sr_uuid)
except volume_utils.StorageError, exc:
LOG.exception(exc)
raise exception.NovaException(_('Could not forget SR'))
def attach_volume(self, connection_info, instance_name, mountpoint):
"""Attach volume storage to VM instance"""
# Before we start, check that the VM exists
vm_ref = vm_utils.lookup(self._session, instance_name)
if vm_ref is None:
raise exception.InstanceNotFound(instance_id=instance_name)
# NOTE: No Resource Pool concept so far
LOG.debug(_("Attach_volume: %(connection_info)s, %(instance_name)s,"
" %(mountpoint)s") % locals())
driver_type = connection_info['driver_volume_type']
if driver_type not in ['iscsi', 'xensm']:
raise exception.VolumeDriverNotFound(driver_type=driver_type)
data = connection_info['data']
if 'name_label' not in data:
label = 'tempSR-%s' % data['volume_id']
else:
label = data['name_label']
del data['name_label']
if 'name_description' not in data:
desc = 'Disk-for:%s' % instance_name
else:
desc = data['name_description']
LOG.debug(connection_info)
sr_params = {}
if u'sr_uuid' not in data:
sr_params = volume_utils.parse_volume_info(connection_info,
mountpoint)
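            # No SR uuid was supplied: derive the connection parameters from
            # the connection info and build a synthetic uuid for the
            # per-volume iSCSI SR that is introduced below.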
uuid = "FA15E-D15C-" + str(sr_params['id'])
sr_params['sr_type'] = 'iscsi'
else:
uuid = data['sr_uuid']
for k in data['introduce_sr_keys']:
sr_params[k] = data[k]
sr_params['name_description'] = desc
# Introduce SR
try:
sr_ref = self.introduce_sr(uuid, label, sr_params)
LOG.debug(_('Introduced %(label)s as %(sr_ref)s.') % locals())
except self._session.XenAPI.Failure, exc:
LOG.exception(exc)
raise volume_utils.StorageError(
_('Unable to introduce Storage Repository'))
vdi_uuid = None
target_lun = None
if 'vdi_uuid' in data:
vdi_uuid = data['vdi_uuid']
elif 'target_lun' in data:
target_lun = data['target_lun']
else:
vdi_uuid = None
# Introduce VDI and attach VBD to VM
try:
vdi_ref = volume_utils.introduce_vdi(self._session, sr_ref,
vdi_uuid, target_lun)
except volume_utils.StorageError, exc:
LOG.exception(exc)
self.forget_sr(uuid)
raise Exception(_('Unable to create VDI on SR %(sr_ref)s for'
' instance %(instance_name)s') % locals())
dev_number = volume_utils.mountpoint_to_number(mountpoint)
try:
vbd_ref = vm_utils.create_vbd(self._session, vm_ref, vdi_ref,
dev_number, bootable=False)
except self._session.XenAPI.Failure, exc:
LOG.exception(exc)
self.forget_sr(uuid)
raise Exception(_('Unable to use SR %(sr_ref)s for'
' instance %(instance_name)s') % locals())
try:
self._session.call_xenapi("VBD.plug", vbd_ref)
except self._session.XenAPI.Failure, exc:
LOG.exception(exc)
self.forget_sr(uuid)
raise Exception(_('Unable to attach volume to instance %s')
% instance_name)
LOG.info(_('Mountpoint %(mountpoint)s attached to'
' instance %(instance_name)s') % locals())
def detach_volume(self, connection_info, instance_name, mountpoint):
"""Detach volume storage to VM instance"""
# Before we start, check that the VM exists
vm_ref = vm_utils.lookup(self._session, instance_name)
if vm_ref is None:
raise exception.InstanceNotFound(instance_id=instance_name)
# Detach VBD from VM
LOG.debug(_("Detach_volume: %(instance_name)s, %(mountpoint)s")
% locals())
device_number = volume_utils.mountpoint_to_number(mountpoint)
try:
vbd_ref = vm_utils.find_vbd_by_number(self._session, vm_ref,
device_number)
except volume_utils.StorageError, exc:
LOG.exception(exc)
raise Exception(_('Unable to locate volume %s') % mountpoint)
try:
sr_ref = volume_utils.find_sr_from_vbd(self._session, vbd_ref)
vm_utils.unplug_vbd(self._session, vbd_ref)
except volume_utils.StorageError, exc:
LOG.exception(exc)
raise Exception(_('Unable to detach volume %s') % mountpoint)
try:
vm_utils.destroy_vbd(self._session, vbd_ref)
except volume_utils.StorageError, exc:
LOG.exception(exc)
raise Exception(_('Unable to destroy vbd %s') % mountpoint)
# Forget SR only if no other volumes on this host are using it
try:
volume_utils.purge_sr(self._session, sr_ref)
except volume_utils.StorageError, exc:
LOG.exception(exc)
raise Exception(_('Error purging SR %s') % sr_ref)
LOG.info(_('Mountpoint %(mountpoint)s detached from'
' instance %(instance_name)s') % locals())
| |
# coding=utf-8
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Prepares and launches the emulator."""
import os
import subprocess
import time
from typing import Optional
from absl import logging
from android_env.components import errors
import grpc
from android_env.proto import emulator_controller_pb2
from android_env.proto import emulator_controller_pb2_grpc
from google.protobuf import empty_pb2
# Period in milliseconds to ping the Emulator gRPC server to keep the connection
# alive. If too frequent, we may get errors such as "Too many pings.", which can
# bring down the process.
_GRPC_KEEPALIVE_MS = 100000
class EmulatorLauncher():
"""Handles launching the emulator."""
def __init__(
self,
local_tmp_dir: str = '/tmp',
adb_port: Optional[int] = None,
adb_server_port: Optional[int] = None,
emulator_console_port: Optional[int] = None,
grpc_port: int = -1,
emulator_path: str = '',
android_sdk_root: str = '',
avd_name: str = '',
run_headless: bool = False,
kvm_device: str = '/dev/kvm',
gpu_mode: str = 'swiftshader_indirect',
android_avd_home: str = '',
startup_wait_time_sec: int = 300,
):
"""Installs required files locally and launches the emulator.
Args:
local_tmp_dir: Local directory for logs and maybe installing the AVD.
adb_port: ADB port for the Android device.
      adb_server_port: Port of the ADB server daemon.
emulator_console_port: Port for telnet communication with the emulator.
grpc_port: Port for gRPC communication with the emulator.
emulator_path: Path to the emulator binary.
android_sdk_root: Root directory of the Android SDK.
avd_name: Name of the AVD.
run_headless: Whether to run in headless mode.
kvm_device: Path to the KVM device.
gpu_mode: GPU mode override. Supported values are listed at:
https://developer.android.com/studio/run/emulator-acceleration#accel-graphics
android_avd_home: Local directory for AVDs.
startup_wait_time_sec: Timeout for booting the emulator.
"""
self._local_tmp_dir = local_tmp_dir
self._adb_port = adb_port
self._adb_server_port = adb_server_port
self._emulator_console_port = emulator_console_port
self._emulator_path = emulator_path
self._android_sdk_root = android_sdk_root
self._avd_name = avd_name
self._run_headless = run_headless
self._kvm_device = kvm_device
self._gpu_mode = gpu_mode
self._android_avd_home = android_avd_home
self._startup_wait_time_sec = startup_wait_time_sec
self._grpc_port = grpc_port
self._emulator = None
self._emulator_output = None
self._emulator_stub = None
self._is_closed = False
def launch(self) -> None:
"""Launches the emulator."""
logging.info('Booting the emulator [%s]', self._emulator_path)
# Set necessary environment variables.
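    # self._emulator_path is expected to end with the binary name 'emulator'
    # (8 characters); stripping it yields the emulator directory, which
    # contains the bundled lib64/ libraries referenced below.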
base_lib_dir = self._emulator_path[:-8] + 'lib64/'
ld_library_path = ':'.join([
base_lib_dir + 'x11/',
base_lib_dir + 'qt/lib/',
base_lib_dir + 'gles_swiftshader/',
base_lib_dir
])
extra_env_vars = {
'ANDROID_HOME': '',
'ANDROID_SDK_ROOT': self._android_sdk_root,
'ANDROID_AVD_HOME': self._android_avd_home,
'ANDROID_EMULATOR_KVM_DEVICE': self._kvm_device,
'ANDROID_ADB_SERVER_PORT': str(self._adb_server_port),
'LD_LIBRARY_PATH': ld_library_path,
'QT_DEBUG_PLUGINS': '1',
'QT_XKB_CONFIG_ROOT': str(self._emulator_path[:-8] + 'qt_config/'),
}
logging.info('extra_env_vars: %s', str(extra_env_vars))
env_vars = dict(os.environ).copy()
env_vars.update(extra_env_vars)
# Compile command.
grpc_port = ['-grpc', str(self._grpc_port)] if self._grpc_port >= 0 else []
run_headless = ['-no-skin', '-no-window'] if self._run_headless else []
ports = ['-ports', '%s,%s' % (self._emulator_console_port, self._adb_port)]
command = [
self._emulator_path,
'-no-snapshot',
'-gpu', self._gpu_mode,
'-no-audio',
'-verbose',
'-avd', self._avd_name,
] + grpc_port + run_headless + ports
logging.info('Emulator launch command: %s', ' '.join(command))
# Prepare logfile.
emulator_logfile = os.path.join(self._local_tmp_dir, 'emulator_output')
self._emulator_output = open(emulator_logfile, 'wb')
# Spawn the emulator process.
self._emulator = subprocess.Popen(
command,
env=env_vars,
stdout=self._emulator_output,
stderr=self._emulator_output)
self._emulator_stub = EmulatorLauncher.create_emulator_stub(self._grpc_port)
# Wait for the emulator to boot.
start_time = time.time()
deadline = start_time + self._startup_wait_time_sec
success = False
while time.time() < deadline:
emu_status = self._emulator_stub.getStatus(empty_pb2.Empty())
logging.info('Waiting for emulator to start. Emulator uptime: %rms',
emu_status.uptime)
if emu_status.booted:
success = True
break
time.sleep(5.0)
elapsed_time = time.time() - start_time
if not success:
raise errors.SimulatorCrashError(
'The emulator failed to boot after %r seconds' %
self._startup_wait_time_sec)
logging.info('Done booting the emulator (in %f seconds).', elapsed_time)
def restart(self) -> None:
logging.info('Restarting the emulator...')
self._kill_emulator_process()
self.launch()
logging.info('Done restarting the emulator.')
@classmethod
def create_emulator_stub(
cls,
grpc_port: int,
use_async: bool = False,
) -> emulator_controller_pb2_grpc.EmulatorControllerStub:
"""Returns a stub to the EmulatorController service."""
logging.info('Creating gRPC channel to the emulator on port %r', grpc_port)
port = f'localhost:{grpc_port}'
options = [('grpc.max_send_message_length', -1),
('grpc.max_receive_message_length', -1),
('grpc.keepalive_time_ms', _GRPC_KEEPALIVE_MS)]
creds = grpc.local_channel_credentials()
if use_async:
channel = grpc.aio.secure_channel(port, creds, options=options)
else:
channel = grpc.secure_channel(port, creds, options=options)
grpc.channel_ready_future(channel).result() # Wait for channel to be ready.
logging.info('Added gRPC channel for the Emulator on port %s', port)
return emulator_controller_pb2_grpc.EmulatorControllerStub(channel)
def get_emulator_stub(
self) -> emulator_controller_pb2_grpc.EmulatorControllerStub:
"""Returns the EmulatorController stub for the launched emulator."""
return self._emulator_stub
def _kill_emulator_process(self) -> None:
"""Shuts down the emulator process."""
if self._emulator:
logging.info('Killing the emulator process...')
self._emulator_stub.setVmState(
emulator_controller_pb2.VmRunState(
state=emulator_controller_pb2.VmRunState.RunState.SHUTDOWN))
logging.info('Will wait 30s for it to finish gracefully...')
try:
self._emulator.wait(timeout=30.0)
except subprocess.TimeoutExpired:
logging.exception(
'The emulator process did not finish after 30s. '
'returncode: %s. Will now try to kill() it.',
self._emulator.returncode)
self._emulator.kill()
self._emulator = None
self._emulator_output.close()
logging.info('Done killing the emulator process.')
def close(self):
"""Clean up launcher files and processes."""
if not self._is_closed:
self._kill_emulator_process()
self._is_closed = True
def __del__(self):
self.close()
| |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import Dict, Optional, Sequence, Tuple, Union
from google.api_core.operation import Operation
from google.api_core.retry import Retry
from google.cloud.orchestration.airflow.service_v1 import EnvironmentsClient, ImageVersionsClient
from google.cloud.orchestration.airflow.service_v1.services.environments.pagers import ListEnvironmentsPager
from google.cloud.orchestration.airflow.service_v1.services.image_versions.pagers import (
ListImageVersionsPager,
)
from google.cloud.orchestration.airflow.service_v1.types import Environment
from google.protobuf.field_mask_pb2 import FieldMask
from airflow import AirflowException
from airflow.providers.google.common.consts import CLIENT_INFO
from airflow.providers.google.common.hooks.base_google import GoogleBaseHook
class CloudComposerHook(GoogleBaseHook):
"""Hook for Google Cloud Composer APIs."""
client_options = {'api_endpoint': 'composer.googleapis.com:443'}
def get_environment_client(self) -> EnvironmentsClient:
"""Retrieves client library object that allow access Environments service."""
return EnvironmentsClient(
credentials=self._get_credentials(),
client_info=CLIENT_INFO,
client_options=self.client_options,
)
def get_image_versions_client(
self,
) -> ImageVersionsClient:
"""Retrieves client library object that allow access Image Versions service."""
return ImageVersionsClient(
credentials=self._get_credentials(),
client_info=CLIENT_INFO,
client_options=self.client_options,
)
def wait_for_operation(self, operation: Operation, timeout: Optional[float] = None):
"""Waits for long-lasting operation to complete."""
try:
return operation.result(timeout=timeout)
except Exception:
error = operation.exception(timeout=timeout)
raise AirflowException(error)
def get_operation(self, operation_name):
return self.get_environment_client().transport.operations_client.get_operation(name=operation_name)
def get_environment_name(self, project_id, region, environment_id):
return f'projects/{project_id}/locations/{region}/environments/{environment_id}'
def get_parent(self, project_id, region):
return f'projects/{project_id}/locations/{region}'
@GoogleBaseHook.fallback_to_default_project_id
def create_environment(
self,
project_id: str,
region: str,
environment: Union[Environment, Dict],
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> Operation:
"""
Create a new environment.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param environment: The environment to create. This corresponds to the ``environment`` field on the
``request`` instance; if ``request`` is provided, this should not be set.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_environment_client()
result = client.create_environment(
request={'parent': self.get_parent(project_id, region), 'environment': environment},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def delete_environment(
self,
project_id: str,
region: str,
environment_id: str,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> Operation:
"""
Delete an environment.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param environment_id: Required. The ID of the Google Cloud environment that the service belongs to.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_environment_client()
name = self.get_environment_name(project_id, region, environment_id)
result = client.delete_environment(
request={"name": name}, retry=retry, timeout=timeout, metadata=metadata
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def get_environment(
self,
project_id: str,
region: str,
environment_id: str,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> Environment:
"""
Get an existing environment.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param environment_id: Required. The ID of the Google Cloud environment that the service belongs to.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_environment_client()
result = client.get_environment(
request={'name': self.get_environment_name(project_id, region, environment_id)},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def list_environments(
self,
project_id: str,
region: str,
page_size: Optional[int] = None,
page_token: Optional[str] = None,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> ListEnvironmentsPager:
"""
List environments.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param page_size: The maximum number of environments to return.
:param page_token: The next_page_token value returned from a previous List
request, if any.
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_environment_client()
result = client.list_environments(
request={
"parent": self.get_parent(project_id, region),
"page_size": page_size,
"page_token": page_token,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def update_environment(
self,
project_id: str,
region: str,
environment_id: str,
environment: Union[Environment, Dict],
update_mask: Union[Dict, FieldMask],
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> Operation:
r"""
Update an environment.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
:param environment_id: Required. The ID of the Google Cloud environment that the service belongs to.
:param environment: A patch environment. Fields specified by the ``updateMask`` will be copied from
the patch environment into the environment under update.
This corresponds to the ``environment`` field on the ``request`` instance; if ``request`` is
provided, this should not be set.
:param update_mask: Required. A comma-separated list of paths, relative to ``Environment``, of fields
to update. If a dict is provided, it must be of the same form as the protobuf message
:class:`~google.protobuf.field_mask_pb2.FieldMask`
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_environment_client()
name = self.get_environment_name(project_id, region, environment_id)
result = client.update_environment(
request={"name": name, "environment": environment, "update_mask": update_mask},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
@GoogleBaseHook.fallback_to_default_project_id
def list_image_versions(
self,
project_id: str,
region: str,
page_size: Optional[int] = None,
page_token: Optional[str] = None,
include_past_releases: bool = False,
retry: Optional[Retry] = None,
timeout: Optional[float] = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> ListImageVersionsPager:
"""
List ImageVersions for provided location.
:param project_id: Required. The ID of the Google Cloud project that the service belongs to.
:param region: Required. The ID of the Google Cloud region that the service belongs to.
        :param page_size: The maximum number of image versions to return.
:param page_token: The next_page_token value returned from a previous List
request, if any.
:param include_past_releases: Flag to include past releases
:param retry: Designation of what errors, if any, should be retried.
:param timeout: The timeout for this request.
:param metadata: Strings which should be sent along with the request as metadata.
"""
client = self.get_image_versions_client()
result = client.list_image_versions(
request={
'parent': self.get_parent(project_id, region),
"page_size": page_size,
"page_token": page_token,
"include_past_releases": include_past_releases,
},
retry=retry,
timeout=timeout,
metadata=metadata,
)
return result
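# Illustrative usage sketch (not part of the hook above). The connection id and the
# resource names are assumptions for the example; only the hook methods themselves
# come from the class defined in this module.
def _example_create_environment():
    hook = CloudComposerHook(gcp_conn_id='google_cloud_default')
    operation = hook.create_environment(
        project_id='example-project',
        region='us-central1',
        environment={'name': 'projects/example-project/locations/us-central1/environments/example-env'},
    )
    # Block until the long-running operation finishes (or raises AirflowException).
    environment = hook.wait_for_operation(operation, timeout=3600)
    return environment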
| |
import logging
from typing import Dict
from unittest import mock
import pytest
from great_expectations import DataContext
from great_expectations.core.usage_statistics.schemas import (
anonymized_usage_statistics_record_schema,
)
from great_expectations.core.usage_statistics.usage_statistics import (
UsageStatisticsHandler,
get_profiler_run_usage_statistics,
)
from great_expectations.data_context import BaseDataContext
from great_expectations.data_context.types.base import DataContextConfig
from great_expectations.rule_based_profiler.rule_based_profiler import RuleBasedProfiler
from tests.core.usage_statistics.util import usage_stats_invalid_messages_exist
from tests.integration.usage_statistics.test_integration_usage_statistics import (
USAGE_STATISTICS_QA_URL,
)
@pytest.fixture
def in_memory_data_context_config_usage_stats_enabled():
return DataContextConfig(
**{
"commented_map": {},
"config_version": 2,
"plugins_directory": None,
"evaluation_parameter_store_name": "evaluation_parameter_store",
"validations_store_name": "validations_store",
"expectations_store_name": "expectations_store",
"config_variables_file_path": None,
"datasources": {},
"stores": {
"expectations_store": {
"class_name": "ExpectationsStore",
},
"validations_store": {
"class_name": "ValidationsStore",
},
"evaluation_parameter_store": {
"class_name": "EvaluationParameterStore",
},
},
"data_docs_sites": {},
"validation_operators": {
"default": {
"class_name": "ActionListValidationOperator",
"action_list": [],
}
},
"anonymous_usage_statistics": {
"enabled": True,
"data_context_id": "00000000-0000-0000-0000-000000000001",
"usage_statistics_url": USAGE_STATISTICS_QA_URL,
},
}
)
@pytest.fixture
def sample_partial_message():
return {
"event": "checkpoint.run",
"event_payload": {
"anonymized_name": "f563d9aa1604e16099bb7dff7b203319",
"config_version": 1.0,
"anonymized_expectation_suite_name": "6a04fc37da0d43a4c21429f6788d2cff",
"anonymized_action_list": [
{
"anonymized_name": "8e3e134cd0402c3970a02f40d2edfc26",
"parent_class": "StoreValidationResultAction",
},
{
"anonymized_name": "40e24f0c6b04b6d4657147990d6f39bd",
"parent_class": "StoreEvaluationParametersAction",
},
{
"anonymized_name": "2b99b6b280b8a6ad1176f37580a16411",
"parent_class": "UpdateDataDocsAction",
},
],
"anonymized_validations": [
{
"anonymized_batch_request": {
"anonymized_batch_request_required_top_level_properties": {
"anonymized_datasource_name": "a732a247720783a5931fa7c4606403c2",
"anonymized_data_connector_name": "d52d7bff3226a7f94dd3510c1040de78",
"anonymized_data_asset_name": "556e8c06239d09fc66f424eabb9ca491",
},
"batch_request_optional_top_level_keys": [
"batch_identifiers",
"runtime_parameters",
],
"runtime_parameters_keys": ["batch_data"],
},
"anonymized_expectation_suite_name": "6a04fc37da0d43a4c21429f6788d2cff",
"anonymized_action_list": [
{
"anonymized_name": "8e3e134cd0402c3970a02f40d2edfc26",
"parent_class": "StoreValidationResultAction",
},
{
"anonymized_name": "40e24f0c6b04b6d4657147990d6f39bd",
"parent_class": "StoreEvaluationParametersAction",
},
{
"anonymized_name": "2b99b6b280b8a6ad1176f37580a16411",
"parent_class": "UpdateDataDocsAction",
},
],
},
],
},
"success": True,
# "version": "1.0.0",
# "event_time": "2020-06-25T16:08:28.070Z",
# "event_duration": 123,
# "data_context_id": "00000000-0000-0000-0000-000000000002",
# "data_context_instance_id": "10000000-0000-0000-0000-000000000002",
# "ge_version": "0.13.45.manual_testing",
"x-forwarded-for": "00.000.00.000, 00.000.000.000",
}
def test_usage_statistics_handler_build_envelope(
in_memory_data_context_config_usage_stats_enabled, sample_partial_message
):
"""This test is for a happy path only but will fail if there is an exception thrown in build_envelope"""
context: BaseDataContext = BaseDataContext(
in_memory_data_context_config_usage_stats_enabled
)
usage_statistics_handler = UsageStatisticsHandler(
data_context=context,
data_context_id=in_memory_data_context_config_usage_stats_enabled.anonymous_usage_statistics.data_context_id,
usage_statistics_url=in_memory_data_context_config_usage_stats_enabled.anonymous_usage_statistics.usage_statistics_url,
)
assert (
usage_statistics_handler._data_context_id
== "00000000-0000-0000-0000-000000000001"
)
envelope = usage_statistics_handler.build_envelope(sample_partial_message)
required_keys = [
"event",
"event_payload",
"version",
"ge_version",
"data_context_id",
"data_context_instance_id",
"event_time",
]
assert all([key in envelope.keys() for key in required_keys])
assert envelope["version"] == "1.0.0"
assert envelope["data_context_id"] == "00000000-0000-0000-0000-000000000001"
def test_usage_statistics_handler_validate_message_failure(
caplog, in_memory_data_context_config_usage_stats_enabled, sample_partial_message
):
# caplog default is WARNING and above, we want to see DEBUG level messages for this test
caplog.set_level(
level=logging.DEBUG,
logger="great_expectations.core.usage_statistics.usage_statistics",
)
context: BaseDataContext = BaseDataContext(
in_memory_data_context_config_usage_stats_enabled
)
usage_statistics_handler = UsageStatisticsHandler(
data_context=context,
data_context_id=in_memory_data_context_config_usage_stats_enabled.anonymous_usage_statistics.data_context_id,
usage_statistics_url=in_memory_data_context_config_usage_stats_enabled.anonymous_usage_statistics.usage_statistics_url,
)
assert (
usage_statistics_handler._data_context_id
== "00000000-0000-0000-0000-000000000001"
)
validated_message = usage_statistics_handler.validate_message(
sample_partial_message, anonymized_usage_statistics_record_schema
)
assert not validated_message
assert usage_stats_invalid_messages_exist(caplog.messages)
def test_usage_statistics_handler_validate_message_success(
caplog, in_memory_data_context_config_usage_stats_enabled, sample_partial_message
):
# caplog default is WARNING and above, we want to see DEBUG level messages for this test
caplog.set_level(
level=logging.DEBUG,
logger="great_expectations.core.usage_statistics.usage_statistics",
)
context: BaseDataContext = BaseDataContext(
in_memory_data_context_config_usage_stats_enabled
)
usage_statistics_handler = UsageStatisticsHandler(
data_context=context,
data_context_id=in_memory_data_context_config_usage_stats_enabled.anonymous_usage_statistics.data_context_id,
usage_statistics_url=in_memory_data_context_config_usage_stats_enabled.anonymous_usage_statistics.usage_statistics_url,
)
assert (
usage_statistics_handler._data_context_id
== "00000000-0000-0000-0000-000000000001"
)
envelope = usage_statistics_handler.build_envelope(sample_partial_message)
validated_message = usage_statistics_handler.validate_message(
envelope, anonymized_usage_statistics_record_schema
)
assert validated_message
assert not usage_stats_invalid_messages_exist(caplog.messages)
def test_build_init_payload(
titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled,
):
"""This test is for a happy path only but will fail if there is an exception thrown in init_payload"""
context: DataContext = titanic_pandas_data_context_with_v013_datasource_with_checkpoints_v1_with_empty_store_stats_enabled
usage_statistics_handler = context._usage_statistics_handler
init_payload = usage_statistics_handler.build_init_payload()
assert list(init_payload.keys()) == [
"platform.system",
"platform.release",
"version_info",
"anonymized_datasources",
"anonymized_stores",
"anonymized_validation_operators",
"anonymized_data_docs_sites",
"anonymized_expectation_suites",
]
assert init_payload["anonymized_datasources"] == [
{
"anonymized_data_connectors": [
{
"anonymized_name": "af09acd176f54642635a8a2975305437",
"parent_class": "InferredAssetFilesystemDataConnector",
},
{
"anonymized_name": "e475f70ca0bcbaf2748b93da5e9867ec",
"parent_class": "ConfiguredAssetFilesystemDataConnector",
},
{
"anonymized_name": "2030a96b1eaa8579087d31709fb6ec1b",
"parent_class": "ConfiguredAssetFilesystemDataConnector",
},
{
"anonymized_name": "d52d7bff3226a7f94dd3510c1040de78",
"parent_class": "RuntimeDataConnector",
},
],
"anonymized_execution_engine": {
"anonymized_name": "212039ff9860a796a32c75c7d5c2fac0",
"parent_class": "PandasExecutionEngine",
},
"anonymized_name": "a732a247720783a5931fa7c4606403c2",
"parent_class": "Datasource",
}
]
assert init_payload["anonymized_expectation_suites"] == []
@mock.patch("great_expectations.data_context.data_context.DataContext")
def test_get_profiler_run_usage_statistics_with_handler_valid_payload(
mock_data_context: mock.MagicMock,
):
# Ensure that real handler gets passed down by the context
handler: UsageStatisticsHandler = UsageStatisticsHandler(
mock_data_context, "my_id", "my_url"
)
mock_data_context.usage_statistics_handler = handler
profiler: RuleBasedProfiler = RuleBasedProfiler(
name="my_profiler", config_version=1.0, data_context=mock_data_context
)
override_rules: Dict[str, dict] = {
"my_override_rule": {
"domain_builder": {
"class_name": "ColumnDomainBuilder",
"module_name": "great_expectations.rule_based_profiler.domain_builder",
},
"parameter_builders": [
{
"class_name": "MetricMultiBatchParameterBuilder",
"module_name": "great_expectations.rule_based_profiler.parameter_builder",
"name": "my_parameter",
"metric_name": "my_metric",
},
{
"class_name": "NumericMetricRangeMultiBatchParameterBuilder",
"module_name": "great_expectations.rule_based_profiler.parameter_builder",
"name": "my_other_parameter",
"metric_name": "my_other_metric",
},
],
"expectation_configuration_builders": [
{
"class_name": "DefaultExpectationConfigurationBuilder",
"module_name": "great_expectations.rule_based_profiler.expectation_configuration_builder",
"expectation_type": "expect_column_pair_values_A_to_be_greater_than_B",
"column_A": "$domain.domain_kwargs.column_A",
"column_B": "$domain.domain_kwargs.column_B",
"my_one_arg": "$parameter.my_parameter.value[0]",
"meta": {
"details": {
"my_parameter_estimator": "$parameter.my_parameter.details",
"note": "Important remarks about estimation algorithm.",
},
},
},
{
"class_name": "DefaultExpectationConfigurationBuilder",
"module_name": "great_expectations.rule_based_profiler.expectation_configuration_builder",
"expectation_type": "expect_column_min_to_be_between",
"column": "$domain.domain_kwargs.column",
"my_another_arg": "$parameter.my_other_parameter.value[0]",
"meta": {
"details": {
"my_other_parameter_estimator": "$parameter.my_other_parameter.details",
"note": "Important remarks about estimation algorithm.",
},
},
},
],
},
}
payload: dict = get_profiler_run_usage_statistics(
profiler=profiler, rules=override_rules
)
assert payload == {
"anonymized_name": "a0061ec021855cd2b3a994dd8d90fe5d",
"anonymized_rules": [
{
"anonymized_domain_builder": {"parent_class": "ColumnDomainBuilder"},
"anonymized_expectation_configuration_builders": [
{
"expectation_type": "expect_column_pair_values_A_to_be_greater_than_B",
"parent_class": "DefaultExpectationConfigurationBuilder",
},
{
"expectation_type": "expect_column_min_to_be_between",
"parent_class": "DefaultExpectationConfigurationBuilder",
},
],
"anonymized_name": "bd8a8b4465a94b363caf2b307c080547",
"anonymized_parameter_builders": [
{
"anonymized_name": "25dac9e56a1969727bc0f90db6eaa833",
"parent_class": "MetricMultiBatchParameterBuilder",
},
{
"anonymized_name": "be5baa3f1064e6e19356f2168968cbeb",
"parent_class": "NumericMetricRangeMultiBatchParameterBuilder",
},
],
}
],
"config_version": 1.0,
"rule_count": 1,
"variable_count": 0,
}
@mock.patch("great_expectations.data_context.data_context.DataContext")
def test_get_profiler_run_usage_statistics_with_handler_invalid_payload(
mock_data_context: mock.MagicMock,
):
# Ensure that real handler gets passed down by the context
handler: UsageStatisticsHandler = UsageStatisticsHandler(
mock_data_context, "my_id", "my_url"
)
mock_data_context.usage_statistics_handler = handler
profiler: RuleBasedProfiler = RuleBasedProfiler(
name="my_profiler", config_version=1.0, data_context=mock_data_context
)
payload: dict = get_profiler_run_usage_statistics(profiler=profiler)
# Payload won't pass schema validation due to a lack of rules but we can confirm that it is anonymized
assert payload == {
"anonymized_name": "a0061ec021855cd2b3a994dd8d90fe5d",
"config_version": 1.0,
"rule_count": 0,
"variable_count": 0,
}
def test_get_profiler_run_usage_statistics_without_handler():
    # Without a DataContext, the usage stats handler is not propagated down to the RBP
profiler: RuleBasedProfiler = RuleBasedProfiler(
name="my_profiler",
config_version=1.0,
)
payload: dict = get_profiler_run_usage_statistics(profiler=profiler)
assert payload == {}
| |
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Data Commons Python API unit tests.
Unit tests for Place methods in the Data Commons Python API.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
try:
from unittest.mock import patch
except ImportError:
from mock import patch
import datacommons as dc
import datacommons.utils as utils
import json
import unittest
import six.moves.urllib as urllib
def request_mock(*args, **kwargs):
""" A mock urlopen requests sent in the requests package. """
# Create the mock response object.
class MockResponse:
def __init__(self, json_data):
self.json_data = json_data
def read(self):
return self.json_data
req = args[0]
data = json.loads(req.data)
# Mock responses for urlopen requests to get_places_in.
if req.get_full_url() == utils._API_ROOT + utils._API_ENDPOINTS['get_places_in']:
if (data['dcids'] == ['geoId/06085', 'geoId/24031']
and data['place_type'] == 'City'):
# Response returned when querying for multiple valid dcids.
res_json = json.dumps([
{
'dcid': 'geoId/06085',
'place': 'geoId/0649670',
},
{
'dcid': 'geoId/24031',
'place': 'geoId/2467675',
},
{
'dcid': 'geoId/24031',
'place': 'geoId/2476650',
},
])
return MockResponse(json.dumps({'payload': res_json}))
if (data['dcids'] == ['geoId/06085', 'dc/MadDcid']
and data['place_type'] == 'City'):
# Response returned when querying for a dcid that does not exist.
res_json = json.dumps([
{
'dcid': 'geoId/06085',
'place': 'geoId/0649670',
},
])
return MockResponse(json.dumps({'payload': res_json}))
if data['dcids'] == ['dc/MadDcid', 'dc/MadderDcid']\
and data['place_type'] == 'City':
# Response returned when both given dcids do not exist.
res_json = json.dumps([])
return MockResponse(json.dumps({'payload': res_json}))
if data['dcids'] == [] and data['place_type'] == 'City':
res_json = json.dumps([])
# Response returned when no dcids are given.
return MockResponse(json.dumps({'payload': res_json}))
# Mock responses for urlopen requests to get_stats.
if req.get_full_url() == utils._API_ROOT + utils._API_ENDPOINTS['get_stats']:
if (data['place'] == ['geoId/05', 'geoId/06'] and
data['stats_var'] == 'dc/0hyp6tkn18vcb'):
# Response returned when querying for multiple valid dcids.
res_json = json.dumps({
'geoId/05': {
'data': {
'2011': 18136,
'2012': 17279,
'2013': 17459,
'2014': 16966,
'2015': 17173,
'2016': 17041,
'2017': 17783,
'2018': 18003
},
'place_name': 'Arkansas'
},
'geoId/06': {
'data': {
'2011': 316667,
'2012': 324116,
'2013': 331853,
'2014': 342818,
'2015': 348979,
'2016': 354806,
'2017': 360645,
'2018': 366331
},
'place_name': 'California'
}
})
return MockResponse(json.dumps({'payload': res_json}))
if (data['place'] == ['geoId/00'] and
data['stats_var'] == 'dc/0hyp6tkn18vcb'):
# No data for the request
res_json = json.dumps({
'geoId/00': None
})
return MockResponse(json.dumps({'payload': res_json}))
if ((data['place'] == ['geoId/05', 'dc/MadDcid'] or
data['place'] == ['geoId/05']) and
data['stats_var'] == 'dc/0hyp6tkn18vcb'):
# Response ignores dcid that does not exist.
res_json = json.dumps({
'geoId/05': {
'data': {
'2011': 18136,
'2012': 17279,
'2013': 17459,
'2014': 16966,
'2015': 17173,
'2016': 17041,
'2017': 17783,
'2018': 18003
},
'place_name': 'Arkansas'
}
})
return MockResponse(json.dumps({'payload': res_json}))
if (data['place'] == ['geoId/06'] and
data['stats_var'] == 'dc/0hyp6tkn18vcb'):
res_json = json.dumps({
'geoId/06': {
'data': {
'2011': 316667,
'2012': 324116,
'2013': 331853,
'2014': 342818,
'2015': 348979,
'2016': 354806,
'2017': 360645,
'2018': 366331
},
'place_name': 'California'
}
})
return MockResponse(json.dumps({'payload': res_json}))
if (data['place'] == ['dc/MadDcid', 'dc/MadderDcid'] and
data['stats_var'] == 'dc/0hyp6tkn18vcb'):
# Response returned when both given dcids do not exist.
res_json = json.dumps({})
return MockResponse(json.dumps({'payload': res_json}))
if data['place'] == [] and data['stats_var'] == 'dc/0hyp6tkn18vcb':
res_json = json.dumps({})
# Response returned when no dcids are given.
return MockResponse(json.dumps({'payload': res_json}))
if (data['place'] == ['geoId/48'] and
data['stats_var'] == 'dc/0hyp6tkn18vcb'):
if (data.get('measurement_method') == 'MM1' and
data.get('unit') == 'Inch' and
data.get('observation_period') == 'P1Y'):
res_json = json.dumps({
'geoId/48': {
'data': {
'2015': 1,
'2016': 1,
},
'place_name': 'Texas'
}
})
elif data.get('measurement_method') == 'MM1':
res_json = json.dumps({
'geoId/48': {
'data': {
'2015': 2,
'2016': 2,
},
'place_name': 'Texas'
}
})
else:
res_json = json.dumps({
'geoId/48': {
'data': {
'2015': 3,
'2016': 3,
},
'place_name': 'Texas'
}
})
return MockResponse(json.dumps({'payload': res_json}))
# Otherwise, return an empty response and a 404.
return urllib.error.HTTPError(None, 404, None, None, None)
class TestGetPlacesIn(unittest.TestCase):
""" Unit stests for get_places_in. """
@patch('six.moves.urllib.request.urlopen', side_effect=request_mock)
def test_multiple_dcids(self, urlopen):
""" Calling get_places_in with proper dcids returns valid results. """
# Call get_places_in
places = dc.get_places_in(['geoId/06085', 'geoId/24031'], 'City')
self.assertDictEqual(places, {
'geoId/06085': ['geoId/0649670'],
'geoId/24031': ['geoId/2467675', 'geoId/2476650']
})
@patch('six.moves.urllib.request.urlopen', side_effect=request_mock)
def test_bad_dcids(self, urlopen):
""" Calling get_places_in with dcids that do not exist returns empty
results.
"""
# Call get_places_in with one dcid that does not exist
bad_dcids_1 = dc.get_places_in(['geoId/06085', 'dc/MadDcid'], 'City')
self.assertDictEqual(bad_dcids_1, {
'geoId/06085': ['geoId/0649670'],
'dc/MadDcid': []
})
# Call get_places_in when both dcids do not exist
bad_dcids_2 = dc.get_places_in(['dc/MadDcid', 'dc/MadderDcid'], 'City')
self.assertDictEqual(bad_dcids_2, {
'dc/MadDcid': [],
'dc/MadderDcid': []
})
@patch('six.moves.urllib.request.urlopen', side_effect=request_mock)
def test_no_dcids(self, urlopen):
""" Calling get_places_in with no dcids returns empty results. """
# Call get_places_in with no dcids.
bad_dcids = dc.get_places_in(['dc/MadDcid', 'dc/MadderDcid'], 'City')
self.assertDictEqual(bad_dcids, {
'dc/MadDcid': [],
'dc/MadderDcid': []
})
class TestGetStats(unittest.TestCase):
""" Unit stests for get_stats. """
@patch('six.moves.urllib.request.urlopen', side_effect=request_mock)
def test_multiple_dcids(self, urlopen):
""" Calling get_stats with proper dcids returns valid results. """
# Call get_stats
stats = dc.get_stats(['geoId/05', 'geoId/06'], 'dc/0hyp6tkn18vcb', 'all')
self.assertDictEqual(
stats, {
'geoId/05': {
'data': {
'2011': 18136,
'2012': 17279,
'2013': 17459,
'2014': 16966,
'2015': 17173,
'2016': 17041,
'2017': 17783,
'2018': 18003
},
'place_name': 'Arkansas'
},
'geoId/06': {
'data': {
'2011': 316667,
'2012': 324116,
'2013': 331853,
'2014': 342818,
'2015': 348979,
'2016': 354806,
'2017': 360645,
'2018': 366331
},
'place_name': 'California'
}
})
# Call get_stats for latest obs
stats = dc.get_stats(['geoId/05', 'geoId/06'], 'dc/0hyp6tkn18vcb', 'latest')
self.assertDictEqual(
stats, {
'geoId/05': {
'data': {
'2018': 18003
},
'place_name': 'Arkansas'
},
'geoId/06': {
'data': {
'2018': 366331
},
'place_name': 'California'
}
})
# Call get_stats for specific obs
stats = dc.get_stats(['geoId/05', 'geoId/06'], 'dc/0hyp6tkn18vcb', ['2013', '2018'])
self.assertDictEqual(
stats, {
'geoId/05': {
'data': {
'2013': 17459,
'2018': 18003
},
'place_name': 'Arkansas'
},
'geoId/06': {
'data': {
'2013': 331853,
'2018': 366331
},
'place_name': 'California'
}
})
    # Call get_stats -- dates must be in an iterable
stats = dc.get_stats(['geoId/05', 'geoId/06'], 'dc/0hyp6tkn18vcb', '2018')
self.assertDictEqual(
stats, {
'geoId/05': {
'data': {
},
'place_name': 'Arkansas'
},
'geoId/06': {
'data': {
},
'place_name': 'California'
}
})
@patch('six.moves.urllib.request.urlopen', side_effect=request_mock)
def test_opt_args(self, urlopen):
""" Calling get_stats with mmethod, unit, and obs period returns specific data.
"""
# Set the API key
dc.set_api_key('TEST-API-KEY')
# Call get_stats with all optional args
stats = dc.get_stats(['geoId/48'], 'dc/0hyp6tkn18vcb', 'latest', 'MM1',
'Inch', 'P1Y')
self.assertDictEqual(
stats, {
'geoId/48': {
'data': {
'2016': 1
},
'place_name': 'Texas'
}
})
    # Call get_stats with only the measurement method specified
stats = dc.get_stats(['geoId/48'], 'dc/0hyp6tkn18vcb', 'latest', 'MM1')
self.assertDictEqual(
stats, {
'geoId/48': {
'data': {
'2016': 2
},
'place_name': 'Texas'
}
})
# Call get_stats without optional args
stats = dc.get_stats(['geoId/48'], 'dc/0hyp6tkn18vcb', 'latest')
self.assertDictEqual(
stats, {
'geoId/48': {
'data': {
'2016': 3
},
'place_name': 'Texas'
}
})
@patch('six.moves.urllib.request.urlopen', side_effect=request_mock)
def test_bad_dcids(self, urlopen):
""" Calling get_stats with dcids that do not exist returns empty
results.
"""
# Call get_stats with one dcid that does not exist
bad_dcids_1 = dc.get_stats(['geoId/05', 'dc/MadDcid'], 'dc/0hyp6tkn18vcb')
self.assertDictEqual(
bad_dcids_1, {
'geoId/05': {
'data': {
'2018': 18003
},
'place_name': 'Arkansas'
}
})
# Call get_stats when both dcids do not exist
bad_dcids_2 = dc.get_stats(['dc/MadDcid', 'dc/MadderDcid'],
'dc/0hyp6tkn18vcb')
self.assertDictEqual({}, bad_dcids_2)
@patch('six.moves.urllib.request.urlopen', side_effect=request_mock)
def test_no_dcids(self, urlopen):
""" Calling get_stats with no dcids returns empty results. """
# Call get_stats with no dcids.
no_dcids = dc.get_stats([], 'dc/0hyp6tkn18vcb')
self.assertDictEqual({}, no_dcids)
@patch('six.moves.urllib.request.urlopen', side_effect=request_mock)
def test_no_data(self, urlopen):
""" Calling get_stats with for None data. """
# Call get_stats with no dcids.
result = dc.get_stats(['geoId/00'], 'dc/0hyp6tkn18vcb')
self.assertDictEqual({}, result)
@patch('six.moves.urllib.request.urlopen', side_effect=request_mock)
def test_batch_request(self, mock_urlopen):
""" Make multiple calls to REST API when number of geos exceeds the batch size. """
save_batch_size = dc.utils._QUERY_BATCH_SIZE
dc.utils._QUERY_BATCH_SIZE = 1
self.assertEqual(0, mock_urlopen.call_count)
stats = dc.get_stats(['geoId/05'], 'dc/0hyp6tkn18vcb', 'latest')
self.assertDictEqual(
stats, {
'geoId/05': {
'data': {
'2018': 18003
},
'place_name': 'Arkansas'
},
})
self.assertEqual(1, mock_urlopen.call_count)
stats = dc.get_stats(['geoId/05', 'geoId/06'], 'dc/0hyp6tkn18vcb', 'latest')
self.assertDictEqual(
stats, {
'geoId/05': {
'data': {
'2018': 18003
},
'place_name': 'Arkansas'
},
'geoId/06': {
'data': {
'2018': 366331
},
'place_name': 'California'
}
})
self.assertEqual(3, mock_urlopen.call_count)
dc.utils._QUERY_BATCH_SIZE = save_batch_size
if __name__ == '__main__':
unittest.main()
| |
"""
This module processes images and makes them suitable to use as neural network inputs.
"""
from __future__ import division
import numpy as np
try:
from scipy.misc import imresize
has_scipy = True
except ImportError:
has_scipy = False
from ..util import misc
from ..util import netlog
log = netlog.setup_logging("image_process", level="INFO")
def rescale_clip(img, new_shape):
    """Placeholder: rescale-with-clipping is not implemented yet."""
    pass
def decorrelate_and_whiten(minibatch):
"""Takes minibatch of images (with batch index first), and zero-means, decorrelates, and
whitens pixels.
http://www.slideshare.net/roelofp/python-for-image-understanding-deep-learning-with-convolutional-neural-nets (slide 39)"""
input_shape = minibatch.shape
flat_batch = minibatch.reshape((input_shape[0], -1))
flat_batch = flat_batch - np.mean(flat_batch, axis=0) # Creating a zero-meaned copy.
cov = np.dot(flat_batch.T, flat_batch) / flat_batch.shape[0]
U, S, V = np.linalg.svd(cov)
batch_rot = np.dot(flat_batch, U)
batch_white = batch_rot / np.sqrt(S + 1e-5)
return batch_white.reshape(input_shape)
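# Illustrative sketch (not part of the original module): applying the whitening
# transform to a random minibatch. The batch shape is an assumption chosen for
# the example only.
def _decorrelate_and_whiten_example():
    rng = np.random.RandomState(0)
    minibatch = rng.rand(8, 16, 16)           # 8 greyscale 16x16 images
    whitened = decorrelate_and_whiten(minibatch)
    assert whitened.shape == minibatch.shape  # the input shape is preserved
    return whitened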
def find_edges(img, axis, cutoff=0.01):
"""Take an image as input and return the edges of the lighted region.
Determine "black" by finding regions with less than `cutoff` times the row or column sum.
"""
# First sum along all dimensions but the one we're interested in.
axis_index = list(range(img.ndim))
axis_index.pop(axis)
axis_sum = np.sum(img, axis=tuple(axis_index))
# Now find the minima on the high and low side.
min_low = np.argmin(axis_sum[: len(axis_sum) // 2])
min_high = np.argmin(axis_sum[len(axis_sum) // 2:]) + len(axis_sum) // 2
axis_sum = axis_sum[min_low: min_high]
# Finally, find the edges of the illuminated region.
edges = np.where(axis_sum > cutoff * axis_sum.max())[0][np.array([0, -1])] + min_low
return edges
def trim_black(img, cutoff=0.01, axis=(0, 1)):
"""Take an image as input and return an image trimmed so that black regions to the left and
right and to the top and bottom have been removed. Determine "black" by finding regions
with less than `cutoff` times the row or column sum.
"""
axis = misc.as_list(axis)
for this_axis in axis:
edges = find_edges(img, axis=this_axis, cutoff=cutoff)
this_slice = img.ndim * [slice(None)]
this_slice[this_axis] = slice(*edges)
        img = img[tuple(this_slice)]  # index with a tuple; list-of-slices indexing is deprecated
# column_edges = find_edges(img, axis=1, cutoff=cutoff)
# width_slice = slice(*column_edges)
#
# row_edges = find_edges(img, axis=0, cutoff=cutoff)
# height_slice = slice(*row_edges)
return img #[height_slice, width_slice, ...]
def process_images(plk_images, new_scale=32, means=None, stds=None):
"""Prepares images for training. Output training images will have the following transforms:
- Invert scale (background is 0)
- Rescale to float in [0, 1]
- Rescale so that the largest dimension is `new_scale` pixels. Center the smaller
dimension and zero-pad the edges.
**Parameters**
* `plk_images` <list of arrays>: List of arrays, or a 2D array. Each array (or row of a
2D array) is a greyscale plankton image. The input list will be unchanged.
    **Optional Parameters**
    * `new_scale` <int>: Output images will be (new_scale, new_scale) arrays.
    * `means`, `stds` <arrays>: Per-pixel statistics to use for normalization. If omitted,
    they are computed from `plk_images` (pass the training-set values when processing
    validation or test images).
    **Returns**
    A tuple `(images, means, stds)`: `images` is an array with one flattened, normalized
    image per row, in the same order as `plk_images`; `means` and `stds` are the
    statistics used for normalization.
"""
try:
from skimage import transform
except ImportError:
print("This function uses the `transform` function from scikit-image.")
raise
output_images = []
for image in plk_images:
# Invert image and adjust the range of values
img = (255 - image) / 255
# Rescale the image.
img = transform.rescale(img, new_scale / max(img.shape), order=3, mode="constant", cval=0)
new_image = np.zeros((new_scale, new_scale))
if img.shape[0] <= img.shape[1]:
pdiff = int((new_image.shape[0] - img.shape[0]) / 2)
new_image[pdiff: pdiff + img.shape[0], :img.shape[1]] = img
else:
pdiff = int((new_image.shape[1] - img.shape[1]) / 2)
new_image[:img.shape[0], pdiff: pdiff + img.shape[1]] = img
output_images.append(new_image.flatten())
imgs = np.asarray(output_images)
# Zero-mean and normalize:
image_means = np.mean(imgs, axis=0) if means is None else means
image_std = 1e-7 + np.std(imgs, axis=0) if stds is None else stds # Regularize to avoid dividing by zero
output_images = (imgs - image_means) / image_std
return output_images, image_means, image_std
def create_data_shifts(input_x, input_y, scale):
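    """Builds an augmented dataset: each input image is inverted, expanded into three
    shifted crops via `shift_image`, and the stacked result is zero-meaned and
    normalized. Returns (shift_x, shift_y, image_means, image_std)."""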
shift_x, shift_y = [], []
for img, label in zip(input_x, input_y):
img = 255 - img
imgshifts = shift_image(img, scale)
for img_ in imgshifts:
shift_x.append(img_)
shift_y.append(label)
shift_x = np.asarray(shift_x)
shift_y = np.asarray(shift_y)
image_means = np.mean(shift_x, axis=0)
image_std = 1e-7 + np.std(shift_x, axis=0) # Regularize to avoid dividing by zero
shift_x = (shift_x - image_means) / image_std
return shift_x, shift_y, image_means, image_std
class ZCA:
""" Copied from https://gist.github.com/duschendestroyer/5170087
which itself was based off
http://ufldl.stanford.edu/wiki/index.php/Implementing_PCA/Whitening
"""
def __init__(self, regularization=10**-5, copy=False):
self.regularization = regularization
self.copy = copy
def fit(self, X, y=None):
X = np.asarray(X)
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
sigma = np.dot(X.T, X) / X.shape[1]
U, S, V = np.linalg.svd(sigma)
tmp = np.dot(U, np.diag(1 / np.sqrt(S + self.regularization)))
self.components_ = np.dot(tmp, U.T)
return self
def transform(self, X):
X = np.asarray(X)
X_transformed = X - self.mean_
X_transformed = np.dot(X_transformed, self.components_.T)
return X_transformed
def shift_image(img, scale):
""" Creates 3 images from an input image. The image
is scaled so that the min side is equal to the scale.
After scaling, the image is shifted cropped along
the max side so that it equals the scale in three
ways in order to get 3 different "shifts" of the
image.
"""
if not has_scipy:
raise ImportError("This function requires scipy.")
if img.shape[0] > img.shape[1]:
img = np.rot90(img)
# Calculate scaling and shift numbers.
scale_pct = float(scale) / img.shape[0]
max_side_size = int(np.round(img.shape[1] * scale_pct))
diff = max_side_size - scale
half_diff_floor = int(np.floor(diff/2.0))
half_diff_ceil = int(np.ceil(diff/2.0))
img_ = imresize(img, (scale, max_side_size))
if img_.shape[0] == img_.shape[1]:
# Add some zero padding so we can get at least a small jitter on this image.
npad = 3
padded_img = np.zeros((img_.shape[0], img_.shape[1] + 2 * npad))
padded_img[:, npad:img_.shape[1] + npad] = img_
img_ = padded_img
diff = 2 * npad
max_side_size = img_.shape[1]
half_diff_ceil, half_diff_floor = npad, npad
# Quality check the scaled image.
assert img_.shape[0] == scale
assert img_.shape[0] < img_.shape[1]
# Create shifts.
img_a = img_[:, diff:max_side_size]
img_b = img_[:, :max_side_size - diff]
img_c = img_[:, half_diff_floor:max_side_size - half_diff_ceil]
flat_images = [x.flatten() for x in [img_a, img_b, img_c]]
assert all([len(img) == scale * scale for img in flat_images])
return flat_images
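# Illustrative pipeline sketch (not part of the original module): preparing a small
# list of synthetic images for training and reusing the training statistics for a
# validation set. Shapes and values are assumptions for the example only; like
# process_images itself, it requires scikit-image.
def _process_images_example():
    rng = np.random.RandomState(0)
    raw_images = [rng.randint(0, 256, size=(40, 60)).astype(float) for _ in range(4)]
    train_x, means, stds = process_images(raw_images, new_scale=32)
    # Reuse the training-set statistics when normalizing held-out images.
    val_x, _, _ = process_images(raw_images, new_scale=32, means=means, stds=stds)
    return train_x.shape, val_x.shape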
| |
# Copyright (c) 2015 FUJITSU LIMITED
# Copyright (c) 2012 EMC Corporation.
# Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
iSCSI Cinder Volume driver for Fujitsu ETERNUS DX S3 series.
"""
import six
from cinder import interface
from cinder.volume import driver
from cinder.volume.drivers.fujitsu import eternus_dx_common
from oslo_log import log as logging
LOG = logging.getLogger(__name__)
@interface.volumedriver
class FJDXISCSIDriver(driver.ISCSIDriver):
"""iSCSI Cinder Volume Driver for Fujitsu ETERNUS DX S3 series."""
# ThirdPartySystems wiki page
CI_WIKI_NAME = "Fujitsu_ETERNUS_CI"
VERSION = eternus_dx_common.FJDXCommon.VERSION
def __init__(self, *args, **kwargs):
super(FJDXISCSIDriver, self).__init__(*args, **kwargs)
self.common = eternus_dx_common.FJDXCommon(
'iSCSI',
configuration=self.configuration)
self.VERSION = self.common.VERSION
def check_for_setup_error(self):
if not self.common.pywbemAvailable:
LOG.error('pywbem could not be imported! '
'pywbem is necessary for this volume driver.')
return
def create_volume(self, volume):
"""Create volume."""
LOG.info('create_volume, volume id: %s, Enter method.', volume['id'])
element_path, metadata = self.common.create_volume(volume)
v_metadata = volume.get('volume_metadata')
if v_metadata:
for data in v_metadata:
metadata[data['key']] = data['value']
else:
v_metadata = volume.get('metadata', {})
metadata.update(v_metadata)
LOG.info('create_volume, info: %s, Exit method.', metadata)
return {'provider_location': six.text_type(element_path),
'metadata': metadata}
def create_volume_from_snapshot(self, volume, snapshot):
"""Creates a volume from a snapshot."""
LOG.info('create_volume_from_snapshot, '
'volume id: %(vid)s, snap id: %(sid)s, Enter method.',
{'vid': volume['id'], 'sid': snapshot['id']})
element_path, metadata = (
self.common.create_volume_from_snapshot(volume, snapshot))
v_metadata = volume.get('volume_metadata')
if v_metadata:
for data in v_metadata:
metadata[data['key']] = data['value']
else:
v_metadata = volume.get('metadata', {})
metadata.update(v_metadata)
LOG.info('create_volume_from_snapshot, '
'info: %s, Exit method.', metadata)
return {'provider_location': six.text_type(element_path),
'metadata': metadata}
def create_cloned_volume(self, volume, src_vref):
"""Create cloned volume."""
LOG.info('create_cloned_volume, '
'target volume id: %(tid)s, '
'source volume id: %(sid)s, Enter method.',
{'tid': volume['id'], 'sid': src_vref['id']})
element_path, metadata = (
self.common.create_cloned_volume(volume, src_vref))
v_metadata = volume.get('volume_metadata')
if v_metadata:
for data in v_metadata:
metadata[data['key']] = data['value']
else:
v_metadata = volume.get('metadata', {})
metadata.update(v_metadata)
LOG.info('create_cloned_volume, info: %s, Exit method.', metadata)
return {'provider_location': six.text_type(element_path),
'metadata': metadata}
def delete_volume(self, volume):
"""Delete volume on ETERNUS."""
LOG.info('delete_volume, volume id: %s, Enter method.', volume['id'])
vol_exist = self.common.delete_volume(volume)
LOG.info('delete_volume, delete: %s, Exit method.', vol_exist)
return
def create_snapshot(self, snapshot):
"""Creates a snapshot."""
LOG.info('create_snapshot, snap id: %(sid)s, volume id: %(vid)s, '
'Enter method.',
{'sid': snapshot['id'], 'vid': snapshot['volume_id']})
element_path, metadata = self.common.create_snapshot(snapshot)
LOG.info('create_snapshot, info: %s, Exit method.', metadata)
return {'provider_location': six.text_type(element_path)}
def delete_snapshot(self, snapshot):
"""Deletes a snapshot."""
LOG.info('delete_snapshot, snap id: %(sid)s, volume id: %(vid)s, '
'Enter method.',
{'sid': snapshot['id'], 'vid': snapshot['volume_id']})
vol_exist = self.common.delete_snapshot(snapshot)
LOG.info('delete_snapshot, delete: %s, Exit method.', vol_exist)
return
def ensure_export(self, context, volume):
"""Driver entry point to get the export info for an existing volume."""
return
def create_export(self, context, volume, connector):
"""Driver entry point to get the export info for a new volume."""
return
def remove_export(self, context, volume):
"""Driver entry point to remove an export for a volume."""
return
def initialize_connection(self, volume, connector):
"""Allow connection to connector and return connection info."""
LOG.info('initialize_connection, volume id: %(vid)s, '
'initiator: %(initiator)s, Enter method.',
{'vid': volume['id'], 'initiator': connector['initiator']})
info = self.common.initialize_connection(volume, connector)
LOG.info('initialize_connection, info: %s, Exit method.', info)
return info
def terminate_connection(self, volume, connector, **kwargs):
"""Disallow connection from connector."""
initiator = connector.get('initiator') if connector else None
LOG.info('terminate_connection, volume id: %(vid)s, '
'initiator: %(initiator)s, Enter method.',
{'vid': volume['id'], 'initiator': initiator})
map_exist = self.common.terminate_connection(volume, connector)
LOG.info('terminate_connection, unmap: %s, Exit method.', map_exist)
return
def get_volume_stats(self, refresh=False):
"""Get volume stats."""
LOG.debug('get_volume_stats, refresh: %s, Enter method.', refresh)
pool_name = None
if refresh is True:
data, pool_name = self.common.update_volume_stats()
backend_name = self.configuration.safe_get('volume_backend_name')
data['volume_backend_name'] = backend_name or 'FJDXISCSIDriver'
data['storage_protocol'] = 'iSCSI'
self._stats = data
LOG.debug('get_volume_stats, '
'pool name: %s, Exit method.', pool_name)
return self._stats
def extend_volume(self, volume, new_size):
"""Extend volume."""
LOG.info('extend_volume, volume id: %s, Enter method.', volume['id'])
used_pool_name = self.common.extend_volume(volume, new_size)
LOG.info('extend_volume, used pool name: %s, Exit method.',
used_pool_name)
| |
import logging
import arrow
import json
import os
import re
from os.path import join, abspath, dirname
import settings
import github_api as gh
THIS_DIR = dirname(abspath(__file__))
# Hopefully this isn't overwritten on pulls
SAVED_COMMANDS_FILE = join(THIS_DIR, '..', "server/issue_commands_ran.json")
if not os.path.exists(SAVED_COMMANDS_FILE):
    '''
    Schema of the saved-commands file (a JSON object keyed by comment id):
    {
        "<comment_id>": {                   // the actual comment id, used as the key
            "comment_id": int,
            "issue_id": int,
            "has_ran": bool,
            "command": string,
            "chaos_response_id": int,       // chaos status comment id
            "time_remaining": int
        },
        ...
        "last_ran": "2017-05-20T00:00:00Z"  // ISO-8601 timestamp of the last poll
    }
    '''
with open(SAVED_COMMANDS_FILE, 'w') as f:
json.dump({}, f)
__log = logging.getLogger("poll_issue_commands")
'''
Command Syntax
/vote close closes issue when no nay reactions on this comment are added within voting window
/vote reopen reopens issue when see above ^^^
/vote label=<LABEL_TEXT> adds label when ^^^
/vote remove-label=<LABEL_TEXT> removes label when ^^^
/vote assign=<USER> assigns to user when ^^^
/vote unassign=<USER> unassigns from user when ^^^
'''
# If no subcommands, map cmd: None
COMMAND_LIST = {
"/vote": ("close", "reopen")
}
def update_db(comment_id, data_fields, db=None):
if not db:
with open(SAVED_COMMANDS_FILE, 'r') as f:
db = json.load(f)
if comment_id not in db:
db[comment_id] = {}
for field, value in data_fields.items():
db[comment_id][field] = value
with open(SAVED_COMMANDS_FILE, 'w') as f:
json.dump(db, f)
return db
def select_db(comment_id, fields, db=None):
if db is None:
with open(SAVED_COMMANDS_FILE, 'r') as f:
db = json.load(f)
data = {}
for field in fields:
data[field] = db[comment_id][field]
return data
def get_last_ran(db=None):
if db is None:
with open(SAVED_COMMANDS_FILE, 'r') as f:
db = json.load(f)
return db.get("last_ran", None) # First time return none
def set_last_run(last_ran, db=None):
if db is None:
with open(SAVED_COMMANDS_FILE, 'r') as f:
db = json.load(f)
db["last_ran"] = last_ran
with open(SAVED_COMMANDS_FILE, 'w') as f:
json.dump(db, f)
def insert_or_update(api, comment_id, issue_id, comment_txt):
command_history = {}
with open(SAVED_COMMANDS_FILE, 'r') as f:
command_history = json.load(f)
# equivalent of db INSERT OR UPDATE.
comment_data = command_history.get(comment_id, None)
if not comment_data:
comment_data = {
"comment_id": comment_id,
"issue_id": issue_id,
"has_ran": False,
# "command": comment_txt,
"chaos_response_id": None,
"time_remaining": None
}
command_history[comment_id] = comment_data
if comment_data["has_ran"]:
return
voting_window = gh.voting.get_initial_voting_window()
seconds_remaining = gh.issues.voting_window_remaining_seconds(api, settings.URN, comment_id,
voting_window)
seconds_remaining = max(0, seconds_remaining) # No negative time
data = {
"time_remaining": seconds_remaining,
"command": comment_txt # Keep this fresh so nobody edits their command post..
}
update_db(comment_id, data, db=command_history)
def has_enough_votes(votes):
# At least one negative vote will cause vote to not pass
for user, vote in votes.items():
if vote < 0:
# __log.debug("vote less than one")
return False
return True
def post_command_status_update(api, issue_id, comment_id, has_votes):
# First find out if we have posted a status update for this command already
# Todo, stop opening all these files
command_history = {}
with open(SAVED_COMMANDS_FILE, 'r') as f:
command_history = json.load(f)
# Todo - stop doing loops
comment_data = command_history[comment_id]
if comment_data["has_ran"]:
return
seconds_remaining = comment_data["time_remaining"]
command_text = comment_data["command"]
time = gh.misc.seconds_to_human(seconds_remaining)
status = "passing" if has_votes else "failing"
body = "> {command}\n\nTime remaining: {time} - Vote status: {status}".format(
command=command_text,
time=time,
status=status)
if comment_data["chaos_response_id"]:
resp = gh.comments.edit_comment(api, settings.URN, comment_data["chaos_response_id"], body)
else:
resp = gh.comments.leave_comment(api, settings.URN, issue_id, body)
update_db(comment_id, {"chaos_response_id": str(resp["id"])}, db=command_history)
def can_run_vote_command(api, comment_id):
comment_data = select_db(comment_id, ("has_ran", "time_remaining"))
if comment_data["has_ran"]:
# __log.debug("Already ran command")
return False
time_left = comment_data["time_remaining"]
if time_left > 0:
# __log.debug("Time remaining: " + gh.misc.seconds_to_human(time_left))
return False
return True
def update_command_ran(api, comment_id, text):
db = update_db(comment_id, {"has_ran": True})
db_fields = select_db(comment_id, ("chaos_response_id", "command"), db=db)
resp_id = db_fields["chaos_response_id"]
command = db_fields["command"]
body = "> {command}\n\n{text}".format(command=command, text=text)
gh.comments.edit_comment(api, settings.URN, resp_id, body)
def get_command_votes(api, urn, comment_id):
votes = {}
for voter, vote in gh.voting.get_comment_reaction_votes(api, urn, comment_id):
votes[voter] = vote
return votes
def handle_vote_command(api, command, issue_id, comment_id, votes):
orig_command = command[:]
# Check for correct command syntax, ie, subcommands
log_warning = False
if len(command):
sub_command = command.pop(0)
if sub_command == "close":
gh.issues.close_issue(api, settings.URN, issue_id)
gh.comments.leave_issue_closed_comment(api, settings.URN, issue_id)
elif sub_command == "reopen":
gh.issues.open_issue(api, settings.URN, issue_id)
gh.comments.leave_issue_reopened_comment(api, settings.URN, issue_id)
else:
# Implement other commands
pass
else:
log_warning = True
if log_warning:
__log.warning("Unknown issue command syntax: /vote {command}".format(command=orig_command))
def handle_comment(api, issue_comment):
issue_id = issue_comment["issue_id"]
global_comment_id = str(issue_comment["global_comment_id"])
comment_text = issue_comment["comment_text"]
    comment_text = re.sub(r'\s+', ' ', comment_text)
parsed_comment = list(map(lambda x: x.lower(), comment_text.split(' ')))
command = parsed_comment.pop(0)
if command in COMMAND_LIST:
votes = get_command_votes(api, settings.URN, global_comment_id)
insert_or_update(api, global_comment_id, issue_id, comment_text)
can_run = can_run_vote_command(api, global_comment_id)
has_votes = has_enough_votes(votes)
post_command_status_update(api, issue_id, global_comment_id, has_votes)
# We doin stuff boyz
if can_run and has_votes:
__log.debug("Handling issue {issue}: command {comment}".format(issue=issue_id,
comment=comment_text))
if command == "/vote":
handle_vote_command(api, parsed_comment, issue_id, global_comment_id, votes)
update_command_ran(api, global_comment_id, "Command Ran")
elif can_run and not has_votes:
# oops we didn't pass
update_command_ran(api, global_comment_id, "Vote Failed")
def is_command(comment):
    comment = re.sub(r'\s+', ' ', comment)
parsed_comment = list(map(lambda x: x.lower(), comment.split(' ')))
cmd = parsed_comment[0]
is_cmd = False
if cmd in COMMAND_LIST:
subcommands = COMMAND_LIST.get(cmd, None)
# 4 cases
# 1. No subcommands for command
# 2. Subcommands exist, and args has it
# 3. Subcommands exist, and args don't have it
        # 4. Args specify a non-existent subcommand
if subcommands is None:
is_cmd = True # Already have the command
else:
sub_cmd_with_args = parsed_comment[1:]
if len(sub_cmd_with_args) > 0:
sub_cmd = sub_cmd_with_args[0]
# Check cond 2
if sub_cmd in subcommands:
is_cmd = True
else:
is_cmd = False
else:
# Cond 3
is_cmd = False
return is_cmd
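# Illustrative behaviour of is_command() given the COMMAND_LIST defined above
# (explanatory examples, not tests from the original module):
#   is_command("/vote close")    -> True   ("close" is a known /vote subcommand)
#   is_command("/vote banana")   -> False  (unknown subcommand)
#   is_command("hello world")    -> False  (not a registered command)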
def poll_read_issue_comments(api):
__log.info("looking for issue comments")
last_ran = get_last_ran()
if last_ran:
last_ran = arrow.get(last_ran)
paged_results = gh.comments.get_all_issue_comments(api,
settings.URN,
page='all',
since=last_ran)
# This now only finds new entries that have been either posted or updated
# Add them to our database
# If page=all, you have to loop through pages as well
for page in paged_results:
for issue_comment in page:
# Get info and store in db
issue_id = issue_comment["issue_id"]
global_comment_id = str(issue_comment["global_comment_id"])
comment_text = issue_comment["comment_text"]
if is_command(comment_text):
insert_or_update(api, global_comment_id, issue_id, comment_text)
# WARNING - be careful of saving wrong version of db to disk
db = None
with open(SAVED_COMMANDS_FILE, 'r') as f:
db = json.load(f)
# NEVER delete a comment_id data structure, even if it already ran
# Simply updating the command comment will cause it to reenter the system
    # which could cause unexpected behaviour
# One solution is to delete the original comment with the command..
# Or move id to a separate db
if "last_ran" in db:
del db["last_ran"]
# TODO - run commands oldest to newest
db_sorted = sorted(db.items(), key=lambda x: x[1]["time_remaining"])
for cmd_id, cmd_obj in db_sorted:
try:
# I'm really lazy right now. Just mock up an object and pass to handle_comment
mock = {
"issue_id": cmd_obj["issue_id"],
"global_comment_id": cmd_obj["comment_id"],
"comment_text": cmd_obj["command"]
}
handle_comment(api, mock)
except KeyError as e:
__log.warning("Unable to handle comment id {id}".format(cmd_id))
now = arrow.utcnow()
set_last_run(gh.misc.dt_to_github_dt(now))
__log.info("Waiting %d seconds until next scheduled Issue comment polling",
settings.ISSUE_COMMENT_POLLING_INTERVAL_SECONDS)
| |
"""
Platform for retrieving meteorological data from Dark Sky.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/weather.darksky/
"""
from datetime import datetime, timedelta
import logging
from requests.exceptions import (
ConnectionError as ConnectError, HTTPError, Timeout)
import voluptuous as vol
from homeassistant.components.weather import (
ATTR_FORECAST_TEMP, ATTR_FORECAST_TIME, ATTR_FORECAST_CONDITION,
ATTR_FORECAST_WIND_SPEED, ATTR_FORECAST_WIND_BEARING,
ATTR_FORECAST_TEMP_LOW, ATTR_FORECAST_PRECIPITATION,
PLATFORM_SCHEMA, WeatherEntity)
from homeassistant.const import (
CONF_API_KEY, CONF_LATITUDE, CONF_LONGITUDE, CONF_NAME, TEMP_CELSIUS,
CONF_MODE, TEMP_FAHRENHEIT)
import homeassistant.helpers.config_validation as cv
from homeassistant.util import Throttle
REQUIREMENTS = ['python-forecastio==1.4.0']
_LOGGER = logging.getLogger(__name__)
ATTRIBUTION = "Powered by Dark Sky"
FORECAST_MODE = ['hourly', 'daily']
MAP_CONDITION = {
'clear-day': 'sunny',
'clear-night': 'clear-night',
'rain': 'rainy',
'snow': 'snowy',
'sleet': 'snowy-rainy',
'wind': 'windy',
'fog': 'fog',
'cloudy': 'cloudy',
'partly-cloudy-day': 'partlycloudy',
'partly-cloudy-night': 'partlycloudy',
'hail': 'hail',
'thunderstorm': 'lightning',
'tornado': None,
}
CONF_UNITS = 'units'
DEFAULT_NAME = 'Dark Sky'
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_API_KEY): cv.string,
vol.Optional(CONF_LATITUDE): cv.latitude,
vol.Optional(CONF_LONGITUDE): cv.longitude,
vol.Optional(CONF_MODE, default='hourly'): vol.In(FORECAST_MODE),
vol.Optional(CONF_UNITS): vol.In(['auto', 'si', 'us', 'ca', 'uk', 'uk2']),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
})
MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=3)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Dark Sky weather."""
latitude = config.get(CONF_LATITUDE, hass.config.latitude)
longitude = config.get(CONF_LONGITUDE, hass.config.longitude)
name = config.get(CONF_NAME)
mode = config.get(CONF_MODE)
units = config.get(CONF_UNITS)
if not units:
units = 'ca' if hass.config.units.is_metric else 'us'
dark_sky = DarkSkyData(
config.get(CONF_API_KEY), latitude, longitude, units)
add_entities([DarkSkyWeather(name, dark_sky, mode)], True)
class DarkSkyWeather(WeatherEntity):
"""Representation of a weather condition."""
def __init__(self, name, dark_sky, mode):
"""Initialize Dark Sky weather."""
self._name = name
self._dark_sky = dark_sky
self._mode = mode
self._ds_data = None
self._ds_currently = None
self._ds_hourly = None
self._ds_daily = None
@property
def attribution(self):
"""Return the attribution."""
return ATTRIBUTION
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def temperature(self):
"""Return the temperature."""
return self._ds_currently.get('temperature')
@property
def temperature_unit(self):
"""Return the unit of measurement."""
return TEMP_FAHRENHEIT if 'us' in self._dark_sky.units \
else TEMP_CELSIUS
@property
def humidity(self):
"""Return the humidity."""
return round(self._ds_currently.get('humidity') * 100.0, 2)
@property
def wind_speed(self):
"""Return the wind speed."""
return self._ds_currently.get('windSpeed')
@property
def wind_bearing(self):
"""Return the wind bearing."""
return self._ds_currently.get('windBearing')
@property
def ozone(self):
"""Return the ozone level."""
return self._ds_currently.get('ozone')
@property
def pressure(self):
"""Return the pressure."""
return self._ds_currently.get('pressure')
@property
def visibility(self):
"""Return the visibility."""
return self._ds_currently.get('visibility')
@property
def condition(self):
"""Return the weather condition."""
return MAP_CONDITION.get(self._ds_currently.get('icon'))
@property
def forecast(self):
"""Return the forecast array."""
        # Per conversation with Joshua Reyes of Dark Sky, to get the total
        # forecasted precipitation, you have to multiply the intensity by
        # the hours in the forecast interval
        def calc_precipitation(intensity, hours):
            amount = None
            if intensity is not None:
                amount = round((intensity * hours), 1)
            return amount if amount is not None and amount > 0 else None
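        # Illustrative numbers (examples, not from the source): a daily
        # precipIntensity of 0.3 (per hour) over a 24 hour interval yields
        # round(0.3 * 24, 1) == 7.2, while a missing or zero intensity maps
        # to None so no precipitation value is reported for that entry.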
data = None
if self._mode == 'daily':
data = [{
ATTR_FORECAST_TIME:
datetime.fromtimestamp(entry.d.get('time')).isoformat(),
ATTR_FORECAST_TEMP:
entry.d.get('temperatureHigh'),
ATTR_FORECAST_TEMP_LOW:
entry.d.get('temperatureLow'),
ATTR_FORECAST_PRECIPITATION:
calc_precipitation(entry.d.get('precipIntensity'), 24),
ATTR_FORECAST_WIND_SPEED:
entry.d.get('windSpeed'),
ATTR_FORECAST_WIND_BEARING:
entry.d.get('windBearing'),
ATTR_FORECAST_CONDITION:
MAP_CONDITION.get(entry.d.get('icon'))
} for entry in self._ds_daily.data]
else:
data = [{
ATTR_FORECAST_TIME:
datetime.fromtimestamp(entry.d.get('time')).isoformat(),
ATTR_FORECAST_TEMP:
entry.d.get('temperature'),
ATTR_FORECAST_PRECIPITATION:
calc_precipitation(entry.d.get('precipIntensity'), 1),
ATTR_FORECAST_CONDITION:
MAP_CONDITION.get(entry.d.get('icon'))
} for entry in self._ds_hourly.data]
return data
def update(self):
"""Get the latest data from Dark Sky."""
self._dark_sky.update()
self._ds_data = self._dark_sky.data
self._ds_currently = self._dark_sky.currently.d
self._ds_hourly = self._dark_sky.hourly
self._ds_daily = self._dark_sky.daily
class DarkSkyData:
"""Get the latest data from Dark Sky."""
def __init__(self, api_key, latitude, longitude, units):
"""Initialize the data object."""
self._api_key = api_key
self.latitude = latitude
self.longitude = longitude
self.requested_units = units
self.data = None
self.currently = None
self.hourly = None
self.daily = None
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Get the latest data from Dark Sky."""
import forecastio
try:
self.data = forecastio.load_forecast(
self._api_key, self.latitude, self.longitude,
units=self.requested_units)
self.currently = self.data.currently()
self.hourly = self.data.hourly()
self.daily = self.data.daily()
except (ConnectError, HTTPError, Timeout, ValueError) as error:
_LOGGER.error("Unable to connect to Dark Sky. %s", error)
self.data = None
@property
def units(self):
"""Get the unit system of returned data."""
return self.data.json.get('flags').get('units')
| |
"""This module provides an API to validate and to some extent
manipulate data structures, such as JSON and XML parsing results.
Example usage:
>>> validate(int, 5)
5
>>> validate({text: int}, {'foo': '1'})
ValueError: Type of '1' should be 'int' but is 'str'
>>> validate({'foo': transform(int)}, {'foo': '1'})
{'foo': 1}
"""
from xml.etree import ElementTree as ET
from copy import copy as copy_obj
try:
from functools import singledispatch
except ImportError:
from singledispatch import singledispatch
from ...compat import is_py2, urlparse
from ...exceptions import PluginError
__all__ = [
"any", "all", "filter", "get", "getattr", "hasattr", "length", "optional",
"transform", "text", "union", "url", "startswith", "endswith", "contains",
"xml_element", "xml_find", "xml_findall", "xml_findtext",
"validate", "Schema", "SchemaContainer"
]
#: Alias for text type on each Python version
text = is_py2 and basestring or str
# References to original functions that we override in this module
_all = all
_getattr = getattr
_hasattr = hasattr
_filter = filter
_map = map
_re_match_attr = ("group", "groups", "groupdict", "re")
def _is_re_match(value):
return _all(_hasattr(value, a) for a in _re_match_attr)
class any(tuple):
"""At least one of the schemas must be valid."""
def __new__(cls, *args):
return super(any, cls).__new__(cls, args)
class all(tuple):
"""All schemas must be valid."""
def __new__(cls, *args):
return super(all, cls).__new__(cls, args)
class SchemaContainer(object):
def __init__(self, schema):
self.schema = schema
class transform(object):
"""Applies function to value to transform it."""
def __init__(self, func):
# text is an alias for basestring on Python 2, which cannot be
# instantiated and therefore can't be used to transform the value,
# so we force to unicode instead.
if is_py2 and func == text:
func = unicode
self.func = func
class optional(object):
"""An optional key used in a dict or union-dict."""
def __init__(self, key):
self.key = key
class union(SchemaContainer):
"""Extracts multiple validations based on the same value."""
class attr(SchemaContainer):
"""Validates an object's attributes."""
class xml_element(object):
"""A XML element."""
def __init__(self, tag=None, text=None, attrib=None):
self.tag = tag
self.text = text
self.attrib = attrib
def length(length):
"""Checks value for minimum length using len()."""
def min_len(value):
if not len(value) >= length:
raise ValueError(
"Minimum length is {0} but value is {1}".format(length, len(value))
)
return True
return min_len
def startswith(string):
"""Checks if the string value starts with another string."""
def starts_with(value):
validate(text, value)
if not value.startswith(string):
raise ValueError("'{0}' does not start with '{1}'".format(value, string))
return True
return starts_with
def endswith(string):
"""Checks if the string value ends with another string."""
def ends_with(value):
validate(text, value)
if not value.endswith(string):
raise ValueError("'{0}' does not end with '{1}'".format(value, string))
return True
return ends_with
def contains(string):
"""Checks if the string value contains another string."""
def contains_str(value):
validate(text, value)
if string not in value:
raise ValueError("'{0}' does not contain '{1}'".format(value, string))
return True
return contains_str
def get(item, default=None):
"""Get item from value (value[item]).
If the item is not found, return the default.
Handles XML elements, regex matches and anything that has __getitem__.
"""
def getter(value):
if ET.iselement(value):
value = value.attrib
try:
# Use .group() if this is a regex match object
if _is_re_match(value):
return value.group(item)
else:
return value[item]
except (KeyError, IndexError):
return default
except (TypeError, AttributeError) as err:
raise ValueError(err)
return transform(getter)
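# Illustrative use of get() (example values; assumes validate() from this
# module). Regex match objects are looked up via .group(item) and XML elements
# via their attributes, as described above:
#
#     validate(get("id", default=0), {"id": 42})  # -> 42
#     validate(get("id", default=0), {})          # -> 0 (default on KeyError)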
def getattr(attr, default=None):
"""Get a named attribute from an object.
When a default argument is given, it is returned when the attribute
doesn't exist.
"""
def getter(value):
return _getattr(value, attr, default)
return transform(getter)
def hasattr(attr):
"""Verifies that the object has an attribute with the given name."""
def has_attr(value):
return _hasattr(value, attr)
return has_attr
def filter(func):
"""Filters out unwanted items using the specified function.
Supports both dicts and sequences, key/value pairs are
expanded when applied to a dict.
"""
def expand_kv(kv):
return func(*kv)
def filter_values(value):
cls = type(value)
if isinstance(value, dict):
return cls(_filter(expand_kv, value.items()))
else:
return cls(_filter(func, value))
return transform(filter_values)
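# Illustrative use of filter() (example data): key/value pairs are expanded
# when the input is a dict, so the predicate receives (key, value):
#
#     validate(filter(lambda k, v: v > 1), {"a": 1, "b": 2})  # -> {"b": 2}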
def map(func):
"""Apply function to each value inside the sequence or dict.
Supports both dicts and sequences, key/value pairs are
expanded when applied to a dict.
"""
# text is an alias for basestring on Python 2, which cannot be
# instantiated and therefore can't be used to transform the value,
# so we force to unicode instead.
if is_py2 and text == func:
func = unicode
def expand_kv(kv):
return func(*kv)
def map_values(value):
cls = type(value)
if isinstance(value, dict):
return cls(_map(expand_kv, value.items()))
else:
return cls(_map(func, value))
return transform(map_values)
def url(**attributes):
"""Parses an URL and validates its attributes."""
def check_url(value):
validate(text, value)
parsed = urlparse(value)
if not parsed.netloc:
raise ValueError("'{0}' is not a valid URL".format(value))
for name, schema in attributes.items():
if not _hasattr(parsed, name):
raise ValueError("Invalid URL attribute '{0}'".format(name))
try:
validate(schema, _getattr(parsed, name))
except ValueError as err:
raise ValueError(
"Unable to validate URL attribute '{0}': {1}".format(
name, err
)
)
return True
# Convert "http" to be either any("http", "https") for convenience
if attributes.get("scheme") == "http":
attributes["scheme"] = any("http", "https")
return check_url
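# Illustrative use of url() (example URL and attributes):
#
#     validate(url(scheme="http", path=startswith("/live")),
#              "https://example.com/live/stream1")
#     # -> "https://example.com/live/stream1"; a non-matching scheme or path
#     #    raises ValueError instead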
def xml_find(xpath):
"""Find a XML element via xpath."""
def xpath_find(value):
validate(ET.iselement, value)
value = value.find(xpath)
if value is None:
raise ValueError("XPath '{0}' did not return an element".format(xpath))
return validate(ET.iselement, value)
return transform(xpath_find)
def xml_findall(xpath):
"""Find a list of XML elements via xpath."""
def xpath_findall(value):
validate(ET.iselement, value)
return value.findall(xpath)
return transform(xpath_findall)
def xml_findtext(xpath):
"""Find a XML element via xpath and extract its text."""
return all(
xml_find(xpath),
getattr("text"),
)
@singledispatch
def validate(schema, value):
if callable(schema):
if schema(value):
return value
else:
raise ValueError("{0}({1!r}) is not true".format(schema.__name__, value))
if schema == value:
return value
else:
raise ValueError("{0!r} does not equal {1!r}".format(value, schema))
@validate.register(any)
def validate_any(schema, value):
errors = []
for subschema in schema:
try:
return validate(subschema, value)
except ValueError as err:
errors.append(err)
else:
err = " or ".join(_map(str, errors))
raise ValueError(err)
@validate.register(all)
def validate_all(schemas, value):
for schema in schemas:
value = validate(schema, value)
return value
@validate.register(transform)
def validate_transform(schema, value):
validate(callable, schema.func)
return schema.func(value)
@validate.register(list)
@validate.register(tuple)
@validate.register(set)
@validate.register(frozenset)
def validate_sequence(schema, value):
validate(type(schema), value)
return type(schema)(validate(any(*schema), v) for v in value)
@validate.register(dict)
def validate_dict(schema, value):
validate(type(schema), value)
new = type(schema)()
for key, subschema in schema.items():
if isinstance(key, optional):
if key.key not in value:
continue
key = key.key
if type(key) in (type, transform, any, all, union):
for subkey, subvalue in value.items():
new[validate(key, subkey)] = validate(subschema, subvalue)
break
else:
if key not in value:
raise ValueError("Key '{0}' not found in {1!r}".format(key, value))
try:
new[key] = validate(subschema, value[key])
except ValueError as err:
raise ValueError("Unable to validate key '{0}': {1}".format(key, err))
return new
@validate.register(type)
def validate_type(schema, value):
if isinstance(value, schema):
return value
else:
raise ValueError(
"Type of {0!r} should be '{1}' but is '{2}'".format(
value, schema.__name__, type(value).__name__
)
)
@validate.register(xml_element)
def validate_xml_element(schema, value):
validate(ET.iselement, value)
new = ET.Element(value.tag, attrib=value.attrib)
if schema.attrib is not None:
try:
new.attrib = validate(schema.attrib, value.attrib)
except ValueError as err:
raise ValueError("Unable to validate XML attributes: {0}".format(err))
if schema.tag is not None:
try:
new.tag = validate(schema.tag, value.tag)
except ValueError as err:
raise ValueError("Unable to validate XML tag: {0}".format(err))
if schema.text is not None:
try:
new.text = validate(schema.text, value.text)
except ValueError as err:
raise ValueError("Unable to validate XML text: {0}".format(err))
for child in value:
new.append(child)
return new
@validate.register(attr)
def validate_attr(schema, value):
new = copy_obj(value)
for attr, schema in schema.schema.items():
if not _hasattr(value, attr):
raise ValueError("Attribute '{0}' not found on object '{1}'".format(
attr, value
))
setattr(new, attr, validate(schema, _getattr(value, attr)))
return new
@singledispatch
def validate_union(schema, value):
raise ValueError("Invalid union type: {0}".format(type(schema).__name__))
@validate_union.register(dict)
def validate_union_dict(schema, value):
new = type(schema)()
for key, schema in schema.items():
optional_ = isinstance(key, optional)
if optional_:
key = key.key
try:
new[key] = validate(schema, value)
except ValueError as err:
if optional_:
continue
raise ValueError("Unable to validate union '{0}': {1}".format(key, err))
return new
@validate_union.register(list)
@validate_union.register(tuple)
@validate_union.register(set)
@validate_union.register(frozenset)
def validate_union_sequence(schemas, value):
return type(schemas)(validate(schema, value) for schema in schemas)
@validate.register(union)
def validate_unions(schema, value):
return validate_union(schema.schema, value)
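# Illustrative use of union (example schema): every sub-schema is applied to
# the same input value and the results are collected under the schema's keys:
#
#     validate(union({"lower": transform(str.lower),
#                     "upper": transform(str.upper)}), "Foo")
#     # -> {"lower": "foo", "upper": "FOO"}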
class Schema(object):
"""Wraps a validator schema into a object."""
def __init__(self, *schemas):
self.schema = all(*schemas)
def validate(self, value, name="result", exception=PluginError):
try:
return validate(self.schema, value)
except ValueError as err:
raise exception("Unable to validate {0}: {1}".format(name, err))
@validate.register(Schema)
def validate_schema(schema, value):
return schema.validate(value, exception=ValueError)
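# Illustrative use of Schema (example schema and values):
#
#     schema = Schema({"count": transform(int), optional("tag"): text})
#     schema.validate({"count": "3"})  # -> {"count": 3}
#     schema.validate([])              # raises PluginError (not a dict)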
| |
"""
RenderTarget
Copyright (c) 2015 tobspr <tobias.springer1@gmail.com>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from __future__ import print_function, division
from panda3d.core import GraphicsOutput, Texture, AuxBitplaneAttrib, NodePath
from panda3d.core import Vec4, TransparencyAttrib, ColorWriteAttrib, SamplerState
from panda3d.core import WindowProperties, FrameBufferProperties, GraphicsPipe
from panda3d.core import LVecBase2i
from rplibs.six.moves import range
from rplibs.six import iterkeys, itervalues
from rpcore.globals import Globals
from rpcore.rpobject import RPObject
from rpcore.util.post_process_region import PostProcessRegion
__all__ = "RenderTarget",
__version__ = "2.0"
class setter(object): # pylint: disable=C0103
""" Setter only property """
def __init__(self, func):
self.__func = func
self.__doc__ = func.__doc__
def __set__(self, name, value):
return self.__func(name, value)
class RenderTarget(RPObject):
""" Second version of the RenderTarget library, provides functions
to easily setup buffers in Panda3D. """
NUM_ALLOCATED_BUFFERS = 0
USE_R11G11B10 = True
REGISTERED_TARGETS = []
def __init__(self, name="target"):
RPObject.__init__(self, name)
self._targets = {}
self._color_bits = (0, 0, 0, 0)
self._aux_bits = 8
self._aux_count = 0
self._depth_bits = 0
self._size = LVecBase2i(-1, -1)
self._source_window = Globals.base.win
self._source_region = None
self._active = False
self._internal_buffer = None
# Public attributes
self.engine = Globals.base.graphicsEngine
self.support_transparency = False
self.create_default_region = True
# Disable all global clears, since they are not required
for region in Globals.base.win.get_display_regions():
region.disable_clears()
def add_color_attachment(self, bits=8, alpha=False):
""" Adds a new color attachment with the given amount of bits, bits can
be either a single int or a tuple determining the bits. If bits is a
single int, alpha determines whether alpha bits are requested """
self._targets["color"] = Texture(self.debug_name + "_color")
if isinstance(bits, (list, tuple)):
self._color_bits = (bits[0], bits[1], bits[2], bits[3] if len(bits) == 4 else 0)
else:
self._color_bits = ((bits, bits, bits, (bits if alpha else 0)))
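    # Illustrative calls (example bit depths): add_color_attachment(bits=16)
    # requests a 16 bit RGB target without alpha, while
    # add_color_attachment(bits=(11, 11, 10)) requests a packed R11G11B10 target.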
def add_depth_attachment(self, bits=32):
""" Adds a depth attachment wit the given amount of bits """
self._targets["depth"] = Texture(self.debug_name + "_depth")
self._depth_bits = bits
def add_aux_attachment(self, bits=8):
""" Adds a new aux attachment with the given amount of bits. The amount
of bits passed overrides all previous bits set, since all aux textures
have to have the same amount of bits. """
self._aux_bits = bits
self._aux_count += 1
def add_aux_attachments(self, bits=8, count=1):
""" Adds n new aux attachments, with the given amount of bits. All
        previously set aux bits are overridden, since all aux textures have to
have the same amount of bits """
self._aux_bits = bits
self._aux_count += count
@setter
def size(self, *args):
""" Sets the render target size. This can be either a single integer,
in which case it applies to both dimensions. Negative integers cause
the render target to be proportional to the screen size, i.e. a value
of -4 produces a quarter resolution target, a value of -2 a half
resolution target, and a value of -1 a full resolution target
(the default). """
self._size = LVecBase2i(*args)
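    # Illustrative sizing (example window size): with a 1920x1080 source window,
    # ``target.size = -2`` resolves in _create_buffer to
    # (1920 - (-2) - 1) // 2 == 960 and (1080 - (-2) - 1) // 2 == 540,
    # i.e. a half resolution target.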
@property
def active(self):
""" Returns whether the target is currently active """
return self._active
@active.setter
def active(self, flag):
""" Sets whether the target is active, this just propagates the active
flag to all display regions """
for region in self._internal_buffer.get_display_regions():
region.set_active(flag)
@property
def color_tex(self):
""" Returns the color attachment if present """
return self._targets["color"]
@property
def depth_tex(self):
""" Returns the depth attachment if present """
return self._targets["depth"]
@property
def aux_tex(self):
""" Returns a list of aux textures, can be used like target.aux_tex[2],
notice the indices start at zero, so the first target has the index 0. """
return [self._targets[i] for i in sorted(iterkeys(self._targets)) if i.startswith("aux_")]
def set_shader_input(self, *args, **kwargs):
""" Sets a shader input available to the target """
if self.create_default_region:
self._source_region.set_shader_input(*args, **kwargs)
@setter
def shader(self, shader_obj):
""" Sets a shader on the target """
if not shader_obj:
self.error("shader must not be None!")
return
self._source_region.set_shader(shader_obj)
@property
def internal_buffer(self):
""" Returns a handle to the internal GraphicsBuffer object """
return self._internal_buffer
@property
def targets(self):
""" Returns the dictionary of attachments, whereas the key is the name
of the attachment and the value is the Texture handle of the attachment """
return self._targets
@property
def region(self):
""" Returns the internally used PostProcessRegion """
return self._source_region
def prepare_render(self, camera_np):
""" Prepares to render a scene """
self.create_default_region = False
self._create_buffer()
self._source_region = self._internal_buffer.get_display_region(0)
if camera_np:
initial_state = NodePath("rtis")
initial_state.set_state(camera_np.node().get_initial_state())
if self._aux_count:
initial_state.set_attrib(AuxBitplaneAttrib.make(self._aux_bits), 20)
initial_state.set_attrib(TransparencyAttrib.make(TransparencyAttrib.M_none), 20)
if max(self._color_bits) == 0:
initial_state.set_attrib(ColorWriteAttrib.make(ColorWriteAttrib.C_off), 20)
# Disable existing regions of the camera
for region in camera_np.node().get_display_regions():
region.set_active(False)
# Remove the existing display region of the camera
for region in self._source_window.get_display_regions():
if region.get_camera() == camera_np:
self._source_window.remove_display_region(region)
camera_np.node().set_initial_state(initial_state.get_state())
self._source_region.set_camera(camera_np)
self._internal_buffer.disable_clears()
self._source_region.disable_clears()
self._source_region.set_active(True)
self._source_region.set_sort(20)
        # Re-enable depth-clear, usually desirable
self._source_region.set_clear_depth_active(True)
self._source_region.set_clear_depth(1.0)
self._active = True
def prepare_buffer(self):
""" Prepares the target to render to an offscreen buffer """
self._create_buffer()
self._active = True
def present_on_screen(self):
""" Prepares the target to render on the main window, to present the
final rendered image """
self._source_region = PostProcessRegion.make(self._source_window)
self._source_region.set_sort(5)
def cleanup(self):
""" Deletes this buffer, restoring the previous state """
self._internal_buffer.clear_render_textures()
self.engine.remove_window(self._internal_buffer)
self._active = False
for target in itervalues(self._targets):
target.release_all()
def set_clear_color(self, *args):
""" Sets the clear color """
self._internal_buffer.set_clear_color_active(True)
self._internal_buffer.set_clear_color(Vec4(*args))
@setter
def instance_count(self, count):
""" Sets the instance count """
self._source_region.set_instance_count(count)
def _create_buffer(self):
""" Internal method to create the buffer object """
if self._source_window == Globals.base.win:
w, h = Globals.resolution.x, Globals.resolution.y
else:
w, h = self._source_window.get_x_size(), self._source_window.get_y_size()
if self._size.x < 0:
self._size.x = (w - self._size.x - 1) // (-self._size.x)
if self._size.y < 0:
self._size.y = (h - self._size.y - 1) // (-self._size.y)
if not self._create():
self.error("Failed to create buffer!")
return False
if self.create_default_region:
self._source_region = PostProcessRegion.make(self._internal_buffer)
if max(self._color_bits) == 0:
                self._source_region.set_attrib(ColorWriteAttrib.make(ColorWriteAttrib.C_off), 1000)
def _setup_textures(self):
""" Prepares all bound textures """
for i in range(self._aux_count):
self._targets["aux_{}".format(i)] = Texture(
self.debug_name + "_aux{}".format(i))
for tex in itervalues(self._targets):
tex.set_wrap_u(SamplerState.WM_clamp)
tex.set_wrap_v(SamplerState.WM_clamp)
tex.set_anisotropic_degree(0)
tex.set_x_size(self._size.x)
tex.set_y_size(self._size.y)
tex.set_minfilter(SamplerState.FT_linear)
tex.set_magfilter(SamplerState.FT_linear)
def _make_properties(self):
""" Creates the window and buffer properties """
window_props = WindowProperties.size(self._size.x, self._size.y)
buffer_props = FrameBufferProperties()
if self._color_bits == (16, 16, 16, 0):
if RenderTarget.USE_R11G11B10:
buffer_props.set_rgba_bits(11, 11, 10, 0)
else:
buffer_props.set_rgba_bits(*self._color_bits)
elif 8 in self._color_bits:
            # When specifying 8 bits, request 1 bit instead; this is a workaround
            # for legacy logic in Panda
buffer_props.set_rgba_bits(*[i if i != 8 else 1 for i in self._color_bits])
else:
buffer_props.set_rgba_bits(*self._color_bits)
buffer_props.set_accum_bits(0)
buffer_props.set_stencil_bits(0)
buffer_props.set_back_buffers(0)
buffer_props.set_coverage_samples(0)
buffer_props.set_depth_bits(self._depth_bits)
if self._depth_bits == 32:
buffer_props.set_float_depth(True)
buffer_props.set_float_color(max(self._color_bits) > 8)
buffer_props.set_force_hardware(True)
buffer_props.set_multisamples(0)
buffer_props.set_srgb_color(False)
buffer_props.set_stereo(False)
buffer_props.set_stencil_bits(0)
if self._aux_bits == 8:
buffer_props.set_aux_rgba(self._aux_count)
elif self._aux_bits == 16:
buffer_props.set_aux_hrgba(self._aux_count)
elif self._aux_bits == 32:
buffer_props.set_aux_float(self._aux_count)
else:
self.error("Invalid aux bits")
return window_props, buffer_props
def _create(self):
""" Creates the internally used buffer """
self._setup_textures()
window_props, buffer_props = self._make_properties()
self._internal_buffer = self.engine.make_output(
self._source_window.get_pipe(), self.debug_name, 1,
buffer_props, window_props, GraphicsPipe.BF_refuse_window,
self._source_window.get_gsg(), self._source_window)
if not self._internal_buffer:
self.error("Failed to create buffer")
return
if self._depth_bits:
self._internal_buffer.add_render_texture(
self.depth_tex, GraphicsOutput.RTM_bind_or_copy,
GraphicsOutput.RTP_depth)
if max(self._color_bits) > 0:
self._internal_buffer.add_render_texture(
self.color_tex, GraphicsOutput.RTM_bind_or_copy,
GraphicsOutput.RTP_color)
aux_prefix = {
8: "RTP_aux_rgba_{}",
16: "RTP_aux_hrgba_{}",
32: "RTP_aux_float_{}",
}[self._aux_bits]
for i in range(self._aux_count):
target_mode = getattr(GraphicsOutput, aux_prefix.format(i))
self._internal_buffer.add_render_texture(
self.aux_tex[i], GraphicsOutput.RTM_bind_or_copy, target_mode)
sort = -300 + RenderTarget.NUM_ALLOCATED_BUFFERS * 10
RenderTarget.NUM_ALLOCATED_BUFFERS += 1
self._internal_buffer.set_sort(sort)
self._internal_buffer.disable_clears()
self._internal_buffer.get_display_region(0).disable_clears()
self._internal_buffer.get_overlay_display_region().disable_clears()
self._internal_buffer.get_overlay_display_region().set_active(False)
RenderTarget.REGISTERED_TARGETS.append(self)
return True
| |
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'User.session_nonce'
db.add_column('auth_user', 'session_nonce',
self.gf('django.db.models.fields.CharField')(max_length=12, null=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'User.session_nonce'
db.delete_column('auth_user', 'session_nonce')
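    # Roughly equivalent SQL for forwards() (illustrative and backend-dependent):
    #
    #   ALTER TABLE auth_user ADD COLUMN session_nonce varchar(12) NULL;
    #
    # backwards() simply drops the column again.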
models = {
'sentry.activity': {
'Meta': {'object_name': 'Activity'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.apikey': {
'Meta': {'object_name': 'ApiKey'},
'allowed_origins': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'default': "'Default'", 'max_length': '64', 'blank': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Organization']"}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.apitoken': {
'Meta': {'object_name': 'ApiToken'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiKey']", 'null': 'True'}),
'scopes': ('django.db.models.fields.BigIntegerField', [], {'default': 'None'}),
'token': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.auditlogentry': {
'Meta': {'object_name': 'AuditLogEntry'},
'actor': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'audit_actors'", 'null': 'True', 'to': "orm['sentry.User']"}),
'actor_key': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.ApiKey']", 'null': 'True', 'blank': 'True'}),
'actor_label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'target_object': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'target_user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'audit_targets'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.authenticator': {
'Meta': {'unique_together': "(('user', 'type'),)", 'object_name': 'Authenticator', 'db_table': "'auth_authenticator'"},
'config': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {}),
'created_at': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'last_used_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.authidentity': {
'Meta': {'unique_together': "(('auth_provider', 'ident'), ('auth_provider', 'user'))", 'object_name': 'AuthIdentity'},
'auth_provider': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.AuthProvider']"}),
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'last_synced': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_verified': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.authprovider': {
'Meta': {'object_name': 'AuthProvider'},
'config': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'default_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'default_role': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50'}),
'default_teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'blank': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_sync': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']", 'unique': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'sync_time': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'})
},
'sentry.broadcast': {
'Meta': {'object_name': 'Broadcast'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_expires': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2016, 11, 9, 0, 0)', 'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'upstream_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'})
},
'sentry.broadcastseen': {
'Meta': {'unique_together': "(('broadcast', 'user'),)", 'object_name': 'BroadcastSeen'},
'broadcast': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Broadcast']"}),
'date_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.commit': {
'Meta': {'unique_together': "(('repository_id', 'key'),)", 'object_name': 'Commit', 'index_together': "(('repository_id', 'date_added'),)"},
'author': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.CommitAuthor']", 'null': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'message': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'repository_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.commitauthor': {
'Meta': {'unique_together': "(('organization_id', 'email'),)", 'object_name': 'CommitAuthor'},
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'})
},
'sentry.counter': {
'Meta': {'object_name': 'Counter', 'db_table': "'sentry_projectcounter'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'unique': 'True'}),
'value': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.dsymbundle': {
'Meta': {'object_name': 'DSymBundle'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.DSymObject']"}),
'sdk': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.DSymSDK']"})
},
'sentry.dsymobject': {
'Meta': {'object_name': 'DSymObject'},
'cpu_name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object_path': ('django.db.models.fields.TextField', [], {'db_index': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36', 'db_index': 'True'}),
'vmaddr': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'vmsize': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'})
},
'sentry.dsymsdk': {
'Meta': {'object_name': 'DSymSDK', 'index_together': "[('version_major', 'version_minor', 'version_patchlevel', 'version_build')]"},
'dsym_type': ('django.db.models.fields.CharField', [], {'max_length': '20', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'sdk_name': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'version_build': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'version_major': ('django.db.models.fields.IntegerField', [], {}),
'version_minor': ('django.db.models.fields.IntegerField', [], {}),
'version_patchlevel': ('django.db.models.fields.IntegerField', [], {})
},
'sentry.dsymsymbol': {
'Meta': {'unique_together': "[('object', 'address')]", 'object_name': 'DSymSymbol'},
'address': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.DSymObject']"}),
'symbol': ('django.db.models.fields.TextField', [], {})
},
'sentry.environment': {
'Meta': {'unique_together': "(('project_id', 'name'),)", 'object_name': 'Environment'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.event': {
'Meta': {'unique_together': "(('project_id', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'", 'index_together': "(('group_id', 'datetime'),)"},
'data': ('sentry.db.models.fields.node.NodeField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'time_spent': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'null': 'True'})
},
'sentry.eventmapping': {
'Meta': {'unique_together': "(('project_id', 'event_id'),)", 'object_name': 'EventMapping'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.eventtag': {
'Meta': {'unique_together': "(('event_id', 'key_id', 'value_id'),)", 'object_name': 'EventTag', 'index_together': "(('project_id', 'key_id', 'value_id'), ('group_id', 'key_id', 'value_id'))"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {}),
'value_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.eventuser': {
'Meta': {'unique_together': "(('project', 'ident'), ('project', 'hash'))", 'object_name': 'EventUser', 'index_together': "(('project', 'email'), ('project', 'username'), ('project', 'ip_address'))"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'}),
'ip_address': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True'})
},
'sentry.file': {
'Meta': {'object_name': 'File'},
'blob': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'legacy_blob'", 'null': 'True', 'to': "orm['sentry.FileBlob']"}),
'blobs': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.FileBlob']", 'through': "orm['sentry.FileBlobIndex']", 'symmetrical': 'False'}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True'}),
'headers': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'path': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'size': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.fileblob': {
'Meta': {'object_name': 'FileBlob'},
'checksum': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'path': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'size': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'null': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'})
},
'sentry.fileblobindex': {
'Meta': {'unique_together': "(('file', 'blob', 'offset'),)", 'object_name': 'FileBlobIndex'},
'blob': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.FileBlob']"}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'offset': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {})
},
'sentry.globaldsymfile': {
'Meta': {'object_name': 'GlobalDSymFile'},
'cpu_name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object_name': ('django.db.models.fields.TextField', [], {}),
'uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '36'})
},
'sentry.group': {
'Meta': {'unique_together': "(('project', 'short_id'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'", 'index_together': "(('project', 'first_release'),)"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'first_release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']", 'null': 'True', 'on_delete': 'models.PROTECT'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'score': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'short_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'time_spent_total': ('sentry.db.models.fields.bounded.BoundedIntegerField', [], {'default': '0'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '1', 'db_index': 'True'})
},
'sentry.groupassignee': {
'Meta': {'object_name': 'GroupAssignee', 'db_table': "'sentry_groupasignee'"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'unique': 'True', 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_assignee_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupbookmark': {
'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupemailthread': {
'Meta': {'unique_together': "(('email', 'group'), ('email', 'msgid'))", 'object_name': 'GroupEmailThread'},
'date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'groupemail_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'msgid': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'groupemail_set'", 'to': "orm['sentry.Project']"})
},
'sentry.grouphash': {
'Meta': {'unique_together': "(('project', 'hash'),)", 'object_name': 'GroupHash'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'})
},
'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.groupredirect': {
'Meta': {'object_name': 'GroupRedirect'},
'group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'previous_group_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'unique': 'True'})
},
'sentry.grouprelease': {
'Meta': {'unique_together': "(('group_id', 'release_id', 'environment'),)", 'object_name': 'GroupRelease'},
'environment': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '64'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'release_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'})
},
'sentry.groupresolution': {
'Meta': {'object_name': 'GroupResolution'},
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'unique': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.grouprulestatus': {
'Meta': {'unique_together': "(('rule', 'group'),)", 'object_name': 'GroupRuleStatus'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_active': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'rule': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Rule']"}),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
},
'sentry.groupseen': {
'Meta': {'unique_together': "(('user', 'group'),)", 'object_name': 'GroupSeen'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'db_index': 'False'})
},
'sentry.groupsnooze': {
'Meta': {'object_name': 'GroupSnooze'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'unique': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'until': ('django.db.models.fields.DateTimeField', [], {})
},
'sentry.groupsubscription': {
'Meta': {'unique_together': "(('group', 'user'),)", 'object_name': 'GroupSubscription'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'subscription_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'subscription_set'", 'to': "orm['sentry.Project']"}),
'reason': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.grouptagkey': {
'Meta': {'unique_together': "(('project', 'group', 'key'),)", 'object_name': 'GroupTagKey'},
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.grouptagvalue': {
'Meta': {'unique_together': "(('group', 'key', 'value'),)", 'object_name': 'GroupTagValue', 'db_table': "'sentry_messagefiltervalue'", 'index_together': "(('project', 'key', 'value', 'last_seen'),)"},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'grouptag'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'grouptag'", 'null': 'True', 'to': "orm['sentry.Project']"}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.lostpasswordhash': {
'Meta': {'object_name': 'LostPasswordHash'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'unique': 'True'})
},
'sentry.option': {
'Meta': {'object_name': 'Option'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.organization': {
'Meta': {'object_name': 'Organization'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'default_role': ('django.db.models.fields.CharField', [], {'default': "'member'", 'max_length': '32'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'org_memberships'", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMember']", 'to': "orm['sentry.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.organizationaccessrequest': {
'Meta': {'unique_together': "(('team', 'member'),)", 'object_name': 'OrganizationAccessRequest'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'member': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.OrganizationMember']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.organizationmember': {
'Meta': {'unique_together': "(('organization', 'user'), ('organization', 'email'))", 'object_name': 'OrganizationMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'flags': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'has_global_access': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Organization']"}),
'role': ('django.db.models.fields.CharField', [], {'default': "'member'", 'max_length': '32'}),
'teams': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Team']", 'symmetrical': 'False', 'through': "orm['sentry.OrganizationMemberTeam']", 'blank': 'True'}),
'token': ('django.db.models.fields.CharField', [], {'max_length': '64', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'type': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '50', 'blank': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'sentry_orgmember_set'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.organizationmemberteam': {
'Meta': {'unique_together': "(('team', 'organizationmember'),)", 'object_name': 'OrganizationMemberTeam', 'db_table': "'sentry_organizationmember_teams'"},
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'organizationmember': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.OrganizationMember']"}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.organizationonboardingtask': {
'Meta': {'unique_together': "(('organization', 'task'),)", 'object_name': 'OrganizationOnboardingTask'},
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_completed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'task': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.organizationoption': {
'Meta': {'unique_together': "(('organization', 'key'),)", 'object_name': 'OrganizationOption', 'db_table': "'sentry_organizationoptions'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.project': {
'Meta': {'unique_together': "(('team', 'slug'), ('organization', 'slug'))", 'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'first_event': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'forced_color': ('django.db.models.fields.CharField', [], {'max_length': '6', 'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'team': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"})
},
'sentry.projectbookmark': {
'Meta': {'unique_together': "(('project_id', 'user'),)", 'object_name': 'ProjectBookmark'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {'null': 'True', 'blank': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.projectdsymfile': {
'Meta': {'unique_together': "(('project', 'uuid'),)", 'object_name': 'ProjectDSymFile'},
'cpu_name': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'object_name': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'max_length': '36'})
},
'sentry.projectkey': {
'Meta': {'object_name': 'ProjectKey'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'roles': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.projectplatform': {
'Meta': {'unique_together': "(('project_id', 'platform'),)", 'object_name': 'ProjectPlatform'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedBigIntegerField', [], {})
},
'sentry.release': {
'Meta': {'unique_together': "(('project', 'version'),)", 'object_name': 'Release'},
'data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'date_released': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_started': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'new_groups': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'owner': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True', 'blank': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'ref': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.releasecommit': {
'Meta': {'unique_together': "(('release', 'commit'), ('release', 'order'))", 'object_name': 'ReleaseCommit'},
'commit': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Commit']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'order': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.releaseenvironment': {
'Meta': {'unique_together': "(('project_id', 'release_id', 'environment_id'),)", 'object_name': 'ReleaseEnvironment', 'db_table': "'sentry_environmentrelease'"},
'environment_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'project_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'}),
'release_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'})
},
'sentry.releasefile': {
'Meta': {'unique_together': "(('release', 'ident'),)", 'object_name': 'ReleaseFile'},
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']"}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'name': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'release': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Release']"})
},
'sentry.repository': {
'Meta': {'unique_together': "(('organization_id', 'name'),)", 'object_name': 'Repository'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'organization_id': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'db_index': 'True'})
},
'sentry.rule': {
'Meta': {'object_name': 'Rule'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.savedsearch': {
'Meta': {'unique_together': "(('project', 'name'),)", 'object_name': 'SavedSearch'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'query': ('django.db.models.fields.TextField', [], {})
},
'sentry.savedsearchuserdefault': {
'Meta': {'unique_together': "(('project', 'user'),)", 'object_name': 'SavedSearchUserDefault', 'db_table': "'sentry_savedsearch_userdefault'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'savedsearch': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.SavedSearch']"}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"})
},
'sentry.tagkey': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'TagKey', 'db_table': "'sentry_filterkey'"},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'values_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.tagvalue': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'TagValue', 'db_table': "'sentry_filtervalue'"},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.team': {
'Meta': {'unique_together': "(('organization', 'slug'),)", 'object_name': 'Team'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'organization': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Organization']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
'status': ('sentry.db.models.fields.bounded.BoundedPositiveIntegerField', [], {'default': '0'})
},
'sentry.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_password_expired': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_password_change': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_column': "'first_name'", 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'session_nonce': ('django.db.models.fields.CharField', [], {'max_length': '12', 'null': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
},
'sentry.useravatar': {
'Meta': {'object_name': 'UserAvatar'},
'avatar_type': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'}),
'file': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.File']", 'unique': 'True', 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'avatar'", 'unique': 'True', 'to': "orm['sentry.User']"})
},
'sentry.useremail': {
'Meta': {'unique_together': "(('user', 'email'),)", 'object_name': 'UserEmail'},
'date_hash_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_verified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'related_name': "'emails'", 'to': "orm['sentry.User']"}),
'validation_hash': ('django.db.models.fields.CharField', [], {'default': "u'o1Fm5V34FBi7vEcjC8i7yx1ycjcNaOCO'", 'max_length': '32'})
},
'sentry.useroption': {
'Meta': {'unique_together': "(('user', 'project', 'key'),)", 'object_name': 'UserOption'},
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'user': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.userreport': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'UserReport', 'index_together': "(('project', 'event_id'), ('project', 'date_added'))"},
'comments': ('django.db.models.fields.TextField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'group': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.bounded.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'project': ('sentry.db.models.fields.foreignkey.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
}
}
complete_apps = ['sentry']
| |
from lims.shared.loggedintestcase import LoggedInTestCase
from rest_framework import status
from .models import Address
class AddressTestCase(LoggedInTestCase):
def setUp(self):
super(AddressTestCase, self).setUp()
self._joeBloggsAddress = \
Address.objects.create(institution_name="Beetroot Institute",
address_1="12 Muddy Field",
address_2="Long Lane",
city="Norwich",
postcode="NR1 1AA",
country="UK",
user=self._joeBloggs)
self._janeDoeAddress = \
Address.objects.create(institution_name="Onion Institute",
address_1="110a Deep Dark Wood",
address_2="Bridge Street",
city="Ipswich",
postcode="IP1 1AA",
country="UK",
user=self._janeDoe)
def test_presets(self):
self.assertIs(Address.objects.filter(institution_name="Beetroot Institute").exists(), True)
address1 = Address.objects.get(institution_name="Beetroot Institute")
self.assertEqual(address1.institution_name, "Beetroot Institute")
self.assertEqual(address1.address_1, "12 Muddy Field")
self.assertEqual(address1.address_2, "Long Lane")
self.assertEqual(address1.city, "Norwich")
self.assertEqual(address1.postcode, "NR1 1AA")
self.assertEqual(address1.country, "UK")
self.assertEqual(address1.user, self._joeBloggs)
self.assertIs(Address.objects.filter(institution_name="Onion Institute").exists(), True)
address2 = Address.objects.get(institution_name="Onion Institute")
self.assertEqual(address2.institution_name, "Onion Institute")
self.assertEqual(address2.address_1, "110a Deep Dark Wood")
self.assertEqual(address2.address_2, "Bridge Street")
self.assertEqual(address2.city, "Ipswich")
self.assertEqual(address2.postcode, "IP1 1AA")
self.assertEqual(address2.country, "UK")
self.assertEqual(address2.user, self._janeDoe)
def test_access_anonymous(self):
self._asAnonymous()
response = self._client.get('/addresses/')
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
response = self._client.get('/addresses/%d/' % self._joeBloggsAddress.id)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_access_invalid(self):
self._asInvalid()
response = self._client.get('/addresses/')
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
response = self._client.get('/addresses/%d/' % self._joeBloggsAddress.id)
self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
def test_user_list(self):
# Others not permitted
self._asJoeBloggs()
response = self._client.get('/addresses/')
self.assertEqual(response.status_code, status.HTTP_200_OK)
addresses = response.data
self.assertEqual(len(addresses["results"]), 1)
address1 = addresses["results"][0]
self.assertEqual(address1["institution_name"], "Beetroot Institute")
def test_user_view_own(self):
self._asJoeBloggs()
response = self._client.get('/addresses/%d/' % self._joeBloggsAddress.id)
self.assertEqual(response.status_code, status.HTTP_200_OK)
address1 = response.data
self.assertEqual(address1["institution_name"], "Beetroot Institute")
def test_user_view_other(self):
# Others not permitted
self._asJaneDoe()
response = self._client.get('/addresses/%d/' % self._janeDoeAddress.id)
self.assertEqual(response.status_code, status.HTTP_200_OK)
response = self._client.get('/addresses/%d/' % self._joeBloggsAddress.id)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_admin_list(self):
self._asAdmin()
response = self._client.get('/addresses/')
self.assertEqual(response.status_code, status.HTTP_200_OK)
addresses = response.data
self.assertEqual(len(addresses["results"]), 2)
def test_admin_view_any(self):
self._asAdmin()
response = self._client.get('/addresses/%d/' % self._joeBloggsAddress.id)
self.assertEqual(response.status_code, status.HTTP_200_OK)
address1 = response.data
self.assertEqual(address1["institution_name"], "Beetroot Institute")
def test_user_create_own(self):
self._asJaneDoe()
new_address = {"institution_name": "Leek Institute",
"address_1": "45 Mole Hill",
"address_2": "High St",
"city": "Cardiff",
"postcode": "CF1 1AA",
"country": "Wales",
"user": self._janeDoe.username}
response = self._client.post("/addresses/", new_address, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(Address.objects.count(), 3)
self.assertIs(Address.objects.filter(institution_name="Leek Institute").exists(), True)
address = Address.objects.get(institution_name="Leek Institute")
self.assertEqual(address.institution_name, "Leek Institute")
self.assertEqual(address.address_1, "45 Mole Hill")
self.assertEqual(address.address_2, "High St")
self.assertEqual(address.city, "Cardiff")
self.assertEqual(address.postcode, "CF1 1AA")
self.assertEqual(address.country, "Wales")
self.assertEqual(address.user, self._janeDoe)
# Other user still sees just theirs but we see both our old and new ones
self._asJoeBloggs()
response = self._client.get('/addresses/')
self.assertEqual(response.status_code, status.HTTP_200_OK)
addresses = response.data
self.assertEqual(len(addresses["results"]), 1)
self._asJaneDoe()
response = self._client.get('/addresses/')
self.assertEqual(response.status_code, status.HTTP_200_OK)
addresses = response.data
self.assertEqual(len(addresses["results"]), 2)
def test_user_create_other(self):
# Others not permitted
self._asJaneDoe()
new_address = {"institution_name": "Jam Ltd.",
"address_1": "Sticky House",
"address_2": "Low St",
"city": "Hull",
"postcode": "H1 1AA",
"country": "UK",
"user": self._joeBloggs.id}
response = self._client.post("/addresses/", new_address, format='json')
self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)
self.assertIs(Address.objects.filter(institution_name="Jam Ltd.").exists(), False)
def test_admin_create_any(self):
self._asAdmin()
new_address = {"institution_name": "Leek Institute",
"address_1": "45 Mole Hill",
"address_2": "High St",
"city": "Cardiff",
"postcode": "CF1 1AA",
"country": "Wales",
"user": self._janeDoe.username}
response = self._client.post("/addresses/", new_address, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertEqual(Address.objects.count(), 3)
self.assertIs(Address.objects.filter(institution_name="Leek Institute").exists(), True)
address = Address.objects.get(institution_name="Leek Institute")
self.assertEqual(address.institution_name, "Leek Institute")
self.assertEqual(address.address_1, "45 Mole Hill")
self.assertEqual(address.address_2, "High St")
self.assertEqual(address.city, "Cardiff")
self.assertEqual(address.postcode, "CF1 1AA")
self.assertEqual(address.country, "Wales")
self.assertEqual(address.user, self._janeDoe)
        # Joe still sees just his own address; Jane now sees both her old and the new one
self._asJoeBloggs()
response = self._client.get('/addresses/')
self.assertEqual(response.status_code, status.HTTP_200_OK)
addresses = response.data
self.assertEqual(len(addresses["results"]), 1)
self._asJaneDoe()
response = self._client.get('/addresses/')
self.assertEqual(response.status_code, status.HTTP_200_OK)
addresses = response.data
self.assertEqual(len(addresses["results"]), 2)
def test_user_edit_own(self):
self._asJaneDoe()
updated_address = {"institution_name": "Onion Institute Revised"}
response = self._client.patch("/addresses/%d/" % self._janeDoeAddress.id,
updated_address, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertIs(Address.objects.filter(institution_name="Onion Institute Revised").exists(),
True)
address = Address.objects.get(institution_name="Onion Institute Revised")
self.assertEqual(address.institution_name, "Onion Institute Revised")
self.assertEqual(address.address_1, "110a Deep Dark Wood")
self.assertEqual(address.address_2, "Bridge Street")
self.assertEqual(address.city, "Ipswich")
self.assertEqual(address.postcode, "IP1 1AA")
self.assertEqual(address.country, "UK")
self.assertEqual(address.user, self._janeDoe)
def test_user_edit_other(self):
# Others not permitted
self._asJoeBloggs()
updated_address = {"institution_name": "Toast Co."}
response = self._client.put("/addresses/%d/" % self._janeDoeAddress.id,
updated_address, format='json')
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
self.assertIs(Address.objects.filter(institution_name="Onion Institute").exists(), True)
self.assertIs(Address.objects.filter(institution_name="Toast Co.").exists(), False)
def test_admin_edit_any(self):
self._asAdmin()
updated_address = {"institution_name": "Onion Institute Revised"}
response = self._client.patch("/addresses/%d/" % self._janeDoeAddress.id,
updated_address, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertIs(Address.objects.filter(institution_name="Onion Institute Revised").exists(),
True)
address = Address.objects.get(institution_name="Onion Institute Revised")
self.assertEqual(address.institution_name, "Onion Institute Revised")
self.assertEqual(address.address_1, "110a Deep Dark Wood")
self.assertEqual(address.address_2, "Bridge Street")
self.assertEqual(address.city, "Ipswich")
self.assertEqual(address.postcode, "IP1 1AA")
self.assertEqual(address.country, "UK")
self.assertEqual(address.user, self._janeDoe)
def test_user_delete_own(self):
self._asJoeBloggs()
response = self._client.delete("/addresses/%d/" % self._joeBloggsAddress.id)
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
self.assertIs(Address.objects.filter(institution_name="Beetroot Institute").exists(), False)
def test_user_delete_other(self):
# Others not permitted
self._asJaneDoe()
response = self._client.delete("/addresses/%d/" % self._joeBloggsAddress.id)
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
self.assertIs(Address.objects.filter(institution_name="Beetroot Institute").exists(), True)
def test_admin_delete_any(self):
self._asAdmin()
response = self._client.delete("/addresses/%d/" % self._joeBloggsAddress.id)
self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)
self.assertIs(Address.objects.filter(institution_name="Beetroot Institute").exists(), False)
def _setup_audittrail(self):
self._asAdmin()
new_address = {"institution_name": "Leek Institute",
"address_1": "45 Mole Hill",
"address_2": "High St",
"city": "Cardiff",
"postcode": "CF1 1AA",
"country": "Wales",
"user": self._janeDoe.username}
response = self._client.post("/addresses/", new_address, format='json')
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
address = Address.objects.get(institution_name="Leek Institute")
updated_address = {"institution_name": "Onion Institute Revised"}
response = self._client.patch("/addresses/%d/" % address.id,
updated_address, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_admin_audit_history(self):
self._setup_audittrail()
address = Address.objects.get(institution_name="Onion Institute Revised")
response = self._client.get("/addresses/%d/history/" % address.id)
self.assertEqual(response.status_code, status.HTTP_200_OK)
history = response.data
self.assertEqual(len(history), 2)
v0 = history[0]
v1 = history[1]
self.assertEqual(v0["version"], 0)
self.assertEqual(v0["data"]["institution_name"], "Leek Institute")
self.assertEqual(v1["version"], 1)
self.assertEqual(v1["data"]["institution_name"], "Onion Institute Revised")
def test_admin_audit_compare(self):
self._setup_audittrail()
address = Address.objects.get(institution_name="Onion Institute Revised")
response = self._client.get("/addresses/%d/compare/?version1=0&version2=1" % address.id)
self.assertEqual(response.status_code, status.HTTP_200_OK)
changes = response.data
self.assertEqual(len(changes), 1)
self.assertEqual(changes["institution_name"]["version1"], "Leek Institute")
self.assertEqual(changes["institution_name"]["version2"], "Onion Institute Revised")
def test_admin_audit_revert(self):
self._setup_audittrail()
address = Address.objects.get(institution_name="Onion Institute Revised")
response = self._client.post("/addresses/%d/revert/?version=0" % address.id, format='json')
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertIs(Address.objects.filter(institution_name="Onion Institute Revised").exists(),
False)
self.assertIs(Address.objects.filter(institution_name="Leek Institute").exists(), True)
| |
"""
Module to interact with the ChromeCast via protobuf-over-socket.
Big thanks goes out to Fred Clift <fred@clift.org> who built the first
version of this code: https://github.com/minektur/chromecast-python-poc.
Without him this would not have been possible.
"""
# Pylint does not understand the protobuf objects correctly
# pylint: disable=no-member, too-many-lines
import errno
import json
import logging
import select
import socket
import ssl
import sys
import threading
import time
from collections import namedtuple
from struct import pack, unpack
from . import cast_channel_pb2
from .controllers import BaseController
from .controllers.media import MediaController
from .const import CAST_TYPE_AUDIO, CAST_TYPE_CHROMECAST, CAST_TYPE_GROUP
from .discovery import get_info_from_service, get_host_from_service_info
from .error import (
ChromecastConnectionError,
UnsupportedNamespace,
NotConnected,
PyChromecastStopped,
)
NS_CONNECTION = "urn:x-cast:com.google.cast.tp.connection"
NS_RECEIVER = "urn:x-cast:com.google.cast.receiver"
NS_HEARTBEAT = "urn:x-cast:com.google.cast.tp.heartbeat"
PLATFORM_DESTINATION_ID = "receiver-0"
MESSAGE_TYPE = "type"
TYPE_PING = "PING"
TYPE_RECEIVER_STATUS = "RECEIVER_STATUS"
TYPE_PONG = "PONG"
TYPE_CONNECT = "CONNECT"
TYPE_CLOSE = "CLOSE"
TYPE_GET_STATUS = "GET_STATUS"
TYPE_LAUNCH = "LAUNCH"
TYPE_LAUNCH_ERROR = "LAUNCH_ERROR"
TYPE_LOAD = "LOAD"
# The socket connection is being setup
CONNECTION_STATUS_CONNECTING = "CONNECTING"
# The socket connection was complete
CONNECTION_STATUS_CONNECTED = "CONNECTED"
# The socket connection has been disconnected
CONNECTION_STATUS_DISCONNECTED = "DISCONNECTED"
# Connecting to socket failed (after a CONNECTION_STATUS_CONNECTING)
CONNECTION_STATUS_FAILED = "FAILED"
# Failed to resolve service name
CONNECTION_STATUS_FAILED_RESOLVE = "FAILED_RESOLVE"
# The socket connection was lost and needs to be retried
CONNECTION_STATUS_LOST = "LOST"
APP_ID = "appId"
REQUEST_ID = "requestId"
SESSION_ID = "sessionId"
ERROR_REASON = "reason"
HB_PING_TIME = 10
HB_PONG_TIME = 10
POLL_TIME_BLOCKING = 5.0
POLL_TIME_NON_BLOCKING = 0.01
TIMEOUT_TIME = 30
RETRY_TIME = 5
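# Illustrative sketch (not used by the library itself): objects passed to
# SocketClient.register_connection_listener() only need to implement
# new_connection_status(status), where status is the ConnectionStatus
# namedtuple defined below carrying one of the CONNECTION_STATUS_* values
# above together with a NetworkAddress.
class _ExampleConnectionListener(object):
    """ Example listener that simply logs every connection status change. """

    def new_connection_status(self, status):
        """ Called by SocketClient whenever the connection status changes. """
        logging.getLogger(__name__).debug(
            "Cast connection is now %s (%s)", status.status, status.address
        )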
class InterruptLoop(Exception):
""" The chromecast has been manually stopped. """
def _json_from_message(message):
""" Parses a PB2 message into JSON format. """
try:
return json.loads(message.payload_utf8)
except ValueError:
logger = logging.getLogger(__name__)
logger.warning(
"Ignoring invalid json in namespace %s: %s",
message.namespace,
message.payload_utf8,
)
return {}
def _message_to_string(message, data=None):
""" Gives a string representation of a PB2 message. """
if data is None:
data = _json_from_message(message)
return "Message {} from {} to {}: {}".format(
message.namespace, message.source_id, message.destination_id, data
)
if sys.version_info >= (3, 0):
def _json_to_payload(data):
""" Encodes a python value into JSON format. """
return json.dumps(data, ensure_ascii=False).encode("utf8")
else:
def _json_to_payload(data):
""" Encodes a python value into JSON format. """
return json.dumps(data, ensure_ascii=False)
def _is_ssl_timeout(exc):
    """ Returns True if the exception is for an SSL timeout. """
    # ssl.SSLError has no .message attribute on Python 3; fall back to args.
    message = getattr(exc, "message", None) or (exc.args[0] if exc.args else "")
    return message in (
        "The handshake operation timed out",
        "The write operation timed out",
        "The read operation timed out",
    )
NetworkAddress = namedtuple("NetworkAddress", ["address", "port"])
ConnectionStatus = namedtuple("ConnectionStatus", ["status", "address"])
CastStatus = namedtuple(
"CastStatus",
[
"is_active_input",
"is_stand_by",
"volume_level",
"volume_muted",
"app_id",
"display_name",
"namespaces",
"session_id",
"transport_id",
"status_text",
"icon_url",
],
)
LaunchFailure = namedtuple("LaunchStatus", ["reason", "app_id", "request_id"])
# pylint: disable=too-many-instance-attributes
class SocketClient(threading.Thread):
"""
Class to interact with a Chromecast through a socket.
:param host: The host to connect to.
:param port: The port to use when connecting to the device, set to None to
use the default of 8009. Special devices such as Cast Groups
may return a different port number so we need to use that.
:param cast_type: The type of chromecast to connect to, see
dial.CAST_TYPE_* for types.
:param tries: Number of retries to perform if the connection fails.
                  None for infinite retries.
:param timeout: A floating point number specifying the socket timeout in
seconds. None means to use the default which is 30 seconds.
:param retry_wait: A floating point number specifying how many seconds to
wait between each retry. None means to use the default
which is 5 seconds.
:param services: A list of mDNS services to try to connect to. If present,
parameters host and port are ignored and host and port are
instead resolved through mDNS. The list of services may be
modified, for example if speaker group leadership is handed
over. SocketClient will catch modifications to the list when
attempting reconnect.
:param zconf: A zeroconf instance, needed if a list of services is passed.
The zeroconf instance may be obtained from the browser returned by
pychromecast.start_discovery().
"""
def __init__(self, host, port=None, cast_type=CAST_TYPE_CHROMECAST, **kwargs):
tries = kwargs.pop("tries", None)
timeout = kwargs.pop("timeout", None)
retry_wait = kwargs.pop("retry_wait", None)
services = kwargs.pop("services", None)
zconf = kwargs.pop("zconf", None)
super(SocketClient, self).__init__()
self.daemon = True
self.logger = logging.getLogger(__name__)
self.retry_log_fun = self.logger.error
self._force_recon = False
self.cast_type = cast_type
self.fn = None # pylint:disable=invalid-name
self.tries = tries
self.timeout = timeout or TIMEOUT_TIME
self.retry_wait = retry_wait or RETRY_TIME
self.host = host
self.services = services or [None]
self.zconf = zconf
self.port = port or 8009
self.source_id = "sender-0"
self.stop = threading.Event()
# socketpair used to interrupt the worker thread
self.socketpair = socket.socketpair()
self.app_namespaces = []
self.destination_id = None
self.session_id = None
self._request_id = 0
# dict mapping requestId on threading.Event objects
self._request_callbacks = {}
self._open_channels = []
self.retries = {}
self.connecting = True
self.first_connection = True
self.socket = None
# dict mapping namespace on Controller objects
self._handlers = {}
self._connection_listeners = []
self.receiver_controller = ReceiverController(cast_type)
self.media_controller = MediaController()
self.heartbeat_controller = HeartbeatController()
self.register_handler(self.heartbeat_controller)
self.register_handler(ConnectionController())
self.register_handler(self.receiver_controller)
self.register_handler(self.media_controller)
self.receiver_controller.register_status_listener(self)
def initialize_connection(
self,
): # noqa: E501 pylint:disable=too-many-statements, too-many-branches
"""Initialize a socket to a Chromecast."""
if self.socket is not None:
self.socket.close()
self.socket = None
# Make sure nobody is blocking.
for callback in self._request_callbacks.values():
callback["event"].set()
self.app_namespaces = []
self.destination_id = None
self.session_id = None
self._request_id = 0
self._request_callbacks = {}
self._open_channels = []
self.connecting = True
def mdns_backoff(service, retry):
"""Exponentional backoff for service name mdns lookups."""
now = time.time()
retry["next_retry"] = now + retry["delay"]
retry["delay"] = min(retry["delay"] * 2, 300)
self.retries[service] = retry
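        # With the default retry_wait (RETRY_TIME = 5) the per-service delay
        # therefore doubles on every failed attempt: 5, 10, 20, 40, ... seconds,
        # capped at 300 seconds.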
# Prune retries dict
self.retries = {
key: self.retries[key]
for key in self.services
if (key is not None and key in self.retries)
}
for service in self.services.copy():
now = time.time()
retry = self.retries.get(
service, {"delay": self.retry_wait, "next_retry": now}
)
# If we're connecting to a named service, check if it's time
if service and now < retry["next_retry"]:
continue
try:
self.socket = new_socket()
self.socket.settimeout(self.timeout)
self._report_connection_status(
ConnectionStatus(
CONNECTION_STATUS_CONNECTING,
NetworkAddress(self.host, self.port),
)
)
# Resolve the service name. If service is None, we're
# connecting directly to a host name or IP-address
if service:
host = None
port = None
service_info = get_info_from_service(service, self.zconf)
host, port = get_host_from_service_info(service_info)
if host and port:
try:
self.fn = service_info.properties[b"fn"].decode("utf-8")
except (AttributeError, KeyError, UnicodeError):
pass
self.logger.debug(
"[%s(%s):%s] Resolved service %s to %s:%s",
self.fn or "",
self.host,
self.port,
service,
host,
port,
)
self.host = host
self.port = port
else:
self.logger.debug(
"[%s(%s):%s] Failed to resolve service %s",
self.fn or "",
self.host,
self.port,
service,
)
self._report_connection_status(
ConnectionStatus(
CONNECTION_STATUS_FAILED_RESOLVE,
NetworkAddress(service, None),
)
)
mdns_backoff(service, retry)
# If zeroconf fails to receive the necessary data,
# try next service
continue
self.logger.debug(
"[%s(%s):%s] Connecting to %s:%s",
self.fn or "",
self.host,
self.port,
self.host,
self.port,
)
self.socket.connect((self.host, self.port))
self.socket = ssl.wrap_socket(self.socket)
self.connecting = False
self._force_recon = False
# reset retries
self.retries = {}
self.curr_tries = self.tries
self._report_connection_status(
ConnectionStatus(
CONNECTION_STATUS_CONNECTED,
NetworkAddress(self.host, self.port),
)
)
self.receiver_controller.update_status()
self.heartbeat_controller.ping()
self.heartbeat_controller.reset()
if self.first_connection:
self.first_connection = False
self.logger.debug(
"[%s(%s):%s] Connected!",
self.fn or "",
self.host,
self.port,
)
else:
self.logger.info(
"[%s(%s):%s] Connection reestablished!",
self.fn or "",
self.host,
self.port,
)
return
except OSError as err:
self.connecting = True
self._report_connection_status(
ConnectionStatus(
CONNECTION_STATUS_FAILED,
NetworkAddress(self.host, self.port),
)
)
if service is not None:
self.retry_log_fun(
"[%s(%s):%s] Failed to connect to service %s, retrying in %.1fs",
self.fn or "",
self.host,
self.port,
service,
retry["delay"],
)
mdns_backoff(service, retry)
else:
self.retry_log_fun(
"[%s(%s):%s] Failed to connect, retrying in %.1fs",
self.fn or "",
self.host,
self.port,
self.retry_wait,
)
raise ChromecastConnectionError("Failed to connect")
def connect(self):
"""
This method is just needed for non-blocking reconnect after disconnect
"""
self.stop.clear()
def disconnect(self, blocking=True):
""" Disconnect socket connection to Chromecast device """
if blocking:
self.stop.set()
try:
# Write to the socket to interrupt the worker thread
self.socketpair[1].send(b"x")
except socket.error:
# The socketpair may already be closed during shutdown, ignore it
pass
else:
self._cleanup()
def register_handler(self, handler):
""" Register a new namespace handler. """
self._handlers[handler.namespace] = handler
handler.registered(self)
def new_cast_status(self, cast_status):
""" Called when a new cast status has been received. """
new_channel = self.destination_id != cast_status.transport_id
if new_channel:
self.disconnect_channel(self.destination_id)
self.app_namespaces = cast_status.namespaces
self.destination_id = cast_status.transport_id
self.session_id = cast_status.session_id
if new_channel:
# If any of the namespaces of the new app are supported
# we will automatically connect to it to receive updates
for namespace in self.app_namespaces:
if namespace in self._handlers:
self._ensure_channel_connected(self.destination_id)
self._handlers[namespace].channel_connected()
def _gen_request_id(self):
""" Generates a unique request id. """
self._request_id += 1
return self._request_id
@property
def is_connected(self):
"""
Returns True if the client is connected, False if it is stopped
(or trying to connect).
"""
return not self.connecting
@property
def is_stopped(self):
"""
Returns True if the connection has been stopped, False if it is
running.
"""
return self.stop.is_set()
def run(self):
"""
        Run the main loop thread, which connects to the socket and starts polling it.
"""
self.logger.debug("Thread started...")
self.curr_tries = self.tries
while not self.stop.is_set() and (
self.curr_tries is None or self.curr_tries > 0
):
try:
self.run_once(timeout=POLL_TIME_BLOCKING)
except ChromecastConnectionError as err:
if self.stop.is_set():
self.logger.error(
"[%s(%s):%s] Failed to connect: %s. aborting due to stop signal.",
self.fn or "",
self.host,
self.port,
err,
)
# Exit loop and cleanup things
break
# Only sleep if we have another retry remaining
if self.curr_tries is None or self.curr_tries > 1:
self.logger.debug(
"[%s(%s):%s] Not connected, sleeping for %.1fs. Services: %s",
self.fn or "",
self.host,
self.port,
self.retry_wait,
self.services,
)
time.sleep(self.retry_wait)
if self.curr_tries:
self.curr_tries -= 1
# log error only once
self.retry_log_fun = self.logger.debug
if self.curr_tries == 0:
self.stop.set()
self.logger.error(
"[%s(%s):%s] Failed to connect. No retries.",
self.fn or "",
self.host,
self.port,
)
raise err
except InterruptLoop as exc:
if self.stop.is_set():
self.logger.info(
"[%s(%s):%s] Stopped while reading message, disconnecting.",
self.fn or "",
self.host,
self.port,
)
else:
self.logger.error(
"[%s(%s):%s] Interruption caught without being stopped: %s",
self.fn or "",
self.host,
self.port,
exc,
)
# Exit loop due to interrupt
break
except ssl.SSLError as exc:
if exc.errno == ssl.SSL_ERROR_EOF:
if self.stop.is_set():
# Exit loop due to stop set
break
raise
except Exception: # pylint: disable=broad-except
self.logger.exception(
("[%s(%s):%s] Unhandled exception in worker thread"),
self.fn or "",
self.host,
self.port,
)
raise
self.logger.debug("Thread done...")
# Clean up
self._cleanup()
def run_once(self, timeout=POLL_TIME_NON_BLOCKING):
"""
Use run_once() in your own main loop after you
receive something on the socket (get_socket()).
"""
# pylint: disable=too-many-branches, too-many-return-statements
# do not check connection/reconnect if stop is set
if self.stop.is_set():
return
try:
self._check_connection()
except ChromecastConnectionError:
raise
if self.socket is None:
# wait for socket
return
# poll the socket, as well as the socketpair to allow us to be interrupted
rlist = [self.socket, self.socketpair[0]]
can_read, _, _ = select.select(rlist, [], [], timeout)
# read messages from chromecast
message = data = None
if self.socket in can_read and not self._force_recon:
try:
message = self._read_message()
except socket.error:
self._force_recon = True
self.logger.error(
"[%s(%s):%s] Error reading from socket.",
self.fn or "",
self.host,
self.port,
)
else:
data = _json_from_message(message)
if self.socketpair[0] in can_read:
# Clear the socket's buffer
self.socketpair[0].recv(128)
# If we are stopped after receiving a message we skip the message
# and tear down the connection
if self.stop.is_set():
return
if not message:
return
# See if any handlers will accept this message
self._route_message(message, data)
if REQUEST_ID in data:
callback = self._request_callbacks.pop(data[REQUEST_ID], None)
if callback is not None:
event = callback["event"]
callback["response"] = data
function = callback["function"]
event.set()
if function:
function(data)
return
def get_socket(self):
"""
        Returns the socket of the connection so you can use it in your own
main loop.
"""
return self.socket
def _check_connection(self):
"""
        Check whether the connection is active and, if not, (re)establish it.
"""
# check if we need to connect
if self.connecting:
try:
self.initialize_connection()
except ChromecastConnectionError:
if self.first_connection:
self._report_connection_status(
ConnectionStatus(
CONNECTION_STATUS_DISCONNECTED, NetworkAddress(self.host, self.port)
)
)
else:
raise
return
# check if connection is expired
reset = False
if self._force_recon:
self.logger.warning(
"[%s(%s):%s] Error communicating with socket, resetting connection",
self.fn or "",
self.host,
self.port,
)
reset = True
elif self.heartbeat_controller.is_expired():
self.logger.warning(
"[%s(%s):%s] Heartbeat timeout, resetting connection",
self.fn or "",
self.host,
self.port,
)
reset = True
if reset:
self.receiver_controller.disconnected()
for channel in self._open_channels:
self.disconnect_channel(channel)
self._report_connection_status(
ConnectionStatus(
CONNECTION_STATUS_LOST, NetworkAddress(self.host, self.port)
)
)
self.initialize_connection()
def _route_message(self, message, data):
""" Route message to any handlers on the message namespace """
# route message to handlers
if message.namespace in self._handlers:
# debug messages
if message.namespace != NS_HEARTBEAT:
self.logger.debug(
"[%s(%s):%s] Received: %s",
self.fn or "",
self.host,
self.port,
_message_to_string(message, data),
)
# message handlers
try:
handled = self._handlers[message.namespace].receive_message(
message, data
)
if not handled:
if data.get(REQUEST_ID) not in self._request_callbacks:
self.logger.debug(
"[%s(%s):%s] Message unhandled: %s",
self.fn or "",
self.host,
self.port,
_message_to_string(message, data),
)
except Exception: # pylint: disable=broad-except
self.logger.exception(
(
"[%s(%s):%s] Exception caught while sending message to "
"controller %s: %s"
),
self.fn or "",
self.host,
self.port,
type(self._handlers[message.namespace]).__name__,
_message_to_string(message, data),
)
else:
self.logger.debug(
"[%s(%s):%s] Received unknown namespace: %s",
self.fn or "",
self.host,
self.port,
_message_to_string(message, data),
)
def _cleanup(self):
""" Cleanup open channels and handlers """
for channel in self._open_channels:
try:
self.disconnect_channel(channel)
except Exception: # pylint: disable=broad-except
pass
for handler in self._handlers.values():
try:
handler.tear_down()
except Exception: # pylint: disable=broad-except
pass
try:
self.socket.close()
except Exception: # pylint: disable=broad-except
self.logger.exception(
"[%s(%s):%s] _cleanup", self.fn or "", self.host, self.port
)
self._report_connection_status(
ConnectionStatus(
CONNECTION_STATUS_DISCONNECTED, NetworkAddress(self.host, self.port)
)
)
self.socketpair[0].close()
self.socketpair[1].close()
self.connecting = True
def _report_connection_status(self, status):
""" Report a change in the connection status to any listeners """
for listener in self._connection_listeners:
try:
self.logger.debug(
"[%s(%s):%s] connection listener: %x (%s) %s",
self.fn or "",
self.host,
self.port,
id(listener),
type(listener).__name__,
status,
)
listener.new_connection_status(status)
except Exception: # pylint: disable=broad-except
self.logger.exception(
"[%s(%s):%s] Exception thrown when calling connection listener",
self.fn or "",
self.host,
self.port,
)
def _read_bytes_from_socket(self, msglen):
""" Read bytes from the socket. """
chunks = []
bytes_recd = 0
while bytes_recd < msglen:
if self.stop.is_set():
raise InterruptLoop("Stopped while reading from socket")
try:
chunk = self.socket.recv(min(msglen - bytes_recd, 2048))
if chunk == b"":
raise socket.error("socket connection broken")
chunks.append(chunk)
bytes_recd += len(chunk)
except socket.timeout:
self.logger.debug(
"[%s(%s):%s] timeout in : _read_bytes_from_socket",
self.fn or "",
self.host,
self.port,
)
continue
except ssl.SSLError as exc:
                # Support older ssl implementations which do not raise
# socket.timeout on timeouts
if _is_ssl_timeout(exc):
self.logger.debug(
"[%s(%s):%s] ssl timeout in : _read_bytes_from_socket",
self.fn or "",
self.host,
self.port,
)
continue
raise
return b"".join(chunks)
def _read_message(self):
""" Reads a message from the socket and converts it to a message. """
        # first 4 bytes are the Big-Endian payload length
payload_info = self._read_bytes_from_socket(4)
read_len = unpack(">I", payload_info)[0]
# now read the payload
payload = self._read_bytes_from_socket(read_len)
# pylint: disable=no-member
message = cast_channel_pb2.CastMessage()
message.ParseFromString(payload)
return message
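    # Wire-format sketch (comments only): each cast message is framed as a
    # 4-byte big-endian length prefix followed by the serialized CastMessage
    # protobuf, so writing and reading are roughly symmetrical:
    #
    #     payload = msg.SerializeToString()
    #     frame = pack(">I", len(payload)) + payload    # what send_message() writes
    #     length = unpack(">I", frame[:4])[0]           # what _read_message() reads
    #     assert length == len(payload)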
# pylint: disable=too-many-arguments
def send_message(
self,
destination_id,
namespace,
data,
inc_session_id=False,
callback_function=False,
no_add_request_id=False,
force=False,
):
""" Send a message to the Chromecast. """
# namespace is a string containing namespace
# data is a dict that will be converted to json
        # callback_function is only honoured when a request id is added
# If channel is not open yet, connect to it.
self._ensure_channel_connected(destination_id)
request_id = None
if not no_add_request_id:
request_id = self._gen_request_id()
data[REQUEST_ID] = request_id
if inc_session_id:
data[SESSION_ID] = self.session_id
# pylint: disable=no-member
msg = cast_channel_pb2.CastMessage()
msg.protocol_version = msg.CASTV2_1_0
msg.source_id = self.source_id
msg.destination_id = destination_id
msg.payload_type = cast_channel_pb2.CastMessage.STRING
msg.namespace = namespace
msg.payload_utf8 = _json_to_payload(data)
# prepend message with Big-Endian 4 byte payload size
be_size = pack(">I", msg.ByteSize())
# Log all messages except heartbeat
if msg.namespace != NS_HEARTBEAT:
self.logger.debug(
"[%s(%s):%s] Sending: %s",
self.fn or "",
self.host,
self.port,
_message_to_string(msg, data),
)
if not force and self.stop.is_set():
raise PyChromecastStopped("Socket client's thread is stopped.")
if not self.connecting and not self._force_recon:
try:
if not no_add_request_id and callback_function:
self._request_callbacks[request_id] = {
"event": threading.Event(),
"response": None,
"function": callback_function,
}
self.socket.sendall(be_size + msg.SerializeToString())
except socket.error:
self._request_callbacks.pop(request_id, None)
self._force_recon = True
self.logger.info(
"[%s(%s):%s] Error writing to socket.",
self.fn or "",
self.host,
self.port,
)
else:
raise NotConnected(
"Chromecast {}:{} is connecting...".format(self.host, self.port)
)
def send_platform_message(
self, namespace, message, inc_session_id=False, callback_function_param=False
):
""" Helper method to send a message to the platform. """
return self.send_message(
PLATFORM_DESTINATION_ID,
namespace,
message,
inc_session_id,
callback_function_param,
)
def send_app_message(
self, namespace, message, inc_session_id=False, callback_function_param=False
):
""" Helper method to send a message to current running app. """
if namespace not in self.app_namespaces:
raise UnsupportedNamespace(
(
"Namespace {} is not supported by current app. " "Supported are {}"
).format(namespace, ", ".join(self.app_namespaces))
)
return self.send_message(
self.destination_id,
namespace,
message,
inc_session_id,
callback_function_param,
)
def register_connection_listener(self, listener):
""" Register a connection listener for when the socket connection
changes. Listeners will be called with
listener.new_connection_status(status) """
self._connection_listeners.append(listener)
def _ensure_channel_connected(self, destination_id):
""" Ensure we opened a channel to destination_id. """
if destination_id not in self._open_channels:
self._open_channels.append(destination_id)
self.send_message(
destination_id,
NS_CONNECTION,
{
MESSAGE_TYPE: TYPE_CONNECT,
"origin": {},
"userAgent": "PyChromecast",
"senderInfo": {
"sdkType": 2,
"version": "15.605.1.3",
"browserVersion": "44.0.2403.30",
"platform": 4,
"systemVersion": "Macintosh; Intel Mac OS X10_10_3",
"connectionType": 1,
},
},
no_add_request_id=True,
)
def disconnect_channel(self, destination_id):
""" Disconnect a channel with destination_id. """
if destination_id in self._open_channels:
try:
self.send_message(
destination_id,
NS_CONNECTION,
{MESSAGE_TYPE: TYPE_CLOSE, "origin": {}},
no_add_request_id=True,
force=True,
)
except NotConnected:
pass
except Exception: # pylint: disable=broad-except
self.logger.exception(
"[%s(%s):%s] Exception", self.fn or "", self.host, self.port
)
self._open_channels.remove(destination_id)
self.handle_channel_disconnected()
def handle_channel_disconnected(self):
""" Handles a channel being disconnected. """
for namespace in self.app_namespaces:
if namespace in self._handlers:
self._handlers[namespace].channel_disconnected()
self.app_namespaces = []
self.destination_id = None
self.session_id = None
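# Illustrative sketch (not part of the original client): the wire format used by
# _read_message() and send_message() above is a 4-byte big-endian length prefix
# followed by the serialized CastMessage bytes. The helper names below are
# hypothetical; pack/unpack are the struct functions already used above.
def _example_frame_payload(payload):
    """Prepend the big-endian 4-byte length header to a payload (bytes)."""
    return pack(">I", len(payload)) + payload
def _example_split_frame(buffer):
    """Split one framed message off the front of a bytes buffer."""
    length = unpack(">I", buffer[:4])[0]
    return buffer[4:4 + length], buffer[4 + length:]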
class ConnectionController(BaseController):
""" Controller to respond to connection messages. """
def __init__(self):
super(ConnectionController, self).__init__(NS_CONNECTION)
def receive_message(self, message, data):
""" Called when a connection message is received. """
if self._socket_client.is_stopped:
return True
if data[MESSAGE_TYPE] == TYPE_CLOSE:
# The cast device is asking us to acknowledge closing this channel.
self._socket_client.disconnect_channel(message.source_id)
# Schedule a status update so that a channel is created.
self._socket_client.receiver_controller.update_status()
return True
return False
class HeartbeatController(BaseController):
""" Controller to respond to heartbeat messages. """
def __init__(self):
super(HeartbeatController, self).__init__(NS_HEARTBEAT, target_platform=True)
self.last_ping = 0
self.last_pong = time.time()
def receive_message(self, message, data):
""" Called when a heartbeat message is received. """
if self._socket_client.is_stopped:
return True
if data[MESSAGE_TYPE] == TYPE_PING:
try:
self._socket_client.send_message(
PLATFORM_DESTINATION_ID,
self.namespace,
{MESSAGE_TYPE: TYPE_PONG},
no_add_request_id=True,
)
except PyChromecastStopped:
self._socket_client.logger.debug(
"Heartbeat error when sending response, "
"Chromecast connection has stopped"
)
return True
if data[MESSAGE_TYPE] == TYPE_PONG:
self.reset()
return True
return False
def ping(self):
""" Send a ping message. """
self.last_ping = time.time()
try:
self.send_message({MESSAGE_TYPE: TYPE_PING})
except NotConnected:
self._socket_client.logger.error(
"Chromecast is disconnected. " "Cannot ping until reconnected."
)
def reset(self):
""" Reset expired counter. """
self.last_pong = time.time()
def is_expired(self):
""" Indicates if connection has expired. """
if time.time() - self.last_ping > HB_PING_TIME:
self.ping()
return (time.time() - self.last_pong) > HB_PING_TIME + HB_PONG_TIME
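# Illustrative sketch (not part of the original module): how a supervising loop
# could use HeartbeatController as described above, treating a missing pong as a
# dead connection. The function name and polling interval are hypothetical.
def _example_wait_for_heartbeat_failure(heartbeat, poll_interval=1):
    """Block until the heartbeat reports the connection as expired."""
    while not heartbeat.is_expired():
        # is_expired() also sends a new PING once HB_PING_TIME has elapsed.
        time.sleep(poll_interval)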
class ReceiverController(BaseController):
"""
Controller to interact with the Chromecast platform.
:param cast_type: Type of Chromecast device.
"""
def __init__(self, cast_type=CAST_TYPE_CHROMECAST):
super(ReceiverController, self).__init__(NS_RECEIVER, target_platform=True)
self.status = None
self.launch_failure = None
self.app_to_launch = None
self.cast_type = cast_type
self.app_launch_event = threading.Event()
self.app_launch_event_function = None
self._status_listeners = []
self._launch_error_listeners = []
def disconnected(self):
""" Called when disconnected. Will erase status. """
self.logger.info("Receiver:channel_disconnected")
self.status = None
@property
def app_id(self):
""" Convenience method to retrieve current app id. """
return self.status.app_id if self.status else None
def receive_message(self, message, data):
""" Called when a receiver-message has been received. """
if data[MESSAGE_TYPE] == TYPE_RECEIVER_STATUS:
self._process_get_status(data)
return True
if data[MESSAGE_TYPE] == TYPE_LAUNCH_ERROR:
self._process_launch_error(data)
return True
return False
def register_status_listener(self, listener):
""" Register a status listener for when a new Chromecast status
has been received. Listeners will be called with
listener.new_cast_status(status) """
self._status_listeners.append(listener)
def register_launch_error_listener(self, listener):
""" Register a listener for when a new launch error message
has been received. Listeners will be called with
listener.new_launch_error(launch_failure) """
self._launch_error_listeners.append(listener)
def update_status(self, callback_function_param=False):
""" Sends a message to the Chromecast to update the status. """
self.logger.debug("Receiver:Updating status")
self.send_message(
{MESSAGE_TYPE: TYPE_GET_STATUS}, callback_function=callback_function_param
)
def launch_app(self, app_id, force_launch=False, callback_function=False):
""" Launches an app on the Chromecast.
Will only launch if it is not currently running unless
force_launch=True. """
if not force_launch and self.status is None:
self.update_status(
lambda response: self._send_launch_message(
app_id, force_launch, callback_function
)
)
else:
self._send_launch_message(app_id, force_launch, callback_function)
def _send_launch_message(self, app_id, force_launch=False, callback_function=False):
if force_launch or self.app_id != app_id:
self.logger.info("Receiver:Launching app %s", app_id)
self.app_to_launch = app_id
self.app_launch_event.clear()
self.app_launch_event_function = callback_function
self.launch_failure = None
self.send_message({MESSAGE_TYPE: TYPE_LAUNCH, APP_ID: app_id})
else:
self.logger.info("Not launching app %s - already running", app_id)
if callback_function:
callback_function()
def stop_app(self, callback_function_param=False):
""" Stops the current running app on the Chromecast. """
self.logger.info("Receiver:Stopping current app '%s'", self.app_id)
return self.send_message(
{MESSAGE_TYPE: "STOP"},
inc_session_id=True,
callback_function=callback_function_param,
)
def set_volume(self, volume):
""" Allows to set volume. Should be value between 0..1.
Returns the new volume.
"""
volume = min(max(0, volume), 1)
self.logger.info("Receiver:setting volume to %.1f", volume)
self.send_message({MESSAGE_TYPE: "SET_VOLUME", "volume": {"level": volume}})
return volume
def set_volume_muted(self, muted):
""" Allows to mute volume. """
self.send_message({MESSAGE_TYPE: "SET_VOLUME", "volume": {"muted": muted}})
@staticmethod
def _parse_status(data, cast_type):
"""
Parses a STATUS message and returns a CastStatus object.
:type data: dict
:param cast_type: Type of Chromecast.
:rtype: CastStatus
"""
data = data.get("status", {})
volume_data = data.get("volume", {})
try:
app_data = data["applications"][0]
except (KeyError, IndexError):
app_data = {}
is_audio = cast_type in (CAST_TYPE_AUDIO, CAST_TYPE_GROUP)
status = CastStatus(
data.get("isActiveInput", None if is_audio else False),
data.get("isStandBy", None if is_audio else True),
volume_data.get("level", 1.0),
volume_data.get("muted", False),
app_data.get(APP_ID),
app_data.get("displayName"),
[item["name"] for item in app_data.get("namespaces", [])],
app_data.get(SESSION_ID),
app_data.get("transportId"),
app_data.get("statusText", ""),
app_data.get("iconUrl"),
)
return status
def _process_get_status(self, data):
""" Processes a received STATUS message and notifies listeners. """
status = self._parse_status(data, self.cast_type)
is_new_app = self.app_id != status.app_id and self.app_to_launch
self.status = status
self.logger.debug("Received status: %s", self.status)
self._report_status()
if is_new_app and self.app_to_launch == self.app_id:
self.app_to_launch = None
self.app_launch_event.set()
if self.app_launch_event_function:
self.logger.debug("Start app_launch_event_function...")
self.app_launch_event_function()
self.app_launch_event_function = None
def _report_status(self):
""" Reports the current status to all listeners. """
for listener in self._status_listeners:
try:
listener.new_cast_status(self.status)
except Exception: # pylint: disable=broad-except
self.logger.exception(
"Exception thrown when calling cast status listener"
)
@staticmethod
def _parse_launch_error(data):
"""
Parses a LAUNCH_ERROR message and returns a LaunchFailure object.
:type data: dict
:rtype: LaunchFailure
"""
return LaunchFailure(
data.get(ERROR_REASON, None), data.get(APP_ID), data.get(REQUEST_ID)
)
def _process_launch_error(self, data):
"""
Processes a received LAUNCH_ERROR message and notifies listeners.
"""
launch_failure = self._parse_launch_error(data)
self.launch_failure = launch_failure
if self.app_to_launch:
self.app_to_launch = None
self.app_launch_event.set()
self.logger.debug("Launch status: %s", launch_failure)
for listener in self._launch_error_listeners:
try:
listener.new_launch_error(launch_failure)
except Exception: # pylint: disable=broad-except
self.logger.exception(
"Exception thrown when calling launch error listener"
)
def tear_down(self):
""" Called when controller is destroyed. """
super(ReceiverController, self).tear_down()
self.status = None
self.launch_failure = None
self.app_to_launch = None
self.app_launch_event.clear()
self._status_listeners[:] = []
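# Illustrative sketch (not part of the original module): a minimal listener
# object satisfying the register_status_listener() contract documented above,
# i.e. anything exposing new_cast_status(status). The class name is hypothetical.
class _ExampleStatusListener(object):
    """Print every CastStatus pushed by a ReceiverController."""
    def new_cast_status(self, status):
        print("cast status changed: %r" % (status,))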
def new_socket():
"""
    Create a new socket with OS-specific parameters.
    Try to set SO_REUSEPORT where the platform supports it (e.g. BSD-flavored
    systems); an ENOPROTOOPT error is ignored, any other error is re-raised.
"""
new_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
new_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
try:
# noinspection PyUnresolvedReferences
reuseport = socket.SO_REUSEPORT
except AttributeError:
pass
else:
try:
new_sock.setsockopt(socket.SOL_SOCKET, reuseport, 1)
except (OSError, socket.error) as err:
# OSError on python 3, socket.error on python 2
if err.errno != errno.ENOPROTOOPT:
raise
return new_sock
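# Illustrative sketch (not part of the original module): new_socket() returns an
# unconnected TCP socket, so callers typically set a timeout and connect it to
# the cast device themselves (8009 is the usual cast port; host is a placeholder).
def _example_open_connection(host="cast-device.local", port=8009, timeout=10):
    sock = new_socket()
    sock.settimeout(timeout)
    sock.connect((host, port))
    return sock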
| |
# Copyright 2012 Grid Dynamics
# Copyright 2013 Inktank Storage, Inc.
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from eventlet import tpool
from six.moves import urllib
try:
import rados
import rbd
except ImportError:
rados = None
rbd = None
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_service import loopingcall
from oslo_utils import excutils
from oslo_utils import units
from nova.compute import task_states
from nova import exception
from nova.i18n import _
from nova.i18n import _LE
from nova.i18n import _LW
from nova import utils
from nova.virt.libvirt import utils as libvirt_utils
LOG = logging.getLogger(__name__)
class RbdProxy(object):
"""A wrapper around rbd.RBD class instance to avoid blocking of process.
Offloads all calls to rbd.RBD class methods to native OS threads, so that
we do not block the whole process while executing the librbd code.
"""
def __init__(self):
self._rbd = tpool.Proxy(rbd.RBD())
def __getattr__(self, attr):
return getattr(self._rbd, attr)
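# Illustrative sketch (not part of the original module): RbdProxy above relies on
# eventlet's tpool.Proxy, which dispatches each method call on the wrapped object
# to a native OS thread so librbd cannot block the green-threaded process. The
# helper name is hypothetical.
def _example_offload_to_native_threads(blocking_obj):
    """Wrap an object so its method calls run in eventlet's thread pool."""
    return tpool.Proxy(blocking_obj)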
class RBDVolumeProxy(object):
"""Context manager for dealing with an existing rbd volume.
This handles connecting to rados and opening an ioctx automatically, and
otherwise acts like a librbd Image object.
The underlying librados client and ioctx can be accessed as the attributes
'client' and 'ioctx'.
"""
def __init__(self, driver, name, pool=None, snapshot=None,
read_only=False):
client, ioctx = driver._connect_to_rados(pool)
try:
snap_name = snapshot.encode('utf8') if snapshot else None
self.volume = tpool.Proxy(rbd.Image(ioctx, name.encode('utf8'),
snapshot=snap_name,
read_only=read_only))
except rbd.ImageNotFound:
with excutils.save_and_reraise_exception():
LOG.debug("rbd image %s does not exist", name)
driver._disconnect_from_rados(client, ioctx)
except rbd.Error:
with excutils.save_and_reraise_exception():
LOG.exception(_LE("error opening rbd image %s"), name)
driver._disconnect_from_rados(client, ioctx)
self.driver = driver
self.client = client
self.ioctx = ioctx
def __enter__(self):
return self
def __exit__(self, type_, value, traceback):
try:
self.volume.close()
finally:
self.driver._disconnect_from_rados(self.client, self.ioctx)
def __getattr__(self, attrib):
return getattr(self.volume, attrib)
class RADOSClient(object):
"""Context manager to simplify error handling for connecting to ceph."""
def __init__(self, driver, pool=None):
self.driver = driver
self.cluster, self.ioctx = driver._connect_to_rados(pool)
def __enter__(self):
return self
def __exit__(self, type_, value, traceback):
self.driver._disconnect_from_rados(self.cluster, self.ioctx)
@property
def features(self):
features = self.cluster.conf_get('rbd_default_features')
if ((features is None) or (int(features) == 0)):
features = rbd.RBD_FEATURE_LAYERING
return int(features)
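# Illustrative sketch (not part of the original module): RADOSClient and
# RBDVolumeProxy above are intended to be used as context managers so the rados
# client and ioctx are always released. The helper name is hypothetical.
def _example_cluster_fsid(driver):
    """Fetch the cluster fsid, releasing the rados connection afterwards."""
    with RADOSClient(driver) as client:
        return client.cluster.get_fsid()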
class RBDDriver(object):
def __init__(self, pool, ceph_conf, rbd_user):
self.pool = pool.encode('utf8')
# NOTE(angdraug): rados.Rados fails to connect if ceph_conf is None:
# https://github.com/ceph/ceph/pull/1787
self.ceph_conf = ceph_conf.encode('utf8') if ceph_conf else ''
self.rbd_user = rbd_user.encode('utf8') if rbd_user else None
if rbd is None:
raise RuntimeError(_('rbd python libraries not found'))
def _connect_to_rados(self, pool=None):
client = rados.Rados(rados_id=self.rbd_user,
conffile=self.ceph_conf)
try:
client.connect()
pool_to_open = pool or self.pool
ioctx = client.open_ioctx(pool_to_open.encode('utf-8'))
return client, ioctx
except rados.Error:
# shutdown cannot raise an exception
client.shutdown()
raise
def _disconnect_from_rados(self, client, ioctx):
# closing an ioctx cannot raise an exception
ioctx.close()
client.shutdown()
def ceph_args(self):
"""List of command line parameters to be passed to ceph commands to
reflect RBDDriver configuration such as RBD user name and location
of ceph.conf.
"""
args = []
if self.rbd_user:
args.extend(['--id', self.rbd_user])
if self.ceph_conf:
args.extend(['--conf', self.ceph_conf])
return args
def get_mon_addrs(self):
args = ['ceph', 'mon', 'dump', '--format=json'] + self.ceph_args()
out, _ = utils.execute(*args)
lines = out.split('\n')
if lines[0].startswith('dumped monmap epoch'):
lines = lines[1:]
monmap = jsonutils.loads('\n'.join(lines))
addrs = [mon['addr'] for mon in monmap['mons']]
hosts = []
ports = []
for addr in addrs:
host_port = addr[:addr.rindex('/')]
host, port = host_port.rsplit(':', 1)
hosts.append(host.strip('[]'))
ports.append(port)
return hosts, ports
def parse_url(self, url):
prefix = 'rbd://'
if not url.startswith(prefix):
reason = _('Not stored in rbd')
raise exception.ImageUnacceptable(image_id=url, reason=reason)
pieces = [urllib.parse.unquote(piece)
for piece in url[len(prefix):].split('/')]
if '' in pieces:
reason = _('Blank components')
raise exception.ImageUnacceptable(image_id=url, reason=reason)
if len(pieces) != 4:
reason = _('Not an rbd snapshot')
raise exception.ImageUnacceptable(image_id=url, reason=reason)
return pieces
def get_fsid(self):
with RADOSClient(self) as client:
return client.cluster.get_fsid()
def is_cloneable(self, image_location, image_meta):
url = image_location['url']
try:
fsid, pool, image, snapshot = self.parse_url(url)
except exception.ImageUnacceptable as e:
LOG.debug('not cloneable: %s', e)
return False
if self.get_fsid() != fsid:
reason = '%s is in a different ceph cluster' % url
LOG.debug(reason)
return False
if image_meta.get('disk_format') != 'raw':
reason = ("rbd image clone requires image format to be "
"'raw' but image {0} is '{1}'").format(
url, image_meta.get('disk_format'))
LOG.debug(reason)
return False
# check that we can read the image
try:
return self.exists(image, pool=pool, snapshot=snapshot)
except rbd.Error as e:
LOG.debug('Unable to open image %(loc)s: %(err)s',
dict(loc=url, err=e))
return False
def clone(self, image_location, dest_name, dest_pool=None):
_fsid, pool, image, snapshot = self.parse_url(
image_location['url'])
LOG.debug('cloning %(pool)s/%(img)s@%(snap)s to '
'%(dest_pool)s/%(dest_name)s',
dict(pool=pool, img=image, snap=snapshot,
dest_pool=dest_pool, dest_name=dest_name))
with RADOSClient(self, str(pool)) as src_client:
with RADOSClient(self, dest_pool) as dest_client:
try:
RbdProxy().clone(src_client.ioctx,
image.encode('utf-8'),
snapshot.encode('utf-8'),
dest_client.ioctx,
str(dest_name),
features=src_client.features)
except rbd.PermissionError:
raise exception.Forbidden(_('no write permission on '
'storage pool %s') % dest_pool)
def size(self, name):
with RBDVolumeProxy(self, name, read_only=True) as vol:
return vol.size()
def resize(self, name, size):
"""Resize RBD volume.
:name: Name of RBD object
:size: New size in bytes
"""
LOG.debug('resizing rbd image %s to %d', name, size)
with RBDVolumeProxy(self, name) as vol:
vol.resize(size)
def parent_info(self, volume, pool=None):
"""Returns the pool, image and snapshot name for the parent of an
RBD volume.
:volume: Name of RBD object
:pool: Name of pool
"""
try:
with RBDVolumeProxy(self, str(volume), pool=pool,
read_only=True) as vol:
return vol.parent_info()
except rbd.ImageNotFound:
raise exception.ImageUnacceptable(_("no usable parent snapshot "
"for volume %s") % volume)
def flatten(self, volume, pool=None):
""""Flattens" a snapshotted image with the parents' data,
effectively detaching it from the parent.
:volume: Name of RBD object
:pool: Name of pool
"""
LOG.debug('flattening %(pool)s/%(vol)s', dict(pool=pool, vol=volume))
with RBDVolumeProxy(self, str(volume), pool=pool) as vol:
vol.flatten()
def exists(self, name, pool=None, snapshot=None):
try:
with RBDVolumeProxy(self, name,
pool=pool,
snapshot=snapshot,
read_only=True):
return True
except rbd.ImageNotFound:
return False
def remove_image(self, name):
"""Remove RBD volume
:name: Name of RBD volume
"""
with RADOSClient(self, self.pool) as client:
try:
RbdProxy().remove(client.ioctx, name)
except rbd.ImageNotFound:
LOG.warning(_LW('image %(volume)s in pool %(pool)s can not be '
'found, failed to remove'),
{'volume': name, 'pool': self.pool})
except rbd.ImageHasSnapshots:
LOG.error(_LE('image %(volume)s in pool %(pool)s has '
'snapshots, failed to remove'),
{'volume': name, 'pool': self.pool})
def import_image(self, base, name):
"""Import RBD volume from image file.
        Uses the command line import instead of librbd since the rbd import
command detects zeroes to preserve sparseness in the image.
:base: Path to image file
:name: Name of RBD volume
"""
args = ['--pool', self.pool, base, name]
        # Image format 2 supports cloning; in the stable ceph release the
        # default format is not 2, so we need to request it explicitly.
args += ['--image-format=2']
args += self.ceph_args()
utils.execute('rbd', 'import', *args)
def _destroy_volume(self, client, volume, pool=None):
"""Destroy an RBD volume, retrying as needed.
"""
def _cleanup_vol(ioctx, volume, retryctx):
try:
RbdProxy().remove(ioctx, volume)
raise loopingcall.LoopingCallDone(retvalue=False)
except rbd.ImageHasSnapshots:
self.remove_snap(volume, libvirt_utils.RESIZE_SNAPSHOT_NAME,
ignore_errors=True)
except (rbd.ImageBusy, rbd.ImageHasSnapshots):
LOG.warning(_LW('rbd remove %(volume)s in pool %(pool)s '
'failed'),
{'volume': volume, 'pool': self.pool})
retryctx['retries'] -= 1
if retryctx['retries'] <= 0:
raise loopingcall.LoopingCallDone()
# NOTE(danms): We let it go for ten seconds
retryctx = {'retries': 10}
timer = loopingcall.FixedIntervalLoopingCall(
_cleanup_vol, client.ioctx, volume, retryctx)
timed_out = timer.start(interval=1).wait()
if timed_out:
# NOTE(danms): Run this again to propagate the error, but
# if it succeeds, don't raise the loopingcall exception
try:
_cleanup_vol(client.ioctx, volume, retryctx)
except loopingcall.LoopingCallDone:
pass
def cleanup_volumes(self, instance):
with RADOSClient(self, self.pool) as client:
def belongs_to_instance(disk):
# NOTE(nic): On revert_resize, the cleanup steps for the root
# volume are handled with an "rbd snap rollback" command,
# and none of this is needed (and is, in fact, harmful) so
# filter out non-ephemerals from the list
if instance.task_state == task_states.RESIZE_REVERTING:
return (disk.startswith(instance.uuid) and
disk.endswith('disk.local'))
else:
return disk.startswith(instance.uuid)
volumes = RbdProxy().list(client.ioctx)
for volume in filter(belongs_to_instance, volumes):
self._destroy_volume(client, volume)
def get_pool_info(self):
with RADOSClient(self) as client:
stats = client.cluster.get_cluster_stats()
return {'total': stats['kb'] * units.Ki,
'free': stats['kb_avail'] * units.Ki,
'used': stats['kb_used'] * units.Ki}
def create_snap(self, volume, name, pool=None, protect=False):
"""Create a snapshot of an RBD volume.
:volume: Name of RBD object
:name: Name of snapshot
:pool: Name of pool
:protect: Set the snapshot to "protected"
"""
LOG.debug('creating snapshot(%(snap)s) on rbd image(%(img)s)',
{'snap': name, 'img': volume})
with RBDVolumeProxy(self, str(volume), pool=pool) as vol:
vol.create_snap(name)
if protect and not vol.is_protected_snap(name):
vol.protect_snap(name)
def remove_snap(self, volume, name, ignore_errors=False, pool=None,
force=False):
"""Removes a snapshot from an RBD volume.
:volume: Name of RBD object
:name: Name of snapshot
:ignore_errors: whether or not to log warnings on failures
:pool: Name of pool
:force: Remove snapshot even if it is protected
"""
with RBDVolumeProxy(self, str(volume), pool=pool) as vol:
if name in [snap.get('name', '') for snap in vol.list_snaps()]:
if vol.is_protected_snap(name):
if force:
vol.unprotect_snap(name)
elif not ignore_errors:
LOG.warning(_LW('snapshot(%(name)s) on rbd '
'image(%(img)s) is protected, '
'skipping'),
{'name': name, 'img': volume})
return
LOG.debug('removing snapshot(%(name)s) on rbd image(%(img)s)',
{'name': name, 'img': volume})
vol.remove_snap(name)
elif not ignore_errors:
LOG.warning(_LW('no snapshot(%(name)s) found on rbd '
'image(%(img)s)'),
{'name': name, 'img': volume})
def rollback_to_snap(self, volume, name):
"""Revert an RBD volume to its contents at a snapshot.
:volume: Name of RBD object
:name: Name of snapshot
"""
with RBDVolumeProxy(self, volume) as vol:
if name in [snap.get('name', '') for snap in vol.list_snaps()]:
LOG.debug('rolling back rbd image(%(img)s) to '
'snapshot(%(snap)s)', {'snap': name, 'img': volume})
vol.rollback_to_snap(name)
else:
raise exception.SnapshotNotFound(snapshot_id=name)
def destroy_volume(self, volume, pool=None):
"""A one-shot version of cleanup_volumes()
"""
with RADOSClient(self, pool) as client:
self._destroy_volume(client, volume)
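# Illustrative sketch (not part of the original module): parse_url() above
# accepts Glance locations of the form rbd://<fsid>/<pool>/<image>/<snapshot>,
# URL-quoted and with all four components present. The helper below mirrors that
# splitting for a quick standalone check; its name is hypothetical.
def _example_split_rbd_url(url):
    """Split an rbd:// URL into (fsid, pool, image, snapshot) pieces."""
    if not url.startswith('rbd://'):
        raise ValueError('not an rbd URL: %s' % url)
    pieces = [urllib.parse.unquote(piece)
              for piece in url[len('rbd://'):].split('/')]
    if len(pieces) != 4 or '' in pieces:
        raise ValueError('not a cloneable rbd snapshot URL: %s' % url)
    return pieces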
| |
# coding=utf-8
# pylint: disable-msg=E1101,W0612
from datetime import datetime
import numpy as np
from numpy import nan
import pytest
import pandas.compat as compat
from pandas.compat import lrange, range
import pandas as pd
from pandas import Categorical, Series, date_range, isna
import pandas.util.testing as tm
from pandas.util.testing import assert_series_equal
@pytest.mark.parametrize(
'first_slice,second_slice', [
[[2, None], [None, -5]],
[[None, 0], [None, -5]],
[[None, -5], [None, 0]],
[[None, 0], [None, 0]]
])
@pytest.mark.parametrize('fill', [None, -1])
def test_align(test_data, first_slice, second_slice, join_type, fill):
a = test_data.ts[slice(*first_slice)]
b = test_data.ts[slice(*second_slice)]
aa, ab = a.align(b, join=join_type, fill_value=fill)
join_index = a.index.join(b.index, how=join_type)
if fill is not None:
diff_a = aa.index.difference(join_index)
diff_b = ab.index.difference(join_index)
if len(diff_a) > 0:
assert (aa.reindex(diff_a) == fill).all()
if len(diff_b) > 0:
assert (ab.reindex(diff_b) == fill).all()
ea = a.reindex(join_index)
eb = b.reindex(join_index)
if fill is not None:
ea = ea.fillna(fill)
eb = eb.fillna(fill)
assert_series_equal(aa, ea)
assert_series_equal(ab, eb)
assert aa.name == 'ts'
assert ea.name == 'ts'
assert ab.name == 'ts'
assert eb.name == 'ts'
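# Illustrative sketch (not part of the original test module): the core behaviour
# the align tests above exercise. align() reindexes both series onto the joined
# index and, when fill_value is given, fills the holes that introduces.
def _example_align():
    left = Series([1., 2.], index=['a', 'b'])
    right = Series([10., 20.], index=['b', 'c'])
    la, ra = left.align(right, join='outer', fill_value=0)
    # la -> a=1.0, b=2.0, c=0.0 and ra -> a=0.0, b=10.0, c=20.0
    return la, ra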
@pytest.mark.parametrize(
'first_slice,second_slice', [
[[2, None], [None, -5]],
[[None, 0], [None, -5]],
[[None, -5], [None, 0]],
[[None, 0], [None, 0]]
])
@pytest.mark.parametrize('method', ['pad', 'bfill'])
@pytest.mark.parametrize('limit', [None, 1])
def test_align_fill_method(test_data,
first_slice, second_slice,
join_type, method, limit):
a = test_data.ts[slice(*first_slice)]
b = test_data.ts[slice(*second_slice)]
aa, ab = a.align(b, join=join_type, method=method, limit=limit)
join_index = a.index.join(b.index, how=join_type)
ea = a.reindex(join_index)
eb = b.reindex(join_index)
ea = ea.fillna(method=method, limit=limit)
eb = eb.fillna(method=method, limit=limit)
assert_series_equal(aa, ea)
assert_series_equal(ab, eb)
def test_align_nocopy(test_data):
b = test_data.ts[:5].copy()
# do copy
a = test_data.ts.copy()
ra, _ = a.align(b, join='left')
ra[:5] = 5
assert not (a[:5] == 5).any()
# do not copy
a = test_data.ts.copy()
ra, _ = a.align(b, join='left', copy=False)
ra[:5] = 5
assert (a[:5] == 5).all()
# do copy
a = test_data.ts.copy()
b = test_data.ts[:5].copy()
_, rb = a.align(b, join='right')
rb[:3] = 5
assert not (b[:3] == 5).any()
# do not copy
a = test_data.ts.copy()
b = test_data.ts[:5].copy()
_, rb = a.align(b, join='right', copy=False)
rb[:2] = 5
assert (b[:2] == 5).all()
def test_align_same_index(test_data):
a, b = test_data.ts.align(test_data.ts, copy=False)
assert a.index is test_data.ts.index
assert b.index is test_data.ts.index
a, b = test_data.ts.align(test_data.ts, copy=True)
assert a.index is not test_data.ts.index
assert b.index is not test_data.ts.index
def test_align_multiindex():
# GH 10665
midx = pd.MultiIndex.from_product([range(2), range(3), range(2)],
names=('a', 'b', 'c'))
idx = pd.Index(range(2), name='b')
s1 = pd.Series(np.arange(12, dtype='int64'), index=midx)
s2 = pd.Series(np.arange(2, dtype='int64'), index=idx)
# these must be the same results (but flipped)
res1l, res1r = s1.align(s2, join='left')
res2l, res2r = s2.align(s1, join='right')
expl = s1
tm.assert_series_equal(expl, res1l)
tm.assert_series_equal(expl, res2r)
expr = pd.Series([0, 0, 1, 1, np.nan, np.nan] * 2, index=midx)
tm.assert_series_equal(expr, res1r)
tm.assert_series_equal(expr, res2l)
res1l, res1r = s1.align(s2, join='right')
res2l, res2r = s2.align(s1, join='left')
exp_idx = pd.MultiIndex.from_product([range(2), range(2), range(2)],
names=('a', 'b', 'c'))
expl = pd.Series([0, 1, 2, 3, 6, 7, 8, 9], index=exp_idx)
tm.assert_series_equal(expl, res1l)
tm.assert_series_equal(expl, res2r)
expr = pd.Series([0, 0, 1, 1] * 2, index=exp_idx)
tm.assert_series_equal(expr, res1r)
tm.assert_series_equal(expr, res2l)
def test_reindex(test_data):
identity = test_data.series.reindex(test_data.series.index)
# __array_interface__ is not defined for older numpies
# and on some pythons
try:
assert np.may_share_memory(test_data.series.index, identity.index)
except AttributeError:
pass
assert identity.index.is_(test_data.series.index)
assert identity.index.identical(test_data.series.index)
subIndex = test_data.series.index[10:20]
subSeries = test_data.series.reindex(subIndex)
for idx, val in compat.iteritems(subSeries):
assert val == test_data.series[idx]
subIndex2 = test_data.ts.index[10:20]
subTS = test_data.ts.reindex(subIndex2)
for idx, val in compat.iteritems(subTS):
assert val == test_data.ts[idx]
stuffSeries = test_data.ts.reindex(subIndex)
assert np.isnan(stuffSeries).all()
# This is extremely important for the Cython code to not screw up
nonContigIndex = test_data.ts.index[::2]
subNonContig = test_data.ts.reindex(nonContigIndex)
for idx, val in compat.iteritems(subNonContig):
assert val == test_data.ts[idx]
    # reindex with no arguments should return a copy with the same index
result = test_data.ts.reindex()
assert not (result is test_data.ts)
def test_reindex_nan():
ts = Series([2, 3, 5, 7], index=[1, 4, nan, 8])
i, j = [nan, 1, nan, 8, 4, nan], [2, 0, 2, 3, 1, 2]
assert_series_equal(ts.reindex(i), ts.iloc[j])
ts.index = ts.index.astype('object')
# reindex coerces index.dtype to float, loc/iloc doesn't
assert_series_equal(ts.reindex(i), ts.iloc[j], check_index_type=False)
def test_reindex_series_add_nat():
rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s')
series = Series(rng)
result = series.reindex(lrange(15))
assert np.issubdtype(result.dtype, np.dtype('M8[ns]'))
mask = result.isna()
assert mask[-5:].all()
assert not mask[:-5].any()
def test_reindex_with_datetimes():
rng = date_range('1/1/2000', periods=20)
ts = Series(np.random.randn(20), index=rng)
result = ts.reindex(list(ts.index[5:10]))
expected = ts[5:10]
tm.assert_series_equal(result, expected)
result = ts[list(ts.index[5:10])]
tm.assert_series_equal(result, expected)
def test_reindex_corner(test_data):
# (don't forget to fix this) I think it's fixed
test_data.empty.reindex(test_data.ts.index, method='pad') # it works
# corner case: pad empty series
reindexed = test_data.empty.reindex(test_data.ts.index, method='pad')
# pass non-Index
reindexed = test_data.ts.reindex(list(test_data.ts.index))
assert_series_equal(test_data.ts, reindexed)
# bad fill method
ts = test_data.ts[::2]
msg = (r"Invalid fill method\. Expecting pad \(ffill\), backfill"
r" \(bfill\) or nearest\. Got foo")
with pytest.raises(ValueError, match=msg):
ts.reindex(test_data.ts.index, method='foo')
def test_reindex_pad():
s = Series(np.arange(10), dtype='int64')
s2 = s[::2]
reindexed = s2.reindex(s.index, method='pad')
reindexed2 = s2.reindex(s.index, method='ffill')
assert_series_equal(reindexed, reindexed2)
expected = Series([0, 0, 2, 2, 4, 4, 6, 6, 8, 8], index=np.arange(10))
assert_series_equal(reindexed, expected)
# GH4604
s = Series([1, 2, 3, 4, 5], index=['a', 'b', 'c', 'd', 'e'])
new_index = ['a', 'g', 'c', 'f']
expected = Series([1, 1, 3, 3], index=new_index)
    # this changes dtype because the reindex introduces NaN before the ffill
result = s.reindex(new_index).ffill()
assert_series_equal(result, expected.astype('float64'))
result = s.reindex(new_index).ffill(downcast='infer')
assert_series_equal(result, expected)
expected = Series([1, 5, 3, 5], index=new_index)
result = s.reindex(new_index, method='ffill')
assert_series_equal(result, expected)
# inference of new dtype
s = Series([True, False, False, True], index=list('abcd'))
new_index = 'agc'
result = s.reindex(list(new_index)).ffill()
expected = Series([True, True, False], index=list(new_index))
assert_series_equal(result, expected)
# GH4618 shifted series downcasting
s = Series(False, index=lrange(0, 5))
result = s.shift(1).fillna(method='bfill')
expected = Series(False, index=lrange(0, 5))
assert_series_equal(result, expected)
def test_reindex_nearest():
s = Series(np.arange(10, dtype='int64'))
target = [0.1, 0.9, 1.5, 2.0]
actual = s.reindex(target, method='nearest')
expected = Series(np.around(target).astype('int64'), target)
assert_series_equal(expected, actual)
actual = s.reindex_like(actual, method='nearest')
assert_series_equal(expected, actual)
actual = s.reindex_like(actual, method='nearest', tolerance=1)
assert_series_equal(expected, actual)
actual = s.reindex_like(actual, method='nearest',
tolerance=[1, 2, 3, 4])
assert_series_equal(expected, actual)
actual = s.reindex(target, method='nearest', tolerance=0.2)
expected = Series([0, 1, np.nan, 2], target)
assert_series_equal(expected, actual)
actual = s.reindex(target, method='nearest',
tolerance=[0.3, 0.01, 0.4, 3])
expected = Series([0, np.nan, np.nan, 2], target)
assert_series_equal(expected, actual)
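# Illustrative sketch (not part of the original test module): reindex with
# method='nearest' snaps each requested label to the closest existing one,
# while tolerance turns matches that are too far away into NaN.
def _example_reindex_nearest():
    s = Series([10, 20, 30], index=[0, 1, 2])
    close = s.reindex([0.1, 1.9], method='nearest')  # values 10 and 30
    strict = s.reindex([0.1, 1.9], method='nearest', tolerance=0.05)  # both NaN
    return close, strict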
def test_reindex_backfill():
pass
def test_reindex_int(test_data):
ts = test_data.ts[::2]
int_ts = Series(np.zeros(len(ts), dtype=int), index=ts.index)
# this should work fine
reindexed_int = int_ts.reindex(test_data.ts.index)
# if NaNs introduced
assert reindexed_int.dtype == np.float_
# NO NaNs introduced
reindexed_int = int_ts.reindex(int_ts.index[::2])
assert reindexed_int.dtype == np.int_
def test_reindex_bool(test_data):
# A series other than float, int, string, or object
ts = test_data.ts[::2]
bool_ts = Series(np.zeros(len(ts), dtype=bool), index=ts.index)
# this should work fine
reindexed_bool = bool_ts.reindex(test_data.ts.index)
# if NaNs introduced
assert reindexed_bool.dtype == np.object_
# NO NaNs introduced
reindexed_bool = bool_ts.reindex(bool_ts.index[::2])
assert reindexed_bool.dtype == np.bool_
def test_reindex_bool_pad(test_data):
# fail
ts = test_data.ts[5:]
bool_ts = Series(np.zeros(len(ts), dtype=bool), index=ts.index)
filled_bool = bool_ts.reindex(test_data.ts.index, method='pad')
assert isna(filled_bool[:5]).all()
def test_reindex_categorical():
index = date_range('20000101', periods=3)
# reindexing to an invalid Categorical
s = Series(['a', 'b', 'c'], dtype='category')
result = s.reindex(index)
expected = Series(Categorical(values=[np.nan, np.nan, np.nan],
categories=['a', 'b', 'c']))
expected.index = index
tm.assert_series_equal(result, expected)
# partial reindexing
expected = Series(Categorical(values=['b', 'c'], categories=['a', 'b',
'c']))
expected.index = [1, 2]
result = s.reindex([1, 2])
tm.assert_series_equal(result, expected)
expected = Series(Categorical(
values=['c', np.nan], categories=['a', 'b', 'c']))
expected.index = [2, 3]
result = s.reindex([2, 3])
tm.assert_series_equal(result, expected)
def test_reindex_like(test_data):
other = test_data.ts[::2]
assert_series_equal(test_data.ts.reindex(other.index),
test_data.ts.reindex_like(other))
# GH 7179
day1 = datetime(2013, 3, 5)
day2 = datetime(2013, 5, 5)
day3 = datetime(2014, 3, 5)
series1 = Series([5, None, None], [day1, day2, day3])
series2 = Series([None, None], [day1, day3])
result = series1.reindex_like(series2, method='pad')
expected = Series([5, np.nan], index=[day1, day3])
assert_series_equal(result, expected)
def test_reindex_fill_value():
# -----------------------------------------------------------
# floats
floats = Series([1., 2., 3.])
result = floats.reindex([1, 2, 3])
expected = Series([2., 3., np.nan], index=[1, 2, 3])
assert_series_equal(result, expected)
result = floats.reindex([1, 2, 3], fill_value=0)
expected = Series([2., 3., 0], index=[1, 2, 3])
assert_series_equal(result, expected)
# -----------------------------------------------------------
# ints
ints = Series([1, 2, 3])
result = ints.reindex([1, 2, 3])
expected = Series([2., 3., np.nan], index=[1, 2, 3])
assert_series_equal(result, expected)
# don't upcast
result = ints.reindex([1, 2, 3], fill_value=0)
expected = Series([2, 3, 0], index=[1, 2, 3])
assert issubclass(result.dtype.type, np.integer)
assert_series_equal(result, expected)
# -----------------------------------------------------------
# objects
objects = Series([1, 2, 3], dtype=object)
result = objects.reindex([1, 2, 3])
expected = Series([2, 3, np.nan], index=[1, 2, 3], dtype=object)
assert_series_equal(result, expected)
result = objects.reindex([1, 2, 3], fill_value='foo')
expected = Series([2, 3, 'foo'], index=[1, 2, 3], dtype=object)
assert_series_equal(result, expected)
# ------------------------------------------------------------
# bools
bools = Series([True, False, True])
result = bools.reindex([1, 2, 3])
expected = Series([False, True, np.nan], index=[1, 2, 3], dtype=object)
assert_series_equal(result, expected)
result = bools.reindex([1, 2, 3], fill_value=False)
expected = Series([False, True, False], index=[1, 2, 3])
assert_series_equal(result, expected)
def test_reindex_datetimeindexes_tz_naive_and_aware():
# GH 8306
idx = date_range('20131101', tz='America/Chicago', periods=7)
newidx = date_range('20131103', periods=10, freq='H')
s = Series(range(7), index=idx)
with pytest.raises(TypeError):
s.reindex(newidx, method='ffill')
def test_reindex_empty_series_tz_dtype():
# GH 20869
result = Series(dtype='datetime64[ns, UTC]').reindex([0, 1])
expected = Series([pd.NaT] * 2, dtype='datetime64[ns, UTC]')
tm.assert_equal(result, expected)
def test_rename():
# GH 17407
s = Series(range(1, 6), index=pd.Index(range(2, 7), name='IntIndex'))
result = s.rename(str)
expected = s.rename(lambda i: str(i))
assert_series_equal(result, expected)
assert result.name == expected.name
@pytest.mark.parametrize(
'data, index, drop_labels,'
' axis, expected_data, expected_index',
[
# Unique Index
([1, 2], ['one', 'two'], ['two'],
0, [1], ['one']),
([1, 2], ['one', 'two'], ['two'],
'rows', [1], ['one']),
([1, 1, 2], ['one', 'two', 'one'], ['two'],
0, [1, 2], ['one', 'one']),
# GH 5248 Non-Unique Index
([1, 1, 2], ['one', 'two', 'one'], 'two',
0, [1, 2], ['one', 'one']),
([1, 1, 2], ['one', 'two', 'one'], ['one'],
0, [1], ['two']),
([1, 1, 2], ['one', 'two', 'one'], 'one',
0, [1], ['two'])])
def test_drop_unique_and_non_unique_index(data, index, axis, drop_labels,
expected_data, expected_index):
s = Series(data=data, index=index)
result = s.drop(drop_labels, axis=axis)
expected = Series(data=expected_data, index=expected_index)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
'data, index, drop_labels,'
' axis, error_type, error_desc',
[
# single string/tuple-like
(range(3), list('abc'), 'bc',
0, KeyError, 'not found in axis'),
# bad axis
(range(3), list('abc'), ('a',),
0, KeyError, 'not found in axis'),
(range(3), list('abc'), 'one',
'columns', ValueError, 'No axis named columns')])
def test_drop_exception_raised(data, index, drop_labels,
axis, error_type, error_desc):
with pytest.raises(error_type, match=error_desc):
Series(data, index=index).drop(drop_labels, axis=axis)
def test_drop_with_ignore_errors():
# errors='ignore'
s = Series(range(3), index=list('abc'))
result = s.drop('bc', errors='ignore')
tm.assert_series_equal(result, s)
result = s.drop(['a', 'd'], errors='ignore')
expected = s.iloc[1:]
tm.assert_series_equal(result, expected)
# GH 8522
s = Series([2, 3], index=[True, False])
assert s.index.is_object()
result = s.drop(True)
expected = Series([3], index=[False])
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize('index', [[1, 2, 3], [1, 1, 3]])
@pytest.mark.parametrize('drop_labels', [[], [1], [3]])
def test_drop_empty_list(index, drop_labels):
# GH 21494
expected_index = [i for i in index if i not in drop_labels]
series = pd.Series(index=index).drop(drop_labels)
tm.assert_series_equal(series, pd.Series(index=expected_index))
@pytest.mark.parametrize('data, index, drop_labels', [
(None, [1, 2, 3], [1, 4]),
(None, [1, 2, 2], [1, 4]),
([2, 3], [0, 1], [False, True])
])
def test_drop_non_empty_list(data, index, drop_labels):
# GH 21494 and GH 16877
with pytest.raises(KeyError, match='not found in axis'):
pd.Series(data=data, index=index).drop(drop_labels)
| |
# Lint as: python2, python3
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper class for implementing a beam search decoder.
Individual models just need to provide a few callback functions.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import REDACTED.transformer_lingvo.lingvo.compat as tf
from REDACTED.transformer_lingvo.lingvo.core import base_layer
from REDACTED.transformer_lingvo.lingvo.core import ops
from REDACTED.transformer_lingvo.lingvo.core import py_utils
import six
from REDACTED.tensorflow.python.ops import inplace_ops
# TODO(yonghui):
# 1) Change the tensor shape [max_decoder_time_steps, batch_size *
# num_hyps_per_beam] to [max_decoder_time_steps, num_hyps_per_beam,
# batch_size] to avoid confusion and misinterpretation of the results.
# Defines a namedtuple to store the results of BeamSearchDecode. It contains
# the following entries:
# done_hyps: A string Tensor of shape
# [max_decoder_time_steps, batch_size * num_hyps_per_beam] which can be
# either an empty string, or a serialized Hypothesis proto. The non-empty
# hyps in done_hyps are terminated hypotheses. The 'h'-th hyp for sample
# 'b' at time step 't' can be found at done_hyps[t, batch_size * h + b].
# topk_hyps: A string Tensor of shape [batch_size, num_hyps_per_beam].
# topk_hyps[b, h] is the h-th hypothesis for the sample 'b' in the
# batch, which can either be an empty string or a serialized Hypothesis
# proto.
# topk_ids: Int32 Tensor of shape [batch_size * num_hyps_per_beam,
# target_seq_len] which contains the IDs of the targets in each of the
# hypotheses in the beam for the samples in the batch. For sample
# 'b' in the batch, the h-th hypothesis for this sample can be found at
# position [b * num_hyps_per_beam + h, :].
# topk_lens: Int32 Tensor of shape [batch_size * num_hyps_per_beam] which
# indicates the length (>=0) of each of the hypotheses.
# topk_scores: Float32 Tensor of shape [batch_size * num_hyps_per_beam]
# containing the scores (negative log probabilities) of each of the
# hypotheses in the beam.
# topk_decoded: A string Tensor of shape [batch_size * num_hyps_per_beam] which
# contains the decoded target strings in each of the hypotheses in the
# beam for the samples in the batch. The 'h'-th hyp for sample 'b' can
# be found at topk_decoded[b * num_hyps_per_beam + h]
BeamSearchDecodeOutput = collections.namedtuple(
'BeamSearchDecodeOutput',
[
'done_hyps', 'topk_hyps', 'topk_ids', 'topk_lens', 'topk_scores',
'topk_decoded', 'other_states'
],
)
# Make the last attribute default to None.
BeamSearchDecodeOutput.__new__.__defaults__ = (None,)
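# Illustrative sketch (not part of the original module): the comment block above
# describes two flat layouts for hypothesis 'h' of batch sample 'b'. The two
# hypothetical helpers below just spell out the formulas quoted there.
def _example_hyp_major_index(h, b, batch_size):
  """Flat index used by done_hyps-style tensors: done_hyps[t, batch_size*h+b]."""
  return batch_size * h + b
def _example_beam_major_index(h, b, num_hyps_per_beam):
  """Flat row used by topk_ids-style tensors: [b * num_hyps_per_beam + h, :]."""
  return b * num_hyps_per_beam + h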
class BeamSearchHelper(base_layer.BaseLayer):
"""Helper class for performing beam search.
The user of this helper class needs to implement three callbacks.
This callback is called once only at the beginning of beam search:
.. code-block:: none
def InitBeamSearchState(theta, encoder_outputs, num_hyps_per_beam):
Args:
theta: A NestedMap object containing weights' values of this layer and
its children layers.
encoder_outputs: A NestedMap computed by encoder.
      num_hyps_per_beam: An int, the number of hyps to keep per source sentence.
Returns:
A tuple (initial_results, states):
- initial_results: a `.NestedMap` of initial results. It must contain
the 'atten_probs' and 'log_probs' tensors. Optionally it may
contain 'step_ids'.
- log_probs: The initial log probs for each of the tokens in the
target vocab of shape [num_hyps_per_beam * src_batch, vocab_size].
src_batch "b" and hyp_per_beam "h" is represented at index
``(h * src_batch + b)``.
- atten_probs: The initial attention probs, of shape
[num_hyps_per_beam * src_batch, src_len]. src_batch "b" and
hyp_per_beam "h" is represented at index ``(h * src_batch + b)``.
- step_ids: Optional. The initial ids of shape [num_hyps_per_beam *
src_batch, 1] for which to start the beam search. src_batch "b"
hyp_per_beam "h" is represented at index ``(h * src_batch + b)``.
If not specified, defaults to a tensor filled with target_sos_id.
- states: a `.NestedMap` of tensors representing states that the
client would like to keep track of for each hyp.
This callback is called once every decoding time step before beam_search_step
is called:
.. code-block:: none
def PreBeamSearchStepCallback(theta,
encoder_outputs,
step_ids,
in_states,
num_hyps_per_beam):
Args:
theta: A NestedMap object containing weights' values of this layer and
its children layers.
encoder_outputs: A NestedMap computed by encoder.
step_ids: A tensor of shape [num_hyps_per_beam * src_batch, 1].
in_states: A `.NestedMap` of tensors representing states that the
clients would like to keep track of for each of the active hyps.
Returns:
A tuple (results, out_states):
- results: A `.NestedMap` of beam search results. It should contain
        the 'atten_probs' and 'log_probs' tensors at a minimum.
Optionally it may contain 'is_last_chunk' if it is decoding a
neural transducer model.
- atten_probs: The updated attention probs, of shape
[num_hyps_per_beam * src_batch, src_len]. src_batch "b" and
hyp_per_beam "h" is represented at index ``(h * src_batch + b)``.
- log_probs: Log prob for each of the tokens in the target vocab.
This is of shape [num_hyps_per_beam * src_batch, vocab_size].
src_batch "b" and hyp_per_beam "h" is represented at index
``(h * src_batch + b)``.
        - is_last_chunk: Whether each hyp is at the end of a chunk.
If non-empty, it has shape [num_hyps_per_beam * src_batch, 1].
- out_states: A `.NestedMap`. The updated states. This 'out_states'
should be of the exact same structure as 'in_states'
This callback is called once every decoding time step after beam_search_step
is called:
.. code-block:: none
def PostBeamSearchStepCallback(theta,
encoder_outputs,
new_step_ids,
other_states):
Args:
theta: A NestedMap object containing weights' values of this layer and
its children layers.
encoder_outputs: A NestedMap computed by encoder.
new_step_ids: Token ids for the next beam search step.
other_states: A `.NestedMap`.
Returns:
final_states, A `.NestedMap`.
"""
@classmethod
def Params(cls):
p = super(BeamSearchHelper, cls).Params()
p.Define('num_hyps_per_beam', 8,
'Num of hyps to keep per beam during decoding.')
p.Define(
'target_seq_length_ratio', 1.0,
'Ratio of the average target sequence length over the average '
'source sequence length.')
p.Define('length_normalization', 0.0,
'Beam search length normalization ratio.')
p.Define('coverage_penalty', 0.0, 'Beam search coverage penalty.')
p.Define(
'valid_eos_max_logit_delta', 5.0,
'During beam search, allow </s> to terminate a hyp only if its '
        'logit is no more than this value away from the logit of the '
'best candidate.')
p.Define(
'beam_size', 3.0,
'The maximum difference between best hyp and the worst in a beam.'
        ' This allows us to prune the search when none of the active hyps is'
' close enough to the current best.')
p.Define('target_sos_id', 1, 'Id of the start of sentence token.')
p.Define('target_eos_id', 2, 'Id of the end of sentence token.')
p.Define(
'target_eoc_id', -1,
'Id of the end of chunk token. Used by neural transducer only.'
' Set this id to a non-negative value only for NT.')
p.Define(
'target_seq_len', 0, 'Maximum allowed target seq length. Note '
'that decoding terminates if an end of sentence token '
'is not emitted after target_seq_len decode steps.')
p.Define(
'merge_paths', False, 'If true, hyps which are identical when '
'epsilons are removed will be combined into a single hyp. The '
'probability for that combined hyp will be the sum of the '
'probabilities of the component hyps. This can only be applied '
'for epsilon-emitting models (RNN-T and NT).')
p.Define(
'allow_empty_terminated_hyp', True, 'Whether it is okay to consider a '
'hyp that consists only of epsilons as terminated. By default this '
'is true, as an utterance may consist of silence. It should be set '
'to false when EMBR training epsilon-emitting models (e.g., RNN-T), '
'which are prone to emit all-epsilon hyps even in the presence of '
'speech. Note that a hyp that terminates in EOS is not considered '
'empty, so this flag has no effect for non-epsilon-emitting models.')
p.Define(
'ensure_full_beam', False, 'If True, we will not terminate the search '
'until both of these conditions are satisfied: we have found '
'num_hyps_per_beam terminated hyps AND no active hyps have a score '
'within beam_size of the best terminated hyp. If False, only the '
'second condition must be satisfied. Note that in either case, we can '
'also terminate if we have run for target_seq_len steps. Generally '
'this should be False unless beam search is being run as part of '
'minimum word error rate training.')
p.Define(
'force_eos_in_last_step', False,
'For all active hyps that are still on the beam after target_seq_len '
'steps, return partial hyps with EOS set as the last token.')
p.Define(
'batch_major_state', True, 'If True, we use batch as the major '
'dimension of the hyp states. Otherwise, timing becomes the major '
'dimension, and the gathers are performed along the second-to-major '
'dimension.')
p.Define(
'batch_major_compute', False, 'If True, the target batch dimension '
'is organized as num_beams by num_hyps_per_beam during the '
'ExtendStep computation and the cache is stored following this order. '
'So the topk indices into the cache for ReOrderHyps needs to be '
'reordered before usage. Otherwise, the indices will be directly used '
'without extra transformation.')
p.Define(
'short_seq_limit', 0,
'An integer, the sequence length limit for using early stop '
'method in attention layer (batch-major implementation). The sequence '
'is always treated as the default long sequence for decoding when the '
'limit is set to 0. For typical mt transformer config '
'(batch 16, sequence length 150), the break even point is around 40 '
'on TPU V3, and 50 on TPU V2. This may slightly change for '
'different batch size and sequence length, which requires more '
'experiments to set the value.')
p.Define(
'local_eos_threshold', -100.0,
'During beam search, allow </s> to terminate a hyp if the local score '
'for </s> is greater than local_eos_threshold.')
p.name = 'beam_search'
return p
@base_layer.initializer
def __init__(self, params):
super(BeamSearchHelper, self).__init__(params)
p = self.params
self._model_uses_eoc_id = p.target_eoc_id >= 0
def _BeamSearchStep(self, theta, encoder_outputs, cur_step, step_ids,
core_bs_states, other_states, num_hyps_per_beam,
pre_beam_search_step_callback,
post_beam_search_step_callback):
"""Extend beam search hyps for one step.
| num_beams = Number of source sequences to be decoded.
| num_hyps_per_beam = Number of hyps to keep per source sequence.
| num_hyps = num_beams * num_hyps_per_beam
| src_seq_len = Number of time steps in the source sequence.
| src_batch = Number of examples in the source sequence.
| tgt_seq_len = Maximum allowed time steps in the target sequence.
| tgt_batch = num_hyps_per_beam * src_batch
Args:
theta: A `.NestedMap` object containing weights' values of the decoder
layer and its children layers.
encoder_outputs: A `.NestedMap` containing encoder outputs to be passed to
the callbacks.
cur_step: A scalar int tensor, the current time step, 0-based.
step_ids: An int tensor of shape [num_hyps, 1]. The input ids to the
current search step.
core_bs_states: A tuple of core beam search states. This list is
maintained by this helper class.
other_states: A `.NestedMap` of other beam search states. This
`.NestedMap` is managed and updated by the client. It is expected that
each of its member tensors are of rank >= 1. t[i, ...] is the state of
the i-th hyp at the beginning of this search step.
num_hyps_per_beam: Num of hyps to keep per beam.
pre_beam_search_step_callback: The `PreBeamSearchStepCallback` callback.
See class header comments for more details.
post_beam_search_step_callback: The `PostBeamSearchStepCallback` callback.
See class header comments for more details.
Returns:
A tuple of following elements for the next beam search step,
(next step, all_done, step_ids, core_bs_states, other_states)
"""
p = self.params
bs_results, other_states = pre_beam_search_step_callback(
theta, encoder_outputs, step_ids, other_states, num_hyps_per_beam)
(best_scores, cumulative_scores, in_scores, in_hyps, in_prev_hyps,
in_done_hyps, in_atten_probs) = core_bs_states
(out_best_scores, out_cumulative_scores, out_scores, out_hyps,
out_prev_hyps, out_done_hyps, out_atten_probs,
all_done) = ops.beam_search_step(
tf.cast(bs_results.log_probs, dtype=p.dtype),
tf.cast(bs_results.atten_probs, dtype=p.dtype),
best_scores,
cumulative_scores,
in_scores,
in_hyps,
in_prev_hyps,
in_done_hyps,
in_atten_probs,
bs_results.is_last_chunk if self._model_uses_eoc_id else [],
cur_step,
eoc_id=p.target_eoc_id,
eos_id=p.target_eos_id,
beam_size=p.beam_size,
num_hyps_per_beam=num_hyps_per_beam,
valid_eos_max_logit_delta=p.valid_eos_max_logit_delta,
merge_paths=p.merge_paths,
allow_empty_terminated_hyp=p.allow_empty_terminated_hyp,
ensure_full_beam=p.ensure_full_beam,
force_eos_in_last_step=p.force_eos_in_last_step,
local_eos_threshold=p.local_eos_threshold)
new_step_ids = tf.reshape(out_hyps[cur_step, :], tf.shape(step_ids))
new_step_ids.set_shape(step_ids.get_shape())
old_hyp_ids = tf.reshape(
tf.slice(out_prev_hyps, begin=[cur_step, 0], size=[1, -1]), [-1])
if p.batch_major_compute:
      # Transform the indices into the key/value cache for fast decoding
      # (prefix_states in other_states): the num_hyps dimension of the cache
      # is laid out as num_beams by num_hyps_per_beam, which differs from the
      # old_hyp_ids layout (num_hyps_per_beam by num_beams). Both a transpose
      # and a recomputation are required to correct the indices.
num_beams = tf.shape(best_scores)[0]
old_hyp_ids_in_cache_order = tf.reshape(
tf.transpose(tf.reshape(old_hyp_ids, [num_hyps_per_beam, -1])), [-1])
old_hyp_ids_in_cache_order = (
(old_hyp_ids_in_cache_order % num_beams) * num_hyps_per_beam +
old_hyp_ids_in_cache_order // num_beams)
new_bs_states = (out_best_scores, out_cumulative_scores, out_scores,
out_hyps, out_prev_hyps, out_done_hyps, out_atten_probs)
def ReOrderHyps(x_in):
"""Reorders x_in based on prev hyp ids."""
if (isinstance(x_in, tf.Tensor) and x_in.shape.ndims and
x_in.shape.ndims > 0):
if x_in.shape.ndims > 2 and not p.batch_major_state:
# Use corrected indices only here for batch major compute as key/value
# caches are the states being affected.
correct_old_hyp_ids = (
old_hyp_ids_in_cache_order
if p.batch_major_compute else old_hyp_ids)
x_out = tf.gather(x_in, correct_old_hyp_ids, axis=1)
else:
x_out = tf.gather(x_in, old_hyp_ids)
x_out.set_shape(x_in.get_shape())
return x_out
else:
return x_in
new_other_states = other_states.Transform(ReOrderHyps)
final_other_states = post_beam_search_step_callback(theta, encoder_outputs,
new_step_ids,
new_other_states)
return (cur_step + 1, all_done, new_step_ids, new_bs_states,
final_other_states)
def BeamSearchDecode(self,
theta,
encoder_outputs,
num_hyps_per_beam_override=0,
init_beam_search_state=None,
pre_beam_search_step_callback=None,
post_beam_search_step_callback=None,
max_steps=None):
"""Performs beam-search based decoding.
Args:
theta: A NestedMap object containing weights' values of the decoder layer
and its children layers.
encoder_outputs: A NestedMap containing encoder outputs to be passed to
the callbacks. Mostly opaque to BeamSearchHelper, except that it should
contain either a 'seq_lengths' field of shape [source_batch_size] or
a 'paddings' field of shape [source_max_lengths, source_batch_size].
num_hyps_per_beam_override: If set to a value <= 0, this parameter is
ignored. If set to a value > 0, then this value will be used to override
`p.num_hyps_per_beam`.
init_beam_search_state: The `InitBeamSearchState` callback. Please refer
to the class header comments for more details.
pre_beam_search_step_callback: The `PreBeamSearchStepCallback` callback.
Please refer to the class header comments for more details.
post_beam_search_step_callback: The `PostBeamSearchStepCallback` callback.
Please refer to the class header comments for more details.
max_steps: maximum beam search steps. If None, use
self.params.target_seq_len.
Returns:
A `BeamSearchDecodeOutput`.
"""
p = self.params
num_hyps_per_beam = p.num_hyps_per_beam
if num_hyps_per_beam_override > 0:
num_hyps_per_beam = num_hyps_per_beam_override
if max_steps is None:
max_steps = p.target_seq_len
initial_results, other_states = init_beam_search_state(
theta, encoder_outputs, num_hyps_per_beam)
num_hyps = tf.shape(initial_results.log_probs)[0]
num_beams = num_hyps // num_hyps_per_beam
if 'step_ids' in initial_results:
# [num_hyps, 1]
step_ids = tf.ensure_shape(initial_results.step_ids, [None, 1])
else:
step_ids = tf.fill([num_hyps, 1],
tf.constant(p.target_sos_id, dtype=tf.int32))
min_score = -1e36
best_scores = (tf.zeros(shape=[num_beams], dtype=p.dtype) + min_score)
cumulative_scores = tf.zeros(shape=[num_hyps], dtype=p.dtype)
in_scores = tf.zeros([max_steps, num_hyps], dtype=p.dtype)
in_hyps = tf.zeros([max_steps, num_hyps], dtype=tf.int32)
in_prev_hyps = tf.zeros([max_steps, num_hyps], dtype=tf.int32)
in_done_hyps = tf.zeros([max_steps, num_hyps], dtype=tf.string)
bs_atten_probs = tf.zeros(
[max_steps, num_hyps,
tf.shape(initial_results.atten_probs)[1]],
dtype=p.dtype)
cur_step = tf.constant(0, dtype=tf.int32)
all_done = tf.constant(False, dtype=tf.bool)
core_bs_states = (best_scores, cumulative_scores, in_scores, in_hyps,
in_prev_hyps, in_done_hyps, bs_atten_probs)
def LoopContinue(cur_step, all_done, unused_step_ids, unused_core_bs_states,
unused_other_states_list):
return tf.math.logical_and(cur_step < max_steps,
tf.math.logical_not(all_done))
def LoopBody(cur_step, unused_all_done, step_ids, core_bs_states,
other_states_list):
(cur_step, all_done, new_step_ids, new_bs_states,
new_other_states) = self._BeamSearchStep(
theta, encoder_outputs, cur_step, step_ids, core_bs_states,
other_states.Pack(other_states_list), num_hyps_per_beam,
pre_beam_search_step_callback, post_beam_search_step_callback)
return (cur_step, all_done, new_step_ids, new_bs_states,
new_other_states.Flatten())
flat_other_states = other_states.Flatten()
_, _, _, final_bs_states, flat_final_other_states = tf.while_loop(
LoopContinue,
LoopBody,
loop_vars=(cur_step, all_done, step_ids, core_bs_states,
flat_other_states),
parallel_iterations=10,
back_prop=False,
swap_memory=False,
shape_invariants=(tf.TensorShape(cur_step.get_shape()),
tf.TensorShape(all_done.get_shape()),
tf.TensorShape(step_ids.get_shape()),
_GetShapes(core_bs_states),
_GetShapes(flat_other_states, none_shapes=True)))
# [target_seq_len, num_beams * num_hyps_per_beam].
final_done_hyps = final_bs_states[5]
final_other_states = other_states.Pack(flat_final_other_states)
# Assume that `paddings` has shape [source_max_lengths, source_batch_size]
# by default, and compute `encoded_seq_lengths` accordingly. This can be
# overridden by directly passing `seq_lengths` in the `encoder_outputs`
# NestedMap.
encoded_seq_lengths = getattr(encoder_outputs, 'seq_lengths', None)
if encoded_seq_lengths is None:
source_paddings = encoder_outputs.padding
if isinstance(source_paddings, py_utils.NestedMap):
encoded_seq_lengths = tf.cast(
tf.round(
tf.reduce_sum(1.0 - tf.transpose(source_paddings.Flatten()[0]),
1)), tf.int32)
else:
encoded_seq_lengths = tf.cast(
tf.round(tf.reduce_sum(1.0 - tf.transpose(source_paddings), 1)),
tf.int32)
# [num_beams, num_hyps_per_beam].
topk_hyps = ops.top_k_terminated_hyps(
final_done_hyps,
encoded_seq_lengths,
k=num_hyps_per_beam,
num_hyps_per_beam=num_hyps_per_beam,
length_normalization=p.length_normalization,
coverage_penalty=p.coverage_penalty,
target_seq_length_ratio=p.target_seq_length_ratio,
eoc_id=p.target_eoc_id,
merge_paths=p.merge_paths)
# [num_beams * num_hyps_per_beam, ...].
max_seq_length = 0 if isinstance(max_steps, tf.Tensor) else max_steps
topk_ids, topk_lens, topk_scores = ops.unpack_hyp(
tf.reshape(topk_hyps, [-1]), max_seq_length=max_seq_length)
# [num_beams, num_hyps_per_beam].
topk_scores = tf.reshape(topk_scores, tf.shape(topk_hyps))
return BeamSearchDecodeOutput(final_done_hyps, topk_hyps, topk_ids,
topk_lens, topk_scores, None,
final_other_states)
def _GetShapes(tensors, none_shapes=False):
"""Util for getting nested structure of shapes from structure of tensors.
Args:
tensors: Structure of Tensors to get shapes for.
none_shapes: Returns None shapes if true.
Returns:
The same structure as tensors but of corresponding `TensorShape` objects.
"""
shapes = []
for t in tf.nest.flatten(tensors):
shape = t.get_shape() if isinstance(t, tf.Tensor) else None
if none_shapes:
if shape:
shapes.append(tf.TensorShape([None] * len(shape)))
else:
shapes.append(tf.TensorShape(None))
else:
shapes.append(tf.TensorShape(shape))
return type(tensors)(tf.nest.pack_sequence_as(tensors, shapes))
def MergeBeamSearchOutputs(max_hyps_per_beam, beam_search_outputs):
"""Merges beam search hyps from multiple decoders.
Args:
max_hyps_per_beam: the number of top hyps in the merged results. Must be
less than or equal to total number of input hyps.
beam_search_outputs: a list of BeamSearchDecodeOutput objects. Must share
the same source_batch and max sequence length.
Returns:
A BeamSearchDecodeOutput object containing max_hyps_per_beam hypotheses per
beam.
"""
source_batch = tf.shape(beam_search_outputs[0].topk_hyps)[0]
value_dict = {}
for output in beam_search_outputs:
hyps_per_beam = py_utils.with_dependencies([
py_utils.assert_equal(source_batch,
tf.shape(output.topk_hyps)[0]),
],
tf.shape(output.topk_hyps)[1])
for k, v in six.iteritems(output._asdict()):
if v is None:
continue
if k == 'done_hyps':
v = tf.transpose(v)
if k not in value_dict:
value_dict[k] = []
value_dict[k].append(tf.reshape(v, [source_batch, hyps_per_beam, -1]))
# Concatenate the tensors along the 'num_hyps_per_beam' dimension.
concatenated = {}
for k, values in six.iteritems(value_dict):
if len(values) != len(beam_search_outputs):
raise ValueError('Incomplete values for %s: %s' %
(k, beam_search_outputs))
concatenated[k] = tf.concat(values, axis=1)
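  # Hyps with zero length are empty/padded slots from unpack_hyp; force their
  # scores to a large negative value so the top_k below never selects them.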
scores = concatenated['topk_scores']
scores = tf.where(
tf.equal(concatenated['topk_lens'], 0), tf.fill(tf.shape(scores), -1e6),
scores)
scores = tf.squeeze(scores, -1)
# Select top max_hyps_per_beam indices per beam.
_, top_indices = tf.nn.top_k(scores, max_hyps_per_beam)
batch_ids = tf.tile(
tf.expand_dims(tf.range(source_batch), -1), [1, max_hyps_per_beam])
# [source_batch, max_hyps_per_beam, 2]
gather_indices = tf.stack([batch_ids, top_indices], axis=-1)
# Gather the merged top hyps according to 'gather_indices'.
top = beam_search_outputs[0]._asdict()
total_hyps = source_batch * max_hyps_per_beam
for k, v in six.iteritems(concatenated):
v = tf.gather_nd(v, gather_indices)
if k == 'done_hyps':
v = tf.transpose(tf.reshape(v, [total_hyps, -1]))
elif k == 'topk_hyps':
v = tf.reshape(v, [source_batch, max_hyps_per_beam])
elif k == 'topk_ids':
v = tf.reshape(v, [total_hyps, -1])
elif k in ('topk_lens', 'topk_scores', 'topk_decoded'):
v = tf.reshape(v, [total_hyps])
else:
raise ValueError('Unexpected field: %s' % k)
top[k] = v
return BeamSearchDecodeOutput(**top)
class GreedySearchHelper(base_layer.BaseLayer):
"""Helper class for performing greedy decoding.
The user of this helper class needs to implement three callbacks just as in a
beam search decoder.
"""
@classmethod
def Params(cls):
p = super(GreedySearchHelper, cls).Params()
p.Define('target_sos_id', 1, 'Id of the start of sentence token.')
p.Define('target_eos_id', 2, 'Id of the end of sentence token.')
p.Define(
'target_seq_len', 0, 'Maximum allowed target seq length. Note '
'that decoding terminates if an end of sentence token '
'is not emitted after target_seq_len decode steps.')
p.name = 'greedy_search'
return p
def _GreedySearchStep(self, theta, encoder_outputs, cur_step, step_ids,
hyp_ids, hyp_lens, done_hyps, other_states,
pre_beam_search_step_callback,
post_beam_search_step_callback):
"""Extend greedy search hyps for one step.
Args:
theta: A `.NestedMap` object containing weights' values of the decoder
layer and its children layers.
encoder_outputs: A `.NestedMap` containing encoder outputs to be passed to
the callbacks.
cur_step: A scalar int tensor, the current time step, 0-based.
step_ids: An int tensor of shape [num_hyps, 1]. The input ids to the
current search step.
hyp_ids: An int tensor of shape [num_hyps, tgt_seq_len].
hyp_lens: Valid length of all the hyps. Tokens after eos ids are not
counted.
done_hyps: Whether or not a hyp has finished.
other_states: A `.NestedMap` of other beam search states. This
`.NestedMap` is managed and updated by the client. It is expected that
each of its member tensors are of rank >= 1. t[i, ...] is the state of
the i-th hyp at the beginning of this search step.
pre_beam_search_step_callback: The `PreBeamSearchStepCallback` callback.
See class header comments for more details.
post_beam_search_step_callback: The `PostBeamSearchStepCallback` callback.
See class header comments for more details.
Returns:
A tuple of following elements for the next greedy search step,
(next step, new_step_ids, hyp_ids, hyp_lens, done_hyps, other_states)
"""
p = self.params
# Increment hyp_lens by 1 if the hyp is not finished yet.
hyp_lens = hyp_lens + (1 - tf.cast(done_hyps, tf.int32))
bs_results, new_other_states = pre_beam_search_step_callback(
theta, encoder_outputs, step_ids, other_states, 1) # num_hyps_per_beam
new_step_ids = tf.math.argmax(bs_results.log_probs, 1)
new_step_ids = tf.cast(new_step_ids, tf.int32)
new_step_ids = tf.reshape(new_step_ids, tf.shape(step_ids))
final_other_states = post_beam_search_step_callback(theta, encoder_outputs,
new_step_ids,
new_other_states)
# Stash new_step_ids into the right slot.
new_step_ids_1d = tf.reshape(new_step_ids, [-1])
hyp_ids = inplace_ops.alias_inplace_update(hyp_ids, cur_step,
new_step_ids_1d)
# Update done_hyps if the current step_ids is the end of sequence token.
done_hyps = tf.math.logical_or(done_hyps,
tf.equal(new_step_ids_1d, p.target_eos_id))
return (cur_step + 1, new_step_ids, hyp_ids, hyp_lens, done_hyps,
final_other_states)
def GreedySearchDecode(self,
theta,
encoder_outputs,
init_beam_search_state=None,
pre_beam_search_step_callback=None,
post_beam_search_step_callback=None,
max_steps=None):
"""Performs greedy-search based decoding.
Args:
theta: A NestedMap object containing weights' values of the decoder layer
and its children layers.
encoder_outputs: A NestedMap containing encoder outputs to be passed to
the callbacks.
init_beam_search_state: The `InitBeamSearchState` callback. Please refer
to the class header comments for more details.
pre_beam_search_step_callback: The `PreBeamSearchStepCallback` callback.
Please refer to the class header comments for more details.
post_beam_search_step_callback: The `PostBeamSearchStepCallback` callback.
Please refer to the class header comments for more details.
max_steps: maximum beam search steps. If None, use
self.params.target_seq_len.
Returns:
A tuple (hyp_ids, hyp_lens, done_hyps). Note that num_hyps is same as
src_batch_size.
- hyp_ids: [num_hyps, max_step]. Hyps end with <eos> token if the <eos>
token is encountered during search.
- hyp_lens: [num_hyps].
- done_hyps: [num_hyps], whether or not an eos is encountered.
"""
p = self.params
if max_steps is None:
max_steps = p.target_seq_len
initial_results, other_states = init_beam_search_state(
theta,
encoder_outputs,
1 # num_hyps_per_beam
)
num_hyps = tf.shape(initial_results.log_probs)[0]
if 'step_ids' in initial_results:
# [num_hyps, 1]
step_ids = tf.ensure_shape(initial_results.step_ids, [None, 1])
else:
step_ids = tf.fill([num_hyps, 1],
tf.constant(p.target_sos_id, dtype=tf.int32))
cur_step = tf.constant(0, dtype=tf.int32)
done_hyps = inplace_ops.empty(
shape=[num_hyps], dtype=tf.bool, init=True, name='done_hyps')
hyp_lens = inplace_ops.empty(
shape=[num_hyps], dtype=tf.int32, init=True, name='hyp_lens')
hyp_ids = inplace_ops.empty(
shape=[max_steps, num_hyps], dtype=tf.int32, init=True, name='hyp_ids')
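    # Note: hyp_ids is time-major ([max_steps, num_hyps]) while the loop runs
    # and is transposed to [num_hyps, max_steps] after the loop to match
    # BeamSearchDecode's output layout.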
def LoopContinue(cur_step, unused_step_ids, unused_hyp_ids, unused_hyp_lens,
done_hyps, unused_other_states_list):
return tf.math.logical_and(cur_step < max_steps,
tf.math.logical_not(tf.reduce_all(done_hyps)))
def LoopBody(cur_step, step_ids, hyp_ids, hyp_lens, done_hyps,
other_states_list):
(cur_step, new_step_ids, hyp_ids, hyp_lens, done_hyps,
new_other_states) = self._GreedySearchStep(
theta, encoder_outputs, cur_step,
step_ids, hyp_ids, hyp_lens, done_hyps,
other_states.Pack(other_states_list), pre_beam_search_step_callback,
post_beam_search_step_callback)
return (cur_step, new_step_ids, hyp_ids, hyp_lens, done_hyps,
new_other_states.Flatten())
flat_other_states = other_states.Flatten()
_, _, final_hyp_ids, final_hyp_lens, final_done_hyps, _ = tf.while_loop(
LoopContinue,
LoopBody,
loop_vars=(cur_step, step_ids, hyp_ids, hyp_lens, done_hyps,
flat_other_states),
parallel_iterations=10,
back_prop=False,
swap_memory=False,
shape_invariants=(tf.TensorShape(cur_step.get_shape()),
tf.TensorShape(step_ids.get_shape()),
tf.TensorShape(hyp_ids.get_shape()),
tf.TensorShape(hyp_lens.get_shape()),
tf.TensorShape(done_hyps.get_shape()),
_GetShapes(flat_other_states, none_shapes=True)))
# transpose hyp_ids so it matches BeamSearchDecode's output
final_hyp_ids = tf.transpose(final_hyp_ids)
return final_hyp_ids, final_hyp_lens, final_done_hyps
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from msrest import Serializer
from .. import models as _models
from .._vendor import _convert_request, _format_url_section
T = TypeVar('T')
JSONType = Any
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
_SERIALIZER = Serializer()
_SERIALIZER.client_side_validation = False
def build_get_request(
vault_name: str,
resource_group_name: str,
subscription_id: str,
policy_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-12-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupPolicies/{policyName}')
path_format_arguments = {
"vaultName": _SERIALIZER.url("vault_name", vault_name, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"policyName": _SERIALIZER.url("policy_name", policy_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="GET",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
def build_create_or_update_request(
vault_name: str,
resource_group_name: str,
subscription_id: str,
policy_name: str,
*,
json: JSONType = None,
content: Any = None,
**kwargs: Any
) -> HttpRequest:
content_type = kwargs.pop('content_type', None) # type: Optional[str]
api_version = "2021-12-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupPolicies/{policyName}')
path_format_arguments = {
"vaultName": _SERIALIZER.url("vault_name", vault_name, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"policyName": _SERIALIZER.url("policy_name", policy_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
if content_type is not None:
header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str')
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="PUT",
url=url,
params=query_parameters,
headers=header_parameters,
json=json,
content=content,
**kwargs
)
def build_delete_request_initial(
vault_name: str,
resource_group_name: str,
subscription_id: str,
policy_name: str,
**kwargs: Any
) -> HttpRequest:
api_version = "2021-12-01"
accept = "application/json"
# Construct URL
url = kwargs.pop("template_url", '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupPolicies/{policyName}')
path_format_arguments = {
"vaultName": _SERIALIZER.url("vault_name", vault_name, 'str'),
"resourceGroupName": _SERIALIZER.url("resource_group_name", resource_group_name, 'str'),
"subscriptionId": _SERIALIZER.url("subscription_id", subscription_id, 'str'),
"policyName": _SERIALIZER.url("policy_name", policy_name, 'str'),
}
url = _format_url_section(url, **path_format_arguments)
# Construct parameters
query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any]
query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str')
# Construct headers
header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any]
header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str')
return HttpRequest(
method="DELETE",
url=url,
params=query_parameters,
headers=header_parameters,
**kwargs
)
class ProtectionPoliciesOperations(object):
"""ProtectionPoliciesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.recoveryservicesbackup.activestamp.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace
def get(
self,
vault_name: str,
resource_group_name: str,
policy_name: str,
**kwargs: Any
) -> "_models.ProtectionPolicyResource":
"""Provides the details of the backup policies associated to Recovery Services Vault. This is an
asynchronous
operation. Status of the operation can be fetched using GetPolicyOperationResult API.
:param vault_name: The name of the recovery services vault.
:type vault_name: str
:param resource_group_name: The name of the resource group where the recovery services vault is
present.
:type resource_group_name: str
:param policy_name: Backup policy information to be fetched.
:type policy_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ProtectionPolicyResource, or the result of cls(response)
:rtype: ~azure.mgmt.recoveryservicesbackup.activestamp.models.ProtectionPolicyResource
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ProtectionPolicyResource"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
vault_name=vault_name,
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
policy_name=policy_name,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ProtectionPolicyResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupPolicies/{policyName}'} # type: ignore
@distributed_trace
def create_or_update(
self,
vault_name: str,
resource_group_name: str,
policy_name: str,
parameters: "_models.ProtectionPolicyResource",
**kwargs: Any
) -> Optional["_models.ProtectionPolicyResource"]:
"""Creates or modifies a backup policy. This is an asynchronous operation. Status of the operation
can be fetched
using GetPolicyOperationResult API.
:param vault_name: The name of the recovery services vault.
:type vault_name: str
:param resource_group_name: The name of the resource group where the recovery services vault is
present.
:type resource_group_name: str
:param policy_name: Backup policy to be created.
:type policy_name: str
:param parameters: resource backup policy.
:type parameters:
~azure.mgmt.recoveryservicesbackup.activestamp.models.ProtectionPolicyResource
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ProtectionPolicyResource, or the result of cls(response)
:rtype: ~azure.mgmt.recoveryservicesbackup.activestamp.models.ProtectionPolicyResource or None
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.ProtectionPolicyResource"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(parameters, 'ProtectionPolicyResource')
request = build_create_or_update_request(
vault_name=vault_name,
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
policy_name=policy_name,
content_type=content_type,
json=_json,
template_url=self.create_or_update.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('ProtectionPolicyResource', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupPolicies/{policyName}'} # type: ignore
def _delete_initial(
self,
vault_name: str,
resource_group_name: str,
policy_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request_initial(
vault_name=vault_name,
resource_group_name=resource_group_name,
subscription_id=self._config.subscription_id,
policy_name=policy_name,
template_url=self._delete_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupPolicies/{policyName}'} # type: ignore
@distributed_trace
def begin_delete(
self,
vault_name: str,
resource_group_name: str,
policy_name: str,
**kwargs: Any
) -> LROPoller[None]:
"""Deletes specified backup policy from your Recovery Services Vault. This is an asynchronous
operation. Status of the
operation can be fetched using GetProtectionPolicyOperationResult API.
:param vault_name: The name of the recovery services vault.
:type vault_name: str
:param resource_group_name: The name of the resource group where the recovery services vault is
present.
:type resource_group_name: str
:param policy_name: Backup policy to be deleted.
:type policy_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be ARMPolling. Pass in False for this
operation to not poll, or pass in your own initialized polling object for a personal polling
strategy.
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
vault_name=vault_name,
resource_group_name=resource_group_name,
policy_name=policy_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{vaultName}/backupPolicies/{policyName}'} # type: ignore
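# Illustrative usage sketch (not part of the generated module; the client class
# name below is an assumption based on the usual azure-mgmt SDK layout):
#
#     from azure.identity import DefaultAzureCredential
#     from azure.mgmt.recoveryservicesbackup.activestamp import RecoveryServicesBackupClient
#
#     client = RecoveryServicesBackupClient(DefaultAzureCredential(), subscription_id)
#     policy = client.protection_policies.get(vault_name, resource_group_name, policy_name)
#     poller = client.protection_policies.begin_delete(vault_name, resource_group_name, policy_name)
#     poller.wait()  # block until the long-running delete completes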
| |
import datetime
import commonware.log
import django_tables as tables
import jinja2
from django.conf import settings
from django.template import Context, loader
from django.utils.datastructures import SortedDict
from django.utils.translation import force_text
from jingo import register
from tower import ugettext as _, ugettext_lazy as _lazy, ungettext as ngettext
import amo
from access import acl
from addons.helpers import new_context
from addons.models import Addon
from amo.helpers import absolutify, breadcrumbs, page_title
from amo.urlresolvers import reverse
from amo.utils import send_mail as amo_send_mail
from editors.models import (ReviewerScore, ViewFastTrackQueue,
ViewFullReviewQueue, ViewPendingQueue,
ViewPreliminaryQueue,
ViewUnlistedFullReviewQueue,
ViewUnlistedPendingQueue,
ViewUnlistedPreliminaryQueue)
from editors.sql_table import SQLTable
from lib.crypto.packaged import sign_file
from users.models import UserProfile
@register.function
def file_compare(file_obj, version):
# Compare this file to the one in the version with same platform
file_obj = version.files.filter(platform=file_obj.platform)
# If not there, just compare to all.
if not file_obj:
file_obj = version.files.filter(platform=amo.PLATFORM_ALL.id)
    # At this point we've got no idea what platform file to
    # compare with, so just choose the first.
if not file_obj:
file_obj = version.files.all()
return file_obj[0]
@register.function
def file_review_status(addon, file):
# If the file is pending review, check the add-on status
if file.status == amo.STATUS_UNREVIEWED:
if addon.status in [amo.STATUS_NOMINATED, amo.STATUS_PUBLIC]:
return _(u'Pending Full Review')
if addon.status in [amo.STATUS_UNREVIEWED, amo.STATUS_LITE]:
return _(u'Pending Preliminary Review')
# Special case: prelim upgrading to full approval,
# file can already be preliminary reviewed or not
if (file.status in [amo.STATUS_LITE, amo.STATUS_UNREVIEWED]
and addon.status == amo.STATUS_LITE_AND_NOMINATED):
if addon.latest_version.version_int == file.version.version_int:
return _(u'Pending Full Review')
if file.status in [amo.STATUS_DISABLED, amo.STATUS_REJECTED]:
if file.reviewed is not None:
return _(u'Rejected')
        # Can't assume that if the reviewed date is missing it's
        # unreviewed, especially for versions.
else:
return _(u'Rejected or Unreviewed')
return file.STATUS_CHOICES[file.status]
@register.function
def version_status(addon, version):
return ','.join(unicode(s) for s in version.status)
@register.function
@jinja2.contextfunction
def editor_page_title(context, title=None, addon=None):
"""Wrapper for editor page titles. Eerily similar to dev_page_title."""
if addon:
title = u'%s :: %s' % (title, addon.name)
else:
section = _lazy('Editor Tools')
title = u'%s :: %s' % (title, section) if title else section
return page_title(context, title)
@register.function
@jinja2.contextfunction
def editors_breadcrumbs(context, queue=None, addon_queue=None, items=None,
themes=False):
"""
Wrapper function for ``breadcrumbs``. Prepends 'Editor Tools'
breadcrumbs.
**items**
list of [(url, label)] to be inserted after Add-on.
**addon_queue**
Addon object. This sets the queue by addon type or addon status.
**queue**
Explicit queue type to set.
"""
crumbs = [(reverse('editors.home'), _('Editor Tools'))]
if themes:
crumbs.append((reverse('editors.themes.home'), _('Themes')))
if addon_queue:
queue_id = addon_queue.status
queue_ids = {amo.STATUS_UNREVIEWED: 'prelim',
amo.STATUS_NOMINATED: 'nominated',
amo.STATUS_PUBLIC: 'pending',
amo.STATUS_LITE: 'prelim',
amo.STATUS_LITE_AND_NOMINATED: 'nominated',
amo.STATUS_PENDING: 'pending'}
queue = queue_ids.get(queue_id, 'queue')
listed = not context.get('unlisted')
if queue:
if listed:
queues = {
'queue': _('Queue'),
'pending': _('Pending Updates'),
'nominated': _('Full Reviews'),
'prelim': _('Preliminary Reviews'),
'moderated': _('Moderated Reviews'),
'fast_track': _('Fast Track'),
'pending_themes': _('Pending Themes'),
'flagged_themes': _('Flagged Themes'),
'rereview_themes': _('Update Themes'),
}
else:
queues = {
'queue': _('Queue'),
'pending': _('Unlisted Pending Updates'),
'nominated': _('Unlisted Full Reviews'),
'prelim': _('Unlisted Preliminary Reviews')
}
if items and not queue == 'queue':
if listed:
url = reverse('editors.queue_{0}'.format(queue))
else:
url = reverse('editors.unlisted_queue_{0}'.format(queue))
else:
# The Addon is the end of the trail.
url = None
crumbs.append((url, queues[queue]))
if items:
crumbs.extend(items)
return breadcrumbs(context, crumbs, add_default=False)
@register.function
@jinja2.contextfunction
def queue_tabnav(context):
"""Returns tuple of tab navigation for the queue pages.
Each tuple contains three elements: (tab_code, page_url, tab_text)
"""
counts = context['queue_counts']
unlisted_counts = context['unlisted_queue_counts']
listed = not context.get('unlisted')
if listed:
tabnav = [('fast_track', 'queue_fast_track',
(ngettext('Fast Track ({0})',
'Fast Track ({0})',
counts['fast_track'])
.format(counts['fast_track']))),
('nominated', 'queue_nominated',
(ngettext('Full Review ({0})',
'Full Reviews ({0})',
counts['nominated'])
.format(counts['nominated']))),
('pending', 'queue_pending',
(ngettext('Pending Update ({0})',
'Pending Updates ({0})',
counts['pending'])
.format(counts['pending']))),
('prelim', 'queue_prelim',
(ngettext('Preliminary Review ({0})',
'Preliminary Reviews ({0})',
counts['prelim'])
.format(counts['prelim']))),
('moderated', 'queue_moderated',
(ngettext('Moderated Review ({0})',
'Moderated Reviews ({0})',
counts['moderated'])
.format(counts['moderated'])))]
else:
tabnav = [('nominated', 'unlisted_queue_nominated',
(ngettext('Unlisted Full Review ({0})',
'Unlisted Full Reviews ({0})',
unlisted_counts['nominated'])
.format(unlisted_counts['nominated']))),
('pending', 'unlisted_queue_pending',
(ngettext('Unlisted Pending Update ({0})',
'Unlisted Pending Updates ({0})',
unlisted_counts['pending'])
.format(unlisted_counts['pending']))),
('prelim', 'unlisted_queue_prelim',
(ngettext('Unlisted Preliminary Review ({0})',
'Unlisted Preliminary Reviews ({0})',
unlisted_counts['prelim'])
.format(unlisted_counts['prelim'])))]
return tabnav
@register.inclusion_tag('editors/includes/reviewers_score_bar.html')
@jinja2.contextfunction
def reviewers_score_bar(context, types=None, addon_type=None):
user = context.get('amo_user')
return new_context(dict(
request=context.get('request'),
amo=amo, settings=settings,
points=ReviewerScore.get_recent(user, addon_type=addon_type),
total=ReviewerScore.get_total(user),
**ReviewerScore.get_leaderboards(user, types=types,
addon_type=addon_type)))
@register.inclusion_tag('editors/includes/files_view.html')
@jinja2.contextfunction
def all_distinct_files(context, version):
"""Only display a file once even if it's been uploaded
for several platforms."""
# hashes_to_file will group files per hash:
# {<file.hash>: [<file>, 'Windows / Mac OS X']}
hashes_to_file = {}
for file_ in version.all_files:
display_name = force_text(amo.PLATFORMS[file_.platform].name)
if file_.hash in hashes_to_file:
hashes_to_file[file_.hash][1] += ' / ' + display_name
else:
hashes_to_file[file_.hash] = [file_, display_name]
return new_context(dict(
# We don't need the hashes in the template.
distinct_files=hashes_to_file.values(),
amo=context.get('amo'),
addon=context.get('addon'),
show_diff=context.get('show_diff'),
version=version))
class ItemStateTable(object):
def increment_item(self):
self.item_number += 1
def set_page(self, page):
self.item_number = page.start_index()
class EditorQueueTable(SQLTable, ItemStateTable):
addon_name = tables.Column(verbose_name=_lazy(u'Addon'))
addon_type_id = tables.Column(verbose_name=_lazy(u'Type'))
waiting_time_min = tables.Column(verbose_name=_lazy(u'Waiting Time'))
flags = tables.Column(verbose_name=_lazy(u'Flags'), sortable=False)
applications = tables.Column(verbose_name=_lazy(u'Applications'),
sortable=False)
platforms = tables.Column(verbose_name=_lazy(u'Platforms'),
sortable=False)
additional_info = tables.Column(
verbose_name=_lazy(u'Additional'), sortable=False)
def render_addon_name(self, row):
url = reverse('editors.review', args=[row.addon_slug])
self.increment_item()
return u'<a href="%s">%s <em>%s</em></a>' % (
url, jinja2.escape(row.addon_name),
jinja2.escape(row.latest_version))
def render_addon_type_id(self, row):
return amo.ADDON_TYPE[row.addon_type_id]
def render_additional_info(self, row):
info = []
if row.is_site_specific:
info.append(_lazy(u'Site Specific'))
if row.external_software:
info.append(_lazy(u'Requires External Software'))
if row.binary or row.binary_components:
info.append(_lazy(u'Binary Components'))
return u', '.join([jinja2.escape(i) for i in info])
def render_applications(self, row):
# TODO(Kumar) show supported version ranges on hover (if still needed)
icon = u'<div class="app-icon ed-sprite-%s" title="%s"></div>'
return u''.join([icon % (amo.APPS_ALL[i].short, amo.APPS_ALL[i].pretty)
for i in row.application_ids])
def render_platforms(self, row):
icons = []
html = u'<div class="platform-icon plat-sprite-%s" title="%s"></div>'
for platform in row.file_platform_ids:
icons.append(html % (amo.PLATFORMS[int(platform)].shortname,
amo.PLATFORMS[int(platform)].name))
return u''.join(icons)
def render_flags(self, row):
return ''.join(u'<div class="app-icon ed-sprite-%s" '
u'title="%s"></div>' % flag
for flag in row.flags)
def render_waiting_time_min(self, row):
if row.waiting_time_min == 0:
r = _lazy('moments ago')
elif row.waiting_time_hours == 0:
# L10n: first argument is number of minutes
r = ngettext(u'{0} minute', u'{0} minutes',
row.waiting_time_min).format(row.waiting_time_min)
elif row.waiting_time_days == 0:
# L10n: first argument is number of hours
r = ngettext(u'{0} hour', u'{0} hours',
row.waiting_time_hours).format(row.waiting_time_hours)
else:
# L10n: first argument is number of days
r = ngettext(u'{0} day', u'{0} days',
row.waiting_time_days).format(row.waiting_time_days)
return jinja2.escape(r)
@classmethod
def translate_sort_cols(cls, colname):
legacy_sorts = {
'name': 'addon_name',
'age': 'waiting_time_min',
'type': 'addon_type_id',
}
return legacy_sorts.get(colname, colname)
@classmethod
def default_order_by(cls):
return '-waiting_time_min'
@classmethod
def review_url(cls, row):
return reverse('editors.review', args=[row.addon_slug])
class Meta:
sortable = True
columns = ['addon_name', 'addon_type_id', 'waiting_time_min',
'flags', 'applications', 'additional_info']
class ViewPendingQueueTable(EditorQueueTable):
class Meta(EditorQueueTable.Meta):
model = ViewPendingQueue
class ViewFullReviewQueueTable(EditorQueueTable):
class Meta(EditorQueueTable.Meta):
model = ViewFullReviewQueue
class ViewPreliminaryQueueTable(EditorQueueTable):
class Meta(EditorQueueTable.Meta):
model = ViewPreliminaryQueue
class ViewFastTrackQueueTable(EditorQueueTable):
class Meta(EditorQueueTable.Meta):
model = ViewFastTrackQueue
class ViewUnlistedPendingQueueTable(EditorQueueTable):
class Meta(EditorQueueTable.Meta):
model = ViewUnlistedPendingQueue
class ViewUnlistedFullReviewQueueTable(EditorQueueTable):
class Meta(EditorQueueTable.Meta):
model = ViewUnlistedFullReviewQueue
class ViewUnlistedPreliminaryQueueTable(EditorQueueTable):
class Meta(EditorQueueTable.Meta):
model = ViewUnlistedPreliminaryQueue
log = commonware.log.getLogger('z.mailer')
NOMINATED_STATUSES = (amo.STATUS_NOMINATED, amo.STATUS_LITE_AND_NOMINATED)
PRELIMINARY_STATUSES = (amo.STATUS_UNREVIEWED, amo.STATUS_LITE)
PENDING_STATUSES = (amo.STATUS_BETA, amo.STATUS_DISABLED, amo.STATUS_NULL,
amo.STATUS_PENDING, amo.STATUS_PUBLIC)
def send_mail(template, subject, emails, context, perm_setting=None):
template = loader.get_template(template)
amo_send_mail(subject, template.render(Context(context, autoescape=False)),
recipient_list=emails, from_email=settings.EDITORS_EMAIL,
use_blacklist=False, perm_setting=perm_setting)
@register.function
def get_position(addon):
if addon.is_persona() and addon.is_pending():
qs = (Addon.objects.filter(status=amo.STATUS_PENDING,
type=amo.ADDON_PERSONA)
.no_transforms().order_by('created')
.values_list('id', flat=True))
id_ = addon.id
position = 0
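        # Walk the pending persona queue in creation order to find this
        # add-on's 1-based position (0 if it is not found in the queue).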
for idx, addon_id in enumerate(qs, start=1):
if addon_id == id_:
position = idx
break
total = qs.count()
return {'pos': position, 'total': total}
else:
version = addon.latest_version
if not version:
return False
q = version.current_queue
if not q:
return False
mins_query = q.objects.filter(id=addon.id)
if mins_query.count() > 0:
mins = mins_query[0].waiting_time_min
pos = q.objects.having('waiting_time_min >=', mins).count()
total = q.objects.count()
return dict(mins=mins, pos=pos, total=total)
return False
class ReviewHelper:
"""
A class that builds enough to render the form back to the user and
    hand processing off to the correct handler.
"""
def __init__(self, request=None, addon=None, version=None):
self.handler = None
self.required = {}
self.addon = addon
self.all_files = version.files.all() if version else []
self.get_review_type(request, addon, version)
self.actions = self.get_actions(request, addon)
def set_data(self, data):
self.handler.set_data(data)
def get_review_type(self, request, addon, version):
if self.addon.status in NOMINATED_STATUSES:
self.review_type = 'nominated'
self.handler = ReviewAddon(request, addon, version, 'nominated')
elif self.addon.status == amo.STATUS_UNREVIEWED:
self.review_type = 'preliminary'
self.handler = ReviewAddon(request, addon, version, 'preliminary')
elif self.addon.status == amo.STATUS_LITE:
self.review_type = 'preliminary'
self.handler = ReviewFiles(request, addon, version, 'preliminary')
else:
self.review_type = 'pending'
self.handler = ReviewFiles(request, addon, version, 'pending')
def get_actions(self, request, addon):
labels, details = self._review_actions()
actions = SortedDict()
if not addon.admin_review or acl.action_allowed(
request, 'ReviewerAdminTools', 'View'):
if self.review_type != 'preliminary':
if addon.is_listed:
label = _lazy('Push to public')
else:
label = _lazy('Grant full review')
actions['public'] = {'method': self.handler.process_public,
'minimal': False,
'label': label}
# An unlisted sideload add-on, which requests a full review, cannot
# be granted a preliminary review.
if addon.is_listed or self.review_type == 'preliminary':
actions['prelim'] = {
'method': self.handler.process_preliminary,
'label': labels['prelim'],
'minimal': False}
actions['reject'] = {'method': self.handler.process_sandbox,
'label': _lazy('Reject'),
'minimal': False}
actions['info'] = {'method': self.handler.request_information,
'label': _lazy('Request more information'),
'minimal': True}
actions['super'] = {'method': self.handler.process_super_review,
'label': _lazy('Request super-review'),
'minimal': True}
actions['comment'] = {'method': self.handler.process_comment,
'label': _lazy('Comment'),
'minimal': True}
for k, v in actions.items():
v['details'] = details.get(k)
return actions
def _review_actions(self):
labels = {'prelim': _lazy('Grant preliminary review')}
details = {'prelim': _lazy('This will mark the files as '
'preliminarily reviewed.'),
'info': _lazy('Use this form to request more information '
'from the author. They will receive an email '
'and be able to answer here. You will be '
'notified by email when they reply.'),
'super': _lazy('If you have concerns about this add-on\'s '
'security, copyright issues, or other '
'concerns that an administrator should look '
'into, enter your comments in the area '
'below. They will be sent to '
'administrators, not the author.'),
'reject': _lazy('This will reject the add-on and remove '
'it from the review queue.'),
'comment': _lazy('Make a comment on this version. The '
'author won\'t be able to see this.')}
if self.addon.status == amo.STATUS_LITE:
details['reject'] = _lazy('This will reject the files and remove '
'them from the review queue.')
if self.addon.status in (amo.STATUS_UNREVIEWED, amo.STATUS_NOMINATED):
details['prelim'] = _lazy('This will mark the add-on as '
'preliminarily reviewed. Future '
'versions will undergo '
'preliminary review.')
elif self.addon.status == amo.STATUS_LITE:
details['prelim'] = _lazy('This will mark the files as '
'preliminarily reviewed. Future '
'versions will undergo '
'preliminary review.')
elif self.addon.status == amo.STATUS_LITE_AND_NOMINATED:
labels['prelim'] = _lazy('Retain preliminary review')
details['prelim'] = _lazy('This will retain the add-on as '
'preliminarily reviewed. Future '
'versions will undergo preliminary '
'review.')
if self.review_type == 'pending':
details['public'] = _lazy('This will approve a sandboxed version '
'of a public add-on to appear on the '
'public side.')
details['reject'] = _lazy('This will reject a version of a public '
'add-on and remove it from the queue.')
else:
details['public'] = _lazy('This will mark the add-on and its most '
'recent version and files as public. '
'Future versions will go into the '
'sandbox until they are reviewed by an '
'editor.')
return labels, details
def process(self):
action = self.handler.data.get('action', '')
if not action:
raise NotImplementedError
return self.actions[action]['method']()
class ReviewBase(object):
def __init__(self, request, addon, version, review_type):
self.request = request
if request:
self.user = self.request.user
else:
# Use the addons team go-to user "Mozilla" for the automatic
# validations.
self.user = UserProfile.objects.get(pk=settings.TASK_USER_ID)
self.addon = addon
self.version = version
self.review_type = review_type
self.files = None
def set_addon(self, **kw):
"""Alters addon and sets reviewed timestamp on version."""
self.addon.update(**kw)
self.version.update(reviewed=datetime.datetime.now())
def set_files(self, status, files, copy_to_mirror=False,
hide_disabled_file=False):
"""Change the files to be the new status
and copy, remove from the mirror as appropriate."""
for file in files:
file.datestatuschanged = datetime.datetime.now()
file.reviewed = datetime.datetime.now()
if copy_to_mirror:
file.copy_to_mirror()
if hide_disabled_file:
file.hide_disabled_file()
file.status = status
file.save()
def log_action(self, action):
details = {'comments': self.data['comments'],
'reviewtype': self.review_type}
if self.files:
details['files'] = [f.id for f in self.files]
if self.version:
details['version'] = self.version.version
amo.log(action, self.addon, self.version, user=self.user,
created=datetime.datetime.now(), details=details)
def notify_email(self, template, subject):
"""Notify the authors that their addon has been reviewed."""
emails = [a.email for a in self.addon.authors.all()]
data = self.data.copy() if self.data else {}
data.update(self.get_context_data())
data['tested'] = ''
os, app = data.get('operating_systems'), data.get('applications')
if os and app:
data['tested'] = 'Tested on %s with %s' % (os, app)
elif os and not app:
data['tested'] = 'Tested on %s' % os
elif not os and app:
data['tested'] = 'Tested with %s' % app
data['addon_type'] = (_lazy('add-on'))
send_mail('editors/emails/%s.ltxt' % template,
subject % (self.addon.name, self.version.version),
emails, Context(data), perm_setting='editor_reviewed')
def get_context_data(self):
if self.addon.is_listed:
url = self.addon.get_url_path(add_prefix=False)
else:
url = self.addon.get_dev_url('versions')
return {'name': self.addon.name,
'number': self.version.version,
'reviewer': self.user.display_name,
'addon_url': absolutify(url),
'review_url': absolutify(reverse('editors.review',
args=[self.addon.pk],
add_prefix=False)),
'comments': self.data.get('comments'),
'SITE_URL': settings.SITE_URL}
def request_information(self):
"""Send a request for information to the authors."""
emails = [a.email for a in self.addon.authors.all()]
self.log_action(amo.LOG.REQUEST_INFORMATION)
self.version.update(has_info_request=True)
log.info(u'Sending request for information for %s to %s' %
(self.addon, emails))
send_mail('editors/emails/info.ltxt',
u'Mozilla Add-ons: %s %s' %
(self.addon.name, self.version.version),
emails, Context(self.get_context_data()),
perm_setting='individual_contact')
def send_super_mail(self):
self.log_action(amo.LOG.REQUEST_SUPER_REVIEW)
log.info(u'Super review requested for %s' % (self.addon))
send_mail('editors/emails/super_review.ltxt',
u'Super review requested: %s' % (self.addon.name),
[settings.SENIOR_EDITORS_EMAIL],
Context(self.get_context_data()))
def process_comment(self):
kw = {'has_editor_comment': True}
if self.data.get('clear_info_request'):
kw['has_info_request'] = False
self.version.update(**kw)
self.log_action(amo.LOG.COMMENT_VERSION)
class ReviewAddon(ReviewBase):
def __init__(self, *args, **kwargs):
super(ReviewAddon, self).__init__(*args, **kwargs)
self.is_upgrade = (self.addon.status == amo.STATUS_LITE_AND_NOMINATED
and self.review_type == 'nominated')
def set_data(self, data):
self.data = data
self.files = self.version.files.all()
def process_public(self):
"""Set an addon to public."""
if self.review_type == 'preliminary':
raise AssertionError('Preliminary addons cannot be made public.')
# Sign addon.
for file_ in self.files:
sign_file(file_, settings.SIGNING_SERVER)
# Hold onto the status before we change it.
status = self.addon.status
# Save files first, because set_addon checks to make sure there
# is at least one public file or it won't make the addon public.
self.set_files(amo.STATUS_PUBLIC, self.files, copy_to_mirror=True)
self.set_addon(highest_status=amo.STATUS_PUBLIC,
status=amo.STATUS_PUBLIC)
self.log_action(amo.LOG.APPROVE_VERSION)
template = u'%s_to_public' % self.review_type
subject = u'Mozilla Add-ons: %s %s Fully Reviewed'
if not self.addon.is_listed:
template = u'unlisted_to_reviewed'
subject = u'Mozilla Add-ons: %s %s signed and ready to download'
self.notify_email(template, subject)
log.info(u'Making %s public' % (self.addon))
log.info(u'Sending email for %s' % (self.addon))
# Assign reviewer incentive scores.
if self.request:
ReviewerScore.award_points(self.request.amo_user, self.addon,
status)
def process_sandbox(self):
"""Set an addon back to sandbox."""
# Hold onto the status before we change it.
status = self.addon.status
if (not self.is_upgrade or
not self.addon.versions.exclude(id=self.version.id)
.filter(files__status__in=amo.REVIEWED_STATUSES)):
self.set_addon(status=amo.STATUS_NULL)
else:
self.set_addon(status=amo.STATUS_LITE)
self.set_files(amo.STATUS_DISABLED, self.files,
hide_disabled_file=True)
self.log_action(amo.LOG.REJECT_VERSION)
template = u'%s_to_sandbox' % self.review_type
subject = u'Mozilla Add-ons: %s %s Rejected'
if not self.addon.is_listed:
template = u'unlisted_to_sandbox'
subject = u'Mozilla Add-ons: %s %s didn\'t pass review'
self.notify_email(template, subject)
log.info(u'Making %s disabled' % (self.addon))
log.info(u'Sending email for %s' % (self.addon))
# Assign reviewer incentive scores.
if self.request:
ReviewerScore.award_points(self.request.amo_user, self.addon,
status)
def process_preliminary(self, auto_validation=False):
"""Set an addon to preliminary."""
# Sign addon.
for file_ in self.files:
sign_file(file_, settings.PRELIMINARY_SIGNING_SERVER)
# Hold onto the status before we change it.
status = self.addon.status
changes = {'status': amo.STATUS_LITE}
if (self.addon.status in (amo.STATUS_PUBLIC,
amo.STATUS_LITE_AND_NOMINATED)):
changes['highest_status'] = amo.STATUS_LITE
template = u'%s_to_preliminary' % self.review_type
subject = u'Mozilla Add-ons: %s %s Preliminary Reviewed'
if (self.review_type == 'preliminary' and
self.addon.status == amo.STATUS_LITE_AND_NOMINATED):
template = u'nominated_to_nominated'
if not self.addon.is_listed:
template = u'unlisted_to_reviewed'
if auto_validation:
template = u'unlisted_to_reviewed_auto'
subject = u'Mozilla Add-ons: %s %s signed and ready to download'
self.set_addon(**changes)
self.set_files(amo.STATUS_LITE, self.files, copy_to_mirror=True)
self.log_action(amo.LOG.PRELIMINARY_VERSION)
self.notify_email(template, subject)
log.info(u'Making %s preliminary' % (self.addon))
log.info(u'Sending email for %s' % (self.addon))
if self.request and not auto_validation:
# Assign reviewer incentive scores.
ReviewerScore.award_points(self.request.amo_user, self.addon,
status)
def process_super_review(self):
"""Give an addon super review."""
self.addon.update(admin_review=True)
self.notify_email('author_super_review',
u'Mozilla Add-ons: %s %s flagged for Admin Review')
self.send_super_mail()
class ReviewFiles(ReviewBase):
def set_data(self, data):
self.data = data
self.files = data.get('addon_files', None)
def process_public(self):
"""Set an addons files to public."""
if self.review_type == 'preliminary':
raise AssertionError('Preliminary addons cannot be made public.')
# Sign addon.
for file_ in self.files:
sign_file(file_, settings.SIGNING_SERVER)
# Hold onto the status before we change it.
status = self.addon.status
self.set_files(amo.STATUS_PUBLIC, self.files, copy_to_mirror=True)
self.log_action(amo.LOG.APPROVE_VERSION)
template = u'%s_to_public' % self.review_type
subject = u'Mozilla Add-ons: %s %s Fully Reviewed'
if not self.addon.is_listed:
template = u'unlisted_to_reviewed'
subject = u'Mozilla Add-ons: %s %s signed and ready to download'
self.notify_email(template, subject)
log.info(u'Making %s files %s public' %
(self.addon, ', '.join([f.filename for f in self.files])))
log.info(u'Sending email for %s' % (self.addon))
# Assign reviewer incentive scores.
if self.request:
ReviewerScore.award_points(self.request.amo_user, self.addon,
status)
def process_sandbox(self):
"""Set an addons files to sandbox."""
# Hold onto the status before we change it.
status = self.addon.status
self.set_files(amo.STATUS_DISABLED, self.files,
hide_disabled_file=True)
self.log_action(amo.LOG.REJECT_VERSION)
template = u'%s_to_sandbox' % self.review_type
subject = u'Mozilla Add-ons: %s %s Rejected'
if not self.addon.is_listed:
template = u'unlisted_to_sandbox'
subject = u'Mozilla Add-ons: %s %s didn\'t pass review'
self.notify_email(template, subject)
log.info(u'Making %s files %s disabled' %
(self.addon,
', '.join([f.filename for f in self.files])))
log.info(u'Sending email for %s' % (self.addon))
# Assign reviewer incentive scores.
if self.request:
ReviewerScore.award_points(self.request.amo_user, self.addon,
status)
def process_preliminary(self, auto_validation=False):
"""Set an addons files to preliminary."""
# Sign addon.
for file_ in self.files:
sign_file(file_, settings.PRELIMINARY_SIGNING_SERVER)
# Hold onto the status before we change it.
status = self.addon.status
self.set_files(amo.STATUS_LITE, self.files, copy_to_mirror=True)
self.log_action(amo.LOG.PRELIMINARY_VERSION)
template = u'%s_to_preliminary' % self.review_type
subject = u'Mozilla Add-ons: %s %s Preliminary Reviewed'
if not self.addon.is_listed:
template = u'unlisted_to_reviewed'
if auto_validation:
template = u'unlisted_to_reviewed_auto'
subject = u'Mozilla Add-ons: %s %s signed and ready to download'
self.notify_email(template, subject)
log.info(u'Making %s files %s preliminary' %
(self.addon, ', '.join([f.filename for f in self.files])))
log.info(u'Sending email for %s' % (self.addon))
if self.request and not auto_validation:
# Assign reviewer incentive scores.
ReviewerScore.award_points(self.request.amo_user, self.addon,
status)
def process_super_review(self):
"""Give an addon super review when preliminary."""
self.addon.update(admin_review=True)
self.notify_email('author_super_review',
u'Mozilla Add-ons: %s %s flagged for Admin Review')
self.send_super_mail()
@register.function
@jinja2.contextfunction
def logs_tabnav_themes(context):
"""
Returns tuple of tab navigation for the log pages.
Each tuple contains three elements: (named url, tab_code, tab_text)
"""
rv = [
('editors.themes.logs', 'themes', _('Reviews'))
]
if acl.action_allowed(context['request'], 'SeniorPersonasTools', 'View'):
rv.append(('editors.themes.deleted', 'deleted', _('Deleted')))
return rv
@register.function
@jinja2.contextfunction
def queue_tabnav_themes(context):
"""Similar to queue_tabnav, but for themes."""
tabs = []
if acl.action_allowed(context['request'], 'Personas', 'Review'):
tabs.append((
'editors.themes.list', 'pending_themes', _('Pending'),
))
if acl.action_allowed(context['request'], 'SeniorPersonasTools', 'View'):
tabs.append((
'editors.themes.list_flagged', 'flagged_themes', _('Flagged'),
))
tabs.append((
'editors.themes.list_rereview', 'rereview_themes',
_('Updates'),
))
return tabs
@register.function
@jinja2.contextfunction
def queue_tabnav_themes_interactive(context):
"""Tabnav for the interactive shiny theme queues."""
tabs = []
if acl.action_allowed(context['request'], 'Personas', 'Review'):
tabs.append((
'editors.themes.queue_themes', 'pending', _('Pending'),
))
if acl.action_allowed(context['request'], 'SeniorPersonasTools', 'View'):
tabs.append((
'editors.themes.queue_flagged', 'flagged', _('Flagged'),
))
tabs.append((
'editors.themes.queue_rereview', 'rereview', _('Updates'),
))
return tabs
@register.function
@jinja2.contextfunction
def is_expired_lock(context, lock):
return lock.expiry < datetime.datetime.now()
| |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License
#
"""Agent implementing the server side of the AMQP management protocol.
Adapter layer between external attribute-value maps sent/received via the AMQP
management protocol and implementation objects (C or python) of the dispatch
router. Entity types are as described in the qdrouter.json schema. Reading
configuration files is treated as a set of CREATE operations.
Maintains a set of L{EntityAdapter} that hold attribute maps reflecting the last
known attribute values of the implementation objects. Delegates management
operations to the correct adapter.
EntityAdapters are created/deleted in two ways:
- Externally by CREATE/DELETE operations (or loading config file)
- Internally by creation or deletion of corresponding implementation object.
Memory management: The implementation is responsible for informing the L{Agent}
when an implementation object is created and *before* it is deleted in the case
of a C object.
EntityAdapters can:
- Receive attribute maps via CREATE or UPDATE operations (reading configuration
files is treated as a set of CREATE operations) and set configuration in the
implementation objects.
- Refresh the adapter's attribute map to reflect the current state of the
implementation objects, to respond to READ or QUERY operations with up-to-date values.
To avoid confusion the term "update" is only used for the EntityAdapter updating
the implementation object. The term "refresh" is used for the EntityAdapter
getting current information from the implementation object.
## Threading:
The agent is locked to be thread safe, called in the following threads:
- Reading configuration file in initialization thread (no contention).
- Management requests arriving in multiple, concurrent connection threads.
- Implementation objects created/deleted in multiple, concurrent connection threads.
When refreshing attributes, the agent must also read C implementation object
data that may be updated in other threads.
# FIXME aconway 2015-02-09:
Temporary solution is to lock the entire dispatch router lock during full refresh.
Better solution coming soon...
"""
import traceback, json, pstats
from itertools import ifilter, chain
from traceback import format_exc
from threading import Lock
from cProfile import Profile
from cStringIO import StringIO
from ctypes import c_void_p, py_object, c_long
from subprocess import Popen
from ..dispatch import IoAdapter, LogAdapter, LOG_INFO, LOG_WARNING, LOG_DEBUG, LOG_ERROR, TREATMENT_ANYCAST_CLOSEST
from qpid_dispatch.management.error import ManagementError, OK, CREATED, NO_CONTENT, STATUS_TEXT, \
BadRequestStatus, InternalServerErrorStatus, NotImplementedStatus, NotFoundStatus, ForbiddenStatus
from qpid_dispatch.management.entity import camelcase
from .schema import ValidationError, SchemaEntity, EntityType
from .qdrouter import QdSchema
from ..router.message import Message
from ..router.address import Address
from ..policy.policy_manager import PolicyManager
def dictstr(d):
"""Stringify a dict in the form 'k=v, k=v ...' instead of '{k:v, ...}'"""
return ", ".join("%s=%r" % (k, v) for k, v in d.iteritems())
def required_property(prop, request):
"""Raise exception if required property is missing"""
if not request.properties or prop not in request.properties:
raise BadRequestStatus("No '%s' property: %s"%(prop, request))
return request.properties[prop]
def not_implemented(operation, entity_type):
"""Raise NOT_IMPLEMENTED exception"""
raise NotImplementedStatus("Operation '%s' not implemented on %s" % (operation, entity_type))
class AtomicCount(object):
"""Simple atomic counter"""
def __init__(self, count=0):
self.count = count
self.lock = Lock()
def next(self):
with self.lock:
n = self.count
self.count += 1
return n
class Implementation(object):
"""Abstract implementation wrapper"""
def __init__(self, entity_type, key):
self.entity_type, self.key = entity_type, key
class CImplementation(Implementation):
"""Wrapper for a C implementation pointer"""
def __init__(self, qd, entity_type, pointer):
super(CImplementation, self).__init__(entity_type, pointer)
fname = "qd_entity_refresh_" + entity_type.short_name.replace('.', '_')
self.refreshfn = qd.function(fname, c_long, [py_object, c_void_p])
def refresh_entity(self, attributes):
return self.refreshfn(attributes, self.key) or True
class PythonImplementation(Implementation):
"""Wrapper for a Python implementation object"""
def __init__(self, entity_type, impl):
"""impl.refresh_entity(attributes) must be a valid function call"""
super(PythonImplementation, self).__init__(entity_type, id(impl))
self.refresh_entity = impl.refresh_entity
class EntityAdapter(SchemaEntity):
"""
Base class for agent entities with operations as well as attributes.
"""
def __init__(self, agent, entity_type, attributes=None, validate=True):
"""
@param agent: Containing L{Agent}
@param entity_type: L{EntityType}
@param attributes: Attribute name:value map
@param validate: If true, validate the entity.
"""
super(EntityAdapter, self).__init__(entity_type, attributes or {}, validate=validate)
# Direct __dict__ access to avoid validation as schema attributes
self.__dict__['_agent'] = agent
self.__dict__['_log'] = agent.log
self.__dict__['_qd'] = agent.qd
self.__dict__['_dispatch'] = agent.dispatch
self.__dict__['_policy'] = agent.policy
self.__dict__['_implementations'] = []
def validate(self, **kwargs):
"""Set default identity and name if not already set, then do schema validation"""
identity = self.attributes.get("identity")
name = self.attributes.get("name")
if identity:
if not name:
self.attributes[u"name"] = "%s/%s" % (self.entity_type.short_name, self._identifier())
else:
self.attributes[u"identity"] = "%s/%s" % (self.entity_type.short_name, self._identifier())
if not name:
self.attributes.setdefault(u'name', self.attributes[u'identity'])
super(EntityAdapter, self).validate(**kwargs)
def _identifier(self):
"""
Generate identifier. identity=type/identifier.
Default is per-type counter, derived classes can override.
"""
try: counter = type(self)._identifier_count
except AttributeError: counter = type(self)._identifier_count = AtomicCount()
return str(counter.next())
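# Example (illustrative): with the default counter above, successive adapters of
# one type get identities like '<short-name>/0', '<short-name>/1', ...; the
# subclasses below override _identifier() to use more natural keys instead
# (name, module, id, host:port, ...).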
def _refresh(self):
"""Refresh self.attributes from implementation object(s)."""
for impl in self._implementations:
impl.refresh_entity(self.attributes)
return bool(self._implementations)
def _add_implementation(self, impl):
"""Add an implementaiton object to use to refresh our attributes"""
self._implementations.append(impl)
def create(self):
"""Subclasses can add extra create actions here"""
pass
def read(self, request):
"""Handle read request, default is to return attributes."""
request_type = self.entity_type.schema.long_name(request.properties.get('type'))
if request_type and self.type != request_type:
raise NotFoundStatus("Entity type '%s' does match requested type '%s'" %
(self.type, request_type))
return (OK, self.attributes)
def update(self, request):
"""Handle update request with new attributes from management client"""
self.entity_type.update_check(request.body, self.attributes)
newattrs = dict(self.attributes, **request.body)
self.entity_type.validate(newattrs, update=True)
self.attributes = newattrs
self._update()
return (OK, self.attributes)
def _update(self):
"""Subclasses implement update logic here"""
pass
def delete(self, request):
"""Handle delete request from client"""
self._delete()
self._agent.remove(self)
return (NO_CONTENT, {})
def _delete(self):
"""Subclasses implement delete logic here"""
pass
def __str__(self):
keys = sorted(self.attributes.keys())
# If the attribute is hidden the attribute value will show up as stars ('*******').
return "Entity(%s)" % ", ".join("%s=%s" % (k, '*******' if self.entity_type.attribute(k).hidden else self.attributes[k]) for k in keys)
class ContainerEntity(EntityAdapter):
"""
The ContainerEntity has been deprecated. Use the RouterEntity instead.
"""
def create(self):
self._qd.qd_dispatch_configure_container(self._dispatch, self)
def _identifier(self):
self.attributes.setdefault("containerName", "00000000-0000-0000-0000-000000000000")
return self.attributes["containerName"]
def __str__(self):
return super(ContainerEntity, self).__str__().replace("Entity(", "ContainerEntity(")
class RouterEntity(EntityAdapter):
def __init__(self, agent, entity_type, attributes=None):
super(RouterEntity, self).__init__(agent, entity_type, attributes, validate=False)
# Router is a mix of configuration and operational entity.
# The statistics attributes are operational not configured.
self._add_implementation(
CImplementation(agent.qd, entity_type, self._dispatch))
def _identifier(self): return self.attributes.get('id')
def create(self):
try:
if self.routerId:
self._agent.log(LOG_WARNING, "routerId is deprecated, use id instead")
except:
pass
self._qd.qd_dispatch_configure_router(self._dispatch, self)
def __str__(self):
return super(RouterEntity, self).__str__().replace("Entity(", "RouterEntity(")
class LogEntity(EntityAdapter):
def __init__(self, agent, entity_type, attributes=None, validate=True):
# Special defaults for DEFAULT module.
if attributes.get("module") == "DEFAULT":
defaults = dict(enable="info+", timestamp=True, source=False, output="stderr")
attributes = dict(defaults, **attributes)
super(LogEntity, self).__init__(agent, entity_type, attributes, validate=True)
def _identifier(self): return self.attributes.get('module')
def create(self):
self._qd.qd_log_entity(self)
def _update(self):
self._qd.qd_log_entity(self)
def _delete(self):
"""Can't actually delete a log source but return it to the default state"""
self._qd.qd_log_source_reset(self.attributes['module'])
def __str__(self):
return super(LogEntity, self).__str__().replace("Entity(", "LogEntity(")
class PolicyEntity(EntityAdapter):
def __init__(self, agent, entity_type, attributes=None):
super(PolicyEntity, self).__init__(agent, entity_type, attributes, validate=False)
# Policy is a mix of configuration and operational entity.
# The statistics attributes are operational not configured.
self._add_implementation(
CImplementation(agent.qd, entity_type, self._dispatch))
def create(self):
self._qd.qd_dispatch_configure_policy(self._dispatch, self)
self._qd.qd_dispatch_register_policy_manager(self._dispatch, self._policy)
def _identifier(self):
return self.attributes.get('module')
def __str__(self):
return super(PolicyEntity, self).__str__().replace("Entity(", "PolicyEntity(")
class VhostEntity(EntityAdapter):
def create(self):
self._policy.create_ruleset(self.attributes)
def _identifier(self):
return self.attributes.get('id')
def __str__(self):
return super(VhostEntity, self).__str__().replace("Entity(", "VhostEntity(")
def _delete(self):
self._policy.delete_ruleset(self.id)
def _update(self):
self._policy.update_ruleset(self.attributes)
class VhostStatsEntity(EntityAdapter):
def _identifier(self):
return self.attributes.get('id')
def __str__(self):
return super(VhostStatsEntity, self).__str__().replace("Entity(", "VhostStatsEntity(")
def _host_port_name_identifier(entity):
for attr in ['host', 'port', 'name']: # Set default values if need be
entity.attributes.setdefault(
attr, entity.entity_type.attribute(attr).missing_value())
if entity.attributes.get('name'):
return "%s:%s:%s" % (entity.attributes['host'], entity.attributes['port'], entity.attributes['name'])
else:
return "%s:%s" % (entity.attributes['host'], entity.attributes['port'])
class SslProfileEntity(EntityAdapter):
def create(self):
return self._qd.qd_dispatch_configure_ssl_profile(self._dispatch, self)
def _delete(self):
deleted = self._qd.qd_connection_manager_delete_ssl_profile(self._dispatch, self._implementations[0].key)
# SSL Profiles cannot be deleted if they are referenced by a connector/listener.
if not deleted:
raise ForbiddenStatus("SSL Profile is referenced by other listeners/connectors. Delete the associated "
"listeners/connectors before deleting the SSL Profile")
def _identifier(self):
return self.name
def __str__(self):
return super(SslProfileEntity, self).__str__().replace("Entity(", "SslProfileEntity(")
class ListenerEntity(EntityAdapter):
def create(self):
config_listener = self._qd.qd_dispatch_configure_listener(self._dispatch, self)
self._qd.qd_connection_manager_start(self._dispatch)
return config_listener
def _identifier(self):
return _host_port_name_identifier(self)
def __str__(self):
return super(ListenerEntity, self).__str__().replace("Entity(", "ListenerEntity(")
def _delete(self):
self._qd.qd_connection_manager_delete_listener(self._dispatch, self._implementations[0].key)
class ConnectorEntity(EntityAdapter):
def create(self):
config_connector = self._qd.qd_dispatch_configure_connector(self._dispatch, self)
self._qd.qd_connection_manager_start(self._dispatch)
return config_connector
def _delete(self):
self._qd.qd_connection_manager_delete_connector(self._dispatch, self._implementations[0].key)
def _identifier(self):
return _host_port_name_identifier(self)
def __str__(self):
return super(ConnectorEntity, self).__str__().replace("Entity(", "ConnectorEntity(")
class FixedAddressEntity(EntityAdapter):
def create(self):
self._qd.qd_dispatch_configure_fixed_address(self._dispatch, self)
def __str__(self):
return super(FixedAddressEntity, self).__str__().replace("Entity(", "FixedAddressEntity(")
class WaypointEntity(EntityAdapter):
def create(self):
self._qd.qd_dispatch_configure_waypoint(self._dispatch, self)
#self._qd.qd_waypoint_activate_all(self._dispatch)
def __str__(self):
return super(WaypointEntity, self).__str__().replace("Entity(", "WaypointEntity(")
class LinkRoutePatternEntity(EntityAdapter):
def create(self):
self._qd.qd_dispatch_configure_lrp(self._dispatch, self)
def __str__(self):
return super(LinkRoutePatternEntity, self).__str__().replace("Entity(", "LinkRoutePatternEntity(")
class AddressEntity(EntityAdapter):
def create(self):
self._qd.qd_dispatch_configure_address(self._dispatch, self)
def __str__(self):
return super(AddressEntity, self).__str__().replace("Entity(", "AddressEntity(")
class LinkRouteEntity(EntityAdapter):
def create(self):
self._qd.qd_dispatch_configure_link_route(self._dispatch, self)
def __str__(self):
return super(LinkRouteEntity, self).__str__().replace("Entity(", "LinkRouteEntity(")
class AutoLinkEntity(EntityAdapter):
def create(self):
self._qd.qd_dispatch_configure_auto_link(self._dispatch, self)
def __str__(self):
return super(AutoLinkEntity, self).__str__().replace("Entity(", "AutoLinkEntity(")
class ConsoleEntity(EntityAdapter):
def __str__(self):
return super(ConsoleEntity, self).__str__().replace("Entity(", "ConsoleEntity(")
def create(self):
# if a named listener is present, use its host:port
name = self.attributes.get('listener')
if name:
listeners = self._agent.find_entity_by_type("listener")
for listener in listeners:
if listener.name == name:
try:
#required
host = listener.attributes['host']
port = listener.attributes['port']
#optional
wsport = self.attributes.get('wsport')
home = self.attributes.get('home')
args = self.attributes.get('args')
pargs = []
pargs.append(self.attributes['proxy'])
if args:
# Replace any $port|$host|$wsport|$home
dargs = {'$port': port, '$host': host}
if wsport:
dargs['$wsport'] = wsport
if home:
dargs['$home'] = home
for k,v in dargs.iteritems():
args = args.replace(k,str(v))
pargs += args.split()
#run the external program
Popen(pargs)
except:
self._agent.log(LOG_ERROR, "Can't parse console entity: %s" % (format_exc()))
break
class DummyEntity(EntityAdapter):
def callme(self, request):
return (OK, dict(**request.properties))
class RouterLinkEntity(EntityAdapter):
def __str__(self):
return super(RouterLinkEntity, self).__str__().replace("Entity(", "RouterLinkEntity(")
class RouterNodeEntity(EntityAdapter):
def _identifier(self):
return self.attributes.get('id')
def __str__(self):
return super(RouterNodeEntity, self).__str__().replace("Entity(", "RouterNodeEntity(")
class RouterAddressEntity(EntityAdapter):
def _identifier(self):
return self.attributes.get('key')
def __str__(self):
return super(RouterAddressEntity, self).__str__().replace("Entity(", "RouterAddressEntity(")
class ConnectionEntity(EntityAdapter):
def _identifier(self):
return self.attributes.get('host') + ":" + str(self.attributes.get('identity'))
def __str__(self):
return super(ConnectionEntity, self).__str__().replace("Entity(", "ConnectionEntity(")
class LogStatsEntity(EntityAdapter):
def _identifier(self):
return self.attributes.get('identity')
def __str__(self):
return super(LogStatsEntity, self).__str__().replace("Entity(", "LogStatsEntity(")
class AllocatorEntity(EntityAdapter):
def _identifier(self):
return self.attributes.get('typeName')
def __str__(self):
return super(AllocatorEntity, self).__str__().replace("Entity(", "AllocatorEntity(")
class EntityCache(object):
"""
Searchable cache of entities, can be refreshed from implementation objects.
"""
def __init__(self, agent):
self.entities = []
self.implementations = {}
self.agent = agent
self.qd = self.agent.qd
self.schema = agent.schema
self.log = self.agent.log
def map_filter(self, function, test):
"""Filter with test then apply function."""
return map(function, ifilter(test, self.entities))
def map_type(self, function, type):
"""Apply function to all entities of type, if type is None do all entities"""
if type is None:
return map(function, self.entities)
else:
if not isinstance(type, EntityType): type = self.schema.entity_type(type)
return map(function, ifilter(lambda e: e.entity_type.is_a(type), self.entities))
def add(self, entity):
"""Add an entity to the agent"""
self.log(LOG_DEBUG, "Add entity: %s" % entity)
entity.validate() # Fill in defaults etc.
# Validate in the context of the existing entities for uniqueness
self.schema.validate_full(chain(iter([entity]), iter(self.entities)))
self.entities.append(entity)
def _add_implementation(self, implementation, adapter=None):
"""Create an adapter to wrap the implementation object and add it"""
cls = self.agent.entity_class(implementation.entity_type)
if not adapter:
adapter = cls(self.agent, implementation.entity_type, validate=False)
self.implementations[implementation.key] = adapter
adapter._add_implementation(implementation)
adapter._refresh()
self.add(adapter)
def add_implementation(self, implementation, adapter=None):
self._add_implementation(implementation, adapter=adapter)
def _remove(self, entity):
try:
self.entities.remove(entity)
self.log(LOG_DEBUG, "Remove %s entity: %s" %
(entity.entity_type.short_name, entity.attributes['identity']))
except ValueError: pass
def remove(self, entity):
self._remove(entity)
def _remove_implementation(self, key):
if key in self.implementations:
entity = self.implementations[key]
del self.implementations[key]
self._remove(entity)
def remove_implementation(self, key):
self._remove_implementation(key)
def refresh_from_c(self):
"""Refresh entities from the C dispatch runtime"""
REMOVE, ADD = 0, 1
def remove_redundant(events):
"""Remove redundant add/remove pairs of events."""
add = {} # add[pointer] = index of add event.
redundant = [] # List of redundant event indexes.
for i in xrange(len(events)):
action, type, pointer = events[i]
if action == ADD:
add[pointer] = i
elif pointer in add: # action == REMOVE and there's an ADD
redundant.append(add[pointer])
redundant.append(i)
del add[pointer]
for i in sorted(redundant, reverse=True):
events.pop(i)
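# Illustrative example: if the same C pointer P was added and then removed
# within one refresh interval, both events are dropped and only unrelated
# events survive, e.g.
#   [(ADD, 'connection', P), (REMOVE, 'connection', P), (ADD, 'connector', Q)]
#   -> [(ADD, 'connector', Q)]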
# FIXME aconway 2014-10-23: locking is ugly, push it down into C code.
self.qd.qd_dispatch_router_lock(self.agent.dispatch)
try:
events = []
self.qd.qd_entity_refresh_begin(events)
remove_redundant(events)
for action, type, pointer in events:
if action == REMOVE:
self._remove_implementation(pointer)
elif action == ADD:
entity_type = self.schema.entity_type(type)
self._add_implementation(CImplementation(self.qd, entity_type, pointer))
# Refresh the entity values while the lock is still held.
for e in self.entities: e._refresh()
finally:
self.qd.qd_entity_refresh_end()
self.qd.qd_dispatch_router_unlock(self.agent.dispatch)
class ManagementEntity(EntityAdapter):
"""An entity representing the agent itself. It is a singleton created by the agent."""
def __init__(self, agent, entity_type, attributes, validate=True):
attributes = {"identity": "self", "name": "self"}
super(ManagementEntity, self).__init__(agent, entity_type, attributes, validate=validate)
self.__dict__["_schema"] = entity_type.schema
def requested_type(self, request):
type = request.properties.get('entityType')
if type: return self._schema.entity_type(type)
else: return None
def query(self, request):
"""Management node query operation"""
entity_type = self.requested_type(request)
if entity_type:
all_attrs = set(entity_type.attributes.keys())
else:
all_attrs = self._schema.all_attributes
names = set(request.body.get('attributeNames') or [])
if names:
unknown = names - all_attrs
if unknown:
if entity_type:
for_type = " for type %s" % entity_type.name
else:
for_type = ""
raise NotFoundStatus("Unknown attributes %s%s." % (list(unknown), for_type))
else:
names = all_attrs
results = []
def add_result(entity):
result = []
non_empty = False
for name in names:
result.append(entity.attributes.get(name))
if result[-1] is not None: non_empty = True
if non_empty: results.append(result)
self._agent.entities.map_type(add_result, entity_type)
return (OK, {'attributeNames': list(names), 'results': results})
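# Illustrative query exchange (a sketch; values are made up):
#   properties = {'operation': 'QUERY', 'entityType': 'listener'}
#   body       = {'attributeNames': ['name', 'port']}
#   response   = (OK, {'attributeNames': ['name', 'port'],
#                      'results': [['l1', 'amqp'], ['l2', '5672']]})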
def get_types(self, request):
type = self.requested_type(request)
return (OK, dict((t.name, [b.name for b in t.all_bases])
for t in self._schema.by_type(type)))
def get_annotations(self, request):
"""
We are not supporting any annotations at the moment.
"""
return (OK, {})
def get_operations(self, request):
type = self.requested_type(request)
return (OK, dict((t, et.operations)
for t, et in self._schema.entity_types.iteritems()
if not type or type.name == t))
def get_attributes(self, request):
type = self.requested_type(request)
return (OK, dict((t, [a for a in et.attributes])
for t, et in self._schema.entity_types.iteritems()
if not type or type.name == t))
def get_mgmt_nodes(self, request):
router = self._agent.entities.map_type(None, 'router')[0]
area = router.attributes['area']
def node_address(node):
return str(Address.topological(node.attributes['id'], "$management", area))
return (OK, self._agent.entities.map_type(node_address, 'router.node'))
def get_schema(self, request):
return (OK, self._schema.dump())
def _intprop(self, request, prop):
value = request.properties.get(prop)
if value is not None: value = int(value)
return value
def get_json_schema(self, request):
return (OK, json.dumps(self._schema.dump(), indent=self._intprop(request, "indent")))
def get_log(self, request):
logs = self._qd.qd_log_recent_py(self._intprop(request, "limit") or -1)
return (OK, logs)
def profile(self, request):
"""Start/stop the python profiler, returns profile results"""
profile = self.__dict__.get("_profile")
if "start" in request.properties:
if not profile:
profile = self.__dict__["_profile"] = Profile()
profile.enable()
self._log(LOG_INFO, "Started python profiler")
return (OK, None)
if not profile:
raise BadRequestStatus("Profiler not started")
if "stop" in request.properties:
profile.create_stats()
self._log(LOG_INFO, "Stopped python profiler")
out = StringIO()
stats = pstats.Stats(profile, stream=out)
try:
stop = request.properties["stop"]
if stop == "kgrind": # Generate kcachegrind output using pyprof2calltree
from pyprof2calltree import convert
convert(stats, out)
elif stop == "visualize": # Start kcachegrind using pyprof2calltree
from pyprof2calltree import visualize
visualize(stats)
else:
stats.print_stats() # Plain python profile stats
return (OK, out.getvalue())
finally:
out.close()
raise BadRequestStatus("Bad profile request %s" % (request))
class Agent(object):
"""AMQP managment agent. Manages entities, directs requests to the correct entity."""
def __init__(self, dispatch, qd):
self.qd = qd
self.dispatch = dispatch
self.schema = QdSchema()
self.entities = EntityCache(self)
self.request_lock = Lock()
self.log_adapter = LogAdapter("AGENT")
self.policy = PolicyManager(self)
self.management = self.create_entity({"type": "management"})
self.add_entity(self.management)
def log(self, level, text):
info = traceback.extract_stack(limit=2)[0] # Caller frame info
self.log_adapter.log(level, text, info[0], info[1])
def activate(self, address):
"""Register the management address to receive management requests"""
self.entities.refresh_from_c()
self.log(LOG_INFO, "Activating management agent on %s" % address)
self.io = IoAdapter(self.receive, address, 'L', '0', TREATMENT_ANYCAST_CLOSEST)
def entity_class(self, entity_type):
"""Return the class that implements entity_type"""
class_name = camelcase(entity_type.short_name, capital=True) + 'Entity'
entity_class = globals().get(class_name)
if not entity_class:
raise InternalServerErrorStatus(
"Can't find implementation '%s' for '%s'" % (class_name, entity_type.name))
return entity_class
def create_entity(self, attributes):
"""Create an instance of the implementation class for an entity"""
if attributes.get('identity') is not None:
raise BadRequestStatus("'identity' attribute cannot be specified %s" % attributes)
if attributes.get('type') is None:
raise BadRequestStatus("No 'type' attribute in %s" % attributes)
entity_type = self.schema.entity_type(attributes['type'])
return self.entity_class(entity_type)(self, entity_type, attributes)
def respond(self, request, status=OK, description=None, body=None):
"""Send a response to the client"""
if body is None: body = {}
description = description or STATUS_TEXT[status]
response = Message(
address=request.reply_to,
correlation_id=request.correlation_id,
properties={'statusCode': status, 'statusDescription': description},
body=body)
self.log(LOG_DEBUG, "Agent response:\n %s\n Responding to: \n %s"%(response, request))
try:
self.io.send(response)
except:
self.log(LOG_ERROR, "Can't respond to %s: %s"%(request, format_exc()))
def receive(self, request, unused_link_id, unused_cost):
"""Called when a management request is received."""
def error(e, trace):
"""Raise an error"""
self.log(LOG_ERROR, "Error performing %s: %s"%(request.properties.get('operation'), e.message))
self.respond(request, e.status, e.description)
# If there's no reply_to, don't bother to process the request.
if not request.reply_to:
return
# Coarse locking, handle one request at a time.
with self.request_lock:
try:
self.entities.refresh_from_c()
self.log(LOG_DEBUG, "Agent request %s"% request)
status, body = self.handle(request)
self.respond(request, status=status, body=body)
except ManagementError, e:
error(e, format_exc())
except ValidationError, e:
error(BadRequestStatus(str(e)), format_exc())
except Exception, e:
error(InternalServerErrorStatus("%s: %s"%(type(e).__name__, e)), format_exc())
def entity_type(self, type):
try: return self.schema.entity_type(type)
except ValidationError, e: raise NotFoundStatus(str(e))
def handle(self, request):
"""
Handle a request.
Dispatch management node requests to self, entity requests to the entity.
@return: (response-code, body)
"""
operation = required_property('operation', request)
if operation.lower() == 'create':
# Create requests are entity requests but must be handled by the agent since
# the entity does not yet exist.
return self.create(request)
else:
target = self.find_entity(request)
target.entity_type.allowed(operation, request.body)
try:
method = getattr(target, operation.lower().replace("-", "_"))
except AttributeError:
not_implemented(operation, target.type)
return method(request)
def _create(self, attributes):
"""Create an entity, called externally or from configuration file."""
entity = self.create_entity(attributes)
pointer = entity.create()
if pointer:
cimplementation = CImplementation(self.qd, entity.entity_type, pointer)
self.entities.add_implementation(cimplementation, entity)
else:
self.add_entity(entity)
return entity
def create(self, request):
"""
Create operation called from an external client.
Create is special: it is directed at an entity but the entity
does not yet exist so it is handled initially by the agent and
then delegated to the new entity.
"""
attributes = request.body
for a in ['type', 'name']:
prop = request.properties.get(a)
if prop:
old = attributes.setdefault(a, prop)
if old is not None and old != prop:
raise BadRequestStatus("Conflicting values for '%s'" % a)
attributes[a] = prop
if attributes.get('type') is None:
raise BadRequestStatus("No 'type' attribute in %s" % attributes)
et = self.schema.entity_type(attributes['type'])
et.allowed("CREATE", attributes)
et.create_check(attributes)
return (CREATED, self._create(attributes).attributes)
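# Illustrative external CREATE request (a sketch; attribute names are examples):
#   properties = {'operation': 'CREATE', 'type': 'listener', 'name': 'l1'}
#   body       = {'host': '0.0.0.0', 'port': 'amqp'}
# 'type' and 'name' from the properties are merged into the body, checked
# against the schema, and the new entity's attributes are returned with CREATED.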
def configure(self, attributes):
"""Created via configuration file"""
self._create(attributes)
def add_entity(self, entity):
"""Add an entity adapter"""
self.entities.add(entity)
def remove(self, entity):
self.entities.remove(entity)
def add_implementation(self, implementation, entity_type_name):
"""Add an internal python implementation object, it will be wrapped with an entity adapter"""
self.entities.add_implementation(
PythonImplementation(self.entity_type(entity_type_name), implementation))
def remove_implementation(self, implementation):
"""Remove and internal python implementation object."""
self.entities.remove_implementation(id(implementation))
def find_entity(self, request):
"""Find the entity addressed by request"""
requested_type = request.properties.get('type')
if requested_type:
requested_type = self.schema.entity_type(requested_type)
# ids is a map of identifying attribute values
ids = dict((k, request.properties.get(k))
for k in ['name', 'identity'] if k in request.properties)
# Special case for management object: if no name/id and no conflicting type
# then assume this is for "self"
if not ids:
if not requested_type or self.management.entity_type.is_a(requested_type):
return self.management
else:
raise BadRequestStatus("%s: No name or identity provided" % requested_type)
def attrvals():
"""String form of the id attribute values for error messages"""
return " ".join(["%s=%r" % (k, v) for k, v in ids.iteritems()])
k, v = ids.iteritems().next() # Get the first id attribute
found = self.entities.map_filter(None, lambda e: e.attributes.get(k) == v)
if len(found) == 1:
entity = found[0]
elif len(found) > 1:
raise InternalServerErrorStatus(
"Duplicate (%s) entities with %s=%r" % (len(found), k, v))
else:
raise NotFoundStatus("No entity with %s" % attrvals())
for k, v in ids.iteritems():
if entity[k] != v: raise BadRequestStatus("Conflicting %s" % attrvals())
if requested_type:
if not entity.entity_type.is_a(requested_type):
raise BadRequestStatus("Entity type '%s' does not extend requested type '%s'" %
(entity.entity_type.name, requested_type))
return entity
def find_entity_by_type(self, type):
return self.entities.map_type(None, type)
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Swaminathan Vasudevan, Hewlett Packard.
import sys
from neutronclient.neutron.v2_0.vpn import ipsecpolicy
from neutronclient.tests.unit import test_cli20
class CLITestV20VpnIpsecPolicyJSON(test_cli20.CLITestV20Base):
def test_create_ipsecpolicy_all_params(self):
"""vpn-ipsecpolicy-create all params with dashes."""
resource = 'ipsecpolicy'
cmd = ipsecpolicy.CreateIPsecPolicy(test_cli20.MyApp(sys.stdout), None)
name = 'ipsecpolicy1'
description = 'first-ipsecpolicy1'
auth_algorithm = 'sha1'
encryption_algorithm = 'aes-256'
encapsulation_mode = 'tunnel'
pfs = 'group5'
transform_protocol = 'ah'
tenant_id = 'my-tenant'
my_id = 'my-id'
lifetime = 'units=seconds,value=20000'
args = [name,
'--description', description,
'--tenant-id', tenant_id,
'--auth-algorithm', auth_algorithm,
'--encryption-algorithm', encryption_algorithm,
'--transform-protocol', transform_protocol,
'--encapsulation-mode', encapsulation_mode,
'--lifetime', lifetime,
'--pfs', pfs]
position_names = ['name', 'auth_algorithm', 'encryption_algorithm',
'encapsulation_mode', 'description',
'transform_protocol', 'pfs',
'tenant_id']
position_values = [name, auth_algorithm, encryption_algorithm,
encapsulation_mode, description,
transform_protocol, pfs,
tenant_id]
extra_body = {
'lifetime': {
'units': 'seconds',
'value': 20000,
},
}
self._test_create_resource(resource, cmd, name, my_id, args,
position_names, position_values,
extra_body=extra_body)
def test_create_ipsecpolicy_with_limited_params(self):
"""vpn-ipsecpolicy-create with limited params."""
resource = 'ipsecpolicy'
cmd = ipsecpolicy.CreateIPsecPolicy(test_cli20.MyApp(sys.stdout), None)
name = 'ipsecpolicy1'
auth_algorithm = 'sha1'
encryption_algorithm = 'aes-128'
encapsulation_mode = 'tunnel'
pfs = 'group5'
transform_protocol = 'esp'
tenant_id = 'my-tenant'
my_id = 'my-id'
args = [name,
'--tenant-id', tenant_id]
position_names = ['name', 'auth_algorithm', 'encryption_algorithm',
'encapsulation_mode',
'transform_protocol', 'pfs',
'tenant_id']
position_values = [name, auth_algorithm, encryption_algorithm,
encapsulation_mode,
transform_protocol, pfs,
tenant_id]
self._test_create_resource(resource, cmd, name, my_id, args,
position_names, position_values)
def _test_lifetime_values(self, lifetime):
resource = 'ipsecpolicy'
cmd = ipsecpolicy.CreateIPsecPolicy(test_cli20.MyApp(sys.stdout), None)
name = 'ipsecpolicy1'
description = 'my-ipsec-policy'
auth_algorithm = 'sha1'
encryption_algorithm = 'aes-256'
ike_version = 'v1'
phase1_negotiation_mode = 'main'
pfs = 'group5'
tenant_id = 'my-tenant'
my_id = 'my-id'
args = [name,
'--description', description,
'--tenant-id', tenant_id,
'--auth-algorithm', auth_algorithm,
'--encryption-algorithm', encryption_algorithm,
'--ike-version', ike_version,
'--phase1-negotiation-mode', phase1_negotiation_mode,
'--lifetime', lifetime,
'--pfs', pfs]
position_names = ['name', 'description',
'auth_algorithm', 'encryption_algorithm',
'phase1_negotiation_mode',
'ike_version', 'pfs',
'tenant_id']
position_values = [name, description,
auth_algorithm, encryption_algorithm,
phase1_negotiation_mode, ike_version, pfs,
tenant_id]
try:
self._test_create_resource(resource, cmd, name, my_id, args,
position_names, position_values)
except Exception:
return
self.fail("IPsecPolicy Lifetime Error")
def test_create_ipsecpolicy_with_invalid_lifetime_keys(self):
lifetime = 'uts=seconds,val=20000'
self._test_lifetime_values(lifetime)
def test_create_ipsecpolicy_with_invalid_lifetime_values(self):
lifetime = 'units=minutes,value=0'
self._test_lifetime_values(lifetime)
def test_list_ipsecpolicy(self):
"""vpn-ipsecpolicy-list."""
resources = "ipsecpolicies"
cmd = ipsecpolicy.ListIPsecPolicy(test_cli20.MyApp(sys.stdout), None)
self._test_list_resources(resources, cmd, True)
def test_list_ipsecpolicy_pagination(self):
"""vpn-ipsecpolicy-list."""
resources = "ipsecpolicies"
cmd = ipsecpolicy.ListIPsecPolicy(test_cli20.MyApp(sys.stdout), None)
self._test_list_resources_with_pagination(resources, cmd)
def test_list_ipsecpolicy_sort(self):
"""vpn-ipsecpolicy-list --sort-key name --sort-key id --sort-key asc
--sort-key desc
"""
resources = "ipsecpolicies"
cmd = ipsecpolicy.ListIPsecPolicy(test_cli20.MyApp(sys.stdout), None)
self._test_list_resources(resources, cmd,
sort_key=["name", "id"],
sort_dir=["asc", "desc"])
def test_list_ipsecpolicy_limit(self):
"""vpn-ipsecpolicy-list -P."""
resources = "ipsecpolicies"
cmd = ipsecpolicy.ListIPsecPolicy(test_cli20.MyApp(sys.stdout), None)
self._test_list_resources(resources, cmd, page_size=1000)
def test_show_ipsecpolicy_id(self):
"""vpn-ipsecpolicy-show ipsecpolicy_id."""
resource = 'ipsecpolicy'
cmd = ipsecpolicy.ShowIPsecPolicy(test_cli20.MyApp(sys.stdout), None)
args = ['--fields', 'id', self.test_id]
self._test_show_resource(resource, cmd, self.test_id, args, ['id'])
def test_show_ipsecpolicy_id_name(self):
"""vpn-ipsecpolicy-show."""
resource = 'ipsecpolicy'
cmd = ipsecpolicy.ShowIPsecPolicy(test_cli20.MyApp(sys.stdout), None)
args = ['--fields', 'id', '--fields', 'name', self.test_id]
self._test_show_resource(resource, cmd, self.test_id,
args, ['id', 'name'])
def test_update_ipsecpolicy(self):
"""vpn-ipsecpolicy-update myid --name newname --tags a b."""
resource = 'ipsecpolicy'
cmd = ipsecpolicy.UpdateIPsecPolicy(test_cli20.MyApp(sys.stdout), None)
self._test_update_resource(resource, cmd, 'myid',
['myid', '--name', 'newname'],
{'name': 'newname', })
def test_delete_ipsecpolicy(self):
"""vpn-ipsecpolicy-delete my-id."""
resource = 'ipsecpolicy'
cmd = ipsecpolicy.DeleteIPsecPolicy(test_cli20.MyApp(sys.stdout), None)
my_id = 'my-id'
args = [my_id]
self._test_delete_resource(resource, cmd, my_id, args)
class CLITestV20VpnIpsecPolicyXML(CLITestV20VpnIpsecPolicyJSON):
format = 'xml'
# Author: Jeffrey Armstrong <jeff@approximatrix.com>
# April 4, 2011
import numpy as np
from numpy.testing import (assert_equal,
assert_array_almost_equal, assert_array_equal,
assert_allclose, assert_, assert_almost_equal,
suppress_warnings)
from pytest import raises as assert_raises
from scipy.signal import (dlsim, dstep, dimpulse, tf2zpk, lti, dlti,
StateSpace, TransferFunction, ZerosPolesGain,
dfreqresp, dbode, BadCoefficients)
class TestDLTI(object):
def test_dlsim(self):
a = np.asarray([[0.9, 0.1], [-0.2, 0.9]])
b = np.asarray([[0.4, 0.1, -0.1], [0.0, 0.05, 0.0]])
c = np.asarray([[0.1, 0.3]])
d = np.asarray([[0.0, -0.1, 0.0]])
dt = 0.5
# Create an input matrix with inputs down the columns (3 cols) and its
# respective time input vector
u = np.hstack((np.linspace(0, 4.0, num=5)[:, np.newaxis],
np.full((5, 1), 0.01),
np.full((5, 1), -0.002)))
t_in = np.linspace(0, 2.0, num=5)
# Define the known result
yout_truth = np.array([[-0.001,
-0.00073,
0.039446,
0.0915387,
0.13195948]]).T
xout_truth = np.asarray([[0, 0],
[0.0012, 0.0005],
[0.40233, 0.00071],
[1.163368, -0.079327],
[2.2402985, -0.3035679]])
tout, yout, xout = dlsim((a, b, c, d, dt), u, t_in)
assert_array_almost_equal(yout_truth, yout)
assert_array_almost_equal(xout_truth, xout)
assert_array_almost_equal(t_in, tout)
# Make sure input with single-dimension doesn't raise error
dlsim((1, 2, 3), 4)
# Interpolated control - inputs should have different time steps
# than the discrete model uses internally
u_sparse = u[[0, 4], :]
t_sparse = np.asarray([0.0, 2.0])
tout, yout, xout = dlsim((a, b, c, d, dt), u_sparse, t_sparse)
assert_array_almost_equal(yout_truth, yout)
assert_array_almost_equal(xout_truth, xout)
assert_equal(len(tout), yout.shape[0])
# Transfer functions (assume dt = 0.5)
num = np.asarray([1.0, -0.1])
den = np.asarray([0.3, 1.0, 0.2])
yout_truth = np.array([[0.0,
0.0,
3.33333333333333,
-4.77777777777778,
23.0370370370370]]).T
# Assume use of the first column of the control input built earlier
tout, yout = dlsim((num, den, 0.5), u[:, 0], t_in)
assert_array_almost_equal(yout, yout_truth)
assert_array_almost_equal(t_in, tout)
# Retest the same with a 1-D input vector
uflat = np.asarray(u[:, 0])
uflat = uflat.reshape((5,))
tout, yout = dlsim((num, den, 0.5), uflat, t_in)
assert_array_almost_equal(yout, yout_truth)
assert_array_almost_equal(t_in, tout)
# zeros-poles-gain representation
zd = np.array([0.5, -0.5])
pd = np.array([1.j / np.sqrt(2), -1.j / np.sqrt(2)])
k = 1.0
yout_truth = np.array([[0.0, 1.0, 2.0, 2.25, 2.5]]).T
tout, yout = dlsim((zd, pd, k, 0.5), u[:, 0], t_in)
assert_array_almost_equal(yout, yout_truth)
assert_array_almost_equal(t_in, tout)
# Raise an error for continuous-time systems
system = lti([1], [1, 1])
assert_raises(AttributeError, dlsim, system, u)
def test_dstep(self):
a = np.asarray([[0.9, 0.1], [-0.2, 0.9]])
b = np.asarray([[0.4, 0.1, -0.1], [0.0, 0.05, 0.0]])
c = np.asarray([[0.1, 0.3]])
d = np.asarray([[0.0, -0.1, 0.0]])
dt = 0.5
# Because b.shape[1] == 3, dstep should result in a tuple of three
# result vectors
yout_step_truth = (np.asarray([0.0, 0.04, 0.052, 0.0404, 0.00956,
-0.036324, -0.093318, -0.15782348,
-0.226628324, -0.2969374948]),
np.asarray([-0.1, -0.075, -0.058, -0.04815,
-0.04453, -0.0461895, -0.0521812,
-0.061588875, -0.073549579,
-0.08727047595]),
np.asarray([0.0, -0.01, -0.013, -0.0101, -0.00239,
0.009081, 0.0233295, 0.03945587,
0.056657081, 0.0742343737]))
tout, yout = dstep((a, b, c, d, dt), n=10)
assert_equal(len(yout), 3)
for i in range(0, len(yout)):
assert_equal(yout[i].shape[0], 10)
assert_array_almost_equal(yout[i].flatten(), yout_step_truth[i])
# Check that the other two inputs (tf, zpk) will work as well
tfin = ([1.0], [1.0, 1.0], 0.5)
yout_tfstep = np.asarray([0.0, 1.0, 0.0])
tout, yout = dstep(tfin, n=3)
assert_equal(len(yout), 1)
assert_array_almost_equal(yout[0].flatten(), yout_tfstep)
zpkin = tf2zpk(tfin[0], tfin[1]) + (0.5,)
tout, yout = dstep(zpkin, n=3)
assert_equal(len(yout), 1)
assert_array_almost_equal(yout[0].flatten(), yout_tfstep)
# Raise an error for continuous-time systems
system = lti([1], [1, 1])
assert_raises(AttributeError, dstep, system)
def test_dimpulse(self):
a = np.asarray([[0.9, 0.1], [-0.2, 0.9]])
b = np.asarray([[0.4, 0.1, -0.1], [0.0, 0.05, 0.0]])
c = np.asarray([[0.1, 0.3]])
d = np.asarray([[0.0, -0.1, 0.0]])
dt = 0.5
# Because b.shape[1] == 3, dimpulse should result in a tuple of three
# result vectors
yout_imp_truth = (np.asarray([0.0, 0.04, 0.012, -0.0116, -0.03084,
-0.045884, -0.056994, -0.06450548,
-0.068804844, -0.0703091708]),
np.asarray([-0.1, 0.025, 0.017, 0.00985, 0.00362,
-0.0016595, -0.0059917, -0.009407675,
-0.011960704, -0.01372089695]),
np.asarray([0.0, -0.01, -0.003, 0.0029, 0.00771,
0.011471, 0.0142485, 0.01612637,
0.017201211, 0.0175772927]))
tout, yout = dimpulse((a, b, c, d, dt), n=10)
assert_equal(len(yout), 3)
for i in range(0, len(yout)):
assert_equal(yout[i].shape[0], 10)
assert_array_almost_equal(yout[i].flatten(), yout_imp_truth[i])
# Check that the other two inputs (tf, zpk) will work as well
tfin = ([1.0], [1.0, 1.0], 0.5)
yout_tfimpulse = np.asarray([0.0, 1.0, -1.0])
tout, yout = dimpulse(tfin, n=3)
assert_equal(len(yout), 1)
assert_array_almost_equal(yout[0].flatten(), yout_tfimpulse)
zpkin = tf2zpk(tfin[0], tfin[1]) + (0.5,)
tout, yout = dimpulse(zpkin, n=3)
assert_equal(len(yout), 1)
assert_array_almost_equal(yout[0].flatten(), yout_tfimpulse)
# Raise an error for continuous-time systems
system = lti([1], [1, 1])
assert_raises(AttributeError, dimpulse, system)
def test_dlsim_trivial(self):
a = np.array([[0.0]])
b = np.array([[0.0]])
c = np.array([[0.0]])
d = np.array([[0.0]])
n = 5
u = np.zeros(n).reshape(-1, 1)
tout, yout, xout = dlsim((a, b, c, d, 1), u)
assert_array_equal(tout, np.arange(float(n)))
assert_array_equal(yout, np.zeros((n, 1)))
assert_array_equal(xout, np.zeros((n, 1)))
def test_dlsim_simple1d(self):
a = np.array([[0.5]])
b = np.array([[0.0]])
c = np.array([[1.0]])
d = np.array([[0.0]])
n = 5
u = np.zeros(n).reshape(-1, 1)
tout, yout, xout = dlsim((a, b, c, d, 1), u, x0=1)
assert_array_equal(tout, np.arange(float(n)))
expected = (0.5 ** np.arange(float(n))).reshape(-1, 1)
assert_array_equal(yout, expected)
assert_array_equal(xout, expected)
def test_dlsim_simple2d(self):
lambda1 = 0.5
lambda2 = 0.25
a = np.array([[lambda1, 0.0],
[0.0, lambda2]])
b = np.array([[0.0],
[0.0]])
c = np.array([[1.0, 0.0],
[0.0, 1.0]])
d = np.array([[0.0],
[0.0]])
n = 5
u = np.zeros(n).reshape(-1, 1)
tout, yout, xout = dlsim((a, b, c, d, 1), u, x0=1)
assert_array_equal(tout, np.arange(float(n)))
# The analytical solution:
expected = (np.array([lambda1, lambda2]) **
np.arange(float(n)).reshape(-1, 1))
assert_array_equal(yout, expected)
assert_array_equal(xout, expected)
def test_more_step_and_impulse(self):
lambda1 = 0.5
lambda2 = 0.75
a = np.array([[lambda1, 0.0],
[0.0, lambda2]])
b = np.array([[1.0, 0.0],
[0.0, 1.0]])
c = np.array([[1.0, 1.0]])
d = np.array([[0.0, 0.0]])
n = 10
# Check a step response.
ts, ys = dstep((a, b, c, d, 1), n=n)
# Create the exact step response.
stp0 = (1.0 / (1 - lambda1)) * (1.0 - lambda1 ** np.arange(n))
stp1 = (1.0 / (1 - lambda2)) * (1.0 - lambda2 ** np.arange(n))
assert_allclose(ys[0][:, 0], stp0)
assert_allclose(ys[1][:, 0], stp1)
# Check an impulse response with an initial condition.
x0 = np.array([1.0, 1.0])
ti, yi = dimpulse((a, b, c, d, 1), n=n, x0=x0)
# Create the exact impulse response.
imp = (np.array([lambda1, lambda2]) **
np.arange(-1, n + 1).reshape(-1, 1))
imp[0, :] = 0.0
# Analytical solution to impulse response
y0 = imp[:n, 0] + np.dot(imp[1:n + 1, :], x0)
y1 = imp[:n, 1] + np.dot(imp[1:n + 1, :], x0)
assert_allclose(yi[0][:, 0], y0)
assert_allclose(yi[1][:, 0], y1)
# Check that dt=0.1, n=3 gives 3 time values.
system = ([1.0], [1.0, -0.5], 0.1)
t, (y,) = dstep(system, n=3)
assert_allclose(t, [0, 0.1, 0.2])
assert_array_equal(y.T, [[0, 1.0, 1.5]])
t, (y,) = dimpulse(system, n=3)
assert_allclose(t, [0, 0.1, 0.2])
assert_array_equal(y.T, [[0, 1, 0.5]])
class TestDlti(object):
def test_dlti_instantiation(self):
# Test that lti can be instantiated.
dt = 0.05
# TransferFunction
s = dlti([1], [-1], dt=dt)
assert_(isinstance(s, TransferFunction))
assert_(isinstance(s, dlti))
assert_(not isinstance(s, lti))
assert_equal(s.dt, dt)
# ZerosPolesGain
s = dlti(np.array([]), np.array([-1]), 1, dt=dt)
assert_(isinstance(s, ZerosPolesGain))
assert_(isinstance(s, dlti))
assert_(not isinstance(s, lti))
assert_equal(s.dt, dt)
# StateSpace
s = dlti([1], [-1], 1, 3, dt=dt)
assert_(isinstance(s, StateSpace))
assert_(isinstance(s, dlti))
assert_(not isinstance(s, lti))
assert_equal(s.dt, dt)
# Number of inputs
assert_raises(ValueError, dlti, 1)
assert_raises(ValueError, dlti, 1, 1, 1, 1, 1)
class TestStateSpaceDisc(object):
def test_initialization(self):
# Check that all initializations work
dt = 0.05
StateSpace(1, 1, 1, 1, dt=dt)
StateSpace([1], [2], [3], [4], dt=dt)
StateSpace(np.array([[1, 2], [3, 4]]), np.array([[1], [2]]),
np.array([[1, 0]]), np.array([[0]]), dt=dt)
StateSpace(1, 1, 1, 1, dt=True)
def test_conversion(self):
# Check the conversion functions
s = StateSpace(1, 2, 3, 4, dt=0.05)
assert_(isinstance(s.to_ss(), StateSpace))
assert_(isinstance(s.to_tf(), TransferFunction))
assert_(isinstance(s.to_zpk(), ZerosPolesGain))
# Make sure copies work
assert_(StateSpace(s) is not s)
assert_(s.to_ss() is not s)
def test_properties(self):
# Test setters/getters for cross class properties.
# This implicitly tests to_tf() and to_zpk()
# Getters
s = StateSpace(1, 1, 1, 1, dt=0.05)
assert_equal(s.poles, [1])
assert_equal(s.zeros, [0])
class TestTransferFunction(object):
def test_initialization(self):
# Check that all initializations work
dt = 0.05
TransferFunction(1, 1, dt=dt)
TransferFunction([1], [2], dt=dt)
TransferFunction(np.array([1]), np.array([2]), dt=dt)
TransferFunction(1, 1, dt=True)
def test_conversion(self):
# Check the conversion functions
s = TransferFunction([1, 0], [1, -1], dt=0.05)
assert_(isinstance(s.to_ss(), StateSpace))
assert_(isinstance(s.to_tf(), TransferFunction))
assert_(isinstance(s.to_zpk(), ZerosPolesGain))
# Make sure copies work
assert_(TransferFunction(s) is not s)
assert_(s.to_tf() is not s)
def test_properties(self):
# Test setters/getters for cross class properties.
# This implicitly tests to_ss() and to_zpk()
# Getters
s = TransferFunction([1, 0], [1, -1], dt=0.05)
assert_equal(s.poles, [1])
assert_equal(s.zeros, [0])
class TestZerosPolesGain(object):
def test_initialization(self):
# Check that all initializations work
dt = 0.05
ZerosPolesGain(1, 1, 1, dt=dt)
ZerosPolesGain([1], [2], 1, dt=dt)
ZerosPolesGain(np.array([1]), np.array([2]), 1, dt=dt)
ZerosPolesGain(1, 1, 1, dt=True)
def test_conversion(self):
# Check the conversion functions
s = ZerosPolesGain(1, 2, 3, dt=0.05)
assert_(isinstance(s.to_ss(), StateSpace))
assert_(isinstance(s.to_tf(), TransferFunction))
assert_(isinstance(s.to_zpk(), ZerosPolesGain))
# Make sure copies work
assert_(ZerosPolesGain(s) is not s)
assert_(s.to_zpk() is not s)
class Test_dfreqresp(object):
def test_manual(self):
# Test dfreqresp() real part calculation (manual sanity check).
# 1st order low-pass filter: H(z) = 1 / (z - 0.2),
system = TransferFunction(1, [1, -0.2], dt=0.1)
w = [0.1, 1, 10]
w, H = dfreqresp(system, w=w)
# test real
expected_re = [1.2383, 0.4130, -0.7553]
assert_almost_equal(H.real, expected_re, decimal=4)
# test imag
expected_im = [-0.1555, -1.0214, 0.3955]
assert_almost_equal(H.imag, expected_im, decimal=4)
def test_auto(self):
# Test dfreqresp() real part calculation.
# 1st order low-pass filter: H(z) = 1 / (z - 0.2),
system = TransferFunction(1, [1, -0.2], dt=0.1)
w = [0.1, 1, 10, 100]
w, H = dfreqresp(system, w=w)
jw = np.exp(w * 1j)
y = np.polyval(system.num, jw) / np.polyval(system.den, jw)
# test real
expected_re = y.real
assert_almost_equal(H.real, expected_re)
# test imag
expected_im = y.imag
assert_almost_equal(H.imag, expected_im)
def test_freq_range(self):
# Test that dfreqresp() finds a reasonable frequency range.
# 1st order low-pass filter: H(z) = 1 / (z - 0.2),
# Expected range is 10 points from 0 up to (but not including) pi.
system = TransferFunction(1, [1, -0.2], dt=0.1)
n = 10
expected_w = np.linspace(0, np.pi, 10, endpoint=False)
w, H = dfreqresp(system, n=n)
assert_almost_equal(w, expected_w)
def test_pole_one(self):
# Test that dfreqresp() doesn't fail on a system with a pole at z = 1.
# discrete-time integrator: H(z) = 1 / (z - 1)
system = TransferFunction([1], [1, -1], dt=0.1)
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, message="divide by zero")
sup.filter(RuntimeWarning, message="invalid value encountered")
w, H = dfreqresp(system, n=2)
assert_equal(w[0], 0.) # a fail would give not-a-number
def test_error(self):
# Raise an error for continuous-time systems
system = lti([1], [1, 1])
assert_raises(AttributeError, dfreqresp, system)
def test_from_state_space(self):
# H(z) = 2 / (z^3 - 0.5 * z^2)
system_TF = dlti([2], [1, -0.5, 0, 0])
A = np.array([[0.5, 0, 0],
[1, 0, 0],
[0, 1, 0]])
B = np.array([[1, 0, 0]]).T
C = np.array([[0, 0, 2]])
D = 0
system_SS = dlti(A, B, C, D)
w = 10.0**np.arange(-3,0,.5)
with suppress_warnings() as sup:
sup.filter(BadCoefficients)
w1, H1 = dfreqresp(system_TF, w=w)
w2, H2 = dfreqresp(system_SS, w=w)
assert_almost_equal(H1, H2)
def test_from_zpk(self):
# 1st order low-pass filter: H(z) = 0.3 / (z - 0.2),
system_ZPK = dlti([],[0.2],0.3)
system_TF = dlti(0.3, [1, -0.2])
w = [0.1, 1, 10, 100]
w1, H1 = dfreqresp(system_ZPK, w=w)
w2, H2 = dfreqresp(system_TF, w=w)
assert_almost_equal(H1, H2)
class Test_bode(object):
def test_manual(self):
# Test dbode() magnitude calculation (manual sanity check).
# 1st order low-pass filter: H(z) = 0.3 / (z - 0.2),
dt = 0.1
system = TransferFunction(0.3, [1, -0.2], dt=dt)
w = [0.1, 0.5, 1, np.pi]
w2, mag, phase = dbode(system, w=w)
# Test mag
expected_mag = [-8.5329, -8.8396, -9.6162, -12.0412]
assert_almost_equal(mag, expected_mag, decimal=4)
# Test phase
expected_phase = [-7.1575, -35.2814, -67.9809, -180.0000]
assert_almost_equal(phase, expected_phase, decimal=4)
# Test frequency
assert_equal(np.array(w) / dt, w2)
def test_auto(self):
# Test dbode() magnitude calculation.
# 1st order low-pass filter: H(z) = 0.3 / (z - 0.2),
system = TransferFunction(0.3, [1, -0.2], dt=0.1)
w = np.array([0.1, 0.5, 1, np.pi])
w2, mag, phase = dbode(system, w=w)
jw = np.exp(w * 1j)
y = np.polyval(system.num, jw) / np.polyval(system.den, jw)
# Test mag
expected_mag = 20.0 * np.log10(abs(y))
assert_almost_equal(mag, expected_mag)
# Test phase
expected_phase = np.rad2deg(np.angle(y))
assert_almost_equal(phase, expected_phase)
def test_range(self):
# Test that dbode() finds a reasonable frequency range.
# 1st order low-pass filter: H(z) = 0.3 / (z - 0.2),
dt = 0.1
system = TransferFunction(0.3, [1, -0.2], dt=0.1)
n = 10
# Expected range is n points from 0 up to (but not including) pi, scaled by 1/dt.
expected_w = np.linspace(0, np.pi, n, endpoint=False) / dt
w, mag, phase = dbode(system, n=n)
assert_almost_equal(w, expected_w)
def test_pole_one(self):
# Test that dbode() doesn't fail on a system with a pole at z = 1.
# discrete-time integrator: H(z) = 1 / (z - 1)
system = TransferFunction([1], [1, -1], dt=0.1)
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, message="divide by zero")
sup.filter(RuntimeWarning, message="invalid value encountered")
w, mag, phase = dbode(system, n=2)
assert_equal(w[0], 0.) # a fail would give not-a-number
def test_imaginary(self):
# bode() should not fail on a system with pure imaginary poles.
# The test passes if bode doesn't raise an exception.
system = TransferFunction([1], [1, 0, 100], dt=0.1)
dbode(system, n=2)
def test_error(self):
# Raise an error for continuous-time systems
system = lti([1], [1, 1])
assert_raises(AttributeError, dbode, system)
class TestTransferFunctionZConversion(object):
"""Test private conversions between 'z' and 'z**-1' polynomials."""
def test_full(self):
# Numerator and denominator same order
num = [2, 3, 4]
den = [5, 6, 7]
num2, den2 = TransferFunction._z_to_zinv(num, den)
assert_equal(num, num2)
assert_equal(den, den2)
num2, den2 = TransferFunction._zinv_to_z(num, den)
assert_equal(num, num2)
assert_equal(den, den2)
def test_numerator(self):
# Numerator lower order than denominator
num = [2, 3]
den = [5, 6, 7]
num2, den2 = TransferFunction._z_to_zinv(num, den)
assert_equal([0, 2, 3], num2)
assert_equal(den, den2)
num2, den2 = TransferFunction._zinv_to_z(num, den)
assert_equal([2, 3, 0], num2)
assert_equal(den, den2)
def test_denominator(self):
# Numerator higher order than denominator
num = [2, 3, 4]
den = [5, 6]
num2, den2 = TransferFunction._z_to_zinv(num, den)
assert_equal(num, num2)
assert_equal([0, 5, 6], den2)
num2, den2 = TransferFunction._zinv_to_z(num, den)
assert_equal(num, num2)
assert_equal([5, 6, 0], den2)
from bson import ObjectId
import simplejson as json
from eve.tests import TestBase
from eve.tests.test_settings import MONGO_DBNAME
from eve.tests.utils import DummyEvent
from eve import STATUS_OK, LAST_UPDATED, ISSUES, STATUS, ETAG
from eve.methods.patch import patch_internal
class TestPatch(TestBase):
def test_patch_to_resource_endpoint(self):
_, status = self.patch(self.known_resource_url, data={})
self.assert405(status)
def test_readonly_resource(self):
_, status = self.patch(self.readonly_id_url, data={})
self.assert405(status)
def test_unknown_id(self):
_, status = self.patch(self.unknown_item_id_url,
data={"key1": 'value1'})
self.assert404(status)
def test_unknown_id_different_resource(self):
# patching a 'user' with a valid 'contact' id will 404
_, status = self.patch('%s/%s/' % (self.different_resource,
self.item_id),
data={"key1": "value1"})
self.assert404(status)
# of course we can still patch a 'user'
_, status = self.patch('%s/%s/' % (self.different_resource,
self.user_id),
data={'key1': '{"username": "username1"}'},
headers=[('If-Match', self.user_etag)])
self.assert200(status)
def test_by_name(self):
_, status = self.patch(self.item_name_url, data={'key1': 'value1'})
self.assert405(status)
def test_ifmatch_missing(self):
_, status = self.patch(self.item_id_url, data={'key1': 'value1'})
self.assert403(status)
def test_ifmatch_disabled(self):
self.app.config['IF_MATCH'] = False
r, status = self.patch(self.item_id_url, data={'key1': 'value1'})
self.assert200(status)
self.assertTrue(ETAG not in r)
def test_ifmatch_bad_etag(self):
_, status = self.patch(self.item_id_url,
data={'key1': 'value1'},
headers=[('If-Match', 'not-quite-right')])
self.assert412(status)
def test_unique_value(self):
# TODO
# for the time being we are happy with testing only Eve's custom
# validation. We rely on Cerberus' own test suite for other validation
# unit tests. This test also makes sure that response status is
# syntactically correct in case of validation issues.
# We should probably test every single case as well (seems overkill).
r, status = self.patch(self.item_id_url,
data={"ref": "%s" % self.alt_ref},
headers=[('If-Match', self.item_etag)])
self.assertValidationErrorStatus(status)
self.assertValidationError(r, {'ref': "value '%s' is not unique" %
self.alt_ref})
def test_patch_string(self):
field = "ref"
test_value = "1234567890123456789012345"
changes = {field: test_value}
r = self.perform_patch(changes)
db_value = self.compare_patch_with_get(field, r)
self.assertEqual(db_value, test_value)
def test_patch_integer(self):
field = "prog"
test_value = 9999
changes = {field: test_value}
r = self.perform_patch(changes)
db_value = self.compare_patch_with_get(field, r)
self.assertEqual(db_value, test_value)
def test_patch_list_as_array(self):
field = "role"
test_value = ["vendor", "client"]
changes = {field: test_value}
r = self.perform_patch(changes)
db_value = self.compare_patch_with_get(field, r)
self.assertTrue(set(test_value).issubset(db_value))
def test_patch_rows(self):
field = "rows"
test_value = [
{'sku': 'AT1234', 'price': 99},
{'sku': 'XF9876', 'price': 9999}
]
changes = {field: test_value}
r = self.perform_patch(changes)
db_value = self.compare_patch_with_get(field, r)
for test_item in test_value:
self.assertTrue(test_item in db_value)
def test_patch_list(self):
field = "alist"
test_value = ["a_string", 99]
changes = {field: test_value}
r = self.perform_patch(changes)
db_value = self.compare_patch_with_get(field, r)
self.assertEqual(db_value, test_value)
def test_patch_dict(self):
field = "location"
test_value = {'address': 'an address', 'city': 'a city'}
changes = {field: test_value}
original_city = []
def keep_original_city(resource_name, updates, original):
original_city.append(original['location']['city'])
self.app.on_update += keep_original_city
self.app.on_updated += keep_original_city
r = self.perform_patch(changes)
db_value = self.compare_patch_with_get(field, r)
self.assertEqual(db_value, test_value)
self.assertEqual(original_city[0], original_city[1])
def test_patch_datetime(self):
field = "born"
test_value = "Tue, 06 Nov 2012 10:33:31 GMT"
changes = {field: test_value}
r = self.perform_patch(changes)
db_value = self.compare_patch_with_get(field, r)
self.assertEqual(db_value, test_value)
def test_patch_objectid(self):
field = "tid"
test_value = "4f71c129c88e2018d4000000"
changes = {field: test_value}
r = self.perform_patch(changes)
db_value = self.compare_patch_with_get(field, r)
self.assertEqual(db_value, test_value)
def test_patch_null_objectid(self):
# verify that #341 is fixed.
field = "tid"
test_value = None
changes = {field: test_value}
r = self.perform_patch(changes)
db_value = self.compare_patch_with_get(field, r)
self.assertEqual(db_value, test_value)
def test_patch_defaults(self):
field = "ref"
test_value = "1234567890123456789012345"
changes = {field: test_value}
r = self.perform_patch(changes)
self.assertRaises(KeyError, self.compare_patch_with_get, 'title', r)
def test_patch_defaults_with_post_override(self):
field = "ref"
test_value = "1234567890123456789012345"
r = self.perform_patch_with_post_override(field, test_value)
self.assert200(r.status_code)
self.assertRaises(KeyError, self.compare_patch_with_get, 'title',
json.loads(r.get_data()))
def test_patch_multiple_fields(self):
fields = ['ref', 'prog', 'role']
test_values = ["9876543210987654321054321", 123, ["agent"]]
changes = {"ref": test_values[0], "prog": test_values[1],
"role": test_values[2]}
r = self.perform_patch(changes)
db_values = self.compare_patch_with_get(fields, r)
for i in range(len(db_values)):
self.assertEqual(db_values[i], test_values[i])
def test_patch_with_post_override(self):
# a POST request with PATCH override turns into a PATCH request
r = self.perform_patch_with_post_override('prog', 1)
self.assert200(r.status_code)
def test_patch_internal(self):
# test that patch_internal is available and working properly.
test_field = 'ref'
test_value = "9876543210987654321098765"
data = {test_field: test_value}
with self.app.test_request_context(self.item_id_url):
r, _, _, status = patch_internal(
self.known_resource, data, concurrency_check=False,
**{'_id': self.item_id})
db_value = self.compare_patch_with_get(test_field, r)
self.assertEqual(db_value, test_value)
self.assert200(status)
def test_patch_etag_header(self):
        # test that Etag is always included with the response header. See #562.
changes = {"ref": "1234567890123456789012345"}
headers = [('Content-Type', 'application/json'),
('If-Match', self.item_etag)]
r = self.test_client.patch(self.item_id_url,
data=json.dumps(changes),
headers=headers)
self.assertTrue('Etag' in r.headers)
def test_patch_nested(self):
changes = {'location.city': 'a nested city',
'location.address': 'a nested address'}
r = self.perform_patch(changes)
values = self.compare_patch_with_get('location', r)
self.assertEqual(values['city'], 'a nested city')
self.assertEqual(values['address'], 'a nested address')
def perform_patch(self, changes):
r, status = self.patch(self.item_id_url,
data=changes,
headers=[('If-Match', self.item_etag)])
self.assert200(status)
self.assertPatchResponse(r, self.item_id)
return r
def perform_patch_with_post_override(self, field, value):
headers = [('X-HTTP-Method-Override', 'PATCH'),
('If-Match', self.item_etag),
('Content-Type', 'application/json')]
return self.test_client.post(self.item_id_url,
data=json.dumps({field: value}),
headers=headers)
def compare_patch_with_get(self, fields, patch_response):
raw_r = self.test_client.get(self.item_id_url)
r, status = self.parse_response(raw_r)
self.assert200(status)
self.assertEqual(raw_r.headers.get('ETag'),
patch_response[ETAG])
if isinstance(fields, str):
return r[fields]
else:
return [r[field] for field in fields]
def test_patch_allow_unknown(self):
changes = {"unknown": "unknown"}
r, status = self.patch(self.item_id_url,
data=changes,
headers=[('If-Match', self.item_etag)])
self.assertValidationErrorStatus(status)
self.assertValidationError(r, {'unknown': 'unknown field'})
self.app.config['DOMAIN'][self.known_resource]['allow_unknown'] = True
r, status = self.patch(self.item_id_url,
data=changes,
headers=[('If-Match', self.item_etag)])
self.assert200(status)
self.assertPatchResponse(r, self.item_id)
def test_patch_x_www_form_urlencoded(self):
field = "ref"
test_value = "1234567890123456789012345"
changes = {field: test_value}
headers = [('If-Match', self.item_etag)]
r, status = self.parse_response(self.test_client.patch(
self.item_id_url, data=changes, headers=headers))
self.assert200(status)
self.assertTrue('OK' in r[STATUS])
def test_patch_x_www_form_urlencoded_number_serialization(self):
del(self.domain['contacts']['schema']['ref']['required'])
field = 'anumber'
test_value = 3.5
changes = {field: test_value}
headers = [('If-Match', self.item_etag)]
r, status = self.parse_response(self.test_client.patch(
self.item_id_url, data=changes, headers=headers))
self.assert200(status)
self.assertTrue('OK' in r[STATUS])
def test_patch_referential_integrity(self):
data = {"person": self.unknown_item_id}
headers = [('If-Match', self.invoice_etag)]
r, status = self.patch(self.invoice_id_url, data=data, headers=headers)
self.assertValidationErrorStatus(status)
expected = ("value '%s' must exist in resource '%s', field '%s'" %
(self.unknown_item_id, 'contacts',
self.domain['contacts']['id_field']))
self.assertValidationError(r, {'person': expected})
data = {"person": self.item_id}
r, status = self.patch(self.invoice_id_url, data=data, headers=headers)
self.assert200(status)
self.assertPatchResponse(r, self.invoice_id)
def test_patch_write_concern_success(self):
# 0 and 1 are the only valid values for 'w' on our mongod instance (1
# is the default)
self.domain['contacts']['mongo_write_concern'] = {'w': 0}
field = "ref"
test_value = "X234567890123456789012345"
changes = {field: test_value}
_, status = self.patch(self.item_id_url,
data=changes,
headers=[('If-Match', self.item_etag)])
self.assert200(status)
def test_patch_write_concern_fail(self):
# should get a 500 since there's no replicaset on the mongod instance
self.domain['contacts']['mongo_write_concern'] = {'w': 2}
field = "ref"
test_value = "X234567890123456789012345"
changes = {field: test_value}
_, status = self.patch(self.item_id_url,
data=changes,
headers=[('If-Match', self.item_etag)])
self.assert500(status)
def test_patch_missing_standard_date_fields(self):
"""Documents created outside the API context could be lacking the
LAST_UPDATED and/or DATE_CREATED fields.
"""
        # directly insert a document, without DATE_CREATED and LAST_UPDATED
# values.
contacts = self.random_contacts(1, False)
ref = 'test_update_field'
contacts[0]['ref'] = ref
_db = self.connection[MONGO_DBNAME]
_db.contacts.insert(contacts)
# now retrieve same document via API and get its etag, which is
        # supposed to be computed on default DATE_CREATED and LAST_UPDATED
# values.
response, status = self.get(self.known_resource, item=ref)
etag = response[ETAG]
_id = response['_id']
# attempt a PATCH with the new etag.
field = "ref"
test_value = "X234567890123456789012345"
changes = {field: test_value}
_, status = self.patch('%s/%s' % (self.known_resource_url, _id),
data=changes, headers=[('If-Match', etag)])
self.assert200(status)
def test_patch_subresource(self):
_db = self.connection[MONGO_DBNAME]
# create random contact
fake_contact = self.random_contacts(1)
fake_contact_id = _db.contacts.insert(fake_contact)[0]
# update first invoice to reference the new contact
_db.invoices.update({'_id': ObjectId(self.invoice_id)},
{'$set': {'person': fake_contact_id}})
# GET all invoices by new contact
response, status = self.get('users/%s/invoices/%s' %
(fake_contact_id, self.invoice_id))
etag = response[ETAG]
data = {"inv_number": "new_number"}
headers = [('If-Match', etag)]
response, status = self.patch('users/%s/invoices/%s' %
(fake_contact_id, self.invoice_id),
data=data, headers=headers)
self.assert200(status)
self.assertPatchResponse(response, self.invoice_id, 'peopleinvoices')
def test_patch_bandwidth_saver(self):
changes = {'ref': '1234567890123456789012345'}
# bandwidth_saver is on by default
self.assertTrue(self.app.config['BANDWIDTH_SAVER'])
r = self.perform_patch(changes)
self.assertFalse('ref' in r)
db_value = self.compare_patch_with_get(self.app.config['ETAG'], r)
self.assertEqual(db_value, r[self.app.config['ETAG']])
self.item_etag = r[self.app.config['ETAG']]
# test return all fields (bandwidth_saver off)
self.app.config['BANDWIDTH_SAVER'] = False
r = self.perform_patch(changes)
self.assertTrue('ref' in r)
db_value = self.compare_patch_with_get(self.app.config['ETAG'], r)
self.assertEqual(db_value, r[self.app.config['ETAG']])
def test_patch_readonly_field_with_previous_document(self):
schema = self.domain['contacts']['schema']
del(schema['ref']['required'])
# disable read-only on the field so we can store a value which is
        # also different from its default value.
schema['read_only_field']['readonly'] = False
changes = {'read_only_field': 'value'}
r = self.perform_patch(changes)
# resume read-only status for the field
self.domain['contacts']['schema']['read_only_field']['readonly'] = True
# test that if the read-only field is included with the payload and its
# value is equal to the one stored with the document, validation
# succeeds (#479).
etag = r['_etag']
r, status = self.patch(self.item_id_url, data=changes,
headers=[('If-Match', etag)])
self.assert200(status)
self.assertPatchResponse(r, self.item_id)
# test that if the read-only field is included with the payload and its
# value is different from the stored document, validation fails.
etag = r['_etag']
changes = {'read_only_field': 'another value'}
r, status = self.patch(self.item_id_url, data=changes,
headers=[('If-Match', etag)])
self.assert422(status)
self.assertTrue('is read-only' in r['_issues']['read_only_field'])
def test_patch_nested_document_not_overwritten(self):
""" Test that nested documents are not overwritten on PATCH and #519
is fixed.
"""
schema = {
'sensor': {
"type": "dict",
"schema": {
"name": {"type": "string"},
"lon": {"type": "float"},
"lat": {"type": "float"},
"value": {"type": "float", "default": 10.3},
"dict": {
'type': 'dict',
'schema': {
'string': {'type': 'string'},
'int': {'type': 'integer'},
}
}
}
},
'test': {
'type': 'string',
'readonly': True,
'default': 'default'
}
}
self.app.config['BANDWIDTH_SAVER'] = False
self.app.register_resource('sensors', {'schema': schema})
changes = {
'sensor': {
'name': 'device_name',
'lon': 43.4,
'lat': 1.31,
'dict': {'int': 99}
}
}
r, status = self.post("sensors", data=changes)
self.assert201(status)
id, etag, value, test, int = (
r[self.domain['sensors']['id_field']],
r[ETAG],
r['sensor']['value'],
r['test'],
r['sensor']['dict']['int']
)
changes = {
'sensor': {
'lon': 10.0,
'dict': {'string': 'hi'}
}
}
r, status = self.patch(
"/%s/%s" % ('sensors', id),
data=changes,
headers=[('If-Match', etag)]
)
self.assert200(status)
etag, value, int = (
r[ETAG],
r['sensor']['value'],
r['sensor']['dict']['int']
)
self.assertEqual(value, 10.3)
self.assertEqual(test, 'default')
self.assertEqual(int, 99)
def test_patch_nested_document_nullable_missing(self):
schema = {
'sensor': {
'type': 'dict',
'schema': {
'name': {'type': 'string'},
},
'default': None,
},
'other': {
'type': 'dict',
'schema': {
'name': {'type': 'string'},
},
}
}
self.app.config['BANDWIDTH_SAVER'] = False
self.app.register_resource('sensors', {'schema': schema})
changes = {}
r, status = self.post("sensors", data=changes)
self.assert201(status)
id, etag = r[self.domain['sensors']['id_field']], r[ETAG]
self.assertTrue('sensor' in r)
self.assertEqual(r['sensor'], None)
self.assertFalse('other' in r)
changes = {
'sensor': {'name': 'device_name'},
'other': {'name': 'other_name'},
}
r, status = self.patch(
"/%s/%s" % ('sensors', id),
data=changes,
headers=[('If-Match', etag)]
)
self.assert200(status)
self.assertEqual(r['sensor'], {'name': 'device_name'})
self.assertEqual(r['other'], {'name': 'other_name'})
def test_patch_dependent_field_on_origin_document(self):
""" Test that when patching a field which is dependent on another and
this other field is not provided with the patch but is still present
on the target document, the patch will be accepted. See #363.
"""
# this will fail as dependent field is missing even in the
# document we are trying to update.
del(self.domain['contacts']['schema']['dependency_field1']['default'])
del(self.domain['contacts']['defaults']['dependency_field1'])
changes = {'dependency_field2': 'value'}
r, status = self.patch(self.item_id_url, data=changes,
headers=[('If-Match', self.item_etag)])
self.assert422(status)
# update the stored document by adding dependency field.
changes = {'dependency_field1': 'value'}
r, status = self.patch(self.item_id_url, data=changes,
headers=[('If-Match', self.item_etag)])
self.assert200(status)
# now the field2 update will be accepted as the dependency field is
# present in the stored document already.
etag = r['_etag']
changes = {'dependency_field2': 'value'}
r, status = self.patch(self.item_id_url, data=changes,
headers=[('If-Match', etag)])
self.assert200(status)
def test_patch_dependent_field_value_on_origin_document(self):
""" Test that when patching a field which is dependent on another and
this other field is not provided with the patch but is still present
on the target document, the patch will be accepted. See #363.
"""
        # this will fail as the dependency field does not hold the required
        # value in the document we are trying to update.
changes = {'dependency_field3': 'value'}
r, status = self.patch(self.item_id_url, data=changes,
headers=[('If-Match', self.item_etag)])
self.assert422(status)
# update the stored document by setting the dependency field to
# the required value.
changes = {'dependency_field1': 'value'}
r, status = self.patch(self.item_id_url, data=changes,
headers=[('If-Match', self.item_etag)])
self.assert200(status)
        # now the field3 update will be accepted as the dependency field
        # holds the required value in the stored document.
etag = r['_etag']
changes = {'dependency_field3': 'value'}
r, status = self.patch(self.item_id_url, data=changes,
headers=[('If-Match', etag)])
self.assert200(status)
def test_id_field_in_document_fails(self):
# since v0.6 we also allow the id field to be included with the POSTed
# document, but not with PATCH since it is immutable
self.app.config['IF_MATCH'] = False
id_field = self.domain[self.known_resource]['id_field']
data = {id_field: '55b2340538345bd048100ffe'}
r, status = self.patch(self.item_id_url, data=data)
self.assert400(status)
self.assertTrue('immutable' in r['_error']['message'])
def test_patch_custom_idfield(self):
response, status = self.get('products?max_results=1')
product = response['_items'][0]
headers = [('If-Match', product[ETAG])]
data = {'title': 'Awesome product'}
r, status = self.patch('products/%s' % product['sku'], data=data,
headers=headers)
self.assert200(status)
def test_patch_type_coercion(self):
schema = self.domain[self.known_resource]['schema']
schema['aninteger']['coerce'] = lambda string: int(float(string))
changes = {'ref': '1234567890123456789054321', 'aninteger': '42.3'}
r, status = self.patch(self.item_id_url, data=changes,
headers=[('If-Match', self.item_etag)])
self.assert200(status)
r, status = self.get(r['_links']['self']['href'])
self.assertEqual(r['aninteger'], 42)
def assertPatchResponse(self, response, item_id, resource=None):
id_field = self.domain[resource or self.known_resource]['id_field']
self.assertTrue(STATUS in response)
self.assertTrue(STATUS_OK in response[STATUS])
self.assertFalse(ISSUES in response)
self.assertTrue(id_field in response)
self.assertEqual(response[id_field], item_id)
self.assertTrue(LAST_UPDATED in response)
self.assertTrue(ETAG in response)
self.assertTrue('_links' in response)
self.assertItemLink(response['_links'], item_id)
    def patch(self, url, data, headers=None):
        # copy the headers to avoid mutating a shared default list
        headers = list(headers) if headers else []
        headers.append(('Content-Type', 'application/json'))
r = self.test_client.patch(url,
data=json.dumps(data),
headers=headers)
return self.parse_response(r)
class TestEvents(TestBase):
new_ref = "0123456789012345678901234"
def test_on_pre_PATCH(self):
devent = DummyEvent(self.before_update)
self.app.on_pre_PATCH += devent
self.patch()
self.assertEqual(self.known_resource, devent.called[0])
self.assertEqual(3, len(devent.called))
def test_on_pre_PATCH_contacts(self):
devent = DummyEvent(self.before_update)
self.app.on_pre_PATCH_contacts += devent
self.patch()
self.assertEqual(2, len(devent.called))
def test_on_PATCH_dynamic_filter(self):
def filter_this(resource, request, lookup):
lookup["_id"] = self.unknown_item_id
self.app.on_pre_PATCH += filter_this
# Would normally patch the known document; will return 404 instead.
r, s = self.parse_response(self.patch())
self.assert404(s)
def test_on_post_PATCH(self):
devent = DummyEvent(self.after_update)
self.app.on_post_PATCH += devent
self.patch()
self.assertEqual(self.known_resource, devent.called[0])
self.assertEqual(200, devent.called[2].status_code)
self.assertEqual(3, len(devent.called))
def test_on_post_PATCH_contacts(self):
devent = DummyEvent(self.after_update)
self.app.on_post_PATCH_contacts += devent
self.patch()
self.assertEqual(200, devent.called[1].status_code)
self.assertEqual(2, len(devent.called))
def test_on_update(self):
devent = DummyEvent(self.before_update)
self.app.on_update += devent
self.patch()
self.assertEqual(self.known_resource, devent.called[0])
self.assertEqual(3, len(devent.called))
def test_on_update_contacts(self):
devent = DummyEvent(self.before_update)
self.app.on_update_contacts += devent
self.patch()
self.assertEqual(2, len(devent.called))
def test_on_updated(self):
devent = DummyEvent(self.after_update)
self.app.on_updated += devent
self.patch()
self.assertEqual(self.known_resource, devent.called[0])
self.assertEqual(3, len(devent.called))
def test_on_updated_contacts(self):
devent = DummyEvent(self.after_update)
self.app.on_updated_contacts += devent
self.patch()
self.assertEqual(2, len(devent.called))
def before_update(self):
db = self.connection[MONGO_DBNAME]
contact = db.contacts.find_one(ObjectId(self.item_id))
return contact['ref'] == self.item_name
def after_update(self):
return not self.before_update()
def patch(self):
headers = [('Content-Type', 'application/json'),
('If-Match', self.item_etag)]
data = json.dumps({"ref": self.new_ref})
return self.test_client.patch(
self.item_id_url, data=data, headers=headers)
| |
# -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import datetime
import logging
import os
import random
import re
import signal
import subprocess
import sys
import tempfile
import time
import luigi
import luigi.format
import luigi.hdfs
from luigi import configuration
logger = logging.getLogger('luigi-interface')
"""
Apache Spark on YARN support
Example configuration section in client.cfg:
[spark]
# assembly jar containing spark and dependencies
spark-jar: /usr/share/spark/jars/spark-assembly-0.8.1-incubating-hadoop2.2.0.jar
# spark script to invoke
spark-class: /usr/share/spark/spark-class
# directory containing the (client side) configuration files for the hadoop cluster
hadoop-conf-dir: /etc/hadoop/conf
"""
class SparkRunContext(object):
def __init__(self):
self.app_id = None
def __enter__(self):
self.__old_signal = signal.getsignal(signal.SIGTERM)
signal.signal(signal.SIGTERM, self.kill_job)
return self
def kill_job(self, captured_signal=None, stack_frame=None):
if self.app_id:
done = False
while not done:
try:
logger.info('Job interrupted, killing application %s', self.app_id)
subprocess.call(['yarn', 'application', '-kill', self.app_id])
done = True
except KeyboardInterrupt:
continue
if captured_signal is not None:
# adding 128 gives the exit code corresponding to a signal
sys.exit(128 + captured_signal)
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type is KeyboardInterrupt:
self.kill_job()
signal.signal(signal.SIGTERM, self.__old_signal)
class SparkJobError(RuntimeError):
def __init__(self, message, out=None, err=None):
super(SparkJobError, self).__init__(message, out, err)
self.message = message
self.out = out
self.err = err
def __str__(self):
info = self.message
if self.out:
info += "\nSTDOUT: " + str(self.out)
if self.err:
info += "\nSTDERR: " + str(self.err)
return info
class SparkJob(luigi.Task):
spark_workers = None
spark_master_memory = None
spark_worker_memory = None
queue = luigi.Parameter(is_global=True, default=None, significant=False)
temp_hadoop_output_file = None
def requires_local(self):
"""
Default impl - override this method if you need any local input to be accessible in init().
"""
return []
def requires_hadoop(self):
return self.requires() # default impl
def input_local(self):
return luigi.task.getpaths(self.requires_local())
def input(self):
return luigi.task.getpaths(self.requires())
def deps(self):
# Overrides the default implementation
return luigi.task.flatten(self.requires_hadoop()) + luigi.task.flatten(self.requires_local())
def jar(self):
raise NotImplementedError("subclass should define jar containing job_class")
def job_class(self):
raise NotImplementedError("subclass should define Spark job_class")
def job_args(self):
return []
def output(self):
raise NotImplementedError("subclass should define HDFS output path")
def run(self):
original_output_path = self.output().path
path_no_slash = original_output_path[:-2] if original_output_path.endswith('/*') else original_output_path
path_no_slash = original_output_path[:-1] if original_output_path[-1] == '/' else path_no_slash
tmp_output = luigi.hdfs.HdfsTarget(path_no_slash + '-luigi-tmp-%09d' % random.randrange(0, 1e10))
args = ['org.apache.spark.deploy.yarn.Client']
args += ['--jar', self.jar()]
args += ['--class', self.job_class()]
for a in self.job_args():
if a == self.output().path:
# pass temporary output path to job args
logger.info('Using temp path: %s for path %s', tmp_output.path, original_output_path)
args += ['--args', tmp_output.path]
else:
args += ['--args', str(a)]
if self.spark_workers is not None:
args += ['--num-workers', self.spark_workers]
if self.spark_master_memory is not None:
args += ['--master-memory', self.spark_master_memory]
if self.spark_worker_memory is not None:
args += ['--worker-memory', self.spark_worker_memory]
queue = self.queue
if queue is not None:
args += ['--queue', queue]
env = os.environ.copy()
env['SPARK_JAR'] = configuration.get_config().get('spark', 'spark-jar')
env['HADOOP_CONF_DIR'] = configuration.get_config().get('spark', 'hadoop-conf-dir')
env['MASTER'] = 'yarn-client'
spark_class = configuration.get_config().get('spark', 'spark-class')
temp_stderr = tempfile.TemporaryFile()
logger.info('Running: %s %s', spark_class, ' '.join(args))
proc = subprocess.Popen([spark_class] + args, stdout=subprocess.PIPE,
stderr=temp_stderr, env=env, close_fds=True)
return_code, final_state, app_id = self.track_progress(proc)
if return_code == 0 and final_state != 'FAILED':
tmp_output.move(path_no_slash)
elif final_state == 'FAILED':
raise SparkJobError('Spark job failed: see yarn logs for %s' % app_id)
else:
temp_stderr.seek(0)
errors = "".join((x.decode('utf8') for x in temp_stderr.readlines()))
logger.error(errors)
raise SparkJobError('Spark job failed', err=errors)
def track_progress(self, proc):
# The Spark client currently outputs a multiline status to stdout every second
# while the application is running. This instead captures status data and updates
# a single line of output until the application finishes.
app_id = None
app_status = 'N/A'
url = 'N/A'
final_state = None
start = time.time()
with SparkRunContext() as context:
while proc.poll() is None:
s = proc.stdout.readline()
app_id_s = re.compile('application identifier: (\w+)').search(s)
if app_id_s:
app_id = app_id_s.group(1)
context.app_id = app_id
app_status_s = re.compile('yarnAppState: (\w+)').search(s)
if app_status_s:
app_status = app_status_s.group(1)
url_s = re.compile('appTrackingUrl: (.+)').search(s)
if url_s:
url = url_s.group(1)
final_state_s = re.compile('distributedFinalState: (\w+)').search(s)
if final_state_s:
final_state = final_state_s.group(1)
if not app_id:
logger.info(s.strip())
else:
elapsed_mins, elapsed_secs = divmod(datetime.timedelta(seconds=time.time() - start).seconds, 60)
status = '[%0d:%02d] Status: %s Tracking: %s' % (elapsed_mins, elapsed_secs, app_status, url)
sys.stdout.write("\r\x1b[K" + status)
sys.stdout.flush()
logger.info(proc.communicate()[0])
return proc.returncode, final_state, app_id
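# Illustrative sketch only (not part of the original module): a minimal
# SparkJob subclass showing how the abstract hooks above fit together. The
# jar path, main class and HDFS paths below are hypothetical placeholders.
class ExampleWordCountSparkJob(SparkJob):
    def jar(self):
        # hypothetical assembly jar containing the job's main class
        return '/usr/lib/example/wordcount-assembly.jar'
    def job_class(self):
        # hypothetical Spark main class inside the jar above
        return 'com.example.spark.WordCount'
    def job_args(self):
        # SparkJob.run() swaps the output path for a temporary HDFS location
        # and moves the result into place once the job succeeds
        return ['hdfs:///data/input/words.txt', self.output().path]
    def output(self):
        return luigi.hdfs.HdfsTarget('/data/output/wordcount')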
class Spark1xJob(luigi.Task):
num_executors = None
driver_memory = None
executor_memory = None
executor_cores = None
deploy_mode = None
queue = None
spark_master = configuration.get_config().get("spark", "spark-master", "yarn-client")
def jar(self):
raise NotImplementedError("subclass should define jar "
"containing job_class")
def dependency_jars(self):
"""
Override to provide a list of dependency jars.
"""
return []
def job_class(self):
raise NotImplementedError("subclass should define Spark job_class")
def spark_options(self):
return []
def job_args(self):
return []
def output(self):
raise NotImplementedError("subclass should define HDFS output path")
def spark_heartbeat(self, line, spark_run_context):
pass
def run(self):
spark_submit = configuration.get_config().get('spark', 'spark-submit',
'spark-submit')
options = [
'--class', self.job_class(),
]
if self.num_executors is not None:
options += ['--num-executors', self.num_executors]
if self.driver_memory is not None:
options += ['--driver-memory', self.driver_memory]
if self.executor_memory is not None:
options += ['--executor-memory', self.executor_memory]
if self.executor_cores is not None:
options += ['--executor-cores', self.executor_cores]
if self.deploy_mode is not None:
options += ['--deploy-mode', self.deploy_mode]
if self.queue is not None:
options += ['--queue', self.queue]
if self.spark_master is not None:
options += ['--master', self.spark_master]
dependency_jars = self.dependency_jars()
if dependency_jars != []:
options += ['--jars', ','.join(dependency_jars)]
args = [spark_submit] + options + self.spark_options() + \
[self.jar()] + list(self.job_args())
args = map(str, args)
env = os.environ.copy()
temp_stderr = tempfile.TemporaryFile()
logger.info('Running: %s', repr(args))
proc = subprocess.Popen(args, stdout=subprocess.PIPE,
stderr=temp_stderr, env=env, close_fds=True)
return_code, final_state, app_id = self.track_progress(proc)
if final_state == 'FAILED':
raise SparkJobError('Spark job failed: see yarn logs for {0}'
.format(app_id))
elif return_code != 0:
temp_stderr.seek(0)
errors = "".join((x.decode('utf8') for x in temp_stderr.readlines()))
logger.error(errors)
raise SparkJobError('Spark job failed', err=errors)
def track_progress(self, proc):
"""
The Spark client currently outputs a multiline status to stdout every second while the application is running.
This instead captures status data and updates a single line of output until the application finishes.
"""
app_id = None
app_status = 'N/A'
url = 'N/A'
final_state = None
start = time.time()
re_app_id = re.compile('application identifier: (\w+)')
re_app_status = re.compile('yarnAppState: (\w+)')
re_url = re.compile('appTrackingUrl: (.+)')
re_final_state = re.compile('distributedFinalState: (\w+)')
with SparkRunContext() as context:
while proc.poll() is None:
s = proc.stdout.readline()
app_id_s = re_app_id.search(s)
if app_id_s:
app_id = app_id_s.group(1)
context.app_id = app_id
app_status_s = re_app_status.search(s)
if app_status_s:
app_status = app_status_s.group(1)
url_s = re_url.search(s)
if url_s:
url = url_s.group(1)
final_state_s = re_final_state.search(s)
if final_state_s:
final_state = final_state_s.group(1)
if not app_id:
logger.info(s.strip())
else:
t_diff = time.time() - start
elapsed_mins, elapsed_secs = divmod(t_diff, 60)
status = ('[%0d:%02d] Status: %s Tracking: %s' %
(elapsed_mins, elapsed_secs, app_status, url))
sys.stdout.write("\r\x1b[K" + status)
sys.stdout.flush()
self.spark_heartbeat(s, context)
logger.info(proc.communicate()[0])
return proc.returncode, final_state, app_id
class PySpark1xJob(Spark1xJob):
num_executors = None
driver_memory = None
executor_memory = None
executor_cores = None
def program(self):
raise NotImplementedError("subclass should define Spark .py file")
def py_files(self):
"""
Override to provide a list of py files.
"""
return []
def run(self):
spark_submit = configuration.get_config().get('spark', 'spark-submit',
'spark-submit')
options = ['--master', self.spark_master]
if self.num_executors is not None:
options += ['--num-executors', self.num_executors]
if self.driver_memory is not None:
options += ['--driver-memory', self.driver_memory]
if self.executor_memory is not None:
options += ['--executor-memory', self.executor_memory]
if self.executor_cores is not None:
options += ['--executor-cores', self.executor_cores]
py_files = self.py_files()
if py_files != []:
options += ['--py-files', ','.join(py_files)]
dependency_jars = self.dependency_jars()
if dependency_jars != []:
options += ['--jars', ','.join(dependency_jars)]
args = [spark_submit] + options + self.spark_options() + \
[self.program()] + list(self.job_args())
args = map(str, args)
env = os.environ.copy()
temp_stderr = tempfile.TemporaryFile()
logger.info('Running: %s', repr(args))
proc = subprocess.Popen(args, stdout=subprocess.PIPE,
stderr=temp_stderr, env=env, close_fds=True)
return_code, final_state, app_id = self.track_progress(proc)
if final_state == 'FAILED':
            raise SparkJobError('Spark job failed: see yarn logs for %s' % app_id)
elif return_code != 0:
temp_stderr.seek(0)
errors = "".join((x.decode('utf8') for x in temp_stderr.readlines()))
logger.error(errors)
raise SparkJobError('Spark job failed', err=errors)
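# Illustrative sketch only: a minimal PySpark1xJob subclass. The driver
# script and HDFS paths below are hypothetical placeholders; program() must
# return the .py file handed to spark-submit.
class ExampleWordCountPySparkJob(PySpark1xJob):
    def program(self):
        # hypothetical PySpark driver script
        return '/usr/lib/example/wordcount.py'
    def job_args(self):
        return ['hdfs:///data/input/words.txt', 'hdfs:///data/output/wordcount']
    def output(self):
        return luigi.hdfs.HdfsTarget('/data/output/wordcount')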
| |
from numpy import asarray, nonzero, sort, unique
from pandas import DataFrame
from .make_colorscale import make_colorscale
from .nd_array.nd_array.check_nd_array_for_bad import check_nd_array_for_bad
from .nd_array.nd_array.cluster_2d_array_slices import cluster_2d_array_slices
from .nd_array.nd_array.normalize_nd_array import normalize_nd_array
from .plot_and_save import plot_and_save
from .style import CATEGORICAL_COLORS
def plot_heat_map(
z,
x=None,
y=None,
normalization_axis=None,
normalization_method=None,
column_annotation=None,
column_annotation_str=None,
column_annotation_colors=None,
column_annotation_kwargs=None,
row_annotation=None,
row_annotation_str=None,
row_annotation_colors=None,
row_annotation_kwargs=None,
sort_axis=None,
cluster_axis=None,
cluster_distance_function="euclidean",
cluster_linkage_method="ward",
colorscale=None,
colormap="bwr",
zmin=None,
zmax=None,
showscale=None,
colorbar_x=None,
layout_width=800,
layout_height=800,
heat_map_axis_domain=(0, 0.9),
annotation_axis_domain=(0.92, 1),
title=None,
xaxis_title=None,
yaxis_title=None,
show_x_tick=True,
show_y_tick=True,
html_file_path=None,
plotly_html_file_path=None,
):
heat_map_axis_template = dict(
domain=heat_map_axis_domain, showgrid=False, zeroline=False
)
annotation_axis_template = dict(
showgrid=False, zeroline=False, ticks="", showticklabels=False
)
if xaxis_title is not None:
xaxis_title = "{} ({})".format(xaxis_title, z.shape[1])
if yaxis_title is not None:
yaxis_title = "{} ({})".format(yaxis_title, z.shape[0])
if show_x_tick is False:
x_ticks = ""
else:
x_ticks = None
if show_y_tick is False:
y_ticks = ""
else:
y_ticks = None
layout = dict(
width=layout_width,
height=layout_height,
title=title,
xaxis=dict(
title=xaxis_title,
ticks=x_ticks,
showticklabels=show_x_tick,
**heat_map_axis_template,
),
xaxis2=dict(domain=annotation_axis_domain, **annotation_axis_template),
yaxis=dict(
title=yaxis_title,
ticks=y_ticks,
showticklabels=show_y_tick,
**heat_map_axis_template,
),
yaxis2=dict(domain=annotation_axis_domain, **annotation_axis_template),
)
if isinstance(z, DataFrame):
x = z.columns
y = z.index
z = z.values
if x is not None:
x = asarray(x)
if y is not None:
y = asarray(y)
if normalization_method:
z = normalize_nd_array(
z, normalization_axis, normalization_method, raise_for_bad=False
)
column_indices = None
row_indices = None
if column_annotation is not None or row_annotation is not None:
if column_annotation is not None:
column_indices = asarray(column_annotation).argsort()
column_annotation = column_annotation[column_indices]
if row_annotation is not None:
row_indices = asarray(row_annotation).argsort()
row_annotation = row_annotation[row_indices]
row_annotation = row_annotation[::-1]
elif sort_axis in (0, 1):
z = sort(z, axis=sort_axis)
if sort_axis == 0:
y = None
elif sort_axis == 1:
x = None
elif cluster_axis is not None:
if not check_nd_array_for_bad(z, raise_for_bad=False).any():
if cluster_axis == "01" or cluster_axis == 0:
row_indices = cluster_2d_array_slices(
z,
0,
distance_function=cluster_distance_function,
linkage_method=cluster_linkage_method,
)
if cluster_axis == "01" or cluster_axis == 1:
column_indices = cluster_2d_array_slices(
z,
1,
distance_function=cluster_distance_function,
linkage_method=cluster_linkage_method,
)
if column_indices is not None:
z = z[:, column_indices]
if x is not None:
x = x[column_indices]
if row_indices is not None:
z = z[row_indices]
if y is not None:
y = y[row_indices]
z = z[::-1]
if y is not None:
y = y[::-1]
if colorscale == "CATEGORICAL_COLORS":
colorscale = make_colorscale(
colors=CATEGORICAL_COLORS[: unique(z).size], plot=False
)
elif colorscale is None and colormap is not None:
colorscale = make_colorscale(colormap=colormap, plot=False)
colorbar_template = dict(len=0.8, thickness=layout_width / 80)
if column_annotation is not None or row_annotation is not None:
colorbar_template["y"] = (heat_map_axis_domain[1] - heat_map_axis_domain[0]) / 2
data = [
dict(
type="heatmap",
z=z,
x=x,
y=y,
colorscale=colorscale,
zmin=zmin,
zmax=zmax,
showscale=showscale,
colorbar=dict(x=colorbar_x, **colorbar_template),
)
]
if column_annotation is not None or row_annotation is not None:
layout["annotations"] = []
annotation_kwargs = dict(showarrow=False, borderpad=0)
if column_annotation is not None:
if column_annotation_colors is None:
column_annotation_colors = CATEGORICAL_COLORS[
: len(set(column_annotation))
]
data.append(
dict(
yaxis="y2",
type="heatmap",
z=tuple((i,) for i in column_annotation),
transpose=True,
colorscale=make_colorscale(
colors=column_annotation_colors, plot=False
),
showscale=False,
hoverinfo="x+z",
)
)
if column_annotation_str is not None:
if column_annotation_kwargs is None:
column_annotation_kwargs = dict(textangle=-90)
for a in unique(column_annotation):
indices = nonzero(column_annotation == a)[0]
index_0 = indices[0]
layout["annotations"].append(
dict(
yref="y2",
x=index_0 + (indices[-1] - index_0) / 2,
y=0,
text="<b>{}</b>".format(column_annotation_str[a]),
**annotation_kwargs,
**column_annotation_kwargs,
)
)
if row_annotation is not None:
if row_annotation_colors is None:
row_annotation_colors = CATEGORICAL_COLORS[: len(set(row_annotation))]
data.append(
dict(
xaxis="x2",
type="heatmap",
z=tuple((i,) for i in row_annotation),
colorscale=make_colorscale(
colors=row_annotation_colors, plot=False
),
showscale=False,
hoverinfo="y+z",
)
)
if row_annotation_str is not None:
if row_annotation_kwargs is None:
row_annotation_kwargs = dict()
for a in unique(row_annotation):
indices = nonzero(row_annotation == a)[0]
index_0 = indices[0]
layout["annotations"].append(
dict(
xref="x2",
x=0,
y=index_0 + (indices[-1] - index_0) / 2,
text="<b>{}</b>".format(row_annotation_str[a]),
**annotation_kwargs,
**row_annotation_kwargs,
)
)
plot_and_save(dict(layout=layout, data=data), html_file_path, plotly_html_file_path)
| |
# -*- coding:utf-8 -*-
#
# Copyright 2019, Couchbase, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from datetime import timedelta
from typing import *
from couchbase.management.generic import GenericManager
from couchbase.options import OptionBlockTimeOut, forward_args
from couchbase.analytics import (AnalyticsLinkType, AnalyticsOptions,
AnalyticsResult, AnalyticsDataset, AnalyticsIndex,
AnalyticsLink, CouchbaseRemoteAnalyticsLink,
S3ExternalAnalyticsLink, AzureBlobExternalAnalyticsLink)
import couchbase_core._libcouchbase as _LCB
from couchbase_core import ulp, mk_formstr
from couchbase.exceptions import (CouchbaseException, DataverseNotFoundException, NotSupportedException,
InvalidArgumentException, ErrorMapper, HTTPException,
AnalyticsLinkExistsException, AnalyticsLinkNotFoundException)
from couchbase.management.admin import Admin, METHMAP
class BaseAnalyticsIndexManagerOptions(OptionBlockTimeOut):
# valid AnalyticsOptions keys
OPTION_KEYS = ["timeout", "readonly", "scan_consistency", "client_context_id", "priority",
"positional_parameters", "named_parameters", "raw"]
def to_analytics_options(self, **kwargs):
final_opts = {**self, **kwargs}
return AnalyticsOptions(
**{k: v for k, v in final_opts.items() if k in self.OPTION_KEYS})
class GetPendingMutationsOptions(BaseAnalyticsIndexManagerOptions):
pass
class DisconnectLinkOptions(BaseAnalyticsIndexManagerOptions):
@overload
def __init__(self,
timeout=None, # type: timedelta
dataverse_name='Default', # type: str
link_name='Local' # type: str
):
pass
def __init__(self, **kwargs):
super(DisconnectLinkOptions, self).__init__(**kwargs)
@property
def dataverse_name(self):
return self.get('dataverse_name', 'Default')
@property
def link_name(self):
return self.get("link_name", 'Local')
class ConnectLinkOptions(BaseAnalyticsIndexManagerOptions):
@overload
def __init__(self,
timeout=None, # type: timedelta
dataverse_name='Default', # type: str
link_name='Local', # type: str
force=False # type: bool
):
pass
def __init__(self, **kwargs):
super(ConnectLinkOptions, self).__init__(**kwargs)
@property
def dataverse_name(self):
return self.get('dataverse_name', 'Default')
@property
def link_name(self):
return self.get('link_name', "Local")
@property
def force(self):
return self.get("force", False)
class DropAnalyticsIndexOptions(BaseAnalyticsIndexManagerOptions):
@overload
def __init__(self,
timeout=None, # type: timedelta
ignore_if_not_exists=False, # type: bool
dataverse_name='Default' # type: str
):
pass
def __init__(self, **kwargs):
super(DropAnalyticsIndexOptions, self).__init__(**kwargs)
@property
def dataverse_name(self):
return self.get('dataverse_name', 'Default')
@property
def ignore_if_not_exists(self):
return self.get('ignore_if_not_exists', False)
class GetAllAnalyticsIndexesOptions(BaseAnalyticsIndexManagerOptions):
pass
class CreateAnalyticsIndexOptions(BaseAnalyticsIndexManagerOptions):
@overload
def __init__(self,
timeout=None, # type: timedelta
ignore_if_exists=False, # type: bool
dataverse_name='Default', # type: str
):
pass
def __init__(self, **kwargs):
super(CreateAnalyticsIndexOptions, self).__init__(**kwargs)
@property
def ignore_if_exists(self):
return self.get('ignore_if_exists', False)
@property
def dataverse_name(self):
return self.get('dataverse_name', 'Default')
class DropDatasetOptions(BaseAnalyticsIndexManagerOptions):
@overload
def __init__(self,
timeout=None, # type: timedelta
ignore_if_not_exists=False, # type: bool
dataverse_name=None, # type: str
):
pass
def __init__(self, **kwargs):
super(DropDatasetOptions, self).__init__(**kwargs)
@property
def ignore_if_not_exists(self):
return self.get('ignore_if_not_exists', False)
@property
def dataverse_name(self):
return self.get('dataverse_name', 'Default')
class GetAllDatasetsOptions(BaseAnalyticsIndexManagerOptions):
pass
class CreateDataverseOptions(BaseAnalyticsIndexManagerOptions):
@overload
def __init__(self,
timeout=None, # type: timedelta
ignore_if_exists=False # type: bool
):
pass
def __init__(self, **kwargs):
super(CreateDataverseOptions, self).__init__(**kwargs)
@property
def ignore_if_exists(self):
return self.get('ignore_if_exists', False)
class DropDataverseOptions(BaseAnalyticsIndexManagerOptions):
@overload
def __init__(self,
timeout=None, # type: timedelta
ignore_if_not_exists=False # type: bool
):
pass
def __init__(self, **kwargs):
super(DropDataverseOptions, self).__init__(**kwargs)
@property
def ignore_if_not_exists(self):
return self.get('ignore_if_not_exists', False)
class CreateDatasetOptions(BaseAnalyticsIndexManagerOptions):
@overload
def __init__(self,
timeout=None, # type: timedelta
ignore_if_exists=False, # type: bool
condition=None, # type: str
dataverse_name='Default' # type: str
):
pass
def __init__(self, **kwargs):
super(CreateDatasetOptions, self).__init__(**kwargs)
@property
def ignore_if_exists(self):
# type: (...) -> bool
return self.get('ignore_if_exists', False)
@property
def condition(self):
# type: (...) -> str
return self.get('condition', "")
@property
def dataverse_name(self):
# type: (...) -> str
return self.get('dataverse_name', 'Default')
class CreateLinkAnalyticsOptions(OptionBlockTimeOut):
pass
class ReplaceLinkAnalyticsOptions(OptionBlockTimeOut):
pass
class DropLinkAnalyticsOptions(OptionBlockTimeOut):
pass
class GetLinksAnalyticsOptions(OptionBlockTimeOut):
@overload
def __init__(self,
timeout=None, # type: timedelta
dataverse_name=None, # type: str
name=None, # type: str
link_type=None, # type: AnalyticsLinkType
):
pass
def __init__(self, **kwargs):
super(GetLinksAnalyticsOptions, self).__init__(**kwargs)
@property
def dataverse_name(self):
# type: (...) -> str
return self.get('dataverse_name', None)
@property
def name(self):
# type: (...) -> str
return self.get('name', None)
@property
def link_type(self):
# type: (...) -> AnalyticsLinkType
return self.get('link_type', None)
class AnalyticsIndexErrorHandler(ErrorMapper):
@staticmethod
def mapping():
        # type: (...) -> Mapping[str, CBErrorType]
return {HTTPException: {'24055.*already exists': AnalyticsLinkExistsException,
'24006.*does not exist': AnalyticsLinkNotFoundException,
'24034.*Cannot find': DataverseNotFoundException}}
class AnalyticsIndexManager(GenericManager):
def __init__(self, # type: "AnalyticsIndexManager"
cluster, # type: "Cluster"
admin_bucket # type: "Admin"
):
"""Analytics Manager
:param admin_bucket: Admin bucket
"""
super(AnalyticsIndexManager, self).__init__(admin_bucket)
self._cluster = cluster
@staticmethod
def _to_analytics_options(option # type: BaseAnalyticsIndexManagerOptions
):
return option.to_analytics_options() if option else AnalyticsOptions()
def _http_request(self, **kwargs):
# TODO: maybe there is a more general way of making this
# call? Ponder
# the kwargs can override the defaults
imeth = None
method = kwargs.get('method', 'GET')
        if method not in METHMAP:
raise InvalidArgumentException("Unknown HTTP Method", method)
imeth = METHMAP[method]
return self._admin_bucket._http_request(
type=_LCB.LCB_HTTP_TYPE_ANALYTICS,
path=kwargs['path'],
method=imeth,
content_type=kwargs.get(
'content_type', 'application/x-www-form-urlencoded'),
post_data=kwargs.get('content', None),
response_format=_LCB.FMT_JSON,
timeout=kwargs.get('timeout', None))
def _scrub_dataverse_name(self, dataverse_name):
tokens = dataverse_name.split("/")
return "`" + "`.`".join(tokens) + "`"
def create_dataverse(self,
dataverse_name, # type: str
options=None, # type: CreateDataverseOptions
**kwargs
):
# type: (...) -> None
"""
:param str dataverse_name: Name of the dataverse to create.
:param CreateDataverseOptions options: Options for dataverse creation.
:param Any kwargs: Override corresponding value in options.
:return: None
:raises DataverseAlreadyExistsException
:raises InvalidArgumentsException
:raises CouchbaseException
"""
if not options:
options = CreateDataverseOptions()
ignore = options.ignore_if_exists or kwargs.get(
"ignore_if_exists", False)
if_not_exists_clause = "IF NOT EXISTS"
query = "CREATE DATAVERSE {} {};".format(self._scrub_dataverse_name(
dataverse_name), if_not_exists_clause if ignore else "")
#print("create_dataverse query: {}".format(query))
self._cluster.analytics_query(
query, AnalyticsIndexManager._to_analytics_options(options)).rows()
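    # A minimal usage sketch (illustrative; 'mgr' is assumed to be an
    # AnalyticsIndexManager obtained from a connected cluster):
    #
    #     mgr.create_dataverse('test/dataverse',
    #                          CreateDataverseOptions(ignore_if_exists=True))
    #
    # Dataverse names containing '/' are escaped into compound names by
    # _scrub_dataverse_name() before being placed in the statement.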
def drop_dataverse(self,
dataverse_name, # type: str
options=None, # type: DropDataverseOptions
**kwargs
):
# type: (...) -> None
if not options:
options = DropDataverseOptions()
ignore = options.ignore_if_not_exists or kwargs.get(
"ignore_if_not_exists", False)
if_exists_clause = "IF EXISTS"
query = "DROP DATAVERSE {} {};".format(self._scrub_dataverse_name(
dataverse_name), if_exists_clause if ignore else "")
#print("drop dataverse query: {}".format(query))
self._cluster.analytics_query(
query, AnalyticsIndexManager._to_analytics_options(options)).rows()
def create_dataset(self,
dataset_name, # type: str
bucket_name, # type: str
options=None, # type: CreateDatasetOptions
**kwargs):
# type: (...) -> None
if not options:
options = CreateDatasetOptions()
ignore = kwargs.get('ignore_if_exists', options.ignore_if_exists)
dataverse_name = kwargs.get('dataverse_name', options.dataverse_name)
if_not_exists_clause = "IF NOT EXISTS"
where_clause = kwargs.get('condition', options.condition)
if where_clause:
where_clause = "WHERE {}".format(where_clause)
query = "USE {}; CREATE DATASET {} `{}` ON `{}` {};" .format(self._scrub_dataverse_name(dataverse_name),
if_not_exists_clause if ignore else "",
dataset_name,
bucket_name,
where_clause,
)
#print("create_dataset n1ql: {}".format(query))
self._cluster.analytics_query(
query, AnalyticsIndexManager._to_analytics_options(options)).rows()
def drop_dataset(self,
dataset_name, # type: str
options=None, # type: DropDatasetOptions
**kwargs
):
# type: (...) -> None
"""
Drop a dataset.
:param str dataset_name: Name of dataset to drop.
:param DropDatasetOptions options: Options for the drop request.
:param Any kwargs: Override corresponding value in options.
:return: None
"""
if not options:
options = DropDatasetOptions()
dataverse_name = kwargs.get('dataverse_name', options.dataverse_name)
ignore = kwargs.get('ignore_if_not_exists',
options.ignore_if_not_exists)
if_exists_clause = ""
if ignore:
if_exists_clause = "IF EXISTS"
query = "USE {}; DROP DATASET `{}` {};".format(
self._scrub_dataverse_name(dataverse_name), dataset_name, if_exists_clause)
self._cluster.analytics_query(
query, options.to_analytics_options()).rows()
def get_all_datasets(self,
options=None # type: GetAllDatasetsOptions
):
# type: (...) -> Iterable[AnalyticsDataset]
"""
Get all the datasets in the cluster. Note we don't return the Metadata dataset, but we do return the Default
dataset.
:return Iterable[AnalyticsDataset]: The datasets, in an iterable.
"""
if not options:
options = GetAllDatasetsOptions()
query = 'SELECT d.* FROM Metadata.`Dataset` d WHERE d.DataverseName <> "Metadata"'
result = self._cluster.analytics_query(
query, options.to_analytics_options())
return_val = []
for r in result.rows():
return_val.append(AnalyticsDataset(**r))
return return_val
def create_index(self,
index_name, # type: str
dataset_name, # type: str
fields, # type:Dict[str, AnalyticsDataType]
options=None, # type: CreateAnalyticsIndexOptions
**kwargs
):
# type: (...) -> None
"""
Create Index on a dataset, over a set of fields.
:param str index_name: Name for the index
:param str dataset_name: Name of dataset to use for the index
        :param Dict[str, AnalyticsDataType] fields: Dict containing the name of the field (as the key) and
            the type of the field (as the value)
        :param CreateAnalyticsIndexOptions options: Options for creating the index.
:param kwargs: Override corresponding value in options.
:return: None
"""
if not options:
options = CreateAnalyticsIndexOptions()
ignore = kwargs.get('ignore_if_exists', options.ignore_if_exists)
dataverse_name = kwargs.get('dataverse_name', options.dataverse_name)
if_not_exists_clause = ''
if ignore:
if_not_exists_clause = "IF NOT EXISTS"
fields_clause = ", "
fields_clause = fields_clause.join(
["{}: {}".format(k, v.value) for k, v in fields.items()])
statement = "CREATE INDEX `{}` {} ON {}.`{}` ({});".format(index_name, if_not_exists_clause, self._scrub_dataverse_name(dataverse_name),
dataset_name, fields_clause)
#print("create index statement: {}".format(statement))
self._cluster.analytics_query(
statement, options.to_analytics_options()).rows()
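    # A minimal usage sketch (illustrative; 'mgr' is an AnalyticsIndexManager
    # and AnalyticsDataType is assumed to expose members such as STRING whose
    # .value is the Analytics type name):
    #
    #     mgr.create_index('idx_city', 'beers',
    #                      {'city': AnalyticsDataType.STRING},
    #                      CreateAnalyticsIndexOptions(ignore_if_exists=True))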
def drop_index(self,
index_name, # type: str
dataset_name, # type: str
options=None, # type: DropAnalyticsIndexOptions
**kwargs):
# type: (...) -> None
"""
Drop specified index.
:param str index_name: Name of index to drop.
:param str dataset_name: Name of the dataset this index was created on.
:param DropAnalyticsIndexOptions options: Options for dropping index.
:param Any kwargs: Override corresponding value in options.
:return: None.
"""
if not options:
options = DropAnalyticsIndexOptions()
dataverse_name = kwargs.get('dataverse_name', options.dataverse_name)
ignore = kwargs.get('ignore_if_not_exists',
options.ignore_if_not_exists)
if_exists_clause = ""
if ignore:
if_exists_clause = "IF EXISTS"
statement = 'DROP INDEX {} {}.`{}`.`{}`'.format(
if_exists_clause, self._scrub_dataverse_name(dataverse_name), dataset_name, index_name)
self._cluster.analytics_query(
statement, options.to_analytics_options(**kwargs)).rows()
def get_all_indexes(self,
options=None, # type: GetAllAnalyticsIndexesOptions
**kwargs
):
# (...) -> Iterable[AnalyticsIndex]
"""
Get all analytics indexes in the cluster.
:param GetAllAnalyticsIndexesOptions options: Options for getting all analytics indexes.
:param Any kwargs: Override corresponding value in options.
:return:
"""
if not options:
options = GetAllAnalyticsIndexesOptions()
statement = 'SELECT * FROM Metadata.`Index` WHERE DataverseName <> "Metadata";'
result = self._cluster.analytics_query(
statement, options.to_analytics_options())
return_val = []
for r in result.rows():
return_val.append(AnalyticsIndex(**r))
return return_val
def connect_link(self,
options=None, # type: ConnectLinkOptions
**kwargs
):
# type: (...) -> None
"""
Connect a link.
:param ConnectLinkOptions options: Options to connect a link.
:param Any kwargs: Override corresponding value in options.
:return:
"""
if not options:
options = ConnectLinkOptions()
dataverse_name = kwargs.get('dataverse_name', options.dataverse_name)
link_name = kwargs.get('link_name', options.link_name)
force = kwargs.get('force', options.force)
force_clause = ""
if force:
force_clause = "WITH force: true"
statement = 'USE {}; CONNECT LINK {} {};'.format(
self._scrub_dataverse_name(dataverse_name), link_name, force_clause)
self._cluster.analytics_query(
statement, options.to_analytics_options(**kwargs)).rows()
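    # A minimal usage sketch (illustrative): connect the default local link of
    # a dataverse, forcing reconnection if it is already connected.
    #
    #     mgr.connect_link(ConnectLinkOptions(dataverse_name='Default',
    #                                         link_name='Local', force=True))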
def disconnect_link(self,
options=None, # type: DisconnectLinkOptions
**kwargs
):
# type: (...) -> None
"""
Disconnect a link.
:param DisconnectLinkOptions options: Options to disconnect a link.
:param Any kwargs: Override corresponding value in options.
:return:
"""
if not options:
options = DisconnectLinkOptions()
dataverse_name = kwargs.get('dataverse_name', options.dataverse_name)
link_name = kwargs.get('link_name', options.link_name)
statement = 'USE {}; DISCONNECT LINK {};'.format(
self._scrub_dataverse_name(dataverse_name), link_name)
self._cluster.analytics_query(
statement, options.to_analytics_options(**kwargs)).rows()
def get_pending_mutations(self,
options=None, # type: GetPendingMutationsOptions
**kwargs
):
# type: (...) -> Dict[string, int]
if not options:
options = GetPendingMutationsOptions()
try:
return self._cluster._admin._http_request(type=_LCB.LCB_HTTP_TYPE_ANALYTICS,
method=_LCB.LCB_HTTP_METHOD_GET,
path="analytics/node/agg/stats/remaining"
).value
except CouchbaseException as e:
extra = getattr(e, 'objextra', None)
if extra:
if int(getattr(extra, 'http_status', None)) == 404:
raise NotSupportedException(
"get pending mutations not supported")
raise e
@AnalyticsIndexErrorHandler.mgmt_exc_wrap
def create_link(
self, # type: "AnalyticsIndexManager"
link, # type: "AnalyticsLink"
*options, # type: CreateLinkAnalyticsOptions
**kwargs
):
"""Creates a new analytics link
:param link: the link to create
:param options: CreateLinkAnalyticsOptions to create a link.
:param kwargs: Override corresponding value in options.
:raises: AnalyticsLinkExistsException
:raises: DataverseNotFoundException
:raises: InvalidArgumentException
"""
link.validate()
if "/" in link.dataverse_name():
path = "/analytics/link/{}/{}".format(
ulp.quote(link.dataverse_name(), safe=''), link.name())
else:
path = "/analytics/link"
self._http_request(
path=path,
method="POST",
content=link.form_encode(),
**forward_args(kwargs, *options))
@AnalyticsIndexErrorHandler.mgmt_exc_wrap
def replace_link(
self, # type: "AnalyticsIndexManager"
link, # type: "AnalyticsLink"
*options, # type: ReplaceLinkAnalyticsOptions
**kwargs
):
"""Replaces an existing analytics link
:param link: the link to replace
        :param options: ReplaceLinkAnalyticsOptions to replace the link.
:param kwargs: Override corresponding value in options.
:raises: AnalyticsLinkNotFoundException
:raises: DataverseNotFoundException
:raises: InvalidArgumentException
"""
link.validate()
if "/" in link.dataverse_name():
path = "/analytics/link/{}/{}".format(
ulp.quote(link.dataverse_name(), safe=''), link.name())
else:
path = "/analytics/link"
self._http_request(
path=path,
method="PUT",
content=link.form_encode(),
**forward_args(kwargs, *options))
@AnalyticsIndexErrorHandler.mgmt_exc_wrap
def drop_link(
self, # type: "AnalyticsIndexManager"
link_name, # type: str
dataverse_name, # type: str
*options, # type: DropLinkAnalyticsOptions
**kwargs
):
"""Drops an existing analytics link from provided dataverse
:param link_name: The name of the link to drop
:param dataverse_name: The name of the dataverse in which the link belongs
        :param options: DropLinkAnalyticsOptions to drop the link.
:param kwargs: Override corresponding value in options.
:raises: AnalyticsLinkNotFoundException
:raises: DataverseNotFoundException
"""
content = None
if "/" in dataverse_name:
path = "/analytics/link/{}/{}".format(
ulp.quote(dataverse_name, safe=''), link_name)
else:
path = "/analytics/link"
content = mk_formstr({
"dataverse": dataverse_name,
"name": link_name,
})
self._http_request(
path=path,
method="DELETE",
content=content,
**forward_args(kwargs, *options))
@AnalyticsIndexErrorHandler.mgmt_exc_wrap
def get_links(
self, # type: "AnalyticsIndexManager"
*options, # type: GetLinksAnalyticsOptions
**kwargs
) -> List[AnalyticsLink]:
"""Gets existing analytics links
        :param options: GetLinksAnalyticsOptions to filter the links returned.
:param kwargs: Override corresponding value in options.
:raises: DataverseNotFoundException
:raises: InvalidArgumentException
"""
path = "analytics/link"
final_args = forward_args(kwargs, *options)
link_type = final_args.pop("link_type", None)
link_name = final_args.pop("name", None)
dataverse_name = final_args.pop("dataverse_name", None)
if dataverse_name is not None:
if "/" in dataverse_name:
path += "/{}".format(ulp.quote(dataverse_name, safe=''))
if link_name is not None:
path += "/{}".format(link_name)
else:
path += "?dataverse={}".format(dataverse_name)
if link_name is not None:
path += "&name={}".format(link_name)
else:
if link_name is not None:
raise InvalidArgumentException(
"Both the link name and the dataverse name must be set.")
        if link_type is not None:
            # use '&' if a query string has already been started above
            path += "{}type={}".format("&" if "?" in path else "?",
                                       link_type.value)
links = self._http_request(
path=path,
method="GET",
**final_args).value
analytics_links = []
for link in links:
if link["type"] == AnalyticsLinkType.CouchbaseRemote.value:
analytics_links.append(
CouchbaseRemoteAnalyticsLink.link_from_server_json(link))
elif link["type"] == AnalyticsLinkType.S3External.value:
analytics_links.append(
S3ExternalAnalyticsLink.link_from_server_json(link))
if link["type"] == AnalyticsLinkType.AzureBlobExternal.value:
analytics_links.append(
AzureBlobExternalAnalyticsLink.link_from_server_json(link))
return analytics_links
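# A minimal usage sketch, not part of the SDK itself: list the analytics links of a
# dataverse with the manager defined above and keep only the Couchbase remote ones.
# The manager instance and the dataverse name are placeholders; option values are
# supplied as keyword overrides, which forward_args() merges into the final arguments.
def _example_list_remote_links(mgr, dataverse="travel-sample"):
    links = mgr.get_links(dataverse_name=dataverse)
    return [link for link in links
            if isinstance(link, CouchbaseRemoteAnalyticsLink)]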
| |
"""
Python VXI-11 driver
Copyright (c) 2012-2017 Alex Forencich and Michael Walle
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from . import rpc
import random
import re
import struct
import time
# VXI-11 RPC constants
# Device async
DEVICE_ASYNC_PROG = 0x0607b0
DEVICE_ASYNC_VERS = 1
DEVICE_ABORT = 1
# Device core
DEVICE_CORE_PROG = 0x0607af
DEVICE_CORE_VERS = 1
CREATE_LINK = 10
DEVICE_WRITE = 11
DEVICE_READ = 12
DEVICE_READSTB = 13
DEVICE_TRIGGER = 14
DEVICE_CLEAR = 15
DEVICE_REMOTE = 16
DEVICE_LOCAL = 17
DEVICE_LOCK = 18
DEVICE_UNLOCK = 19
DEVICE_ENABLE_SRQ = 20
DEVICE_DOCMD = 22
DESTROY_LINK = 23
CREATE_INTR_CHAN = 25
DESTROY_INTR_CHAN = 26
# Device intr
DEVICE_INTR_PROG = 0x0607b1
DEVICE_INTR_VERS = 1
DEVICE_INTR_SRQ = 30
# Error states
ERR_NO_ERROR = 0
ERR_SYNTAX_ERROR = 1
ERR_DEVICE_NOT_ACCESSIBLE = 3
ERR_INVALID_LINK_IDENTIFIER = 4
ERR_PARAMETER_ERROR = 5
ERR_CHANNEL_NOT_ESTABLISHED = 6
ERR_OPERATION_NOT_SUPPORTED = 8
ERR_OUT_OF_RESOURCES = 9
ERR_DEVICE_LOCKED_BY_ANOTHER_LINK = 11
ERR_NO_LOCK_HELD_BY_THIS_LINK = 12
ERR_IO_TIMEOUT = 15
ERR_IO_ERROR = 17
ERR_INVALID_ADDRESS = 21
ERR_ABORT = 23
ERR_CHANNEL_ALREADY_ESTABLISHED = 29
# Flags
OP_FLAG_WAIT_BLOCK = 1
OP_FLAG_END = 8
OP_FLAG_TERMCHAR_SET = 128
RX_REQCNT = 1
RX_CHR = 2
RX_END = 4
# IEEE 488.1 interface device commands
CMD_SEND_COMMAND = 0x020000
CMD_BUS_STATUS = 0x020001
CMD_ATN_CTRL = 0x020002
CMD_REN_CTRL = 0x020003
CMD_PASS_CTRL = 0x020004
CMD_BUS_ADDRESS = 0x02000A
CMD_IFC_CTRL = 0x020010
CMD_BUS_STATUS_REMOTE = 1
CMD_BUS_STATUS_SRQ = 2
CMD_BUS_STATUS_NDAC = 3
CMD_BUS_STATUS_SYSTEM_CONTROLLER = 4
CMD_BUS_STATUS_CONTROLLER_IN_CHARGE = 5
CMD_BUS_STATUS_TALKER = 6
CMD_BUS_STATUS_LISTENER = 7
CMD_BUS_STATUS_BUS_ADDRESS = 8
GPIB_CMD_GTL = 0x01 # go to local
GPIB_CMD_SDC = 0x04 # selected device clear
GPIB_CMD_PPC = 0x05 # parallel poll config
GPIB_CMD_GET = 0x08 # group execute trigger
GPIB_CMD_TCT = 0x09 # take control
GPIB_CMD_LLO = 0x11 # local lockout
GPIB_CMD_DCL = 0x14 # device clear
GPIB_CMD_PPU = 0x15 # parallel poll unconfigure
GPIB_CMD_SPE = 0x18 # serial poll enable
GPIB_CMD_SPD = 0x19 # serial poll disable
GPIB_CMD_LAD = 0x20 # listen address (base)
GPIB_CMD_UNL = 0x3F # unlisten
GPIB_CMD_TAD = 0x40 # talk address (base)
GPIB_CMD_UNT = 0x5F # untalk
GPIB_CMD_SAD = 0x60 # my secondary address (base)
GPIB_CMD_PPE = 0x60 # parallel poll enable (base)
GPIB_CMD_PPD = 0x70 # parallel poll disable
def parse_visa_resource_string(resource_string):
# valid resource strings:
# TCPIP::10.0.0.1::INSTR
# TCPIP0::10.0.0.1::INSTR
# TCPIP::10.0.0.1::gpib,5::INSTR
# TCPIP0::10.0.0.1::gpib,5::INSTR
# TCPIP0::10.0.0.1::usb0::INSTR
# TCPIP0::10.0.0.1::usb0[1234::5678::MYSERIAL::0]::INSTR
    m = re.match(r'^(?P<prefix>(?P<type>TCPIP)\d*)(::(?P<arg1>[^\s:]+))'
                 r'(::(?P<arg2>[^\s:]+(\[.+\])?))?(::(?P<suffix>INSTR))$',
                 resource_string, re.I)
if m is not None:
return dict(
type = m.group('type').upper(),
prefix = m.group('prefix'),
arg1 = m.group('arg1'),
arg2 = m.group('arg2'),
suffix = m.group('suffix'),
)
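# For example, parse_visa_resource_string("TCPIP0::10.0.0.1::gpib,5::INSTR") returns
# {'type': 'TCPIP', 'prefix': 'TCPIP0', 'arg1': '10.0.0.1', 'arg2': 'gpib,5',
#  'suffix': 'INSTR'}; strings that do not match the pattern yield None.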
# Exceptions
class Vxi11Exception(Exception):
em = {0: "No error",
1: "Syntax error",
3: "Device not accessible",
4: "Invalid link identifier",
5: "Parameter error",
6: "Channel not established",
8: "Operation not supported",
9: "Out of resources",
11: "Device locked by another link",
12: "No lock held by this link",
15: "IO timeout",
17: "IO error",
21: "Invalid address",
23: "Abort",
29: "Channel already established"}
def __init__(self, err = None, note = None):
self.err = err
self.note = note
self.msg = ''
if err is None:
self.msg = note
else:
if type(err) is int:
if err in self.em:
self.msg = "%d: %s" % (err, self.em[err])
else:
self.msg = "%d: Unknown error" % err
else:
self.msg = err
if note is not None:
self.msg = "%s [%s]" % (self.msg, note)
def __str__(self):
return self.msg
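# For example, str(Vxi11Exception(15, 'read')) yields "15: IO timeout [read]", and an
# unrecognized numeric code falls back to "<code>: Unknown error".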
class Packer(rpc.Packer):
def pack_device_link(self, link):
self.pack_int(link)
def pack_create_link_parms(self, params):
id, lock_device, lock_timeout, device = params
self.pack_int(id)
self.pack_bool(lock_device)
self.pack_uint(lock_timeout)
self.pack_string(device)
def pack_device_write_parms(self, params):
link, timeout, lock_timeout, flags, data = params
self.pack_int(link)
self.pack_uint(timeout)
self.pack_uint(lock_timeout)
self.pack_int(flags)
self.pack_opaque(data)
def pack_device_read_parms(self, params):
link, request_size, timeout, lock_timeout, flags, term_char = params
self.pack_int(link)
self.pack_uint(request_size)
self.pack_uint(timeout)
self.pack_uint(lock_timeout)
self.pack_int(flags)
self.pack_int(term_char)
def pack_device_generic_parms(self, params):
link, flags, lock_timeout, timeout = params
self.pack_int(link)
self.pack_int(flags)
self.pack_uint(lock_timeout)
self.pack_uint(timeout)
def pack_device_remote_func_parms(self, params):
host_addr, host_port, prog_num, prog_vers, prog_family = params
self.pack_uint(host_addr)
self.pack_uint(host_port)
self.pack_uint(prog_num)
self.pack_uint(prog_vers)
self.pack_int(prog_family)
def pack_device_enable_srq_parms(self, params):
link, enable, handle = params
self.pack_int(link)
self.pack_bool(enable)
if len(handle) > 40:
raise Vxi11Exception("array length too long")
self.pack_opaque(handle)
def pack_device_lock_parms(self, params):
link, flags, lock_timeout = params
self.pack_int(link)
self.pack_int(flags)
self.pack_uint(lock_timeout)
def pack_device_docmd_parms(self, params):
link, flags, timeout, lock_timeout, cmd, network_order, datasize, data_in = params
self.pack_int(link)
self.pack_int(flags)
self.pack_uint(timeout)
self.pack_uint(lock_timeout)
self.pack_int(cmd)
self.pack_bool(network_order)
self.pack_int(datasize)
self.pack_opaque(data_in)
def pack_device_error(self, error):
self.pack_int(error)
def pack_device_srq_parms(self, params):
handle = params
self.pack_opaque(handle)
def pack_create_link_resp(self, params):
error, link, abort_port, max_recv_size = params
self.pack_int(error)
self.pack_int(link)
self.pack_uint(abort_port)
self.pack_uint(max_recv_size)
def pack_device_write_resp(self, params):
error, size = params
self.pack_int(error)
self.pack_uint(size)
def pack_device_read_resp(self, params):
error, reason, data = params
self.pack_int(error)
self.pack_int(reason)
self.pack_opaque(data)
def pack_device_read_stb_resp(self, params):
error, stb = params
self.pack_int(error)
self.pack_uint(stb)
def pack_device_docmd_resp(self, params):
error, data_out = params
self.pack_int(error)
self.pack_opaque(data_out)
class Unpacker(rpc.Unpacker):
def unpack_device_link(self):
return self.unpack_int()
def unpack_create_link_parms(self):
id = self.unpack_int()
lock_device = self.unpack_bool()
lock_timeout = self.unpack_uint()
device = self.unpack_string()
return id, lock_device, lock_timeout, device
def unpack_device_write_parms(self):
link = self.unpack_int()
timeout = self.unpack_uint()
lock_timeout = self.unpack_uint()
flags = self.unpack_int()
data = self.unpack_opaque()
return link, timeout, lock_timeout, flags, data
def unpack_device_read_parms(self):
link = self.unpack_int()
request_size = self.unpack_uint()
timeout = self.unpack_uint()
lock_timeout = self.unpack_uint()
flags = self.unpack_int()
term_char = self.unpack_int()
return link, request_size, timeout, lock_timeout, flags, term_char
def unpack_device_generic_parms(self):
link = self.unpack_int()
flags = self.unpack_int()
lock_timeout = self.unpack_uint()
timeout = self.unpack_uint()
return link, flags, lock_timeout, timeout
def unpack_device_remote_func_parms(self):
host_addr = self.unpack_uint()
host_port = self.unpack_uint()
prog_num = self.unpack_uint()
prog_vers = self.unpack_uint()
prog_family = self.unpack_int()
return host_addr, host_port, prog_num, prog_vers, prog_family
def unpack_device_enable_srq_parms(self):
link = self.unpack_int()
enable = self.unpack_bool()
handle = self.unpack_opaque()
return link, enable, handle
def unpack_device_lock_parms(self):
link = self.unpack_int()
flags = self.unpack_int()
lock_timeout = self.unpack_uint()
return link, flags, lock_timeout
def unpack_device_docmd_parms(self):
link = self.unpack_int()
flags = self.unpack_int()
timeout = self.unpack_uint()
lock_timeout = self.unpack_uint()
cmd = self.unpack_int()
network_order = self.unpack_bool()
datasize = self.unpack_int()
data_in = self.unpack_opaque()
return link, flags, timeout, lock_timeout, cmd, network_order, datasize, data_in
def unpack_device_error(self):
return self.unpack_int()
def unpack_device_srq_params(self):
handle = self.unpack_opaque()
return handle
def unpack_create_link_resp(self):
error = self.unpack_int()
link = self.unpack_int()
abort_port = self.unpack_uint()
max_recv_size = self.unpack_uint()
return error, link, abort_port, max_recv_size
def unpack_device_write_resp(self):
error = self.unpack_int()
size = self.unpack_uint()
return error, size
def unpack_device_read_resp(self):
error = self.unpack_int()
reason = self.unpack_int()
data = self.unpack_opaque()
return error, reason, data
def unpack_device_read_stb_resp(self):
error = self.unpack_int()
stb = self.unpack_uint()
return error, stb
def unpack_device_docmd_resp(self):
error = self.unpack_int()
data_out = self.unpack_opaque()
return error, data_out
def done(self):
# ignore any trailing bytes
pass
class CoreClient(rpc.TCPClient):
def __init__(self, host, port=0):
self.packer = Packer()
self.unpacker = Unpacker('')
rpc.TCPClient.__init__(self, host, DEVICE_CORE_PROG, DEVICE_CORE_VERS, port)
def create_link(self, id, lock_device, lock_timeout, name):
params = (id, lock_device, lock_timeout, name)
return self.make_call(CREATE_LINK, params,
self.packer.pack_create_link_parms,
self.unpacker.unpack_create_link_resp)
def device_write(self, link, timeout, lock_timeout, flags, data):
params = (link, timeout, lock_timeout, flags, data)
return self.make_call(DEVICE_WRITE, params,
self.packer.pack_device_write_parms,
self.unpacker.unpack_device_write_resp)
def device_read(self, link, request_size, timeout, lock_timeout, flags, term_char):
params = (link, request_size, timeout, lock_timeout, flags, term_char)
return self.make_call(DEVICE_READ, params,
self.packer.pack_device_read_parms,
self.unpacker.unpack_device_read_resp)
def device_read_stb(self, link, flags, lock_timeout, timeout):
params = (link, flags, lock_timeout, timeout)
return self.make_call(DEVICE_READSTB, params,
self.packer.pack_device_generic_parms,
self.unpacker.unpack_device_read_stb_resp)
def device_trigger(self, link, flags, lock_timeout, timeout):
params = (link, flags, lock_timeout, timeout)
return self.make_call(DEVICE_TRIGGER, params,
self.packer.pack_device_generic_parms,
self.unpacker.unpack_device_error)
def device_clear(self, link, flags, lock_timeout, timeout):
params = (link, flags, lock_timeout, timeout)
return self.make_call(DEVICE_CLEAR, params,
self.packer.pack_device_generic_parms,
self.unpacker.unpack_device_error)
def device_remote(self, link, flags, lock_timeout, timeout):
params = (link, flags, lock_timeout, timeout)
return self.make_call(DEVICE_REMOTE, params,
self.packer.pack_device_generic_parms,
self.unpacker.unpack_device_error)
def device_local(self, link, flags, lock_timeout, timeout):
params = (link, flags, lock_timeout, timeout)
return self.make_call(DEVICE_LOCAL, params,
self.packer.pack_device_generic_parms,
self.unpacker.unpack_device_error)
def device_lock(self, link, flags, lock_timeout):
params = (link, flags, lock_timeout)
return self.make_call(DEVICE_LOCK, params,
self.packer.pack_device_lock_parms,
self.unpacker.unpack_device_error)
def device_unlock(self, link):
return self.make_call(DEVICE_UNLOCK, link,
self.packer.pack_device_link,
self.unpacker.unpack_device_error)
def device_enable_srq(self, link, enable, handle):
params = (link, enable, handle)
return self.make_call(DEVICE_ENABLE_SRQ, params,
self.packer.pack_device_enable_srq_parms,
self.unpacker.unpack_device_error)
def device_docmd(self, link, flags, timeout, lock_timeout, cmd, network_order, datasize, data_in):
params = (link, flags, timeout, lock_timeout, cmd, network_order, datasize, data_in)
return self.make_call(DEVICE_DOCMD, params,
self.packer.pack_device_docmd_parms,
self.unpacker.unpack_device_docmd_resp)
def destroy_link(self, link):
return self.make_call(DESTROY_LINK, link,
self.packer.pack_device_link,
self.unpacker.unpack_device_error)
def create_intr_chan(self, host_addr, host_port, prog_num, prog_vers, prog_family):
params = (host_addr, host_port, prog_num, prog_vers, prog_family)
return self.make_call(CREATE_INTR_CHAN, params,
self.packer.pack_device_remote_func_parms,
self.unpacker.unpack_device_error)
def destroy_intr_chan(self):
return self.make_call(DESTROY_INTR_CHAN, None,
None,
self.unpacker.unpack_device_error)
class AbortClient(rpc.TCPClient):
def __init__(self, host, port=0):
self.packer = Packer()
self.unpacker = Unpacker('')
rpc.TCPClient.__init__(self, host, DEVICE_ASYNC_PROG, DEVICE_ASYNC_VERS, port)
def device_abort(self, link):
return self.make_call(DEVICE_ABORT, link,
self.packer.pack_device_link,
self.unpacker.unpack_device_error)
def list_devices(ip=None, timeout=1):
"Detect VXI-11 devices on network"
if ip is None:
ip = ['255.255.255.255']
if type(ip) is str:
ip = [ip]
hosts = []
for addr in ip:
pmap = rpc.BroadcastUDPPortMapperClient(addr)
pmap.set_timeout(timeout)
resp = pmap.get_port((DEVICE_CORE_PROG, DEVICE_CORE_VERS, rpc.IPPROTO_TCP, 0))
l = [r[1][0] for r in resp if r[0] > 0]
hosts.extend(l)
return sorted(hosts, key=lambda ip: tuple(int(part) for part in ip.split('.')))
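# For example, list_devices() broadcasts a portmap GETPORT request for the VXI-11 core
# program to 255.255.255.255 by default and returns the responding host addresses
# sorted numerically; pass a specific broadcast address such as "192.168.1.255" (or a
# list of addresses) to limit the scan to one subnet.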
def list_resources(ip=None, timeout=1):
"List resource strings for all detected VXI-11 devices"
res = []
for host in list_devices(ip, timeout):
try:
# try connecting as an instrument
instr = Instrument(host)
instr.open()
res.append("TCPIP::%s::INSTR" % host)
except:
try:
# try connecting as a GPIB interface
intf_dev = InterfaceDevice(host)
# enumerate connected devices
devs = intf_dev.find_listeners()
res.extend(['TCPIP::%s::gpib0,%d::INSTR' % (host, d) for d in devs])
except:
# if that fails, just list the host
res.append("TCPIP::%s::INSTR" % host)
return res
class Device(object):
"VXI-11 device interface client"
def __init__(self, host, name = None, client_id = None, term_char = None):
"Create new VXI-11 device object"
if host.upper().startswith('TCPIP') and '::' in host:
res = parse_visa_resource_string(host)
if res is None:
raise Vxi11Exception('Invalid resource string', 'init')
host = res['arg1']
name = res['arg2']
if name is None:
name = "inst0"
if client_id is None:
client_id = random.getrandbits(31)
self.client = None
self.abort_client = None
self.host = host
self.name = name
self.client_id = client_id
self.term_char = term_char
self.lock_timeout = 10
self.timeout = 10
self.abort_port = 0
self.link = None
self.max_recv_size = 0
self.max_read_len = 128*1024*1024
self.locked = False
def __del__(self):
if self.link is not None:
self.close()
@property
def timeout(self):
return self._timeout
@timeout.setter
def timeout(self, val):
self._timeout = val
self._timeout_ms = int(val * 1000)
if self.client is not None:
self.client.sock.settimeout(self.timeout+1)
if self.abort_client is not None:
self.abort_client.sock.settimeout(self.timeout+1)
@property
def lock_timeout(self):
return self._lock_timeout
@lock_timeout.setter
def lock_timeout(self, val):
self._lock_timeout = val
self._lock_timeout_ms = int(val * 1000)
def open(self):
"Open connection to VXI-11 device"
if self.link is not None:
return
if self.client is None:
self.client = CoreClient(self.host)
self.client.sock.settimeout(self.timeout+1)
error, link, abort_port, max_recv_size = self.client.create_link(
self.client_id,
0,
self._lock_timeout_ms,
self.name.encode("utf-8")
)
if error:
raise Vxi11Exception(error, 'open')
self.abort_port = abort_port
self.link = link
self.max_recv_size = min(max_recv_size, 1024*1024)
def close(self):
"Close connection"
if self.link is None:
return
self.client.destroy_link(self.link)
self.client.close()
self.link = None
self.client = None
def abort(self):
"Asynchronous abort"
if self.link is None:
self.open()
if self.abort_client is None:
self.abort_client = AbortClient(self.host, self.abort_port)
self.abort_client.sock.settimeout(self.timeout)
error = self.abort_client.device_abort(self.link)
if error:
raise Vxi11Exception(error, 'abort')
def write_raw(self, data):
"Write binary data to instrument"
if self.link is None:
self.open()
        flags = 0
        if self.term_char is not None:
            flags = OP_FLAG_TERMCHAR_SET
            term_char = str(self.term_char).encode('utf-8')[0:1]
            data += term_char
num = len(data)
offset = 0
while num > 0:
if num <= self.max_recv_size:
flags |= OP_FLAG_END
block = data[offset:offset+self.max_recv_size]
error, size = self.client.device_write(
self.link,
self._timeout_ms,
self._lock_timeout_ms,
flags,
block
)
if error:
raise Vxi11Exception(error, 'write')
elif size < len(block):
raise Vxi11Exception("did not write complete block", 'write')
offset += size
num -= size
def read_raw(self, num=-1):
"Read binary data from instrument"
if self.link is None:
self.open()
read_len = self.max_read_len
if num > 0:
read_len = min(num, self.max_read_len)
flags = 0
reason = 0
term_char = 0
if self.term_char is not None:
flags = OP_FLAG_TERMCHAR_SET
term_char = str(self.term_char).encode('utf-8')[0]
read_data = bytearray()
while reason & (RX_END | RX_CHR) == 0:
error, reason, data = self.client.device_read(
self.link,
read_len,
self._timeout_ms,
self._lock_timeout_ms,
flags,
term_char
)
if error:
raise Vxi11Exception(error, 'read')
read_data.extend(data)
if num > 0:
num = num - len(data)
if num <= 0:
break
if num < read_len:
read_len = num
return bytes(read_data)
def ask_raw(self, data, num=-1):
"Write then read binary data"
self.write_raw(data)
return self.read_raw(num)
def write(self, message, encoding = 'utf-8'):
"Write string to instrument"
if type(message) is tuple or type(message) is list:
# recursive call for a list of commands
for message_i in message:
self.write(message_i, encoding)
return
self.write_raw(str(message).encode(encoding))
def read(self, num=-1, encoding = 'utf-8'):
"Read string from instrument"
return self.read_raw(num).decode(encoding).rstrip('\r\n')
def ask(self, message, num=-1, encoding = 'utf-8'):
"Write then read string"
if type(message) is tuple or type(message) is list:
# recursive call for a list of commands
val = list()
for message_i in message:
val.append(self.ask(message_i, num, encoding))
return val
self.write(message, encoding)
return self.read(num, encoding)
def trigger(self):
"Send trigger command"
if self.link is None:
self.open()
flags = 0
error = self.client.device_trigger(
self.link,
flags,
self._lock_timeout_ms,
self._timeout_ms
)
if error:
raise Vxi11Exception(error, 'trigger')
def clear(self):
"Send clear command"
if self.link is None:
self.open()
flags = 0
error = self.client.device_clear(
self.link,
flags,
self._lock_timeout_ms,
self._timeout_ms
)
if error:
raise Vxi11Exception(error, 'clear')
def lock(self):
"Send lock command"
if self.link is None:
self.open()
flags = 0
error = self.client.device_lock(
self.link,
flags,
self._lock_timeout_ms
)
if error:
raise Vxi11Exception(error, 'lock')
self.locked = True
def unlock(self):
"Send unlock command"
if self.link is None:
self.open()
flags = 0
error = self.client.device_unlock(self.link)
if error:
raise Vxi11Exception(error, 'unlock')
self.locked = False
class InterfaceDevice(Device):
"VXI-11 IEEE 488.1 interface device interface client"
def __init__(self, host, name = None, client_id = None, term_char = None):
"Create new VXI-11 488.1 interface device object"
if host.upper().startswith('TCPIP') and '::' in host:
res = parse_visa_resource_string(host)
if res is None:
raise Vxi11Exception('Invalid resource string', 'init')
host = res['arg1']
name = res['arg2']
if name is None:
name = "gpib0"
super(InterfaceDevice, self).__init__(host, name, client_id, term_char)
self._bus_address = 0
def open(self):
"Open connection to VXI-11 device"
if self.link is not None:
return
if ',' in self.name:
raise Vxi11Exception("Cannot specify address for InterfaceDevice")
super(InterfaceDevice, self).open()
self._bus_address = self.get_bus_address()
def send_command(self, data):
"Send command"
if self.link is None:
self.open()
flags = 0
error, data_out = self.client.device_docmd(
self.link,
flags,
self._timeout_ms,
self._lock_timeout_ms,
CMD_SEND_COMMAND,
True,
1,
data
)
if error:
raise Vxi11Exception(error, 'send_command')
return data_out
def create_setup(self, address_list):
data = bytearray([self._bus_address | GPIB_CMD_TAD, GPIB_CMD_UNL])
if type(address_list) is int:
address_list = [address_list]
for addr in address_list:
if type(addr) is tuple:
if addr[0] < 0 or addr[0] > 30:
raise Vxi11Exception("Invalid address", 'create_setup')
data.append(addr[0] | GPIB_CMD_LAD)
if len(addr) > 1:
if addr[1] < 0 or addr[1] > 30:
raise Vxi11Exception("Invalid address", 'create_setup')
data.append(addr[1] | GPIB_CMD_SAD)
else:
if addr < 0 or addr > 30:
raise Vxi11Exception("Invalid address", 'create_setup')
data.append(addr | GPIB_CMD_LAD)
return bytes(data)
def send_setup(self, address_list):
"Send setup"
return self.send_command(self.create_setup(address_list))
def _bus_status(self, val):
"Bus status"
if self.link is None:
self.open()
flags = 0
error, data_out = self.client.device_docmd(
self.link,
flags,
self._timeout_ms,
self._lock_timeout_ms,
CMD_BUS_STATUS,
True,
2,
struct.pack('!H', val)
)
if error:
raise Vxi11Exception(error, 'bus_status')
return struct.unpack('!H', data_out)[0]
def test_ren(self):
"Read REN line"
return self._bus_status(CMD_BUS_STATUS_REMOTE)
def test_srq(self):
"Read SRQ line"
return self._bus_status(CMD_BUS_STATUS_SRQ)
def test_ndac(self):
"Read NDAC line"
return self._bus_status(CMD_BUS_STATUS_NDAC)
def is_system_controller(self):
"Check if interface device is a system controller"
return self._bus_status(CMD_BUS_STATUS_SYSTEM_CONTROLLER)
def is_controller_in_charge(self):
"Check if interface device is the controller-in-charge"
return self._bus_status(CMD_BUS_STATUS_CONTROLLER_IN_CHARGE)
def is_talker(self):
"Check if interface device is addressed as a talker"
return self._bus_status(CMD_BUS_STATUS_TALKER)
def is_listener(self):
"Check if interface device is addressed as a listener"
return self._bus_status(CMD_BUS_STATUS_LISTENER)
def get_bus_address(self):
"Get interface device bus address"
return self._bus_status(CMD_BUS_STATUS_BUS_ADDRESS)
def set_atn(self, val):
"Set ATN line"
if self.link is None:
self.open()
flags = 0
error, data_out = self.client.device_docmd(
self.link,
flags,
self._timeout_ms,
self._lock_timeout_ms,
CMD_ATN_CTRL,
True,
2,
struct.pack('!H', val)
)
if error:
raise Vxi11Exception(error, 'set_atn')
return struct.unpack('!H', data_out)[0]
def set_ren(self, val):
"Set REN line"
if self.link is None:
self.open()
flags = 0
error, data_out = self.client.device_docmd(
self.link,
flags,
self._timeout_ms,
self._lock_timeout_ms,
CMD_REN_CTRL,
True,
2,
struct.pack('!H', val)
)
if error:
raise Vxi11Exception(error, 'set_ren')
return struct.unpack('!H', data_out)[0]
def pass_control(self, addr):
"Pass control to another controller"
if addr < 0 or addr > 30:
raise Vxi11Exception("Invalid address", 'pass_control')
if self.link is None:
self.open()
flags = 0
error, data_out = self.client.device_docmd(
self.link,
flags,
self._timeout_ms,
self._lock_timeout_ms,
CMD_PASS_CTRL,
True,
4,
struct.pack('!L', addr)
)
if error:
raise Vxi11Exception(error, 'pass_control')
return struct.unpack('!L', data_out)[0]
def set_bus_address(self, addr):
"Set interface device bus address"
if addr < 0 or addr > 30:
raise Vxi11Exception("Invalid address", 'set_bus_address')
if self.link is None:
self.open()
flags = 0
error, data_out = self.client.device_docmd(
self.link,
flags,
self._timeout_ms,
self._lock_timeout_ms,
CMD_BUS_ADDRESS,
True,
4,
struct.pack('!L', addr)
)
if error:
raise Vxi11Exception(error, 'set_bus_address')
self._bus_address = addr
return struct.unpack('!L', data_out)[0]
def send_ifc(self):
"Send IFC"
if self.link is None:
self.open()
flags = 0
error, data_out = self.client.device_docmd(
self.link,
flags,
self._timeout_ms,
self._lock_timeout_ms,
CMD_IFC_CTRL,
True,
1,
b''
)
if error:
raise Vxi11Exception(error, 'send_ifc')
def find_listeners(self, address_list=None):
"Find devices"
if self.link is None:
self.open()
if address_list is None:
address_list = list(range(31))
address_list.remove(self._bus_address)
found = []
try:
self.lock()
for addr in address_list:
# check for listener at primary address
cmd = bytearray([GPIB_CMD_UNL, GPIB_CMD_UNT])
cmd.append(self._bus_address | GPIB_CMD_TAD) # spec says this is unnecessary, but doesn't appear to work without this
if type(addr) is tuple:
addr = addr[0]
if addr < 0 or addr > 30:
raise Vxi11Exception("Invalid address", 'find_listeners')
cmd.append(addr | GPIB_CMD_LAD)
self.send_command(cmd)
self.set_atn(False)
time.sleep(0.0015) # probably not necessary due to network delays
if self.test_ndac():
found.append(addr)
else:
# check for listener at any sub-address
cmd = bytearray([GPIB_CMD_UNL, GPIB_CMD_UNT])
cmd.append(self._bus_address | GPIB_CMD_TAD) # spec says this is unnecessary, but doesn't appear to work without this
cmd.append(addr | GPIB_CMD_LAD)
for sa in range(31):
cmd.append(sa | GPIB_CMD_SAD)
self.send_command(cmd)
self.set_atn(False)
time.sleep(0.0015) # probably not necessary due to network delays
if self.test_ndac():
# find specific sub-address
for sa in range(31):
cmd = bytearray([GPIB_CMD_UNL, GPIB_CMD_UNT])
cmd.append(self._bus_address | GPIB_CMD_TAD) # spec says this is unnecessary, but doesn't appear to work without this
cmd.append(addr | GPIB_CMD_LAD)
cmd.append(sa | GPIB_CMD_SAD)
self.send_command(cmd)
self.set_atn(False)
time.sleep(0.0015) # probably not necessary due to network delays
if self.test_ndac():
found.append((addr, sa))
self.unlock()
except:
self.unlock()
raise
return found
class Instrument(Device):
"VXI-11 instrument interface client"
def read_stb(self):
"Read status byte"
if self.link is None:
self.open()
flags = 0
error, stb = self.client.device_read_stb(
self.link,
flags,
self._lock_timeout_ms,
self._timeout_ms
)
if error:
raise Vxi11Exception(error, 'read_stb')
return stb
def remote(self):
"Send remote command"
if self.link is None:
self.open()
flags = 0
error = self.client.device_remote(
self.link,
flags,
self._lock_timeout_ms,
self._timeout_ms
)
if error:
raise Vxi11Exception(error, 'remote')
def local(self):
"Send local command"
if self.link is None:
self.open()
flags = 0
error = self.client.device_local(
self.link,
flags,
self._lock_timeout_ms,
self._timeout_ms
)
if error:
raise Vxi11Exception(error, 'local')
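# A minimal usage sketch (the resource string and SCPI command are placeholders, not
# taken from this module): open a link to a LAN instrument, query its identification
# string, and tear the link down again.
def _example_query_idn(resource="TCPIP::192.168.1.100::INSTR"):
    instr = Instrument(resource)
    instr.open()
    try:
        return instr.ask("*IDN?")
    finally:
        instr.close()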
| |
import asyncio
import logging
import re
import urllib
from dataclasses import dataclass, field
from time import sleep
from typing import Dict, Iterable, List, Union
from okta.client import Client as OktaClient
from okta.models import Group, GroupProfile, User, UserProfile, UserStatus
from datahub.configuration import ConfigModel
from datahub.ingestion.api.common import PipelineContext
from datahub.ingestion.api.source import Source, SourceReport
from datahub.ingestion.api.workunit import MetadataWorkUnit
from datahub.metadata.com.linkedin.pegasus2avro.metadata.snapshot import (
CorpGroupSnapshot,
CorpUserSnapshot,
)
from datahub.metadata.com.linkedin.pegasus2avro.mxe import MetadataChangeEvent
from datahub.metadata.schema_classes import ( # GroupMembershipClass,
CorpGroupInfoClass,
CorpUserInfoClass,
GroupMembershipClass,
)
logger = logging.getLogger(__name__)
class OktaConfig(ConfigModel):
# Required: Domain of the Okta deployment. Example: dev-33231928.okta.com
okta_domain = "dev-44231988.okta.com"
# Required: An API token generated from Okta.
okta_api_token = "00be4R_M2MzDqXawbWgfKGpKee0kuEOfX1RCQSRx00"
# Optional: Whether to ingest users, groups, or both.
ingest_users: bool = True
ingest_groups: bool = True
ingest_group_membership: bool = True
# Optional: Customize the mapping to DataHub Username from an attribute appearing in the Okta User
# profile. Reference: https://developer.okta.com/docs/reference/api/users/
okta_profile_to_username_attr: str = "login"
okta_profile_to_username_regex: str = "([^@]+)"
# Optional: Customize the mapping to DataHub Group from an attribute appearing in the Okta Group
# profile. Reference: https://developer.okta.com/docs/reference/api/groups/
okta_profile_to_group_name_attr: str = "name"
okta_profile_to_group_name_regex: str = "(.*)"
# Optional: Include deprovisioned or suspended Okta users in the ingestion.
include_deprovisioned_users = False
include_suspended_users = False
# Optional: Page size for reading groups and users from Okta API.
page_size = 100
# Optional: Set the delay for fetching batches of entities from Okta. Okta has rate limiting in place.
delay_seconds = 0.01
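    # For example, with the defaults above an Okta user whose "login" attribute is
    # "jdoe@example.com" is mapped to the DataHub username "jdoe": re.search() is run
    # with "([^@]+)" against the login value and the first match is kept.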
@dataclass
class OktaSourceReport(SourceReport):
filtered: List[str] = field(default_factory=list)
def report_filtered(self, name: str) -> None:
self.filtered.append(name)
#
# Source Connector that extracts Users, Groups, and Group Membership in batch using the Okta Python SDK.
#
# Validated against Okta API Versions:
# - 2021.07.2
#
# Validated against load:
# - User Count: 1000
# - Group Count: 100
# - Group Membership Edges: 1000 (1 per User)
# - Run Time (Wall Clock): 2min 7sec
#
class OktaSource(Source):
"""Ingest Okta Users & Groups into Datahub"""
@classmethod
def create(cls, config_dict, ctx):
config = OktaConfig.parse_obj(config_dict)
return cls(config, ctx)
def __init__(self, config: OktaConfig, ctx: PipelineContext):
super().__init__(ctx)
self.config = config
self.report = OktaSourceReport()
self.okta_client = self._create_okta_client()
def get_workunits(self) -> Iterable[MetadataWorkUnit]:
        # Step 1: Produce MetadataWorkUnits for CorpGroups.
        okta_groups = None
        if self.config.ingest_groups:
            okta_groups = list(self._get_okta_groups())
datahub_corp_group_snapshots = self._map_okta_groups(okta_groups)
for datahub_corp_group_snapshot in datahub_corp_group_snapshots:
mce = MetadataChangeEvent(proposedSnapshot=datahub_corp_group_snapshot)
wu = MetadataWorkUnit(id=datahub_corp_group_snapshot.urn, mce=mce)
self.report.report_workunit(wu)
yield wu
# Step 2: Populate GroupMembership Aspects for CorpUsers
datahub_corp_user_urn_to_group_membership: Dict[str, GroupMembershipClass] = {}
if self.config.ingest_group_membership and okta_groups is not None:
# Fetch membership for each group.
for okta_group in okta_groups:
datahub_corp_group_urn = self._map_okta_group_profile_to_urn(
okta_group.profile
)
if datahub_corp_group_urn is None:
error_str = f"Failed to extract DataHub Group Name from Okta Group: Invalid regex pattern provided or missing profile attribute for group named {okta_group.profile.name}. Skipping..."
logger.error(error_str)
self.report.report_failure("okta_group_mapping", error_str)
continue
# Extract and map users for each group.
okta_group_users = self._get_okta_group_users(okta_group)
for okta_user in okta_group_users:
datahub_corp_user_urn = self._map_okta_user_profile_to_urn(
okta_user.profile
)
if datahub_corp_user_urn is None:
error_str = f"Failed to extract DataHub Username from Okta User: Invalid regex pattern provided or missing profile attribute for User with login {okta_user.profile.login}. Skipping..."
logger.error(error_str)
self.report.report_failure("okta_user_mapping", error_str)
continue
# Either update or create the GroupMembership aspect for this group member.
# TODO: Production of the GroupMembership aspect will overwrite the existing
# group membership for the DataHub user.
if (
datahub_corp_user_urn
in datahub_corp_user_urn_to_group_membership
):
datahub_corp_user_urn_to_group_membership[
datahub_corp_user_urn
].groups.append(datahub_corp_group_urn)
else:
datahub_corp_user_urn_to_group_membership[
datahub_corp_user_urn
] = GroupMembershipClass(groups=[datahub_corp_group_urn])
# Step 3: Produce MetadataWorkUnits for CorpUsers.
if self.config.ingest_users:
okta_users = self._get_okta_users()
filtered_okta_users = filter(self._filter_okta_user, okta_users)
datahub_corp_user_snapshots = self._map_okta_users(filtered_okta_users)
for datahub_corp_user_snapshot in datahub_corp_user_snapshots:
# Add GroupMembership aspect populated in Step 2 if applicable.
if (
datahub_corp_user_snapshot.urn
in datahub_corp_user_urn_to_group_membership
):
datahub_group_membership = (
datahub_corp_user_urn_to_group_membership.get(
datahub_corp_user_snapshot.urn
)
)
assert datahub_group_membership is not None
datahub_corp_user_snapshot.aspects.append(datahub_group_membership)
mce = MetadataChangeEvent(proposedSnapshot=datahub_corp_user_snapshot)
wu = MetadataWorkUnit(id=datahub_corp_user_snapshot.urn, mce=mce)
self.report.report_workunit(wu)
yield wu
def get_report(self):
return self.report
def close(self):
pass
# Instantiates Okta SDK Client.
def _create_okta_client(self):
config = {
"orgUrl": f"https://{self.config.okta_domain}",
"token": f"{self.config.okta_api_token}",
}
return OktaClient(config)
# Retrieves all Okta Group Objects in batches.
def _get_okta_groups(self) -> Iterable[Group]:
# Note that this is not taking full advantage of Python AsyncIO, as we are blocking on calls.
query_parameters = {"limit": self.config.page_size}
groups, resp, err = asyncio.get_event_loop().run_until_complete(
self.okta_client.list_groups(query_parameters)
)
while True:
if err is not None:
self.report.report_failure(
"okta_groups", f"Failed to fetch Groups from Okta API: {err}"
)
if groups is not None:
for group in groups:
yield group
if resp is not None and resp.has_next():
sleep(self.config.delay_seconds)
groups, err = asyncio.get_event_loop().run_until_complete(resp.next())
else:
break
# Retrieves Okta User Objects in a particular Okta Group in batches.
def _get_okta_group_users(self, group: Group) -> Iterable[User]:
# Note that this is not taking full advantage of Python AsyncIO; we are blocking on calls.
query_parameters = {"limit": self.config.page_size}
users, resp, err = asyncio.get_event_loop().run_until_complete(
self.okta_client.list_group_users(group.id, query_parameters)
)
while True:
if err is not None:
self.report.report_failure(
"okta_group_users",
f"Failed to fetch Users of Group {group.profile.name} from Okta API: {err}",
)
if users is not None:
for user in users:
yield user
if resp is not None and resp.has_next():
sleep(self.config.delay_seconds)
users, err = asyncio.get_event_loop().run_until_complete(resp.next())
else:
break
# Retrieves all Okta User Objects in batches.
def _get_okta_users(self) -> Iterable[User]:
query_parameters = {"limit": self.config.page_size}
users, resp, err = asyncio.get_event_loop().run_until_complete(
self.okta_client.list_users(query_parameters)
)
while True:
if err is not None:
self.report.report_failure(
"okta_users", f"Failed to fetch Users from Okta API: {err}"
)
if users is not None:
for user in users:
yield user
if resp is not None and resp.has_next():
sleep(self.config.delay_seconds)
users, err = asyncio.get_event_loop().run_until_complete(resp.next())
else:
break
# Filters Okta User Objects based on provided configuration.
def _filter_okta_user(self, okta_user: User) -> bool:
if (
self.config.include_deprovisioned_users is False
and okta_user.status == UserStatus.DEPROVISIONED
):
return False
elif (
self.config.include_suspended_users is False
and okta_user.status == UserStatus.SUSPENDED
):
return False
return True
# Converts Okta Group Objects into DataHub CorpGroupSnapshots.
def _map_okta_groups(
self, okta_groups: Iterable[Group]
) -> Iterable[CorpGroupSnapshot]:
for okta_group in okta_groups:
corp_group_urn = self._map_okta_group_profile_to_urn(okta_group.profile)
if corp_group_urn is None:
error_str = f"Failed to extract DataHub Group Name from Okta Group: Invalid regex pattern provided or missing profile attribute for group named {okta_group.profile.name}. Skipping..."
logger.error(error_str)
self.report.report_failure("okta_group_mapping", error_str)
continue
corp_group_snapshot = CorpGroupSnapshot(
urn=corp_group_urn,
aspects=[],
)
corp_group_info = self._map_okta_group_profile(okta_group.profile)
corp_group_snapshot.aspects.append(corp_group_info)
yield corp_group_snapshot
# Creates DataHub CorpGroup Urn from Okta Group Object.
def _map_okta_group_profile_to_urn(
self, okta_group_profile: GroupProfile
) -> Union[str, None]:
# Profile is a required field as per https://developer.okta.com/docs/reference/api/groups/#group-attributes
group_name = self._map_okta_group_profile_to_group_name(okta_group_profile)
if group_name is None:
return None
# URL Encode the Group Name to deal with potential spaces.
# TODO: Modeling - Need to figure out a better way to generate a stable identifier for the group.
url_encoded_group_name = urllib.parse.quote(group_name)
return self._make_corp_group_urn(url_encoded_group_name)
# Converts Okta Group Profile Object into a DataHub CorpGroupInfo Aspect.
def _map_okta_group_profile(self, profile: GroupProfile) -> CorpGroupInfoClass:
return CorpGroupInfoClass(
displayName=self._map_okta_group_profile_to_group_name(profile),
description=profile.description,
members=[],
groups=[],
admins=[],
)
# Converts Okta Group Profile Object into a DataHub Group Name.
def _map_okta_group_profile_to_group_name(
self, okta_group_profile: GroupProfile
) -> Union[str, None]:
# Profile is a required field as per https://developer.okta.com/docs/reference/api/groups/#group-attributes
return self._extract_regex_match_from_dict_value(
okta_group_profile.as_dict(),
self.config.okta_profile_to_group_name_attr,
self.config.okta_profile_to_group_name_regex,
)
# Converts Okta User Objects into DataHub CorpUserSnapshots.
def _map_okta_users(self, okta_users: Iterable[User]) -> Iterable[CorpUserSnapshot]:
for okta_user in okta_users:
corp_user_urn = self._map_okta_user_profile_to_urn(okta_user.profile)
if corp_user_urn is None:
error_str = f"Failed to extract DataHub Username from Okta User: Invalid regex pattern provided or missing profile attribute for User with login {okta_user.profile.login}. Skipping..."
logger.error(error_str)
self.report.report_failure("okta_user_mapping", error_str)
continue
corp_user_snapshot = CorpUserSnapshot(
urn=corp_user_urn,
aspects=[],
)
corp_user_info = self._map_okta_user_profile(okta_user.profile)
corp_user_snapshot.aspects.append(corp_user_info)
yield corp_user_snapshot
# Creates DataHub CorpUser Urn from Okta User Profile
def _map_okta_user_profile_to_urn(
self, okta_user_profile: UserProfile
) -> Union[str, None]:
# Profile is a required field as per https://developer.okta.com/docs/reference/api/users/#user-attributes
username = self._map_okta_user_profile_to_username(okta_user_profile)
if username is None:
return None
return self._make_corp_user_urn(username)
# Converts Okta User Profile Object into a DataHub User name.
def _map_okta_user_profile_to_username(
self, okta_user_profile: UserProfile
) -> Union[str, None]:
# Profile is a required field as per https://developer.okta.com/docs/reference/api/users/#user-attributes
return self._extract_regex_match_from_dict_value(
okta_user_profile.as_dict(),
self.config.okta_profile_to_username_attr,
self.config.okta_profile_to_username_regex,
)
# Converts Okta User Profile into a CorpUserInfo.
def _map_okta_user_profile(self, profile: UserProfile) -> CorpUserInfoClass:
# TODO: Extract user's manager if provided.
# Source: https://developer.okta.com/docs/reference/api/users/#default-profile-properties
full_name = f"{profile.firstName} {profile.lastName}"
return CorpUserInfoClass(
active=True,
displayName=profile.displayName
if profile.displayName is not None
else full_name,
firstName=profile.firstName,
lastName=profile.lastName,
fullName=full_name,
email=profile.email,
title=profile.title,
countryCode=profile.countryCode,
departmentName=profile.department,
)
def _make_corp_group_urn(self, name: str) -> str:
return f"urn:li:corpGroup:{name}"
def _make_corp_user_urn(self, username: str) -> str:
return f"urn:li:corpuser:{username}"
def _extract_regex_match_from_dict_value(
self, str_dict: Dict[str, str], key: str, pattern: str
) -> Union[str, None]:
raw_value = str_dict.get(key)
if raw_value is None:
return None
match = re.search(pattern, raw_value)
if match is None:
return None
return match.group()
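# A minimal construction sketch (domain, token, and pipeline context are placeholders):
# the source is normally built from a plain config dict whose keys mirror the
# OktaConfig fields above.
def _example_build_okta_source(ctx: PipelineContext) -> OktaSource:
    config_dict = {
        "okta_domain": "dev-00000000.okta.com",
        "okta_api_token": "REDACTED",
        "ingest_users": True,
        "ingest_groups": True,
        "ingest_group_membership": True,
    }
    return OktaSource.create(config_dict, ctx)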
| |
from __future__ import absolute_import
from __future__ import with_statement
import pickle
import sys
from functools import wraps
from mock import Mock, patch
if sys.version_info >= (3, 0):
from io import StringIO, BytesIO
else:
from StringIO import StringIO, StringIO as BytesIO # noqa
from kombu import utils
from kombu.utils.compat import next
from .utils import (
TestCase,
redirect_stdouts, mask_modules, module_exists, skip_if_module,
)
class OldString(object):
def __init__(self, value):
self.value = value
def __str__(self):
return self.value
def split(self, *args, **kwargs):
return self.value.split(*args, **kwargs)
def rsplit(self, *args, **kwargs):
return self.value.rsplit(*args, **kwargs)
class test_kombu_module(TestCase):
def test_dir(self):
import kombu
self.assertTrue(dir(kombu))
class test_utils(TestCase):
def test_maybe_list(self):
self.assertEqual(utils.maybe_list(None), [])
self.assertEqual(utils.maybe_list(1), [1])
self.assertEqual(utils.maybe_list([1, 2, 3]), [1, 2, 3])
def test_fxrange_no_repeatlast(self):
self.assertEqual(list(utils.fxrange(1.0, 3.0, 1.0)),
[1.0, 2.0, 3.0])
def test_fxrangemax(self):
self.assertEqual(list(utils.fxrangemax(1.0, 3.0, 1.0, 30.0)),
[1.0, 2.0, 3.0, 3.0, 3.0, 3.0,
3.0, 3.0, 3.0, 3.0, 3.0])
self.assertEqual(list(utils.fxrangemax(1.0, None, 1.0, 30.0)),
[1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0])
def test_reprkwargs(self):
self.assertTrue(utils.reprkwargs({'foo': 'bar', 1: 2, u'k': 'v'}))
def test_reprcall(self):
self.assertTrue(
utils.reprcall('add', (2, 2), {'copy': True}),
)
class test_UUID(TestCase):
def test_uuid4(self):
self.assertNotEqual(utils.uuid4(),
utils.uuid4())
def test_uuid(self):
i1 = utils.uuid()
i2 = utils.uuid()
self.assertIsInstance(i1, str)
self.assertNotEqual(i1, i2)
@skip_if_module('__pypy__')
def test_uuid_without_ctypes(self):
old_utils = sys.modules.pop('kombu.utils')
@mask_modules('ctypes')
def with_ctypes_masked():
from kombu.utils import ctypes, uuid
self.assertIsNone(ctypes)
tid = uuid()
self.assertTrue(tid)
self.assertIsInstance(tid, basestring)
try:
with_ctypes_masked()
finally:
            sys.modules['kombu.utils'] = old_utils
class test_Misc(TestCase):
def test_kwdict(self):
def f(**kwargs):
return kwargs
kw = {u'foo': 'foo',
u'bar': 'bar'}
self.assertTrue(f(**utils.kwdict(kw)))
class MyStringIO(StringIO):
def close(self):
pass
class MyBytesIO(BytesIO):
def close(self):
pass
class test_emergency_dump_state(TestCase):
@redirect_stdouts
def test_dump(self, stdout, stderr):
fh = MyBytesIO()
utils.emergency_dump_state({'foo': 'bar'}, open_file=lambda n, m: fh)
self.assertDictEqual(pickle.loads(fh.getvalue()), {'foo': 'bar'})
self.assertTrue(stderr.getvalue())
self.assertFalse(stdout.getvalue())
@redirect_stdouts
def test_dump_second_strategy(self, stdout, stderr):
fh = MyStringIO()
def raise_something(*args, **kwargs):
raise KeyError('foo')
utils.emergency_dump_state(
{'foo': 'bar'},
open_file=lambda n, m: fh, dump=raise_something,
)
self.assertIn("'foo': 'bar'", fh.getvalue())
self.assertTrue(stderr.getvalue())
self.assertFalse(stdout.getvalue())
def insomnia(fun):
@wraps(fun)
def _inner(*args, **kwargs):
def mysleep(i):
pass
prev_sleep = utils.sleep
utils.sleep = mysleep
try:
return fun(*args, **kwargs)
finally:
utils.sleep = prev_sleep
return _inner
class test_retry_over_time(TestCase):
def setUp(self):
self.index = 0
class Predicate(Exception):
pass
def myfun(self):
if self.index < 9:
raise self.Predicate()
return 42
def errback(self, exc, intervals, retries):
interval = next(intervals)
sleepvals = (None, 2.0, 4.0, 6.0, 8.0, 10.0, 12.0, 14.0, 16.0, 16.0)
self.index += 1
self.assertEqual(interval, sleepvals[self.index])
return interval
@insomnia
def test_simple(self):
prev_count, utils.count = utils.count, Mock()
try:
utils.count.return_value = range(1)
x = utils.retry_over_time(self.myfun, self.Predicate,
errback=None, interval_max=14)
self.assertIsNone(x)
utils.count.return_value = range(10)
cb = Mock()
x = utils.retry_over_time(self.myfun, self.Predicate,
errback=self.errback, callback=cb,
interval_max=14)
self.assertEqual(x, 42)
self.assertEqual(self.index, 9)
cb.assert_called_with()
finally:
utils.count = prev_count
@insomnia
def test_retry_once(self):
self.assertRaises(
self.Predicate, utils.retry_over_time,
self.myfun, self.Predicate,
max_retries=1, errback=self.errback, interval_max=14,
)
self.assertEqual(self.index, 2)
# no errback
self.assertRaises(
self.Predicate, utils.retry_over_time,
self.myfun, self.Predicate,
max_retries=1, errback=None, interval_max=14,
)
@insomnia
def test_retry_never(self):
self.assertRaises(
self.Predicate, utils.retry_over_time,
self.myfun, self.Predicate,
max_retries=0, errback=self.errback, interval_max=14,
)
self.assertEqual(self.index, 1)
class test_cached_property(TestCase):
def test_deleting(self):
class X(object):
xx = False
@utils.cached_property
def foo(self):
return 42
@foo.deleter # noqa
def foo(self, value):
self.xx = value
x = X()
del(x.foo)
self.assertFalse(x.xx)
x.__dict__['foo'] = 'here'
del(x.foo)
self.assertEqual(x.xx, 'here')
def test_when_access_from_class(self):
class X(object):
xx = None
@utils.cached_property
def foo(self):
return 42
@foo.setter # noqa
def foo(self, value):
self.xx = 10
desc = X.__dict__['foo']
self.assertIs(X.foo, desc)
self.assertIs(desc.__get__(None), desc)
self.assertIs(desc.__set__(None, 1), desc)
self.assertIs(desc.__delete__(None), desc)
self.assertTrue(desc.setter(1))
x = X()
x.foo = 30
self.assertEqual(x.xx, 10)
del(x.foo)
class test_symbol_by_name(TestCase):
def test_instance_returns_instance(self):
instance = object()
self.assertIs(utils.symbol_by_name(instance), instance)
def test_returns_default(self):
default = object()
self.assertIs(
utils.symbol_by_name('xyz.ryx.qedoa.weq:foz', default=default),
default,
)
def test_no_default(self):
with self.assertRaises(ImportError):
utils.symbol_by_name('xyz.ryx.qedoa.weq:foz')
def test_imp_reraises_ValueError(self):
imp = Mock()
imp.side_effect = ValueError()
with self.assertRaises(ValueError):
utils.symbol_by_name('kombu.Connection', imp=imp)
def test_package(self):
from kombu.entity import Exchange
self.assertIs(
utils.symbol_by_name('.entity:Exchange', package='kombu'),
Exchange,
)
self.assertTrue(utils.symbol_by_name(':Consumer', package='kombu'))
class test_ChannelPromise(TestCase):
def test_repr(self):
self.assertEqual(
repr(utils.ChannelPromise(lambda: 'foo')),
"<promise: 'foo'>",
)
class test_entrypoints(TestCase):
@mask_modules('pkg_resources')
def test_without_pkg_resources(self):
self.assertListEqual(list(utils.entrypoints('kombu.test')), [])
@module_exists('pkg_resources')
def test_with_pkg_resources(self):
with patch('pkg_resources.iter_entry_points', create=True) as iterep:
eps = iterep.return_value = [Mock(), Mock()]
self.assertTrue(list(utils.entrypoints('kombu.test')))
iterep.assert_called_with('kombu.test')
eps[0].load.assert_called_with()
eps[1].load.assert_called_with()
class test_shufflecycle(TestCase):
def test_shuffles(self):
prev_repeat, utils.repeat = utils.repeat, Mock()
try:
utils.repeat.return_value = range(10)
values = set(['A', 'B', 'C'])
cycle = utils.shufflecycle(values)
seen = set()
for i in xrange(10):
cycle.next()
utils.repeat.assert_called_with(None)
self.assertTrue(seen.issubset(values))
with self.assertRaises(StopIteration):
cycle.next()
cycle.next()
finally:
utils.repeat = prev_repeat
| |
# -*- coding: utf-8 -*-
#
# Copyright 2017-2021 Tencent Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from datetime import datetime
import hashlib
import json
import random
import sys
import time
import uuid
import warnings
import logging
import logging.handlers
try:
from urllib.parse import urlencode
except ImportError:
from urllib import urlencode
import tencentcloud
from tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException
from tencentcloud.common.exception import TencentCloudSDKException as SDKError
from tencentcloud.common.http.request import ApiRequest
from tencentcloud.common.http.request import RequestInternal
from tencentcloud.common.profile.client_profile import ClientProfile
from tencentcloud.common.sign import Sign
warnings.filterwarnings("ignore")
_json_content = 'application/json'
_multipart_content = 'multipart/form-data'
_form_urlencoded_content = 'application/x-www-form-urlencoded'
_octet_stream = "application/octet-stream"
class EmptyHandler(logging.Handler):
def emit(self, message):
pass
LOGGER_NAME = "tencentcloud_sdk_common"
logger = logging.getLogger(LOGGER_NAME)
logger.addHandler(EmptyHandler())
class AbstractClient(object):
_requestPath = '/'
_params = {}
_apiVersion = ''
_endpoint = ''
_service = ''
_sdkVersion = 'SDK_PYTHON_%s' % tencentcloud.__version__
_default_content_type = _form_urlencoded_content
FMT = '%(asctime)s %(process)d %(filename)s L%(lineno)s %(levelname)s %(message)s'
def __init__(self, credential, region, profile=None):
if credential is None:
raise TencentCloudSDKException(
"InvalidCredential", "Credential is None or invalid")
self.credential = credential
self.region = region
self.profile = ClientProfile() if profile is None else profile
is_http = True if self.profile.httpProfile.scheme == "http" else False
self.request = ApiRequest(self._get_endpoint(),
req_timeout=self.profile.httpProfile.reqTimeout,
proxy=self.profile.httpProfile.proxy,
is_http=is_http,
certification=self.profile.httpProfile.certification)
if self.profile.httpProfile.keepAlive:
self.request.set_keep_alive()
def _fix_params(self, params):
if not isinstance(params, (dict,)):
return params
return self._format_params(None, params)
def _format_params(self, prefix, params):
d = {}
if params is None:
return d
if not isinstance(params, (tuple, list, dict)):
d[prefix] = params
return d
if isinstance(params, (list, tuple)):
for idx, item in enumerate(params):
if prefix:
key = "{0}.{1}".format(prefix, idx)
else:
key = "{0}".format(idx)
d.update(self._format_params(key, item))
return d
if isinstance(params, dict):
for k, v in params.items():
if prefix:
key = '{0}.{1}'.format(prefix, k)
else:
key = '{0}'.format(k)
d.update(self._format_params(key, v))
return d
raise TencentCloudSDKException("ClientParamsError", "some params type error")
def _build_req_inter(self, action, params, req_inter, options=None):
options = options or {}
if self.profile.signMethod == "TC3-HMAC-SHA256" or options.get("IsMultipart") is True:
self._build_req_with_tc3_signature(action, params, req_inter, options)
elif self.profile.signMethod in ("HmacSHA1", "HmacSHA256"):
self._build_req_with_old_signature(action, params, req_inter)
else:
raise TencentCloudSDKException("ClientError", "Invalid signature method.")
def _build_req_with_old_signature(self, action, params, req):
params = copy.deepcopy(self._fix_params(params))
params['Action'] = action[0].upper() + action[1:]
params['RequestClient'] = self._sdkVersion
params['Nonce'] = random.randint(1, sys.maxsize)
params['Timestamp'] = int(time.time())
params['Version'] = self._apiVersion
if self.region:
params['Region'] = self.region
if self.credential.token:
params['Token'] = self.credential.token
if self.credential.secret_id:
params['SecretId'] = self.credential.secret_id
if self.profile.signMethod:
params['SignatureMethod'] = self.profile.signMethod
if self.profile.language:
params['Language'] = self.profile.language
signInParam = self._format_sign_string(params)
params['Signature'] = Sign.sign(str(self.credential.secret_key),
str(signInParam),
str(self.profile.signMethod))
req.data = urlencode(params)
req.header["Content-Type"] = "application/x-www-form-urlencoded"
def _build_req_with_tc3_signature(self, action, params, req, options=None):
content_type = self._default_content_type
if req.method == 'GET':
content_type = _form_urlencoded_content
elif req.method == 'POST':
content_type = _json_content
options = options or {}
if options.get("IsMultipart"):
content_type = _multipart_content
if options.get("IsOctetStream"):
content_type = _octet_stream
req.header["Content-Type"] = content_type
if req.method == "GET" and content_type == _multipart_content:
raise SDKError("ClientError",
"Invalid request method GET for multipart.")
endpoint = self._get_endpoint()
timestamp = int(time.time())
req.header["Host"] = endpoint
req.header["X-TC-Action"] = action[0].upper() + action[1:]
req.header["X-TC-RequestClient"] = self._sdkVersion
req.header["X-TC-Timestamp"] = str(timestamp)
req.header["X-TC-Version"] = self._apiVersion
if self.profile.unsignedPayload is True:
req.header["X-TC-Content-SHA256"] = "UNSIGNED-PAYLOAD"
if self.region:
req.header['X-TC-Region'] = self.region
if self.credential.token:
req.header['X-TC-Token'] = self.credential.token
if self.profile.language:
req.header['X-TC-Language'] = self.profile.language
if req.method == 'GET':
params = copy.deepcopy(self._fix_params(params))
req.data = urlencode(params)
elif content_type == _json_content:
req.data = json.dumps(params)
elif content_type == _multipart_content:
boundary = uuid.uuid4().hex
req.header["Content-Type"] = content_type + "; boundary=" + boundary
req.data = self._get_multipart_body(params, boundary, options)
service = endpoint.split('.')[0]
date = datetime.utcfromtimestamp(timestamp).strftime('%Y-%m-%d')
signature = self._get_tc3_signature(params, req, date, service, options)
auth = "TC3-HMAC-SHA256 Credential=%s/%s/%s/tc3_request, SignedHeaders=content-type;host, Signature=%s" % (
self.credential.secret_id, date, service, signature)
req.header["Authorization"] = auth
def _get_tc3_signature(self, params, req, date, service, options=None):
options = options or {}
canonical_uri = req.uri
canonical_querystring = ""
payload = req.data
if req.method == 'GET':
canonical_querystring = req.data
payload = ""
if req.header.get("X-TC-Content-SHA256") == "UNSIGNED-PAYLOAD":
payload = "UNSIGNED-PAYLOAD"
if sys.version_info[0] == 3 and isinstance(payload, type("")):
payload = payload.encode("utf8")
payload_hash = hashlib.sha256(payload).hexdigest()
canonical_headers = 'content-type:%s\nhost:%s\n' % (
req.header["Content-Type"], req.header["Host"])
signed_headers = 'content-type;host'
canonical_request = '%s\n%s\n%s\n%s\n%s\n%s' % (req.method,
canonical_uri,
canonical_querystring,
canonical_headers,
signed_headers,
payload_hash)
algorithm = 'TC3-HMAC-SHA256'
credential_scope = date + '/' + service + '/tc3_request'
if sys.version_info[0] == 3:
canonical_request = canonical_request.encode("utf8")
digest = hashlib.sha256(canonical_request).hexdigest()
string2sign = '%s\n%s\n%s\n%s' % (algorithm,
req.header["X-TC-Timestamp"],
credential_scope,
digest)
return Sign.sign_tc3(self.credential.secret_key, date, service, string2sign)
# it must return bytes instead of string
def _get_multipart_body(self, params, boundary, options=None):
if options is None:
options = {}
# boundary and params key will never contain unicode characters
boundary = boundary.encode()
binparas = options.get("BinaryParams", [])
body = b''
for k, v in params.items():
kbytes = k.encode()
body += b'--%s\r\n' % boundary
body += b'Content-Disposition: form-data; name="%s"' % kbytes
if k in binparas:
body += b'; filename="%s"\r\n' % kbytes
else:
body += b"\r\n"
if isinstance(v, list) or isinstance(v, dict):
v = json.dumps(v)
body += b'Content-Type: application/json\r\n'
if sys.version_info[0] == 3 and isinstance(v, type("")):
v = v.encode()
body += b'\r\n%s\r\n' % v
if body != b'':
body += b'--%s--\r\n' % boundary
return body
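    # Illustrative example (hedged; key order follows dict iteration): for
    # params {"Name": "demo", "File": b"\x00\x01"} with
    # options {"BinaryParams": ["File"]}, the body built above looks like
    #
    #     --<boundary>\r\n
    #     Content-Disposition: form-data; name="Name"\r\n
    #     \r\ndemo\r\n
    #     --<boundary>\r\n
    #     Content-Disposition: form-data; name="File"; filename="File"\r\n
    #     \r\n\x00\x01\r\n
    #     --<boundary>--\r\n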
def _check_status(self, resp_inter):
if resp_inter.status != 200:
raise TencentCloudSDKException("ServerNetworkError", resp_inter.data)
def _format_sign_string(self, params):
formatParam = {}
for k in params:
formatParam[k.replace('_', '.')] = params[k]
strParam = '&'.join('%s=%s' % (k, formatParam[k]) for k in sorted(formatParam))
msg = '%s%s%s?%s' % (self.profile.httpProfile.reqMethod, self._get_endpoint(), self._requestPath, strParam)
return msg
def _get_service_domain(self):
rootDomain = self.profile.httpProfile.rootDomain
return self._service + "." + rootDomain
def _get_endpoint(self):
endpoint = self.profile.httpProfile.endpoint
if endpoint is None:
endpoint = self._get_service_domain()
return endpoint
def call(self, action, params, options=None):
req = RequestInternal(self._get_endpoint(),
self.profile.httpProfile.reqMethod,
self._requestPath)
self._build_req_inter(action, params, req, options)
resp_inter = self.request.send_request(req)
self._check_status(resp_inter)
data = resp_inter.data
return data
def call_octet_stream(self, action, headers, body):
"""
        Invoke API with application/octet-stream content-type.
        Note:
        1. Only specific APIs can be invoked in this manner.
        2. Only the TC3-HMAC-SHA256 signature method can be used.
        3. Only the POST request method can be used.
:type action: str
:param action: Specific API action name.
:type headers: dict
:param headers: Header parameters for this API.
:type body: bytes
        :param body: Bytes of the request body
"""
if self.profile.signMethod != "TC3-HMAC-SHA256":
raise SDKError("ClientError", "Invalid signature method.")
if self.profile.httpProfile.reqMethod != "POST":
raise SDKError("ClientError", "Invalid request method.")
req = RequestInternal(self._get_endpoint(),
self.profile.httpProfile.reqMethod,
self._requestPath)
for key in headers:
req.header[key] = headers[key]
req.data = body
options = {"IsOctetStream": True}
self._build_req_inter(action, None, req, options)
resp = self.request.send_request(req)
self._check_status(resp)
data = resp.data
json_rsp = json.loads(data)
if "Error" in json_rsp["Response"]:
code = json_rsp["Response"]["Error"]["Code"]
message = json_rsp["Response"]["Error"]["Message"]
reqid = json_rsp["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
return json_rsp
def call_json(self, action, params):
"""
Call api with json object and return with json object.
:type action: str
:param action: api name e.g. ``DescribeInstances``
:type params: dict
:param params: params with this action
"""
body = self.call(action, params)
response = json.loads(body)
if "Error" not in response["Response"]:
return response
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
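    # Hedged usage sketch (names outside this class are assumptions): a concrete
    # service client inheriting from this class could be driven roughly like
    #
    #     client = SomeServiceClient(credential, "ap-guangzhou")  # hypothetical subclass
    #     resp = client.call_json("DescribeInstances", {"Limit": 10})
    #     print(resp["Response"])
    #
    # call_json raises TencentCloudSDKException when the service reports an error.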
def set_stream_logger(self, stream=None, level=logging.DEBUG, log_format=None):
"""
Add a stream handler
:type stream: IO[str]
:param stream: e.g. ``sys.stdout`` ``sys.stdin`` ``sys.stderr``
:type level: int
:param level: Logging level, e.g. ``logging.INFO``
:type log_format: str
:param log_format: Log message format
"""
log = logging.getLogger(LOGGER_NAME)
log.setLevel(level)
sh = logging.StreamHandler(stream)
sh.setLevel(level)
if log_format is None:
log_format = self.FMT
formatter = logging.Formatter(log_format)
sh.setFormatter(formatter)
log.addHandler(sh)
def set_file_logger(self, file_path, level=logging.DEBUG, log_format=None):
"""
Add a file handler
:type file_path: str
:param file_path: path of log file
:type level: int
:param level: Logging level, e.g. ``logging.INFO``
:type log_format: str
:param log_format: Log message format
"""
log = logging.getLogger(LOGGER_NAME)
log.setLevel(level)
mb = 1024 * 1024
fh = logging.handlers.RotatingFileHandler(file_path, maxBytes=512*mb, backupCount=10)
fh.setLevel(level)
if log_format is None:
log_format = self.FMT
formatter = logging.Formatter(log_format)
fh.setFormatter(formatter)
log.addHandler(fh)
def set_default_logger(self):
"""
Set default log handler
"""
log = logging.getLogger(LOGGER_NAME)
log.handlers = []
        log.addHandler(EmptyHandler())
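# Hedged usage note: debug output for this client can be enabled through the
# helpers above, e.g.
#
#     client.set_stream_logger(sys.stderr, logging.DEBUG)
#     client.set_file_logger("/tmp/sdk.log", logging.INFO)
#
# set_default_logger() restores the silent default (an EmptyHandler only).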
| |
import numpy as np
import pytest
import tensorflow as tf
from numpy.testing import assert_almost_equal
from tensorflow.python.framework.errors import InvalidArgumentError
from gtd.ml.seq_batch import SequenceBatch, FeedSequenceBatch, reduce_mean, reduce_max, reduce_sum
from gtd.ml.utils import clean_session
from gtd.ml.vocab import SimpleVocab
from gtd.tests.ml.test_framework import FeedableTester, assert_array_collections_equal, clean_test_session
from gtd.tests.ml.test_model import VocabExample
class TestSequenceBatch(object):
def test(self):
values = tf.constant([
[1, -8, 5],
[0, 2, 7],
[2, -8, 6],
], dtype=tf.float32)
float_mask = tf.constant([
[1, 1, 1],
[0, 0, 1],
[1, 1, 0],
], dtype=tf.float32)
bool_mask = tf.constant([
[True, True, True],
[False, False, True],
[True, True, False],
], dtype=tf.bool)
ninf = float('-inf')
correct = np.array([
[1, -8, 5],
[ninf, ninf, 7],
[2, -8, ninf],
], dtype=np.float32)
seq_batch0 = SequenceBatch(values, float_mask)
seq_batch1 = SequenceBatch(values, bool_mask)
with tf.Session():
assert_almost_equal(seq_batch0.with_pad_value(ninf).values.eval(), correct)
assert_almost_equal(seq_batch1.with_pad_value(ninf).values.eval(), correct)
class TestFeedSequenceBatch(FeedableTester):
@pytest.fixture
def model(self):
return FeedSequenceBatch(align='left')
@pytest.fixture
def inputs(self):
tokens = u'<unk> a b c'.split()
unk = '<unk>'
vocab = VocabExample(tokens, unk)
sequences = [
u'a a b b c'.split(),
u'a b'.split(),
[u'b'],
[u'c'],
]
return self.as_args_kwargs(sequences, vocab)
@pytest.fixture
def feed_dict(self, model):
indices = np.array([
[1, 1, 2, 2, 3],
[1, 2, 0, 0, 0],
[2, 0, 0, 0, 0],
[3, 0, 0, 0, 0],
], dtype=np.int32)
mask = np.array([
[1, 1, 1, 1, 1],
[1, 1, 0, 0, 0],
[1, 0, 0, 0, 0],
[1, 0, 0, 0, 0],
], dtype=np.float32)
return {model.values: indices, model.mask: mask}
def test_outputs(self):
pass # trivial to test placeholders
def test_right_align(self, inputs):
indices = np.array([
[1, 1, 2, 2, 3],
[0, 0, 0, 1, 2],
[0, 0, 0, 0, 2],
[0, 0, 0, 0, 3],
], dtype=np.int32)
mask = np.array([
[1, 1, 1, 1, 1],
[0, 0, 0, 1, 1],
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 1],
], dtype=np.float32)
with clean_session():
model = FeedSequenceBatch(align='right')
correct = {model.values: indices, model.mask: mask}
args, kwargs = inputs
test = model.inputs_to_feed_dict(*args, **kwargs)
assert_array_collections_equal(correct, test)
def test_seq_length(self):
tokens = u'<unk> a b c'.split()
unk = '<unk>'
vocab = VocabExample(tokens, unk)
sequences = [
u'a b a b c'.split(), # more than length 4
u'a b'.split(),
[u'b'],
[u'c'],
]
indices = np.array([
[2, 1, 2, 3],
[0, 0, 1, 2],
[0, 0, 0, 2],
[0, 0, 0, 3],
], dtype=np.int32)
mask = np.array([
[1, 1, 1, 1],
[0, 0, 1, 1],
[0, 0, 0, 1],
[0, 0, 0, 1],
], dtype=np.float32)
with clean_session():
model = FeedSequenceBatch(align='right', seq_length=4)
test_feed = model.inputs_to_feed_dict(sequences, vocab)
correct = {model.values: indices, model.mask: mask}
assert_array_collections_equal(correct, test_feed)
indices = tf.identity(model.values)
mask = tf.identity(model.mask)
assert indices.get_shape().as_list() == [None, 4]
assert mask.get_shape().as_list() == [None, 4]
def test_no_sequences(self):
vocab = SimpleVocab(u'a b c'.split())
sequences = []
with clean_session():
model = FeedSequenceBatch()
indices = tf.identity(model.values)
mask = tf.identity(model.mask)
indices_val, mask_val = model.compute([indices, mask], sequences, vocab)
assert indices_val.shape == mask_val.shape == (0, 0)
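# Note on the tests above (descriptive only): FeedSequenceBatch pads each batch
# to its longest sequence unless seq_length fixes the width, places padding on
# the right for align='left' and on the left for align='right' (the over-long
# sequence in test_seq_length keeps its last 4 tokens), and feeds an empty
# batch as (0, 0)-shaped arrays.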
class TestReduceMean(object):
def test_multidim(self):
npa = lambda arr: np.array(arr, dtype=np.float32)
correct = npa([
npa([4, 7, 10]) / 2,
npa([8, 14, 20]) / 3,
npa([13, 16, 19]) / 3,
])
with clean_session():
array = tf.constant([[[1., 2., 3.],
[3., 5., 7.],
[0., 0., 0.]],
[[2., 4., 6.],
[3., 5., 7.],
[3., 5., 7.]],
[[9., 9., 9.],
[3., 5., 7.],
[1., 2., 3.]]], dtype=tf.float32)
mask = tf.constant([
[1, 1, 0],
[1, 1, 1],
[1, 1, 1],
], dtype=tf.float32)
bm = reduce_mean(SequenceBatch(array, mask))
assert_almost_equal(bm.eval(), correct, decimal=5)
def test_batch_mean(self):
correct = np.array([-2. / 3, 1., 21. / 4])
with clean_session():
array = tf.constant([
[1, -8, 5, 4, 9],
[0, 2, 7, 8, 1],
[2, -8, 6, 4, 9],
], dtype=tf.float32)
mask = tf.constant([
[1, 1, 1, 0, 0],
[1, 1, 0, 0, 0],
[1, 0, 1, 1, 1],
], dtype=tf.float32)
bad_mask = tf.constant([
[1, 1, 1, 0, 0],
[0, 0, 0, 0, 0],
[1, 0, 1, 1, 1],
], dtype=tf.float32)
bm = reduce_mean(SequenceBatch(array, mask))
assert_almost_equal(bm.eval(), correct, decimal=5)
bm2 = reduce_mean(SequenceBatch(array, bad_mask))
with pytest.raises(InvalidArgumentError):
bm2.eval()
# try allow_empty option
bm3 = reduce_mean(SequenceBatch(array, bad_mask), allow_empty=True)
assert_almost_equal(bm3.eval(), np.array([-2. / 3, 0., 21. / 4]))
def test_empty(self):
with clean_session():
array = tf.constant(np.empty((0, 10, 20)))
mask = tf.constant(np.empty((0, 10)))
bm = reduce_mean(SequenceBatch(array, mask))
assert bm.eval().shape == (0, 20)
class TestReduceMax(object):
def test(self):
npa = lambda arr: np.array(arr, dtype=np.float32)
correct = npa([
npa([3, 5, 7]),
npa([3, 5, 7]),
npa([9, 9, 9]),
])
with clean_session():
array = tf.constant([[[1., 2., 3.],
[3., 5., 7.],
[100., 200., 2000.]],
[[2., 4., 6.],
[3., 5., 7.],
[3., 5., 7.]],
[[9., 9., 9.],
[3., 5., 7.],
[1., 2., 3.]]], dtype=tf.float32)
mask = tf.constant([
[1, 1, 0],
[1, 1, 1],
[1, 1, 1],
], dtype=tf.float32)
bm = reduce_max(SequenceBatch(array, mask))
assert_almost_equal(bm.eval(), correct, decimal=5)
bad_mask = tf.constant([
[0, 0, 0],
[1, 1, 1],
[1, 1, 1],
], dtype=tf.float32)
            bm2 = reduce_max(SequenceBatch(array, bad_mask))
with pytest.raises(InvalidArgumentError):
bm2.eval()
class TestReduceSum(object):
def test(self):
correct = np.array([-2, 2, 21])
with clean_session():
array = tf.constant([
[1, -8, 5, 4, 9],
[0, 2, 7, 8, 1],
[2, -8, 6, 4, 9],
], dtype=tf.float32)
mask = tf.constant([
[1, 1, 1, 0, 0],
[1, 1, 0, 0, 0],
[1, 0, 1, 1, 1],
], dtype=tf.float32)
result = reduce_sum(SequenceBatch(array, mask))
assert_almost_equal(result.eval(), correct, decimal=5)
| |
# coding: utf-8
#
# Copyright 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils import strutils
import wsme
from wsme import types as wtypes
from magnum.common import exception
from magnum.common import utils
from magnum.i18n import _
class MacAddressType(wtypes.UserType):
"""A simple MAC address type."""
basetype = wtypes.text
name = 'macaddress'
# FIXME(lucasagomes): When used with wsexpose decorator WSME will try
    # to get the name of the type by accessing its __name__ attribute.
# Remove this __name__ attribute once it's fixed in WSME.
# https://bugs.launchpad.net/wsme/+bug/1265590
__name__ = name
@staticmethod
def validate(value):
return utils.validate_and_normalize_mac(value)
@staticmethod
def frombasetype(value):
if value is None:
return None
return MacAddressType.validate(value)
class NameType(wtypes.UserType):
"""A logical name type."""
basetype = wtypes.text
name = 'name'
# FIXME(lucasagomes): When used with wsexpose decorator WSME will try
    # to get the name of the type by accessing its __name__ attribute.
# Remove this __name__ attribute once it's fixed in WSME.
# https://bugs.launchpad.net/wsme/+bug/1265590
__name__ = name
@staticmethod
def validate(value):
if not utils.is_name_safe(value):
raise exception.InvalidName(name=value)
return value
@staticmethod
def frombasetype(value):
if value is None:
return None
return NameType.validate(value)
class UuidType(wtypes.UserType):
"""A simple UUID type."""
basetype = wtypes.text
name = 'uuid'
# FIXME(lucasagomes): When used with wsexpose decorator WSME will try
    # to get the name of the type by accessing its __name__ attribute.
# Remove this __name__ attribute once it's fixed in WSME.
# https://bugs.launchpad.net/wsme/+bug/1265590
__name__ = name
@staticmethod
def validate(value):
if not utils.is_uuid_like(value):
raise exception.InvalidUUID(uuid=value)
return value
@staticmethod
def frombasetype(value):
if value is None:
return None
return UuidType.validate(value)
class BooleanType(wtypes.UserType):
"""A simple boolean type."""
basetype = wtypes.text
name = 'boolean'
# FIXME(lucasagomes): When used with wsexpose decorator WSME will try
    # to get the name of the type by accessing its __name__ attribute.
# Remove this __name__ attribute once it's fixed in WSME.
# https://bugs.launchpad.net/wsme/+bug/1265590
__name__ = name
@staticmethod
def validate(value):
try:
return strutils.bool_from_string(value, strict=True)
except ValueError as e:
# raise Invalid to return 400 (BadRequest) in the API
raise exception.Invalid(e)
@staticmethod
def frombasetype(value):
if value is None:
return None
return BooleanType.validate(value)
class MultiType(wtypes.UserType):
"""A complex type that represents one or more types.
Used for validating that a value is an instance of one of the types.
:param types: Variable-length list of types.
"""
basetype = wtypes.text
def __init__(self, *types):
self.types = types
def __str__(self):
return ' | '.join(map(str, self.types))
def validate(self, value):
for t in self.types:
try:
return wtypes.validate_value(t, value)
except (exception.InvalidUUID, ValueError):
pass
else:
raise ValueError(
_("Wrong type. Expected '%(type)s', got '%(value)s'")
% {'type': self.types, 'value': type(value)})
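# Illustrative note (hedged): MultiType tries each candidate type in order and
# returns the first successful validation, so a field declared as, e.g.,
# MultiType(wtypes.text, int) accepts either a string or an integer and raises
# ValueError for anything else.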
macaddress = MacAddressType()
uuid = UuidType()
name = NameType()
uuid_or_name = MultiType(UuidType, NameType)
boolean = BooleanType()
class JsonPatchType(wtypes.Base):
"""A complex type that represents a single json-patch operation."""
    path = wtypes.wsattr(wtypes.StringType(pattern=r'^(/[\w-]+)+$'),
mandatory=True)
op = wtypes.wsattr(wtypes.Enum(str, 'add', 'replace', 'remove'),
mandatory=True)
value = MultiType(wtypes.text, int)
@staticmethod
def internal_attrs():
"""Returns a list of internal attributes.
Internal attributes can't be added, replaced or removed. This
method may be overwritten by derived class.
"""
return ['/created_at', '/id', '/links', '/updated_at', '/uuid']
@staticmethod
def mandatory_attrs():
"""Retruns a list of mandatory attributes.
Mandatory attributes can't be removed from the document. This
method should be overwritten by derived class.
"""
return []
@staticmethod
def validate(patch):
if patch.path in patch.internal_attrs():
msg = _("'%s' is an internal attribute and can not be updated")
raise wsme.exc.ClientSideError(msg % patch.path)
if patch.path in patch.mandatory_attrs() and patch.op == 'remove':
msg = _("'%s' is a mandatory attribute and can not be removed")
raise wsme.exc.ClientSideError(msg % patch.path)
if patch.op != 'remove':
if not patch.value:
msg = _("'add' and 'replace' operations needs value")
raise wsme.exc.ClientSideError(msg)
ret = {'path': patch.path, 'op': patch.op}
if patch.value:
ret['value'] = patch.value
return ret
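# Hedged example of the wire format this type validates (paths are
# illustrative, not taken from a real resource): the body of a PATCH request
# is a list of such operations, e.g.
#
#     [{"path": "/name", "op": "replace", "value": "new-name"},
#      {"path": "/node_count", "op": "remove"}]
#
# validate() rejects operations that touch internal attributes, remove a
# mandatory attribute (as defined by the derived class), or omit a value for
# 'add'/'replace'.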
| |
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2020 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
from __future__ import absolute_import, unicode_literals
import multiprocessing
import os
import android.adb.commands
from swift_build_support.swift_build_support import targets
from swift_build_support.swift_build_support.targets import \
StdlibDeploymentTarget
from . import argparse
from . import defaults
__all__ = [
'create_argument_parser',
]
class _ApplyDefaultsArgumentParser(argparse.ArgumentParser):
"""Wrapper class around the default ArgumentParser that allows for
post-processing the parsed argument namespace to apply default argument
transformations.
"""
def __init__(self, apply_defaults=None, *args, **kwargs):
self._apply_defaults = apply_defaults
super(_ApplyDefaultsArgumentParser, self).__init__(*args, **kwargs)
def parse_known_args(self, args=None, namespace=None):
args, argv = super(_ApplyDefaultsArgumentParser, self)\
.parse_known_args(args, namespace)
self._apply_defaults(args)
return args, argv
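# Descriptive note: this wrapper lets defaults that depend on *other* arguments
# be resolved after parsing. Cross-option rules such as "any LLDB-related
# option implies building LLDB" live in _apply_default_arguments() below rather
# than in per-option default= values, which plain argparse cannot express.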
def _apply_default_arguments(args):
"""Preprocess argument namespace to apply default behaviors.
"""
# Build cmark if any cmark-related options were specified.
if (args.cmark_build_variant is not None):
args.build_cmark = True
# Build LLDB if any LLDB-related options were specified.
if args.lldb_build_variant is not None or \
args.lldb_assertions is not None or \
args.lldb_build_with_xcode is not None:
args.build_lldb = True
# Set the default build variant.
if args.build_variant is None:
args.build_variant = 'Debug'
if args.llvm_build_variant is None:
args.llvm_build_variant = args.build_variant
if args.swift_build_variant is None:
args.swift_build_variant = args.build_variant
if args.swift_stdlib_build_variant is None:
args.swift_stdlib_build_variant = args.build_variant
if args.cmark_build_variant is None:
args.cmark_build_variant = args.swift_build_variant
if args.lldb_build_variant is None:
args.lldb_build_variant = args.build_variant
if args.lldb_build_with_xcode is None:
args.lldb_build_with_xcode = '0'
if args.foundation_build_variant is None:
args.foundation_build_variant = args.build_variant
if args.libdispatch_build_variant is None:
args.libdispatch_build_variant = args.build_variant
if args.libicu_build_variant is None:
args.libicu_build_variant = args.build_variant
# Assertions are enabled by default.
if args.assertions is None:
args.assertions = True
# Propagate the default assertions setting.
if args.cmark_assertions is None:
args.cmark_assertions = args.assertions
if args.llvm_assertions is None:
args.llvm_assertions = args.assertions
if args.swift_assertions is None:
args.swift_assertions = args.assertions
if args.swift_stdlib_assertions is None:
args.swift_stdlib_assertions = args.assertions
if args.llbuild_assertions is None:
args.llbuild_assertions = args.assertions
if args.lldb_assertions is None:
args.lldb_assertions = args.assertions
# Set the default CMake generator.
if args.cmake_generator is None:
args.cmake_generator = 'Ninja'
# --ios-all etc are not supported by open-source Swift.
if args.ios_all:
raise ValueError('error: --ios-all is unavailable in open-source '
'Swift.\nUse --ios to skip iOS device tests.')
if args.tvos_all:
raise ValueError('error: --tvos-all is unavailable in open-source '
'Swift.\nUse --tvos to skip tvOS device tests.')
if args.watchos_all:
raise ValueError('error: --watchos-all is unavailable in open-source '
'Swift.\nUse --watchos to skip watchOS device tests.')
# --skip-{ios,tvos,watchos} or --skip-build-{ios,tvos,watchos} are
# merely shorthands for --skip-build-{**os}-{device,simulator}
if not args.ios or not args.build_ios:
args.build_ios_device = False
args.build_ios_simulator = False
if not args.tvos or not args.build_tvos:
args.build_tvos_device = False
args.build_tvos_simulator = False
if not args.watchos or not args.build_watchos:
args.build_watchos_device = False
args.build_watchos_simulator = False
if not args.android or not args.build_android:
args.build_android = False
# --test-paths implies --test and/or --validation-test
# depending on what directories/files have been specified.
if args.test_paths:
for path in args.test_paths:
if path.startswith('test'):
args.test = True
elif path.startswith('validation-test'):
args.test = True
args.validation_test = True
# --validation-test implies --test.
if args.validation_test:
args.test = True
# --test-optimized implies --test.
if args.test_optimized:
args.test = True
# --test-optimize-size implies --test.
if args.test_optimize_for_size:
args.test = True
# --test-optimize-none-with-implicit-dynamic implies --test.
if args.test_optimize_none_with_implicit_dynamic:
args.test = True
# If none of tests specified skip swift stdlib test on all platforms
if not args.test and not args.validation_test and not args.long_test:
args.test_linux = False
args.test_freebsd = False
args.test_cygwin = False
args.test_osx = False
args.test_ios = False
args.test_tvos = False
args.test_watchos = False
args.test_android = False
args.test_cmark = False
args.test_swiftpm = False
args.test_swift_driver = False
args.test_swiftsyntax = False
args.test_indexstoredb = False
args.test_sourcekitlsp = False
args.test_skstresstester = False
args.test_swiftformat = False
args.test_swiftevolve = False
args.test_toolchainbenchmarks = False
# --test implies --test-early-swift-driver
# (unless explicitly skipped with `--skip-test-early-swift-driver`)
if args.test and (args.build_early_swift_driver and
args.test_early_swift_driver is None):
args.test_early_swift_driver = True
# --skip-test-ios is merely a shorthand for host and simulator tests.
if not args.test_ios:
args.test_ios_host = False
args.test_ios_simulator = False
# --skip-test-tvos is merely a shorthand for host and simulator tests.
if not args.test_tvos:
args.test_tvos_host = False
args.test_tvos_simulator = False
# --skip-test-watchos is merely a shorthand for host and simulator
# --tests.
if not args.test_watchos:
args.test_watchos_host = False
args.test_watchos_simulator = False
# --skip-build-{ios,tvos,watchos}-{device,simulator} implies
# --skip-test-{ios,tvos,watchos}-{host,simulator}
if not args.build_ios_device:
args.test_ios_host = False
if not args.build_ios_simulator:
args.test_ios_simulator = False
if not args.build_tvos_device:
args.test_tvos_host = False
if not args.build_tvos_simulator:
args.test_tvos_simulator = False
if not args.build_watchos_device:
args.test_watchos_host = False
if not args.build_watchos_simulator:
args.test_watchos_simulator = False
if not args.build_android:
# If building natively on an Android host, allow running the test suite
# without the NDK config.
if not StdlibDeploymentTarget.Android.contains(StdlibDeploymentTarget
.host_target().name):
args.test_android = False
args.test_android_host = False
if not args.test_android:
args.test_android_host = False
if not args.host_test:
args.test_ios_host = False
args.test_tvos_host = False
args.test_watchos_host = False
args.test_android_host = False
def create_argument_parser():
"""Return a configured argument parser."""
# NOTE: USAGE, DESCRIPTION and EPILOG are defined at the bottom of the file
parser = _ApplyDefaultsArgumentParser(
apply_defaults=_apply_default_arguments,
formatter_class=argparse.RawDescriptionHelpFormatter,
usage=USAGE,
description=DESCRIPTION,
epilog=EPILOG)
builder = parser.to_builder()
# Prepare DSL functions
option = builder.add_option
set_defaults = builder.set_defaults
in_group = builder.in_group
mutually_exclusive_group = builder.mutually_exclusive_group
# Prepare DSL actions
append = builder.actions.append
store = builder.actions.store
store_true = builder.actions.store_true
store_false = builder.actions.store_false
store_int = builder.actions.store_int
store_path = builder.actions.store_path
toggle_true = builder.actions.toggle_true
toggle_false = builder.actions.toggle_false
unsupported = builder.actions.unsupported
# -------------------------------------------------------------------------
# Top-level options
option(['-n', '--dry-run'], store_true,
help='print the commands that would be executed, but do not '
'execute them')
option('--dump-config', toggle_true,
help='instead of building, write JSON to stdout containing '
'various values used to build in this configuration')
option(['--reconfigure'], store_true,
help="Reconfigure all projects as we build")
option('--legacy-impl', store_true('legacy_impl'),
help='use legacy implementation')
option('--build-runtime-with-host-compiler', toggle_true,
help='Use the host compiler, not the self-built one to compile the '
'Swift runtime')
option(['-i', '--ios'], store_true,
help='also build for iOS, but disallow tests that require an iOS '
'device')
option(['-I', '--ios-all'], store_true('ios_all'),
help='also build for iOS, and allow all iOS tests')
option(['--skip-local-build'], toggle_true('skip_local_build'),
help='set to skip building for the local platform')
option('--skip-ios', store_false('ios'),
help='set to skip everything iOS-related')
option('--tvos', toggle_true,
help='also build for tvOS, but disallow tests that require a tvos '
'device')
option('--tvos-all', toggle_true('tvos_all'),
help='also build for tvOS, and allow all tvOS tests')
option('--skip-tvos', store_false('tvos'),
help='set to skip everything tvOS-related')
option('--watchos', toggle_true,
           help='also build for watchOS, but disallow tests that require a '
                'watchOS device')
option('--watchos-all', toggle_true('watchos_all'),
help='also build for Apple watchOS, and allow all Apple watchOS '
'tests')
option('--skip-watchos', store_false('watchos'),
help='set to skip everything watchOS-related')
option('--maccatalyst', toggle_true,
help='Enable building Swift with macCatalyst support')
option('--maccatalyst-ios-tests', toggle_true,
help='When building for macCatalyst run tests with iOS-like '
'target triple')
option('--android', toggle_true,
help='also build for Android')
option('--swift-analyze-code-coverage', store,
choices=['false', 'not-merged', 'merged'],
# so CMake can see the inert mode as a false value
default=defaults.SWIFT_ANALYZE_CODE_COVERAGE,
help='enable code coverage analysis in Swift (false, not-merged, '
'merged).')
option('--swift-disable-dead-stripping', toggle_true,
help="Turn off Darwin-specific dead stripping for Swift host tools")
option('--build-subdir', store,
metavar='PATH',
help='name of the directory under $SWIFT_BUILD_ROOT where the '
'build products will be placed')
option('--relocate-xdg-cache-home-under-build-subdir',
store_true,
help='relocate $XDG_CACHE_HOME to the same location '
'where build products will be placed; '
'this supports having multiple runs for different branches '
'in CI bots for Linux')
option('--install-prefix', store_path,
default=targets.install_prefix(),
help='The installation prefix. This is where built Swift products '
'(like bin, lib, and include) will be installed.')
option('--install-symroot', store_path,
help='the path to install debug symbols into')
option('--install-destdir', store_path,
help='the path to use as the filesystem root for the installation')
option('--install-all', toggle_true,
help='Assume all built products should be installed')
option(['-j', '--jobs'], store_int('build_jobs'),
default=multiprocessing.cpu_count(),
help='the number of parallel build jobs to use')
option('--darwin-xcrun-toolchain', store,
help='the name of the toolchain to use on Darwin')
option('--cmake', store_path(executable=True),
help='the path to a CMake executable that will be used to build '
'Swift')
option('--show-sdks', toggle_true,
help='print installed Xcode and SDK versions')
option('--extra-swift-args', append,
help='Pass through extra flags to swift in the form of a CMake '
'list "module_regexp;flag". Can be called multiple times to '
'add multiple such module_regexp flag pairs. All semicolons '
'in flags must be escaped with a "\\"')
option('--host-cc', store_path(executable=True),
help='the absolute path to CC, the "clang" compiler for the host '
'platform. Default is auto detected.')
option('--host-cxx', store_path(executable=True),
help='the absolute path to CXX, the "clang++" compiler for the '
'host platform. Default is auto detected.')
option('--native-swift-tools-path', store_path,
help='the path to a directory that contains prebuilt Swift tools '
'that are executable on the host platform')
option('--native-clang-tools-path', store_path,
help='the path to a directory that contains prebuilt Clang tools '
'that are executable on the host platform')
option('--native-llvm-tools-path', store_path,
help='the path to a directory that contains prebuilt LLVM tools '
'that are executable on the host platform')
option('--cmake-c-launcher', store_path(executable=True),
default=os.environ.get('C_COMPILER_LAUNCHER', None),
help='the absolute path to set CMAKE_C_COMPILER_LAUNCHER')
option('--cmake-cxx-launcher', store_path(executable=True),
default=os.environ.get('CXX_COMPILER_LAUNCHER', None),
help='the absolute path to set CMAKE_CXX_COMPILER_LAUNCHER')
option('--host-lipo', store_path(executable=True),
help='the absolute path to lipo. Default is auto detected.')
option('--host-libtool', store_path(executable=True),
help='the absolute path to libtool. Default is auto detected.')
option('--distcc', toggle_true,
default=os.environ.get('USE_DISTCC') == '1',
help='use distcc in pump mode')
option('--sccache', toggle_true,
default=os.environ.get('SWIFT_USE_SCCACHE') == '1',
help='use sccache')
option('--enable-asan', toggle_true,
help='enable Address Sanitizer')
option('--enable-ubsan', toggle_true,
help='enable Undefined Behavior Sanitizer')
option('--enable-tsan', toggle_true,
help='enable Thread Sanitizer for swift tools')
option('--enable-tsan-runtime', toggle_true,
help='enable Thread Sanitizer on the swift runtime')
option('--enable-lsan', toggle_true,
help='enable Leak Sanitizer for swift tools')
option('--enable-sanitize-coverage', toggle_true,
help='enable sanitizer coverage for swift tools. Necessary for '
'fuzzing swiftc')
option('--compiler-vendor', store,
choices=['none', 'apple'],
default=defaults.COMPILER_VENDOR,
help='Compiler vendor name')
option('--clang-compiler-version', store,
type=argparse.ClangVersionType(),
metavar='MAJOR.MINOR.PATCH',
help='string that indicates a compiler version for Clang')
option('--clang-user-visible-version', store,
type=argparse.ClangVersionType(),
default=defaults.CLANG_USER_VISIBLE_VERSION,
metavar='MAJOR.MINOR.PATCH',
help='User-visible version of the embedded Clang and LLVM '
'compilers')
option('--swift-compiler-version', store,
type=argparse.SwiftVersionType(),
metavar='MAJOR.MINOR',
help='string that indicates a compiler version for Swift')
option('--swift-user-visible-version', store,
type=argparse.SwiftVersionType(),
default=defaults.SWIFT_USER_VISIBLE_VERSION,
metavar='MAJOR.MINOR',
help='User-visible version of the embedded Swift compiler')
option('--darwin-deployment-version-osx', store,
default=defaults.DARWIN_DEPLOYMENT_VERSION_OSX,
metavar='MAJOR.MINOR',
help='minimum deployment target version for OS X')
option('--darwin-deployment-version-ios', store,
default=defaults.DARWIN_DEPLOYMENT_VERSION_IOS,
metavar='MAJOR.MINOR',
help='minimum deployment target version for iOS')
option('--darwin-deployment-version-tvos', store,
default=defaults.DARWIN_DEPLOYMENT_VERSION_TVOS,
metavar='MAJOR.MINOR',
help='minimum deployment target version for tvOS')
option('--darwin-deployment-version-watchos', store,
default=defaults.DARWIN_DEPLOYMENT_VERSION_WATCHOS,
metavar='MAJOR.MINOR',
help='minimum deployment target version for watchOS')
option('--extra-cmake-options', append,
type=argparse.ShellSplitType(),
help='Pass through extra options to CMake in the form of comma '
'separated options "-DCMAKE_VAR1=YES,-DCMAKE_VAR2=/tmp". Can '
'be called multiple times to add multiple such options.')
option('--build-args', store,
type=argparse.ShellSplitType(),
default=[],
help='arguments to the build tool. This would be prepended to the '
'default argument that is "-j8" when CMake generator is '
'"Ninja".')
option('--verbose-build', toggle_true,
help='print the commands executed during the build')
option('--lto', store('lto_type'),
choices=['thin', 'full'],
const='full',
default=None,
metavar='LTO_TYPE',
help='use lto optimization on llvm/swift tools. This does not '
'imply using lto on the swift standard library or runtime. '
'Options: thin, full. If no optional arg is provided, full is '
'chosen by default')
option('--clang-profile-instr-use', store_path,
help='profile file to use for clang PGO')
option('--llvm-max-parallel-lto-link-jobs', store_int,
default=defaults.LLVM_MAX_PARALLEL_LTO_LINK_JOBS,
metavar='COUNT',
help='the maximum number of parallel link jobs to use when '
'compiling llvm')
option('--swift-tools-max-parallel-lto-link-jobs', store_int,
default=defaults.SWIFT_MAX_PARALLEL_LTO_LINK_JOBS,
metavar='COUNT',
help='the maximum number of parallel link jobs to use when '
'compiling swift tools.')
option('--dsymutil-jobs', store_int,
default=defaults.DSYMUTIL_JOBS,
metavar='COUNT',
help='the maximum number of parallel dsymutil jobs to use when '
                'extracting symbols. Tweak with caution, since dsymutil '
'is memory intensive.')
option('--disable-guaranteed-normal-arguments', store_true,
help='Disable guaranteed normal arguments')
option('--enable-stdlibcore-exclusivity-checking', store_true,
help='Enable exclusivity checking in stdlibCore')
option('--force-optimized-typechecker', store_true,
help='Force the type checker to be built with '
'optimization')
option('--lit-args', store,
default='-sv',
metavar='LITARGS',
help='lit args to use when testing')
option('--coverage-db', store_path,
help='coverage database to use when prioritizing testing')
option('--llvm-install-components', store,
default=defaults.llvm_install_components(),
help='A semi-colon split list of llvm components to install')
# -------------------------------------------------------------------------
in_group('Host and cross-compilation targets')
option('--host-target', store,
default=StdlibDeploymentTarget.host_target().name,
help='The host target. LLVM, Clang, and Swift will be built for '
'this target. The built LLVM and Clang will be used to '
'compile Swift for the cross-compilation targets.')
option('--cross-compile-hosts', append,
type=argparse.ShellSplitType(),
default=[],
help='A space separated list of targets to cross-compile host '
'Swift tools for. Can be used multiple times.')
option('--stdlib-deployment-targets', store,
type=argparse.ShellSplitType(),
default=None,
help='The targets to compile or cross-compile the Swift standard '
'library for. %(default)s by default.'
' Comma separated list: {}'.format(
' '.join(StdlibDeploymentTarget.get_target_names())))
option('--build-stdlib-deployment-targets', store,
type=argparse.ShellSplitType(),
default=['all'],
help='A space-separated list that filters which of the configured '
'targets to build the Swift standard library for, or "all".')
option('--swift-darwin-supported-archs', store,
metavar='ARCHS',
help='Semicolon-separated list of architectures to configure on '
'Darwin platforms. If left empty all default architectures '
'are configured.')
option('--swift-darwin-module-archs', store,
metavar='ARCHS',
help='Semicolon-separated list of architectures to configure Swift '
'module-only targets on Darwin platforms. These targets are '
'in addition to the full library targets.')
# -------------------------------------------------------------------------
in_group('Options to select projects')
option('--infer', toggle_true('infer_dependencies'),
help='Infer any downstream dependencies from enabled projects')
option(['-l', '--lldb'], toggle_true('build_lldb'),
help='build LLDB')
option(['-b', '--llbuild'], toggle_true('build_llbuild'),
help='build llbuild')
option(['--back-deploy-concurrency'], toggle_true('build_backdeployconcurrency'),
help='build back-deployment support for concurrency')
option(['--install-back-deploy-concurrency'],
toggle_true('install_backdeployconcurrency'),
help='install back-deployment support libraries for concurrency')
option(['--libcxx'], toggle_true('build_libcxx'),
help='build libcxx')
option(['-p', '--swiftpm'], toggle_true('build_swiftpm'),
help='build swiftpm')
option(['--install-swiftpm'], toggle_true('install_swiftpm'),
help='install swiftpm')
option(['--swiftsyntax'], toggle_true('build_swiftsyntax'),
help='build swiftSyntax')
option(['--skstresstester'], toggle_true('build_skstresstester'),
help='build the SourceKit stress tester')
option(['--swiftformat'], toggle_true('build_swiftformat'),
help='build swift-format')
option(['--swiftevolve'], toggle_true('build_swiftevolve'),
help='build the swift-evolve tool')
option(['--swift-driver'], toggle_true('build_swift_driver'),
help='build swift-driver')
option(['--skip-early-swift-driver'], toggle_false('build_early_swift_driver'),
help='skip building the early swift-driver')
option(['--indexstore-db'], toggle_true('build_indexstoredb'),
help='build IndexStoreDB')
option('--test-indexstore-db-sanitize-all',
toggle_true('test_indexstoredb_sanitize_all'),
help='run indexstore-db tests under all sanitizers')
option(['--sourcekit-lsp'], toggle_true('build_sourcekitlsp'),
help='build SourceKitLSP')
option('--test-sourcekit-lsp-sanitize-all',
toggle_true('test_sourcekitlsp_sanitize_all'),
help='run sourcekit-lsp tests under all sanitizers')
option('--install-swiftsyntax', toggle_true('install_swiftsyntax'),
help='install SwiftSyntax')
option('--swiftsyntax-verify-generated-files',
toggle_true('swiftsyntax_verify_generated_files'),
help='set to verify that the generated files in the source tree '
'match the ones that would be generated from current main')
option(['--install-sourcekit-lsp'], toggle_true('install_sourcekitlsp'),
help='install SourceKitLSP')
option(['--install-skstresstester'], toggle_true('install_skstresstester'),
help='install the SourceKit stress tester')
option(['--install-swift-driver'], toggle_true('install_swift_driver'),
help='install new Swift driver')
option(['--install-swiftevolve'], toggle_true('install_swiftevolve'),
help='install SwiftEvolve')
option(['--toolchain-benchmarks'],
toggle_true('build_toolchainbenchmarks'),
help='build Swift Benchmarks using swiftpm against the just built '
'toolchain')
option(['--swift-inspect'],
toggle_true('build_swift_inspect'),
help='build SwiftInspect using swiftpm against the just built '
'toolchain')
option('--xctest', toggle_true('build_xctest'),
help='build xctest')
option('--foundation', toggle_true('build_foundation'),
help='build foundation')
option('--libdispatch', toggle_true('build_libdispatch'),
help='build libdispatch')
option('--libicu', toggle_true('build_libicu'),
help='build libicu')
option('--playgroundsupport', toggle_true('build_playgroundsupport'),
help='build PlaygroundSupport')
option('--install-playgroundsupport',
toggle_true('install_playgroundsupport'),
help='install playground support')
option('--build-ninja', toggle_true,
help='build the Ninja tool')
option(['--build-libparser-only'], toggle_true('build_libparser_only'),
help='build only libParser for SwiftSyntax')
option('--skip-build-clang-tools-extra',
toggle_false('build_clang_tools_extra'),
default=True,
help='skip building clang-tools-extra as part of llvm')
# -------------------------------------------------------------------------
in_group('Extra actions to perform before or in addition to building')
option(['-c', '--clean'], store_true,
help='do a clean build')
option('--export-compile-commands', toggle_true,
help='generate compilation databases in addition to building')
option('--symbols-package', store_path,
help='if provided, an archive of the symbols directory will be '
'generated at this path')
option('--darwin-symroot-path-filters', append,
type=argparse.ShellSplitType(),
help='Space separated list of patterns used to match '
'a subset of files to generate symbols for. '
'Only supported on Darwin. Can be called multiple times '
'to add multiple such options.')
# -------------------------------------------------------------------------
in_group('Build variant')
with mutually_exclusive_group():
set_defaults(build_variant='Debug')
option(['-d', '--debug'], store('build_variant'),
const='Debug',
help='build the Debug variant of everything (LLVM, Clang, '
'Swift host tools, target Swift standard libraries, LLDB) '
'(default is %(default)s)')
option(['-r', '--release-debuginfo'], store('build_variant'),
const='RelWithDebInfo',
help='build the RelWithDebInfo variant of everything (default '
'is %(default)s)')
option(['-R', '--release'], store('build_variant'),
const='Release',
help='build the Release variant of everything (default is '
'%(default)s)')
option(['--min-size-release'], store('build_variant'),
const='MinSizeRel',
help='build the MinSizeRel variant of everything (default is '
'%(default)s)')
# -------------------------------------------------------------------------
in_group('Override build variant for a specific project')
option('--debug-llvm', store('llvm_build_variant'),
const='Debug',
help='build the Debug variant of LLVM')
option('--debug-swift', store('swift_build_variant'),
const='Debug',
help='build the Debug variant of Swift host tools')
option('--debug-swift-stdlib', store('swift_stdlib_build_variant'),
const='Debug',
help='build the Debug variant of the Swift standard library and '
' SDK overlay')
option('--debug-lldb', store('lldb_build_variant'),
const='Debug',
help='build the Debug variant of LLDB')
option('--lldb-build-with-xcode', store('lldb_build_with_xcode'),
const='1',
help='build LLDB using xcodebuild, if possible')
option('--lldb-build-with-cmake', store('lldb_build_with_xcode'),
const='0',
help='build LLDB using CMake')
option('--debug-cmark', store('cmark_build_variant'),
const='Debug',
help='build the Debug variant of CommonMark')
option('--debug-foundation', store('foundation_build_variant'),
const='Debug',
help='build the Debug variant of Foundation')
option('--debug-libdispatch', store('libdispatch_build_variant'),
const='Debug',
help='build the Debug variant of libdispatch')
option('--debug-libicu', store('libicu_build_variant'),
const='Debug',
help='build the Debug variant of libicu')
# -------------------------------------------------------------------------
# Assertions group
with mutually_exclusive_group():
set_defaults(assertions=True)
# TODO: Convert to store_true
option(['-a', '--assertions'], store,
const=True,
help='enable assertions in all projects')
# TODO: Convert to store_false
option(['-A', '--no-assertions'], store('assertions'),
const=False,
help='disable assertions in all projects')
# -------------------------------------------------------------------------
in_group('Control assertions in a specific project')
option('--cmark-assertions', store,
const=True,
help='enable assertions in CommonMark')
option('--llvm-assertions', store,
const=True,
help='enable assertions in LLVM')
option('--no-llvm-assertions', store('llvm_assertions'),
const=False,
help='disable assertions in LLVM')
option('--swift-assertions', store,
const=True,
help='enable assertions in Swift')
option('--no-swift-assertions', store('swift_assertions'),
const=False,
help='disable assertions in Swift')
option('--swift-stdlib-assertions', store,
const=True,
help='enable assertions in the Swift standard library')
option('--no-swift-stdlib-assertions', store('swift_stdlib_assertions'),
const=False,
help='disable assertions in the Swift standard library')
option('--lldb-assertions', store,
const=True,
help='enable assertions in LLDB')
option('--no-lldb-assertions', store('lldb_assertions'),
const=False,
help='disable assertions in LLDB')
option('--llbuild-assertions', store,
const=True,
help='enable assertions in llbuild')
option('--no-llbuild-assertions', store('llbuild_assertions'),
const=False,
help='disable assertions in llbuild')
# -------------------------------------------------------------------------
in_group('Select the CMake generator')
set_defaults(cmake_generator=defaults.CMAKE_GENERATOR)
option(['-e', '--eclipse'], store('cmake_generator'),
const='Eclipse CDT4 - Ninja',
help="use CMake's Eclipse generator (%(default)s by default)")
option(['-m', '--make'], store('cmake_generator'),
const='Unix Makefiles',
help="use CMake's Makefile generator (%(default)s by default)")
option(['-x', '--xcode'], store('cmake_generator'),
const='Xcode',
help="use CMake's Xcode generator (%(default)s by default)")
# -------------------------------------------------------------------------
in_group('Run tests')
# NOTE: We can't merge -t and --test, because nargs='?' makes
# `-ti` to be treated as `-t=i`.
# FIXME: Convert to store_true action
option('-t', store('test', const=True),
help='test Swift after building')
option('--test', toggle_true,
help='test Swift after building')
option('-T', store('validation_test', const=True),
help='run the validation test suite (implies --test)')
option('--validation-test', toggle_true,
help='run the validation test suite (implies --test)')
# FIXME: Convert to store_true action
option('-o', store('test_optimized', const=True),
help='run the test suite in optimized mode too (implies --test)')
option('--test-optimized', toggle_true,
help='run the test suite in optimized mode too (implies --test)')
# FIXME: Convert to store_true action
option('-s', store('test_optimize_for_size', const=True),
help='run the test suite in optimize for size mode too '
'(implies --test)')
option('--test-optimize-for-size', toggle_true,
help='run the test suite in optimize for size mode too '
'(implies --test)')
# FIXME: Convert to store_true action
option('-y', store('test_optimize_none_with_implicit_dynamic', const=True),
help='run the test suite in optimize none with implicit dynamic'
' mode too (implies --test)')
option('--test-optimize-none-with-implicit-dynamic', toggle_true,
help='run the test suite in optimize none with implicit dynamic'
           ' mode too (implies --test)')
option('--long-test', toggle_true,
help='run the long test suite')
option('--stress-test', toggle_true,
help='run the stress test suite')
option('--host-test', toggle_true,
help='run executable tests on host devices (such as iOS or tvOS)')
option('--only-executable-test', toggle_true,
help='Only run executable tests. Does nothing if host-test is not '
'allowed')
option('--only-non-executable-test', toggle_true,
help='Only run non-executable tests.')
option('--test-paths', append,
type=argparse.ShellSplitType(),
help='run tests located in specific directories and/or files '
'(implies --test and/or --validation-test)')
option(['-B', '--benchmark'], store_true,
help='run the Swift Benchmark Suite after building')
option('--benchmark-num-o-iterations', store_int,
default=3,
help='if the Swift Benchmark Suite is run after building, run N '
'iterations with -O')
option('--benchmark-num-onone-iterations', store_int,
default=3,
help='if the Swift Benchmark Suite is run after building, run N '
'iterations with -Onone')
# We want to run the TSan (compiler-rt) libdispatch tests on Linux, where
# libdispatch is just another library and not available by default. To do
# so we build Clang/LLVM/libdispatch and use it to compile/run the TSan
# libdispatch tests.
option('--tsan-libdispatch-test', toggle_true,
help='Builds a new toolchain including the libdispatch C library. '
'Then re-builds the TSan runtime (compiler-rt) using this '
'freshly-built Clang and runs the TSan libdispatch tests.')
option('--skip-test-osx', toggle_false('test_osx'),
help='skip testing Swift stdlibs for Mac OS X')
option('--skip-test-linux', toggle_false('test_linux'),
help='skip testing Swift stdlibs for Linux')
option('--skip-test-freebsd', toggle_false('test_freebsd'),
help='skip testing Swift stdlibs for FreeBSD')
option('--skip-test-cygwin', toggle_false('test_cygwin'),
help='skip testing Swift stdlibs for Cygwin')
# -------------------------------------------------------------------------
in_group('Run build')
option('--build-swift-dynamic-stdlib', toggle_true,
default=True,
help='build dynamic variants of the Swift standard library')
option('--build-swift-static-stdlib', toggle_true,
help='build static variants of the Swift standard library')
option('--build-swift-dynamic-sdk-overlay', toggle_true,
default=True,
help='build dynamic variants of the Swift SDK overlay')
option('--build-swift-static-sdk-overlay', toggle_true,
help='build static variants of the Swift SDK overlay')
option('--build-swift-stdlib-unittest-extra', toggle_true,
help='Build optional StdlibUnittest components')
option(['-S', '--skip-build'], store_true,
help='generate build directory only without building')
option('--skip-build-linux', toggle_false('build_linux'),
help='skip building Swift stdlibs for Linux')
option('--skip-build-freebsd', toggle_false('build_freebsd'),
help='skip building Swift stdlibs for FreeBSD')
option('--skip-build-cygwin', toggle_false('build_cygwin'),
help='skip building Swift stdlibs for Cygwin')
option('--skip-build-osx', toggle_false('build_osx'),
help='skip building Swift stdlibs for MacOSX')
option('--skip-build-ios', toggle_false('build_ios'),
help='skip building Swift stdlibs for iOS')
option('--skip-build-ios-device', toggle_false('build_ios_device'),
help='skip building Swift stdlibs for iOS devices '
'(i.e. build simulators only)')
option('--skip-build-ios-simulator', toggle_false('build_ios_simulator'),
help='skip building Swift stdlibs for iOS simulator '
'(i.e. build devices only)')
option('--skip-build-tvos', toggle_false('build_tvos'),
help='skip building Swift stdlibs for tvOS')
option('--skip-build-tvos-device', toggle_false('build_tvos_device'),
help='skip building Swift stdlibs for tvOS devices '
'(i.e. build simulators only)')
option('--skip-build-tvos-simulator', toggle_false('build_tvos_simulator'),
help='skip building Swift stdlibs for tvOS simulator '
'(i.e. build devices only)')
option('--skip-build-watchos', toggle_false('build_watchos'),
help='skip building Swift stdlibs for watchOS')
option('--skip-build-watchos-device', toggle_false('build_watchos_device'),
help='skip building Swift stdlibs for watchOS devices '
'(i.e. build simulators only)')
option('--skip-build-watchos-simulator',
toggle_false('build_watchos_simulator'),
help='skip building Swift stdlibs for watchOS simulator '
'(i.e. build devices only)')
option('--skip-build-android', toggle_false('build_android'),
help='skip building Swift stdlibs for Android')
option('--skip-build-benchmarks', toggle_false('build_benchmarks'),
help='skip building Swift Benchmark Suite')
option('--build-external-benchmarks', toggle_true,
           help='build external benchmarks')
# -------------------------------------------------------------------------
in_group('Skip testing specified targets')
option('--skip-test-ios',
toggle_false('test_ios'),
help='skip testing all iOS targets. Equivalent to specifying both '
'--skip-test-ios-simulator and --skip-test-ios-host')
option('--skip-test-ios-simulator',
toggle_false('test_ios_simulator'),
help='skip testing iOS simulator targets')
option('--skip-test-ios-32bit-simulator',
toggle_false('test_ios_32bit_simulator'),
default=False,
help='skip testing iOS 32 bit simulator targets')
option('--skip-test-watchos-32bit-simulator',
toggle_false('test_watchos_32bit_simulator'),
default=True,
help='skip testing watchOS 32 bit simulator targets')
option('--skip-test-ios-host',
toggle_false('test_ios_host'),
help='skip testing iOS device targets on the host machine (the '
'phone itself)')
option('--skip-test-tvos',
toggle_false('test_tvos'),
help='skip testing all tvOS targets. Equivalent to specifying both '
'--skip-test-tvos-simulator and --skip-test-tvos-host')
option('--skip-test-tvos-simulator',
toggle_false('test_tvos_simulator'),
help='skip testing tvOS simulator targets')
option('--skip-test-tvos-host',
toggle_false('test_tvos_host'),
help='skip testing tvOS device targets on the host machine (the '
'TV itself)')
option('--skip-test-watchos',
toggle_false('test_watchos'),
           help='skip testing all watchOS targets. Equivalent to specifying both '
'--skip-test-watchos-simulator and --skip-test-watchos-host')
option('--skip-test-watchos-simulator',
toggle_false('test_watchos_simulator'),
help='skip testing watchOS simulator targets')
option('--skip-test-watchos-host',
toggle_false('test_watchos_host'),
help='skip testing watchOS device targets on the host machine (the '
'watch itself)')
option('--skip-test-android',
toggle_false('test_android'),
help='skip testing all Android targets.')
option('--skip-test-android-host',
toggle_false('test_android_host'),
help='skip testing Android device targets on the host machine (the '
'phone itself)')
option('--skip-clean-llbuild', toggle_false('clean_llbuild'),
help='skip cleaning up llbuild')
option('--clean-early-swift-driver', toggle_true('clean_early_swift_driver'),
help='Clean up the early SwiftDriver')
option('--skip-test-early-swift-driver',
store('test_early_swift_driver', const=False),
           help='skip testing the early SwiftDriver against the host toolchain')
option('--skip-clean-swiftpm', toggle_false('clean_swiftpm'),
help='skip cleaning up swiftpm')
option('--skip-clean-swift-driver', toggle_false('clean_swift_driver'),
help='skip cleaning up Swift driver')
option('--skip-test-cmark', toggle_false('test_cmark'),
help='skip testing cmark')
option('--skip-test-swiftpm', toggle_false('test_swiftpm'),
help='skip testing swiftpm')
option('--skip-test-swift-driver', toggle_false('test_swift_driver'),
help='skip testing Swift driver')
option('--skip-test-swiftsyntax', toggle_false('test_swiftsyntax'),
help='skip testing SwiftSyntax')
option('--skip-test-indexstore-db', toggle_false('test_indexstoredb'),
help='skip testing indexstore-db')
option('--skip-test-sourcekit-lsp', toggle_false('test_sourcekitlsp'),
help='skip testing sourcekit-lsp')
option('--skip-test-playgroundsupport',
toggle_false('test_playgroundsupport'),
help='skip testing PlaygroundSupport')
option('--skip-test-skstresstester', toggle_false('test_skstresstester'),
help='skip testing the SourceKit Stress tester')
option('--skip-test-swiftformat', toggle_false('test_swiftformat'),
help='skip testing swift-format')
option('--skip-test-swiftevolve', toggle_false('test_swiftevolve'),
help='skip testing SwiftEvolve')
option('--skip-test-toolchain-benchmarks',
toggle_false('test_toolchainbenchmarks'),
help='skip testing toolchain benchmarks')
option('--skip-test-swift-inspect',
toggle_false('test_swift_inspect'),
help='skip testing swift_inspect')
# -------------------------------------------------------------------------
in_group('Build settings specific for LLVM')
option('--llvm-targets-to-build', store,
default='X86;ARM;AArch64;PowerPC;SystemZ;Mips',
help='LLVM target generators to build')
option('--llvm-ninja-targets', append,
type=argparse.ShellSplitType(),
help='Space separated list of ninja targets to build for LLVM '
'instead of the default ones. Only supported when using '
'ninja to build. Can be called multiple times '
'to add multiple such options.')
option('--llvm-ninja-targets-for-cross-compile-hosts', append,
type=argparse.ShellSplitType(),
help='Space separated list of ninja targets to build for LLVM '
'in cross compile hosts instead of the ones specified in '
'llvm-ninja-targets (or the default ones). '
'Can be called multiple times '
'to add multiple such options.')
# -------------------------------------------------------------------------
in_group('Build settings for Android')
option('--android-ndk', store_path,
help='An absolute path to the NDK that will be used as a libc '
'implementation for Android builds')
option('--android-api-level', store,
default='21',
help='The Android API level to target when building for Android. '
'Currently only 21 or above is supported')
option('--android-ndk-gcc-version', store,
choices=['4.8', '4.9'],
default='4.9',
help='The GCC version to use when building for Android. Currently '
'only 4.9 is supported. %(default)s is also the default '
'value. This option may be used when experimenting with '
'versions of the Android NDK not officially supported by '
'Swift')
option('--android-icu-uc', store_path,
help='Path to libicuuc.so')
option('--android-icu-uc-include', store_path,
help='Path to a directory containing headers for libicuuc')
option('--android-icu-i18n', store_path,
help='Path to libicui18n.so')
option('--android-icu-i18n-include', store_path,
           help='Path to a directory containing headers for libicui18n')
option('--android-icu-data', store_path,
help='Path to libicudata.so')
option('--android-deploy-device-path', store_path,
default=android.adb.commands.DEVICE_TEMP_DIR,
help='Path on an Android device to which built Swift stdlib '
'products will be deployed. If running host tests, specify '
'the "{}" directory.'.format(
android.adb.commands.DEVICE_TEMP_DIR))
option('--android-arch', store,
choices=['armv7', 'aarch64', 'x86_64'],
default='armv7',
help='The target architecture when building for Android. '
'Currently, only armv7, aarch64, and x86_64 are supported. '
'%(default)s is the default.')
# -------------------------------------------------------------------------
in_group('Experimental language features')
option('--enable-experimental-differentiable-programming', toggle_true,
default=True,
help='Enable experimental Swift differentiable programming language'
' features.')
option('--enable-experimental-concurrency', toggle_true,
default=True,
help='Enable experimental Swift concurrency model.')
option('--enable-experimental-distributed', toggle_true,
default=True,
help='Enable experimental Swift distributed actors.')
# -------------------------------------------------------------------------
in_group('Unsupported options')
option('--build-jobs', unsupported)
option('--common-cmake-options', unsupported)
option('--only-execute', unsupported)
option('--skip-test-optimize-for-size', unsupported)
option('--skip-test-optimize-none-with-implicit-dynamic', unsupported)
option('--skip-test-optimized', unsupported)
# -------------------------------------------------------------------------
in_group('Build-script-impl arguments (for disambiguation)')
# We need to represent these options so that we can skip installing them if
# the user is running in install-all mode.
option('--skip-build-cmark', toggle_false('build_cmark'),
help='skip building cmark')
option('--skip-build-llvm', toggle_false('build_llvm'),
help='skip building llvm')
option('--skip-build-swift', toggle_false('build_swift'),
help='skip building swift')
# We need to list --skip-test-swift explicitly because otherwise argparse
# will auto-expand arguments like --skip-test-swift to the only known
# argument --skip-test-swiftevolve.
# These arguments are forwarded to impl_args in migration.py
option('--install-swift', toggle_true('impl_install_swift'))
option('--skip-test-swift', toggle_true('impl_skip_test_swift'))
# -------------------------------------------------------------------------
return builder.build()
# ----------------------------------------------------------------------------
USAGE = """
%(prog)s [-h | --help] [OPTION ...]
%(prog)s --preset=NAME [SUBSTITUTION ...]
"""
DESCRIPTION = """
Use this tool to build, test, and prepare binary distribution archives of Swift
and related tools.
Builds Swift (and, optionally, LLDB), incrementally, optionally
testing it thereafter. Different build configurations are maintained in
parallel.
"""
EPILOG = """
Using option presets:
--preset-file=PATH load presets from the specified file
--preset=NAME use the specified option preset
The preset mode is mutually exclusive with other options. It is not
possible to add ad-hoc customizations to a preset. This is a deliberate
design decision. (Rationale: a preset is a certain important set of
options that we want to keep in a centralized location. If you need to
customize it, you should create another preset in a centralized location,
rather than scattering the knowledge about the build across the system.)
Presets support substitutions for controlled customizations. Substitutions
are defined in the preset file. Values for substitutions are supplied
using the name=value syntax on the command line.
Any arguments not listed are forwarded directly to Swift's
'build-script-impl'. See that script's help for details. The listed
build-script-impl arguments are only for disambiguation in the argument parser.
Environment variables
---------------------
This script respects a few environment variables, should you
choose to set them:
SWIFT_SOURCE_ROOT: a directory containing the source for LLVM, Clang, Swift.
If this script is located in a Swift
source directory, the location of SWIFT_SOURCE_ROOT will be
inferred if the variable is not set.
'build-script' expects the sources to be laid out in the following way:
$SWIFT_SOURCE_ROOT/llvm-project
/swift
/llbuild (optional)
/swiftpm (optional, requires llbuild)
/swift-syntax (optional, requires swiftpm)
/swift-stress-tester (optional,
requires swift-syntax)
/swift-corelibs-xctest (optional)
/swift-corelibs-foundation (optional)
/swift-corelibs-libdispatch (optional)
/icu (optional)
SWIFT_BUILD_ROOT: a directory in which to create out-of-tree builds.
Defaults to "$SWIFT_SOURCE_ROOT/build/".
Preparing to run this script
----------------------------
See README.md for instructions on cloning Swift subprojects; check out LLDB as
well if you intend to use the -l, -L, --lldb, or --debug-lldb options.
That's it; you're ready to go!
Examples
--------
Given the above layout of sources, the simplest invocation of 'build-script' is
just:
[~/src/s]$ ./swift/utils/build-script
This builds LLVM, Clang, Swift and Swift standard library in debug mode.
All builds are incremental. To incrementally build changed files, repeat the
same 'build-script' command.
Typical uses of 'build-script'
------------------------------
To build everything with optimization without debug information:
[~/src/s]$ ./swift/utils/build-script -R
To run tests, add '-t':
[~/src/s]$ ./swift/utils/build-script -R -t
To run normal tests and validation tests, add '-T':
[~/src/s]$ ./swift/utils/build-script -R -T
To build LLVM+Clang with optimization without debug information, and a
debuggable Swift compiler:
[~/src/s]$ ./swift/utils/build-script -R --debug-swift
To build a debuggable Swift standard library:
[~/src/s]$ ./swift/utils/build-script -R --debug-swift-stdlib
iOS build targets are always configured and present, but are not built by
default. To build the standard library for OS X, iOS simulator and iOS device:
[~/src/s]$ ./swift/utils/build-script -R -i
To run OS X and iOS tests that don't require a device:
[~/src/s]$ ./swift/utils/build-script -R -i -t
To use 'make' instead of 'ninja', use '-m':
[~/src/s]$ ./swift/utils/build-script -m -R
To create Xcode projects that can build Swift, use '-x':
[~/src/s]$ ./swift/utils/build-script -x -R
Preset mode in build-script
---------------------------
All buildbots and automated environments use 'build-script' in *preset mode*.
In preset mode, the command line only specifies the preset name and allows
limited customization (extra output paths). The actual options come from
the selected preset in 'utils/build-presets.ini'. For example, to build like
the incremental buildbot, run:
[~/src/s]$ ./swift/utils/build-script --preset=buildbot_incremental
To build with AddressSanitizer:
[~/src/s]$ ./swift/utils/build-script --preset=asan
To build a root for Xcode XYZ, '/tmp/xcode-xyz-root.tar.gz':
[~/src/s]$ ./swift/utils/build-script --preset=buildbot_BNI_internal_XYZ \\
install_destdir="/tmp/install"
install_symroot="/tmp/symroot"
installable_package="/tmp/xcode-xyz-root.tar.gz"
If you have your own favorite set of options, you can create your own, local,
preset. For example, let's create a preset called 'ds' (which stands for
Debug Swift):
$ cat > ~/.swift-build-presets
[preset: ds]
release
debug-swift
debug-swift-stdlib
test
build-subdir=ds
To use it, specify the '--preset=' argument:
[~/src/s]$ ./swift/utils/build-script --preset=ds
./swift/utils/build-script: using preset 'ds', which expands to
./swift/utils/build-script --release --debug-swift --debug-swift-stdlib \
--test
--build-subdir=ds --
...
Existing presets can be found in `utils/build-presets.ini`
Philosophy
----------
While you can invoke CMake directly to build Swift, this tool will save you
time by taking away the mechanical parts of the process, providing you controls
for the important options.
For all automated build environments, this tool is regarded as *the* *only* way
to build Swift. This is not a technical limitation of the Swift build system.
It is a policy decision aimed at making the builds uniform across all
environments and easily reproducible by engineers who are not familiar with the
details of the setups of other systems or automated environments.
"""
#!/usr/bin/python
# coding: utf8
"""
Contains classes related to Range semantic
"""
import ast
from print_trace import PrintTrace
from range_operator import operators
from range_operator import comparison_sign
from range_operator import opposite_comparison
from data_structure import VariableRangeValue
from lexical_scope_symbol_table import LexicalScopeSymbolTable
class ExtractRangeSemantic(PrintTrace):
"""
    Keeps track of the operations performed on symbols in each scope and updates
    the symbols' ranges according to the semantics of those operations.
"""
all_variable_id = list()
vector_point = list()
scope_symbol_table = LexicalScopeSymbolTable()
id_node = 1
def __init__(self):
PrintTrace.__init__(self)
self.id_node = 1
def get_id_node(self):
"""
Returns the value of id_node
"""
return self.id_node
def register_variable_id(self, variable_id):
"""
Register a variable's identifier, a symbol
"""
self.all_variable_id.append(str(variable_id))
self.print_register_variable_id(variable_id)
def next_step_variables(self):
"""
        Initializes a variable range value and propagates the range
"""
self.vector_point.append(dict())
if len(self.vector_point) > 1:
for k, value in self.vector_point[-2].iteritems():
if value != 0:
self.vector_point[-1][k] = VariableRangeValue(64, k, [None, None, None])
self.propagate_range(len(self.vector_point) - 1, k)
self.vector_point[-1][k].value = None
self.vector_point[-1][k].id = k
else:
for element in self.all_variable_id:
self.vector_point[-1][element] = VariableRangeValue(64,
element,
[-float("inf"),
0,
float("inf")])
self.vector_point[-1][element].id = element
self.id_node = len(self.vector_point)
def get_binary_operator_operands(self, node_left, node_right):
"""
Extracts values from the operands of a binary operator
        Returns a list containing the two resolved operand nodes
"""
        left_op = node_left
        right_op = node_right
        if isinstance(node_left, ast.Name):
            left_op = self.scope_symbol_table.lookup_symbol(node_left.id)
        if isinstance(node_right, ast.Name):
            right_op = self.scope_symbol_table.lookup_symbol(node_right.id)
return [left_op, right_op]
def get_unary_operator_operand(self, operand):
"""
        Extracts the value from the operand of a unary operator
        Returns the ast node bound to the operand's symbol
"""
if isinstance(operand, ast.Name):
return self.scope_symbol_table.lookup_symbol(operand.id)
else:
raise Exception("Error: bad operand")
def eval_(self, node):
"""
        Performs unary and binary operations and returns the result as a real value
"""
if isinstance(node, ast.Num):
return node.n
elif isinstance(node, ast.BinOp):
operands = self.get_binary_operator_operands(node.left, node.right)
print "Eval: operands - " + str(operands)
return int(operators[type(node.op)](self.eval_(operands[0]), self.eval_(operands[1])))
elif isinstance(node, ast.UnaryOp):
operand = self.get_unary_operator_operand(node.operand)
return int(operators[type(node.op)](self.eval_(operand)))
raise Exception("Error: incorrect type in eval_")
def propagate_range(self, point, var_id):
"""
Propagates the range value from the previous point to the next one
"""
prev_point = point - 1
self.vector_point[point][var_id].range[0] = self.vector_point[prev_point][var_id].range[0]
self.vector_point[point][var_id].range[1] = self.vector_point[prev_point][var_id].range[1]
self.vector_point[point][var_id].range[2] = self.vector_point[prev_point][var_id].range[2]
def reset_range(self, point, variable_id):
"""
Sets the range to <None, None, None>
"""
        for index in range(0, len(self.vector_point[point][variable_id].range)):
            self.vector_point[point][variable_id].range[index] = None
def update_range_semantic(self, point, variable_id, value):
"""
Updates the range of a variable for a specific point
"""
index = 1
self.reset_range(point, variable_id)
if value == 0:
self.vector_point[point][variable_id].range[index] = 0
return 0
index = (2 if value > 0 else 0)
self.vector_point[point][variable_id].range[index] = (float('inf') \
if value > 0 else -float('inf'))
def assignment_update(self, new_value):
"""
        Updates the value of a symbol and its range, and prints the assignment
"""
for element in new_value.targets:
evaluated_value = self.eval_(new_value.value)
node_value = ast.Num(evaluated_value)
self.update_range_semantic(len(self.vector_point) - 1, element.id, evaluated_value)
self.vector_point[-1][element.id].value = evaluated_value
index_level = self.scope_symbol_table.get_current_level() - 1
self.scope_symbol_table.bind_symbol(element.id,
node_value,
index_level,
self.scope_symbol_table.get_last_offset(index_level))
self.print_assignment_update(self.id_node, element.id, evaluated_value)
self.next_step_variables()
@staticmethod
def get_statement(node, flag_opposite):
"""
Extracts a statement from a node
"""
collection = [None, None, None]
comparator = node.test.comparators
collection[0] = (comparison_sign[opposite_comparison[type(node.test.ops[0])]] \
if flag_opposite == 1 else comparison_sign[type(node.test.ops[0])])
if isinstance(node.test.left, ast.Num):
collection[1] = node.test.left.n
elif isinstance(node.test.left, ast.Name):
collection[1] = node.test.left.id
if isinstance(comparator[0], ast.Num):
collection[2] = comparator[0].n
elif isinstance(comparator[0], ast.Name):
collection[2] = comparator[0].id
return collection
def extract_while_update(self, node):
"""
        Extracts the while statement's content and updates the range
"""
operation = self.get_statement(node, 0)
self.print_statement(self.id_node, "While", operation[1], operation[0], operation[2])
self.next_step_variables()
def extract_if_statement_update(self, node):
"""
        Extracts the if statement's content and updates the range
"""
operation = self.get_statement(node, 0)
self.print_statement(self.id_node, "If", operation[1], operation[0], operation[2])
self.next_step_variables()
def extract_else_statement_update(self, node):
"""
        Extracts the else statement's content and updates the range
"""
operation = self.get_statement(node, 1)
self.print_statement(self.id_node, "Else", operation[1], operation[0], operation[2])
self.next_step_variables()
# def extract_binary_operation(self, node):
# pass
def initialize_scope(self):
"""
Initializes a scope
"""
self.scope_symbol_table.initialize_scope()
def finalize_scope(self):
"""
Closes a scope
"""
self.scope_symbol_table.finalize_scope()
def print_debug_level(self):
"""
Prints debug information
"""
for i in range(0, self.scope_symbol_table.get_size_level()):
self.scope_symbol_table.dump_level(i)
print "#" * 30 + "End of DEBUG LEVEL" + "#" * 30
def print_all_iteration(self):
"""
Prints all iterations performed on the vector_point
"""
point = 0
self.print_debug_level()
for element in self.vector_point:
self.print_state(point + 1)
for k in element:
point_index = point
lower_bound = self.vector_point[point_index][k].range[0]
median = self.vector_point[point_index][k].range[1]
upper_bound = self.vector_point[point_index][k].range[2]
value = self.vector_point[point_index][k].value
variable_id = self.vector_point[point_index][k].id
self.print_point(variable_id, value, lower_bound, median, upper_bound)
point += 1
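# ---------------------------------------------------------------------------
# Illustrative driver (added commentary, not part of the original module).
# A minimal sketch of how ExtractRangeSemantic is meant to be used, assuming
# the companion modules (print_trace, lexical_scope_symbol_table,
# data_structure, range_operator) behave as the calls above suggest.
if __name__ == "__main__":
    semantic = ExtractRangeSemantic()
    semantic.initialize_scope()
    semantic.register_variable_id("x")
    semantic.next_step_variables()              # create the initial point for all symbols
    assignment = ast.parse("x = 3").body[0]     # an ast.Assign node
    semantic.assignment_update(assignment)      # evaluates the RHS and updates x's range
    semantic.print_all_iteration()
    semantic.finalize_scope()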
# -*- coding: utf-8 -*-
"""``cacheutils`` contains consistent implementations of fundamental
cache types. Currently there are two to choose from:
* :class:`LRI` - Least-recently inserted
* :class:`LRU` - Least-recently used
Both caches are :class:`dict` subtypes, designed to be as
interchangeable as possible, to facilitate experimentation. A key
practice with performance enhancement with caching is ensuring that
the caching strategy is working. If the cache is constantly missing,
it is just adding more overhead and code complexity. The standard
statistics are:
* ``hit_count`` - the number of times the queried key has been in
the cache
* ``miss_count`` - the number of times a key has been absent and/or
fetched by the cache
* ``soft_miss_count`` - the number of times a key has been absent,
but a default has been provided by the caller, as with
:meth:`dict.get` and :meth:`dict.setdefault`. Soft misses are a
subset of misses, so this number is always less than or equal to
``miss_count``.
Additionally, ``cacheutils`` provides the cache-like bounded counter,
:class:`ThresholdCounter`.
Learn more about `caching algorithms on Wikipedia
<https://en.wikipedia.org/wiki/Cache_algorithms#Examples>`_.
"""
# TODO: clarify soft_miss_count. is it for .get and .set_default or is
# it for when on_miss provides a value. also, should on_miss itself be
# allowed to raise a KeyError
# TODO: TimedLRI
# TODO: support 0 max_size?
__all__ = ['LRI', 'LRU', 'cached', 'ThresholdCounter']
import itertools
from collections import deque
try:
from _thread import RLock
except ImportError:
class RLock(object):
'Dummy reentrant lock for builds without threads'
def __enter__(self):
pass
def __exit__(self, exctype, excinst, exctb):
pass
try:
from typeutils import make_sentinel
_MISSING = make_sentinel(var_name='_MISSING')
_KWARG_MARK = make_sentinel(var_name='_KWARG_MARK')
except ImportError:
_MISSING = object()
_KWARG_MARK = object()
try:
xrange
except NameError:
xrange = range
PREV, NEXT, KEY, VALUE = range(4) # names for the link fields
DEFAULT_MAX_SIZE = 128
class LRU(dict):
"""The ``LRU`` is :class:`dict` subtype implementation of the
*Least-Recently Used* caching strategy.
Args:
max_size (int): Max number of items to cache. Defaults to ``128``.
values (iterable): Initial values for the cache. Defaults to ``None``.
on_miss (callable): a callable which accepts a single argument, the
key not present in the cache, and returns the value to be cached.
>>> cap_cache = LRU(max_size=2)
>>> cap_cache['a'], cap_cache['b'] = 'A', 'B'
>>> from pprint import pprint as pp
>>> pp(dict(cap_cache))
{'a': 'A', 'b': 'B'}
>>> [cap_cache['b'] for i in range(3)][0]
'B'
>>> cap_cache['c'] = 'C'
>>> print(cap_cache.get('a'))
None
This cache is also instrumented with statistics
collection. ``hit_count``, ``miss_count``, and ``soft_miss_count``
are all integer members that can be used to introspect the
performance of the cache. ("Soft" misses are misses that did not
raise :exc:`KeyError`, e.g., ``LRU.get()`` or ``on_miss`` was used to
    cache a default.)
>>> cap_cache.hit_count, cap_cache.miss_count, cap_cache.soft_miss_count
(3, 1, 1)
Other than the size-limiting caching behavior and statistics,
``LRU`` acts like its parent class, the built-in Python :class:`dict`.
"""
def __init__(self, max_size=DEFAULT_MAX_SIZE, values=None,
on_miss=None):
if max_size <= 0:
raise ValueError('expected max_size > 0, not %r' % max_size)
self.hit_count = self.miss_count = self.soft_miss_count = 0
self.max_size = max_size
root = []
root[:] = [root, root, None, None]
self.link_map = {}
self.root = root
self.lock = RLock()
if on_miss is not None and not callable(on_miss):
raise TypeError('expected on_miss to be a callable'
' (or None), not %r' % on_miss)
self.on_miss = on_miss
if values:
self.update(values)
# TODO: fromkeys()?
def __setitem__(self, key, value):
with self.lock:
root = self.root
if len(self) < self.max_size:
# to the front of the queue
last = root[PREV]
link = [last, root, key, value]
last[NEXT] = root[PREV] = link
self.link_map[key] = link
super(LRU, self).__setitem__(key, value)
else:
# Use the old root to store the new key and result.
oldroot = root
oldroot[KEY] = key
oldroot[VALUE] = value
# prevent ref counts going to zero during update
self.root = root = oldroot[NEXT]
oldkey, oldresult = root[KEY], root[VALUE]
root[KEY] = root[VALUE] = None
# Now update the cache dictionary.
del self.link_map[oldkey]
super(LRU, self).__delitem__(oldkey)
self.link_map[key] = oldroot
super(LRU, self).__setitem__(key, value)
return
def __getitem__(self, key):
with self.lock:
try:
link = self.link_map[key]
except KeyError:
self.miss_count += 1
if not self.on_miss:
raise
ret = self[key] = self.on_miss(key)
return ret
self.hit_count += 1
# Move the link to the front of the queue
root = self.root
link_prev, link_next, _key, value = link
link_prev[NEXT] = link_next
link_next[PREV] = link_prev
last = root[PREV]
last[NEXT] = root[PREV] = link
link[PREV] = last
link[NEXT] = root
return value
def get(self, key, default=None):
try:
return self[key]
except KeyError:
self.soft_miss_count += 1
return default
def __delitem__(self, key):
with self.lock:
link = self.link_map.pop(key)
super(LRU, self).__delitem__(key)
link[PREV][NEXT], link[NEXT][PREV] = link[NEXT], link[PREV]
def pop(self, key, default=_MISSING):
# NB: hit/miss counts are bypassed for pop()
try:
ret = super(LRU, self).__getitem__(key)
del self[key]
except KeyError:
if default is _MISSING:
raise KeyError(key)
ret = default
return ret
def popitem(self):
with self.lock:
key, link = self.link_map.popitem()
super(LRU, self).__delitem__(link[KEY])
link[PREV][NEXT], link[NEXT][PREV] = link[NEXT], link[PREV]
return key, link[VALUE]
    def clear(self):
        with self.lock:
            # Relink the existing root onto itself so the circular list is
            # empty again, rather than leaving stale nodes reachable from a
            # brand-new root (which would break later evictions).
            root = self.root
            root[:] = [root, root, None, None]
            self.link_map.clear()
            super(LRU, self).clear()
def copy(self):
return self.__class__(max_size=self.max_size, values=self)
def setdefault(self, key, default=None):
try:
return self[key]
except KeyError:
self.soft_miss_count += 1
self[key] = default
return default
def update(self, E, **F):
# E and F are throwback names to the dict() __doc__
if E is self:
return
setitem = self.__setitem__
if callable(getattr(E, 'keys', None)):
for k in E.keys():
setitem(k, E[k])
else:
for k, v in E:
setitem(k, v)
for k in F:
setitem(k, F[k])
return
    def __eq__(self, other):
        if self is other:
            return True
        if len(other) != len(self):
            return False
        if not isinstance(other, LRU):
            return other == self
        return super(LRU, self).__eq__(other)
def __ne__(self, other):
return not (self == other)
def __repr__(self):
cn = self.__class__.__name__
val_map = super(LRU, self).__repr__()
return ('%s(max_size=%r, on_miss=%r, values=%s)'
% (cn, self.max_size, self.on_miss, val_map))
class LRI(dict):
"""The ``LRI`` implements the basic *Least Recently Inserted* strategy to
caching. One could also think of this as a ``SizeLimitedDefaultDict``.
*on_miss* is a callable that accepts the missing key (as opposed
to :class:`collections.defaultdict`'s "default_factory", which
accepts no arguments.) Also note that, like the :class:`LRU`,
the ``LRI`` is instrumented with statistics tracking.
>>> cap_cache = LRI(max_size=2)
>>> cap_cache['a'], cap_cache['b'] = 'A', 'B'
>>> from pprint import pprint as pp
>>> pp(cap_cache)
{'a': 'A', 'b': 'B'}
>>> [cap_cache['b'] for i in range(3)][0]
'B'
>>> cap_cache['c'] = 'C'
>>> print(cap_cache.get('a'))
None
>>> cap_cache.hit_count, cap_cache.miss_count, cap_cache.soft_miss_count
(3, 1, 1)
"""
    # In order to support delitem and .pop(), setitem will need to
# popleft until it finds a key still in the cache. or, only
# support popitems and raise an error on pop.
def __init__(self, max_size=DEFAULT_MAX_SIZE, values=None,
on_miss=None):
super(LRI, self).__init__()
self.hit_count = self.miss_count = self.soft_miss_count = 0
self.max_size = max_size
self.on_miss = on_miss
self._queue = deque()
if values:
self.update(values)
def __setitem__(self, key, value):
# TODO: pop support (see above)
if len(self) >= self.max_size:
old = self._queue.popleft()
del self[old]
super(LRI, self).__setitem__(key, value)
self._queue.append(key)
def update(self, E, **F):
# E and F are throwback names to the dict() __doc__
if E is self:
return
setitem = self.__setitem__
if callable(getattr(E, 'keys', None)):
for k in E.keys():
setitem(k, E[k])
else:
for k, v in E:
setitem(k, v)
for k in F:
setitem(k, F[k])
return
def copy(self):
return self.__class__(max_size=self.max_size, values=self)
def clear(self):
self._queue.clear()
super(LRI, self).clear()
def __getitem__(self, key):
try:
ret = super(LRI, self).__getitem__(key)
except KeyError:
self.miss_count += 1
if not self.on_miss:
raise
ret = self[key] = self.on_miss(key)
return ret
self.hit_count += 1
return ret
def get(self, key, default=None):
try:
return self[key]
except KeyError:
self.soft_miss_count += 1
return default
def setdefault(self, key, default=None):
try:
return self[key]
except KeyError:
self.soft_miss_count += 1
self[key] = default
return default
### Cached decorator
# Key-making technique adapted from Python 3.4's functools
class _HashedKey(list):
"""The _HashedKey guarantees that hash() will be called no more than once
per cached function invocation.
"""
__slots__ = 'hash_value'
def __init__(self, key):
self[:] = key
self.hash_value = hash(tuple(key))
def __hash__(self):
return self.hash_value
def _make_cache_key(args, kwargs, typed=False, kwarg_mark=_KWARG_MARK,
fasttypes=frozenset([int, str, frozenset, type(None)])):
"""Make a cache key from optionally typed positional and keyword
arguments. If *typed* is ``True``, ``3`` and ``3.0`` will be
treated as separate keys.
    The key is constructed in a way that is as flat as possible rather than
as a nested structure that would take more memory.
If there is only a single argument and its data type is known to cache
its hash value, then that argument is returned without a wrapper. This
saves space and improves lookup speed.
"""
key = list(args)
if kwargs:
sorted_items = sorted(kwargs.items())
key.append(kwarg_mark)
key.extend(sorted_items)
if typed:
key.extend([type(v) for v in args])
if kwargs:
key.extend([type(v) for k, v in sorted_items])
elif len(key) == 1 and type(key[0]) in fasttypes:
return key[0]
return _HashedKey(key)
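# Illustration (added commentary, not original doctests): with the defaults,
# _make_cache_key((3,), {}) returns the bare int 3 via the single-argument fast
# path, while _make_cache_key((3,), {}, typed=True) returns a _HashedKey
# wrapping [3, int], which is why 3 and 3.0 land in separate cache slots when
# typed=True.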
class CachedFunction(object):
def __init__(self, func, cache, typed=False):
self.func = func
self.cache = cache
self.typed = typed
def __call__(self, *args, **kwargs):
key = _make_cache_key(args, kwargs, typed=self.typed)
try:
ret = self.cache[key]
except KeyError:
ret = self.cache[key] = self.func(*args, **kwargs)
return ret
def __repr__(self):
cn = self.__class__.__name__
if self.typed:
return "%s(func=%r, typed=%r)" % (cn, self.func, self.typed)
return "%s(func=%r)" % (cn, self.func)
def cached(cache, typed=False):
"""Cache any function with the cache instance of your choosing. Note
that the function wrapped should take only `hashable`_ arguments.
Args:
cache (Mapping): Any :class:`dict`-like object suitable for
use as a cache. Instances of the :class:`LRU` and
:class:`LRI` are good choices.
typed (bool): Whether to factor argument types into the cache
check. Default ``False``, setting to ``True`` causes the
cache keys for ``3`` and ``3.0`` to be considered unequal.
>>> my_cache = LRU()
>>> @cached(my_cache)
... def cached_lower(x):
... return x.lower()
...
>>> cached_lower("CaChInG's FuN AgAiN!")
"caching's fun again!"
>>> len(my_cache)
1
.. _hashable: https://docs.python.org/2/glossary.html#term-hashable
"""
def cached_func_decorator(func):
return CachedFunction(func, cache, typed=typed)
return cached_func_decorator
class ThresholdCounter(object):
"""A **bounded** dict-like Mapping from keys to counts. The
ThresholdCounter automatically compacts after every (1 /
*threshold*) additions, maintaining exact counts for any keys
whose count represents at least a *threshold* ratio of the total
data. In other words, if a particular key is not present in the
ThresholdCounter, its count represents less than *threshold* of
the total data.
>>> tc = ThresholdCounter(threshold=0.1)
>>> tc.add(1)
>>> tc.items()
[(1, 1)]
>>> tc.update([2] * 10)
>>> tc.get(1)
0
>>> tc.add(5)
>>> 5 in tc
True
>>> len(list(tc.elements()))
11
As you can see above, the API is kept similar to
:class:`collections.Counter`. The most notable feature omissions
being that counted items cannot be set directly, uncounted, or
removed, as this would disrupt the math.
Use the ThresholdCounter when you need best-effort long-lived
counts for dynamically-keyed data. Without a bounded datastructure
such as this one, the dynamic keys often represent a memory leak
and can impact application reliability. The ThresholdCounter's
item replacement strategy is fully deterministic and can be
thought of as *Amortized Least Relevant*. The absolute upper bound
of keys it will store is *(2/threshold)*, but realistically
*(1/threshold)* is expected for uniformly random datastreams, and
one or two orders of magnitude better for real-world data.
This algorithm is an implementation of the Lossy Counting
algorithm described in "Approximate Frequency Counts over Data
Streams" by Manku & Motwani. Hat tip to Kurt Rose for discovery
and initial implementation.
"""
# TODO: hit_count/miss_count?
def __init__(self, threshold=0.001):
if not 0 < threshold < 1:
raise ValueError('expected threshold between 0 and 1, not: %r'
% threshold)
self.total = 0
self._count_map = {}
self._threshold = threshold
self._thresh_count = int(1 / threshold)
self._cur_bucket = 1
@property
def threshold(self):
return self._threshold
def add(self, key):
"""Increment the count of *key* by 1, automatically adding it if it
does not exist.
Cache compaction is triggered every *1/threshold* additions.
"""
self.total += 1
try:
self._count_map[key][0] += 1
except KeyError:
self._count_map[key] = [1, self._cur_bucket - 1]
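        # Lossy Counting compaction: once every 1/threshold additions, drop any
        # key whose count plus its bucket credit no longer exceeds the current
        # bucket, then advance the bucket.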
if self.total % self._thresh_count == 0:
self._count_map = dict([(k, v) for k, v in self._count_map.items()
if sum(v) > self._cur_bucket])
self._cur_bucket += 1
return
def elements(self):
"""Return an iterator of all the common elements tracked by the
counter. Yields each key as many times as it has been seen.
"""
repeaters = itertools.starmap(itertools.repeat, self.iteritems())
return itertools.chain.from_iterable(repeaters)
def most_common(self, n=None):
"""Get the top *n* keys and counts as tuples. If *n* is omitted,
returns all the pairs.
"""
        if n is not None and n <= 0:
            return []
        ret = sorted(self.iteritems(), key=lambda x: x[1], reverse=True)
if n is None or n >= len(ret):
return ret
return ret[:n]
def get_common_count(self):
"""Get the sum of counts for keys exceeding the configured data
threshold.
"""
return sum([count for count, _ in self._count_map.itervalues()])
def get_uncommon_count(self):
"""Get the sum of counts for keys that were culled because the
associated counts represented less than the configured
threshold. The long-tail counts.
"""
return self.total - self.get_common_count()
def get_commonality(self):
"""Get a float representation of the effective count accuracy. The
higher the number, the less uniform the keys being added, and
the higher accuracy and efficiency of the ThresholdCounter.
If a stronger measure of data cardinality is required,
consider using hyperloglog.
"""
return float(self.get_common_count()) / self.total
def __getitem__(self, key):
return self._count_map[key][0]
def __len__(self):
return len(self._count_map)
def __contains__(self, key):
return key in self._count_map
def iterkeys(self):
return iter(self._count_map)
def keys(self):
return list(self.iterkeys())
def itervalues(self):
count_map = self._count_map
for k in count_map:
yield count_map[k][0]
def values(self):
return list(self.itervalues())
def iteritems(self):
count_map = self._count_map
for k in count_map:
yield (k, count_map[k][0])
def items(self):
return list(self.iteritems())
def get(self, key, default=0):
"Get count for *key*, defaulting to 0."
try:
return self[key]
except KeyError:
return default
def update(self, iterable, **kwargs):
"""Like dict.update() but add counts instead of replacing them, used
to add multiple items in one call.
Source can be an iterable of keys to add, or a mapping of keys
to integer counts.
"""
if iterable is not None:
if callable(getattr(iterable, 'iteritems', None)):
for key, count in iterable.iteritems():
for i in xrange(count):
self.add(key)
else:
for key in iterable:
self.add(key)
if kwargs:
self.update(kwargs)
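# ---------------------------------------------------------------------------
# Illustrative self-check (added commentary, not part of the original module).
# The module docstring stresses verifying that a caching strategy is actually
# paying off; this sketch turns the hit/miss statistics into a hit ratio.
# The keys and max_size below are arbitrary example values.
if __name__ == '__main__':
    demo_cache = LRU(max_size=2)
    for demo_key in ['a', 'b', 'a', 'c', 'a']:
        demo_cache.setdefault(demo_key, demo_key.upper())
    lookups = demo_cache.hit_count + demo_cache.miss_count
    print('hit ratio: %.2f (%d hits / %d lookups)'
          % (float(demo_cache.hit_count) / lookups,
             demo_cache.hit_count, lookups))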
# end cacheutils.py
""" The MIT License (MIT)
Copyright (c) 2016 Kyle Hollins Wray, University of Massachusetts
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import ctypes as ct
import os
import sys
import csv
import numpy as np
import time
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__))))
import pomdp
import pomdp_alpha_vectors as pav
import nova_pomdp_pbvi as npp
class POMDPPBVICPU(npp.NovaPOMDPPBVICPU):
""" The point-based value iteration solver for POMDPs.
This class provides a clean python wrapper for simple interactions with this solver.
"""
def __init__(self, pomdpObject, GammaInitial=None):
""" The constructor for the POMDPPBVICPU class.
Parameters:
pomdpObject -- The POMDP object on which to run PBVI.
GammaInitial -- The initial values for each belief state. If undefined, then it is
zero for gamma = 1.0, and Rmin / (1 - gamma) otherwise.
"""
self.pomdp = pomdpObject
self.pomdpPtr = ct.POINTER(pomdp.POMDP)(self.pomdp)
self.GammaInitial = GammaInitial
if GammaInitial is None:
if self.pomdp.gamma < 1.0:
GammaInitial = np.array([[float(self.pomdp.Rmin / (1.0 - self.pomdp.gamma))
for s in range(self.pomdp.n)] \
for i in range(self.pomdp.r)])
else:
GammaInitial = np.array([[0.0 for s in range(self.pomdp.n)] for b in range(self.pomdp.r)])
GammaInitial = GammaInitial.flatten()
array_type_rn_float = ct.c_float * (self.pomdp.r * self.pomdp.n)
self.GammaInitial = array_type_rn_float(*GammaInitial)
self.currentHorizon = int(0)
self.Gamma = ct.POINTER(ct.c_float)()
self.GammaPrime = ct.POINTER(ct.c_float)()
self.pi = ct.POINTER(ct.c_uint)()
# Attempt to initialize the algorithm.
result = npp._nova.pomdp_pbvi_initialize_cpu(self.pomdpPtr, self)
if result != 0:
print("Failed to initialize the PBVI (CPU) algorithm.")
raise Exception()
def __del__(self):
""" The deconstructor for the POMDPPBVICPU class which automatically frees memory. """
result = npp._nova.pomdp_pbvi_uninitialize_cpu(self.pomdpPtr, self)
if result != 0:
print("Failed to free the PBVI (CPU) algorithm.")
raise Exception()
def solve(self):
""" Solve the POMDP by executing the solver.
Returns:
The POMDPAlphaVectors policy solution to the POMDP.
"""
policy = pav.POMDPAlphaVectors()
result = npp._nova.pomdp_pbvi_execute_cpu(self.pomdpPtr, self, policy)
if result != 0:
print("Failed to execute the 'nova' library's CPU POMDP solver.")
raise Exception()
return policy
def __str__(self):
""" Return the string of the POMDP PBVI.
Returns:
The string of the POMDP PBVI.
"""
result = "GammaInitial:\n%s" % (str(np.array([[self.GammaInitial[j * self.pomdp.n + i] \
for j in range(self.pomdp.r)] \
for i in range(self.pomdp.n)]))) + "\n\n"
result += "currentHorizon: %i" % (self.currentHorizon) + "\n\n"
result += "Gamma:\n%s" % (str(np.array([[self.Gamma[j * self.pomdp.n + i] \
for j in range(self.pomdp.r)] \
for i in range(self.pomdp.n)]))) + "\n\n"
result += "GammaPrime:\n%s" % (str(np.array([[self.GammaPrime[j * self.pomdp.n + i] \
for j in range(self.pomdp.r)] \
for i in range(self.pomdp.n)]))) + "\n\n"
result += "pi:\n%s" % (str(np.array([self.pi[i] \
for i in range(self.pomdp.n)]))) + "\n\n"
return result
class POMDPPBVIGPU(npp.NovaPOMDPPBVIGPU):
""" The point-based value iteration solver for POMDPs.
This class provides a clean python wrapper for simple interactions with this solver.
"""
def __init__(self, pomdpObject, numThreads=1024, GammaInitial=None):
""" The constructor for the MDPValueIterationGPU class.
Parameters:
pomdpObject -- The POMDP object on which to run PBVI.
numThreads -- The number of CUDA threads to execute (multiple of 32). Default is 1024.
GammaInitial -- The initial values for each belief state. If undefined, then it is
zero for gamma = 1.0, and Rmin / (1 - gamma) otherwise.
"""
self.pomdp = pomdpObject
self.pomdpPtr = ct.POINTER(pomdp.POMDP)(self.pomdp)
self.GammaInitial = GammaInitial
if GammaInitial is None:
if self.pomdp.gamma < 1.0:
GammaInitial = np.array([[float(self.pomdp.Rmin / (1.0 - self.pomdp.gamma))
for s in range(self.pomdp.n)] \
for i in range(self.pomdp.r)])
else:
GammaInitial = np.array([[0.0 for s in range(self.pomdp.n)] for b in range(self.pomdp.r)])
GammaInitial = GammaInitial.flatten()
array_type_rn_float = ct.c_float * (self.pomdp.r * self.pomdp.n)
self.GammaInitial = array_type_rn_float(*GammaInitial)
self.currentHorizon = int(0)
self.numThreads = numThreads
self.d_Gamma = ct.POINTER(ct.c_float)()
self.d_GammaPrime = ct.POINTER(ct.c_float)()
self.d_pi = ct.POINTER(ct.c_uint)()
self.d_alphaBA = ct.POINTER(ct.c_float)()
# Attempt to initialize the algorithm.
result = npp._nova.pomdp_pbvi_initialize_gpu(self.pomdpPtr, self)
if result != 0:
print("Failed to initialize the PBVI (GPU) algorithm.")
raise Exception()
def __del__(self):
""" The deconstructor for the POMDPPBVIGPU class which automatically frees memory. """
result = npp._nova.pomdp_pbvi_uninitialize_gpu(self.pomdpPtr, self)
if result != 0:
print("Failed to free the PBVI (GPU) algorithm.")
raise Exception()
def solve(self):
""" Solve the POMDP by executing the solver.
Returns:
The POMDPAlphaVectors policy solution to the POMDP.
"""
policy = pav.POMDPAlphaVectors()
result = npp._nova.pomdp_pbvi_execute_gpu(self.pomdpPtr, self, policy)
if result != 0:
print("Failed to execute the 'nova' library's GPU POMDP solver.")
raise Exception()
return policy
def __str__(self):
""" Return the string of the POMDP PBVI.
Returns:
The string of the POMDP PBVI.
"""
result = "GammaInitial:\n%s" % (str(np.array([[self.GammaInitial[j * self.pomdp.n + i] \
for j in range(self.pomdp.r)] \
for i in range(self.pomdp.n)]))) + "\n\n"
result += "currentHorizon: %i" % (self.currentHorizon) + "\n\n"
result += "numThreads: %i" % (self.numThreads) + "\n\n"
return result
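# ---------------------------------------------------------------------------
# Illustrative helper (added commentary, not part of the original module).
# A minimal sketch of how the wrappers above are intended to be driven; how
# the pomdp.POMDP instance itself gets populated is left to the companion
# modules in this package.
def _solve_with_pbvi(pomdpObject, use_gpu=False, numThreads=1024):
    """ Solve the given POMDP with PBVI on the CPU or GPU and return the policy. """
    if use_gpu:
        solver = POMDPPBVIGPU(pomdpObject, numThreads=numThreads)
    else:
        solver = POMDPPBVICPU(pomdpObject)
    return solver.solve()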
"""Query module.
This module contains a definition of object queries.
"""
import traceback
import inspect
import re
import logging
import uuid
import exc
from lib.rome.core.terms.terms import *
from sqlalchemy.sql.expression import BinaryExpression, BooleanClauseList
from lib.rome.core.rows.rows import construct_rows, find_table_name, all_selectables_are_functions
from sqlalchemy.sql.elements import UnaryExpression
from lib.rome.core.models import get_model_class_from_name, get_model_classname_from_tablename, get_model_tablename_from_classname, get_tablename_from_name
try:
from lib.rome.core.dataformat import get_decoder
from lib.rome.core.dataformat.json import find_table_name
except:
pass
class Query:
def __init__(self, *args, **kwargs):
self._models = []
self._initial_models = []
self._criterions = []
self._funcs = []
self._hints = []
self._orders = []
self._session = None
base_model = None
        # Process Query's arguments in a separate function
self._extract_arguments(*args, **kwargs)
    def _extract_arguments(self, *args, **kwargs):
        # Default to None so the check below never hits an unbound local.
        base_model = kwargs.get("base_model", None)
        if "session" in kwargs:
            self._session = kwargs.get("session")
for arg in args:
if type(arg) is tuple:
for arg2 in arg:
self._extract_argument(arg2)
else:
self._extract_argument(arg)
if all_selectables_are_functions(self._models):
if base_model:
self._models += [Selection(base_model, "*", is_hidden=True)]
def _extract_argument(self, arg):
if ("count" in str(arg) or "sum" in str(arg)) and "DeclarativeMeta" not in str(type(arg)):
function_name = re.sub("\(.*\)", "", str(arg))
field_id = re.sub("\)", "", re.sub(".*\(", "", str(arg)))
self._models += [Selection(None, None, is_function=True, function=Function(function_name, field_id))]
elif find_table_name(arg) != "none":
arg_as_text = "%s" % (arg)
attribute_name = "*"
if not hasattr(arg, "_sa_class_manager"):
if (len(arg_as_text.split(".")) > 1):
attribute_name = arg_as_text.split(".")[-1]
if hasattr(arg, "_sa_class_manager"):
self._models += [Selection(arg, attribute_name)]
elif hasattr(arg, "class_"):
self._models += [Selection(arg.class_, attribute_name)]
else:
self._models += [Selection(arg, "*")]
pass
elif isinstance(arg, UnaryExpression):
parts = str(arg).split(" ")
if len(parts) > 1:
fieldname = parts[0]
order = parts[1]
if order in ["ASC", "DESC"]:
self._orders += [arg]
elif isinstance(arg, Selection):
self._models += [arg]
elif isinstance(arg, Hint):
self._hints += [arg]
elif isinstance(arg, Function):
self._models += [Selection(None, None, True, arg)]
self._funcs += [arg]
elif isinstance(arg, BooleanClauseList) or type(arg) == list:
for clause in arg:
if type(clause) == BinaryExpression:
self._criterions += [BooleanExpression("NORMAL", clause)]
elif isinstance(arg, BinaryExpression):
self._criterions += [BooleanExpression("NORMAL", arg)]
elif hasattr(arg, "is_boolean_expression"):
self._criterions += [arg]
else:
pass
    def all(self, request_uuid=None):
        result_list = construct_rows(self._models, self._criterions, self._hints, session=self._session, request_uuid=request_uuid, order_by=self._orders)
        return list(result_list)
def first(self):
rows = self.all()
if len(rows) > 0:
return rows[0]
else:
            return None
def one_or_none(self):
ret = self.all()
l = len(ret)
if l == 1:
return ret[0]
elif l == 0:
return None
else:
raise exc.MultipleResultsFound("Multiple rows were found for one_or_none()")
    def one(self):
        try:
            ret = self.one_or_none()
        except exc.MultipleResultsFound:
            raise exc.MultipleResultsFound("Multiple results were found for one()")
        if ret is None:
            raise exc.NoResultFound("No row was found for one()")
        return ret
def exists(self):
return self.first() is not None
def count(self):
return len(self.all())
def soft_delete(self, synchronize_session=False):
for e in self.all():
try:
e.soft_delete()
except:
pass
return self
def delete(self, synchronize_session=False):
for e in self.all():
try:
e.delete()
except:
pass
return self
def update(self, values, synchronize_session='evaluate'):
result = self.all()
count = 0
for each in result:
try:
each.update(values)
if self._session is not None:
self._session.update(each)
else:
each.save()
count = count + 1
except:
traceback.print_exc()
pass
return count
def distinct(self):
return list(set(self.all()))
def limit(self, limit):
return self
####################################################################################################################
# Query construction
####################################################################################################################
def _extract_hint(self, criterion):
if hasattr(criterion, "extract_hint"):
self._hints += criterion.extract_hint()
elif type(criterion).__name__ == "BinaryExpression":
exp = BooleanExpression("or", *[criterion])
self._extract_hint(exp)
def _extract_models(self, criterion):
tables = []
""" This means that the current criterion is involving a constant value: there
is not information that could be collected about a join between tables. """
if ":" in str(criterion):
return
else:
""" Extract tables names from the criterion. """
expressions = [criterion.expression.left, criterion.expression.right] if hasattr(criterion, "expression") else []
for expression in expressions:
if str(expression) == "NULL":
return
if hasattr(expression, "foreign_keys"):
for foreign_key in getattr(expression, "foreign_keys"):
if hasattr(foreign_key, "column"):
tables += [foreign_key.column.table]
tables_objects = getattr(criterion, "_from_objects", [])
tables_names = map(lambda x: str(x), tables_objects)
tables += tables_names
tables = list(set(tables)) # remove duplicate names
""" Extract the missing entity models from tablenames. """
current_entities = map(lambda x: x._model, self._models)
current_entities = filter(lambda x: x is not None, current_entities)
current_entities_tablenames = map(lambda x: x.__tablename__, current_entities)
missing_tables = filter(lambda x: x not in current_entities_tablenames, tables)
missing_tables_names = map(lambda x: str(x), missing_tables)
missing_entities_names = map(lambda x: get_model_classname_from_tablename(x), missing_tables_names)
missing_entities_objects = map(lambda x: get_model_class_from_name(x), missing_entities_names)
""" Add the missing entity models to the models of the current query. """
missing_models_to_selections = map(lambda x: Selection(x, "id", is_hidden=True), missing_entities_objects)
self._models += missing_models_to_selections
def filter_by(self, **kwargs):
criterions = []
for a in kwargs:
for selectable in self._models:
try:
column = getattr(selectable._model, a)
criterion = column.__eq__(kwargs[a])
self._extract_hint(criterion)
criterions += [criterion]
break
except Exception as e:
# create a binary expression
# traceback.print_exc()
pass
return self.filter(*criterions)
def filter_dict(self, filters):
return self.filter_by(**filters)
# criterions can be a function
def filter(self, *criterions):
_func = self._funcs[:]
_orders = self._orders[:]
_criterions = self._criterions[:]
for criterion in criterions:
self._extract_hint(criterion)
self._extract_models(criterion)
_criterions += [criterion]
_hints = self._hints[:]
args = self._models + _func + _criterions + _hints + self._initial_models + _orders
kwargs = {}
if self._session is not None:
kwargs["session"] = self._session
return Query(*args, **kwargs)
def join(self, *args, **kwargs):
_func = self._funcs[:]
_models = self._models[:]
_orders = self._orders[:]
_criterions = self._criterions[:]
_hints = self._hints[:]
for arg in args:
""" The following block has been written to handle the following kind of call to 'join':
query.join("_metadata")
where the joining class is not specified but rather a relationship name.
"""
# if type(arg) is str and len(args) == 1:
# if len(self._models) == 0:
# continue
# candidate_model = self._models[0]._model
# if not hasattr(candidate_model, arg):
# continue
# candidate_attribute = getattr(candidate_model, arg)
# if not hasattr(candidate_attribute, "property"):
# continue
# if not type(candidate_attribute.property).__name__ == "RelationshipProperty":
# continue
# remote_model = candidate_attribute.property.argument
# return self.join(remote_model)
if not isinstance(arg, list) and not isinstance(arg, tuple):
tuples = [arg]
else:
tuples = arg
for item in tuples:
is_class = inspect.isclass(item)
is_expression = (
"BinaryExpression" in "%s" % (item) or
"BooleanExpression" in "%s" % (item) or
"BinaryExpression" in "%s" % (type(item)) or
"BooleanExpression" in "%s" % (type(item))
)
if is_class:
_models = _models + [Selection(item, "*")]
if len(tuples) == 1:
# Must find an expression that would specify how to join the tables.
from lib.rome.core.utils import get_relationships_from_class
tablename = item.__tablename__
non_function_models = filter(lambda x: x._model is not None,_models)
current_tablenames = map(lambda x: x._model.__tablename__, non_function_models)
models_classes = map(lambda x: x._model, non_function_models)
relationships = map(lambda x: get_relationships_from_class(x), models_classes)
# relationships = get_relationships_from_class(item)
flatten_relationships = [item for sublist in relationships for item in sublist]
for relationship in flatten_relationships:
tablesnames = [relationship.local_tablename, relationship.remote_object_tablename]
if tablename in tablesnames:
other_tablename = filter(lambda x: x!= tablename, tablesnames)[0]
if other_tablename in current_tablenames:
type_expression = type(relationship.initial_expression).__name__
new_criterions = []
if type_expression in ["BooleanClauseList", "list"]:
for exp in relationship.initial_expression:
new_criterions += [JoiningBooleanExpression("NORMAL", *[exp])]
elif type_expression == "BinaryExpression":
new_criterions = [JoiningBooleanExpression("NORMAL", *[relationship.initial_expression])]
_criterions += new_criterions
break
elif is_expression:
_criterions += [item]
else:
                    # We should have a string referring to an attribute of the class
if len(self._models) > 0:
relationship_field = getattr(self._models[0]._model, arg)
joining_class = None
if hasattr(relationship_field, "property"):
joining_class = relationship_field.property.argument
return self.join(joining_class)
args = _models + _func + _criterions + _hints + self._initial_models + _orders
kwargs = {}
if self._session is not None:
kwargs["session"] = self._session
return Query(*args, **kwargs)
def outerjoin(self, *args, **kwargs):
return self.join(*args, **kwargs)
def options(self, *args):
_func = self._funcs[:]
_models = self._models[:]
_orders = self._orders[:]
_criterions = self._criterions[:]
_initial_models = self._initial_models[:]
_hints = self._hints[:]
args = _models + _func + _criterions + _hints + _initial_models + _orders
kwargs = {}
if self._session is not None:
kwargs["session"] = self._session
return Query(*args, **kwargs)
def order_by(self, *criterion):
_func = self._funcs[:]
_models = self._models[:]
_orders = self._orders[:]
_criterions = self._criterions[:]
_initial_models = self._initial_models[:]
_hints = self._hints[:]
args = _models + _func + _criterions + _hints + _initial_models + _orders + list(criterion)
kwargs = {}
if self._session is not None:
kwargs["session"] = self._session
return Query(*args, **kwargs)
def with_lockmode(self, mode):
return self
def subquery(self):
_func = self._funcs[:]
_models = self._models[:]
_orders = self._orders[:]
_criterions = self._criterions[:]
_initial_models = self._initial_models[:]
_hints = self._hints[:]
args = _models + _func + _criterions + _hints + _initial_models + _orders
kwargs = {}
if self._session is not None:
kwargs["session"] = self._session
return Query(*args, **kwargs).all()
def union(self, *queries):
return QueryUnion(self, *queries)
def __iter__(self):
return iter(self.all())
def __repr__(self):
return """{\\"models\\": \\"%s\\", \\"criterions\\": \\"%s\\", \\"hints\\": \\"%s\\"}""" % (self._models, self._criterions, self._hints)
import itertools
class QueryUnion(Query):
def __init__(self, main_query, *queries):
self.main_query = main_query
self.queries = list(queries)
# Quick fix to enable compatibility with Glance
        self._models = list(itertools.chain.from_iterable(map(lambda x: x._models, self.queries)))
        self._initial_models = list(itertools.chain.from_iterable(map(lambda x: x._initial_models, self.queries)))
        self._criterions = list(itertools.chain.from_iterable(map(lambda x: x._criterions, self.queries)))
        self._funcs = list(itertools.chain.from_iterable(map(lambda x: x._funcs, self.queries)))
        self._hints = list(itertools.chain.from_iterable(map(lambda x: x._hints, self.queries)))
self._session = None
def all(self, request_uuid=None):
result = self.main_query.all()
for query in self.queries:
result += query.all()
return result
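# ---------------------------------------------------------------------------
# Illustrative usage (added commentary, not part of the original module).
# `Instance` and `OtherModel` are hypothetical rome model classes; the
# chaining below mirrors the SQLAlchemy query API that this class emulates.
#
#   query = Query(Instance).filter_by(uuid="some-uuid")
#   first_row = query.first()        # None when nothing matches
#   joined_rows = Query(Instance).join(OtherModel).all()
#   merged_rows = query.union(Query(Instance).filter_by(deleted=False)).all()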
# -----------------------------------------------------------------------------
# Author: Texas Department of Transportation (TPP)
#
# Licence: The MIT License (MIT)
# Copyright (C) 2014 TxDOT
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# -----------------------------------------------------------------------------
def txArchive():
"""
Create an archive of your MyModules script and push the working script
to the main folder.
"""
import os
import time
import shutil
directory = "C:\\DATAMGT\\Scripts\\MyModules"
workingCopy = "C:\\DATAMGT\\Scripts\\MyModules\MyModules_WorkingCopy.py"
archiveTime = str(time.localtime()[2]) + str(time.localtime()[1]) + \
str(time.localtime()[0]) + "_" + \
str(time.localtime()[3]) + str(time.localtime()[4])
archive = (directory + os.sep + "Archive" + archiveTime + ".py")
MyModulesMain = "C:\\Python26\\ArcGIS10.0\\Lib\\site-packages\MyModules.py"
shutil.copyfile(MyModulesMain, archive)
shutil.copyfile(workingCopy, MyModulesMain)
del directory
del workingCopy
del archiveTime
del archive
def fileChecklist(directory, extension='All'):
"""
Create a checklist file in "txt" format for any folder - list all files
by name.
"""
import os
checkFile = directory + os.sep + 'Checklist.txt'
file1 = open(checkFile, "w")
dirList = os.listdir(directory)
for fname in dirList:
if extension == 'All':
file1.write(str(fname) + "\n")
else:
if fname.endswith(extension):
file1.write(str(fname) + "\n")
file1.close()
print 'Done!'
def projectMenu():
"""
Through a defined menu, allows a user to select a project category,
create a new folder, and optionally add a project plan document.
"""
import os
import shutil
import time
# Determine project type and location
ansMenu = raw_input(
"What type of project are you creating?\n\n 1 - Data Transmittal\n\
2 - Minute Order\n 3 - Special Map\n 4 - Training Map\n\
5 - Other Location\n\nEnter Selection: ")
if ansMenu == '1':
tmYR = str(time.localtime()[0])
path = ("T:\\DATAMGT\MAPPING\\Data Transmittals" + os.sep + tmYR)
elif ansMenu == '2':
tmYR = str(time.localtime()[0])
path = ("T:\\DATAMGT\\MAPPING\\Special Maps" + os.sep + tmYR +
"\\Minute Orders")
moType = raw_input(
"\nWhat's the intent of the Minute Order?\n\n 1 - New Designation\n\
2 - Re-Designation\n 3 - Proposed Highway\n\
4 - Removal\n\nEnter Selection: ")
if moType == '1':
moChange = "New Designation"
if moType == '2':
moChange = "Re-Designation"
if moType == '3':
moChange = "Proposed Highway"
if moType == '4':
moChange = "Removal"
elif ansMenu == '3':
tmYR = str(time.localtime()[0])
path = ("T:\\DATAMGT\\MAPPING\\Special Maps" + os.sep + tmYR)
elif ansMenu == '4':
path = "T:\\DATAMGT\\MAPPING\\Training"
else:
path = raw_input("\nWhat is the filepath of your working directory?\
\n\nPath: ")
# Name the project
projName = raw_input("\nWhat is the project name?: ")
# Ask for a project plan
ansPlan = raw_input("\nWould you like to include a project plan? \
\n\nY or N: ")
# Create folder directory
folderPDF = (path + os.sep + projName + os.sep + "PDF")
folderGeoData = (path + os.sep + projName + os.sep + "Geographic Data")
folderScripts = (path + os.sep + projName + os.sep + "Scripts")
folderMaps = (path + os.sep + projName + os.sep + "Maps")
folderDoc = (path + os.sep + projName + os.sep + "Documentation")
# Change the location of the Project Template here:
projTemp = "T:\\DATAMGT\\MAPPING\\Personal Folders\\David H\
\\Scripts\\ProjectTemplate.doc"
newProjPlan = (folderDoc + os.sep + projName + ".doc")
folderList = [folderPDF, folderGeoData,
folderScripts, folderMaps, folderDoc]
for x in folderList:
if not os.path.exists(x):
os.makedirs(x)
    if ansPlan == 'Y':
shutil.copyfile(projTemp, newProjPlan)
else:
pass
print " "
projDirectory = (path + os.sep + projName)
print projDirectory
# Open the file directory in Windows Explorer
os.startfile(projDirectory)
# Modify or create (if not existing) a general log file
# for all logged projects
# Change the location of the general log file here:
logFile = "T:\\DATAMGT\\MAPPING\\Personal Folders\\David H\\Scripts\
\\ProjectLogFile.txt"
# Collect user name information
userName = (os.path.expanduser("~/"))[9:16]
# Record directory creation in general log
with open(logFile, "a") as log:
log.write("\n" + time.ctime() + ", " + userName + ", " +
projName + ", " + projDirectory)
# Create a log file specifically for minute orders
# Change the location of the Minute Order log file here:
moLogFile = "T:\\DATAMGT\\MAPPING\\Personal Folders\\David H\
\\Scripts\\MinuteOrderProjectLogFile.txt"
    # Log minute order specifics into Minute Order log file (moChange is only
    # defined when a Minute Order project was selected above)
    if ansMenu == '2':
        with open(moLogFile, "a") as log:
            log.write("\n" + time.ctime() + ", " + moChange + ", "
                      + userName + ", " + projName + ", " + projDirectory)
def archiveComanche(output_path, db_connection):
"""
    Archives key feature classes from the given database connection into a new
    file geodatabase created under the local output path.
    Args:
        output_path (str): Full path to the output folder
        db_connection (str): Database connection (workspace) to export from
"""
from arcpy import env
import arcpy
import os
import time
exportTime = time.ctime()
exportTimeElements = exportTime.split(" ")
formatTime = ""
for element in exportTimeElements:
if ":" in element:
element = element.replace(":", "_")
formatTime = formatTime + "_" + element
env.workspace = db_connection
outputWorkspace = output_path
print "Creating New File Geodatabase..."
arcpy.CreateFileGDB_management(outputWorkspace, "Archive" + formatTime,
"10.0")
    outputPath = os.path.join(outputWorkspace, "Archive" + formatTime + ".gdb")
copyFiles = ["TPP_GIS.APP_TPP_GIS_ADMIN.Roadways\\TPP_GIS.APP_TPP_GIS_ADMIN.TXDOT_Roadways",
"TPP_GIS.APP_TPP_GIS_ADMIN.RTE_CONCURRENT",
"TPP_GIS.APP_TPP_GIS_ADMIN.RTE_CONTROL_SECTION",
"TPP_GIS.APP_TPP_GIS_ADMIN.SUBFILES"]
for file in copyFiles:
if file == "TPP_GIS.APP_TPP_GIS_ADMIN.Roadways\\TPP_GIS.APP_TPP_GIS_ADMIN.TXDOT_Roadways":
fileName = file.split(".")[4]
print "Exporting " + fileName
outFC = outputPath + os.sep + fileName
arcpy.CopyFeatures_management(file, outFC)
else:
fileName = file.split(".")[2]
print "Exporting " + fileName
outFC = outputPath + os.sep + fileName
arcpy.CopyRows_management(file, outFC)
print "Archive Complete..."
def rte_concatenate(table, group_field="RTE_ID", from_field="FROM_DFO",
to_field="TO_DFO", concatenate_field_name="CONCAT",
mark_overlap=True, overlap_field_name="OVERLAPS"):
"""
Adds a field for route concatenate and populates a concatenation index.
This value marks records that belong to the same linear segment.
Optionally, checks for overlapping measures.
Example 1:
rte_concatenate("C:\\Test.gdb\\test")
Example 2:
rte_concatenate("C:\\Test.gdb\\test", "Route_ID", "FRM_Mea",
"TO_Mea", "Concat", True, "Overlap")
    Args:
        table (str): Full path to the route table
        group_field (str, optional): Field name to group on for
            concatenation (e.g. "RTE_ID", "C_SEC"); default: "RTE_ID"
        from_field (str, optional): Field name containing the from measure;
            default: "FROM_DFO"
        to_field (str, optional): Field name containing the to measure;
            default: "TO_DFO"
        concatenate_field_name (str, optional): Custom name for the
            concatenate field; default: "CONCAT"
        mark_overlap (boolean, optional): Mark whether the measures overlap;
            default: True
        overlap_field_name (str, optional): Custom name for the overlap
            field; default: "OVERLAPS"
"""
# Import arcpy module
import arcpy
import time
import os
# Establish start time
start_time = time.time()
# Create table in memory
output_dir_path = os.path.dirname(table)
output_table_name = os.path.basename(table) + "_RTE_CONCATENATE"
output_table = os.path.join(output_dir_path, output_table_name)
# Create temp table
temp_table = "in_memory//" + output_table_name
arcpy.TableSelect_analysis(table, temp_table)
# Create field list to check that valid field exists
field_list = arcpy.ListFields(temp_table)
add_field_list = [concatenate_field_name, "RC_UNIQUE"]
# Add field for marking overlap if specified by user
if mark_overlap is True:
add_field_list.append(overlap_field_name)
# Iterate through table, checking if the add field already exist
for field in field_list:
if field.name == concatenate_field_name:
add_field_list.remove(concatenate_field_name)
elif field.name == overlap_field_name and mark_overlap is True:
add_field_list.remove(overlap_field_name)
del field_list
    # If valid field does not exist, add the field
    for field in add_field_list:
        print "Adding Field: {0}".format(field)
        arcpy.AddField_management(temp_table, field, "LONG")
# Create update cursor to populate the concatenation value
    sort_string = "{0} A; {1} A".format(group_field, from_field)
    cursor_fields = [group_field, from_field, to_field,
                     concatenate_field_name, "RC_UNIQUE"]
    if mark_overlap is True:
        cursor_fields.append(overlap_field_name)
    fields_subset = "; ".join(cursor_fields)
    rows = arcpy.UpdateCursor(temp_table, "", "", fields_subset, sort_string)
row = rows.next()
# Create baseline variables
previous = ""
previous_to = ""
previous_unique_id = ""
counter = 0
concatenate_index = 1
# Create Empty List to hold overlaps
overlap_list = []
# begin cursor
while row:
current = row.getValue(group_field)
current_from = row.getValue(from_field)
current_to = row.getValue(to_field)
# Populate Unique ID field
unique_id = counter + 1
row.RC_UNIQUE = unique_id
# Sets initial values for the first record in the table
if counter == 0:
row.setValue(concatenate_field_name, concatenate_index)
            if mark_overlap is True:
                row.setValue(overlap_field_name, 0)
# Marks a records as belonging to the same segment as previous
elif previous == current and previous_to >= current_from:
row.setValue(concatenate_field_name, concatenate_index)
if mark_overlap is True:
if previous_to > current_from:
row.setValue(overlap_field_name, previous_unique_id)
overlap_list.append((previous_unique_id, unique_id))
else:
row.setValue(overlap_field_name, 0)
# Marks the first record of a new segment in the same route
elif previous == current and previous_to < current_from:
concatenate_index += 1
row.setValue(concatenate_field_name, concatenate_index)
            if mark_overlap is True:
                row.setValue(overlap_field_name, 0)
# Marks the first record of a new route
else:
concatenate_index = 1
row.setValue(concatenate_field_name, concatenate_index)
            if mark_overlap is True:
                row.setValue(overlap_field_name, 0)
# Sets the current records as previous for the next row
previous = current
previous_to = current_to
previous_unique_id = unique_id
# Saves changes to the current row and get the next row object
rows.updateRow(row)
row = rows.next()
        # Increments the counter value and prints progress feedback
counter += 1
print counter
del row, rows
if mark_overlap is True:
print "Completing Overlap Processing..."
rows = arcpy.UpdateCursor(temp_table)
for row in rows:
unique_id = row.RC_UNIQUE
for item in overlap_list:
if item[0] == unique_id:
row.setValue(overlap_field_name, item[1])
rows.updateRow(row)
# Write out temp_table
arcpy.CopyRows_management(temp_table, output_table)
# Delete out from memory
arcpy.Delete_management(temp_table)
del temp_table, table, overlap_list, overlap_field_name, row, rows
end_time = time.time()
print "Elapsed time: {0}".format(time.strftime('%H:%M:%S',
time.gmtime(end_time - start_time)))
def rte_order(rteTable, rteIDField, frmMeasField, orderField='RTE_ORDER'):
"""
Adds segment order index by a common id and measure.
    :param rteTable: Full path to the route table or feature class
    :param rteIDField: Field containing the common route ID
    :param frmMeasField: Field containing the from measure used for ordering
    :param orderField: Name of the order field to add and populate
        (default: 'RTE_ORDER')
    :return: None; the table is updated in place
"""
import arcpy
sql_clause = (None, "ORDER BY {0}, {1} ASC".format(rteIDField,
frmMeasField))
current = ""
previous = ""
counter = 0
NEW_ORDER = 1
if orderField not in [f.name for f in arcpy.ListFields(rteTable)]:
print "Adding order field..."
arcpy.AddField_management(rteTable, orderField, "SHORT")
with arcpy.da.UpdateCursor(in_table=rteTable,
field_names=[rteIDField, frmMeasField,
orderField],
sql_clause=sql_clause) as rows:
for row in rows:
current = row[0]
if counter == 0:
previous = current
row[2] = NEW_ORDER
elif previous == current and counter > 0:
NEW_ORDER += 1
row[2] = NEW_ORDER
else:
NEW_ORDER = 1
row[2] = NEW_ORDER
previous = current
counter += 1
print counter
rows.updateRow(row)
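# Example call (a minimal sketch; hypothetical table path, assuming RTE_ID and
# FROM_DFO fields exist):
#   rte_order("C:\\Test.gdb\\routes", "RTE_ID", "FROM_DFO")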
def unique_values_arcpy(table, field, query=None):
"""
Return all unique values in a field as a list of strings
    :param table: Input table (any format accepted by ArcPy)
:param field: The field to return unique values for
:param query: Optional query
:return: Returns a list of unique values as strings
"""
import arcpy
import numpy as np
desc = arcpy.Describe(table)
field_type = None
for f in desc.fields:
if f.name == field:
field_type = f.type
break
else:
pass
if not query:
rows = arcpy.SearchCursor(table)
else:
rows = arcpy.SearchCursor(table, query)
field_values = []
for row in rows:
if row.getValue(field) is not None:
field_value = row.getValue(field)
if field_type == "SmallInteger" or field_type == "Integer":
field_values.append(field_value)
else:
field_values.append(str(field_value))
else:
field_values.append("NoneType")
data = np.array(field_values)
unique_data = np.unique(data)
return unique_data.tolist()
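# Example call (a minimal sketch; hypothetical feature class, field and query):
#   names = unique_values_arcpy("C:\\Test.gdb\\Roadways", "RTE_NM",
#                               "RTE_NM IS NOT NULL")
#   print names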
def unique_values_report_arcpy(input_table, output_table, max_values=25):
"""
Given an ArcGIS table or feature class, creates a CSV of all unique values
:param input_table: Input table with values
:param output_table: Output CSV file
:param max_values: Total number of unique values to report (default=25)
:return: None
"""
from os import path
import csv
import arcpy
from arcpy import env
env.workspace = path.dirname(input_table)
in_table = path.basename(input_table)
with open(output_table, 'wb') as out_csv:
spamwriter = csv.DictWriter(out_csv, ["Field", "Unique Values"])
for field in [f.name for f in arcpy.ListFields(in_table)]:
uniq_val = unique_values_arcpy(input_table, field)
if len(uniq_val) <= max_values:
print "Field: {0} Values: {1}".format(field, uniq_val)
spamwriter.writerow({'Field': field,
'Unique Values': uniq_val})
else:
print "OBSCURED - Field: {0} has more than {1} unique " \
"values".format(field, max_values)
def unique_values_report_csv(input_table, output_table, max_values=25):
"""
    Given a table in CSV format, creates a new CSV of all unique values
:param input_table: Input table with values
:param output_table: Output CSV file
:param max_values: Total number of unique values to report (default=25)
:return: None
"""
from os import path
import csv
import arcpy
from arcpy import env
env.workspace = path.dirname(input_table)
in_table = path.basename(input_table)
with open(output_table, 'wb') as out_csv:
spamwriter = csv.DictWriter(out_csv, ["Field", "Unique Values"])
for field in [f.name for f in arcpy.ListFields(in_table)]:
uniq_val = unique_values_arcpy(input_table, field)
if len(uniq_val) <= max_values:
print "Field: {0} Values: {1}".format(field, uniq_val)
spamwriter.writerow({'Field': field,
'Unique Values': uniq_val})
else:
print "OBSCURED - Field: {0} has more than {1} unique " \
"values".format(field, max_values)
# Copyright 2013-2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Receives documents from the oplog worker threads and indexes them
into the backend.
This file is a document manager for MongoDB, but the intent
is that this file can be used as an example to add on different backends.
To extend this to other systems, simply implement the exact same class and
replace the method definitions with API calls for the desired backend.
"""
import logging
import pymongo
from gridfs import GridFS
from mongo_connector import errors, constants
from mongo_connector.util import exception_wrapper
from mongo_connector.doc_managers.doc_manager_base import DocManagerBase
wrap_exceptions = exception_wrapper({
pymongo.errors.ConnectionFailure: errors.ConnectionFailed,
pymongo.errors.OperationFailure: errors.OperationFailed})
LOG = logging.getLogger(__name__)
class DocManager(DocManagerBase):
"""The DocManager class creates a connection to the backend engine and
adds/removes documents, and in the case of rollback, searches for them.
    The reason for storing id/doc pairs as opposed to docs is so that
    multiple updates to the same doc reflect the most up-to-date version,
    as opposed to multiple, slightly different versions of a doc.
We are using MongoDB native fields for _id and ns, but we also store
them as fields in the document, due to compatibility issues.
"""
def __init__(self, url, **kwargs):
""" Verify URL and establish a connection.
"""
try:
self.mongo = pymongo.MongoClient(url)
except pymongo.errors.InvalidURI:
raise errors.ConnectionFailed("Invalid URI for MongoDB")
except pymongo.errors.ConnectionFailure:
raise errors.ConnectionFailed("Failed to connect to MongoDB")
self.namespace_set = kwargs.get("namespace_set")
self.chunk_size = kwargs.get('chunk_size', constants.DEFAULT_MAX_BULK)
def _db_and_collection(self, namespace):
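        # Split only on the first dot, e.g. "mydb.my.coll" -> ["mydb", "my.coll"].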
return namespace.split('.', 1)
@wrap_exceptions
def _namespaces(self):
"""Provides the list of namespaces being replicated to MongoDB
"""
if self.namespace_set:
return self.namespace_set
user_namespaces = []
db_list = self.mongo.database_names()
for database in db_list:
if database == "config" or database == "local":
continue
coll_list = self.mongo[database].collection_names()
for coll in coll_list:
if coll.startswith("system"):
continue
namespace = "%s.%s" % (database, coll)
user_namespaces.append(namespace)
return user_namespaces
def stop(self):
"""Stops any running threads
"""
LOG.info(
"Mongo DocManager Stopped: If you will not target this system "
"again with mongo-connector then you may drop the database "
"__mongo_connector, which holds metadata for Mongo Connector."
)
@wrap_exceptions
def handle_command(self, doc, namespace, timestamp):
db, _ = self._db_and_collection(namespace)
if doc.get('dropDatabase'):
for new_db in self.command_helper.map_db(db):
                self.mongo.drop_database(new_db)
if doc.get('renameCollection'):
a = self.command_helper.map_namespace(doc['renameCollection'])
b = self.command_helper.map_namespace(doc['to'])
if a and b:
self.mongo.admin.command(
"renameCollection", a, to=b)
if doc.get('create'):
new_db, coll = self.command_helper.map_collection(
db, doc['create'])
if new_db:
self.mongo[new_db].create_collection(coll)
if doc.get('drop'):
new_db, coll = self.command_helper.map_collection(
db, doc['drop'])
if new_db:
self.mongo[new_db].drop_collection(coll)
@wrap_exceptions
def update(self, document_id, update_spec, namespace, timestamp):
"""Apply updates given in update_spec to the document whose id
matches that of doc.
"""
db, coll = self._db_and_collection(namespace)
updated = self.mongo[db][coll].find_and_modify(
{'_id': document_id},
update_spec,
new=True
)
return updated
@wrap_exceptions
def upsert(self, doc, namespace, timestamp):
"""Update or insert a document into Mongo
"""
database, coll = self._db_and_collection(namespace)
self.mongo["__mongo_connector"][namespace].save({
'_id': doc['_id'],
"_ts": timestamp,
"ns": namespace
})
self.mongo[database][coll].save(doc)
@wrap_exceptions
def bulk_upsert(self, docs, namespace, timestamp):
def iterate_chunks():
dbname, collname = self._db_and_collection(namespace)
collection = self.mongo[dbname][collname]
meta_collection = self.mongo['__mongo_connector'][namespace]
more_chunks = True
while more_chunks:
bulk = collection.initialize_ordered_bulk_op()
bulk_meta = meta_collection.initialize_ordered_bulk_op()
for i in range(self.chunk_size):
try:
doc = next(docs)
selector = {'_id': doc['_id']}
bulk.find(selector).upsert().replace_one(doc)
bulk_meta.find(selector).upsert().replace_one({
'_id': doc['_id'],
'ns': namespace,
'_ts': timestamp
})
except StopIteration:
more_chunks = False
if i > 0:
yield bulk, bulk_meta
break
if more_chunks:
yield bulk, bulk_meta
for bulk_op, meta_bulk_op in iterate_chunks():
try:
bulk_op.execute()
meta_bulk_op.execute()
except pymongo.errors.DuplicateKeyError as e:
LOG.warn('Continuing after DuplicateKeyError: '
+ str(e))
@wrap_exceptions
def remove(self, document_id, namespace, timestamp):
"""Removes document from Mongo
The input is a python dictionary that represents a mongo document.
The documents has ns and _ts fields.
"""
database, coll = self._db_and_collection(namespace)
doc2 = self.mongo['__mongo_connector'][namespace].find_and_modify(
{'_id': document_id}, remove=True)
if (doc2 and doc2.get('gridfs_id')):
GridFS(self.mongo[database], coll).delete(doc2['gridfs_id'])
else:
self.mongo[database][coll].remove({'_id': document_id})
@wrap_exceptions
def insert_file(self, f, namespace, timestamp):
database, coll = self._db_and_collection(namespace)
id = GridFS(self.mongo[database], coll).put(f, filename=f.filename)
self.mongo["__mongo_connector"][namespace].save({
'_id': f._id,
'_ts': timestamp,
'ns': namespace,
'gridfs_id': id
})
@wrap_exceptions
def search(self, start_ts, end_ts):
"""Called to query Mongo for documents in a time range.
"""
for namespace in self._namespaces():
database, coll = self._db_and_collection(namespace)
for ts_ns_doc in self.mongo["__mongo_connector"][namespace].find(
{'_ts': {'$lte': end_ts,
'$gte': start_ts}}
):
yield ts_ns_doc
def commit(self):
""" Performs a commit
"""
return
@wrap_exceptions
def get_last_doc(self):
"""Returns the last document stored in Mongo.
"""
def docs_by_ts():
for namespace in self._namespaces():
database, coll = self._db_and_collection(namespace)
mc_coll = self.mongo["__mongo_connector"][namespace]
for ts_ns_doc in mc_coll.find(limit=1).sort('_ts', -1):
yield ts_ns_doc
return max(docs_by_ts(), key=lambda x: x["_ts"])
#
# Copyright 2016 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from collections import OrderedDict
import logbook
import pandas as pd
from pandas_datareader.data import DataReader
import pytz
from six import iteritems
from six.moves.urllib_error import HTTPError
from .benchmarks import get_benchmark_returns
from . import treasuries, treasuries_can
from ..utils.paths import (
cache_root,
data_root,
)
from ..utils.deprecate import deprecated
from zipline.utils.calendars import get_calendar
logger = logbook.Logger('Loader')
# Mapping from index symbol to appropriate bond data
INDEX_MAPPING = {
'^GSPC':
(treasuries, 'treasury_curves.csv', 'www.federalreserve.gov'),
'^GSPTSE':
(treasuries_can, 'treasury_curves_can.csv', 'bankofcanada.ca'),
'^FTSE': # use US treasuries until UK bonds implemented
(treasuries, 'treasury_curves.csv', 'www.federalreserve.gov'),
}
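# For example, the '^GSPTSE' benchmark uses the Canadian treasury loader and
# caches its curves in 'treasury_curves_can.csv' (sourced from bankofcanada.ca).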
ONE_HOUR = pd.Timedelta(hours=1)
nyse_cal = get_calendar('NYSE')
trading_day_nyse = nyse_cal.day
trading_days_nyse = nyse_cal.all_sessions
def last_modified_time(path):
"""
Get the last modified time of path as a Timestamp.
"""
return pd.Timestamp(os.path.getmtime(path), unit='s', tz='UTC')
def get_data_filepath(name):
"""
Returns a handle to data file.
Creates containing directory, if needed.
"""
dr = data_root()
if not os.path.exists(dr):
os.makedirs(dr)
return os.path.join(dr, name)
def get_cache_filepath(name):
cr = cache_root()
if not os.path.exists(cr):
os.makedirs(cr)
return os.path.join(cr, name)
def get_benchmark_filename(symbol):
return "%s_benchmark.csv" % symbol
def has_data_for_dates(series_or_df, first_date, last_date):
"""
Does `series_or_df` have data on or before first_date and on or after
last_date?
"""
dts = series_or_df.index
if not isinstance(dts, pd.DatetimeIndex):
raise TypeError("Expected a DatetimeIndex, but got %s." % type(dts))
first, last = dts[[0, -1]]
return (first <= first_date) and (last >= last_date)
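# For example, a series indexed from 2014-01-02 through 2014-12-31 has data for
# first_date=2014-02-03 and last_date=2014-11-28, so this returns True.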
def load_market_data(trading_day=trading_day_nyse,
trading_days=trading_days_nyse,
bm_symbol='^GSPC'):
"""
Load benchmark returns and treasury yield curves for the given calendar and
benchmark symbol.
Benchmarks are downloaded as a Series from Yahoo Finance. Treasury curves
are US Treasury Bond rates and are downloaded from 'www.federalreserve.gov'
by default. For Canadian exchanges, a loader for Canadian bonds from the
Bank of Canada is also available.
Results downloaded from the internet are cached in
~/.zipline/data. Subsequent loads will attempt to read from the cached
files before falling back to redownload.
Parameters
----------
trading_day : pandas.CustomBusinessDay, optional
A trading_day used to determine the latest day for which we
expect to have data. Defaults to an NYSE trading day.
trading_days : pd.DatetimeIndex, optional
A calendar of trading days. Also used for determining what cached
dates we should expect to have cached. Defaults to the NYSE calendar.
bm_symbol : str, optional
Symbol for the benchmark index to load. Defaults to '^GSPC', the Yahoo
ticker for the S&P 500.
Returns
-------
(benchmark_returns, treasury_curves) : (pd.Series, pd.DataFrame)
Notes
-----
Both return values are DatetimeIndexed with values dated to midnight in UTC
of each stored date. The columns of `treasury_curves` are:
'1month', '3month', '6month',
'1year','2year','3year','5year','7year','10year','20year','30year'
"""
first_date = trading_days[0]
now = pd.Timestamp.utcnow()
# We expect to have benchmark and treasury data that's current up until
# **two** full trading days prior to the most recently completed trading
# day.
# Example:
# On Thu Oct 22 2015, the previous completed trading day is Wed Oct 21.
# However, data for Oct 21 doesn't become available until the early morning
# hours of Oct 22. This means that there are times on the 22nd at which we
# cannot reasonably expect to have data for the 21st available. To be
# conservative, we instead expect that at any time on the 22nd, we can
# download data for Tuesday the 20th, which is two full trading days prior
# to the date on which we're running a test.
# We'll attempt to download new data if the latest entry in our cache is
# before this date.
last_date = trading_days[trading_days.get_loc(now, method='ffill') - 2]
br = ensure_benchmark_data(
bm_symbol,
first_date,
last_date,
now,
# We need the trading_day to figure out the close prior to the first
# date so that we can compute returns for the first date.
trading_day,
)
tc = ensure_treasury_data(
bm_symbol,
first_date,
last_date,
now,
)
benchmark_returns = br[br.index.slice_indexer(first_date, last_date)]
treasury_curves = tc[tc.index.slice_indexer(first_date, last_date)]
return benchmark_returns, treasury_curves
def ensure_benchmark_data(symbol, first_date, last_date, now, trading_day):
"""
Ensure we have benchmark data for `symbol` from `first_date` to `last_date`
Parameters
----------
symbol : str
The symbol for the benchmark to load.
first_date : pd.Timestamp
First required date for the cache.
last_date : pd.Timestamp
Last required date for the cache.
now : pd.Timestamp
The current time. This is used to prevent repeated attempts to
re-download data that isn't available due to scheduling quirks or other
failures.
trading_day : pd.CustomBusinessDay
A trading day delta. Used to find the day before first_date so we can
get the close of the day prior to first_date.
We attempt to download data unless we already have data stored at the data
cache for `symbol` whose first entry is before or on `first_date` and whose
last entry is on or after `last_date`.
If we perform a download and the cache criteria are not satisfied, we wait
at least one hour before attempting a redownload. This is determined by
comparing the current time to the result of os.path.getmtime on the cache
path.
"""
path = get_data_filepath(get_benchmark_filename(symbol))
# If the path does not exist, it means the first download has not happened
# yet, so don't try to read from 'path'.
if os.path.exists(path):
try:
data = pd.Series.from_csv(path).tz_localize('UTC')
if has_data_for_dates(data, first_date, last_date):
return data
# Don't re-download if we've successfully downloaded and written a
# file in the last hour.
last_download_time = last_modified_time(path)
if (now - last_download_time) <= ONE_HOUR:
logger.warn(
"Refusing to download new benchmark data because a "
"download succeeded at %s." % last_download_time
)
return data
except (OSError, IOError, ValueError) as e:
# These can all be raised by various versions of pandas on various
# classes of malformed input. Treat them all as cache misses.
logger.info(
"Loading data for {path} failed with error [{error}].".format(
path=path, error=e,
)
)
logger.info(
"Cache at {path} does not have data from {start} to {end}.\n"
"Downloading benchmark data for '{symbol}'.",
start=first_date,
end=last_date,
symbol=symbol,
path=path,
)
try:
data = get_benchmark_returns(
symbol,
first_date - trading_day,
last_date,
)
data.to_csv(path)
except (OSError, IOError, HTTPError):
logger.exception('failed to cache the new benchmark returns')
if not has_data_for_dates(data, first_date, last_date):
logger.warn("Still don't have expected data after redownload!")
return data
def ensure_treasury_data(bm_symbol, first_date, last_date, now):
"""
Ensure we have treasury data from treasury module associated with
`bm_symbol`.
Parameters
----------
bm_symbol : str
Benchmark symbol for which we're loading associated treasury curves.
first_date : pd.Timestamp
First date required to be in the cache.
last_date : pd.Timestamp
Last date required to be in the cache.
now : pd.Timestamp
The current time. This is used to prevent repeated attempts to
re-download data that isn't available due to scheduling quirks or other
failures.
We attempt to download data unless we already have data stored in the cache
for `module_name` whose first entry is before or on `first_date` and whose
last entry is on or after `last_date`.
If we perform a download and the cache criteria are not satisfied, we wait
at least one hour before attempting a redownload. This is determined by
comparing the current time to the result of os.path.getmtime on the cache
path.
"""
loader_module, filename, source = INDEX_MAPPING.get(
bm_symbol, INDEX_MAPPING['^GSPC']
)
first_date = max(first_date, loader_module.earliest_possible_date())
path = get_data_filepath(filename)
# If the path does not exist, it means the first download has not happened
# yet, so don't try to read from 'path'.
if os.path.exists(path):
try:
data = pd.DataFrame.from_csv(path).tz_localize('UTC')
if has_data_for_dates(data, first_date, last_date):
return data
# Don't re-download if we've successfully downloaded and written a
# file in the last hour.
last_download_time = last_modified_time(path)
if (now - last_download_time) <= ONE_HOUR:
logger.warn(
"Refusing to download new treasury data because a "
"download succeeded at %s." % last_download_time
)
return data
except (OSError, IOError, ValueError) as e:
# These can all be raised by various versions of pandas on various
# classes of malformed input. Treat them all as cache misses.
logger.info(
"Loading data for {path} failed with error [{error}].".format(
path=path, error=e,
)
)
try:
data = loader_module.get_treasury_data(first_date, last_date)
data.to_csv(path)
except (OSError, IOError, HTTPError):
logger.exception('failed to cache treasury data')
if not has_data_for_dates(data, first_date, last_date):
logger.warn("Still don't have expected data after redownload!")
return data
def _load_raw_yahoo_data(indexes=None, stocks=None, start=None, end=None):
"""Load closing prices from yahoo finance.
:Optional:
indexes : dict (Default: {'SPX': '^GSPC'})
Financial indexes to load.
stocks : list (Default: ['AAPL', 'GE', 'IBM', 'MSFT',
'XOM', 'AA', 'JNJ', 'PEP', 'KO'])
Stock closing prices to load.
        start : datetime (Default: datetime(1990, 1, 1, 0, 0, 0, 0, pytz.utc))
Retrieve prices from start date on.
end : datetime (Default: datetime(2002, 1, 1, 0, 0, 0, 0, pytz.utc))
Retrieve prices until end date.
:Note:
This is based on code presented in a talk by Wes McKinney:
http://wesmckinney.com/files/20111017/notebook_output.pdf
"""
assert indexes is not None or stocks is not None, """
must specify stocks or indexes"""
if start is None:
start = pd.datetime(1990, 1, 1, 0, 0, 0, 0, pytz.utc)
if start is not None and end is not None:
assert start < end, "start date is later than end date."
data = OrderedDict()
if stocks is not None:
for stock in stocks:
logger.info('Loading stock: {}'.format(stock))
stock_pathsafe = stock.replace(os.path.sep, '--')
cache_filename = "{stock}-{start}-{end}.csv".format(
stock=stock_pathsafe,
start=start,
end=end).replace(':', '-')
cache_filepath = get_cache_filepath(cache_filename)
if os.path.exists(cache_filepath):
stkd = pd.DataFrame.from_csv(cache_filepath)
else:
stkd = DataReader(stock, 'yahoo', start, end).sort_index()
stkd.to_csv(cache_filepath)
data[stock] = stkd
if indexes is not None:
for name, ticker in iteritems(indexes):
logger.info('Loading index: {} ({})'.format(name, ticker))
stkd = DataReader(ticker, 'yahoo', start, end).sort_index()
data[name] = stkd
return data
def load_from_yahoo(indexes=None,
stocks=None,
start=None,
end=None,
adjusted=True):
"""
Loads price data from Yahoo into a dataframe for each of the indicated
assets. By default, 'price' is taken from Yahoo's 'Adjusted Close',
which removes the impact of splits and dividends. If the argument
'adjusted' is False, then the non-adjusted 'close' field is used instead.
:param indexes: Financial indexes to load.
:type indexes: dict
:param stocks: Stock closing prices to load.
:type stocks: list
:param start: Retrieve prices from start date on.
:type start: datetime
:param end: Retrieve prices until end date.
:type end: datetime
:param adjusted: Adjust the price for splits and dividends.
:type adjusted: bool
"""
data = _load_raw_yahoo_data(indexes, stocks, start, end)
if adjusted:
close_key = 'Adj Close'
else:
close_key = 'Close'
df = pd.DataFrame({key: d[close_key] for key, d in iteritems(data)})
df.index = df.index.tz_localize(pytz.utc)
return df
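# Example usage (a minimal sketch; requires network access to the Yahoo source
# used by pandas-datareader, with hypothetical tickers and dates):
#   df = load_from_yahoo(stocks=['AAPL', 'IBM'],
#                        start=pd.datetime(2014, 1, 2, 0, 0, 0, 0, pytz.utc),
#                        end=pd.datetime(2014, 6, 2, 0, 0, 0, 0, pytz.utc))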
@deprecated(
'load_bars_from_yahoo is deprecated, please register a'
' yahoo_equities data bundle instead',
)
def load_bars_from_yahoo(indexes=None,
stocks=None,
start=None,
end=None,
adjusted=True):
"""
Loads data from Yahoo into a panel with the following
column names for each indicated security:
- open
- high
- low
- close
- volume
- price
Note that 'price' is Yahoo's 'Adjusted Close', which removes the
impact of splits and dividends. If the argument 'adjusted' is True, then
the open, high, low, and close values are adjusted as well.
:param indexes: Financial indexes to load.
:type indexes: dict
:param stocks: Stock closing prices to load.
:type stocks: list
:param start: Retrieve prices from start date on.
:type start: datetime
:param end: Retrieve prices until end date.
:type end: datetime
:param adjusted: Adjust open/high/low/close for splits and dividends.
The 'price' field is always adjusted.
:type adjusted: bool
"""
data = _load_raw_yahoo_data(indexes, stocks, start, end)
panel = pd.Panel(data)
# Rename columns
panel.minor_axis = ['open', 'high', 'low', 'close', 'volume', 'price']
panel.major_axis = panel.major_axis.tz_localize(pytz.utc)
# Adjust data
if adjusted:
adj_cols = ['open', 'high', 'low', 'close']
for ticker in panel.items:
ratio = (panel[ticker]['price'] / panel[ticker]['close'])
ratio_filtered = ratio.fillna(0).values
for col in adj_cols:
panel[ticker][col] *= ratio_filtered
return panel
def load_prices_from_csv(filepath, identifier_col, tz='UTC'):
data = pd.read_csv(filepath, index_col=identifier_col)
data.index = pd.DatetimeIndex(data.index, tz=tz)
data.sort_index(inplace=True)
return data
def load_prices_from_csv_folder(folderpath, identifier_col, tz='UTC'):
data = None
for file in os.listdir(folderpath):
if '.csv' not in file:
continue
raw = load_prices_from_csv(os.path.join(folderpath, file),
identifier_col, tz)
if data is None:
data = raw
else:
data = pd.concat([data, raw], axis=1)
return data
from __future__ import unicode_literals, division, absolute_import
from builtins import * # noqa pylint: disable=unused-import, redefined-builtin
import logging
from flexget import plugin
from flexget.entry import Entry
from flexget.event import event
from flexget.config_schema import one_or_more
from flexget.utils.requests import Session, TimedLimiter, RequestException
from flexget.utils.search import normalize_scene
from flexget.plugin import PluginError
log = logging.getLogger('rarbg')
requests = Session()
requests.add_domain_limiter(TimedLimiter('torrentapi.org', '3 seconds'))  # the API allows only 1 request per 2 seconds; 3 seconds is conservative
CATEGORIES = {
'all': 0,
# Movies
'x264': 17,
'x264 720p': 45,
'x264 1080p': 44,
'x264 3D': 47,
'XviD': 14,
'XviD 720p': 48,
'Full BD': 42,
# TV
'HDTV': 41,
'SDTV': 18,
# Adult
'XXX': 4,
# Music
'MusicMP3': 23,
'MusicFLAC': 25,
# Games
'Games/PC ISO': 27,
'Games/PC RIP': 28,
'Games/PS3': 40,
'Games/XBOX-360': 32,
'Software/PC ISO': 33,
# E-Books
'e-Books': 35
}
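# For example, a task configured with "category: x264 720p" is translated to
# category id 45 in the request sent to torrentapi.org.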
class SearchRarBG(object):
"""
RarBG search plugin. Implements https://torrentapi.org/apidocs_v2.txt
To perform search against single category:
rarbg:
category: x264 720p
To perform search against multiple categories:
rarbg:
category:
- x264 720p
- x264 1080p
Movie categories accepted: x264 720p, x264 1080p, XviD, Full BD
TV categories accepted: HDTV, SDTV
    You can also use the category ID manually if you so desire (e.g. x264 720p is actually category id '45')
"""
schema = {
'type': 'object',
'properties': {
'category': one_or_more({
'oneOf': [
{'type': 'integer'},
{'type': 'string', 'enum': list(CATEGORIES)},
]}),
'sorted_by': {'type': 'string', 'enum': ['seeders', 'leechers', 'last'], 'default': 'last'},
# min_seeders and min_leechers seem to be working again
'min_seeders': {'type': 'integer', 'default': 0},
'min_leechers': {'type': 'integer', 'default': 0},
'limit': {'type': 'integer', 'enum': [25, 50, 100], 'default': 25},
'ranked': {'type': 'boolean', 'default': True},
'use_tvdb': {'type': 'boolean', 'default': False},
},
"additionalProperties": False
}
base_url = 'https://torrentapi.org/pubapi_v2.php'
token = None
def get_token(self, refresh=False):
if refresh or not self.token:
try:
response = requests.get(self.base_url, params={'get_token': 'get_token', 'format': 'json',
'app_id': 'flexget'}).json()
self.token = response.get('token')
log.debug('RarBG token: %s', self.token)
except RequestException as e:
log.debug('Could not retrieve RarBG token', exc_info=True)
raise PluginError('Could not retrieve token: %s' % e)
return self.token
def get(self, params, token_error=False):
'''
Simple get-wrapper that allows updating invalid tokens
:param params: the params to be passed to requests
:param token_error: whether or not we previously have had token errors, if True we should fetch a new one
:return: json response
'''
params['token'] = self.get_token(refresh=token_error)
try:
response = requests.get(self.base_url, params=params)
log.debug('requesting: %s', response.url)
response = response.json()
except RequestException as e:
log.error('Rarbg request failed: %s', e)
return
# error code 1, 2 and 4 pertain to token errors
if response.get('error_code') in [1, 2, 4]:
log.debug('Invalid token. Error %s: %s', response['error_code'], response.get('error'))
if token_error:
raise PluginError('Could not retrieve a valid token: %s' % response.get('error'))
return self.get(params=params, token_error=True)
return response
@plugin.internet(log)
def search(self, task, entry, config):
"""
Search for entries on RarBG
"""
categories = config.get('category', 'all')
        # Ensure categories is a list
if not isinstance(categories, list):
categories = [categories]
# Convert named category to its respective category id number
categories = [c if isinstance(c, int) else CATEGORIES[c] for c in categories]
category_url_fragment = ';'.join(str(c) for c in categories)
entries = set()
params = {'mode': 'search', 'ranked': int(config['ranked']),
'min_seeders': config['min_seeders'], 'min_leechers': config['min_leechers'],
'sort': config['sorted_by'], 'category': category_url_fragment, 'format': 'json_extended',
'app_id': 'flexget'}
for search_string in entry.get('search_strings', [entry['title']]):
params.pop('search_string', None)
params.pop('search_imdb', None)
params.pop('search_tvdb', None)
if entry.get('movie_name') and entry.get('imdb_id'):
params['search_imdb'] = entry.get('imdb_id')
else:
query = normalize_scene(search_string)
query_url_fragment = query.encode('utf8')
params['search_string'] = query_url_fragment
if config['use_tvdb']:
plugin.get_plugin_by_name('thetvdb_lookup').instance.lazy_series_lookup(entry, 'en')
params['search_tvdb'] = entry.get('tvdb_id')
log.debug('Using tvdb id %s', entry.get('tvdb_id'))
response = self.get(params=params)
if not response:
continue
# error code 10 and 20 just mean no results were found
if response.get('error_code') in [10, 20]:
searched_string = params.get('search_string') or 'imdb={0}'.format(params.get('search_imdb')) or \
'tvdb={0}'.format(params.get('tvdb_id'))
log.debug('No results found for %s. Message from rarbg: %s', searched_string, response.get('error'))
continue
elif response.get('error'):
log.error('Error code %s: %s', response.get('error_code'), response.get('error'))
continue
else:
for result in response.get('torrent_results'):
e = Entry()
e['title'] = result.get('title')
e['url'] = result.get('download')
e['torrent_seeds'] = int(result.get('seeders'))
e['torrent_leeches'] = int(result.get('leechers'))
e['content_size'] = int(result.get('size')) / 1024 / 1024
episode_info = result.get('episode_info')
if episode_info:
e['imdb_id'] = episode_info.get('imdb')
e['tvdb_id'] = episode_info.get('tvdb')
e['tvrage_id'] = episode_info.get('tvrage')
entries.add(e)
return entries
@event('plugin.register')
def register_plugin():
plugin.register(SearchRarBG, 'rarbg', interfaces=['search'], api_ver=2)
from __future__ import unicode_literals
import getpass
import optparse
import os
import subprocess
import sys
try:
import urllib.request as compat_urllib_request
except ImportError: # Python 2
import urllib2 as compat_urllib_request
try:
import urllib.error as compat_urllib_error
except ImportError: # Python 2
import urllib2 as compat_urllib_error
try:
import urllib.parse as compat_urllib_parse
except ImportError: # Python 2
import urllib as compat_urllib_parse
try:
from urllib.parse import urlparse as compat_urllib_parse_urlparse
except ImportError: # Python 2
from urlparse import urlparse as compat_urllib_parse_urlparse
try:
import urllib.parse as compat_urlparse
except ImportError: # Python 2
import urlparse as compat_urlparse
try:
import http.cookiejar as compat_cookiejar
except ImportError: # Python 2
import cookielib as compat_cookiejar
try:
import html.entities as compat_html_entities
except ImportError: # Python 2
import htmlentitydefs as compat_html_entities
try:
import html.parser as compat_html_parser
except ImportError: # Python 2
import HTMLParser as compat_html_parser
try:
import http.client as compat_http_client
except ImportError: # Python 2
import httplib as compat_http_client
try:
from urllib.error import HTTPError as compat_HTTPError
except ImportError: # Python 2
from urllib2 import HTTPError as compat_HTTPError
try:
from urllib.request import urlretrieve as compat_urlretrieve
except ImportError: # Python 2
from urllib import urlretrieve as compat_urlretrieve
try:
from subprocess import DEVNULL
compat_subprocess_get_DEVNULL = lambda: DEVNULL
except ImportError:
compat_subprocess_get_DEVNULL = lambda: open(os.path.devnull, 'w')
try:
from urllib.parse import unquote as compat_urllib_parse_unquote
except ImportError:
def compat_urllib_parse_unquote(string, encoding='utf-8', errors='replace'):
if string == '':
return string
res = string.split('%')
if len(res) == 1:
return string
if encoding is None:
encoding = 'utf-8'
if errors is None:
errors = 'replace'
# pct_sequence: contiguous sequence of percent-encoded bytes, decoded
pct_sequence = b''
string = res[0]
for item in res[1:]:
try:
if not item:
raise ValueError
pct_sequence += item[:2].decode('hex')
rest = item[2:]
if not rest:
# This segment was just a single percent-encoded character.
# May be part of a sequence of code units, so delay decoding.
# (Stored in pct_sequence).
continue
except ValueError:
rest = '%' + item
# Encountered non-percent-encoded characters. Flush the current
# pct_sequence.
string += pct_sequence.decode(encoding, errors) + rest
pct_sequence = b''
if pct_sequence:
# Flush the final pct_sequence
string += pct_sequence.decode(encoding, errors)
return string
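# For example, compat_urllib_parse_unquote('abc%20def') returns 'abc def' on
# both the stdlib and the fallback code paths.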
try:
from urllib.parse import parse_qs as compat_parse_qs
except ImportError: # Python 2
# HACK: The following is the correct parse_qs implementation from cpython 3's stdlib.
# Python 2's version is apparently totally broken
def _parse_qsl(qs, keep_blank_values=False, strict_parsing=False,
encoding='utf-8', errors='replace'):
qs, _coerce_result = qs, unicode
pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')]
r = []
for name_value in pairs:
if not name_value and not strict_parsing:
continue
nv = name_value.split('=', 1)
if len(nv) != 2:
if strict_parsing:
raise ValueError("bad query field: %r" % (name_value,))
# Handle case of a control-name with no equal sign
if keep_blank_values:
nv.append('')
else:
continue
if len(nv[1]) or keep_blank_values:
name = nv[0].replace('+', ' ')
name = compat_urllib_parse_unquote(
name, encoding=encoding, errors=errors)
name = _coerce_result(name)
value = nv[1].replace('+', ' ')
value = compat_urllib_parse_unquote(
value, encoding=encoding, errors=errors)
value = _coerce_result(value)
r.append((name, value))
return r
def compat_parse_qs(qs, keep_blank_values=False, strict_parsing=False,
encoding='utf-8', errors='replace'):
parsed_result = {}
pairs = _parse_qsl(qs, keep_blank_values, strict_parsing,
encoding=encoding, errors=errors)
for name, value in pairs:
if name in parsed_result:
parsed_result[name].append(value)
else:
parsed_result[name] = [value]
return parsed_result
try:
compat_str = unicode # Python 2
except NameError:
compat_str = str
try:
compat_chr = unichr # Python 2
except NameError:
compat_chr = chr
try:
from xml.etree.ElementTree import ParseError as compat_xml_parse_error
except ImportError: # Python 2.6
from xml.parsers.expat import ExpatError as compat_xml_parse_error
try:
from shlex import quote as shlex_quote
except ImportError: # Python < 3.3
def shlex_quote(s):
return "'" + s.replace("'", "'\"'\"'") + "'"
def compat_ord(c):
    if type(c) is int:
        return c
    else:
        return ord(c)
if sys.version_info >= (3, 0):
compat_getenv = os.getenv
compat_expanduser = os.path.expanduser
else:
# Environment variables should be decoded with filesystem encoding.
# Otherwise it will fail if any non-ASCII characters present (see #3854 #3217 #2918)
def compat_getenv(key, default=None):
from .utils import get_filesystem_encoding
env = os.getenv(key, default)
if env:
env = env.decode(get_filesystem_encoding())
return env
# HACK: The default implementations of os.path.expanduser from cpython do not decode
# environment variables with filesystem encoding. We will work around this by
# providing adjusted implementations.
# The following are os.path.expanduser implementations from cpython 2.7.8 stdlib
# for different platforms with correct environment variables decoding.
if os.name == 'posix':
def compat_expanduser(path):
"""Expand ~ and ~user constructions. If user or $HOME is unknown,
do nothing."""
if not path.startswith('~'):
return path
i = path.find('/', 1)
if i < 0:
i = len(path)
if i == 1:
if 'HOME' not in os.environ:
import pwd
userhome = pwd.getpwuid(os.getuid()).pw_dir
else:
userhome = compat_getenv('HOME')
else:
import pwd
try:
pwent = pwd.getpwnam(path[1:i])
except KeyError:
return path
userhome = pwent.pw_dir
userhome = userhome.rstrip('/')
return (userhome + path[i:]) or '/'
elif os.name == 'nt' or os.name == 'ce':
def compat_expanduser(path):
"""Expand ~ and ~user constructs.
If user or $HOME is unknown, do nothing."""
if path[:1] != '~':
return path
i, n = 1, len(path)
while i < n and path[i] not in '/\\':
i = i + 1
if 'HOME' in os.environ:
userhome = compat_getenv('HOME')
elif 'USERPROFILE' in os.environ:
userhome = compat_getenv('USERPROFILE')
            elif 'HOMEPATH' not in os.environ:
return path
else:
try:
drive = compat_getenv('HOMEDRIVE')
except KeyError:
drive = ''
userhome = os.path.join(drive, compat_getenv('HOMEPATH'))
if i != 1: #~user
userhome = os.path.join(os.path.dirname(userhome), path[1:i])
return userhome + path[i:]
else:
compat_expanduser = os.path.expanduser
if sys.version_info < (3, 0):
def compat_print(s):
from .utils import preferredencoding
print(s.encode(preferredencoding(), 'xmlcharrefreplace'))
else:
def compat_print(s):
assert type(s) == type(u'')
print(s)
try:
subprocess_check_output = subprocess.check_output
except AttributeError:
def subprocess_check_output(*args, **kwargs):
assert 'input' not in kwargs
p = subprocess.Popen(*args, stdout=subprocess.PIPE, **kwargs)
output, _ = p.communicate()
ret = p.poll()
if ret:
raise subprocess.CalledProcessError(ret, p.args, output=output)
return output
if sys.version_info < (3, 0) and sys.platform == 'win32':
def compat_getpass(prompt, *args, **kwargs):
if isinstance(prompt, compat_str):
from .utils import preferredencoding
prompt = prompt.encode(preferredencoding())
return getpass.getpass(prompt, *args, **kwargs)
else:
compat_getpass = getpass.getpass
# Old 2.6 and 2.7 releases require kwargs to be bytes
try:
(lambda x: x)(**{'x': 0})
except TypeError:
def compat_kwargs(kwargs):
return dict((bytes(k), v) for k, v in kwargs.items())
else:
compat_kwargs = lambda kwargs: kwargs
# Fix https://github.com/rg3/youtube-dl/issues/4223
# See http://bugs.python.org/issue9161 for what is broken
def workaround_optparse_bug9161():
op = optparse.OptionParser()
og = optparse.OptionGroup(op, 'foo')
try:
og.add_option('-t')
except TypeError:
real_add_option = optparse.OptionGroup.add_option
def _compat_add_option(self, *args, **kwargs):
enc = lambda v: (
v.encode('ascii', 'replace') if isinstance(v, compat_str)
else v)
bargs = [enc(a) for a in args]
bkwargs = dict(
(k, enc(v)) for k, v in kwargs.items())
return real_add_option(self, *bargs, **bkwargs)
optparse.OptionGroup.add_option = _compat_add_option
__all__ = [
'compat_HTTPError',
'compat_chr',
'compat_cookiejar',
'compat_expanduser',
'compat_getenv',
'compat_getpass',
'compat_html_entities',
'compat_html_parser',
'compat_http_client',
'compat_kwargs',
'compat_ord',
'compat_parse_qs',
'compat_print',
'compat_str',
'compat_subprocess_get_DEVNULL',
'compat_urllib_error',
'compat_urllib_parse',
'compat_urllib_parse_unquote',
'compat_urllib_parse_urlparse',
'compat_urllib_request',
'compat_urlparse',
'compat_urlretrieve',
'compat_xml_parse_error',
'shlex_quote',
'subprocess_check_output',
'workaround_optparse_bug9161',
]
# Copyright 2018 Google Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
from apiclient.errors import HttpError
import cloudstorage
from google.appengine.ext import testbed
from google.cloud.bigquery.table import Table
from google.cloud.exceptions import ClientError
from urllib2 import HTTPError
import mock
from core import workers
class TestAbstractWorker(unittest.TestCase):
def setUp(self):
super(TestAbstractWorker, self).setUp()
self.testbed = testbed.Testbed()
self.testbed.activate()
# Activate which service we want to stub
self.testbed.init_taskqueue_stub()
self.testbed.init_app_identity_stub()
def tearDown(self):
super(TestAbstractWorker, self).tearDown()
self.testbed.deactivate()
def test_default_params_values(self):
class DummyWorker(workers.Worker):
PARAMS = [
('int_with_default', 'number', True, 20, 'Description'),
]
worker = DummyWorker({}, 1, 1)
self.assertIsInstance(worker._params['int_with_default'], int)
self.assertEqual(worker._params['int_with_default'], 20)
@mock.patch('core.cloud_logging.logger')
def test_log_info_succeeds(self, patched_logger):
patched_logger.log_struct.__name__ = 'foo'
worker = workers.Worker({}, 1, 1)
self.assertEqual(patched_logger.log_struct.call_count, 0)
worker.log_info('Hi there!')
self.assertEqual(patched_logger.log_struct.call_count, 1)
call_first_arg = patched_logger.log_struct.call_args[0][0]
self.assertEqual(call_first_arg.get('log_level'), 'INFO')
@mock.patch('core.cloud_logging.logger')
def test_log_warn_succeeds(self, patched_logger):
patched_logger.log_struct.__name__ = 'foo'
worker = workers.Worker({}, 1, 1)
self.assertEqual(patched_logger.log_struct.call_count, 0)
worker.log_warn('Hi there!')
self.assertEqual(patched_logger.log_struct.call_count, 1)
call_first_arg = patched_logger.log_struct.call_args[0][0]
self.assertEqual(call_first_arg.get('log_level'), 'WARNING')
@mock.patch('core.cloud_logging.logger')
def test_log_error_succeeds(self, patched_logger):
patched_logger.log_struct.__name__ = 'foo'
worker = workers.Worker({}, 1, 1)
self.assertEqual(patched_logger.log_struct.call_count, 0)
worker.log_error('Hi there!')
self.assertEqual(patched_logger.log_struct.call_count, 1)
call_first_arg = patched_logger.log_struct.call_args[0][0]
self.assertEqual(call_first_arg.get('log_level'), 'ERROR')
@mock.patch('core.cloud_logging.logger')
def test_execute_client_error_raises_worker_exception(self, patched_logger):
patched_logger.log_struct.__name__ = 'foo'
class DummyWorker(workers.Worker):
def _execute(self):
raise ClientError('There has been an issue here.')
worker = DummyWorker({}, 1, 1)
with self.assertRaises(workers.WorkerException):
worker.execute()
def test_enqueue_succeedly_add_to_the_list(self):
worker = workers.Worker({}, 1, 1)
self.assertEqual(len(worker._workers_to_enqueue), 0)
worker._enqueue('DummyClass', 'params')
self.assertEqual(len(worker._workers_to_enqueue), 1)
self.assertEqual(worker._workers_to_enqueue[0][0], 'DummyClass')
self.assertEqual(worker._workers_to_enqueue[0][1], 'params')
@mock.patch('time.sleep')
@mock.patch('core.cloud_logging.logger')
def test_retry_until_a_finite_number_of_times(self, patched_logger,
patched_time_sleep):
patched_logger.log_struct.__name__ = 'foo'
# NB: bypass the time.sleep wait, otherwise the test will take ages
patched_time_sleep.side_effect = lambda delay: delay
worker = workers.Worker({}, 1, 1)
def _raise_value_error_exception(*args, **kwargs):
raise ValueError('Wrong value.')
fake_request = mock.Mock()
fake_request.__name__ = 'foo'
fake_request.side_effect = _raise_value_error_exception
with self.assertRaises(ValueError):
worker.retry(fake_request)()
self.assertGreaterEqual(fake_request.call_count, 2)
def test_retry_raises_error_if_bad_request_error_in_apiclient(self):
worker = workers.Worker({}, 1, 1)
def _raise_value_error_exception(*args, **kwargs):
raise HttpError(mock.Mock(status=400), '')
fake_request = mock.Mock()
fake_request.__name__ = 'foo'
fake_request.side_effect = _raise_value_error_exception
with self.assertRaises(HttpError):
worker.retry(fake_request)()
self.assertEqual(fake_request.call_count, 1)
def test_retry_raises_error_if_bad_request_error_in_urllib(self):
worker = workers.Worker({}, 1, 1)
def _raise_value_error_exception(*args, **kwargs):
raise HTTPError('http://example.com/', 400, '', [], None)
fake_request = mock.Mock()
fake_request.__name__ = 'foo'
fake_request.side_effect = _raise_value_error_exception
with self.assertRaises(HTTPError):
worker.retry(fake_request)()
self.assertEqual(fake_request.call_count, 1)
class TestBQWorker(unittest.TestCase):
@mock.patch('time.sleep')
@mock.patch('google.cloud.bigquery.job.QueryJob')
def test_begin_and_wait_start_jobs(self, patched_bigquery_QueryJob,
patched_time_sleep):
# NB: bypass the time.sleep wait, otherwise the test will take ages
patched_time_sleep.side_effect = lambda delay: delay
worker = workers.BQWorker({}, 1, 1)
job0 = patched_bigquery_QueryJob()
job0.begin.side_effect = lambda: True
def _mark_as_done():
job0.state = 'DONE'
job0.reload.side_effect = _mark_as_done
job0.error_result = None
worker._begin_and_wait(job0)
job0.begin.assert_called_once()
@mock.patch('time.sleep')
@mock.patch('google.cloud.bigquery.job.QueryJob')
@mock.patch('core.workers.BQWorker._enqueue')
def test_begin_and_wait_enqueue_bqwaiter_after_some_time(self,
patched_BQWorker_enqueue, patched_bigquery_QueryJob, patched_time_sleep):
# NB: bypass the time.sleep wait, otherwise the test will take ages
patched_time_sleep.side_effect = lambda delay: delay
def _fake_enqueue(*args, **kwargs):
# Do Nothing
return True
patched_BQWorker_enqueue.side_effect = _fake_enqueue
worker = workers.BQWorker({'bq_project_id': 'BQID'}, 1, 1)
job0 = patched_bigquery_QueryJob()
job0.error_result = None
worker._begin_and_wait(job0)
patched_BQWorker_enqueue.assert_called_once()
self.assertEqual(patched_BQWorker_enqueue.call_args[0][0], 'BQWaiter')
self.assertIsInstance(patched_BQWorker_enqueue.call_args[0][1], dict)
class TestBQWaiter(unittest.TestCase):
def test_execute_enqueue_job_if_done(self):
patcher_get_client = mock.patch.object(workers.BQWaiter, '_get_client',
return_value=None)
self.addCleanup(patcher_get_client.stop)
patcher_get_client.start()
mockAsyncJob = mock.Mock()
mockAsyncJob.error_result = None
patcher_async_job = mock.patch('google.cloud.bigquery.job._AsyncJob',
return_value=mockAsyncJob)
self.addCleanup(patcher_async_job.stop)
patcher_async_job.start()
patcher_worker_enqueue = mock.patch('core.workers.BQWaiter._enqueue')
self.addCleanup(patcher_worker_enqueue.stop)
patched_enqueue = patcher_worker_enqueue.start()
worker = workers.BQWaiter(
{
'bq_project_id': 'BQID',
'job_names': ['Job1', 'Job2'],
},
1,
1)
worker._client = mock.Mock()
worker._execute()
patched_enqueue.assert_called_once()
self.assertEqual(patched_enqueue.call_args[0][0], 'BQWaiter')
class TestStorageToBQImporter(unittest.TestCase):
def setUp(self):
super(TestStorageToBQImporter, self).setUp()
self.testbed = testbed.Testbed()
self.testbed.activate()
# Activate which service we want to stub
self.testbed.init_urlfetch_stub()
self.testbed.init_app_identity_stub()
self.testbed.init_blobstore_stub()
self.testbed.init_datastore_v3_stub()
patcher_listbucket = mock.patch('cloudstorage.listbucket')
patched_listbucket = patcher_listbucket.start()
self.addCleanup(patcher_listbucket.stop)
def _fake_listbucket(bucket_prefix):
filenames = [
'input.csv',
'subdir/input.csv',
'data.csv',
'subdir/data.csv',
]
for suffix in filenames:
filename = os.path.join(bucket_prefix, suffix)
stat = cloudstorage.GCSFileStat(
filename,
0,
'686897696a7c876b7e',
0)
yield stat
patched_listbucket.side_effect = _fake_listbucket
def tearDown(self):
super(TestStorageToBQImporter, self).tearDown()
self.testbed.deactivate()
def test_get_source_uris_succeeds(self):
worker = workers.StorageToBQImporter(
{
'source_uris': [
'gs://bucket/data.csv',
'gs://bucket/subdir/data.csv',
]
},
1,
1)
source_uris = worker._get_source_uris()
self.assertEqual(len(source_uris), 2)
self.assertEqual(source_uris[0], 'gs://bucket/data.csv')
self.assertEqual(source_uris[1], 'gs://bucket/subdir/data.csv')
def test_get_source_uris_with_pattern(self):
worker = workers.StorageToBQImporter(
{
'source_uris': [
'gs://bucket/subdir/*.csv',
]
},
1,
1)
source_uris = worker._get_source_uris()
self.assertEqual(len(source_uris), 2)
self.assertEqual(source_uris[0], 'gs://bucket/subdir/input.csv')
self.assertEqual(source_uris[1], 'gs://bucket/subdir/data.csv')
class TestBQToMeasurementProtocolMixin(object):
def _use_query_results(self, response_json):
# NB: be sure to remove the jobReference from the api response used to
# create the Table instance.
response_json_copy = response_json.copy()
del response_json_copy['jobReference']
mock_dataset = mock.Mock()
mock_dataset._client = self._client
mock_table = Table('mock_table', mock_dataset)
        self._client._connection.api_request.return_value = response_json_copy
self._client.dataset.return_value = mock_dataset
mock_dataset.table.return_value = mock_table
class TestBQToMeasurementProtocolProcessor(TestBQToMeasurementProtocolMixin, unittest.TestCase):
def setUp(self):
super(TestBQToMeasurementProtocolProcessor, self).setUp()
self._client = mock.Mock()
patcher_get_client = mock.patch.object(
workers.BQToMeasurementProtocolProcessor,
'_get_client',
return_value=self._client)
self.addCleanup(patcher_get_client.stop)
patcher_get_client.start()
patcher_requests_post = mock.patch('requests.post')
self.addCleanup(patcher_requests_post.stop)
self._patched_post = patcher_requests_post.start()
self.maxDiff = None # This is to see full diff when self.assertEqual fails.
@mock.patch('time.sleep')
def test_success_with_one_post_request(self, patched_time_sleep):
# Bypass the time.sleep wait
patched_time_sleep.return_value = 1
self._worker = workers.BQToMeasurementProtocolProcessor(
{
'bq_project_id': 'BQID',
'bq_dataset_id': 'DTID',
'bq_table_id': 'table_id',
'bq_page_token': None,
'bq_batch_size': 10,
'mp_batch_size': 20,
'debug': False,
},
1,
1)
self._use_query_results({
'tableReference': {
'tableId': 'mock_table',
},
'jobReference': {
'jobId': 'two-rows-query',
},
'rows': [
{
'f': [
{'v': 'UA-12345-1'},
{'v': '35009a79-1a05-49d7-b876-2b884d0f825b'},
{'v': 'event'},
{'v': 1},
{'v': 'category'},
{'v': 'action'},
{'v': 'label'},
{'v': 0.9},
{'v': 'User Agent / 1.0'},
{'v': None},
]
},
{
'f': [
{'v': 'UA-12345-1'},
{'v': '35009a79-1a05-49d7-b876-2b884d0f825b'},
{'v': 'event'},
{'v': 1},
{'v': 'category'},
{'v': 'action'},
{'v': u'\u043c\u0435\u0442\u043a\u0430'},
{'v': 0.8},
{'v': 'User Agent / 1.0'},
{'v': 'segment1'},
]
}
],
'schema': {
'fields': [
{'name': 'tid', 'type': 'STRING'},
{'name': 'cid', 'type': 'STRING'},
{'name': 't', 'type': 'STRING'},
{'name': 'ni', 'type': 'INTEGER'},
{'name': 'ec', 'type': 'STRING'},
{'name': 'ea', 'type': 'STRING'},
{'name': 'el', 'type': 'STRING'},
{'name': 'ev', 'type': 'FLOAT'},
{'name': 'ua', 'type': 'STRING'},
{'name': 'cd1', 'type': 'STRING'},
]
}
})
mock_response = mock.Mock()
mock_response.status_code = 200
self._patched_post.return_value = mock_response
self._worker._execute()
self._patched_post.assert_called_once()
self.assertEqual(
self._patched_post.call_args[0][0],
'https://www.google-analytics.com/batch')
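# Each BQ row is expected to become one Measurement Protocol hit: fields
# with a None value are dropped, v=1 is added, parameters are URL-encoded
# and sorted by name, and the hits are newline-joined for the /batch call.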
self.assertEqual(
self._patched_post.call_args[1],
{
'headers': {'user-agent': 'CRMint / 0.1'},
'data':
"""cid=35009a79-1a05-49d7-b876-2b884d0f825b&ea=action&ec=category&el=label&ev=0.9&ni=1&t=event&tid=UA-12345-1&ua=User+Agent+%2F+1.0&v=1
cd1=segment1&cid=35009a79-1a05-49d7-b876-2b884d0f825b&ea=action&ec=category&el=%D0%BC%D0%B5%D1%82%D0%BA%D0%B0&ev=0.8&ni=1&t=event&tid=UA-12345-1&ua=User+Agent+%2F+1.0&v=1""",
})
@mock.patch('time.sleep')
def test_success_with_enhanced_ecommerce_request(self, patched_time_sleep):
# Bypass the time.sleep wait
patched_time_sleep.return_value = 1
self._worker = workers.BQToMeasurementProtocolProcessor(
{
'bq_project_id': 'BQID',
'bq_dataset_id': 'DTID',
'bq_table_id': 'table_id',
'bq_page_token': None,
'bq_batch_size': 10,
'mp_batch_size': 20,
'debug': False,
},
1,
1)
self._use_query_results({
'tableReference': {
'tableId': 'mock_table',
},
'jobReference': {
'jobId': 'one-row-with-array-of-structs-query',
},
'rows': [
{
'f': [
{'v': 'UA-12345-6'}, # tid
{'v': '123456789.1234567890'}, # cid
{'v': 'pageview'}, # t
{'v': 'purchase'}, # pa
{'v': '987654321'}, # ti
{'v': 'Moscow'}, # ta
{'v': '1540.0'}, # tr
{'v': 'RUB'}, # cu
{
'v': [ # pr
{
'v': { # pr1
'f': [
{'v': 'SKU1'}, # pr1id
{'v': 'Product1'}, # pr1nm
{'v': 'Brand1'}, # pr1br
{'v': 'Cat1'}, # pr1ca
{'v': '110.0'}, # pr1pr
{'v': '1'} # pr1qt
]
}
},
{
'v': { # pr2
'f': [
{'v': 'SKU2'}, # pr2id
{'v': 'Product2'}, # pr2nm
{'v': 'Brand2'}, # pr2br
{'v': 'Cat2'}, # pr2ca
{'v': '220.0'}, # pr2pr
{'v': '2'} # pr2qt
]
}
},
{
'v': { # pr3
'f': [
{'v': 'SKU3'}, # pr3id
{'v': 'Product3'}, # pr3nm
{'v': 'Brand3'}, # pr3br
{'v': 'Cat3'}, # pr3ca
{'v': '330.0'}, # pr3pr
{'v': '3'} # pr3qt
]
}
}
]
},
{
'v': [ # il
{ # il1
'v': {
'f': [
{'v': 'List1'}, # il1nm
{
'v': [ # il1pi
{
'v': { # il1pi1
'f': [
{'v': 'SKU11'}, # il1pi1id
{'v': 'Product11'}, # il1pi1nm
{'v': 'Brand11'}, # il1pi1br
{'v': 'Cat11'}, # il1pi1ca
{'v': '1110.0'} # il1pi1pr
]
}
},
{
'v': { # il1pi2
'f': [
{'v': 'SKU12'}, # il1pi2id
{'v': 'Product12'}, # il1pi2nm
{'v': 'Brand12'}, # il1pi2br
{'v': 'Cat12'}, # il1pi2ca
{'v': '1220.0'} # il1pi2pr
]
}
},
{
'v': { # il1pi3
'f': [
{'v': 'SKU13'}, # il1pi3id
{'v': 'Product13'}, # il1pi3nm
{'v': 'Brand13'}, # il1pi3br
{'v': 'Cat13'}, # il1pi3ca
{'v': '1330.0'} # il1pi3pr
]
}
}
]
}
]
}
},
{ # il2
'v': {
'f': [
{'v': 'List2'}, # il2nm
{
'v': [ # il2pi
{
'v': { # il2pi1
'f': [
{'v': 'SKU21'}, # il2pi1id
{'v': 'Product21'}, # il2pi1nm
{'v': 'Brand21'}, # il2pi1br
{'v': 'Cat21'}, # il2pi1ca
{'v': '2110.0'} # il2pi1pr
]
}
},
{
'v': { # il2pi2
'f': [
{'v': 'SKU22'}, # il2pi2id
{'v': 'Product22'}, # il2pi2nm
{'v': 'Brand22'}, # il2pi2br
{'v': None}, # il2pi2ca
{'v': '2220.0'} # il2pi2pr
]
}
},
{
'v': { # il2pi3
'f': [
{'v': 'SKU23'}, # il2pi3id
{'v': 'Product23'}, # il2pi3nm
{'v': 'Brand23'}, # il2pi3br
{'v': 'Cat23'}, # il2pi3ca
{'v': '2330.0'} # il2pi3pr
]
}
}
]
}
]
}
}
]
}
]
}
],
'schema': {
'fields': [
{'name': 'tid', 'type': 'STRING', 'mode': 'NULLABLE'},
{'name': 'cid', 'type': 'STRING', 'mode': 'NULLABLE'},
{'name': 't', 'type': 'STRING', 'mode': 'NULLABLE'},
{'name': 'pa', 'type': 'STRING', 'mode': 'NULLABLE'},
{'name': 'ti', 'type': 'INTEGER', 'mode': 'NULLABLE'},
{'name': 'ta', 'type': 'STRING', 'mode': 'NULLABLE'},
{'name': 'tr', 'type': 'FLOAT', 'mode': 'NULLABLE'},
{'name': 'cu', 'type': 'STRING', 'mode': 'NULLABLE'},
{'name': 'pr', 'type': 'RECORD', 'mode': 'REPEATED', 'fields': [
{'name': 'id', 'type': 'STRING', 'mode': 'NULLABLE'},
{'name': 'nm', 'type': 'STRING', 'mode': 'NULLABLE'},
{'name': 'br', 'type': 'STRING', 'mode': 'NULLABLE'},
{'name': 'ca', 'type': 'STRING', 'mode': 'NULLABLE'},
{'name': 'pr', 'type': 'FLOAT', 'mode': 'NULLABLE'},
{'name': 'qt', 'type': 'INTEGER', 'mode': 'NULLABLE'}
]},
{'name': 'il', 'type': 'RECORD', 'mode': 'REPEATED', 'fields': [
{'name': 'nm', 'type': 'STRING', 'mode': 'NULLABLE'},
{'name': 'pi', 'type': 'RECORD', 'mode': 'REPEATED', 'fields': [
{'name': 'id', 'type': 'STRING', 'mode': 'NULLABLE'},
{'name': 'nm', 'type': 'STRING', 'mode': 'NULLABLE'},
{'name': 'br', 'type': 'STRING', 'mode': 'NULLABLE'},
{'name': 'ca', 'type': 'STRING', 'mode': 'NULLABLE'},
{'name': 'pr', 'type': 'FLOAT', 'mode': 'NULLABLE'}
]},
]}
]
}
})
mock_response = mock.Mock()
mock_response.status_code = 200
self._patched_post.return_value = mock_response
self._worker._execute()
self._patched_post.assert_called_once()
self.assertEqual(
self._patched_post.call_args[0][0],
'https://www.google-analytics.com/batch')
self.assertEqual(
self._patched_post.call_args[1],
{
'headers': {'user-agent': 'CRMint / 0.1'},
'data': 'cid=123456789.1234567890&cu=RUB&il1nm=List1&il1pi1br=Brand11&il1pi1ca=Cat11&il1pi1id=SKU11&il1pi1nm=Product11&il1pi1pr=1110.0&il1pi2br=Brand12&il1pi2ca=Cat12&il1pi2id=SKU12&il1pi2nm=Product12&il1pi2pr=1220.0&il1pi3br=Brand13&il1pi3ca=Cat13&il1pi3id=SKU13&il1pi3nm=Product13&il1pi3pr=1330.0&il2nm=List2&il2pi1br=Brand21&il2pi1ca=Cat21&il2pi1id=SKU21&il2pi1nm=Product21&il2pi1pr=2110.0&il2pi2br=Brand22&il2pi2id=SKU22&il2pi2nm=Product22&il2pi2pr=2220.0&il2pi3br=Brand23&il2pi3ca=Cat23&il2pi3id=SKU23&il2pi3nm=Product23&il2pi3pr=2330.0&pa=purchase&pr1br=Brand1&pr1ca=Cat1&pr1id=SKU1&pr1nm=Product1&pr1pr=110.0&pr1qt=1&pr2br=Brand2&pr2ca=Cat2&pr2id=SKU2&pr2nm=Product2&pr2pr=220.0&pr2qt=2&pr3br=Brand3&pr3ca=Cat3&pr3id=SKU3&pr3nm=Product3&pr3pr=330.0&pr3qt=3&t=pageview&ta=Moscow&ti=987654321&tid=UA-12345-6&tr=1540.0&v=1'
})
@mock.patch('core.cloud_logging.logger')
@mock.patch('time.sleep')
def test_log_exception_if_http_fails(self, patched_time_sleep, patched_logger):
# Bypass the time.sleep wait
patched_time_sleep.return_value = 1
# NB: patching the StackDriver logger is needed because there is no
# testbed service available for now
patched_logger.log_struct.__name__ = 'foo'
patched_logger.log_struct.return_value = "patched_log_struct"
self._worker = workers.BQToMeasurementProtocolProcessor(
{
'bq_project_id': 'BQID',
'bq_dataset_id': 'DTID',
'bq_table_id': 'table_id',
'bq_page_token': None,
'bq_batch_size': 10,
'mp_batch_size': 20,
'debug': False,
},
1,
1)
self._use_query_results({
'tableReference': {
'tableId': 'mock_table',
},
'jobReference': {
'jobId': 'one-row-query',
},
'rows': [
{
'f': [
{'v': 'UA-12345-1'},
{'v': '35009a79-1a05-49d7-b876-2b884d0f825b'},
{'v': 'event'},
{'v': 1},
{'v': 'category'},
{'v': 'action'},
{'v': 'label'},
{'v': 'value'},
{'v': 'User Agent / 1.0'},
]
}
],
'schema': {
'fields': [
{'name': 'tid', 'type': 'STRING'},
{'name': 'cid', 'type': 'STRING'},
{'name': 't', 'type': 'STRING'},
{'name': 'ni', 'type': 'INTEGER'},
{'name': 'ec', 'type': 'STRING'},
{'name': 'ea', 'type': 'STRING'},
{'name': 'el', 'type': 'STRING'},
{'name': 'ev', 'type': 'STRING'},
{'name': 'ua', 'type': 'STRING'},
]
}
})
mock_response = mock.Mock()
mock_response.status_code = 500
self._patched_post.return_value = mock_response
self._worker._execute()
# Called 2 times because of 1 retry.
self.assertEqual(self._patched_post.call_count, 2)
# When retry stops it should log the message as an error.
patched_logger.log_error.assert_called_once()
class TestBQToMeasurementProtocol(TestBQToMeasurementProtocolMixin, unittest.TestCase):
def setUp(self):
super(TestBQToMeasurementProtocol, self).setUp()
self._client = mock.Mock()
patcher_get_client = mock.patch.object(
workers.BQToMeasurementProtocol,
'_get_client',
return_value=self._client)
self.addCleanup(patcher_get_client.stop)
patcher_get_client.start()
@mock.patch('time.sleep')
def test_success_with_spawning_new_worker(self, patched_time_sleep):
# Bypass the time.sleep wait
patched_time_sleep.return_value = 1
self._worker = workers.BQToMeasurementProtocol(
{
'bq_project_id': 'BQID',
'bq_dataset_id': 'DTID',
'bq_table_id': 'table_id',
'bq_page_token': None,
'mp_batch_size': 20,
},
1,
1)
self._worker.MAX_ENQUEUED_JOBS = 1
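# With at most one processor job enqueued per run, the worker has to
# re-enqueue itself (with the next page token) to handle the remaining
# page, which is what the assertions below check.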
api_response = {
'tableReference': {
'tableId': 'mock_table',
},
'jobReference': {
'jobId': 'one-row-query',
},
'pageToken': 'abc',
'rows': [
{
'f': [
{'v': 'UA-12345-1'},
{'v': '35009a79-1a05-49d7-b876-2b884d0f825b'},
{'v': 'event'},
{'v': 1},
{'v': 'category'},
{'v': 'action'},
{'v': 'label'},
{'v': 0.9},
{'v': 'User Agent / 1.0'},
]
},
{
'f': [
{'v': 'UA-12345-1'},
{'v': '35009a79-1a05-49d7-b876-2b884d0f825b'},
{'v': 'event'},
{'v': 1},
{'v': 'category'},
{'v': 'action'},
{'v': 'label'},
{'v': 0.8},
{'v': 'User Agent / 1.0'},
]
},
],
'schema': {
'fields': [
{'name': 'tid', 'type': 'STRING'},
{'name': 'cid', 'type': 'STRING'},
{'name': 't', 'type': 'STRING'},
{'name': 'ni', 'type': 'INTEGER'},
{'name': 'ec', 'type': 'STRING'},
{'name': 'ea', 'type': 'STRING'},
{'name': 'el', 'type': 'STRING'},
{'name': 'ev', 'type': 'FLOAT'},
{'name': 'ua', 'type': 'STRING'},
]
}
}
self._use_query_results(api_response)
patcher_worker_enqueue = mock.patch.object(workers.BQToMeasurementProtocol, '_enqueue')
self.addCleanup(patcher_worker_enqueue.stop)
patched_enqueue = patcher_worker_enqueue.start()
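# Simulate the pagination ending: once the worker re-enqueues itself,
# drop the pageToken from the canned response so a follow-up fetch would
# see no further pages.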
def _remove_next_page_token(worker_name, *args, **kwargs):
if worker_name == 'BQToMeasurementProtocol':
del api_response['pageToken']
self._use_query_results(api_response)
patched_enqueue.side_effect = _remove_next_page_token
self._worker._execute()
self.assertEqual(patched_enqueue.call_count, 2)
self.assertEqual(patched_enqueue.call_args_list[0][0][0], 'BQToMeasurementProtocolProcessor')
self.assertEqual(patched_enqueue.call_args_list[0][0][1]['bq_page_token'], None)
self.assertEqual(patched_enqueue.call_args_list[1][0][0], 'BQToMeasurementProtocol')
self.assertEqual(patched_enqueue.call_args_list[1][0][1]['bq_page_token'], 'abc')
| |
import inspect
import os
import re
import sys
from collections import abc
from typing import (
Any,
Callable,
Dict,
Iterable,
List,
Mapping,
Optional,
Sequence,
Set,
Tuple,
Union,
)
from unittest import mock
from unittest.mock import MagicMock, patch
import yaml
from django.http import HttpResponse
from jsonschema.exceptions import ValidationError
from zerver.lib.request import _REQ, arguments_map
from zerver.lib.test_classes import ZulipTestCase
from zerver.openapi.markdown_extension import (
generate_curl_example,
parse_language_and_options,
render_curl_example,
)
from zerver.openapi.openapi import (
OPENAPI_SPEC_PATH,
OpenAPISpec,
SchemaError,
get_openapi_fixture,
get_openapi_parameters,
get_openapi_paths,
match_against_openapi_regex,
openapi_spec,
to_python_type,
validate_against_openapi_schema,
validate_schema,
)
TEST_ENDPOINT = '/messages/{message_id}'
TEST_METHOD = 'patch'
TEST_RESPONSE_BAD_REQ = '400'
TEST_RESPONSE_SUCCESS = '200'
VARMAP = {
'integer': int,
'string': str,
'boolean': bool,
'object': dict,
'NoneType': type(None),
}
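# schema_type maps an OpenAPI schema to the Python type used in the
# comparisons below: arrays become a (list, item_type) tuple, 'oneOf'
# falls back to its first variant, everything else goes through VARMAP.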
def schema_type(schema: Dict[str, Any]) -> Union[type, Tuple[type, object]]:
if 'oneOf' in schema:
# Hack: Just use the type of the first value
# Ideally, we'd turn this into a Union type.
return schema_type(schema['oneOf'][0])
elif schema["type"] == "array":
return (list, schema_type(schema["items"]))
else:
return VARMAP[schema["type"]]
class OpenAPIToolsTest(ZulipTestCase):
"""Make sure that the tools we use to handle our OpenAPI specification
(located in zerver/openapi/openapi.py) work as expected.
These tools are mostly dedicated to fetching parts of the (already parsed)
specification, and comparing them to objects returned by our REST API.
"""
def test_get_openapi_fixture(self) -> None:
actual = get_openapi_fixture(TEST_ENDPOINT, TEST_METHOD,
TEST_RESPONSE_BAD_REQ)
expected = {
'code': 'BAD_REQUEST',
'msg': 'You don\'t have permission to edit this message',
'result': 'error',
}
self.assertEqual(actual, expected)
def test_get_openapi_parameters(self) -> None:
actual = get_openapi_parameters(TEST_ENDPOINT, TEST_METHOD)
expected_item = {
'name': 'message_id',
'in': 'path',
'description':
'The target message\'s ID.\n',
'example': 42,
'required': True,
'schema': {'type': 'integer'},
}
assert(expected_item in actual)
def test_validate_against_openapi_schema(self) -> None:
with self.assertRaises(ValidationError,
msg=("Additional properties are not" +
" allowed ('foo' was unexpected)")):
bad_content: Dict[str, object] = {
'msg': '',
'result': 'success',
'foo': 'bar',
}
validate_against_openapi_schema(bad_content,
TEST_ENDPOINT,
TEST_METHOD,
TEST_RESPONSE_SUCCESS)
with self.assertRaises(ValidationError,
msg=("42 is not of type string")):
bad_content = {
'msg': 42,
'result': 'success',
}
validate_against_openapi_schema(bad_content,
TEST_ENDPOINT,
TEST_METHOD,
TEST_RESPONSE_SUCCESS)
with self.assertRaises(ValidationError,
msg='Expected to find the "msg" required key'):
bad_content = {
'result': 'success',
}
validate_against_openapi_schema(bad_content,
TEST_ENDPOINT,
TEST_METHOD,
TEST_RESPONSE_SUCCESS)
# No exceptions should be raised here.
good_content = {
'msg': '',
'result': 'success',
}
validate_against_openapi_schema(good_content,
TEST_ENDPOINT,
TEST_METHOD,
TEST_RESPONSE_SUCCESS)
# Load a mocked spec section to exercise deep validation below.
test_dict: Dict[str, Any] = {}
# Check that validate_against_openapi_schema correctly
# descends into 'deep' objects and arrays. Test 1 should
# pass, Test 2 has a 'deep' extraneous key and Test 3 has a
# 'deep' opaque object. Also the parameters are a heterogeneous
# mix of arrays and objects to verify that our descent logic
# correctly gets to the deeply nested objects.
with open(os.path.join(os.path.dirname(OPENAPI_SPEC_PATH),
"testing.yaml")) as test_file:
test_dict = yaml.safe_load(test_file)
openapi_spec.spec()['paths']['testing'] = test_dict
try:
validate_against_openapi_schema((test_dict['test1']['responses']['200']['content']
['application/json']['example']),
'testing', 'test1', '200')
with self.assertRaises(ValidationError, msg = 'Extraneous key "str4" in response\'s content'):
validate_against_openapi_schema((test_dict['test2']['responses']['200']
['content']['application/json']['example']),
'testing', 'test2', '200')
with self.assertRaises(SchemaError, msg = 'Opaque object "obj"'):
# Checks for opaque objects
validate_schema((test_dict['test3']['responses']['200']
['content']['application/json']['schema']))
finally:
openapi_spec.spec()['paths'].pop('testing', None)
def test_to_python_type(self) -> None:
TYPES = {
'string': str,
'number': float,
'integer': int,
'boolean': bool,
'array': list,
'object': dict,
}
for oa_type, py_type in TYPES.items():
self.assertEqual(to_python_type(oa_type), py_type)
def test_live_reload(self) -> None:
# Force the reload by making the last update date < the file's last
# modified date
openapi_spec.last_update = 0
get_openapi_fixture(TEST_ENDPOINT, TEST_METHOD)
# Check that the file has been reloaded by verifying that the last
# update date isn't zero anymore
self.assertNotEqual(openapi_spec.last_update, 0)
# Now verify calling it again doesn't call reload
with mock.patch('zerver.openapi.openapi.openapi_spec.reload') as mock_reload:
get_openapi_fixture(TEST_ENDPOINT, TEST_METHOD)
self.assertFalse(mock_reload.called)
class OpenAPIArgumentsTest(ZulipTestCase):
# This will be filled during test_openapi_arguments:
checked_endpoints: Set[str] = set()
pending_endpoints = {
#### TODO: These endpoints are a priority to document:
'/realm/presence',
'/streams/{stream_id}/members',
'/streams/{stream_id}/delete_topic',
'/users/me/presence',
'/users/me/alert_words',
'/users/me/status',
#### These realm administration settings are valuable to document:
# Delete a file uploaded by current user.
'/attachments/{attachment_id}',
# List data exports for organization (GET) or request one (POST)
'/export/realm',
# Delete a data export.
'/export/realm/{export_id}',
# Manage default streams and default stream groups
'/default_streams',
'/default_stream_groups/create',
'/default_stream_groups/{group_id}',
'/default_stream_groups/{group_id}/streams',
# Administer invitations
'/invites',
'/invites/multiuse',
'/invites/{prereg_id}',
'/invites/{prereg_id}/resend',
'/invites/multiuse/{invite_id}',
# Single-stream settings alternative to the bulk endpoint
# users/me/subscriptions/properties; probably should just be a
# section of the same page.
'/users/me/subscriptions/{stream_id}',
# Real-time-events endpoint
'/real-time',
# Rest error handling endpoint
'/rest-error-handling',
# Zulip outgoing webhook payload
'/zulip-outgoing-webhook',
#### Mobile-app only endpoints; important for mobile developers.
# Mobile interface for fetching API keys
'/fetch_api_key',
# Already documented; need to fix tracking bug
'/dev_fetch_api_key',
# Mobile interface for development environment login
'/dev_list_users',
# Registration for iOS/Android mobile push notifications.
'/users/me/android_gcm_reg_id',
'/users/me/apns_device_token',
#### These personal settings endpoints have modest value to document:
'/settings',
'/users/me/avatar',
'/users/me/api_key/regenerate',
# Not very useful outside the UI
'/settings/display',
# Much more valuable would be an org admin bulk-upload feature.
'/users/me/profile_data',
#### Should be documented as part of interactive bots documentation
'/bot_storage',
'/submessage',
'/zcommand',
#### These "organization settings" endpoint have modest value to document:
'/realm',
'/realm/domains',
'/realm/domains/{domain}',
'/bots',
'/bots/{bot_id}',
'/bots/{bot_id}/api_key/regenerate',
#### These "organization settings" endpoints have low value to document:
'/realm/profile_fields',
'/realm/profile_fields/{field_id}',
'/realm/icon',
'/realm/logo',
'/realm/deactivate',
'/realm/subdomain/{subdomain}',
#### Other low value endpoints
# Used for dead desktop app to test connectivity. To delete.
'/generate_204',
# Used for failed approach with dead Android app.
'/fetch_google_client_id',
# API for video calls we're planning to remove/replace.
'/calls/zoom/create',
#### Documented endpoints not properly detected by tooling.
# E.g. '/user_groups/<user_group_id>' in urls.py but fails the
# reverse mapping test because of the variable name
# mismatch.
'/user_groups/{group_id}', # Equivalent of what's in urls.py
'/user_groups/{user_group_id}', # What's in the OpenAPI docs
'/user_groups/{user_group_id}/members',
# Regex with an unnamed capturing group.
'/users/(?!me/)(?P<email>[^/]*)/presence',
}
# Endpoints where the documentation is currently failing our
# consistency tests. We aim to keep this list empty.
buggy_documentation_endpoints: Set[str] = set([
])
def convert_regex_to_url_pattern(self, regex_pattern: str) -> str:
""" Convert regular expressions style URL patterns to their
corresponding OpenAPI style formats. All patterns are
expected to start with ^ and end with $.
Examples:
1. /messages/{message_id} <-> r'^messages/(?P<message_id>[0-9]+)$'
2. /events <-> r'^events$'
3. '/realm/domains' <-> r'/realm\\/domains$'
"""
# TODO: Probably we should be able to address the below
# through alternative solutions (e.g. reordering urls.py
# entries or similar url organization, but for now these let
# us test more endpoints and so are worth doing).
me_pattern = '/(?!me/)'
if me_pattern in regex_pattern:
# Remove the exclude-me pattern if present.
regex_pattern = regex_pattern.replace(me_pattern, "/")
# Handle the presence-email code which has a non-slashes syntax.
regex_pattern = regex_pattern.replace('[^/]*', '.*').replace('[^/]+', '.*')
self.assertTrue(regex_pattern.startswith("^"))
self.assertTrue(regex_pattern.endswith("$"))
url_pattern = '/' + regex_pattern[1:][:-1]
url_pattern = re.sub(r"\(\?P<(\w+)>[^/]+\)", r"{\1}", url_pattern)
url_pattern = url_pattern.replace('\\', '')
return url_pattern
def ensure_no_documentation_if_intentionally_undocumented(self, url_pattern: str,
method: str,
msg: Optional[str]=None) -> None:
try:
get_openapi_parameters(url_pattern, method)
if not msg: # nocoverage
msg = f"""
We found some OpenAPI documentation for {method} {url_pattern},
so maybe we shouldn't mark it as intentionally undocumented in the urls.
"""
raise AssertionError(msg) # nocoverage
except KeyError:
return
def check_for_non_existant_openapi_endpoints(self) -> None:
""" Here, we check to see if every endpoint documented in the openapi
documentation actually exists in urls.py and thus in actual code.
Note: We define this as a helper called at the end of
test_openapi_arguments instead of as a separate test to ensure that
this test is only executed after test_openapi_arguments so that its
results can be used here in the set operations. """
openapi_paths = set(get_openapi_paths())
undocumented_paths = openapi_paths - self.checked_endpoints
undocumented_paths -= self.buggy_documentation_endpoints
undocumented_paths -= self.pending_endpoints
try:
self.assertEqual(len(undocumented_paths), 0)
except AssertionError: # nocoverage
msg = "The following endpoints have been documented but can't be found in urls.py:"
for undocumented_path in undocumented_paths:
msg += f"\n + {undocumented_path}"
raise AssertionError(msg)
def get_type_by_priority(self, types: Sequence[Union[type, Tuple[type, object]]]) -> Union[type, Tuple[type, object]]:
priority = {list: 1, dict: 2, str: 3, int: 4, bool: 5}
tyiroirp = {1: list, 2: dict, 3: str, 4: int, 5: bool}
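# `tyiroirp` is simply `priority` reversed: the inverse mapping from a
# priority value back to its type.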
val = 6
for t in types:
if isinstance(t, tuple):
return t # e.g. (list, dict) or (list, str)
v = priority.get(t, 6)
if v < val:
val = v
return tyiroirp.get(val, types[0])
def get_standardized_argument_type(self, t: Any) -> Union[type, Tuple[type, object]]:
""" Given a type from the typing module such as List[str] or Union[str, int],
convert it into a corresponding Python type. Unions are mapped to a canonical
choice among the options.
E.g. typing.Union[typing.List[typing.Dict[str, typing.Any]], NoneType]
needs to be mapped to list."""
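# A few illustrative mappings: Optional[str] -> str, List[int] -> (list, int),
# Dict[str, Any] -> dict.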
if sys.version_info < (3, 7): # nocoverage # python 3.5-3.6
if sys.version_info < (3, 6) and isinstance(t, type(Union)): # python 3.5 has special consideration for Union
origin = Union
else:
origin = getattr(t, "__origin__", None)
else: # nocoverage # python3.7+
origin = getattr(t, "__origin__", None)
if origin == list:
origin = List
elif origin == dict:
origin = Dict
elif origin == abc.Iterable:
origin = Iterable
elif origin == abc.Mapping:
origin = Mapping
elif origin == abc.Sequence:
origin = Sequence
if not origin:
# Then it's most likely one of the fundamental data types
# I.E. Not one of the data types from the "typing" module.
return t
elif origin == Union:
subtypes = []
if sys.version_info < (3, 6): # nocoverage # python 3.5 keeps Union args in __union_params__
args = t.__union_params__
else: # nocoverage # python 3.6+ uses __args__
args = t.__args__
for st in args:
subtypes.append(self.get_standardized_argument_type(st))
return self.get_type_by_priority(subtypes)
elif origin in [List, Iterable, Sequence]:
[st] = t.__args__
return (list, self.get_standardized_argument_type(st))
elif origin in [Dict, Mapping]:
return dict
raise AssertionError(f"Unknown origin {origin}")
def render_openapi_type_exception(self, function: Callable[..., HttpResponse],
openapi_params: Set[Tuple[str, Union[type, Tuple[type, object]]]],
function_params: Set[Tuple[str, Union[type, Tuple[type, object]]]],
diff: Set[Tuple[str, Union[type, Tuple[type, object]]]]) -> None: # nocoverage
""" Print a *VERY* clear and verbose error message for when the types
(between the OpenAPI documentation and the function declaration) don't match. """
msg = f"""
The types for the request parameters in zerver/openapi/zulip.yaml
do not match the types declared in the implementation of {function.__name__}.\n"""
msg += '='*65 + '\n'
msg += "{:<10s}{:^30s}{:>10s}\n".format("Parameter", "OpenAPI Type",
"Function Declaration Type")
msg += '='*65 + '\n'
opvtype = None
fdvtype = None
for element in diff:
vname = element[0]
for element in openapi_params:
if element[0] == vname:
opvtype = element[1]
break
for element in function_params:
if element[0] == vname:
fdvtype = element[1]
break
msg += f"{vname:<10s}{str(opvtype):^30s}{str(fdvtype):>10s}\n"
raise AssertionError(msg)
def check_argument_types(self, function: Callable[..., HttpResponse],
openapi_parameters: List[Dict[str, Any]]) -> None:
""" We construct for both the OpenAPI data and the function's definition a set of
tuples of the form (var_name, type) and then compare those sets to see if the
OpenAPI data defines a different type from the one actually accepted by the function.
If they differ, we print out the exact differences for convenient debugging and raise an
AssertionError. """
openapi_params: Set[Tuple[str, Union[type, Tuple[type, object]]]] = set()
json_params: Dict[str, Union[type, Tuple[type, object]]] = dict()
for element in openapi_parameters:
name: str = element["name"]
schema = {}
if "content" in element:
# The only content-type we use in our API is application/json.
assert "schema" in element["content"]["application/json"]
# If content_type is application/json, then the
# parameter needs to be handled specially, as REQ can
# either return the application/json as a string or it
# can decode it and return the required
# elements. For example `to` array in /messages: POST
# is processed by REQ as a string and then its type is
# checked in the view code.
#
# Meanwhile `profile_data` in /users/{user_id}: GET is
# taken as an array of objects, so we treat them separately.
schema = element["content"]["application/json"]["schema"]
json_params[name] = schema_type(schema)
continue
else:
schema = element["schema"]
openapi_params.add((name, schema_type(schema)))
function_params: Set[Tuple[str, Union[type, Tuple[type, object]]]] = set()
# Iterate through the decorators to find the original
# function, wrapped by has_request_variables, so we can parse
# its arguments.
while getattr(function, "__wrapped__", None):
function = getattr(function, "__wrapped__", None)
# Tell mypy this is never None.
assert function is not None
# Now, we do inference mapping each REQ parameter's
# declaration details to the Python/mypy types for the
# arguments passed to it.
#
# Because the mypy types are the types used inside the inner
# function (after the original data is processed by any
# validators, converters, etc.), they will not always match
# the API-level argument types. The main case where this
# happens is when a `converter` is used that changes the types
# of its parameters.
for pname, defval in inspect.signature(function).parameters.items():
defval = defval.default
if isinstance(defval, _REQ):
# TODO: The below inference logic in cases where
# there's a converter function declared is incorrect.
# Theoretically, we could restructure the converter
# function model so that we can check what type it
# expects to be passed to make validation here
# possible.
vtype = self.get_standardized_argument_type(function.__annotations__[pname])
vname = defval.post_var_name
assert vname is not None
if vname in json_params:
# Here we have two cases. If the REQ type is
# string then there is no point in comparing as
# JSON can always be returned as string. Ideally,
# we wouldn't use REQ for a JSON object without a
# validator in these cases, but it does happen.
#
# If the REQ type is not string then, insert the
# REQ and OPENAPI data types of the variable in
# the respective sets so that they can be dealt
# with later. In either case remove the variable
# from `json_params`.
if vtype == str:
json_params.pop(vname, None)
continue
else:
openapi_params.add((vname, json_params[vname]))
json_params.pop(vname, None)
function_params.add((vname, vtype))
# After the above operations `json_params` should be empty.
assert(len(json_params) == 0)
diff = openapi_params - function_params
if diff: # nocoverage
self.render_openapi_type_exception(function, openapi_params, function_params, diff)
def test_openapi_arguments(self) -> None:
"""This end-to-end API documentation test compares the arguments
defined in the actual code using @has_request_variables and
REQ(), with the arguments declared in our API documentation
for every API endpoint in Zulip.
First, we import the fancy-Django version of zproject/urls.py;
by doing this, each has_request_variables wrapper around each
imported view function gets called to generate the wrapped
view function, thus filling the global arguments_map variable.
Basically, we're exploiting code execution during import.
Then we need to import some view modules not already imported in
urls.py. We use this different syntax because the linters complain
about an unused import (which is correct, but here the import exists
only to trigger the has_request_variables decorator).
At the end, we perform a reverse mapping test that verifies that
every url pattern defined in the openapi documentation actually exists
in code.
"""
from zproject import urls as urlconf
# We loop through all the API patterns, looking in particular
# for those using the rest_dispatch decorator; we then parse
# its mapping of (HTTP_METHOD -> FUNCTION).
for p in urlconf.v1_api_and_json_patterns + urlconf.v1_api_mobile_patterns:
if p.lookup_str != 'zerver.lib.rest.rest_dispatch':
# Endpoints not using rest_dispatch don't have extra data.
methods_endpoints = dict(
GET=p.lookup_str,
)
else:
methods_endpoints = p.default_args
# since the module was already imported and is now residing in
# memory, we won't actually face any performance penalties here.
for method, value in methods_endpoints.items():
if isinstance(value, str):
function_name = value
tags: Set[str] = set()
else:
function_name, tags = value
if function_name == 'zerver.tornado.views.get_events':
# Work around the fact that the registered
# get_events view function isn't where we do
# @has_request_variables.
#
# TODO: Make this configurable via an optional argument
# to has_request_variables, e.g.
# @has_request_variables(view_func_name="zerver.tornado.views.get_events")
function_name = 'zerver.tornado.views.get_events_backend'
lookup_parts = function_name.split('.')
module = __import__('.'.join(lookup_parts[:-1]), {}, {}, [''])
function = getattr(module, lookup_parts[-1])
# Our accounting logic in the `has_request_variables()`
# code means we have the list of all arguments
# accepted by every view function in arguments_map.
accepted_arguments = set(arguments_map[function_name])
regex_pattern = p.pattern.regex.pattern
url_pattern = self.convert_regex_to_url_pattern(regex_pattern)
if "intentionally_undocumented" in tags:
self.ensure_no_documentation_if_intentionally_undocumented(url_pattern, method)
continue
if url_pattern in self.pending_endpoints:
# HACK: After all pending_endpoints have been resolved, we should remove
# this segment and the "msg" part of the `ensure_no_...` method.
msg = f"""
We found some OpenAPI documentation for {method} {url_pattern},
so maybe we shouldn't include it in pending_endpoints.
"""
self.ensure_no_documentation_if_intentionally_undocumented(url_pattern,
method, msg)
continue
try:
# Don't include OpenAPI parameters that live in
# the path; these are not extracted by REQ.
openapi_parameters = get_openapi_parameters(url_pattern, method,
include_url_parameters=False)
except Exception: # nocoverage
raise AssertionError(f"Could not find OpenAPI docs for {method} {url_pattern}")
# We now have everything we need to understand the
# function as defined in our urls.py:
#
# * method is the HTTP method, e.g. GET, POST, or PATCH
#
# * p.pattern.regex.pattern is the URL pattern; might require
# some processing to match with OpenAPI rules
#
# * accepted_arguments is the full set of arguments
# this method accepts (from the REQ declarations in
# code).
#
# * The documented parameters for the endpoint as recorded in our
# OpenAPI data in zerver/openapi/zulip.yaml.
#
# We now compare these to confirm that the documented
# argument list matches what actually appears in the
# codebase.
openapi_parameter_names = {
parameter['name'] for parameter in openapi_parameters
}
if len(accepted_arguments - openapi_parameter_names) > 0: # nocoverage
print("Undocumented parameters for",
url_pattern, method, function_name)
print(" +", openapi_parameter_names)
print(" -", accepted_arguments)
assert(url_pattern in self.buggy_documentation_endpoints)
elif len(openapi_parameter_names - accepted_arguments) > 0: # nocoverage
print("Documented invalid parameters for",
url_pattern, method, function_name)
print(" -", openapi_parameter_names)
print(" +", accepted_arguments)
assert(url_pattern in self.buggy_documentation_endpoints)
else:
self.assertEqual(openapi_parameter_names, accepted_arguments)
self.check_argument_types(function, openapi_parameters)
self.checked_endpoints.add(url_pattern)
self.check_for_non_existant_openapi_endpoints()
class ModifyExampleGenerationTestCase(ZulipTestCase):
def test_no_mod_argument(self) -> None:
res = parse_language_and_options("python")
self.assertEqual(res, ("python", {}))
def test_single_simple_mod_argument(self) -> None:
res = parse_language_and_options("curl, mod=1")
self.assertEqual(res, ("curl", {"mod": 1}))
res = parse_language_and_options("curl, mod='somevalue'")
self.assertEqual(res, ("curl", {"mod": "somevalue"}))
res = parse_language_and_options("curl, mod=\"somevalue\"")
self.assertEqual(res, ("curl", {"mod": "somevalue"}))
def test_multiple_simple_mod_argument(self) -> None:
res = parse_language_and_options("curl, mod1=1, mod2='a'")
self.assertEqual(res, ("curl", {"mod1": 1, "mod2": "a"}))
res = parse_language_and_options("curl, mod1=\"asdf\", mod2='thing', mod3=3")
self.assertEqual(res, ("curl", {"mod1": "asdf", "mod2": "thing", "mod3": 3}))
def test_single_list_mod_argument(self) -> None:
res = parse_language_and_options("curl, exclude=['param1', 'param2']")
self.assertEqual(res, ("curl", {"exclude": ["param1", "param2"]}))
res = parse_language_and_options("curl, exclude=[\"param1\", \"param2\"]")
self.assertEqual(res, ("curl", {"exclude": ["param1", "param2"]}))
res = parse_language_and_options("curl, exclude=['param1', \"param2\"]")
self.assertEqual(res, ("curl", {"exclude": ["param1", "param2"]}))
def test_multiple_list_mod_argument(self) -> None:
res = parse_language_and_options("curl, exclude=['param1', \"param2\"], special=['param3']")
self.assertEqual(res, ("curl", {"exclude": ["param1", "param2"], "special": ["param3"]}))
def test_multiple_mixed_mod_arguments(self) -> None:
res = parse_language_and_options("curl, exclude=[\"asdf\", 'sdfg'], other_key='asdf', more_things=\"asdf\", another_list=[1, \"2\"]")
self.assertEqual(res, ("curl", {"exclude": ["asdf", "sdfg"], "other_key": "asdf", "more_things": "asdf", "another_list": [1, "2"]}))
class TestCurlExampleGeneration(ZulipTestCase):
spec_mock_without_examples = {
"security": [{"basicAuth": []}],
"paths": {
"/mark_stream_as_read": {
"post": {
"description": "Mark all the unread messages in a stream as read.",
"parameters": [
{
"name": "stream_id",
"in": "query",
"description": "The ID of the stream whose messages should be marked as read.",
"schema": {
"type": "integer",
},
"required": True,
},
{
"name": "bool_param",
"in": "query",
"description": "Just a boolean parameter.",
"schema": {
"type": "boolean",
},
"required": True,
},
],
},
},
},
}
spec_mock_with_invalid_method: Dict[str, object] = {
"security": [{"basicAuth": []}],
"paths": {
"/endpoint": {
"brew": {}, # the data is irrelevant as is should be rejected.
},
},
}
spec_mock_using_object = {
"security": [{"basicAuth": []}],
"paths": {
"/endpoint": {
"get": {
"description": "Get some info.",
"parameters": [
{
"name": "param1",
"in": "query",
"description": "An object",
"content": {
"application/json": {
"schema": {
"type": "object"
},
"example": {
"key": "value",
}
}
},
"required": True,
},
],
},
},
},
}
spec_mock_using_param_in_path = {
"security": [{"basicAuth": []}],
"paths": {
"/endpoint/{param1}": {
"get": {
"description": "Get some info.",
"parameters": [
{
"name": "param1",
"in": "path",
"description": "Param in path",
"schema": {
"type": "integer",
},
"example": 35,
"required": True,
},
{
"name": "param2",
"in": "query",
"description": "An object",
"required": True,
"content": {
"application/json": {
"schema": {
"type": "object"
},
"example": {
"key": "value",
}
}
},
},
],
},
},
},
}
spec_mock_using_object_without_example = {
"security": [{"basicAuth": []}],
"paths": {
"/endpoint": {
"get": {
"description": "Get some info.",
"parameters": [
{
"name": "param1",
"in": "query",
"description": "An object",
"schema": {
"type": "object",
},
"required": True,
},
],
},
},
},
}
spec_mock_using_array_without_example = {
"security": [{"basicAuth": []}],
"paths": {
"/endpoint": {
"get": {
"description": "Get some info.",
"parameters": [
{
"name": "param1",
"in": "query",
"description": "An array",
"schema": {
"type": "array",
},
"required": True,
},
],
},
},
},
}
def curl_example(self, endpoint: str, method: str, *args: Any, **kwargs: Any) -> List[str]:
return generate_curl_example(endpoint, method,
"http://localhost:9991/api", *args, **kwargs)
def test_generate_and_render_curl_example(self) -> None:
generated_curl_example = self.curl_example("/get_stream_id", "GET")
expected_curl_example = [
"```curl",
"curl -sSX GET -G http://localhost:9991/api/v1/get_stream_id \\",
" -u BOT_EMAIL_ADDRESS:BOT_API_KEY \\",
" -d 'stream=Denmark'",
"```",
]
self.assertEqual(generated_curl_example, expected_curl_example)
def test_generate_and_render_curl_example_with_nonexistant_endpoints(self) -> None:
with self.assertRaises(KeyError):
self.curl_example("/mark_this_stream_as_read", "POST")
with self.assertRaises(KeyError):
self.curl_example("/mark_stream_as_read", "GET")
def test_generate_and_render_curl_without_auth(self) -> None:
generated_curl_example = self.curl_example("/dev_fetch_api_key", "POST")
expected_curl_example = [
"```curl",
"curl -sSX POST http://localhost:9991/api/v1/dev_fetch_api_key \\",
" -d 'username=iago@zulip.com'",
"```",
]
self.assertEqual(generated_curl_example, expected_curl_example)
@patch("zerver.openapi.openapi.OpenAPISpec.spec")
def test_generate_and_render_curl_with_default_examples(self, spec_mock: MagicMock) -> None:
spec_mock.return_value = self.spec_mock_without_examples
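# The spec above defines no explicit examples, so the generator is expected
# to fall back to type-based defaults (1 for integers, false for booleans),
# as the expected output below shows.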
generated_curl_example = self.curl_example("/mark_stream_as_read", "POST")
expected_curl_example = [
"```curl",
"curl -sSX POST http://localhost:9991/api/v1/mark_stream_as_read \\",
" -u BOT_EMAIL_ADDRESS:BOT_API_KEY \\",
" -d 'stream_id=1' \\",
" -d 'bool_param=false'",
"```",
]
self.assertEqual(generated_curl_example, expected_curl_example)
@patch("zerver.openapi.openapi.OpenAPISpec.spec")
def test_generate_and_render_curl_with_invalid_method(self, spec_mock: MagicMock) -> None:
spec_mock.return_value = self.spec_mock_with_invalid_method
with self.assertRaises(ValueError):
self.curl_example("/endpoint", "BREW") # see: HTCPCP
def test_generate_and_render_curl_with_array_example(self) -> None:
generated_curl_example = self.curl_example("/messages", "GET")
expected_curl_example = [
'```curl',
'curl -sSX GET -G http://localhost:9991/api/v1/messages \\',
' -u BOT_EMAIL_ADDRESS:BOT_API_KEY \\',
" -d 'anchor=42' \\",
" -d 'num_before=4' \\",
" -d 'num_after=8' \\",
' --data-urlencode narrow=\'[{"operand": "Denmark", "operator": "stream"}]\' \\',
" -d 'client_gravatar=true' \\",
" -d 'apply_markdown=false' \\",
" -d 'use_first_unread_anchor=true'",
'```',
]
self.assertEqual(generated_curl_example, expected_curl_example)
@patch("zerver.openapi.openapi.OpenAPISpec.spec")
def test_generate_and_render_curl_with_object(self, spec_mock: MagicMock) -> None:
spec_mock.return_value = self.spec_mock_using_object
generated_curl_example = self.curl_example("/endpoint", "GET")
expected_curl_example = [
'```curl',
'curl -sSX GET -G http://localhost:9991/api/v1/endpoint \\',
' -u BOT_EMAIL_ADDRESS:BOT_API_KEY \\',
' --data-urlencode param1=\'{"key": "value"}\'',
'```',
]
self.assertEqual(generated_curl_example, expected_curl_example)
@patch("zerver.openapi.openapi.OpenAPISpec.spec")
def test_generate_and_render_curl_with_object_without_example(self, spec_mock: MagicMock) -> None:
spec_mock.return_value = self.spec_mock_using_object_without_example
with self.assertRaises(ValueError):
self.curl_example("/endpoint", "GET")
@patch("zerver.openapi.openapi.OpenAPISpec.spec")
def test_generate_and_render_curl_with_array_without_example(self, spec_mock: MagicMock) -> None:
spec_mock.return_value = self.spec_mock_using_array_without_example
with self.assertRaises(ValueError):
self.curl_example("/endpoint", "GET")
@patch("zerver.openapi.openapi.OpenAPISpec.spec")
def test_generate_and_render_curl_with_param_in_path(self, spec_mock: MagicMock) -> None:
spec_mock.return_value = self.spec_mock_using_param_in_path
generated_curl_example = self.curl_example("/endpoint/{param1}", "GET")
expected_curl_example = [
'```curl',
'curl -sSX GET -G http://localhost:9991/api/v1/endpoint/35 \\',
' -u BOT_EMAIL_ADDRESS:BOT_API_KEY \\',
' --data-urlencode param2=\'{"key": "value"}\'',
'```',
]
self.assertEqual(generated_curl_example, expected_curl_example)
def test_generate_and_render_curl_wrapper(self) -> None:
generated_curl_example = render_curl_example("/get_stream_id:GET:email:key",
api_url="https://zulip.example.com/api")
expected_curl_example = [
"```curl",
"curl -sSX GET -G https://zulip.example.com/api/v1/get_stream_id \\",
" -u email:key \\",
" -d 'stream=Denmark'",
"```",
]
self.assertEqual(generated_curl_example, expected_curl_example)
def test_generate_and_render_curl_example_with_excludes(self) -> None:
generated_curl_example = self.curl_example("/messages", "GET",
exclude=["client_gravatar", "apply_markdown"])
expected_curl_example = [
'```curl',
'curl -sSX GET -G http://localhost:9991/api/v1/messages \\',
' -u BOT_EMAIL_ADDRESS:BOT_API_KEY \\',
" -d 'anchor=42' \\",
" -d 'num_before=4' \\",
" -d 'num_after=8' \\",
' --data-urlencode narrow=\'[{"operand": "Denmark", "operator": "stream"}]\' \\',
" -d 'use_first_unread_anchor=true'",
'```',
]
self.assertEqual(generated_curl_example, expected_curl_example)
class OpenAPIAttributesTest(ZulipTestCase):
def test_attributes(self) -> None:
"""
Checks:
* All endpoints have `operationId` and `tag` attributes.
* All example responses match their schema.
* That no opaque object exists.
"""
EXCLUDE = ["/real-time", "/register", "/events"]
VALID_TAGS = ["users", "server_and_organizations", "authentication",
"real_time_events", "streams", "messages", "users",
"webhooks"]
openapi_spec = OpenAPISpec(OPENAPI_SPEC_PATH).spec()["paths"]
for path in openapi_spec:
if path in EXCLUDE:
continue
for method in openapi_spec[path]:
# Check if every file has an operationId
assert("operationId" in openapi_spec[path][method])
assert("tags" in openapi_spec[path][method])
tag = openapi_spec[path][method]["tags"][0]
assert(tag in VALID_TAGS)
for response in openapi_spec[path][method]['responses']:
response_schema = (openapi_spec[path][method]['responses'][response]
['content']['application/json']['schema'])
if 'oneOf' in response_schema:
cnt = 0
for entry in response_schema['oneOf']:
validate_schema(entry)
assert(validate_against_openapi_schema(entry['example'], path,
method, response + '_' + str(cnt)))
cnt += 1
continue
validate_schema(response_schema)
assert(validate_against_openapi_schema(response_schema['example'], path,
method, response))
class OpenAPIRegexTest(ZulipTestCase):
def test_regex(self) -> None:
"""
Calls a few documented and undocumented endpoints and checks whether they
find a match or not.
"""
# Some of the undocumented endpoints, which are very similar to
# some of the documented endpoints.
assert(match_against_openapi_regex('/users/me/presence') is None)
assert(match_against_openapi_regex('/users/me/subscriptions/23') is None)
assert(match_against_openapi_regex('/users/iago/subscriptions/23') is None)
assert(match_against_openapi_regex('/messages/matches_narrow') is None)
# Making sure documented endpoints are matched correctly.
assert(match_against_openapi_regex('/users/23/subscriptions/21') ==
'/users/{user_id}/subscriptions/{stream_id}')
assert(match_against_openapi_regex('/users/iago@zulip.com/presence') ==
'/users/{email}/presence')
assert(match_against_openapi_regex('/messages/23') ==
'/messages/{message_id}')
assert(match_against_openapi_regex('/realm/emoji/realm_emoji_1') ==
'/realm/emoji/{emoji_name}')
| |
# Copyright (c) 2014 OpenStack Foundation, all rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from neutron_lib import constants as p_const
from neutron_lib import context
from neutron_lib import exceptions as exc
from neutron_lib.plugins.ml2 import api
from oslo_config import cfg
from six import moves
import testtools
from testtools import matchers
from neutron.plugins.ml2.drivers import type_tunnel
TUNNEL_IP_ONE = "10.10.10.10"
TUNNEL_IP_TWO = "10.10.10.20"
TUNNEL_IPV6_ONE = "2001:db8:1::10"
HOST_ONE = 'fake_host_one'
HOST_TWO = 'fake_host_two'
TUN_MIN = 100
TUN_MAX = 109
TUNNEL_RANGES = [(TUN_MIN, TUN_MAX)]
UPDATED_TUNNEL_RANGES = [(TUN_MIN + 5, TUN_MAX + 5)]
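# sync_allocations() is expected to pre-create one (unallocated) row per
# tunnel id in these inclusive ranges; the updated ranges shift the window
# by 5 so the re-sync tests can verify old ids disappear and new ones appear.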
class TunnelTypeTestMixin(object):
DRIVER_CLASS = None
TYPE = None
def setUp(self):
super(TunnelTypeTestMixin, self).setUp()
self.driver = self.DRIVER_CLASS()
self.driver.tunnel_ranges = TUNNEL_RANGES
self.driver.sync_allocations()
self.context = context.Context()
def test_tunnel_type(self):
self.assertEqual(self.TYPE, self.driver.get_type())
def test_validate_provider_segment(self):
segment = {api.NETWORK_TYPE: self.TYPE,
api.PHYSICAL_NETWORK: 'phys_net',
api.SEGMENTATION_ID: None}
with testtools.ExpectedException(exc.InvalidInput):
self.driver.validate_provider_segment(segment)
segment[api.PHYSICAL_NETWORK] = None
self.driver.validate_provider_segment(segment)
segment[api.SEGMENTATION_ID] = 1
self.driver.validate_provider_segment(segment)
def test_sync_tunnel_allocations(self):
self.assertIsNone(
self.driver.get_allocation(self.context, (TUN_MIN - 1)))
self.assertFalse(
self.driver.get_allocation(self.context, (TUN_MIN)).allocated)
self.assertFalse(
self.driver.get_allocation(self.context, (TUN_MIN + 1)).allocated)
self.assertFalse(
self.driver.get_allocation(self.context, (TUN_MAX - 1)).allocated)
self.assertFalse(
self.driver.get_allocation(self.context, (TUN_MAX)).allocated)
self.assertIsNone(
self.driver.get_allocation(self.context, (TUN_MAX + 1)))
self.driver.tunnel_ranges = UPDATED_TUNNEL_RANGES
self.driver.sync_allocations()
self.assertIsNone(
self.driver.get_allocation(self.context, (TUN_MIN + 5 - 1)))
self.assertFalse(
self.driver.get_allocation(self.context, (TUN_MIN + 5)).allocated)
self.assertFalse(
self.driver.get_allocation(self.context,
(TUN_MIN + 5 + 1)).allocated)
self.assertFalse(
self.driver.get_allocation(self.context,
(TUN_MAX + 5 - 1)).allocated)
self.assertFalse(
self.driver.get_allocation(self.context, (TUN_MAX + 5)).allocated)
self.assertIsNone(
self.driver.get_allocation(self.context, (TUN_MAX + 5 + 1)))
def _test_sync_allocations_and_allocated(self, tunnel_id):
segment = {api.NETWORK_TYPE: self.TYPE,
api.PHYSICAL_NETWORK: None,
api.SEGMENTATION_ID: tunnel_id}
self.driver.reserve_provider_segment(self.context, segment)
self.driver.tunnel_ranges = UPDATED_TUNNEL_RANGES
self.driver.sync_allocations()
self.assertTrue(
self.driver.get_allocation(self.context, tunnel_id).allocated)
def test_sync_allocations_and_allocated_in_initial_range(self):
self._test_sync_allocations_and_allocated(TUN_MIN + 2)
def test_sync_allocations_and_allocated_in_final_range(self):
self._test_sync_allocations_and_allocated(TUN_MAX + 2)
def test_sync_allocations_no_op(self):
def verify_no_chunk(iterable, chunk_size):
# no segment removed/added
self.assertEqual(0, len(list(iterable)))
return []
with mock.patch.object(
type_tunnel, 'chunks', side_effect=verify_no_chunk) as chunks:
self.driver.sync_allocations()
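# sync_allocations() chunks both the ids to remove and the ids to add;
# with unchanged ranges both iterables are empty, but chunks() should
# still be invoked once for each, hence the two expected calls.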
self.assertEqual(2, len(chunks.mock_calls))
def test_partial_segment_is_partial_segment(self):
segment = {api.NETWORK_TYPE: self.TYPE,
api.PHYSICAL_NETWORK: None,
api.SEGMENTATION_ID: None}
self.assertTrue(self.driver.is_partial_segment(segment))
def test_specific_segment_is_not_partial_segment(self):
segment = {api.NETWORK_TYPE: self.TYPE,
api.PHYSICAL_NETWORK: None,
api.SEGMENTATION_ID: 101}
self.assertFalse(self.driver.is_partial_segment(segment))
def test_reserve_provider_segment_full_specs(self):
segment = {api.NETWORK_TYPE: self.TYPE,
api.PHYSICAL_NETWORK: None,
api.SEGMENTATION_ID: 101}
observed = self.driver.reserve_provider_segment(self.context, segment)
alloc = self.driver.get_allocation(self.context,
observed[api.SEGMENTATION_ID])
self.assertTrue(alloc.allocated)
with testtools.ExpectedException(exc.TunnelIdInUse):
self.driver.reserve_provider_segment(self.context, segment)
self.driver.release_segment(self.context, segment)
alloc = self.driver.get_allocation(self.context,
observed[api.SEGMENTATION_ID])
self.assertFalse(alloc.allocated)
segment[api.SEGMENTATION_ID] = 1000
observed = self.driver.reserve_provider_segment(self.context, segment)
alloc = self.driver.get_allocation(self.context,
observed[api.SEGMENTATION_ID])
self.assertTrue(alloc.allocated)
self.driver.release_segment(self.context, segment)
alloc = self.driver.get_allocation(self.context,
observed[api.SEGMENTATION_ID])
self.assertIsNone(alloc)
def test_reserve_provider_segment(self):
tunnel_ids = set()
specs = {api.NETWORK_TYPE: self.TYPE,
api.PHYSICAL_NETWORK: 'None',
api.SEGMENTATION_ID: None}
for x in moves.range(TUN_MIN, TUN_MAX + 1):
segment = self.driver.reserve_provider_segment(self.context,
specs)
self.assertEqual(self.TYPE, segment[api.NETWORK_TYPE])
self.assertThat(segment[api.SEGMENTATION_ID],
matchers.GreaterThan(TUN_MIN - 1))
self.assertThat(segment[api.SEGMENTATION_ID],
matchers.LessThan(TUN_MAX + 1))
tunnel_ids.add(segment[api.SEGMENTATION_ID])
with testtools.ExpectedException(exc.NoNetworkAvailable):
segment = self.driver.reserve_provider_segment(self.context,
specs)
segment = {api.NETWORK_TYPE: self.TYPE,
api.PHYSICAL_NETWORK: 'None',
api.SEGMENTATION_ID: tunnel_ids.pop()}
self.driver.release_segment(self.context, segment)
segment = self.driver.reserve_provider_segment(self.context, specs)
self.assertThat(segment[api.SEGMENTATION_ID],
matchers.GreaterThan(TUN_MIN - 1))
self.assertThat(segment[api.SEGMENTATION_ID],
matchers.LessThan(TUN_MAX + 1))
tunnel_ids.add(segment[api.SEGMENTATION_ID])
for tunnel_id in tunnel_ids:
segment[api.SEGMENTATION_ID] = tunnel_id
self.driver.release_segment(self.context, segment)
def test_allocate_tenant_segment(self):
tunnel_ids = set()
for x in moves.range(TUN_MIN, TUN_MAX + 1):
segment = self.driver.allocate_tenant_segment(self.context)
self.assertThat(segment[api.SEGMENTATION_ID],
matchers.GreaterThan(TUN_MIN - 1))
self.assertThat(segment[api.SEGMENTATION_ID],
matchers.LessThan(TUN_MAX + 1))
tunnel_ids.add(segment[api.SEGMENTATION_ID])
segment = self.driver.allocate_tenant_segment(self.context)
self.assertIsNone(segment)
segment = {api.NETWORK_TYPE: self.TYPE,
api.PHYSICAL_NETWORK: 'None',
api.SEGMENTATION_ID: tunnel_ids.pop()}
self.driver.release_segment(self.context, segment)
segment = self.driver.allocate_tenant_segment(self.context)
self.assertThat(segment[api.SEGMENTATION_ID],
matchers.GreaterThan(TUN_MIN - 1))
self.assertThat(segment[api.SEGMENTATION_ID],
matchers.LessThan(TUN_MAX + 1))
tunnel_ids.add(segment[api.SEGMENTATION_ID])
for tunnel_id in tunnel_ids:
segment[api.SEGMENTATION_ID] = tunnel_id
self.driver.release_segment(self.context, segment)
def add_endpoint(self, ip=TUNNEL_IP_ONE, host=HOST_ONE):
return self.driver.add_endpoint(ip, host)
def test_add_endpoint(self):
endpoint = self.add_endpoint()
self.assertEqual(TUNNEL_IP_ONE, endpoint.ip_address)
self.assertEqual(HOST_ONE, endpoint.host)
return endpoint
def test_add_endpoint_for_existing_tunnel_ip(self):
self.add_endpoint()
with mock.patch.object(type_tunnel.LOG, 'warning') as log_warn:
self.add_endpoint()
log_warn.assert_called_once_with(mock.ANY, TUNNEL_IP_ONE)
def test_get_endpoint_by_host(self):
self.add_endpoint()
host_endpoint = self.driver.get_endpoint_by_host(HOST_ONE)
self.assertEqual(TUNNEL_IP_ONE, host_endpoint.ip_address)
return host_endpoint
def test_get_endpoint_by_host_for_not_existing_host(self):
ip_endpoint = self.driver.get_endpoint_by_host(HOST_TWO)
self.assertIsNone(ip_endpoint)
def test_get_endpoint_by_ip(self):
self.add_endpoint()
ip_endpoint = self.driver.get_endpoint_by_ip(TUNNEL_IP_ONE)
self.assertEqual(HOST_ONE, ip_endpoint.host)
return ip_endpoint
def test_get_endpoint_by_ip_for_not_existing_tunnel_ip(self):
ip_endpoint = self.driver.get_endpoint_by_ip(TUNNEL_IP_TWO)
self.assertIsNone(ip_endpoint)
def test_delete_endpoint(self):
self.add_endpoint()
self.assertIsNone(self.driver.delete_endpoint(TUNNEL_IP_ONE))
# Get all the endpoints and verify the deleted tunnel IP is gone
endpoints = self.driver.get_endpoints()
self.assertNotIn(TUNNEL_IP_ONE, endpoints)
class TunnelTypeMultiRangeTestMixin(object):
DRIVER_CLASS = None
TUN_MIN0 = 100
TUN_MAX0 = 101
TUN_MIN1 = 200
TUN_MAX1 = 201
TUNNEL_MULTI_RANGES = [(TUN_MIN0, TUN_MAX0), (TUN_MIN1, TUN_MAX1)]
def setUp(self):
super(TunnelTypeMultiRangeTestMixin, self).setUp()
self.driver = self.DRIVER_CLASS()
self.driver.tunnel_ranges = self.TUNNEL_MULTI_RANGES
self.driver.sync_allocations()
self.context = context.Context()
def test_release_segment(self):
segments = [self.driver.allocate_tenant_segment(self.context)
for i in range(4)]
# Release them in random order. No special meaning.
for i in (0, 2, 1, 3):
self.driver.release_segment(self.context, segments[i])
for key in (self.TUN_MIN0, self.TUN_MAX0,
self.TUN_MIN1, self.TUN_MAX1):
alloc = self.driver.get_allocation(self.context, key)
self.assertFalse(alloc.allocated)
class TunnelRpcCallbackTestMixin(object):
DRIVER_CLASS = None
TYPE = None
def setUp(self):
super(TunnelRpcCallbackTestMixin, self).setUp()
self.driver = self.DRIVER_CLASS()
def _test_tunnel_sync(self, kwargs, delete_tunnel=False):
with mock.patch.object(self.notifier,
'tunnel_update') as tunnel_update,\
mock.patch.object(self.notifier,
'tunnel_delete') as tunnel_delete:
details = self.callbacks.tunnel_sync('fake_context', **kwargs)
tunnels = details['tunnels']
for tunnel in tunnels:
self.assertEqual(kwargs['tunnel_ip'], tunnel['ip_address'])
self.assertEqual(kwargs['host'], tunnel['host'])
self.assertTrue(tunnel_update.called)
if delete_tunnel:
self.assertTrue(tunnel_delete.called)
else:
self.assertFalse(tunnel_delete.called)
def _test_tunnel_sync_raises(self, kwargs):
with mock.patch.object(self.notifier,
'tunnel_update') as tunnel_update,\
mock.patch.object(self.notifier,
'tunnel_delete') as tunnel_delete:
self.assertRaises(exc.InvalidInput,
self.callbacks.tunnel_sync,
'fake_context', **kwargs)
self.assertFalse(tunnel_update.called)
self.assertFalse(tunnel_delete.called)
def test_tunnel_sync_called_without_host_passed(self):
kwargs = {'tunnel_ip': TUNNEL_IP_ONE, 'tunnel_type': self.TYPE,
'host': None}
self._test_tunnel_sync(kwargs)
def test_tunnel_sync_called_with_host_passed_for_existing_tunnel_ip(self):
self.driver.add_endpoint(TUNNEL_IP_ONE, None)
kwargs = {'tunnel_ip': TUNNEL_IP_ONE, 'tunnel_type': self.TYPE,
'host': HOST_ONE}
self._test_tunnel_sync(kwargs)
def test_tunnel_sync_called_with_host_passed(self):
kwargs = {'tunnel_ip': TUNNEL_IP_ONE, 'tunnel_type': self.TYPE,
'host': HOST_ONE}
self._test_tunnel_sync(kwargs)
def test_tunnel_sync_called_with_host_passed_ipv6(self):
cfg.CONF.set_override('overlay_ip_version', 6, group='ml2')
kwargs = {'tunnel_ip': TUNNEL_IPV6_ONE, 'tunnel_type': self.TYPE,
'host': HOST_ONE}
self._test_tunnel_sync(kwargs)
def test_tunnel_sync_called_for_existing_endpoint(self):
self.driver.add_endpoint(TUNNEL_IP_ONE, HOST_ONE)
kwargs = {'tunnel_ip': TUNNEL_IP_ONE, 'tunnel_type': self.TYPE,
'host': HOST_ONE}
self._test_tunnel_sync(kwargs)
def test_tunnel_sync_called_for_existing_host_with_tunnel_ip_changed(self):
self.driver.add_endpoint(TUNNEL_IP_ONE, HOST_ONE)
kwargs = {'tunnel_ip': TUNNEL_IP_TWO, 'tunnel_type': self.TYPE,
'host': HOST_ONE}
self._test_tunnel_sync(kwargs, True)
def test_tunnel_sync_called_with_used_tunnel_ip_host_roaming(self):
self.driver.add_endpoint(TUNNEL_IP_ONE, HOST_ONE)
kwargs = {'tunnel_ip': TUNNEL_IP_ONE, 'tunnel_type': self.TYPE,
'host': HOST_TWO}
self._test_tunnel_sync(kwargs, False)
def test_tunnel_sync_called_with_used_tunnel_ip_roaming_case_two(self):
self.driver.add_endpoint(TUNNEL_IP_ONE, None)
self.driver.add_endpoint(TUNNEL_IP_TWO, HOST_TWO)
kwargs = {'tunnel_ip': TUNNEL_IP_ONE, 'tunnel_type': self.TYPE,
'host': HOST_TWO}
self._test_tunnel_sync(kwargs, False)
def test_tunnel_sync_called_without_tunnel_ip(self):
kwargs = {'tunnel_type': self.TYPE, 'host': None}
self._test_tunnel_sync_raises(kwargs)
def test_tunnel_sync_called_without_tunnel_type(self):
kwargs = {'tunnel_ip': TUNNEL_IP_ONE, 'host': None}
self._test_tunnel_sync_raises(kwargs)
def test_tunnel_sync_called_with_tunnel_overlay_mismatch(self):
cfg.CONF.set_override('overlay_ip_version', 6, group='ml2')
kwargs = {'tunnel_ip': TUNNEL_IP_ONE, 'tunnel_type': self.TYPE,
'host': HOST_ONE}
self._test_tunnel_sync_raises(kwargs)
def test_tunnel_sync_called_with_tunnel_overlay_mismatch_ipv6(self):
cfg.CONF.set_override('overlay_ip_version', 4, group='ml2')
kwargs = {'tunnel_ip': TUNNEL_IPV6_ONE, 'tunnel_type': self.TYPE,
'host': HOST_ONE}
self._test_tunnel_sync_raises(kwargs)
class TunnelTypeMTUTestMixin(object):
DRIVER_CLASS = None
TYPE = None
ENCAP_OVERHEAD = 0
def setUp(self):
super(TunnelTypeMTUTestMixin, self).setUp()
self.driver = self.DRIVER_CLASS()
def _test_get_mtu(self, ip_version):
cfg.CONF.set_override('overlay_ip_version', ip_version,
group='ml2')
ip_header_length = p_const.IP_HEADER_LENGTH[ip_version]
cfg.CONF.set_override('global_physnet_mtu', 1500)
cfg.CONF.set_override('path_mtu', 1475, group='ml2')
self.driver.physnet_mtus = {'physnet1': 1450, 'physnet2': 1400}
self.assertEqual(1475 - self.ENCAP_OVERHEAD - ip_header_length,
self.driver.get_mtu('physnet1'))
cfg.CONF.set_override('global_physnet_mtu', 1450)
cfg.CONF.set_override('path_mtu', 1475, group='ml2')
self.driver.physnet_mtus = {'physnet1': 1400, 'physnet2': 1425}
self.assertEqual(1450 - self.ENCAP_OVERHEAD - ip_header_length,
self.driver.get_mtu('physnet1'))
cfg.CONF.set_override('global_physnet_mtu', 0)
cfg.CONF.set_override('path_mtu', 1450, group='ml2')
self.driver.physnet_mtus = {'physnet1': 1425, 'physnet2': 1400}
self.assertEqual(1450 - self.ENCAP_OVERHEAD - ip_header_length,
self.driver.get_mtu('physnet1'))
cfg.CONF.set_override('global_physnet_mtu', 0)
cfg.CONF.set_override('path_mtu', 0, group='ml2')
self.driver.physnet_mtus = {}
self.assertEqual(0, self.driver.get_mtu('physnet1'))
def test_get_mtu_ipv4(self):
self._test_get_mtu(4)
def test_get_mtu_ipv6(self):
self._test_get_mtu(6)
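# Illustrative sketch only (not part of the driver API under test): the MTU
# assertions above reduce to "take the smallest configured non-zero MTU of
# global_physnet_mtu and path_mtu, then subtract the tunnel encapsulation
# overhead and the IP header length for the configured overlay IP version".
# The header lengths below mirror p_const.IP_HEADER_LENGTH (assumed to be
# 20 bytes for IPv4 and 40 for IPv6); per-physnet MTUs do not apply here.
def _expected_tunnel_mtu(global_physnet_mtu, path_mtu, encap_overhead,
                         ip_version):
    ip_header_length = {4: 20, 6: 40}[ip_version]
    candidates = [mtu for mtu in (global_physnet_mtu, path_mtu) if mtu]
    if not candidates:
        return 0
    return min(candidates) - encap_overhead - ip_header_length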
| |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ServiceEndpointPoliciesOperations:
"""ServiceEndpointPoliciesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2019_04_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def _delete_initial(
self,
resource_group_name: str,
service_endpoint_policy_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-04-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceEndpointPolicyName': self._serialize.url("service_endpoint_policy_name", service_endpoint_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/serviceEndpointPolicies/{serviceEndpointPolicyName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
service_endpoint_policy_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""Deletes the specified service endpoint policy.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_endpoint_policy_name: The name of the service endpoint policy.
:type service_endpoint_policy_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
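        Example (illustrative only; ``client`` stands for an already-constructed
        ``NetworkManagementClient`` for API version 2019-04-01)::
            poller = await client.service_endpoint_policies.begin_delete(
                "my-resource-group", "my-policy")
            await poller.result()  # completes once the policy has been deleted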
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
service_endpoint_policy_name=service_endpoint_policy_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceEndpointPolicyName': self._serialize.url("service_endpoint_policy_name", service_endpoint_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/serviceEndpointPolicies/{serviceEndpointPolicyName}'} # type: ignore
async def get(
self,
resource_group_name: str,
service_endpoint_policy_name: str,
expand: Optional[str] = None,
**kwargs: Any
) -> "_models.ServiceEndpointPolicy":
"""Gets the specified service Endpoint Policies in a specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_endpoint_policy_name: The name of the service endpoint policy.
:type service_endpoint_policy_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ServiceEndpointPolicy, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2019_04_01.models.ServiceEndpointPolicy
:raises: ~azure.core.exceptions.HttpResponseError
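        Example (illustrative only; ``client`` is assumed to be an
        already-constructed ``NetworkManagementClient``)::
            policy = await client.service_endpoint_policies.get(
                "my-resource-group", "my-policy")
            print(policy.name)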
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceEndpointPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-04-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceEndpointPolicyName': self._serialize.url("service_endpoint_policy_name", service_endpoint_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ServiceEndpointPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/serviceEndpointPolicies/{serviceEndpointPolicyName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
service_endpoint_policy_name: str,
parameters: "_models.ServiceEndpointPolicy",
**kwargs: Any
) -> "_models.ServiceEndpointPolicy":
cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceEndpointPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-04-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceEndpointPolicyName': self._serialize.url("service_endpoint_policy_name", service_endpoint_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ServiceEndpointPolicy')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ServiceEndpointPolicy', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ServiceEndpointPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/serviceEndpointPolicies/{serviceEndpointPolicyName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
service_endpoint_policy_name: str,
parameters: "_models.ServiceEndpointPolicy",
**kwargs: Any
) -> AsyncLROPoller["_models.ServiceEndpointPolicy"]:
"""Creates or updates a service Endpoint Policies.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_endpoint_policy_name: The name of the service endpoint policy.
:type service_endpoint_policy_name: str
:param parameters: Parameters supplied to the create or update service endpoint policy
operation.
:type parameters: ~azure.mgmt.network.v2019_04_01.models.ServiceEndpointPolicy
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ServiceEndpointPolicy or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2019_04_01.models.ServiceEndpointPolicy]
:raises ~azure.core.exceptions.HttpResponseError:
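        Example (illustrative only; ``client`` is an assumed
        ``NetworkManagementClient`` and ``ServiceEndpointPolicy`` comes from the
        ``models`` namespace of this package)::
            poller = await client.service_endpoint_policies.begin_create_or_update(
                "my-resource-group", "my-policy",
                ServiceEndpointPolicy(location="westus"))
            policy = await poller.result()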
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceEndpointPolicy"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
service_endpoint_policy_name=service_endpoint_policy_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ServiceEndpointPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceEndpointPolicyName': self._serialize.url("service_endpoint_policy_name", service_endpoint_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/serviceEndpointPolicies/{serviceEndpointPolicyName}'} # type: ignore
async def _update_initial(
self,
resource_group_name: str,
service_endpoint_policy_name: str,
parameters: "_models.TagsObject",
**kwargs: Any
) -> "_models.ServiceEndpointPolicy":
cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceEndpointPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-04-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceEndpointPolicyName': self._serialize.url("service_endpoint_policy_name", service_endpoint_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ServiceEndpointPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/serviceEndpointPolicies/{serviceEndpointPolicyName}'} # type: ignore
async def begin_update(
self,
resource_group_name: str,
service_endpoint_policy_name: str,
parameters: "_models.TagsObject",
**kwargs: Any
) -> AsyncLROPoller["_models.ServiceEndpointPolicy"]:
"""Updates service Endpoint Policies.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_endpoint_policy_name: The name of the service endpoint policy.
:type service_endpoint_policy_name: str
:param parameters: Parameters supplied to update service endpoint policy tags.
:type parameters: ~azure.mgmt.network.v2019_04_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling.
Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either ServiceEndpointPolicy or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2019_04_01.models.ServiceEndpointPolicy]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceEndpointPolicy"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._update_initial(
resource_group_name=resource_group_name,
service_endpoint_policy_name=service_endpoint_policy_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ServiceEndpointPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceEndpointPolicyName': self._serialize.url("service_endpoint_policy_name", service_endpoint_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
if polling is True: polling_method = AsyncARMPolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/serviceEndpointPolicies/{serviceEndpointPolicyName}'} # type: ignore
def list(
self,
**kwargs: Any
) -> AsyncIterable["_models.ServiceEndpointPolicyListResult"]:
"""Gets all the service endpoint policies in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator-like instance of either ServiceEndpointPolicyListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_04_01.models.ServiceEndpointPolicyListResult]
:raises: ~azure.core.exceptions.HttpResponseError
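        Example (illustrative only; ``client`` is assumed to be an
        already-constructed ``NetworkManagementClient``)::
            async for policy in client.service_endpoint_policies.list():
                print(policy.name)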
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceEndpointPolicyListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-04-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ServiceEndpointPolicyListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/ServiceEndpointPolicies'} # type: ignore
def list_by_resource_group(
self,
resource_group_name: str,
**kwargs: Any
) -> AsyncIterable["_models.ServiceEndpointPolicyListResult"]:
"""Gets all service endpoint Policies in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator-like instance of either ServiceEndpointPolicyListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_04_01.models.ServiceEndpointPolicyListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.ServiceEndpointPolicyListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-04-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('ServiceEndpointPolicyListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/serviceEndpointPolicies'} # type: ignore
| |
"""The tests for the IGN Sismologia (Earthquakes) Feed platform."""
import datetime
from unittest.mock import MagicMock, call, patch
from homeassistant.components import geo_location
from homeassistant.components.geo_location import ATTR_SOURCE
from homeassistant.components.ign_sismologia.geo_location import (
ATTR_EXTERNAL_ID,
ATTR_IMAGE_URL,
ATTR_MAGNITUDE,
ATTR_PUBLICATION_DATE,
ATTR_REGION,
ATTR_TITLE,
SCAN_INTERVAL,
)
from homeassistant.const import (
ATTR_ATTRIBUTION,
ATTR_FRIENDLY_NAME,
ATTR_ICON,
ATTR_LATITUDE,
ATTR_LONGITUDE,
ATTR_UNIT_OF_MEASUREMENT,
CONF_LATITUDE,
CONF_LONGITUDE,
CONF_RADIUS,
EVENT_HOMEASSISTANT_START,
LENGTH_KILOMETERS,
)
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from tests.common import assert_setup_component, async_fire_time_changed
CONFIG = {geo_location.DOMAIN: [{"platform": "ign_sismologia", CONF_RADIUS: 200}]}
CONFIG_WITH_CUSTOM_LOCATION = {
geo_location.DOMAIN: [
{
"platform": "ign_sismologia",
CONF_RADIUS: 200,
CONF_LATITUDE: 40.4,
CONF_LONGITUDE: -3.7,
}
]
}
def _generate_mock_feed_entry(
external_id,
title,
distance_to_home,
coordinates,
region=None,
attribution=None,
published=None,
magnitude=None,
image_url=None,
):
"""Construct a mock feed entry for testing purposes."""
feed_entry = MagicMock()
feed_entry.external_id = external_id
feed_entry.title = title
feed_entry.distance_to_home = distance_to_home
feed_entry.coordinates = coordinates
feed_entry.region = region
feed_entry.attribution = attribution
feed_entry.published = published
feed_entry.magnitude = magnitude
feed_entry.image_url = image_url
return feed_entry
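# Note for readers: the tests below patch georss_ign_sismologia_client.IgnSismologiaFeed
# so that update() returns the mock entries built above, then drive the platform by
# firing EVENT_HOMEASSISTANT_START and by advancing time with async_fire_time_changed
# in SCAN_INTERVAL steps.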
async def test_setup(hass):
"""Test the general setup of the platform."""
# Set up some mock feed entries for this test.
mock_entry_1 = _generate_mock_feed_entry(
"1234",
"Title 1",
15.5,
(38.0, -3.0),
region="Region 1",
attribution="Attribution 1",
published=datetime.datetime(2018, 9, 22, 8, 0, tzinfo=datetime.timezone.utc),
magnitude=5.7,
image_url="http://image.url/map.jpg",
)
mock_entry_2 = _generate_mock_feed_entry(
"2345", "Title 2", 20.5, (38.1, -3.1), magnitude=4.6
)
mock_entry_3 = _generate_mock_feed_entry(
"3456", "Title 3", 25.5, (38.2, -3.2), region="Region 3"
)
mock_entry_4 = _generate_mock_feed_entry("4567", "Title 4", 12.5, (38.3, -3.3))
# Patching 'utcnow' to gain more control over the timed update.
utcnow = dt_util.utcnow()
with patch("homeassistant.util.dt.utcnow", return_value=utcnow), patch(
"georss_ign_sismologia_client.IgnSismologiaFeed"
) as mock_feed:
mock_feed.return_value.update.return_value = (
"OK",
[mock_entry_1, mock_entry_2, mock_entry_3],
)
with assert_setup_component(1, geo_location.DOMAIN):
assert await async_setup_component(hass, geo_location.DOMAIN, CONFIG)
await hass.async_block_till_done()
# Artificially trigger update.
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
# Collect events.
await hass.async_block_till_done()
all_states = hass.states.async_all()
assert len(all_states) == 3
state = hass.states.get("geo_location.m_5_7_region_1")
assert state is not None
assert state.name == "M 5.7 - Region 1"
assert state.attributes == {
ATTR_EXTERNAL_ID: "1234",
ATTR_LATITUDE: 38.0,
ATTR_LONGITUDE: -3.0,
ATTR_FRIENDLY_NAME: "M 5.7 - Region 1",
ATTR_TITLE: "Title 1",
ATTR_REGION: "Region 1",
ATTR_ATTRIBUTION: "Attribution 1",
ATTR_PUBLICATION_DATE: datetime.datetime(
2018, 9, 22, 8, 0, tzinfo=datetime.timezone.utc
),
ATTR_IMAGE_URL: "http://image.url/map.jpg",
ATTR_MAGNITUDE: 5.7,
ATTR_UNIT_OF_MEASUREMENT: LENGTH_KILOMETERS,
ATTR_SOURCE: "ign_sismologia",
ATTR_ICON: "mdi:pulse",
}
assert float(state.state) == 15.5
state = hass.states.get("geo_location.m_4_6")
assert state is not None
assert state.name == "M 4.6"
assert state.attributes == {
ATTR_EXTERNAL_ID: "2345",
ATTR_LATITUDE: 38.1,
ATTR_LONGITUDE: -3.1,
ATTR_FRIENDLY_NAME: "M 4.6",
ATTR_TITLE: "Title 2",
ATTR_MAGNITUDE: 4.6,
ATTR_UNIT_OF_MEASUREMENT: LENGTH_KILOMETERS,
ATTR_SOURCE: "ign_sismologia",
ATTR_ICON: "mdi:pulse",
}
assert float(state.state) == 20.5
state = hass.states.get("geo_location.region_3")
assert state is not None
assert state.name == "Region 3"
assert state.attributes == {
ATTR_EXTERNAL_ID: "3456",
ATTR_LATITUDE: 38.2,
ATTR_LONGITUDE: -3.2,
ATTR_FRIENDLY_NAME: "Region 3",
ATTR_TITLE: "Title 3",
ATTR_REGION: "Region 3",
ATTR_UNIT_OF_MEASUREMENT: LENGTH_KILOMETERS,
ATTR_SOURCE: "ign_sismologia",
ATTR_ICON: "mdi:pulse",
}
assert float(state.state) == 25.5
# Simulate an update - one existing, one new entry,
# one outdated entry
mock_feed.return_value.update.return_value = (
"OK",
[mock_entry_1, mock_entry_4, mock_entry_3],
)
async_fire_time_changed(hass, utcnow + SCAN_INTERVAL)
await hass.async_block_till_done()
all_states = hass.states.async_all()
assert len(all_states) == 3
# Simulate an update - empty data, but successful update,
# so no changes to entities.
mock_feed.return_value.update.return_value = "OK_NO_DATA", None
async_fire_time_changed(hass, utcnow + 2 * SCAN_INTERVAL)
await hass.async_block_till_done()
all_states = hass.states.async_all()
assert len(all_states) == 3
            # Simulate an update - error status, removes all entities
mock_feed.return_value.update.return_value = "ERROR", None
async_fire_time_changed(hass, utcnow + 3 * SCAN_INTERVAL)
await hass.async_block_till_done()
all_states = hass.states.async_all()
assert len(all_states) == 0
async def test_setup_with_custom_location(hass):
"""Test the setup with a custom location."""
# Set up some mock feed entries for this test.
mock_entry_1 = _generate_mock_feed_entry("1234", "Title 1", 20.5, (38.1, -3.1))
with patch("georss_ign_sismologia_client.IgnSismologiaFeed") as mock_feed:
mock_feed.return_value.update.return_value = "OK", [mock_entry_1]
with assert_setup_component(1, geo_location.DOMAIN):
assert await async_setup_component(
hass, geo_location.DOMAIN, CONFIG_WITH_CUSTOM_LOCATION
)
await hass.async_block_till_done()
# Artificially trigger update.
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
# Collect events.
await hass.async_block_till_done()
all_states = hass.states.async_all()
assert len(all_states) == 1
assert mock_feed.call_args == call(
(40.4, -3.7), filter_minimum_magnitude=0.0, filter_radius=200.0
)
| |
# Python 3
import fileinput
import glob
import shutil
import sys
import os
import re
import distutils.dir_util
VERSION_SHORT = "v.31a"
VERSION_FULL = VERSION_SHORT + ", released 12 Feb 2022"
EXEC_UGLIFYJS_WIN = "{2}/lib/uglifyjs.cmd --parse bare_returns --compress --mangle toplevel --mangle-props keep_quoted,reserved=[{3}] --output \"{1}\" \"{0}\""
EXEC_UGLIFYJS_AUTO = "uglifyjs --parse bare_returns --compress --mangle toplevel --mangle-props keep_quoted,reserved=[{3}] --output \"{1}\" \"{0}\""
USE_UGLIFYJS = "--nominify" not in sys.argv
USE_MINIFICATION = "--nominify" not in sys.argv
BUILD_WEBSITE = "--website" in sys.argv
CLIPBOARD_TRACKER = "--copytracker" in sys.argv
WORKING_DIR = os.getcwd()
# UglifyJS Setup
if os.name == "nt":
EXEC_UGLIFYJS = EXEC_UGLIFYJS_WIN
else:
EXEC_UGLIFYJS = EXEC_UGLIFYJS_AUTO
if USE_UGLIFYJS and shutil.which("uglifyjs") is None:
USE_UGLIFYJS = False
print("Could not find 'uglifyjs', JS minification will be disabled")
if USE_UGLIFYJS:
with open("reserve.txt", "r") as reserved:
RESERVED_PROPS = ",".join(line.strip() for line in reserved.readlines())
# File Utilities
def combine_files(input_pattern, output_file):
is_first_file = True
with fileinput.input(sorted(glob.glob(input_pattern))) as stream:
for line in stream:
if stream.isfirstline():
if is_first_file:
is_first_file = False
else:
output_file.write("\n")
output_file.write(line.replace("{{{version:full}}}", VERSION_FULL))
def minify_css(input_file, output_file):
if not USE_MINIFICATION:
if input_file != output_file:
shutil.copyfile(input_file, output_file)
return
with open(input_file, "r") as fin:
css = fin.read()
css = re.sub(r"^\s+(.+?):\s*(.+?)(?:\s*(!important))?;\n", r"\1:\2\3;", css, flags = re.M) # remove spaces after colons
css = re.sub(r"\{\n", r"{", css, flags = re.M) # remove new lines after {
css = re.sub(r"\n\}", r"}", css, flags = re.M) # remove new lines before }
css = re.sub(r"\n\n", r"\n", css, flags = re.M) # remove empty lines
css = re.sub(r";\}$", r"}", css, flags = re.M) # remove last semicolons
css = re.sub(r"rgb\((.*?),\s*(.*?),\s*(.*?)\)", r"rgb(\1,\2,\3)", css, flags = re.M) # remove spaces after commas in rgb()
css = re.sub(r"rgba\((.*?),\s*(.*?),\s*(.*?),\s*(.*?)\)", r"rgba(\1,\2,\3,\4)", css, flags = re.M) # remove spaces after commas in rgba()
with open(output_file, "w") as out:
out.write(css)
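# Illustrative example (not executed by the build): with minification enabled,
# a rule such as
#     .header {
#         color: rgb(255, 0, 0) !important;
#     }
# is collapsed by the substitutions above into roughly
#     .header {color:rgb(255,0,0)!important}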
# Build System
def build_tracker_html():
output_file_raw = "bld/track.js"
output_file_html = "bld/track.html"
output_file_tmp = "bld/track.tmp.js"
input_pattern = "src/tracker/*.js"
with open(output_file_raw, "w") as out:
if not USE_UGLIFYJS:
out.write("(function(){\n")
combine_files(input_pattern, out)
if not USE_UGLIFYJS:
out.write("})()")
if USE_UGLIFYJS:
os.system(EXEC_UGLIFYJS.format(output_file_raw, output_file_tmp, WORKING_DIR, RESERVED_PROPS))
with open(output_file_raw, "w") as out:
out.write("javascript:(function(){")
with open(output_file_tmp, "r") as minified:
out.write(minified.read().replace("\n", " ").replace("\r", ""))
out.write("})()")
os.remove(output_file_tmp)
with open(output_file_raw, "r") as raw:
        script_contents = raw.read().replace("&", "&amp;").replace('"', "&quot;").replace("'", "&#39;").replace("<", "&lt;").replace(">", "&gt;")
with open(output_file_html, "w") as out:
out.write(script_contents)
def build_tracker_userscript():
output_file = "bld/track.user.js"
input_pattern = "src/tracker/*.js"
userscript_base = "src/base/track.user.js"
with open(userscript_base, "r") as base:
userscript_contents = base.read().replace("{{{version}}}", VERSION_SHORT).split("{{{contents}}}")
with open(output_file, "w") as out:
out.write(userscript_contents[0])
combine_files(input_pattern, out)
out.write(userscript_contents[1])
def build_viewer():
output_file = "bld/viewer.html"
input_html = "src/viewer/index.html"
input_css_pattern = "src/viewer/styles/*.css"
tmp_css_file_combined = "bld/viewer.tmp.css"
tmp_css_file_minified = "bld/viewer.min.css"
with open(tmp_css_file_combined, "w") as out:
combine_files(input_css_pattern, out)
minify_css(tmp_css_file_combined, tmp_css_file_minified)
os.remove(tmp_css_file_combined)
input_js_pattern = "src/viewer/scripts/*.js"
tmp_js_file_combined = "bld/viewer.tmp.js"
tmp_js_file_minified = "bld/viewer.min.js"
with open(tmp_js_file_combined, "w") as out:
combine_files(input_js_pattern, out)
if USE_UGLIFYJS:
os.system(EXEC_UGLIFYJS.format(tmp_js_file_combined, tmp_js_file_minified, WORKING_DIR, RESERVED_PROPS))
else:
shutil.copyfile(tmp_js_file_combined, tmp_js_file_minified)
os.remove(tmp_js_file_combined)
tokens = {
"/*{js}*/": tmp_js_file_minified,
"/*{css}*/": tmp_css_file_minified
}
with open(output_file, "w") as out:
with open(input_html, "r") as fin:
for line in fin:
token = None
for token in (token for token in tokens if token in line):
with open(tokens[token], "r") as token_file:
embedded = token_file.read()
out.write(embedded)
os.remove(tokens[token])
if token is None:
out.write(line)
def build_website():
tracker_file_html = "bld/track.html"
tracker_file_userscript = "bld/track.user.js"
viewer_file = "bld/viewer.html"
web_style_file = "bld/web/style.css"
distutils.dir_util.copy_tree("web", "bld/web")
index_file = "bld/web/index.php"
with open(index_file, "r") as index:
index_contents = index.read()
with open(index_file, "w") as index:
        index.write(index_contents.replace("{{{version:web}}}", VERSION_SHORT.replace(" ", "&nbsp;")))
shutil.copyfile(tracker_file_html, "bld/web/build/track.html")
shutil.copyfile(tracker_file_userscript, "bld/web/build/track.user.js")
shutil.copyfile(viewer_file, "bld/web/build/viewer.html")
minify_css(web_style_file, web_style_file)
# Build Process
os.makedirs("bld", exist_ok = True)
print("Building tracker html...")
build_tracker_html()
print("Building tracker userscript...")
build_tracker_userscript()
print("Building viewer...")
build_viewer()
if BUILD_WEBSITE:
print("Building website...")
build_website()
if CLIPBOARD_TRACKER:
if os.name == "nt":
print("Copying to clipboard...")
os.system("clip < bld/track.js")
else:
print("Clipboard is only supported on Windows")
print("Done")
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""High level operations on graphs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import sys
import threading
import time
import numpy as np
from six import reraise
from tensorflow.contrib.framework.python.ops import ops as contrib_ops
from tensorflow.contrib.framework.python.ops import variables as contrib_variables
from tensorflow.contrib.learn.python.learn import monitors as monitors_lib
from tensorflow.contrib.learn.python.learn.utils import checkpoints
from tensorflow.core.framework import summary_pb2
from tensorflow.python.client import session as tf_session
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner
from tensorflow.python.training import saver as tf_saver
from tensorflow.python.training import session_manager as session_manager_lib
from tensorflow.python.training import summary_io
from tensorflow.python.training import supervisor as tf_supervisor
# Singleton for SummaryWriter per logdir folder.
_SUMMARY_WRITERS = {}
# Lock protecting _SUMMARY_WRITERS
_summary_writer_lock = threading.Lock()
def clear_summary_writers():
"""Clear cached summary writers. Currently only used for unit tests."""
_summary_writer_lock.acquire()
_SUMMARY_WRITERS.clear()
_summary_writer_lock.release()
def get_summary_writer(logdir):
"""Returns single SummaryWriter per logdir in current run.
Args:
logdir: str, folder to write summaries.
Returns:
Existing `SummaryWriter` object or new one if never wrote to given
directory.
"""
_summary_writer_lock.acquire()
if logdir not in _SUMMARY_WRITERS:
_SUMMARY_WRITERS[logdir] = summary_io.SummaryWriter(
logdir, graph=ops.get_default_graph())
_summary_writer_lock.release()
return _SUMMARY_WRITERS[logdir]
class NanLossDuringTrainingError(RuntimeError):
def __str__(self):
return 'NaN loss during training.'
def _make_saver(graph):
vars_to_save = graph.get_collection(ops.GraphKeys.VARIABLES)
if vars_to_save:
return tf_saver.Saver(vars_to_save, sharded=True)
else:
return None
def _restore_from_checkpoint(session, graph, checkpoint_path, saver=None):
logging.info('Loading model from checkpoint: %s.', checkpoint_path)
assert gfile.Glob(checkpoint_path)
saver = saver or _make_saver(graph)
if saver:
saver.restore(session, checkpoint_path)
else:
logging.info('No variables found in graph, not creating Saver() object.')
def _run_with_monitors(session, step, tensors, feed_dict, monitors):
"""Runs session for given tensors with monitor callbacks."""
for monitor in monitors:
tensors += monitor.step_begin(step)
tensors = list(set(tensors))
outputs = session.run(tensors, feed_dict=feed_dict)
outputs = dict(zip(
[t.name if isinstance(t, ops.Tensor) else t for t in tensors],
outputs))
should_stop = False
for monitor in monitors:
induce_stop = monitor.step_end(step, outputs)
should_stop = should_stop or induce_stop
return outputs, should_stop
# TODO(ptucker): Add unit test.
# TODO(wicke): switch to forced named kwargs
def train(graph,
output_dir,
train_op,
loss_op,
global_step_tensor=None,
init_op=None,
init_feed_dict=None,
init_fn=None,
log_every_steps=10,
supervisor_is_chief=True,
supervisor_master='',
supervisor_save_model_secs=600,
supervisor_save_summaries_steps=100,
feed_fn=None,
steps=None,
fail_on_nan_loss=True,
monitors=None):
"""Train a model.
Given `graph`, a directory to write outputs to (`output_dir`), and some ops,
  run a training loop. The given `train_op` performs one step of training on
  the model and is expected to increment the `global_step_tensor`, a scalar
  integer tensor counting training steps. The `loss_op` represents the
  objective function of the training. This function uses `Supervisor` to initialize the
graph (from a checkpoint if one is available in `output_dir`), write summaries
defined in the graph, and write regular checkpoints as defined by
`supervisor_save_model_secs`.
  Training continues until `global_step_tensor` evaluates to `max_steps`, or,
  if `fail_on_nan_loss` is true, until `loss_op` evaluates to `NaN`, in which
  case a `NanLossDuringTrainingError` is raised.
Args:
graph: A graph to train. It is expected that this graph is not in use
elsewhere.
output_dir: A directory to write outputs to.
train_op: An op that performs one training step when run.
loss_op: A scalar loss tensor.
global_step_tensor: A tensor representing the global step. If none is given,
one is extracted from the graph using the same logic as in `Supervisor`.
init_op: An op that initializes the graph. If `None`, use `Supervisor`'s
default.
init_feed_dict: A dictionary that maps `Tensor` objects to feed values.
This feed dictionary will be used when `init_op` is evaluated.
init_fn: Optional callable passed to Supervisor to initialize the model.
log_every_steps: Output logs regularly. The logs contain timing data and the
current loss.
supervisor_is_chief: Whether the current process is the chief supervisor in
charge of restoring the model and running standard services.
supervisor_master: The master string to use when preparing the session.
supervisor_save_model_secs: Save a checkpoint every
`supervisor_save_model_secs` seconds when training.
supervisor_save_summaries_steps: Save summaries every
      `supervisor_save_summaries_steps` steps when training.
feed_fn: A function that is called every iteration to produce a `feed_dict`
passed to `session.run` calls. Optional.
steps: Trains for this many steps (e.g. current global step + `steps`).
fail_on_nan_loss: If true, raise `NanLossDuringTrainingError` if `loss_op`
evaluates to `NaN`. If false, continue training as if nothing happened.
monitors: List of `BaseMonitor` subclass instances. Used for callbacks
inside the training loop.
Returns:
The final loss value.
Raises:
ValueError: If `global_step_tensor` is not provided. See
`tf.contrib.framework.get_global_step` for how we look it up if not
provided explicitly.
NanLossDuringTrainingError: If `fail_on_nan_loss` is `True`, and loss ever
evaluates to `NaN`.
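  Example (illustrative only; `my_graph`, `my_train_op` and `my_loss_op` are
  placeholders for objects built by the caller, not names defined here):
    loss = train(my_graph, '/tmp/my_model',
                 train_op=my_train_op, loss_op=my_loss_op, steps=1000)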
"""
if not output_dir:
raise ValueError('Output directory should be non-empty.')
with graph.as_default():
global_step_tensor = contrib_variables.assert_or_get_global_step(
graph, global_step_tensor)
if global_step_tensor is None:
raise ValueError('No "global_step" was provided or found in the graph.')
# Get current step.
try:
start_step = checkpoints.load_variable(
output_dir, global_step_tensor.name)
except (errors.NotFoundError, ValueError):
start_step = 0
summary_writer = (get_summary_writer(output_dir)
if supervisor_is_chief else None)
# TODO(ipolosukhin): Replace all functionality of Supervisor with Monitors.
if not supervisor_is_chief:
# monitors should run only on the chief.
monitors = []
elif not monitors:
monitors = monitors_lib.get_default_monitors(
loss_op=loss_op,
summary_op=logging_ops.get_summary_op(),
save_summary_steps=supervisor_save_summaries_steps,
summary_writer=summary_writer)
max_steps = (start_step + steps) if steps else None
# Start monitors, can create graph parts.
for monitor in monitors:
monitor.begin(max_steps=max_steps)
supervisor = tf_supervisor.Supervisor(
graph,
init_op=init_op or tf_supervisor.Supervisor.USE_DEFAULT,
init_feed_dict=init_feed_dict,
is_chief=supervisor_is_chief,
logdir=output_dir,
saver=_make_saver(graph),
global_step=global_step_tensor,
summary_op=None,
summary_writer=summary_writer,
save_model_secs=supervisor_save_model_secs,
init_fn=init_fn)
session = supervisor.PrepareSession(master=supervisor_master,
start_standard_services=True)
supervisor.StartQueueRunners(session)
with session:
get_current_step = lambda: session.run(global_step_tensor)
start_step = get_current_step()
last_step = start_step
last_log_step = start_step
loss_value = None
logging.info('Training steps [%d,%s)', last_step, 'inf'
if max_steps is None else str(max_steps))
excinfo = None
try:
while not supervisor.ShouldStop() and (
(max_steps is None) or (last_step < max_steps)):
start_time = time.time()
feed_dict = feed_fn() if feed_fn is not None else None
outputs, should_stop = _run_with_monitors(
session, last_step + 1, [train_op, loss_op], feed_dict, monitors)
loss_value = outputs[loss_op.name]
if np.isnan(loss_value):
failure_message = 'Model diverged with loss = NaN.'
if fail_on_nan_loss:
logging.error(failure_message)
raise NanLossDuringTrainingError()
else:
logging.warning(failure_message)
if should_stop:
break
this_step = get_current_step()
if this_step <= last_step:
logging.error(
'Global step was not incremented by train op at step %s'
': new step %d', last_step, this_step)
last_step = this_step
is_last_step = (max_steps is not None) and (last_step >= max_steps)
if is_last_step or (last_step - last_log_step >= log_every_steps):
logging.info(
'training step %d, loss = %.5f (%.3f sec/batch).',
last_step, loss_value, float(time.time() - start_time))
last_log_step = last_step
except errors.OutOfRangeError as e:
logging.warn('Got exception during tf.learn training loop possibly '
'due to exhausted input queue %s.', e)
except BaseException as e: # pylint: disable=broad-except
# Hold on to any other exceptions while we try recording a final
# checkpoint and summary.
excinfo = sys.exc_info()
finally:
try:
# Call supervisor.Stop() from within a try block because it re-raises
# exceptions thrown by the supervised threads.
supervisor.Stop(close_summary_writer=False)
# Save one last checkpoint and summaries
# TODO(wicke): This should be handled by Supervisor
# In case we encountered an exception in the try block before we updated
# last_step, update it here (again).
last_step = get_current_step()
if supervisor_is_chief:
ckpt_path = supervisor.save_path
logging.info('Saving checkpoint for step %d to checkpoint: %s.',
last_step, ckpt_path)
supervisor.saver.save(session, ckpt_path, global_step=last_step)
# Finish monitors.
for monitor in monitors:
monitor.end()
# catch OutOfRangeError which is thrown when queue is out of data (and for
# other reasons as well).
except errors.OutOfRangeError as e:
logging.warn('OutOfRangeError in tf.learn final checkpoint possibly '
'due to exhausted input queue. Note: summary_op is not '
'expected to trigger dequeues. %s.', e)
except BaseException as e: # pylint: disable=broad-except
# If we don't already have an exception to re-raise, raise this one.
if not excinfo:
raise
# Otherwise, log this one and raise the other in the finally block.
logging.error('Got exception during tf.learn final checkpoint %s.', e)
finally:
if excinfo:
reraise(*excinfo)
return loss_value
def _get_first_op_from_collection(collection_name):
elements = ops.get_collection(collection_name)
if elements is not None:
if elements:
return elements[0]
return None
def _get_saver():
"""Lazy init and return saver."""
saver = _get_first_op_from_collection(ops.GraphKeys.SAVERS)
if saver is not None:
if saver:
saver = saver[0]
else:
saver = None
if saver is None and variables.all_variables():
saver = tf_saver.Saver()
ops.add_to_collection(ops.GraphKeys.SAVERS, saver)
return saver
def _get_ready_op():
ready_op = _get_first_op_from_collection(ops.GraphKeys.READY_OP)
if ready_op is None:
ready_op = variables.report_uninitialized_variables()
ops.add_to_collection(ops.GraphKeys.READY_OP, ready_op)
return ready_op
def _get_local_init_op():
local_init_op = _get_first_op_from_collection(
ops.GraphKeys.LOCAL_INIT_OP)
if local_init_op is None:
op_list = [variables.initialize_local_variables(),
data_flow_ops.initialize_all_tables()]
if op_list:
local_init_op = control_flow_ops.group(*op_list)
ops.add_to_collection(ops.GraphKeys.LOCAL_INIT_OP, local_init_op)
return local_init_op
def _eval_results_to_str(eval_results):
return ', '.join('%s = %s' % (k, v) for k, v in eval_results.items())
def _write_summary_results(output_dir, eval_results, current_global_step):
"""Writes eval results into summary file in given dir."""
  logging.info('Saving evaluation summary for step %d: %s', current_global_step,
_eval_results_to_str(eval_results))
summary_writer = get_summary_writer(output_dir)
summary = summary_pb2.Summary()
for key in eval_results:
if eval_results[key] is None:
continue
value = summary.value.add()
value.tag = key
if (isinstance(eval_results[key], np.float32) or
isinstance(eval_results[key], float)):
value.simple_value = float(eval_results[key])
summary_writer.add_summary(summary, current_global_step)
summary_writer.flush()
def evaluate(graph,
output_dir,
checkpoint_path,
eval_dict,
update_op=None,
global_step_tensor=None,
supervisor_master='',
log_every_steps=10,
feed_fn=None,
max_steps=None):
"""Evaluate a model loaded from a checkpoint.
Given `graph`, a directory to write summaries to (`output_dir`), a checkpoint
to restore variables from, and a `dict` of `Tensor`s to evaluate, run an eval
loop for `max_steps` steps, or until an exception (generally, an
end-of-input signal from a reader operation) is raised from running
`eval_dict`.
In each step of evaluation, all tensors in the `eval_dict` are evaluated, and
every `log_every_steps` steps, they are logged. At the very end of evaluation,
a summary is evaluated (finding the summary ops using `Supervisor`'s logic)
and written to `output_dir`.
Args:
graph: A `Graph` to train. It is expected that this graph is not in use
elsewhere.
output_dir: A string containing the directory to write a summary to.
checkpoint_path: A string containing the path to a checkpoint to restore.
Can be `None` if the graph doesn't require loading any variables.
eval_dict: A `dict` mapping string names to tensors to evaluate. It is
evaluated in every logging step. The result of the final evaluation is
returned. If `update_op` is None, then it's evaluated in every step. If
`max_steps` is `None`, this should depend on a reader that will raise an
      end-of-input exception when the inputs are exhausted.
update_op: A `Tensor` which is run in every step.
global_step_tensor: A `Variable` containing the global step. If `None`,
one is extracted from the graph using the same logic as in `Supervisor`.
Used to place eval summaries on training curves.
supervisor_master: The master string to use when preparing the session.
log_every_steps: Integer. Output logs every `log_every_steps` evaluation
steps. The logs contain the `eval_dict` and timing information.
feed_fn: A function that is called every iteration to produce a `feed_dict`
passed to `session.run` calls. Optional.
max_steps: Integer. Evaluate `eval_dict` this many times.
Returns:
A tuple `(eval_results, global_step)`:
eval_results: A `dict` mapping `string` to numeric values (`int`, `float`)
that are the result of running eval_dict in the last step. `None` if no
eval steps were run.
global_step: The global step this evaluation corresponds to.
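  Example (illustrative only; `my_graph`, `my_metrics` and the paths are
  placeholders chosen by the caller):
    results, step = evaluate(my_graph, '/tmp/my_model/eval',
                             '/tmp/my_model/model.ckpt-1000',
                             eval_dict=my_metrics, max_steps=100)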
"""
with graph.as_default():
global_step_tensor = contrib_variables.assert_or_get_global_step(
graph, global_step_tensor)
# Create or get summary op, global_step and saver.
saver = _get_saver()
local_init_op = _get_local_init_op()
ready_op = _get_ready_op()
session_manager = session_manager_lib.SessionManager(
local_init_op=local_init_op,
ready_op=ready_op)
session, initialized = session_manager.recover_session(
master=supervisor_master,
saver=saver,
checkpoint_dir=checkpoint_path)
# Start queue runners.
coord = coordinator.Coordinator()
threads = queue_runner.start_queue_runners(session, coord)
with session:
if not initialized:
logging.warning('Failed to initialize from %s.', checkpoint_path)
# TODO(ipolosukhin): This should be failing, but old code relies on that.
session.run(variables.initialize_all_variables())
if checkpoint_path:
_restore_from_checkpoint(session, graph, checkpoint_path, saver)
current_global_step = session.run(global_step_tensor)
eval_results = None
# TODO(amodei): Fix this to run through the eval set exactly once.
step = 0
eval_step = None
feed_dict = None
logging.info('Eval steps [%d,%s) for training step %d.', step,
'inf' if max_steps is None
else str(max_steps), current_global_step)
try:
try:
while (max_steps is None) or (step < max_steps):
step += 1
start_time = time.time()
feed_dict = feed_fn() if feed_fn is not None else None
if update_op is not None:
session.run(update_op, feed_dict=feed_dict)
else:
eval_results = session.run(eval_dict, feed_dict=feed_dict)
eval_step = step
# TODO(wicke): We should assert that the global step hasn't changed.
if step % log_every_steps == 0:
if eval_step is None or step != eval_step:
eval_results = session.run(eval_dict, feed_dict=feed_dict)
eval_step = step
duration = time.time() - start_time
logging.info('Results after %d steps (%.3f sec/batch): %s.',
step, float(duration),
_eval_results_to_str(eval_results))
finally:
if eval_results is None or step != eval_step:
eval_results = session.run(eval_dict, feed_dict=feed_dict)
eval_step = step
# Stop queue runners.
coord.request_stop()
coord.join(threads, stop_grace_period_secs=120)
# catch OutOfRangeError which is thrown when queue is out of data (and for
# other reasons as well).
except errors.OutOfRangeError as e:
if max_steps is None:
logging.info('Input queue is exhausted.')
else:
logging.warn('Input queue is exhausted: %s.', e)
      # catch StopIteration which is thrown if DataReader is out of data.
except StopIteration as e:
if max_steps is None:
logging.info('Input iterator is exhausted.')
else:
logging.warn('Input iterator is exhausted: %s.', e)
# Save summaries for this evaluation.
_write_summary_results(output_dir, eval_results, current_global_step)
return eval_results, current_global_step
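# Hedged usage sketch (not part of the original module): one way `evaluate` might
# be called for a toy graph with a single constant metric. The `tf` alias, the
# global-step helper location and the output directory are assumptions about a
# TF 1.x-era environment, for illustration only; this helper is never called here.
def _example_evaluate_usage():
  import tensorflow as tf  # assumed available alongside this module
  g = tf.Graph()
  with g.as_default():
    tf.contrib.framework.get_or_create_global_step()  # evaluate() needs a global step
    metric = tf.constant(0.5, name='toy_metric')
    # No checkpoint: evaluate() warns and initializes the (empty) variable set itself.
    results, global_step = evaluate(
        graph=g,
        output_dir='/tmp/toy_eval',
        checkpoint_path=None,
        eval_dict={'toy_metric': metric},
        max_steps=1)
  return results, global_step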
def run_n(output_dict, feed_dict=None, restore_checkpoint_path=None, n=1):
"""Run `output_dict` tensors `n` times, with the same `feed_dict` each run.
Args:
output_dict: A `dict` mapping string names to tensors to run. Must all be
from the same graph.
feed_dict: `dict` of input values to feed each run.
restore_checkpoint_path: A string containing the path to a checkpoint to
restore.
n: Number of times to repeat.
Returns:
A list of `n` `dict` objects, each containing values read from `output_dict`
tensors.
"""
return run_feeds(
output_dict=output_dict,
feed_dicts=itertools.repeat(feed_dict, n),
restore_checkpoint_path=restore_checkpoint_path)
# TODO(ptucker): Add save_checkpoint_path.
def run_feeds(output_dict, feed_dicts, restore_checkpoint_path=None):
"""Run `output_dict` tensors with each input in `feed_dicts`.
If `restore_checkpoint_path` is supplied, restore from checkpoint. Otherwise,
init all variables.
Args:
output_dict: A `dict` mapping string names to `Tensor` objects to run.
Tensors must all be from the same graph.
feed_dicts: Iterable of `dict` objects of input values to feed.
restore_checkpoint_path: A string containing the path to a checkpoint to
restore.
Returns:
A list of dicts of values read from `output_dict` tensors, one item in the
list for each item in `feed_dicts`. Keys are the same as `output_dict`,
values are the results read from the corresponding `Tensor` in
`output_dict`.
Raises:
ValueError: if `output_dict` or `feed_dicts` is None or empty.
"""
if not output_dict:
raise ValueError('output_dict is invalid: %s.' % output_dict)
if not feed_dicts:
raise ValueError('feed_dicts is invalid: %s.' % feed_dicts)
graph = contrib_ops.get_graph_from_inputs(output_dict.values())
with graph.as_default() as g:
with tf_session.Session('') as session:
if restore_checkpoint_path:
_restore_from_checkpoint(session, g, restore_checkpoint_path)
else:
session.run(variables.initialize_all_variables())
session.run(variables.initialize_local_variables())
session.run(data_flow_ops.initialize_all_tables())
coord = coordinator.Coordinator()
threads = None
try:
threads = queue_runner.start_queue_runners(session, coord=coord)
return [session.run(output_dict, f) for f in feed_dicts]
finally:
coord.request_stop()
if threads:
coord.join(threads, stop_grace_period_secs=120)
def infer(restore_checkpoint_path, output_dict, feed_dict=None):
"""Restore graph from `restore_checkpoint_path` and run `output_dict` tensors.
If `restore_checkpoint_path` is supplied, restore from checkpoint. Otherwise,
init all variables.
Args:
restore_checkpoint_path: A string containing the path to a checkpoint to
restore.
output_dict: A `dict` mapping string names to `Tensor` objects to run.
Tensors must all be from the same graph.
feed_dict: `dict` object mapping `Tensor` objects to input values to feed.
Returns:
Dict of values read from `output_dict` tensors. Keys are the same as
`output_dict`, values are the results read from the corresponding `Tensor`
in `output_dict`.
Raises:
ValueError: if `output_dict` or `feed_dicts` is None or empty.
"""
return run_feeds(output_dict=output_dict,
feed_dicts=[feed_dict] if feed_dict is not None else [None],
restore_checkpoint_path=restore_checkpoint_path)[0]
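# Hedged usage sketch (not part of the original module): feed a placeholder and
# read back a derived tensor with `run_n`; `infer` behaves the same way but
# restores variables from a checkpoint first. Names and values are assumptions
# for illustration only.
def _example_run_n_usage():
  import tensorflow as tf  # assumed available alongside this module
  with tf.Graph().as_default():
    x = tf.placeholder(tf.float32, shape=[], name='x')
    doubled = x * 2.0
    outputs = run_n({'doubled': doubled}, feed_dict={x: 21.0}, n=2)
  return outputs  # e.g. [{'doubled': 42.0}, {'doubled': 42.0}]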
| |
"""
The Flow module lets you connect to processes or network services using a
unified API. It is primarily designed for synchronous communication flows.
It is based around the central :class:`Flow` class which uses a ``Channel``
to connect to a process. The :class:`Flow` class then uses the primitives
exposed by the ``Channel`` to provide a high level API for reading/receiving
and writing/sending data.
Examples:
>>> from pwny import *
>>> f = Flow.connect_tcp('ced.pwned.systems', 80)
>>> f.writelines([
... b'GET / HTTP/1.0',
... b'Host: ced.pwned.systems',
... b'',
... ])
>>> line = f.readline().strip()
>>> print(line == b'HTTP/1.0 200 OK')
True
>>> f.until(b'\\r\\n\\r\\n')
>>> f.read_eof(echo=True)
... lots of html ...
>>> from pwny import *
>>> f = Flow.execute('cat')
>>> f.writeline(b'hello')
>>> f.readline(echo=True)
"""
import subprocess
import sys
import socket
__all__ = [
'ProcessChannel',
'SocketChannel',
'TCPSocketChannel',
'Flow',
]
class ProcessChannel(object):
"""ProcessChannel(executable, argument..., redirect_stderr=False)
This channel type allows controlling processes. It uses python's
``subprocess.Popen`` class to execute a process and allows you to
communicate with it.
Args:
executable(str): The executable to start.
argument...(list of str): The arguments to pass to the executable.
redirect_stderr(bool): Whether to also capture the output of stderr.
"""
def __init__(self, executable, *arguments, **kwargs):
if kwargs.get('redirect_stderr'):
stderr = subprocess.STDOUT
else:
stderr = None
self._process = subprocess.Popen(
(executable,) + tuple(arguments),
bufsize=0,
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=stderr,
)
def read(self, n):
"""
Read *n* bytes from the subprocess' output channel.
Args:
n(int): The number of bytes to read.
Returns:
bytes: *n* bytes of output.
Raises:
EOFError: If the process exited.
"""
d = b''
while n:
try:
block = self._process.stdout.read(n)
except ValueError:
block = None
if not block:
self._process.poll()
raise EOFError('Process ended')
d += block
n -= len(block)
return d
def write(self, data):
"""
        Write *data* to the subprocess' input channel.
Args:
data(bytes): The data to write.
Raises:
EOFError: If the process exited.
"""
self._process.poll()
if self._process.returncode is not None:
raise EOFError('Process ended')
self._process.stdin.write(data)
def close(self):
"""
Wait for the subprocess to exit.
"""
self._process.communicate()
def kill(self):
"""
Terminate the subprocess.
"""
self._process.kill()
class SocketChannel(object):
"""
This channel type allows controlling sockets.
Args:
socket(socket.socket): The (already connected) socket to control.
"""
def __init__(self, sock):
self._socket = sock
def read(self, n):
"""
Receive *n* bytes from the socket.
Args:
n(int): The number of bytes to read.
Returns:
bytes: *n* bytes read from the socket.
Raises:
EOFError: If the socket was closed.
"""
d = b''
while n:
try:
block = self._socket.recv(n)
except socket.error:
block = None
if not block:
raise EOFError('Socket closed')
d += block
n -= len(block)
return d
def write(self, data):
"""
        Send *data* to the socket.
Args:
data(bytes): The data to send.
Raises:
EOFError: If the socket was closed.
"""
while data:
try:
n = self._socket.send(data)
except socket.error:
n = None
if not n:
raise EOFError('Socket closed')
data = data[n:]
def close(self):
"""
Close the socket gracefully.
"""
self._socket.close()
def kill(self):
"""
Shut down the socket immediately.
"""
self._socket.shutdown(socket.SHUT_RDWR)
self._socket.close()
class TCPSocketChannel(SocketChannel):
"""
Convenience subclass of :class:`SocketChannel` that allows you to connect
to a TCP hostname / port pair easily.
Args:
host(str): The hostname or IP address to connect to.
port(int): The port number to connect to.
"""
def __init__(self, host, port):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((host, port))
super(TCPSocketChannel, self).__init__(s)
class Flow(object):
"""
The core class of *Flow*. Takes a channel and exposes synchronous
utility functions for communications.
Usually, you'll use the convenience classmethods :meth:`connect_tcp`
    or :meth:`execute` instead of calling the constructor directly.
Args:
channel(``Channel``): A channel.
echo(bool): Whether or not to echo all input / output.
"""
def __init__(self, channel, echo=False):
self.channel = channel
self.echo = echo
def read(self, n, echo=None):
"""
Read *n* bytes from the channel.
Args:
n(int): The number of bytes to read from the channel.
echo(bool): Whether to write the read data to stdout.
Returns:
bytes: *n* bytes of data.
Raises:
EOFError: If the channel was closed.
"""
d = self.channel.read(n)
if echo or (echo is None and self.echo):
sys.stdout.write(d.decode('latin1'))
return d
def read_eof(self, echo=None):
"""
Read until the channel is closed.
Args:
echo(bool): Whether to write the read data to stdout.
Returns:
bytes: The read data.
"""
d = b''
while True:
try:
d += self.read(1, echo)
except EOFError:
return d
def read_until(self, s, echo=None):
"""
        Read until a certain string is encountered.
Args:
s(bytes): The string to wait for.
echo(bool): Whether to write the read data to stdout.
Returns:
bytes: The data up to and including *s*.
Raises:
EOFError: If the channel was closed.
"""
s_len = len(s)
buf = self.read(s_len, echo)
while buf[-s_len:] != s:
buf += self.read(1, echo)
return buf
until = read_until #: Alias of :meth:`read_until`.
def readlines(self, n, echo=None):
"""
Read *n* lines from channel.
Args:
n(int): The number of lines to read.
echo(bool): Whether to write the read data to stdout.
Returns:
            list of bytes: *n* lines, each including its trailing newline character.
Raises:
EOFError: If the channel was closed before *n* lines were read.
"""
return [
self.until(b'\n', echo)
for _ in range(n)
]
def readline(self, echo=None):
"""
Read 1 line from channel.
Args:
echo(bool): Whether to write the read data to stdout.
Returns:
            bytes: The read line, including its trailing newline character.
Raises:
EOFError: If the channel was closed before a line was read.
"""
return self.readlines(1, echo)[0]
def write(self, data, echo=None):
"""
Write data to channel.
Args:
data(bytes): The data to write to the channel.
echo(bool): Whether to echo the written data to stdout.
Raises:
EOFError: If the channel was closed before all data was sent.
"""
if echo or (echo is None and self.echo):
sys.stdout.write(data.decode('latin1'))
self.channel.write(data)
def writelines(self, lines, echo=None):
"""
Write a list of byte sequences to the channel and terminate them
with carriage return and line feed.
Args:
lines(list of bytes): The lines to send.
echo(bool): Whether to echo the written data to stdout.
Raises:
EOFError: If the channel was closed before all data was sent.
"""
self.write(b'\r\n'.join(lines + [b'']), echo)
def writeline(self, line=b'', echo=None):
"""
        Write a byte sequence to the channel and terminate it with carriage
return and line feed.
Args:
line(bytes): The line to send.
echo(bool): Whether to echo the written data to stdout.
Raises:
EOFError: If the channel was closed before all data was sent.
"""
self.writelines([line], echo)
def close(self):
"""
Gracefully close the channel.
"""
self.channel.close()
def kill(self):
"""
Terminate the channel immediately.
"""
self.channel.kill()
@classmethod
def execute(cls, executable, *arguments, **kwargs):
"""execute(executable, argument..., redirect_stderr=False, echo=False):
Set up a :class:`ProcessChannel` and create a :class:`Flow` instance
for it.
Args:
executable(str): The executable to start.
argument...(list of str): The arguments to pass to the executable.
redirect_stderr(bool): Whether to also capture the output of stderr.
echo(bool): Whether to echo read/written data to stdout by default.
Returns:
:class:`Flow`: A Flow instance initialised with the process
channel.
"""
echo = kwargs.pop('echo', False)
return cls(ProcessChannel(executable, *arguments, **kwargs), echo=echo)
@classmethod
def connect_tcp(cls, host, port, echo=False):
"""
Set up a :class:`TCPSocketChannel` and create a :class:`Flow` instance
for it.
Args:
host(str): The hostname or IP address to connect to.
port(int): The port number to connect to.
echo(bool): Whether to echo read/written data to stdout by default.
Returns:
:class:`Flow`: A Flow instance initialised with the TCP socket
channel.
"""
return cls(TCPSocketChannel(host, port), echo=echo)
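# Hedged sketch (not part of the original module): any object that exposes
# read(n), write(data), close() and kill() can act as a channel for Flow. This
# in-memory loopback channel illustrates that duck-typed interface; it is not a
# real transport.
class _LoopbackChannel(object):
    """Echoes written bytes back to the reader; raises EOFError when drained."""

    def __init__(self):
        self._buffer = b''

    def read(self, n):
        if not self._buffer:
            raise EOFError('Loopback drained')
        d, self._buffer = self._buffer[:n], self._buffer[n:]
        return d

    def write(self, data):
        self._buffer += data

    def close(self):
        self._buffer = b''

    def kill(self):
        self._buffer = b''

# Usage sketch: f = Flow(_LoopbackChannel()); f.writeline(b'ping') makes a
# subsequent f.readline() return b'ping\r\n'.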
| |
"""
Giving models a custom manager
You can use a custom ``Manager`` in a particular model by extending the base
``Manager`` class and instantiating your custom ``Manager`` in your model.
There are two reasons you might want to customize a ``Manager``: to add extra
``Manager`` methods, and/or to modify the initial ``QuerySet`` the ``Manager``
returns.
"""
from django.contrib.contenttypes.fields import (
GenericForeignKey, GenericRelation,
)
from django.db import models
class PersonManager(models.Manager):
def get_fun_people(self):
return self.filter(fun=True)
class PublishedBookManager(models.Manager):
def get_queryset(self):
return super().get_queryset().filter(is_published=True)
class AnnotatedBookManager(models.Manager):
def get_queryset(self):
return super().get_queryset().annotate(
favorite_avg=models.Avg('favorite_books__favorite_thing_id')
)
class CustomQuerySet(models.QuerySet):
def filter(self, *args, **kwargs):
queryset = super().filter(fun=True)
queryset._filter_CustomQuerySet = True
return queryset
def public_method(self, *args, **kwargs):
return self.all()
def _private_method(self, *args, **kwargs):
return self.all()
def optout_public_method(self, *args, **kwargs):
return self.all()
optout_public_method.queryset_only = True
def _optin_private_method(self, *args, **kwargs):
return self.all()
_optin_private_method.queryset_only = False
class BaseCustomManager(models.Manager):
def __init__(self, arg):
super().__init__()
self.init_arg = arg
def filter(self, *args, **kwargs):
queryset = super().filter(fun=True)
queryset._filter_CustomManager = True
return queryset
def manager_only(self):
return self.all()
CustomManager = BaseCustomManager.from_queryset(CustomQuerySet)
class CustomInitQuerySet(models.QuerySet):
# QuerySet with an __init__() method that takes an additional argument.
def __init__(self, custom_optional_arg=None, model=None, query=None, using=None, hints=None):
super().__init__(model=model, query=query, using=using, hints=hints)
class DeconstructibleCustomManager(BaseCustomManager.from_queryset(CustomQuerySet)):
def __init__(self, a, b, c=1, d=2):
super().__init__(a)
class FunPeopleManager(models.Manager):
def get_queryset(self):
return super().get_queryset().filter(fun=True)
class BoringPeopleManager(models.Manager):
def get_queryset(self):
return super().get_queryset().filter(fun=False)
class Person(models.Model):
first_name = models.CharField(max_length=30)
last_name = models.CharField(max_length=30)
fun = models.BooleanField(default=False)
favorite_book = models.ForeignKey('Book', models.SET_NULL, null=True, related_name='favorite_books')
favorite_thing_type = models.ForeignKey('contenttypes.ContentType', models.SET_NULL, null=True)
favorite_thing_id = models.IntegerField(null=True)
favorite_thing = GenericForeignKey('favorite_thing_type', 'favorite_thing_id')
objects = PersonManager()
fun_people = FunPeopleManager()
boring_people = BoringPeopleManager()
custom_queryset_default_manager = CustomQuerySet.as_manager()
custom_queryset_custom_manager = CustomManager('hello')
custom_init_queryset_manager = CustomInitQuerySet.as_manager()
def __str__(self):
return "%s %s" % (self.first_name, self.last_name)
class FunPerson(models.Model):
first_name = models.CharField(max_length=30)
last_name = models.CharField(max_length=30)
fun = models.BooleanField(default=True)
favorite_book = models.ForeignKey(
'Book',
models.SET_NULL,
null=True,
related_name='fun_people_favorite_books',
)
favorite_thing_type = models.ForeignKey('contenttypes.ContentType', models.SET_NULL, null=True)
favorite_thing_id = models.IntegerField(null=True)
favorite_thing = GenericForeignKey('favorite_thing_type', 'favorite_thing_id')
objects = FunPeopleManager()
class Book(models.Model):
title = models.CharField(max_length=50)
author = models.CharField(max_length=30)
is_published = models.BooleanField(default=False)
authors = models.ManyToManyField(Person, related_name='books')
fun_authors = models.ManyToManyField(FunPerson, related_name='books')
favorite_things = GenericRelation(
Person,
content_type_field='favorite_thing_type',
object_id_field='favorite_thing_id',
)
fun_people_favorite_things = GenericRelation(
FunPerson,
content_type_field='favorite_thing_type',
object_id_field='favorite_thing_id',
)
published_objects = PublishedBookManager()
annotated_objects = AnnotatedBookManager()
class Meta:
base_manager_name = 'annotated_objects'
class FastCarManager(models.Manager):
def get_queryset(self):
return super().get_queryset().filter(top_speed__gt=150)
class Car(models.Model):
name = models.CharField(max_length=10)
mileage = models.IntegerField()
top_speed = models.IntegerField(help_text="In miles per hour.")
cars = models.Manager()
fast_cars = FastCarManager()
class FastCarAsBase(Car):
class Meta:
proxy = True
base_manager_name = 'fast_cars'
class FastCarAsDefault(Car):
class Meta:
proxy = True
default_manager_name = 'fast_cars'
class RestrictedManager(models.Manager):
def get_queryset(self):
return super().get_queryset().filter(is_public=True)
class RelatedModel(models.Model):
name = models.CharField(max_length=50)
class RestrictedModel(models.Model):
name = models.CharField(max_length=50)
is_public = models.BooleanField(default=False)
related = models.ForeignKey(RelatedModel, models.CASCADE)
objects = RestrictedManager()
plain_manager = models.Manager()
class OneToOneRestrictedModel(models.Model):
name = models.CharField(max_length=50)
is_public = models.BooleanField(default=False)
related = models.OneToOneField(RelatedModel, models.CASCADE)
objects = RestrictedManager()
plain_manager = models.Manager()
class AbstractPerson(models.Model):
abstract_persons = models.Manager()
objects = models.CharField(max_length=30)
class Meta:
abstract = True
class PersonFromAbstract(AbstractPerson):
pass
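# Hedged usage sketch (illustration only; assumes a configured Django project with
# these models migrated). It shows how the custom managers above change the
# initial QuerySet each manager returns; this helper is never called here.
def _example_manager_usage():
    Person.objects.create(first_name='Bugs', last_name='Bunny', fun=True)
    Person.objects.create(first_name='Droopy', last_name='Dog', fun=False)
    assert Person.objects.get_fun_people().count() == 1   # extra manager method
    assert Person.fun_people.count() == 1                  # get_queryset() keeps fun=True
    assert Person.boring_people.count() == 1               # get_queryset() keeps fun=False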
| |
#win_notify
'''
This will set up your computer to enable auditing for the folders specified in a YAML file. It will
then scan the NTFS change journal for changes to those folders and report when it finds one.
'''
from __future__ import absolute_import
from time import mktime, strptime, time
import collections
import datetime
import fnmatch
import logging
import os
import yaml
import salt.ext.six
import salt.loader
import salt.utils.platform
from salt.exceptions import CommandExecutionError
log = logging.getLogger(__name__)
DEFAULT_MASK = ['File create', 'File delete', 'Hard link change', 'Data extend',
'Data overwrite', 'Data truncation', 'Security change', 'Rename: old name',
'Rename: new name']
__virtualname__ = 'pulsar'
CONFIG = None
CONFIG_STALENESS = 0
TOP = None
TOP_STALENESS = 0
def __virtual__():
if not salt.utils.platform.is_windows():
return False, 'This module only works on windows'
win_version = __grains__['osfullname']
if '2012' not in win_version and '2016' not in win_version:
return False, 'This module only works with Server 2012 (Win8) or higher'
return __virtualname__
def process(configfile='salt://hubblestack_pulsar/hubblestack_pulsar_win_config.yaml',
verbose=False):
r'''
Watch the configured files
Example yaml config on fileserver (targeted by configfile option)
.. code-block:: yaml
C:\Users: {}
C:\Windows:
mask:
- 'File Create'
- 'File Delete'
- 'Security Change'
exclude:
- C:\Windows\System32\*
C:\temp: {}
return: splunk_pulsar_return
batch: True
    Note that if 'batch: True', the configured returner must support receiving a list of events, rather than single one-off events.
    The mask list can contain the following events (if no mask is given, the DEFAULT_MASK defined above is used):
1. Basic Info Change A user has either changed file or directory attributes, or one or more time stamps
2. Close The file or directory is closed
3. Compression Change The compression state of the file or directory is changed from or to compressed
4. Data Extend The file or directory is extended (added to)
5. Data Overwrite The data in the file or directory is overwritten
6. Data Truncation The file or directory is truncated
7. EA Change A user made a change to the extended attributes of a file or directory (These NTFS
file system attributes are not accessible to Windows-based applications)
8. Encryption Change The file or directory is encrypted or decrypted
9. File Create The file or directory is created for the first time
10. File Delete The file or directory is deleted
11. Hard Link Change An NTFS file system hard link is added to or removed from the file or directory
12. Indexable Change A user changes the FILE_ATTRIBUTE_NOT_CONTENT_INDEXED attribute (changes the file
or directory from one where content can be indexed to one where content cannot
be indexed, or vice versa)
13. Integrity Change A user changed the state of the FILE_ATTRIBUTE_INTEGRITY_STREAM attribute for the given
stream (On the ReFS file system, integrity streams maintain a checksum of all
data for that stream, so that the contents of the file can be validated during
read or write operations)
14. Named Data Extend The one or more named data streams for a file are extended (added to)
15. Named Data Overwrite The data in one or more named data streams for a file is overwritten
    16. Named Data truncation The one or more named data streams for a file are truncated
17. Object ID Change The object identifier of a file or directory is changed
18. Rename New Name A file or directory is renamed, and the file name in the USN_RECORD_V2 structure is the
new name
19. Rename Old Name The file or directory is renamed, and the file name in the USN_RECORD_V2 structure is
the previous name
20. Reparse Point Change The reparse point that is contained in a file or directory is changed, or a reparse
point is added to or deleted from a file or directory
21. Security Change A change is made in the access rights to a file or directory
22. Stream Change A named stream is added to or removed from a file, or a named stream is renamed
23. Transacted Change The given stream is modified through a TxF transaction
exclude:
        Exclude directories or files from triggering events in the watched directory. **Note that the directory excludes should
not have a trailing slash**
:return:
'''
    config = __salt__['config.get']('hubblestack_pulsar', {})
if isinstance(configfile, list):
config['paths'] = configfile
else:
config['paths'] = [configfile]
config['verbose'] = verbose
global CONFIG_STALENESS
global CONFIG
if config.get('verbose'):
log.debug('Pulsar module called.')
log.debug('Pulsar module config from pillar:\n{0}'.format(config))
ret = []
sys_check = 0
# Get config(s) from filesystem if we don't have them already
if CONFIG and CONFIG_STALENESS < config.get('refresh_frequency', 60):
CONFIG_STALENESS += 1
CONFIG.update(config)
CONFIG['verbose'] = config.get('verbose')
config = CONFIG
else:
if config.get('verbose'):
log.debug('No cached config found for pulsar, retrieving fresh from fileserver.')
new_config = config
if isinstance(config.get('paths'), list):
for path in config['paths']:
if 'salt://' in path:
path = __salt__['cp.cache_file'](path)
if os.path.isfile(path):
with open(path, 'r') as f:
new_config = _dict_update(new_config,
yaml.safe_load(f),
recursive_update=True,
merge_lists=True)
else:
log.error('Path {0} does not exist or is not a file'.format(path))
else:
log.error('Pulsar beacon \'paths\' data improperly formatted. Should be list of paths')
new_config.update(config)
config = new_config
CONFIG_STALENESS = 0
CONFIG = config
if config.get('verbose'):
log.debug('Pulsar beacon config (compiled from config list):\n{0}'.format(config))
if 'win_pulsar_file_map' not in __context__:
__context__['win_pulsar_file_map'] = {}
    # check if cache path contains starting point for 'fsutil usn readjournal'
cache_path = os.path.join(__opts__['cachedir'], 'win_pulsar_usn')
# if starting point doesn't exist, create one then finish until next run
if not os.path.isfile(cache_path):
qj_dict = queryjournal('C:')
with open(cache_path, 'w') as f:
f.write(qj_dict['Next Usn'])
return ret
# check if file is out of date
currentt = time()
file_mtime = os.path.getmtime(cache_path)
threshold = int(__opts__.get('file_threshold', 900))
th_check = currentt - threshold
if th_check > file_mtime:
qj_dict = queryjournal('C:')
with open(cache_path, 'w') as f:
f.write(qj_dict['Next Usn'])
return ret
# read in start location and grab all changes since then
with open(cache_path, 'r') as f:
nusn = f.read()
nusn, jitems = readjournal('C:', nusn)
# create new starting point for next run
with open(cache_path, 'w') as f:
f.write(nusn)
    # filter out unrequested changes
ret_list = usnfilter(jitems, config)
# return list of dictionaries
return ret_list
def queryjournal(drive):
'''
    Gets information on the journal residing on the drive passed into the method
returns a dictionary with the following information:
USN Journal ID
First USN of the journal
Next USN to be written to the journal
        Lowest Valid USN of the journal since the beginning of the volume (this will most likely
        not be in the current journal since it only keeps a few days)
Max USN of the journal (the highest number reachable for a single Journal)
Maximum Size
Allocation Delta
Minimum record version supported
Maximum record version supported
Write range tracking (enabled or disabled)
'''
qjournal = (__salt__['cmd.run']('fsutil usn queryjournal {0}'.format(drive))).split('\r\n')
qj_dict = {}
#format into dictionary
if qjournal:
#remove empty string
qjournal.pop()
for item in qjournal:
qkey, qvalue = item.split(': ')
qj_dict[qkey.strip()] = qvalue.strip()
return qj_dict
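# Hedged illustration (not part of the original module): the loop above splits each
# line of `fsutil usn queryjournal` output on ': '. The sample text below is an
# assumption about the general shape of that output, not real values.
def _example_parse_queryjournal_output():
    sample = ('Usn Journal ID   : 0x01d20ab3c1a2b3c4\r\n'
              'First Usn        : 0x0000000000000000\r\n'
              'Next Usn         : 0x0000000000a0f000\r\n')
    lines = sample.split('\r\n')
    lines.pop()  # drop the trailing empty string, as queryjournal() does
    parsed = {}
    for line in lines:
        qkey, qvalue = line.split(': ')
        parsed[qkey.strip()] = qvalue.strip()
    return parsed  # e.g. parsed['Next Usn'] == '0x0000000000a0f000'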
def readjournal(drive, next_usn=0):
'''
Reads the data inside the journal. Default is to start from the beginning,
but you can pass an argument to start from whichever usn you want
Returns a list of dictionaries with the following information
list:
Individual events
dictionary:
Usn Journal ID (event number)
File Name
File name Length
        Reason (what happened to the file)
Time Stamp
File attributes
File ID
Parent file ID
Source Info
Security ID
Major version
Minor version
Record length
'''
jdata = (__salt__['cmd.run']('fsutil usn readjournal {0} startusn={1}'.format(drive, next_usn))).split('\r\n\r\n')
jd_list = []
pattern = '%m/%d/%Y %H:%M:%S'
removable = {'File name length', 'Major version', 'Minor version', 'Record length', 'Security ID', 'Source info'}
if jdata:
#prime for next delivery
jinfo = jdata[0].split('\r\n')
nusn = jinfo[2].split(' : ')[1]
#remove first item of list
jdata.pop(0)
#format into dictionary
for dlist in jdata:
if '| Close' not in dlist and 'Rename: old name' not in dlist:
continue
jd_dict = {}
i_list = dlist.split('\r\n')
for item in i_list:
if item == '':
continue
dkey, dvalue = item.split(' : ')
if dkey.strip() in removable:
continue
elif dkey.strip() == 'Time stamp':
dvalue = int(mktime(strptime(dvalue.strip(), pattern)))
jd_dict[dkey.strip()] = dvalue
elif dkey.strip() == 'Reason':
rvalues = dvalue.split(': ')
if len(rvalues) > 1:
rvalues = rvalues[1]
rvalues = rvalues.split(' | ')
dvalue = []
for v in rvalues:
if 'Close' in v:
continue
dvalue.append(v)
jd_dict[dkey.strip()] = dvalue
else:
jd_dict[dkey.strip()] = dvalue.strip()
jd_dict['Full path'] = getfilepath(jd_dict['Parent file ID'], jd_dict['File name'], drive)
del jd_dict['File ID'], jd_dict['Parent file ID']
jd_list.append(jd_dict)
return nusn, jd_list
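# Hedged note (not part of the original module): after the parsing above, each
# entry of jd_list is a plain dict. Keys the code relies on are 'File name',
# 'Reason' (a list of reason strings with 'Close' filtered out), 'Time stamp'
# (epoch seconds) and the synthesized 'Full path'; the remaining keys come
# straight from `fsutil usn readjournal` output, with the noisy fields listed in
# `removable` dropped and the file IDs deleted once 'Full path' is resolved.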
def getfilepath(pfid, fname, drive):
'''
Gets file name and path from a File ID
'''
if pfid in __context__['win_pulsar_file_map']:
retpath = __context__['win_pulsar_file_map'][pfid] + '\\' + fname
return retpath
try:
jfullpath = (__salt__['cmd.run']('fsutil file queryfilenamebyid {0} 0x{1}'.format(drive, pfid), ignore_retcode=True)).replace('?\\', '\r\n')
if 'Error:' in jfullpath:
log.debug('Current usn cannot be queried as file')
return None
__context__['win_pulsar_file_map'][pfid] = jfullpath.split('\r\n')[1]
retpath = __context__['win_pulsar_file_map'][pfid] + '\\' + fname
return retpath
except:
log.debug('Current usn item is not a file')
return None
def usnfilter(usn_list, config_paths):
'''
Iterates through each change in the list and throws out any change not specified in the win_pulsar.yaml
'''
ret_usns = []
# iterate through active portion of the NTFS change journal
for usn in usn_list:
# iterate through win_pulsar.yaml (skips all non file paths)
for path in config_paths:
if path in {'win_notify_interval', 'return', 'batch', 'checksum', 'stats', 'paths', 'verbose'}:
continue
if not os.path.exists(path):
log.info('the folder path {} does not exist'.format(path))
continue
if isinstance(config_paths[path], dict):
mask = config_paths[path].get('mask', DEFAULT_MASK)
recurse = config_paths[path].get('recurse', True)
exclude = config_paths[path].get('exclude', False)
sum_type = config_paths[path].get('checksum', 'sha256')
            else:
                mask = DEFAULT_MASK
                recurse = True
                exclude = False
                # mirror the dict branch above so sum_type is always defined
                sum_type = 'sha256'
fpath = usn['Full path']
if fpath is None:
log.debug('The following change made was not a file. {0}'.format(usn))
continue
# check if base path called out in yaml is in file location called out in actual change
if path in fpath:
#check if the type of change that happened matches the list in yaml
freason = usn['Reason'][0]
if freason in mask:
throw_away = False
if exclude is not False:
for p in exclude:
# fnmatch allows for * and ? as wildcards
if fnmatch.fnmatch(fpath, p):
throw_away = True
# if the path matches a path we don't care about, stop iterating through excludes
break
if throw_away is True:
# stop iterating through win_pulsar specified paths since throw away flag was set
break
else:
usn['checksum'] = get_file_hash(fpath, sum_type)
usn['checksum_type'] = sum_type
usn['tag'], _ = os.path.split(fpath)
ret_usns.append(usn)
# don't keep checking other paths in yaml since we already found a match
break
else:
continue
# don't keep checking other paths in yaml since we already found a match
break
else:
continue
return ret_usns
def get_file_hash(usn_file, checksum):
'''
Simple function to grab the hash for each file that has been flagged
'''
try:
hashy = __salt__['file.get_hash']('{0}'.format(usn_file), form=checksum)
return hashy
except:
return ''
def canary(change_file=None):
'''
Simple module to change a file to trigger a FIM event (daily, etc)
THE SPECIFIED FILE WILL BE CREATED AND DELETED
Defaults to CONF_DIR/fim_canary.tmp, i.e. /etc/hubble/fim_canary.tmp
'''
if change_file is None:
conf_dir = os.path.dirname(__opts__['conf_file'])
change_file = os.path.join(conf_dir, 'fim_canary.tmp')
__salt__['file.touch'](change_file)
os.remove(change_file)
def _dict_update(dest, upd, recursive_update=True, merge_lists=False):
'''
Recursive version of the default dict.update
Merges upd recursively into dest
If recursive_update=False, will use the classic dict.update, or fall back
on a manual merge (helpful for non-dict types like FunctionWrapper)
If merge_lists=True, will aggregate list object types instead of replace.
This behavior is only activated when recursive_update=True. By default
merge_lists=False.
'''
if (not isinstance(dest, collections.Mapping)) \
or (not isinstance(upd, collections.Mapping)):
raise TypeError('Cannot update using non-dict types in dictupdate.update()')
updkeys = list(upd.keys())
if not set(list(dest.keys())) & set(updkeys):
recursive_update = False
if recursive_update:
for key in updkeys:
val = upd[key]
try:
dest_subkey = dest.get(key, None)
except AttributeError:
dest_subkey = None
if isinstance(dest_subkey, collections.Mapping) \
and isinstance(val, collections.Mapping):
ret = _dict_update(dest_subkey, val, merge_lists=merge_lists)
dest[key] = ret
elif isinstance(dest_subkey, list) \
and isinstance(val, list):
if merge_lists:
dest[key] = dest.get(key, []) + val
else:
dest[key] = upd[key]
else:
dest[key] = upd[key]
return dest
else:
try:
for k in upd.keys():
dest[k] = upd[k]
except AttributeError:
# this mapping is not a dict
for k in upd:
dest[k] = upd[k]
return dest
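# Hedged illustration (not part of the original module) of how _dict_update merges
# nested dicts and, with merge_lists=True, concatenates lists instead of replacing
# them. The values are arbitrary and purely for demonstration.
def _example_dict_update():
    dest = {'paths': {'C:\\Windows': {'mask': ['File create']}}, 'verbose': False}
    upd = {'paths': {'C:\\Windows': {'mask': ['File delete']}}, 'batch': True}
    merged = _dict_update(dest, upd, recursive_update=True, merge_lists=True)
    # merged['paths']['C:\\Windows']['mask'] == ['File create', 'File delete']
    # merged['batch'] is True and merged['verbose'] is False
    return merged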
def top(topfile='salt://hubblestack_pulsar/win_top.pulsar',
verbose=False):
'''
Execute pulsar using a top.pulsar file to decide which configs to use for
this host.
The topfile should be formatted like this:
.. code-block:: yaml
pulsar:
'<salt compound match identifying host(s)>':
- list.of.paths
- using.dots.as.directory.separators
Paths in the topfile should be relative to `salt://hubblestack_pulsar`, and
the .yaml should not be included.
'''
configs = get_top_data(topfile)
configs = ['salt://hubblestack_pulsar/' + config.replace('.','/') + '.yaml'
for config in configs]
return process(configs, verbose=verbose)
def get_top_data(topfile):
'''
Cache the topfile and process the list of configs this host should use.
'''
# Get topdata from filesystem if we don't have them already
global TOP
global TOP_STALENESS
if TOP and TOP_STALENESS < 60:
TOP_STALENESS += 1
topdata = TOP
else:
log.debug('Missing/stale cached topdata found for pulsar, retrieving fresh from fileserver.')
topfile = __salt__['cp.cache_file'](topfile)
try:
with open(topfile) as handle:
topdata = yaml.safe_load(handle)
except Exception as e:
raise CommandExecutionError('Could not load topfile: {0}'.format(e))
if not isinstance(topdata, dict) or 'pulsar' not in topdata or \
not(isinstance(topdata['pulsar'], dict)):
raise CommandExecutionError('Pulsar topfile not formatted correctly')
topdata = topdata['pulsar']
TOP = topdata
TOP_STALENESS = 0
ret = []
    for match, data in topdata.items():
if __salt__['match.compound'](match):
ret.extend(data)
return ret
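# Hedged example topfile (illustration only; the compound match and the config
# name are assumptions, not shipped defaults):
#
#   pulsar:
#     'G@osfullname:*Server 2016*':
#       - windows.servers
#
# On matching minions get_top_data() returns ['windows.servers'], and top() then
# feeds salt://hubblestack_pulsar/windows/servers.yaml to process().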
| |
from fooster.web import web
import pytest
test_key = 'Magical'
test_value = 'header'
test_header = test_key + ': ' + test_value + '\r\n'
poor_key = 'not'
poor_value = 'good'
poor_header = poor_key + ':' + poor_value + '\r\n'
good_header = poor_key + ': ' + poor_value + '\r\n'
case_key = 'wEIrd'
case_key_title = case_key.title()
case_value = 'cAse'
case_header = case_key + ': ' + case_value + '\r\n'
case_header_test = case_key + ': ' + test_value + '\r\n'
nonstr_key = 6
nonstr_value = None
def test_add_get():
headers = web.HTTPHeaders()
headers.add(test_header)
assert headers.get(test_key) == test_value
def test_add_getlist():
headers = web.HTTPHeaders()
headers.add(test_header)
assert headers.getlist(test_key) == [test_value]
def test_add_getitem():
headers = web.HTTPHeaders()
headers.add(test_header)
assert headers[test_key] == test_value
def test_getitem_empty():
headers = web.HTTPHeaders()
with pytest.raises(KeyError):
headers[test_key]
def test_getlist_empty():
headers = web.HTTPHeaders()
with pytest.raises(KeyError):
headers.getlist(test_key)
def test_getlist_default():
headers = web.HTTPHeaders()
assert headers.getlist(test_key, []) == []
def test_set_remove():
headers = web.HTTPHeaders()
headers.set(test_key, test_value)
assert headers.get(test_key) == test_value
headers.remove(test_key)
def test_set_multiple():
headers = web.HTTPHeaders()
headers.set(test_key, test_value)
headers.set(test_key, test_value)
assert headers.get(test_key) == test_value
assert headers.getlist(test_key) == [test_value] * 2
def test_set_overwrite():
headers = web.HTTPHeaders()
headers.set(test_key, test_value, True)
headers.set(test_key, test_value, True)
assert headers.get(test_key) == test_value
assert headers.getlist(test_key) == [test_value]
def test_setitem_delitem():
headers = web.HTTPHeaders()
headers[test_key] = test_value
assert headers[test_key] == test_value
del headers[test_key]
def test_remove_empty():
headers = web.HTTPHeaders()
with pytest.raises(KeyError):
headers.remove(test_key)
def test_delitem_empty():
headers = web.HTTPHeaders()
with pytest.raises(KeyError):
del headers[test_key]
def test_retrieve():
headers = web.HTTPHeaders()
headers.set(test_key, test_value)
assert headers.retrieve(test_key) == test_header
def test_len():
headers = web.HTTPHeaders()
headers.set(test_key, test_value)
assert len(headers) == 1
headers.set(poor_key, poor_value)
assert len(headers) == 2
def test_multiple_add_get_len_retrieve():
headers = web.HTTPHeaders()
headers.add(case_header)
assert len(headers) == 1
assert headers.get(case_key) == case_value
assert headers.getlist(case_key) == [case_value]
assert headers.retrieve(case_key) == case_header
headers.add(case_header)
assert len(headers) == 1
assert headers.get(case_key) == case_value
assert headers.getlist(case_key) == [case_value] * 2
assert headers.retrieve(case_key) == case_header + case_header
headers.add(case_header_test)
assert len(headers) == 1
assert headers.get(case_key) == test_value
assert headers.getlist(case_key) == [case_value] * 2 + [test_value]
assert headers.retrieve(case_key) == case_header + case_header + case_header_test
def test_multiple_set_get_len_retrieve():
headers = web.HTTPHeaders()
headers.set(case_key, case_value)
assert len(headers) == 1
assert headers.get(case_key) == case_value
assert headers.getlist(case_key) == [case_value]
assert headers.retrieve(case_key) == case_header
headers.set(case_key, case_value)
assert len(headers) == 1
assert headers.get(case_key) == case_value
assert headers.getlist(case_key) == [case_value] * 2
assert headers.retrieve(case_key) == case_header + case_header
headers.set(case_key, test_value)
assert len(headers) == 1
assert headers.get(case_key) == test_value
assert headers.getlist(case_key) == [case_value] * 2 + [test_value]
assert headers.retrieve(case_key) == case_header + case_header + case_header_test
def test_clear():
headers = web.HTTPHeaders()
headers.set(test_key, test_value)
headers.set(poor_key, poor_value)
headers.clear()
assert len(headers) == 0
def test_case():
headers = web.HTTPHeaders()
headers.set(case_key, case_value)
assert headers.get(case_key_title) == case_value
assert headers.retrieve(case_key_title) == case_header
def test_iter():
headers = web.HTTPHeaders()
headers.set(test_key, test_value)
headers.set(poor_key, poor_value)
headers.set(case_key, case_value)
header_list = []
for header in headers:
header_list.append(header)
assert test_header in header_list
assert good_header in header_list
assert case_header in header_list
def test_contains():
headers = web.HTTPHeaders()
headers.set(test_key, test_value)
headers.set(poor_key, poor_value)
headers.set(case_key, case_value)
assert test_key in headers
assert poor_key in headers
assert case_key in headers
assert test_key.upper() in headers
assert poor_key.upper() in headers
assert case_key.upper() in headers
assert test_key.lower() in headers
assert poor_key.lower() in headers
assert case_key.lower() in headers
def test_poor_header():
headers = web.HTTPHeaders()
headers.add(poor_header)
assert headers.get(poor_key) == poor_value
def test_set_key_nonstr():
headers = web.HTTPHeaders()
with pytest.raises(TypeError):
headers.set(nonstr_key, test_value)
def test_set_value_nonstr():
headers = web.HTTPHeaders()
with pytest.raises(TypeError):
headers.set(test_key, nonstr_value)
| |
#!/usr/bin/env python
#
# Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Check Arimo/Tinos/Cousine fonts for metric compatibility with their
inspiration."""
import argparse
from os import path
import re
from fontTools import ttLib
import cldr_data
import font_data
import lint_config
import render
import unicode_data
name_re = re.compile(r"(.+)-(.*)\.ttf")
family_map = {"Arimo": "Arial", "Tinos": "Times New Roman", "Cousine": "Courier New"}
style_map = {
"Regular": "",
"Bold": " Bold",
"Italic": " Italic",
"BoldItalic": " Bold Italic",
}
_excluded_chars = None
def _get_excluded_chars():
# we skip Arabic and Hebrew characters
global _excluded_chars
if not _excluded_chars:
arabic_ranges = (
"[\u0600-\u06ff \u0750-\u077f \u08a0-\u08ff \ufb50-\ufdff \ufe70-\ufefc]"
)
arabic_set = frozenset(
[ord(cp) for cp in cldr_data.unicode_set_string_to_list(arabic_ranges)]
)
# includes sheqel sign, omit?
hebrew_ranges = "[\u0590-\u05ff \u20aa \ufb1d-\ufb4f]"
hebrew_set = frozenset(
[ord(cp) for cp in cldr_data.unicode_set_string_to_list(hebrew_ranges)]
)
armenian_ranges = "[\u0530-\u058f \ufb13-\ufb17]"
armenian_set = frozenset(
[ord(cp) for cp in cldr_data.unicode_set_string_to_list(armenian_ranges)]
)
private_use_set = frozenset(range(0xE000, 0xF900))
_excluded_chars = frozenset(
arabic_set | hebrew_set | armenian_set | private_use_set
)
return _excluded_chars
def _get_class_defs(font):
try:
return font["GDEF"].table.GlyphClassDef.classDefs
except (KeyError, AttributeError):
return None
class FontCompare(object):
test_names = frozenset(["cmap", "advance", "hhea", "OS/2", "bounds", "gdef"])
@staticmethod
def check_test_list(test_list):
if not test_list:
return FontCompare.test_names
enabled_tests = None
failed = False
for test in test_list:
if test not in FontCompare.test_names:
print("unknown test: '%s'" % test)
failed = True
if failed:
print("tests are: %s" % (",".join(sorted(FontCompare.test_names))))
return None
return frozenset(test_list)
@staticmethod
def get_codepoints(range_list):
if not range_list:
return None
return lint_config.parse_int_ranges(range_list, True)
def __init__(
self, target, test, incremental, emit_config, ignored_cp, only_cp, enabled_tests
):
self.target = target
self.test = test
self.incremental = incremental # target is different version of same file
self.emit_config = emit_config # generate config lines
self.enabled_tests = enabled_tests or FontCompare.test_names
self.target_cmap = font_data.get_cmap(target)
self.test_cmap = font_data.get_cmap(test)
target_chars = set(self.target_cmap.keys()) - _get_excluded_chars()
if ignored_cp:
target_chars -= ignored_cp
if only_cp:
target_chars &= only_cp
self.target_chars = target_chars
# Assume version has two decimal places, which MTI fonts do but Adobe's do not.
target_version = font_data.printable_font_revision(target)
test_version = font_data.printable_font_revision(test)
target_names = font_data.get_name_records(target)
test_names = font_data.get_name_records(test)
self._log(
"target name: %s %s, version: %s"
% (target_names[1], target_names[2], target_version)
)
self._log(
"test name: %s %s, version %s"
% (test_names[1], test_names[2], test_version)
)
if emit_config:
font_family = test_names[1]
font_subfamily = test_names[2].replace(" ", "")
self._config(
"name like %s; weight like %s; version == %s"
% (font_family, font_subfamily, test_version)
)
def _log(self, msg):
"""Write a message that should not go to config output."""
if not self.emit_config:
print(msg)
def _logerr(self, msg):
"""Write an error that should not go to config output."""
# this is an error, but lint doesn't check for it, so no point in emitting a comment.
if not self.emit_config:
print(msg)
def _err(self, msg):
"""Write a message that should go to config as a comment, or just be logged."""
if self.emit_config:
print("# " + msg)
else:
print(msg)
def _config(self, msg):
"""Write a message that should go to config."""
if self.emit_config:
print(msg)
def _check_attribute(self, target_obj, test_obj, attr):
target_value = getattr(target_obj, attr)
test_value = getattr(test_obj, attr)
if target_value == test_value:
return None
return attr, test_value, target_value
def _check_attributes(self, target_obj, test_obj, attr_list):
result = []
for a in attr_list:
r = self._check_attribute(target_obj, test_obj, a)
if r:
result.append(r)
return result
def _test_gid(self, cp):
return self.test.getGlyphID(self.test_cmap[cp], requireReal=True)
def _target_gid(self, cp):
return self.target.getGlyphID(self.target_cmap[cp], requireReal=True)
def _cp_error_msg(self, cp, test_msg, target_msg):
test_gid = self._test_gid(cp)
target_gid = self._target_gid(cp)
if self.emit_config:
# omit character name for brevity
return "cp %04x (gid %d) %s but target (gid %d) %s" % (
cp,
test_gid,
test_msg,
target_gid,
target_msg,
)
else:
cp_name = unicode_data.name(cp)
return "cp %04x (gid %d) %s but target (gid %d) %s (%s)" % (
cp,
test_gid,
test_msg,
target_gid,
target_msg,
cp_name,
)
def _skip(self, test_name):
if test_name in self.enabled_tests:
self._log("Check %s" % test_name)
return False
return True
def check_cmaps(self):
if self._skip("cmap"):
return
self._log(
"target cmap size: %d, test cmap size: %d"
% (len(self.target_cmap), len(self.test_cmap))
)
missing_chars = self.target_chars - set(self.test_cmap.keys())
if missing_chars:
self._logerr("Missing %d chars" % len(missing_chars))
self._logerr(lint_config.write_int_ranges(missing_chars, True))
def check_advances(self):
if self._skip("advance"):
return
target_metrics = self.target["hmtx"].metrics
test_metrics = self.test["hmtx"].metrics
differences = []
for cp in self.target_chars:
if cp not in self.test_cmap:
continue
target_advance = target_metrics[self.target_cmap[cp]][0]
test_advance = test_metrics[self.test_cmap[cp]][0]
if target_advance != test_advance:
differences.append((cp, test_advance, target_advance))
# No current lint test requires specific advances of arbitrary glyphs.
if differences:
self._logerr("%d codepoints have advance differences" % len(differences))
for cp, ta, fa in sorted(differences):
self._logerr(
self._cp_error_msg(cp, "advance is %d" % fa, "advance is %d" % ta)
)
def check_hhea(self):
if self._skip("hhea"):
return
target_hhea = self.target["hhea"]
test_hhea = self.test["hhea"]
failed_attrs = self._check_attributes(
target_hhea, test_hhea, ["ascent", "descent", "lineGap"]
)
if not failed_attrs:
self._config("disable head/hhea")
return
for attr, test_val, target_val in sorted(failed_attrs):
if self.emit_config:
print("enable head/hhea/%s" % attr.lower())
else:
print(
"font hhea %s was %d but target was %d"
% (attr, test_val, target_val)
)
def check_os2(self):
if self._skip("OS/2"):
return
target_os2 = self.target["OS/2"]
test_os2 = self.test["OS/2"]
attr_name_map = {
"sTypoAscender": "ascender",
"sTypoDescender": "descender",
"sTypoLineGap": "linegap",
}
failed_attrs = self._check_attributes(
target_os2, test_os2, attr_name_map.keys()
)
if not failed_attrs:
self._config("disable head/os2")
return
for attr, test_val, target_val in sorted(failed_attrs):
if self.emit_config:
print("enable head/os2/%s" % attr_name_map[attr])
else:
print(
"font OS/2 %s was %d but target was %d"
% (attr, test_val, target_val)
)
def check_glyph_bounds(self):
# Don't compare the actual bounds, but whether they exceed the limits when the target
# font does not.
if self._skip("bounds"):
return
target_glyphset = self.target.getGlyphSet()
test_glyphset = self.test.getGlyphSet()
target_max = self.target["OS/2"].usWinAscent
test_max = self.test["OS/2"].usWinAscent
target_min = -self.target["OS/2"].usWinDescent
test_min = -self.test["OS/2"].usWinDescent
# We need to align the glyph ids, but once we get past the cmap it gets more and more
# complicated to do this. For now we'll just check the directly mapped glyphs.
differences = []
for cp in self.target_chars:
if cp not in self.test_cmap:
continue
target_glyph_name = self.target_cmap[cp]
test_glyph_name = self.test_cmap[cp]
target_ttglyph = target_glyphset[target_glyph_name]
test_ttglyph = test_glyphset[test_glyph_name]
target_ymin, target_ymax = render.get_glyph_cleaned_extents(
target_ttglyph, target_glyphset
)
test_ymin, test_ymax = render.get_glyph_cleaned_extents(
test_ttglyph, test_glyphset
)
target_exceeds_max = target_ymax > target_max
target_exceeds_min = target_ymin < target_min
test_exceeds_max = test_ymax > test_max
test_exceeds_min = test_ymin < test_min
max_failure = test_exceeds_max and not target_exceeds_max
min_failure = test_exceeds_min and not target_exceeds_min
if max_failure or min_failure:
differences.append((cp, max_failure, test_ymax, min_failure, test_ymin))
if not differences:
self._config("disable bounds/glyph")
return
self._err("%d glyphs have bounds errors" % len(differences))
self._err("glyph bounds limits max %d, min %d" % (test_max, test_min))
max_failures = []
min_failures = []
for cp, max_failure, ymax, min_failure, ymin in sorted(differences):
if max_failure:
self._err(self._cp_error_msg(cp, "above max (%d)" % ymax, "is not"))
if self.emit_config:
test_gid = self._test_gid(cp)
max_failures.append(test_gid)
if min_failure:
self._err(self._cp_error_msg(cp, "below min (%d)" % ymin, "is not"))
if self.emit_config:
test_gid = self._test_gid(cp)
min_failures.append(test_gid)
if self.emit_config:
if max_failures:
self._config(
"enable bounds/glyph/ymax only gid %s"
% lint_config.write_int_ranges(max_failures, False)
)
if min_failures:
self._config(
"enable bounds/glyph/ymin only gid %s"
% lint_config.write_int_ranges(min_failures, False)
)
def _check_gdef_class_defs(self, mark_glyphs):
"""Return False if we cannot check classDef-related info."""
self._log("Check gdef classDefs")
target_class_defs = _get_class_defs(self.target)
test_class_defs = _get_class_defs(self.test)
if mark_glyphs:
if not target_class_defs:
self._err("Have mark glyphs, but target does not have classDefs table.")
self._config("exclude /gdef/classdef/not_present")
if not test_class_defs:
self._logerr(
"Have mark glyphs, but test does not have classDefs table."
)
if (target_class_defs is not None) != (test_class_defs is not None):
if target_class_defs:
self._logerr("Target has classDefs but test does not.")
else:
self._logerr("Test has classDefs but target does not.")
return False
return bool(target_class_defs)
def _check_gdef_marks(self, mark_glyphs):
self._log("Check gdef marks")
if not mark_glyphs:
self._log("No mark glyphs in target")
return
target_class_defs = _get_class_defs(self.target)
test_class_defs = _get_class_defs(self.test)
assert target_class_defs and test_class_defs
differences = []
for cp in mark_glyphs:
if not cp in self.test_cmap:
continue
target_glyph = self.target_cmap[cp]
test_glyph = self.test_cmap[cp]
if target_glyph in target_class_defs and test_glyph not in test_class_defs:
differences.append((cp, -1))
else:
target_glyph_class = target_class_defs[target_glyph]
test_glyph_class = test_class_defs[test_glyph]
if target_glyph_class == 3 and test_glyph_class != 3:
differences.append((cp, test_glyph_class))
if differences:
self._err("%d mark glyphs have classDef errors" % len(differences))
missing_list = []
incorrect_list = []
for cp, gc in sorted(differences):
if gc == -1:
self._err(self._cp_error_msg(cp, "has no classDef", "does"))
missing_list.append(cp)
else:
self._err(
self._cp_error_msg(
cp,
"has non-combining-mark glyph class %d" % gc,
"is correct",
)
)
incorrect_list.append(cp)
if missing_list:
self._config(
"enable gdef/classdef/unlisted only cp %s"
% lint_config.write_int_ranges(missing_list, True)
)
if incorrect_list:
self._config(
"enable gdef/classdef/combining_mismatch only cp %s"
% lint_config.write_int_ranges(incorrect_list, True)
)
def _check_gdef_combining(self):
self._log("Check gdef combining")
target_class_defs = _get_class_defs(self.target)
test_class_defs = _get_class_defs(self.test)
assert target_class_defs and test_class_defs
differences = []
for cp in self.target_chars:
if not cp in self.test_cmap:
continue
target_glyph = self.target_cmap[cp]
test_glyph = self.test_cmap[cp]
target_class = target_class_defs.get(target_glyph, -1)
test_class = test_class_defs.get(test_glyph, -1)
if target_class != test_class:
differences.append((cp, test_class, target_class))
if differences:
cp_list = []
self._err("%d glyphs have classDef differences" % len(differences))
for cp, test_class, target_class in sorted(differences):
target_msg = (
"has class %d" % target_class
if target_class != -1
else "not in classDef"
)
test_msg = (
"has class %d" % test_class
if test_class != -1
else "not in classDef"
)
self._err(self._cp_error_msg(cp, test_msg, target_msg))
cp_list.append(cp)
self._config(
"enable gdef/classdef/not_combining_mismatch only cp %s"
% lint_config.write_int_ranges(cp_list, True)
)
def check_gdef(self):
if self._skip("gdef"):
return
mark_glyphs = [
cp for cp in self.target_chars if unicode_data.category(cp) == "Mn"
]
if self._check_gdef_class_defs(mark_glyphs):
self._check_gdef_marks(mark_glyphs)
self._check_gdef_combining()
def check_all(self):
self.check_cmaps()
self.check_advances()
self.check_hhea()
self.check_os2()
self.check_glyph_bounds()
self.check_gdef()
def check_font(
target_file,
test_file,
incremental_version=False,
emit_config=False,
reverse=False,
ignored_cp=None,
only_cp=None,
enabled_tests=None,
):
target = ttLib.TTFont(target_file)
test = ttLib.TTFont(test_file)
if reverse:
print("reversing comparison")
temp = target
target = test
test = temp
print()
if not emit_config:
print(
"target is previous version"
if incremental_version
else "target is reference font"
)
FontCompare(
target,
test,
incremental_version,
emit_config,
ignored_cp,
only_cp,
enabled_tests,
).check_all()
def get_reference_name_1(name):
m = name_re.match(name)
if not m:
raise ValueError("font name %s does not match expected pattern" % name)
family = m.group(1)
style = m.group(2)
target_family = family_map.get(family)
if not target_family:
raise ValueError("unrecognized font family %s" % family)
target_style = style_map.get(style)
if target_style is None:
raise ValueError("unrecognized style '%s'" % style)
return target_family + target_style + ".ttf"
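# Hedged illustration (not part of the original script) of the mapping produced by
# the regex plus family/style maps above; the file names are taken from those maps,
# not new data, and this helper is never called here.
def _example_reference_names():
    assert get_reference_name_1('Arimo-BoldItalic.ttf') == 'Arial Bold Italic.ttf'
    assert get_reference_name_1('Tinos-Regular.ttf') == 'Times New Roman.ttf'
    # get_reference_name_2 (defined below) maps the same inputs onto the Windows
    # file names, e.g. 'arialbi.ttf' and 'times.ttf'.
    assert get_reference_name_2('Arimo-BoldItalic.ttf') == 'arialbi.ttf'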
_ref_name_2_map = {
"Arimo-Regular.ttf": "arial.ttf",
"Arimo-Bold.ttf": "arialbd.ttf",
"Arimo-Italic.ttf": "ariali.ttf",
"Arimo-BoldItalic.ttf": "arialbi.ttf",
"Cousine-Regular.ttf": "cour.ttf",
"Cousine-Bold.ttf": "courbd.ttf",
"Cousine-Italic.ttf": "couri.ttf",
"Cousine-BoldItalic.ttf": "courbi.ttf",
"Tinos-Regular.ttf": "times.ttf",
"Tinos-Bold.ttf": "timesbd.ttf",
"Tinos-Italic.ttf": "timesi.ttf",
"Tinos-BoldItalic.ttf": "timesbi.ttf",
}
def get_reference_name_2(name):
return _ref_name_2_map.get(name)
def get_target_path(name, target_dir):
target_name = get_reference_name_2(name)
if not target_name:
raise ValueError("could not find target name for %s" % name)
target_path = path.join(target_dir, target_name)
if not path.isfile(target_path):
# fall back
target_name = get_reference_name_1(name)
target_path = path.join(target_dir, target_name)
return target_path
def check_fonts(
target_dir,
fonts,
incremental_version=False,
emit_config=False,
reverse=False,
ignored_cp=None,
only_cp=None,
enabled_tests=None,
):
for font in fonts:
target_name = path.basename(font)
if not incremental_version:
target_path = get_target_path(target_name, target_dir)
else:
target_path = path.join(target_dir, target_name)
if not path.isfile(target_path):
raise ValueError(
"could not find %s in target dir %s" % (target_name, target_dir)
)
check_font(
target_path,
font,
incremental_version,
emit_config,
reverse,
ignored_cp,
only_cp,
enabled_tests,
)
def main():
default_target = "/usr/local/google/home/dougfelt/msfonts"
parser = argparse.ArgumentParser()
parser.add_argument("fonts", metavar="font", nargs="+", help="fonts to check")
parser.add_argument(
"-t",
"--target",
metavar="dir",
help="target font dir (default %s)" % default_target,
default=default_target,
)
parser.add_argument(
"-iv",
"--incremental_version",
help="target font is a previous drop from MTI",
action="store_true",
)
parser.add_argument("-c", "--config", help="emit config spec", action="store_true")
parser.add_argument(
"--test",
metavar="test",
help="test only named tests (%s)" % sorted(FontCompare.test_names),
nargs="+",
)
parser.add_argument(
"-r", "--reverse", help="reverse direction of comparison", action="store_true"
)
parser.add_argument(
"-ic",
"--ignore_codepoints",
metavar="ranges",
help="report no errors on these codepoints (hex ranges separated by space)",
)
parser.add_argument(
"-oc",
"--only_codepoints",
metavar="ranges",
help="only report errors on these codepoints (hex ranges separated by space)",
)
args = parser.parse_args()
enabled_tests = FontCompare.check_test_list(args.test)
if not enabled_tests:
return
ignored_cp = FontCompare.get_codepoints(args.ignore_codepoints)
only_cp = FontCompare.get_codepoints(args.only_codepoints)
check_fonts(
args.target,
args.fonts,
args.incremental_version,
args.config,
args.reverse,
ignored_cp,
only_cp,
enabled_tests,
)
if __name__ == "__main__":
main()
| |
import subprocess
import jinja2
import unittest
import os
import shutil
import json
import signal
import sys
import time
import yaml
from datetime import datetime, timedelta
from .compose import ComposeMixin
BEAT_REQUIRED_FIELDS = ["@timestamp",
"beat.name", "beat.hostname", "beat.version"]
INTEGRATION_TESTS = os.environ.get('INTEGRATION_TESTS', False)
class TimeoutError(Exception):
pass
class Proc(object):
"""
Slim wrapper on subprocess.Popen that redirects
both stdout and stderr to a file on disk and makes
sure to stop the process and close the output file when
the object gets collected.
"""
def __init__(self, args, outputfile):
self.args = args
self.output = open(outputfile, "ab")
self.stdin_read, self.stdin_write = os.pipe()
def start(self):
if sys.platform.startswith("win"):
self.proc = subprocess.Popen(
self.args,
stdin=self.stdin_read,
stdout=self.output,
stderr=subprocess.STDOUT,
bufsize=0,
creationflags=subprocess.CREATE_NEW_PROCESS_GROUP)
else:
self.proc = subprocess.Popen(
self.args,
stdin=self.stdin_read,
stdout=self.output,
stderr=subprocess.STDOUT,
bufsize=0,
)
return self.proc
def kill(self):
if sys.platform.startswith("win"):
# proc.terminate on Windows does not initiate a graceful shutdown
            # through the process's signal handlers; it just kills it hard. So
            # this sends a SIGBREAK. You cannot send a SIGINT (CTRL_C_EVENT)
# to a process group in Windows, otherwise Ctrl+C would be
# sent.
self.proc.send_signal(signal.CTRL_BREAK_EVENT)
else:
self.proc.terminate()
def wait(self):
try:
return self.proc.wait()
finally:
self.output.close()
def check_wait(self, exit_code=0):
actual_exit_code = self.wait()
assert actual_exit_code == exit_code, "Expected exit code to be %d, but it was %d" % (
exit_code, actual_exit_code)
return actual_exit_code
def kill_and_wait(self):
self.kill()
os.close(self.stdin_write)
return self.wait()
def check_kill_and_wait(self, exit_code=0):
self.kill()
os.close(self.stdin_write)
return self.check_wait(exit_code=exit_code)
def __del__(self):
# Ensure the process is stopped.
try:
self.proc.terminate()
self.proc.kill()
except:
pass
# Ensure the output is closed.
try:
self.output.close()
except:
pass
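# Illustrative usage sketch (not part of the framework; the command and log path
# are made up): run a short-lived command through Proc and assert a clean exit.
#
#   proc = Proc(["echo", "hello"], "/tmp/example_proc.log")
#   proc.start()
#   proc.check_wait(exit_code=0)   # blocks until the process exits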
class TestCase(unittest.TestCase, ComposeMixin):
@classmethod
def setUpClass(self):
# Path to test binary
if not hasattr(self, 'beat_name'):
self.beat_name = "beat"
if not hasattr(self, 'beat_path'):
self.beat_path = "."
# Path to test binary
if not hasattr(self, 'test_binary'):
self.test_binary = os.path.abspath(self.beat_path + "/" + self.beat_name + ".test")
# Create build path
build_dir = self.beat_path + "/build"
self.build_path = build_dir + "/system-tests/"
# Start the containers needed to run these tests
self.compose_up()
@classmethod
def tearDownClass(self):
self.compose_down()
def run_beat(self,
cmd=None,
config=None,
output=None,
logging_args=["-e", "-v", "-d", "*"],
extra_args=[],
exit_code=None):
"""
Executes beat.
Waits for the process to finish before returning to
the caller.
"""
proc = self.start_beat(cmd=cmd, config=config, output=output,
logging_args=logging_args,
extra_args=extra_args)
        if exit_code is not None:
return proc.check_wait(exit_code)
return proc.wait()
def start_beat(self,
cmd=None,
config=None,
output=None,
logging_args=["-e", "-v", "-d", "*"],
extra_args=[]):
"""
Starts beat and returns the process handle. The
caller is responsible for stopping / waiting for the
Proc instance.
"""
# Init defaults
if cmd is None:
cmd = self.test_binary
if config is None:
config = self.beat_name + ".yml"
if output is None:
output = self.beat_name + ".log"
args = [cmd,
"-systemTest",
"-test.coverprofile",
os.path.join(self.working_dir, "coverage.cov"),
"-path.home", os.path.normpath(self.working_dir),
"-c", os.path.join(self.working_dir, config),
]
if logging_args:
args.extend(logging_args)
if extra_args:
args.extend(extra_args)
proc = Proc(args, os.path.join(self.working_dir, output))
proc.start()
return proc
def render_config_template(self, template_name=None,
output=None, **kargs):
# Init defaults
if template_name is None:
template_name = self.beat_name
template_path = "./tests/system/config/" + template_name + ".yml.j2"
if output is None:
output = self.beat_name + ".yml"
template = self.template_env.get_template(template_path)
kargs["beat"] = self
output_str = template.render(**kargs)
with open(os.path.join(self.working_dir, output), "wb") as f:
f.write(output_str.encode('utf8'))
# Returns output as JSON object with flattened fields (. notation)
def read_output(self,
output_file=None,
required_fields=None):
# Init defaults
if output_file is None:
output_file = "output/" + self.beat_name
jsons = []
with open(os.path.join(self.working_dir, output_file), "r") as f:
for line in f:
if len(line) == 0 or line[len(line) - 1] != "\n":
# hit EOF
break
try:
jsons.append(self.flatten_object(json.loads(
line, object_pairs_hook=self.json_raise_on_duplicates), []))
except:
print("Fail to load the json {}".format(line))
raise
self.all_have_fields(jsons, required_fields or BEAT_REQUIRED_FIELDS)
return jsons
# Returns output as JSON object
def read_output_json(self, output_file=None):
# Init defaults
if output_file is None:
output_file = "output/" + self.beat_name
jsons = []
with open(os.path.join(self.working_dir, output_file), "r") as f:
for line in f:
if len(line) == 0 or line[len(line) - 1] != "\n":
# hit EOF
break
event = json.loads(line, object_pairs_hook=self.json_raise_on_duplicates)
del event['@metadata']
jsons.append(event)
return jsons
def json_raise_on_duplicates(self, ordered_pairs):
"""Reject duplicate keys. To be used as a custom hook in JSON unmarshaling
to error out in case of any duplicates in the keys."""
d = {}
for k, v in ordered_pairs:
if k in d:
raise ValueError("duplicate key: %r" % (k,))
else:
d[k] = v
return d
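    # Illustrative behaviour (payload is hypothetical): with the hook above,
    #   json.loads('{"a": 1, "a": 2}', object_pairs_hook=self.json_raise_on_duplicates)
    # raises ValueError("duplicate key: 'a'") instead of silently keeping the last
    # value, which is what read_output relies on to surface malformed events.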
def copy_files(self, files, source_dir="files/"):
for file_ in files:
shutil.copy(os.path.join(source_dir, file_),
self.working_dir)
def setUp(self):
self.template_env = jinja2.Environment(
loader=jinja2.FileSystemLoader(self.beat_path)
)
# create working dir
self.working_dir = os.path.abspath(os.path.join(
self.build_path + "run", self.id()))
if os.path.exists(self.working_dir):
shutil.rmtree(self.working_dir)
os.makedirs(self.working_dir)
fields_yml = os.path.join(self.beat_path, "fields.yml")
# Only add it if it exists
if os.path.isfile(fields_yml):
shutil.copyfile(fields_yml, os.path.join(self.working_dir, "fields.yml"))
try:
# update the last_run link
if os.path.islink(self.build_path + "last_run"):
os.unlink(self.build_path + "last_run")
os.symlink(self.build_path + "run/{}".format(self.id()),
self.build_path + "last_run")
except:
# symlink is best effort and can fail when
# running tests in parallel
pass
def wait_until(self, cond, max_timeout=10, poll_interval=0.1, name="cond"):
"""
Waits until the cond function returns true,
or until the max_timeout is reached. Calls the cond
function every poll_interval seconds.
If the max_timeout is reached before cond() returns
true, an exception is raised.
"""
start = datetime.now()
while not cond():
if datetime.now() - start > timedelta(seconds=max_timeout):
raise TimeoutError("Timeout waiting for '{}' to be true. ".format(name) +
"Waited {} seconds.".format(max_timeout))
time.sleep(poll_interval)
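    # Illustrative usage (the log message is hypothetical): poll every 0.1 seconds
    # until the beat reports a published event, failing the test after 15 seconds.
    #
    #   self.wait_until(lambda: self.log_contains("PublishEvents: 1 events"),
    #                   max_timeout=15, name="publish")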
def get_log(self, logfile=None):
"""
Returns the log as a string.
"""
if logfile is None:
logfile = self.beat_name + ".log"
with open(os.path.join(self.working_dir, logfile), 'r') as f:
data = f.read()
return data
def wait_log_contains(self, msg, logfile=None,
max_timeout=10, poll_interval=0.1,
name="log_contains"):
self.wait_until(
cond=lambda: self.log_contains(msg, logfile),
max_timeout=max_timeout,
poll_interval=poll_interval,
name=name)
def log_contains(self, msg, logfile=None):
"""
        Returns true if the given logfile contains the given message.
Note that the msg must be present in a single line.
"""
return self.log_contains_count(msg, logfile) > 0
def log_contains_count(self, msg, logfile=None):
"""
Returns the number of appearances of the given string in the log file
"""
counter = 0
# Init defaults
if logfile is None:
logfile = self.beat_name + ".log"
try:
with open(os.path.join(self.working_dir, logfile), "r") as f:
for line in f:
if line.find(msg) >= 0:
counter = counter + 1
except IOError:
counter = -1
return counter
def output_lines(self, output_file=None):
""" Count number of lines in a file."""
if output_file is None:
output_file = "output/" + self.beat_name
try:
with open(os.path.join(self.working_dir, output_file), "r") as f:
return sum([1 for line in f])
except IOError:
return 0
def output_has(self, lines, output_file=None):
"""
Returns true if the output has a given number of lines.
"""
# Init defaults
if output_file is None:
output_file = "output/" + self.beat_name
try:
with open(os.path.join(self.working_dir, output_file), "r") as f:
return len([1 for line in f]) == lines
except IOError:
return False
def output_has_message(self, message, output_file=None):
"""
Returns true if the output has the given message field.
"""
try:
return any(line for line in self.read_output(output_file=output_file, required_fields=["message"])
if line.get("message") == message)
except (IOError, TypeError):
return False
def all_have_fields(self, objs, fields):
"""
Checks that the given list of output objects have
all the given fields.
Raises Exception if not true.
"""
for field in fields:
if not all([field in o for o in objs]):
raise Exception("Not all objects have a '{}' field"
.format(field))
def all_have_only_fields(self, objs, fields):
"""
Checks if the given list of output objects have all
and only the given fields.
Raises Exception if not true.
"""
self.all_have_fields(objs, fields)
self.all_fields_are_expected(objs, fields)
def all_fields_are_expected(self, objs, expected_fields,
dict_fields=[]):
"""
Checks that all fields in the objects are from the
given list of expected fields.
"""
for o in objs:
for key in o.keys():
known = key in dict_fields or key in expected_fields
ismeta = key.startswith('@metadata.')
if not(known or ismeta):
raise Exception("Unexpected key '{}' found"
.format(key))
def load_fields(self, fields_doc=None):
"""
Returns a list of fields to expect in the output dictionaries
and a second list that contains the fields that have a
dictionary type.
Reads these lists from the fields documentation.
"""
if fields_doc is None:
fields_doc = self.beat_path + "/_meta/fields.generated.yml"
def extract_fields(doc_list, name):
fields = []
dictfields = []
if doc_list is None:
return fields, dictfields
for field in doc_list:
# Chain together names
if name != "":
newName = name + "." + field["name"]
else:
newName = field["name"]
if field.get("type") == "group":
subfields, subdictfields = extract_fields(field["fields"], newName)
fields.extend(subfields)
dictfields.extend(subdictfields)
else:
fields.append(newName)
if field.get("type") in ["object", "geo_point"]:
dictfields.append(newName)
return fields, dictfields
# Not all beats have a fields.generated.yml. Fall back to fields.yml
if not os.path.isfile(fields_doc):
fields_doc = self.beat_path + "/_meta/fields.yml"
# TODO: Make fields_doc path more generic to work with beat-generator
with open(fields_doc, "r") as f:
path = os.path.abspath(os.path.dirname(__file__) + "../../../../_meta/fields.generated.yml")
if not os.path.isfile(path):
path = os.path.abspath(os.path.dirname(__file__) + "../../../../_meta/fields.common.yml")
with open(path) as f2:
content = f2.read()
#content = "fields:\n"
content += f.read()
doc = yaml.load(content)
fields = []
dictfields = []
for item in doc:
subfields, subdictfields = extract_fields(item["fields"], "")
fields.extend(subfields)
dictfields.extend(subdictfields)
return fields, dictfields
def flatten_object(self, obj, dict_fields, prefix=""):
result = {}
for key, value in obj.items():
if isinstance(value, dict) and prefix + key not in dict_fields:
new_prefix = prefix + key + "."
result.update(self.flatten_object(value, dict_fields,
new_prefix))
else:
result[prefix + key] = value
return result
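    # Illustrative example: flatten_object({"a": {"b": 1}}, dict_fields=[]) returns
    # {"a.b": 1}, while flatten_object({"a": {"b": 1}}, dict_fields=["a"]) leaves
    # the nested dict intact and returns {"a": {"b": 1}}.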
def copy_files(self, files, source_dir="files/", target_dir=""):
if target_dir:
target_dir = os.path.join(self.working_dir, target_dir)
else:
target_dir = self.working_dir
for file_ in files:
shutil.copy(os.path.join(source_dir, file_),
target_dir)
def output_count(self, pred, output_file=None):
"""
Returns true if the output line count predicate returns true
"""
# Init defaults
if output_file is None:
output_file = "output/" + self.beat_name
try:
with open(os.path.join(self.working_dir, output_file), "r") as f:
return pred(len([1 for line in f]))
except IOError:
return False
def get_elasticsearch_url(self):
"""
Returns an elasticsearch.Elasticsearch instance built from the
env variables like the integration tests.
"""
return "http://{host}:{port}".format(
host=os.getenv("ES_HOST", "localhost"),
port=os.getenv("ES_PORT", "9200"),
)
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Various high level TF models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from tensorflow.contrib import rnn as contrib_rnn
from tensorflow.contrib.learn.python.learn.ops import losses_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops as array_ops_
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import variable_scope as vs
from tensorflow.python.summary import summary
def linear_regression_zero_init(x, y):
"""Linear regression subgraph with zero-value initial weights and bias.
Args:
x: tensor or placeholder for input features.
y: tensor or placeholder for labels.
Returns:
Predictions and loss tensors.
"""
return linear_regression(x, y, init_mean=0.0, init_stddev=0.0)
def logistic_regression_zero_init(x, y):
"""Logistic regression subgraph with zero-value initial weights and bias.
Args:
x: tensor or placeholder for input features.
y: tensor or placeholder for labels.
Returns:
Predictions and loss tensors.
"""
return logistic_regression(x, y, init_mean=0.0, init_stddev=0.0)
def linear_regression(x, y, init_mean=None, init_stddev=1.0):
"""Creates linear regression TensorFlow subgraph.
Args:
x: tensor or placeholder for input features.
y: tensor or placeholder for labels.
init_mean: the mean value to use for initialization.
    init_stddev: the standard deviation to use for initialization.
Returns:
Predictions and loss tensors.
Side effects:
The variables linear_regression.weights and linear_regression.bias are
initialized as follows. If init_mean is not None, then initialization
will be done using a random normal initializer with the given init_mean
    and init_stddev. (These may be set to 0.0 each if a zero initialization
    is desirable for convex use cases.) If init_mean is None, then the
    uniform_unit_scaling_initializer will be used.
"""
with vs.variable_scope('linear_regression'):
scope_name = vs.get_variable_scope().name
summary.histogram('%s.x' % scope_name, x)
summary.histogram('%s.y' % scope_name, y)
dtype = x.dtype.base_dtype
y_shape = y.get_shape()
if len(y_shape) == 1:
output_shape = 1
else:
output_shape = y_shape[1]
# Set up the requested initialization.
if init_mean is None:
weights = vs.get_variable(
'weights', [x.get_shape()[1], output_shape], dtype=dtype)
bias = vs.get_variable('bias', [output_shape], dtype=dtype)
else:
weights = vs.get_variable(
'weights', [x.get_shape()[1], output_shape],
initializer=init_ops.random_normal_initializer(
init_mean, init_stddev, dtype=dtype),
dtype=dtype)
bias = vs.get_variable(
'bias', [output_shape],
initializer=init_ops.random_normal_initializer(
init_mean, init_stddev, dtype=dtype),
dtype=dtype)
summary.histogram('%s.weights' % scope_name, weights)
summary.histogram('%s.bias' % scope_name, bias)
return losses_ops.mean_squared_error_regressor(x, y, weights, bias)
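# Illustrative usage sketch (shapes and the placeholder call are assumptions, not
# part of this module):
#
#   x = array_ops_.placeholder(dtypes.float32, [None, 10])
#   y = array_ops_.placeholder(dtypes.float32, [None, 1])
#   predictions, loss = linear_regression(x, y, init_mean=0.0, init_stddev=0.01)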
def logistic_regression(x,
y,
class_weight=None,
init_mean=None,
init_stddev=1.0):
"""Creates logistic regression TensorFlow subgraph.
Args:
x: tensor or placeholder for input features,
shape should be [batch_size, n_features].
y: tensor or placeholder for labels (one-hot),
shape should be [batch_size, n_classes].
class_weight: tensor, [n_classes], where for each class
it has weight of the class. If not provided
will check if graph contains tensor `class_weight:0`.
If that is not provided either all ones are used.
init_mean: the mean value to use for initialization.
    init_stddev: the standard deviation to use for initialization.
Returns:
Predictions and loss tensors.
Side effects:
    The variables logistic_regression.weights and logistic_regression.bias are
    initialized as follows. If init_mean is not None, then initialization
    will be done using a random normal initializer with the given init_mean
    and init_stddev. (These may be set to 0.0 each if a zero initialization
    is desirable for convex use cases.) If init_mean is None, then the
    uniform_unit_scaling_initializer will be used.
"""
with vs.variable_scope('logistic_regression'):
scope_name = vs.get_variable_scope().name
summary.histogram('%s.x' % scope_name, x)
summary.histogram('%s.y' % scope_name, y)
dtype = x.dtype.base_dtype
# Set up the requested initialization.
if init_mean is None:
weights = vs.get_variable(
'weights', [x.get_shape()[1], y.get_shape()[-1]], dtype=dtype)
bias = vs.get_variable('bias', [y.get_shape()[-1]], dtype=dtype)
else:
weights = vs.get_variable(
'weights', [x.get_shape()[1], y.get_shape()[-1]],
initializer=init_ops.random_normal_initializer(
init_mean, init_stddev, dtype=dtype),
dtype=dtype)
bias = vs.get_variable(
'bias', [y.get_shape()[-1]],
initializer=init_ops.random_normal_initializer(
init_mean, init_stddev, dtype=dtype),
dtype=dtype)
summary.histogram('%s.weights' % scope_name, weights)
summary.histogram('%s.bias' % scope_name, bias)
# If no class weight provided, try to retrieve one from pre-defined
# tensor name in the graph.
if not class_weight:
try:
class_weight = ops.get_default_graph().get_tensor_by_name(
'class_weight:0')
except KeyError:
pass
return losses_ops.softmax_classifier(
x, y, weights, bias, class_weight=class_weight)
## This will be in TensorFlow 0.7.
## TODO(ilblackdragon): Clean this up when it's released
def _reverse_seq(input_seq, lengths):
"""Reverse a list of Tensors up to specified lengths.
Args:
input_seq: Sequence of seq_len tensors of dimension (batch_size, depth)
lengths: A tensor of dimension batch_size, containing lengths for each
sequence in the batch. If "None" is specified, simply
reverses the list.
Returns:
time-reversed sequence
"""
if lengths is None:
return list(reversed(input_seq))
for input_ in input_seq:
input_.set_shape(input_.get_shape().with_rank(2))
# Join into (time, batch_size, depth)
s_joined = array_ops_.pack(input_seq)
# Reverse along dimension 0
s_reversed = array_ops_.reverse_sequence(s_joined, lengths, 0, 1)
# Split again into list
result = array_ops_.unpack(s_reversed)
return result
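# Illustrative behaviour (shapes are assumptions): given a list of three
# (batch_size, depth) tensors and lengths=None, _reverse_seq simply returns the
# list in reverse order; with an int64 lengths tensor it reverses each sequence
# only up to its own length via reverse_sequence.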
def bidirectional_rnn(cell_fw,
cell_bw,
inputs,
initial_state_fw=None,
initial_state_bw=None,
dtype=None,
sequence_length=None,
scope=None):
"""Creates a bidirectional recurrent neural network.
Similar to the unidirectional case (rnn) but takes input and builds
independent forward and backward RNNs with the final forward and backward
outputs depth-concatenated, such that the output will have the format
[time][batch][cell_fw.output_size + cell_bw.output_size]. The input_size of
forward and backward cell must match. The initial state for both directions
is zero by default (but can be set optionally) and no intermediate states
are ever returned -- the network is fully unrolled for the given (passed in)
length(s) of the sequence(s) or completely unrolled if length(s) is not
given.
Args:
cell_fw: An instance of RNNCell, to be used for forward direction.
cell_bw: An instance of RNNCell, to be used for backward direction.
inputs: A length T list of inputs, each a tensor of shape
[batch_size, cell.input_size].
initial_state_fw: (optional) An initial state for the forward RNN.
This must be a tensor of appropriate type and shape
[batch_size x cell.state_size].
initial_state_bw: (optional) Same as for initial_state_fw.
dtype: (optional) The data type for the initial state. Required if
either of the initial states are not provided.
sequence_length: (optional) An int64 vector (tensor) of size
[batch_size],
containing the actual lengths for each of the sequences.
scope: VariableScope for the created subgraph; defaults to "BiRNN"
Returns:
A pair (outputs, state) where:
outputs is a length T list of outputs (one for each input), which
are depth-concatenated forward and backward outputs
state is the concatenated final state of the forward and backward RNN
Raises:
TypeError: If "cell_fw" or "cell_bw" is not an instance of RNNCell.
ValueError: If inputs is None or an empty list.
"""
if not isinstance(cell_fw, contrib_rnn.RNNCell):
raise TypeError('cell_fw must be an instance of RNNCell')
if not isinstance(cell_bw, contrib_rnn.RNNCell):
raise TypeError('cell_bw must be an instance of RNNCell')
if not isinstance(inputs, list):
raise TypeError('inputs must be a list')
if not inputs:
raise ValueError('inputs must not be empty')
name = scope or 'BiRNN'
# Forward direction
with vs.variable_scope(name + '_FW'):
output_fw, state_fw = contrib_rnn.static_rnn(cell_fw, inputs,
initial_state_fw, dtype,
sequence_length)
# Backward direction
with vs.variable_scope(name + '_BW'):
tmp, state_bw = contrib_rnn.static_rnn(
cell_bw,
_reverse_seq(inputs, sequence_length), initial_state_bw, dtype,
sequence_length)
output_bw = _reverse_seq(tmp, sequence_length)
# Concat each of the forward/backward outputs
outputs = [
array_ops_.concat_v2([fw, bw], 1) for fw, bw in zip(output_fw, output_bw)
]
return outputs, array_ops_.concat_v2([state_fw, state_bw], 1)
# End of TensorFlow 0.7
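# Illustrative sketch (cell size and inputs are made up): wiring bidirectional_rnn
# with two GRU cells over a list of per-timestep input tensors.
#
#   fw = contrib_rnn.GRUCell(8)
#   bw = contrib_rnn.GRUCell(8)
#   outputs, state = bidirectional_rnn(fw, bw, inputs_list, dtype=dtypes.float32)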
def get_rnn_model(rnn_size, cell_type, num_layers, input_op_fn, bidirectional,
target_predictor_fn, sequence_length, initial_state,
attn_length, attn_size, attn_vec_size):
"""Returns a function that creates a RNN TensorFlow subgraph.
Args:
rnn_size: The size for rnn cell, e.g. size of your word embeddings.
cell_type: The type of rnn cell, including rnn, gru, and lstm.
num_layers: The number of layers of the rnn model.
input_op_fn: Function that will transform the input tensor, such as
creating word embeddings, byte list, etc. This takes
an argument `x` for input and returns transformed `x`.
bidirectional: boolean, Whether this is a bidirectional rnn.
target_predictor_fn: Function that will predict target from input
features. This can be logistic regression,
linear regression or any other model,
that takes `x`, `y` and returns predictions and loss
tensors.
sequence_length: If sequence_length is provided, dynamic calculation is
performed. This saves computational time when unrolling past max sequence
length. Required for bidirectional RNNs.
initial_state: An initial state for the RNN. This must be a tensor of
appropriate type and shape [batch_size x cell.state_size].
attn_length: integer, the size of attention vector attached to rnn cells.
attn_size: integer, the size of an attention window attached to rnn cells.
attn_vec_size: integer, the number of convolutional features calculated on
attention state and the size of the hidden layer built from base cell
state.
Returns:
A function that creates the subgraph.
"""
def rnn_estimator(x, y):
"""RNN estimator with target predictor function on top."""
x = input_op_fn(x)
if cell_type == 'rnn':
cell_fn = contrib_rnn.BasicRNNCell
elif cell_type == 'gru':
cell_fn = contrib_rnn.GRUCell
elif cell_type == 'lstm':
cell_fn = functools.partial(
contrib_rnn.BasicLSTMCell, state_is_tuple=False)
else:
raise ValueError('cell_type {} is not supported. '.format(cell_type))
# TODO(ipolosukhin): state_is_tuple=False is deprecated
if bidirectional:
# forward direction cell
fw_cell = cell_fn(rnn_size)
bw_cell = cell_fn(rnn_size)
# attach attention cells if specified
if attn_length is not None:
fw_cell = contrib_rnn.AttentionCellWrapper(
fw_cell,
attn_length=attn_length,
attn_size=attn_size,
attn_vec_size=attn_vec_size,
state_is_tuple=False)
bw_cell = contrib_rnn.AttentionCellWrapper(
bw_cell,
attn_length=attn_length,
attn_size=attn_size,
attn_vec_size=attn_vec_size,
state_is_tuple=False)
rnn_fw_cell = contrib_rnn.MultiRNNCell(
[fw_cell] * num_layers, state_is_tuple=False)
# backward direction cell
rnn_bw_cell = contrib_rnn.MultiRNNCell(
[bw_cell] * num_layers, state_is_tuple=False)
# pylint: disable=unexpected-keyword-arg, no-value-for-parameter
_, encoding = bidirectional_rnn(
rnn_fw_cell,
rnn_bw_cell,
x,
dtype=dtypes.float32,
sequence_length=sequence_length,
initial_state_fw=initial_state,
initial_state_bw=initial_state)
else:
rnn_cell = cell_fn(rnn_size)
if attn_length is not None:
rnn_cell = contrib_rnn.AttentionCellWrapper(
rnn_cell,
attn_length=attn_length,
attn_size=attn_size,
attn_vec_size=attn_vec_size,
state_is_tuple=False)
cell = contrib_rnn.MultiRNNCell(
[rnn_cell] * num_layers, state_is_tuple=False)
_, encoding = contrib_rnn.static_rnn(
cell,
x,
dtype=dtypes.float32,
sequence_length=sequence_length,
initial_state=initial_state)
return target_predictor_fn(encoding, y)
return rnn_estimator
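# Illustrative sketch (all argument values below are stand-ins): build an estimator
# function for a single-layer, unidirectional GRU feeding logistic regression.
#
#   model_fn = get_rnn_model(rnn_size=32, cell_type='gru', num_layers=1,
#                            input_op_fn=lambda x: x, bidirectional=False,
#                            target_predictor_fn=logistic_regression,
#                            sequence_length=None, initial_state=None,
#                            attn_length=None, attn_size=None, attn_vec_size=None)
#   predictions, loss = model_fn(x, y)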
| |
# coding=utf-8
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import pytest
import functools
from io import BytesIO
from datetime import date
from devtools_testutils import recorded_by_proxy
from azure.core.exceptions import ServiceRequestError, HttpResponseError
from azure.core.credentials import AzureKeyCredential
from azure.ai.formrecognizer._generated.v2_1.models import AnalyzeOperationResult
from azure.ai.formrecognizer._response_handlers import prepare_prebuilt_models
from azure.ai.formrecognizer import FormRecognizerClient, FormContentType, FormRecognizerApiVersion
from testcase import FormRecognizerTest
from preparers import GlobalClientPreparer as _GlobalClientPreparer
from preparers import FormRecognizerPreparer
FormRecognizerClientPreparer = functools.partial(_GlobalClientPreparer, FormRecognizerClient)
class TestInvoice(FormRecognizerTest):
def teardown(self):
self.sleep(4)
@pytest.mark.skip()
@FormRecognizerPreparer()
@recorded_by_proxy
def test_invoice_bad_endpoint(self, formrecognizer_test_api_key, **kwargs):
with open(self.invoice_pdf, "rb") as fd:
my_file = fd.read()
with pytest.raises(ServiceRequestError):
client = FormRecognizerClient("http://notreal.azure.com", AzureKeyCredential(formrecognizer_test_api_key))
poller = client.begin_recognize_invoices(my_file)
@FormRecognizerPreparer()
@FormRecognizerClientPreparer()
@recorded_by_proxy
def test_passing_enum_content_type(self, client):
with open(self.invoice_pdf, "rb") as fd:
my_file = fd.read()
poller = client.begin_recognize_invoices(
my_file,
content_type=FormContentType.APPLICATION_PDF
)
result = poller.result()
assert result is not None
@FormRecognizerPreparer()
@FormRecognizerClientPreparer()
def test_damaged_file_bytes_fails_autodetect_content_type(self, **kwargs):
client = kwargs.pop("client")
damaged_pdf = b"\x50\x44\x46\x55\x55\x55" # doesn't match any magic file numbers
with pytest.raises(ValueError):
poller = client.begin_recognize_invoices(
damaged_pdf
)
@FormRecognizerPreparer()
@FormRecognizerClientPreparer()
def test_damaged_file_bytes_io_fails_autodetect(self, **kwargs):
client = kwargs.pop("client")
damaged_pdf = BytesIO(b"\x50\x44\x46\x55\x55\x55") # doesn't match any magic file numbers
with pytest.raises(ValueError):
poller = client.begin_recognize_invoices(
damaged_pdf
)
@FormRecognizerPreparer()
@FormRecognizerClientPreparer()
def test_passing_bad_content_type_param_passed(self, **kwargs):
client = kwargs.pop("client")
with open(self.invoice_pdf, "rb") as fd:
my_file = fd.read()
with pytest.raises(ValueError):
poller = client.begin_recognize_invoices(
my_file,
content_type="application/jpeg"
)
@FormRecognizerPreparer()
@FormRecognizerClientPreparer()
def test_auto_detect_unsupported_stream_content(self, **kwargs):
client = kwargs.pop("client")
with open(self.unsupported_content_py, "rb") as fd:
my_file = fd.read()
with pytest.raises(ValueError):
poller = client.begin_recognize_invoices(
my_file
)
@FormRecognizerPreparer()
@FormRecognizerClientPreparer()
@recorded_by_proxy
def test_invoice_stream_transform_pdf(self, client):
responses = []
def callback(raw_response, _, headers):
analyze_result = client._deserialize(AnalyzeOperationResult, raw_response)
extracted_invoice = prepare_prebuilt_models(analyze_result)
responses.append(analyze_result)
responses.append(extracted_invoice)
with open(self.invoice_pdf, "rb") as fd:
my_file = fd.read()
poller = client.begin_recognize_invoices(
invoice=my_file,
include_field_elements=True,
cls=callback
)
result = poller.result()
raw_response = responses[0]
returned_model = responses[1]
invoice = returned_model[0]
actual = raw_response.analyze_result.document_results[0].fields
read_results = raw_response.analyze_result.read_results
document_results = raw_response.analyze_result.document_results
page_results = raw_response.analyze_result.page_results
self.assertFormFieldsTransformCorrect(invoice.fields, actual, read_results)
# check page range
assert invoice.page_range.first_page_number == document_results[0].page_range[0]
assert invoice.page_range.last_page_number == document_results[0].page_range[1]
# Check page metadata
self.assertFormPagesTransformCorrect(invoice.pages, read_results, page_results)
@pytest.mark.live_test_only
@FormRecognizerPreparer()
@FormRecognizerClientPreparer()
@recorded_by_proxy
def test_invoice_stream_multipage_transform_pdf(self, client):
responses = []
def callback(raw_response, _, headers):
analyze_result = client._deserialize(AnalyzeOperationResult, raw_response)
extracted_invoice = prepare_prebuilt_models(analyze_result)
responses.append(analyze_result)
responses.append(extracted_invoice)
with open(self.multipage_vendor_pdf, "rb") as fd:
my_file = fd.read()
poller = client.begin_recognize_invoices(
invoice=my_file,
include_field_elements=True,
cls=callback
)
result = poller.result()
raw_response = responses[0]
returned_models = responses[1]
read_results = raw_response.analyze_result.read_results
document_results = raw_response.analyze_result.document_results
page_results = raw_response.analyze_result.page_results
assert 1 == len(returned_models)
returned_model = returned_models[0]
assert 2 == len(returned_model.pages)
assert 1 == returned_model.page_range.first_page_number
assert 2 == returned_model.page_range.last_page_number
assert 1 == len(document_results)
document_result = document_results[0]
assert 1 == document_result.page_range[0] # checking first page number
assert 2 == document_result.page_range[1] # checking last page number
for invoice, document_result in zip(returned_models, document_results):
self.assertFormFieldsTransformCorrect(invoice.fields, document_result.fields, read_results)
self.assertFormPagesTransformCorrect(returned_model.pages, read_results, page_results)
@FormRecognizerPreparer()
@FormRecognizerClientPreparer()
@recorded_by_proxy
def test_invoice_tiff(self, client):
with open(self.invoice_tiff, "rb") as stream:
poller = client.begin_recognize_invoices(stream)
result = poller.result()
assert len(result) == 1
invoice = result[0]
# check dict values
assert invoice.fields.get("VendorName").value == "Contoso"
assert invoice.fields.get("VendorAddress").value, '1 Redmond way Suite 6000 Redmond == WA 99243'
assert invoice.fields.get("CustomerAddressRecipient").value == "Microsoft"
assert invoice.fields.get("CustomerAddress").value, '1020 Enterprise Way Sunnayvale == CA 87659'
assert invoice.fields.get("CustomerName").value == "Microsoft"
assert invoice.fields.get("InvoiceId").value == '34278587'
assert invoice.fields.get("InvoiceDate").value, date(2017, 6 == 18)
assert invoice.fields.get("Items").value[0].value["Amount"].value == 56651.49
assert invoice.fields.get("DueDate").value, date(2017, 6 == 24)
@pytest.mark.live_test_only
@FormRecognizerPreparer()
@FormRecognizerClientPreparer()
@recorded_by_proxy
def test_invoice_multipage_pdf(self, client):
with open(self.multipage_vendor_pdf, "rb") as fd:
invoice = fd.read()
poller = client.begin_recognize_invoices(invoice)
result = poller.result()
assert len(result) == 1
invoice = result[0]
assert "prebuilt:invoice" == invoice.form_type
assert 1 == invoice.page_range.first_page_number
assert 2 == invoice.page_range.last_page_number
vendor_name = invoice.fields["VendorName"]
assert vendor_name.value == 'Southridge Video'
assert vendor_name.value_data.page_number == 2
remittance_address_recipient = invoice.fields["RemittanceAddressRecipient"]
assert remittance_address_recipient.value == "Contoso Ltd."
assert remittance_address_recipient.value_data.page_number == 1
remittance_address = invoice.fields["RemittanceAddress"]
        assert remittance_address.value == '2345 Dogwood Lane Birch, Kansas 98123'
assert remittance_address.value_data.page_number == 1
@FormRecognizerPreparer()
@FormRecognizerClientPreparer()
@recorded_by_proxy
def test_invoice_jpg_include_field_elements(self, client):
with open(self.invoice_jpg, "rb") as fd:
invoice = fd.read()
poller = client.begin_recognize_invoices(invoice, include_field_elements=True)
result = poller.result()
assert len(result) == 1
invoice = result[0]
self.assertFormPagesHasValues(invoice.pages)
for field in invoice.fields.values():
if field.name == "Items":
continue
self.assertFieldElementsHasValues(field.value_data.field_elements, invoice.page_range.first_page_number)
self.assertInvoiceItemsHasValues(invoice.fields["Items"].value, invoice.page_range.first_page_number, True)
# check dict values
assert invoice.fields.get("AmountDue").value == 610.0
assert invoice.fields.get("BillingAddress").value, "123 Bill St, Redmond WA == 98052"
assert invoice.fields.get("BillingAddressRecipient").value == "Microsoft Finance"
assert invoice.fields.get("CustomerAddress").value, "123 Other St, Redmond WA == 98052"
assert invoice.fields.get("CustomerAddressRecipient").value == "Microsoft Corp"
assert invoice.fields.get("CustomerId").value == "CID-12345"
assert invoice.fields.get("CustomerName").value == "MICROSOFT CORPORATION"
assert invoice.fields.get("DueDate").value, date(2019, 12 == 15)
assert invoice.fields.get("InvoiceDate").value, date(2019, 11 == 15)
assert invoice.fields.get("InvoiceId").value == "INV-100"
assert invoice.fields.get("InvoiceTotal").value == 110.0
assert invoice.fields.get("PreviousUnpaidBalance").value == 500.0
assert invoice.fields.get("PurchaseOrder").value == "PO-3333"
assert invoice.fields.get("RemittanceAddress").value, "123 Remit St New York, NY == 10001"
assert invoice.fields.get("RemittanceAddressRecipient").value == "Contoso Billing"
assert invoice.fields.get("ServiceAddress").value, "123 Service St, Redmond WA == 98052"
assert invoice.fields.get("ServiceAddressRecipient").value == "Microsoft Services"
assert invoice.fields.get("ServiceEndDate").value, date(2019, 11 == 14)
assert invoice.fields.get("ServiceStartDate").value, date(2019, 10 == 14)
assert invoice.fields.get("ShippingAddress").value, "123 Ship St, Redmond WA == 98052"
assert invoice.fields.get("ShippingAddressRecipient").value == "Microsoft Delivery"
assert invoice.fields.get("SubTotal").value == 100.0
assert invoice.fields.get("TotalTax").value == 10.0
assert invoice.fields.get("VendorName").value == "CONTOSO LTD."
assert invoice.fields.get("VendorAddress").value, "123 456th St New York, NY == 10001"
assert invoice.fields.get("VendorAddressRecipient").value == "Contoso Headquarters"
assert invoice.fields.get("Items").value[0].value["Amount"].value == 100.0
assert invoice.fields.get("Items").value[0].value["Description"].value == "Consulting service"
assert invoice.fields.get("Items").value[0].value["Quantity"].value == 1.0
assert invoice.fields.get("Items").value[0].value["UnitPrice"].value == 1.0
@pytest.mark.live_test_only
@FormRecognizerPreparer()
@FormRecognizerClientPreparer()
def test_invoice_continuation_token(self, **kwargs):
client = kwargs.pop("client")
with open(self.invoice_tiff, "rb") as fd:
invoice = fd.read()
initial_poller = client.begin_recognize_invoices(invoice)
cont_token = initial_poller.continuation_token()
poller = client.begin_recognize_invoices(None, continuation_token=cont_token)
result = poller.result()
assert result is not None
initial_poller.wait() # necessary so azure-devtools doesn't throw assertion error
@FormRecognizerPreparer()
@FormRecognizerClientPreparer(client_kwargs={"api_version": FormRecognizerApiVersion.V2_0})
def test_invoice_v2(self, **kwargs):
client = kwargs.pop("client")
with open(self.invoice_pdf, "rb") as fd:
invoice = fd.read()
with pytest.raises(ValueError) as e:
client.begin_recognize_invoices(invoice)
assert "Method 'begin_recognize_invoices' is only available for API version V2_1 and up" in str(e.value)
@FormRecognizerPreparer()
@FormRecognizerClientPreparer()
@recorded_by_proxy
def test_invoice_locale_specified(self, client):
with open(self.invoice_tiff, "rb") as fd:
invoice = fd.read()
poller = client.begin_recognize_invoices(invoice, locale="en-US")
assert 'en-US' == poller._polling_method._initial_response.http_response.request.query['locale']
result = poller.result()
assert result
@FormRecognizerPreparer()
@FormRecognizerClientPreparer()
@recorded_by_proxy
def test_invoice_locale_error(self, client):
with open(self.invoice_pdf, "rb") as fd:
invoice = fd.read()
with pytest.raises(HttpResponseError) as e:
client.begin_recognize_invoices(invoice, locale="not a locale")
assert "locale" in e.value.error.message
@FormRecognizerPreparer()
@FormRecognizerClientPreparer()
@recorded_by_proxy
def test_pages_kwarg_specified(self, client):
with open(self.invoice_pdf, "rb") as fd:
invoice = fd.read()
poller = client.begin_recognize_invoices(invoice, pages=["1"])
assert '1' == poller._polling_method._initial_response.http_response.request.query['pages']
result = poller.result()
assert result
| |
"""A script to clean US EPA's Facility data from GHG Emitter Facilities table"""
import csv
import datacommons
import json
import os.path
import pathlib
import sys
from absl import app
from absl import flags
from shapely import geometry
# Allows the following module imports to work when running as a script
_SCRIPT_PATH = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(_SCRIPT_PATH, '../..')) # for Crosswalk
from us_epa.util.crosswalk import Crosswalk
FLAGS = flags.FLAGS
flags.DEFINE_string(
'epa_input_tables_path', 'tmp_data',
    'Path to directory containing crosswalk.csv, V_GHG_EMITTER_FACILITIES.csv, etc.'
)
flags.DEFINE_string('epa_output_path', 'output', 'Output directory')
# Input tables we process
# Schema: https://enviro.epa.gov/enviro/ef_metadata_html.ef_metadata_table?p_table_name=<table>
_TABLES = ('V_GHG_EMITTER_FACILITIES', 'V_GHG_SUPPLIER_FACILITIES',
'V_GHG_INJECTION_FACILITIES')
# Cleaned CSV Columns
# - 'containedInPlace' is a repeated list of refs to County and Census ZCTA
# - eiaPlantCode can also be repeated
_DCID = 'dcid'
_EPA_GHG_ID = 'epaGhgrpFacilityId'
_EPA_FRS_ID = 'epaFrsId'
_EIA_PP_RELATION = 'partOf'
_NAME = 'name'
_ADDRESS = 'address'
_CIP = 'containedInPlace'
_NAICS = 'naics'
_LAT = 'latitude'
_LNG = 'longitude'
_CLEAN_CSV_HDR = (_DCID, _EPA_GHG_ID, _EPA_FRS_ID, _EIA_PP_RELATION, _NAME,
_ADDRESS, _CIP, _NAICS, _LAT, _LNG)
_OUT_FILE_PREFIX = 'us_epa_facility'
_CROSSWALK_FILE = 'crosswalks.csv'
_GEOJSON_CACHE = {}
_COUNTY_CANDIDATES_CACHE = {}
def _gen_tmcf():
lines = [
'Node: E:FacilityTable->E0',
'typeOf: dcs:EpaReportingFacility',
]
for p in _CLEAN_CSV_HDR:
lines.append(f'{p}: C:FacilityTable->{p}')
return '\n'.join(lines)
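# Illustrative output (abridged): _gen_tmcf() produces one node whose first lines
# look like
#   Node: E:FacilityTable->E0
#   typeOf: dcs:EpaReportingFacility
#   dcid: C:FacilityTable->dcid
#   epaGhgrpFacilityId: C:FacilityTable->epaGhgrpFacilityId
# followed by one such line per remaining cleaned-CSV column.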
def _v(table, row, col):
return row.get(table + '.' + col, '')
def _cv(table, row, col):
return _v(table, row, col).strip().title()
def _str(v):
if not v:
return ''
return '"' + v + '"'
def _get_name(table, row):
name = _cv(table, row, 'FACILITY_NAME')
return name.replace(' Llc', ' LLC')
def _get_address(table, row):
parts = []
for k in ['ADDRESS1', 'ADDRESS2', 'CITY', 'STATE_NAME']:
p = _cv(table, row, k)
if p:
parts.append(p)
address = ', '.join(parts)
p = _cv(table, row, 'ZIP')
if p:
address += ' - ' + p
return address
def _get_cip(zip, county):
cip = []
if zip:
cip.append('dcid:' + zip)
if county:
cip.append('dcid:' + county)
return cip
def _get_naics(table, row):
column = 'NAICS_CODE' if table == 'V_GHG_INJECTION_FACILITIES' else 'PRIMARY_NAICS_CODE'
naics = _v(table, row, column)
if not naics:
return ''
return 'dcs:NAICS/' + naics
def _get_county_candidates(zcta):
"""Returns counties that the zcta is associated with.
Returns: two candidate county lists corresponding to zip and geoOverlaps respectively.
"""
if zcta in _COUNTY_CANDIDATES_CACHE:
return _COUNTY_CANDIDATES_CACHE[zcta]
candidate_lists = []
for prop in ['containedInPlace', 'geoOverlaps']:
resp = datacommons.get_property_values([zcta],
prop,
out=True,
value_type='County')
candidate_lists.append(sorted(resp[zcta]))
_COUNTY_CANDIDATES_CACHE[zcta] = candidate_lists
return candidate_lists
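# Illustrative result (dcids are hypothetical): for zcta "zip/94043" the returned
# pair of candidate lists might be
#   [["geoId/06085"], ["geoId/06081", "geoId/06085"]]
# i.e. containedInPlace candidates first, geoOverlaps candidates second, each
# sorted.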
def _validate_latlng(lat, lng, dcid):
"""Validate whether the lat/lng is located within the given entity's geo boundary"""
gj = ''
if dcid in _GEOJSON_CACHE:
gj = _GEOJSON_CACHE[dcid]
else:
resp = datacommons.get_property_values([dcid], 'geoJsonCoordinates')
if not resp[dcid]:
print(f'Did not find GEO JSON for {dcid}')
return False
gj = resp[dcid][0]
_GEOJSON_CACHE[dcid] = gj
point = geometry.Point(float(lng), float(lat))
polygon = geometry.shape(json.loads(gj))
if not polygon.contains(point):
return False
return True
_COUNTERS = {
'given_county_wrong_latlng': [],
'given_county_correct_latlng': [],
'zipbased_county_wrong_latlng': [],
'zipbased_county_correct_latlng': [],
'missing_zip_and_county': [],
}
def _resolve_places(facility_id, zip, provided_county, lat, lng):
"""Resolve the geo relations for the given Facility
Returns resolved <zip>, <county>, <lat>, <lng>
"""
if zip == 'zip/00000':
_COUNTERS['missing_zip_and_county'].append(facility_id)
return '', '', '', ''
county_candidates = _get_county_candidates(zip)
if any(provided_county in l for l in county_candidates):
# Provided county is in the candidate list, use that.
if lat and lng and _validate_latlng(lat, lng, provided_county):
# Lat/lng is in the chosen county
_COUNTERS['given_county_correct_latlng'].append(facility_id)
return zip, provided_county, lat, lng
_COUNTERS['given_county_wrong_latlng'].append(facility_id)
return zip, provided_county, '', ''
if lat and lng:
# Prefer the county with lat/lng match.
for list in county_candidates:
for c in list:
if _validate_latlng(lat, lng, c):
_COUNTERS['zipbased_county_correct_latlng'].append(
facility_id)
return zip, c, lat, lng
# Lat or lng is empty or did not match any county. Pick a candidate county prefering
# containedInPlace over geoOverlaps.
for list in county_candidates:
if list:
_COUNTERS['zipbased_county_wrong_latlng'].append(facility_id)
return zip, list[0], '', ''
_COUNTERS['missing_zip_and_county'].append(facility_id)
return '', '', '', ''
def counters_string():
result = []
for k, v in _COUNTERS.items():
result.append(k + ' -> ' + str(len(v)) + ' - ' + ', '.join(v[:3]))
return '\n'.join(result)
def process(input_tables_path, output_path):
crosswalk = Crosswalk(os.path.join(input_tables_path, _CROSSWALK_FILE))
processed_ids = set()
with open(os.path.join(output_path, _OUT_FILE_PREFIX + '.csv'), 'w') as wfp:
# IMPORTANT: We want to escape double quote (\") if it is specified in the cell
# value, rather than the default of using two double quotes ("")
cw = csv.DictWriter(wfp,
_CLEAN_CSV_HDR,
doublequote=False,
escapechar='\\')
cw.writeheader()
for table in _TABLES:
table_path = os.path.join(input_tables_path, table + '.csv')
rows_written = 0
with open(table_path, 'r') as rfp:
cr = csv.DictReader(rfp)
for in_row in cr:
ghg_id = _v(table, in_row, 'FACILITY_ID')
assert ghg_id
if ghg_id in processed_ids:
continue
processed_ids.add(ghg_id)
lat = _v(table, in_row, 'LATITUDE')
lng = _v(table, in_row, 'LONGITUDE')
zip = 'zip/' + _v(table, in_row,
'ZIP')[:5] # zips have extension
county = 'geoId/' + _v(table, in_row, 'COUNTY_FIPS')
zip, county, lat, lng = _resolve_places(
ghg_id, zip, county, lat, lng)
out_row = {
_DCID:
_str(crosswalk.get_dcid(ghg_id)),
_EPA_GHG_ID:
_str(ghg_id),
_EPA_FRS_ID:
_str(crosswalk.get_frs_id(ghg_id)),
_EIA_PP_RELATION:
', '.join([
'dcid:eia/pp/' + v
for v in crosswalk.get_power_plant_ids(ghg_id)
]),
_NAME:
_str(_get_name(table, in_row)),
_ADDRESS:
_str(_get_address(table, in_row)),
_CIP:
', '.join(_get_cip(zip, county)),
_NAICS:
_get_naics(table, in_row),
_LAT:
_str(lat),
_LNG:
_str(lng),
}
rows_written += 1
if rows_written % 100 == 99:
print('Geo Resolution Stats: \n' + counters_string())
cw.writerow(out_row)
print('Produced ' + str(rows_written) + ' rows from ' + table)
print('Geo Resolution Stats: \n' + counters_string())
with open(os.path.join(output_path, _OUT_FILE_PREFIX + '.tmcf'), 'w') as fp:
fp.write(_gen_tmcf())
def main(_):
# Validate inputs.
assert FLAGS.epa_output_path
assert FLAGS.epa_input_tables_path
assert os.path.exists(
os.path.join(FLAGS.epa_input_tables_path, _CROSSWALK_FILE))
for t in _TABLES:
assert os.path.exists(
os.path.join(FLAGS.epa_input_tables_path, t + '.csv'))
pathlib.Path(FLAGS.epa_output_path).mkdir(exist_ok=True)
process(FLAGS.epa_input_tables_path, FLAGS.epa_output_path)
if __name__ == '__main__':
app.run(main)
| |
__author__ = 'bkeroack'
import sys
import logging
import traceback
import time
import billiard
import itertools
import pprint
pp = pprint.PrettyPrinter(indent=4)
import elita.util
import elita.util.type_check
import gitservice
import salt_control
from elita.actions.action import regen_datasvc
class FatalDeploymentError(Exception):
pass
#async callable
def run_deploy(datasvc, application, build_name, target, rolling_divisor, rolling_pause, ordered_pause, deployment_id):
'''
Asynchronous entry point for deployments
'''
# normally there's a higher level try/except block for all async actions
# we want to make sure the error is saved in the deployment object as well, not just the job
# so we duplicate the functionality here
try:
if target['groups']:
logging.debug("run_deploy: Doing rolling deployment")
dc = DeployController(datasvc, deployment_id)
rdc = RollingDeployController(datasvc, dc, deployment_id)
ret = rdc.run(application, build_name, target, rolling_divisor, rolling_pause, ordered_pause)
else:
logging.debug("run_deploy: Doing manual deployment")
dc = DeployController(datasvc, deployment_id)
ret, data = dc.run(application, build_name, target['servers'], target['gitdeploys'])
except:
exc_type, exc_obj, tb = sys.exc_info()
f_exc = traceback.format_exception(exc_type, exc_obj, tb)
results = {
"error": "unhandled exception during callable!",
"exception": f_exc
}
logging.debug("run_deploy: EXCEPTION: {}".format(f_exc))
datasvc.deploysvc.UpdateDeployment(application, deployment_id, {"status": "error"})
return {"deploy_status": "error", "details": results}
datasvc.deploysvc.CompleteDeployment(application, deployment_id)
datasvc.deploysvc.UpdateDeployment(application, deployment_id, {"status": "complete" if ret else "error"})
return {"deploy_status": "done" if ret else "error"}
class BatchCompute:
'''
Given a list of application groups that require rolling deployment and an (optional) list that do not,
compute the optimal batches of server/gitdeploy pairs. All non-rolling groups are added to the first batch.
Splitting algorithm is tolerant of outrageously large rolling_divisors.
Written in a functional style to facilitate testing.
'''
@staticmethod
def add_nonrolling_groups(batches, nonrolling_docs):
'''
Add servers and gitdeploys from nonrolling groups to the first batch
Not written in a functional style because that was totally unreadable
'''
if nonrolling_docs and len(nonrolling_docs) > 0:
assert all(map(lambda x: 'servers' in x and 'gitdeploys' in x, batches)) or not batches
assert all(map(lambda x: 'servers' in x[1] and 'gitdeploys' in x[1], nonrolling_docs.iteritems()))
non_rolling_batches = list()
for g in nonrolling_docs:
servers = nonrolling_docs[g]['servers']
gitdeploys = nonrolling_docs[g]['gitdeploys']
ordered = isinstance(nonrolling_docs[g]['gitdeploys'][0], list)
if ordered:
for i, gdb in enumerate(nonrolling_docs[g]['gitdeploys']):
if i > len(non_rolling_batches)-1:
non_rolling_batches.append({'gitdeploys': gdb, 'servers': servers})
else:
non_rolling_batches[i]['servers'] = list(set(servers + non_rolling_batches[i]['servers']))
non_rolling_batches[i]['gitdeploys'] = list(set(gdb + non_rolling_batches[i]['gitdeploys']))
if i == len(nonrolling_docs[g]['gitdeploys'])-1:
non_rolling_batches[i]['ordered_gitdeploy'] = False
else:
non_rolling_batches[i]['ordered_gitdeploy'] = True
else:
non_rolling_batches.append({'gitdeploys': gitdeploys, 'servers': servers, 'ordered_gitdeploy': False})
for i, nrb in enumerate(non_rolling_batches):
if i > len(batches)-1:
batches.append(nrb)
else:
batches[i]['servers'] = list(set(nrb['servers'] + batches[i]['servers']))
batches[i]['gitdeploys'] = list(set(nrb['gitdeploys'] + batches[i]['gitdeploys']))
batches[i]['ordered_gitdeploy'] = nrb['ordered_gitdeploy']
return batches
@staticmethod
def dedupe_batches(batches):
'''
Dedupe servers and gitdeploys list in the combined batches list:
[
{ "servers": [ "server1", "server1", ...], "gitdeploys": [ "gd1", "gd1", ...] }, #batch 0 (all groups)
{ "servers": [ "server1", "server1", ...], "gitdeploys": [ "gd1", "gd1", ...] }, #batch 1 (all groups)
...
]
'''
assert len(batches) > 0
assert all(map(lambda x: 'servers' in x and 'gitdeploys' in x, batches))
return map(lambda x: {"servers": list(set(x['servers'])),
"gitdeploys": list(set(elita.util.flatten_list(x['gitdeploys']))),
"ordered_gitdeploy": x['ordered_gitdeploy']}, batches)
@staticmethod
def reduce_group_batches(accumulated, update):
assert 'servers' in accumulated and 'servers' in update
assert 'gitdeploys' in accumulated and 'gitdeploys' in update
return {
"servers": accumulated['servers'] + update['servers'],
"gitdeploys": accumulated['gitdeploys'] + update['gitdeploys']
}
@staticmethod
def coalesce_batches(batches):
'''
Combine the big list of batches into a single list.
Function is passed a list of lists:
[
[ { "servers": [...], "gitdeploys": [...] }, ... ], # batches 0-n for group A
[ { "servers": [...], "gitdeploys": [...] }, ... ], # batches 0-n for broup B
...
]
Each nested list represents the computed batches for an individual group. All nested lists are expected to be
the same length.
'''
if not batches:
return list()
return map(
lambda batch_aggregate: reduce(
lambda acc, upd:
{
'servers': acc['servers'] + upd['servers'],
'gitdeploys': acc['gitdeploys'] + upd['gitdeploys'],
'ordered_gitdeploy': acc['ordered_gitdeploy'] and upd['ordered_gitdeploy']
}, batch_aggregate
), itertools.izip_longest(*batches, fillvalue={"servers": [], "gitdeploys": [], "ordered_gitdeploy": False}))
@staticmethod
def compute_group_batches(divisor, group):
'''
Compute batches for group.
Group is iteritems() result from group dict. group[0] is key (name), group[1] is dict of servers/gitdeploys
return list of dicts: [ { 'servers': [...], 'gitdeploys': [...] }, ... ]
'''
assert len(group) == 2
assert 'servers' in group[1]
assert 'gitdeploys' in group[1]
servers = group[1]['servers']
gitdeploys = group[1]['gitdeploys']
server_batches = elita.util.split_seq(servers, divisor)
        gd_multiplier = len(server_batches)  # gitdeploy_batches multiplier
ordered = isinstance(gitdeploys[0], list)
if ordered:
# duplicate all server batches by the length of the gitdeploy list-of-lists
server_batches = [x for item in server_batches for x in itertools.repeat(item, len(gitdeploys))]
gitdeploy_batches = list(gitdeploys) * gd_multiplier
ordered_flags = [True] * (len(gitdeploys) - 1)
ordered_flags.append(False)
ordered_flags = ordered_flags * gd_multiplier
else:
gitdeploy_batches = [gitdeploys] * gd_multiplier
ordered_flags = [False] * gd_multiplier
assert len(gitdeploy_batches) == len(server_batches)
batches = [{'servers': sb, 'gitdeploys': gd, 'ordered_gitdeploy': of}
for sb, gd, of in zip(server_batches, gitdeploy_batches, ordered_flags)]
return batches
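    # Illustrative example (group is hypothetical): with divisor 2 and a
    # non-ordered gitdeploy list,
    #   ('groupA', {'servers': ['s1', 's2', 's3', 's4'], 'gitdeploys': ['gd1']})
    # splits into two batches:
    #   [{'servers': ['s1', 's2'], 'gitdeploys': ['gd1'], 'ordered_gitdeploy': False},
    #    {'servers': ['s3', 's4'], 'gitdeploys': ['gd1'], 'ordered_gitdeploy': False}]
    # assuming elita.util.split_seq divides the four servers into two equal halves.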
@staticmethod
def compute_rolling_batches(divisor, rolling_group_docs, nonrolling_group_docs):
assert isinstance(divisor, int)
assert elita.util.type_check.is_optional_dict(rolling_group_docs)
assert not rolling_group_docs or all(map(lambda x: 'servers' in x[1] and 'gitdeploys' in x[1], rolling_group_docs.iteritems()))
return BatchCompute.dedupe_batches(
BatchCompute.add_nonrolling_groups(
BatchCompute.coalesce_batches(
map(lambda x: BatchCompute.compute_group_batches(divisor, x), rolling_group_docs.iteritems() if rolling_group_docs else tuple())
), nonrolling_group_docs
)
)
class RollingDeployController:
'''
Break deployment up into server/gitdeploy batches, then invoke DeployController with each batch sequentially
'''
__metaclass__ = elita.util.LoggingMetaClass
def __init__(self, datasvc, deploy_controller, deployment_id):
'''
@type datasvc: models.DataService
'''
self.datasvc = datasvc
self.dc = deploy_controller
self.deployment_id = deployment_id
def get_nonrolling_groups(self, rolling_groups, all_groups):
return list(set(all_groups) - set(rolling_groups))
def compute_batches(self, rolling_group_docs, nonrolling_group_docs, rolling_divisor):
return BatchCompute.compute_rolling_batches(rolling_divisor, rolling_group_docs, nonrolling_group_docs)
def run_hook(self, name, application, build_name, batches, batch_number=None, target=None):
args = {
"hook_parameters": {
"deployment_id": self.deployment_id,
"build": build_name
}
}
if name == "AUTO_DEPLOYMENT_START":
args['hook_parameters']['target'] = target
args['hook_parameters']['batches'] = batches
if name == "AUTO_DEPLOYMENT_COMPLETE" or name == "AUTO_DEPLOYMENT_FAILED":
args['hook_parameters']['deployment'] = self.datasvc.deploysvc.GetDeployment(application,
self.deployment_id)
args['hook_parameters']['batches'] = batches
if "AUTO_DEPLOYMENT_BATCH" in name:
args['hook_parameters']['batch_number'] = batch_number
args['hook_parameters']['batch_count'] = len(batches)
args['hook_parameters']['batch'] = batches[batch_number]
self.datasvc.actionsvc.hooks.run_hook(application, name, args)
def run(self, application, build_name, target, rolling_divisor, rolling_pause, ordered_pause, parallel=True):
'''
Run rolling deployment. This should be called iff the deployment is called via groups/environments
'''
groups = target['groups']
rolling_groups = [g for g in groups if self.datasvc.groupsvc.GetGroup(application, g)['rolling_deploy']]
rolling_group_docs = {g: self.datasvc.groupsvc.GetGroup(application, g) for g in rolling_groups}
nonrolling_groups = self.get_nonrolling_groups(rolling_groups, groups)
nonrolling_group_docs = {g: self.datasvc.groupsvc.GetGroup(application, g) for g in nonrolling_groups}
gd_docs = [self.datasvc.gitsvc.GetGitDeploy(application, gd) for gd in target['gitdeploys']]
gitrepos = [gd['location']['gitrepo']['name'] for gd in gd_docs]
batches = self.compute_batches(rolling_group_docs, nonrolling_group_docs, rolling_divisor)
logging.debug("computed batches: {}".format(batches))
#run pre hook
self.run_hook("AUTO_DEPLOYMENT_START", application, build_name, batches, target=target)
self.datasvc.deploysvc.InitializeDeploymentPlan(application, self.deployment_id, batches, gitrepos)
self.datasvc.jobsvc.NewJobData({
"RollingDeployment": {
"batches": len(batches),
"batch_data": batches
}
})
for i, b in enumerate(batches):
logging.debug("doing DeployController.run: deploy_gds: {}".format(b['gitdeploys']))
#run start hook
self.run_hook("AUTO_DEPLOYMENT_BATCH_BEGIN", application, build_name, batches, batch_number=i)
ok, results = self.dc.run(application, build_name, b['servers'], b['gitdeploys'],
parallel=parallel, batch_number=i)
if not ok:
self.datasvc.jobsvc.NewJobData({"RollingDeployment": "error"})
self.run_hook("AUTO_DEPLOYMENT_FAILED", application, build_name, batches)
return False
#run batch done hook
self.run_hook("AUTO_DEPLOYMENT_BATCH_DONE", application, build_name, batches, batch_number=i)
deploy_doc = self.datasvc.deploysvc.GetDeployment(application, self.deployment_id)
assert deploy_doc
if deploy_doc['status'] == 'error':
self.datasvc.jobsvc.NewJobData({"message": "detected failed deployment so aborting further batches"})
self.datasvc.jobsvc.NewJobData({"RollingDeployment": "error"})
self.run_hook("AUTO_DEPLOYMENT_FAILED", application, build_name, batches)
return False
if i != (len(batches)-1):
pause = ordered_pause if b['ordered_gitdeploy'] else rolling_pause
msg = "pausing for {} seconds between batches ({})".format(pause,
"ordered" if b['ordered_gitdeploy']
else "batch complete")
self.datasvc.jobsvc.NewJobData({"RollingDeployment": msg})
logging.debug("RollingDeployController: {}".format(msg))
time.sleep(pause)
#run post hook
self.run_hook("AUTO_DEPLOYMENT_COMPLETE", application, build_name, batches, target=target)
return True
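
# Illustrative invocation sketch (application/build/group names are hypothetical); the async
# deployment task would typically construct the controller with a DeployController and a target:
#   dc = DeployController(datasvc, deployment_id)
#   rdc = RollingDeployController(datasvc, dc, deployment_id)
#   ok = rdc.run('myapp', 'build-123', {'groups': ['web'], 'gitdeploys': ['app']},
#                rolling_divisor=2, rolling_pause=30, ordered_pause=10)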
def determine_deployable_servers(all_gd_servers, specified_servers):
    return list(set(all_gd_servers).intersection(set(specified_servers)))
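# e.g. determine_deployable_servers(['s1', 's2', 's3'], ['s2', 's3', 's4']) -> ['s2', 's3']
# (set intersection, so ordering of the returned list is not guaranteed)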
def _threadsafe_process_gitdeploy(gddoc, build_doc, settings, job_id, deployment_id):
'''
Threadsafe function for processing a single gitdeploy during a deployment.
Creates own instance of datasvc, etc.
'''
package = gddoc['package']
package_doc = build_doc['packages'][package]
client, datasvc = regen_datasvc(settings, job_id)
gdm = gitservice.GitDeployManager(gddoc, datasvc)
gitrepo_name = gddoc['location']['gitrepo']['name']
datasvc.deploysvc.UpdateDeployment_Phase1(gddoc['application'], deployment_id, gitrepo_name,
progress=10,
step='Checking out default branch')
try:
res = gdm.checkout_default_branch()
except:
exc_msg = str(sys.exc_info()[1]).split('\n')
exc_msg.insert(0, "ERROR: checkout_default_branch")
datasvc.deploysvc.UpdateDeployment_Phase1(gddoc['application'], deployment_id, gitrepo_name,
step=exc_msg)
return
logging.debug("_threadsafe_process_gitdeploy: git checkout output: {}".format(str(res)))
if gdm.last_build == build_doc['build_name']:
datasvc.jobsvc.NewJobData({"ProcessGitdeploys": {gitrepo_name: "already processed"}})
datasvc.deploysvc.UpdateDeployment_Phase1(gddoc['application'], deployment_id, gitrepo_name,
progress=100,
step='Complete (already processed)')
else:
datasvc.deploysvc.UpdateDeployment_Phase1(gddoc['application'], deployment_id, gitrepo_name,
progress=25,
step='Decompressing package to repository')
datasvc.jobsvc.NewJobData({"ProcessGitdeploys": {gddoc['name']: "processing"}})
try:
gdm.decompress_to_repo(package_doc)
except:
exc_msg = str(sys.exc_info()[1]).split('\n')
exc_msg.insert(0, "ERROR: decompress_to_repo")
datasvc.deploysvc.UpdateDeployment_Phase1(gddoc['application'], deployment_id, gitrepo_name,
step=exc_msg)
return
datasvc.deploysvc.UpdateDeployment_Phase1(gddoc['application'], deployment_id, gitrepo_name,
progress=50,
step='Checking for changes')
logging.debug("_threadsafe_process_gitdeploy: Checking for changes")
try:
res = gdm.check_repo_status()
except:
exc_msg = str(sys.exc_info()[1]).split('\n')
exc_msg.insert(0, "ERROR: check_repo_status")
datasvc.deploysvc.UpdateDeployment_Phase1(gddoc['application'], deployment_id, gitrepo_name,
step=exc_msg)
return
logging.debug("_threadsafe_process_gitdeploy: git status results: {}".format(str(res)))
if "nothing to commit" in res:
datasvc.deploysvc.UpdateDeployment_Phase1(gddoc['application'], deployment_id, gitrepo_name,
progress=100,
step='Complete (no changes found)')
datasvc.jobsvc.NewJobData({"ProcessGitdeploys": {gddoc['name']: "no changes"}})
else:
datasvc.deploysvc.UpdateDeployment_Phase1(gddoc['application'], deployment_id, gitrepo_name,
progress=60,
step='Adding changes to repository')
datasvc.jobsvc.NewJobData({"ProcessGitdeploys": {gddoc['name']: "adding to repository"}})
try:
res = gdm.add_files_to_repo()
except:
exc_msg = str(sys.exc_info()[1]).split('\n')
exc_msg.insert(0, "ERROR: add_files_to_repo")
datasvc.deploysvc.UpdateDeployment_Phase1(gddoc['application'], deployment_id, gitrepo_name,
step=exc_msg)
return
logging.debug("_threadsafe_process_gitdeploy: git add result: {}".format(str(res)))
datasvc.deploysvc.UpdateDeployment_Phase1(gddoc['application'], deployment_id, gitrepo_name,
progress=70,
step='Committing changes to repository')
datasvc.jobsvc.NewJobData({"ProcessGitdeploys": {gddoc['name']: "committing"}})
try:
res = gdm.commit_to_repo(build_doc['build_name'])
except:
exc_msg = str(sys.exc_info()[1]).split('\n')
exc_msg.insert(0, "ERROR: commit_to_repo")
datasvc.deploysvc.UpdateDeployment_Phase1(gddoc['application'], deployment_id, gitrepo_name,
step=exc_msg)
return
logging.debug("_threadsafe_process_gitdeploy: git commit result: {}".format(str(res)))
try:
commit_hash = gdm.get_latest_commit_hash()
datasvc.deploysvc.UpdateDeployment(gddoc['application'], deployment_id,
{'commits': {gitrepo_name: str(commit_hash)}})
logging.debug("_threadsafe_process_gitdeploy: git commit hash: {}".format(str(commit_hash)))
except:
exc_msg = str(sys.exc_info()[1]).split('\n')
exc_msg.insert(0, "ERROR: get_commit_hash")
datasvc.deploysvc.UpdateDeployment_Phase1(gddoc['application'], deployment_id, gitrepo_name,
step=exc_msg)
datasvc.jobsvc.NewJobData({"ProcessGitdeploys": {gddoc['name']: "checking diff"}})
try:
res = gdm.inspect_latest_diff()
except:
exc_msg = str(sys.exc_info()[1]).split('\n')
exc_msg.insert(0, "ERROR: inspect_latest_diff")
datasvc.deploysvc.UpdateDeployment_Phase1(gddoc['application'], deployment_id, gitrepo_name,
step=exc_msg)
return
logging.debug("_threadsafe_process_gitdeploy: inspect diff result: {}".format(str(res)))
# change to a list of dicts without filenames as keys to keep mongo happy
changed_files = [{
'filename': k,
'deletions': res[k]['deletions'],
'lines': res[k]['lines'],
'insertions': res[k]['insertions']
} for k in res]
datasvc.deploysvc.UpdateDeployment_Phase1(gddoc['application'], deployment_id, gitrepo_name,
changed_files=changed_files)
datasvc.deploysvc.UpdateDeployment_Phase1(gddoc['application'], deployment_id, gitrepo_name,
progress=90,
step='Pushing changes to gitprovider')
datasvc.jobsvc.NewJobData({"ProcessGitdeploys": {gddoc['name']: "pushing"}})
try:
res = gdm.push_repo()
except:
exc_msg = str(sys.exc_info()[1]).split('\n')
exc_msg.insert(0, "ERROR: push_repo")
datasvc.deploysvc.UpdateDeployment_Phase1(gddoc['application'], deployment_id, gitrepo_name,
step=exc_msg)
return
logging.debug("_threadsafe_process_gitdeploy: git push result: {}".format(str(res)))
datasvc.deploysvc.UpdateDeployment_Phase1(gddoc['application'], deployment_id, gitrepo_name,
progress=100,
step='Complete')
try:
gdm.update_repo_last_build(build_doc['build_name'])
except:
exc_msg = str(sys.exc_info()[1]).split('\n')
exc_msg.insert(0, "ERROR: update_repo_last_build")
datasvc.deploysvc.UpdateDeployment_Phase1(gddoc['application'], deployment_id, gitrepo_name,
step=exc_msg)
return
def _threadsafe_pull_callback(results, tag, **kwargs):
'''
Passed to run_slses_async and is used to provide realtime updates to users polling the deploy job object
'''
try:
assert all([arg in kwargs for arg in ('datasvc', 'application', 'deployment_id', 'batch_number', 'gitdeploy')])
except AssertionError:
#can't log anything to the job object because we may not have a valid DataService instance
logging.error("***************** _threadsafe_pull_callback: AssertionError: incorrect kwargs ****************")
return
datasvc = kwargs['datasvc']
app = kwargs['application']
deployment_id = kwargs['deployment_id']
batch_number = kwargs['batch_number']
gitdeploy = kwargs['gitdeploy']
try:
datasvc.jobsvc.NewJobData({"DeployServers": {"results": results, "tag": tag}})
for r in results:
            #callback results always have a 'ret' key, but underneath it may be either a simple string
            #or a large nested object (for state call results), so we unpack state results if necessary
this_result = results[r]['ret']
datasvc.jobsvc.NewJobData(this_result)
if elita.util.type_check.is_dictlike(this_result): # state result
for state_res in this_result:
if "result" in this_result[state_res]:
state_comment = this_result[state_res]['comment'] if 'comment' in this_result[state_res] else state_res
stdout = this_result[state_res]["changes"]["stdout"] if 'changes' in this_result[state_res] and 'stdout' in this_result[state_res]['changes'] else "(none)"
stderr = this_result[state_res]["changes"]["stderr"] if 'changes' in this_result[state_res] and 'stderr' in this_result[state_res]['changes'] else "(none)"
if not this_result[state_res]["result"]: #error
logging.debug("_threadsafe_pull_callback: got error result ({}; {})".format(gitdeploy, r))
datasvc.jobsvc.NewJobData({'status': 'error', 'message': 'failing deployment due to detected error'})
datasvc.deploysvc.UpdateDeployment_Phase2(app, deployment_id, gitdeploy, [r], batch_number,
state="FAILURE: {}; stderr: {}; stdout: {}".format(state_comment, stderr, stdout))
datasvc.deploysvc.FailDeployment(app, deployment_id)
else:
logging.debug("_threadsafe_pull_callback: got successful result ({}; {}): {}".format(gitdeploy, r, state_comment))
datasvc.deploysvc.UpdateDeployment_Phase2(app, deployment_id, gitdeploy, [r], batch_number,
state=state_comment,
progress=66)
else: # simple result
logging.debug("_threadsafe_pull_callback: got simple return instead of results ({}; {})".format(gitdeploy, r))
datasvc.deploysvc.UpdateDeployment_Phase2(app, deployment_id, gitdeploy, [r], batch_number,
state="simple return: {}".format(results[r]['ret']))
except:
exc_type, exc_obj, tb = sys.exc_info()
datasvc.jobsvc.NewJobData({"_threadsafe_pull_callback EXCEPTION": traceback.format_exception(exc_type, exc_obj, tb)})
datasvc.deploysvc.FailDeployment(app, deployment_id)
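# Illustrative shape of an async salt result as unpacked by the callback above (all keys and
# values below are examples, not literal data):
#   {'server01': {'ret': {'git|state|pull|run': {'result': True,
#                                                'comment': 'git pull succeeded',
#                                                'changes': {'stdout': '...', 'stderr': ''}}}}}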
def _threadsafe_pull_gitdeploy(application, gitdeploy_struct, queue, settings, job_id, deployment_id, batch_number):
'''
Thread-safe way of performing a deployment SLS call for one specific gitdeploy on a group of servers
gitdeploy_struct: { "gitdeploy_name": [ list_of_servers_to_deploy_to ] }
'''
# Wrap in a big try/except so we can log any failures in phase2 progress and fail the deployment
try:
assert settings
assert job_id
assert gitdeploy_struct
client, datasvc = regen_datasvc(settings, job_id)
gd_name = gitdeploy_struct.keys()[0]
servers = gitdeploy_struct[gd_name]
except:
exc_msg = str(sys.exc_info()[1]).split('\n')
logging.error("************* _threadsafe_pull_gitdeploy: preamble: {} *********************".format(exc_msg))
return
try:
assert application
assert queue
assert deployment_id
assert all([elita.util.type_check.is_string(gd) for gd in gitdeploy_struct])
assert all([elita.util.type_check.is_seq(gitdeploy_struct[gd]) for gd in gitdeploy_struct])
assert isinstance(batch_number, int) and batch_number >= 0
sc = salt_control.SaltController(datasvc)
rc = salt_control.RemoteCommands(sc)
assert len(gitdeploy_struct) == 1
datasvc.deploysvc.UpdateDeployment_Phase2(application, deployment_id, gd_name, servers, batch_number,
progress=10,
state="Beginning deployment")
#until salt Helium is released, we can only execute an SLS *file* as opposed to a single module call
sls_map = {sc.get_gitdeploy_entry_name(application, gd_name): servers}
        if len(servers) == 0:
            datasvc.jobsvc.NewJobData({"DeployServers": {gd_name: "no servers"}})
            #put an empty result on the queue so the parent process still receives one result per gitdeploy
            queue.put_nowait({gd_name: {"raw_results": None, "errors": False, "error_results": {},
                                        "successes": False, "success_results": {}}})
            return True
gd_doc = datasvc.gitsvc.GetGitDeploy(application, gd_name)
branch = gd_doc['location']['default_branch']
path = gd_doc['location']['path']
        #verify that we have salt connectivity to the targets. Do up to three consecutive test.pings with 10 second timeouts;
        #if any target server still hasn't responded by the last attempt, fail the deployment
i = 1
while True:
datasvc.deploysvc.UpdateDeployment_Phase2(application, deployment_id, gd_name, servers, batch_number,
progress=15,
state="Verifying salt connectivity (try: {})".format(i))
res = rc.ping(servers)
if all([s in res for s in servers]):
logging.debug("_threadsafe_process_gitdeploy: verify salt: all servers returned (try: {})".format(i))
break
else:
missing_servers = list(set(servers) - set(res.keys()))
logging.debug("_threadsafe_process_gitdeploy: verify salt: error: servers missing: {} (try {})".format(missing_servers, i))
if i >= 3:
datasvc.deploysvc.UpdateDeployment_Phase2(application, deployment_id, gd_name, missing_servers, batch_number,
progress=15,
state="ERROR: no salt connectivity!".format(i))
datasvc.deploysvc.FailDeployment(application, deployment_id)
logging.error("No salt connectivity to servers: {} (after {} tries)".format(missing_servers, i))
return False
i += 1
#delete stale git index lock if it exists
datasvc.deploysvc.UpdateDeployment_Phase2(application, deployment_id, gd_name, servers, batch_number,
progress=25,
state="Removing git index lock if it exists")
res = rc.rm_file_if_exists(servers, "{}/.git/index.lock".format(path))
logging.debug("_threadsafe_process_gitdeploy: delete git index lock results: {}".format(str(res)))
#clear uncommitted changes on targets
datasvc.deploysvc.UpdateDeployment_Phase2(application, deployment_id, gd_name, servers, batch_number,
progress=33,
state="Clearing uncommitted changes")
res = rc.discard_git_changes(servers, path)
logging.debug("_threadsafe_process_gitdeploy: discard git changes result: {}".format(str(res)))
res = rc.checkout_branch(servers, path, branch)
logging.debug("_threadsafe_process_gitdeploy: git checkout result: {}".format(str(res)))
datasvc.jobsvc.NewJobData({"DeployServers": {gd_name: "deploying", "servers": servers}})
logging.debug("_threadsafe_pull_gitdeploy: sls_map: {}".format(sls_map))
datasvc.deploysvc.UpdateDeployment_Phase2(application, deployment_id, gd_name, servers, batch_number,
progress=50,
state="Issuing state commands (git pull, etc)")
res = rc.run_slses_async(_threadsafe_pull_callback, sls_map, args={'datasvc': datasvc, 'application': application,
'deployment_id': deployment_id,
'batch_number': batch_number,
'gitdeploy': gd_name})
logging.debug("_threadsafe_pull_gitdeploy: results: {}".format(res))
errors = dict()
successes = dict()
for r in res:
for host in r:
for cmd in r[host]['ret']:
if "gitdeploy" in cmd:
if "result" in r[host]['ret'][cmd]:
if not r[host]['ret'][cmd]["result"]:
errors[host] = r[host]['ret'][cmd]["changes"] if "changes" in r[host]['ret'][cmd] else r[host]['ret'][cmd]
else:
if host not in successes:
successes[host] = dict()
module, state, command, subcommand = str(cmd).split('|')
if state not in successes[host]:
successes[host][state] = dict()
successes[host][state][command] = {
"stdout": r[host]['ret'][cmd]["changes"]["stdout"],
"stderr": r[host]['ret'][cmd]["changes"]["stderr"],
"retcode": r[host]['ret'][cmd]["changes"]["retcode"],
}
if len(errors) > 0:
for e in errors:
datasvc.deploysvc.UpdateDeployment_Phase2(application, deployment_id, gd_name, [e], batch_number,
state="ERROR: {}".format(errors[e]))
logging.debug("_threadsafe_pull_gitdeploy: SLS error servers: {}".format(errors.keys()))
logging.debug("_threadsafe_pull_gitdeploy: SLS error responses: {}".format(errors))
if len(successes) > 0:
datasvc.deploysvc.UpdateDeployment_Phase2(application, deployment_id, gd_name, successes.keys(), batch_number,
progress=100, state="Complete")
        missing = list(set(servers).difference(set([host for r in res for host in r])))
if missing:
datasvc.deploysvc.UpdateDeployment_Phase2(application, deployment_id, gd_name, missing, batch_number,
state="ERROR: no results (timed out waiting for salt?)")
logging.debug("_threadsafe_pull_gitdeploy: error: empty results for: {}; possible salt timeout".format(missing))
datasvc.jobsvc.NewJobData({"_threadsafe_pull_gitdeploy": "empty results for {}".format(missing)})
datasvc.deploysvc.FailDeployment(application, deployment_id)
deploy_results = {
gd_name: {
"raw_results": res,
"errors": len(errors) > 0,
"error_results": errors,
"successes": len(successes) > 0,
"success_results": successes
}
}
queue.put_nowait(deploy_results)
datasvc.jobsvc.NewJobData({
"DeployServers": deploy_results
})
logging.debug("_threadsafe_pull_gitdeploy: finished ({})".format(gitdeploy_struct))
except:
exc_type, exc_obj, tb = sys.exc_info()
exc_msg = "ERROR: Exception in _threadsafe_pull_gitdeploy: {}".format(traceback.format_exception(exc_type, exc_obj, tb))
datasvc.deploysvc.UpdateDeployment_Phase2(application, deployment_id, gd_name, servers, batch_number,
state=exc_msg)
datasvc.deploysvc.FailDeployment(application, deployment_id)
class DeployController:
'''
Class that runs deploys. Only knows about server/gitdeploy pairs, so is used for both manual-style deployments
and group/environment deployments.
'''
__metaclass__ = elita.util.LoggingMetaClass
def __init__(self, datasvc, deployment_id):
self.deployment_id = deployment_id
self.datasvc = datasvc
def run(self, app_name, build_name, servers, gitdeploys, parallel=True, batch_number=0):
'''
1. Decompress build to gitdeploy dir and push
        a. Attempts to optimize by determining whether the build has already been decompressed to the gitdeploy, and skips it if so
2. Determine which gitdeploys have changes (if any)
a. Build a mapping of gitdeploys_with_changes -> [ servers_to_deploy_it_to ]
b. Perform the state calls only to the server/gitdeploy pairs that have changes
@type app_name: str
@type build_name: str
@type servers: list(str)
@type gitdeploys: list(str)
'''
assert app_name and build_name and servers and gitdeploys
assert elita.util.type_check.is_string(app_name)
assert elita.util.type_check.is_string(build_name)
assert elita.util.type_check.is_seq(servers)
assert elita.util.type_check.is_seq(gitdeploys)
assert isinstance(batch_number, int) and batch_number >= 0
build_doc = self.datasvc.buildsvc.GetBuild(app_name, build_name)
gitdeploy_docs = {gd: self.datasvc.gitsvc.GetGitDeploy(app_name, gd) for gd in gitdeploys}
queue = billiard.Queue()
procs = list()
#we need to get a list of gitdeploys with unique gitrepos, so build a reverse mapping
gitrepo_gitdeploy_mapping = {gitdeploy_docs[gd]['location']['gitrepo']['name']: gd for gd in gitdeploys}
self.datasvc.deploysvc.StartDeployment_Phase(app_name, self.deployment_id, 1)
for gr in gitrepo_gitdeploy_mapping:
gd = gitrepo_gitdeploy_mapping[gr]
gddoc = gitdeploy_docs[gd]
if parallel:
p = billiard.Process(target=_threadsafe_process_gitdeploy, name=gd,
args=(gddoc, build_doc, self.datasvc.settings,
self.datasvc.job_id, self.deployment_id))
p.start()
procs.append(p)
else:
_threadsafe_process_gitdeploy(gddoc, build_doc, self.datasvc.settings,
self.datasvc.job_id, self.deployment_id)
if parallel:
error = False
for p in procs:
p.join(150)
if p.is_alive():
p.terminate()
logging.error("ERROR: _threadsafe_process_gitdeploy: timeout waiting for child process ({})!".
format(p.name))
                    self.datasvc.jobsvc.NewJobData({'status': 'error',
                                                    'message': 'timeout waiting for child process (process_gitdeploy: {})'.format(p.name)})
self.datasvc.deploysvc.UpdateDeployment_Phase1(app_name, self.deployment_id, p.name,
step="ERROR: timed out waiting for child process")
error = True
                if p.exitcode != 0:
msg = "process killed by signal {}!".format(abs(p.exitcode)) if p.exitcode < 0 \
else "process died with exit code {}".format(p.exitcode)
logging.error("_threadsafe_process_gitdeploy: {}".format(msg))
                    self.datasvc.jobsvc.NewJobData({'status': 'error',
                                                    'message': '{} (process_gitdeploy: {})'.format(msg, p.name)})
self.datasvc.deploysvc.UpdateDeployment_Phase1(app_name, self.deployment_id, p.name,
step="ERROR: {}".format(msg))
error = True
if error:
self.datasvc.deploysvc.FailDeployment(app_name, self.deployment_id)
return False, None
        servers_by_gitdeploy = {gd: determine_deployable_servers(gitdeploy_docs[gd]['servers'], servers) for gd in gitdeploy_docs}
queue = billiard.Queue()
procs = list()
self.datasvc.deploysvc.StartDeployment_Phase(app_name, self.deployment_id, 2)
for gd in servers_by_gitdeploy:
if parallel:
p = billiard.Process(target=_threadsafe_pull_gitdeploy, name=gd,
args=(app_name, {gd: servers_by_gitdeploy[gd]}, queue, self.datasvc.settings,
self.datasvc.job_id, self.deployment_id, batch_number))
p.start()
procs.append(p)
else:
_threadsafe_pull_gitdeploy(app_name, {gd: servers_by_gitdeploy[gd]}, queue, self.datasvc.settings,
self.datasvc.job_id, self.deployment_id, batch_number)
        # pull results from the queue prior to joining to avoid deadlock. Note that Queue.get's
        # first positional argument is 'block', so the timeout must be passed by keyword. In
        # non-parallel mode no child processes exist, so expect one result per gitdeploy instead.
        results = list()
        expected_results = len(procs) if parallel else len(servers_by_gitdeploy)
        i = 0
        while i < expected_results:
            results.append(queue.get(timeout=150))
            i += 1
if parallel:
error = False
for p in procs:
p.join(150)
if p.is_alive():
p.terminate()
logging.error("_threadsafe_pull_gitdeploy: timeout waiting for child process ({})!".
format(p.name))
                    self.datasvc.jobsvc.NewJobData({'status': 'error',
                                                    'message': 'timeout waiting for child process (pull_gitdeploy: {})'.format(p.name)})
self.datasvc.deploysvc.UpdateDeployment_Phase2(app_name, self.deployment_id, p.name,
servers_by_gitdeploy[p.name], batch_number,
state="ERROR: timeout waiting for child process")
error = True
                if p.exitcode != 0:
msg = "process killed by signal {}!".format(abs(p.exitcode)) if p.exitcode < 0 \
else "process died with exit code {}".format(p.exitcode)
logging.error("_threadsafe_pull_gitdeploy: {}".format(msg))
                    self.datasvc.jobsvc.NewJobData({'status': 'error',
                                                    'message': '{} (pull_gitdeploy: {})'.format(msg, p.name)})
self.datasvc.deploysvc.UpdateDeployment_Phase2(app_name, self.deployment_id, p.name,
servers_by_gitdeploy[p.name], batch_number,
state="ERROR: {}".format(msg))
error = True
if error:
self.datasvc.deploysvc.FailDeployment(app_name, self.deployment_id)
return False, None
if not results:
return False, results
for r in results:
for gd in r:
if r[gd]['errors']:
return False, results
#update deployed_build
for gd in gitdeploys:
gdm = gitservice.GitDeployManager(gitdeploy_docs[gd], self.datasvc)
gdm.update_last_deployed(build_name)
return True, results
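
# Illustrative direct (manual-style) invocation sketch; the application, build, server and
# gitdeploy names are hypothetical:
#   dc = DeployController(datasvc, deployment_id)
#   ok, results = dc.run('myapp', 'build-123', ['server01'], ['mygitdeploy'], parallel=False)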
| |
import datetime
import warnings
from xml.dom import minidom
from django.contrib.syndication import views
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase
from django.utils import tzinfo
from django.utils.feedgenerator import rfc2822_date, rfc3339_date
from models import Entry
class FeedTestCase(TestCase):
fixtures = ['feeddata.json']
def assertChildNodes(self, elem, expected):
actual = set([n.nodeName for n in elem.childNodes])
expected = set(expected)
self.assertEqual(actual, expected)
def assertChildNodeContent(self, elem, expected):
for k, v in expected.items():
self.assertEqual(
elem.getElementsByTagName(k)[0].firstChild.wholeText, v)
def assertCategories(self, elem, expected):
self.assertEqual(set(i.firstChild.wholeText for i in elem.childNodes if i.nodeName == 'category'), set(expected))
######################################
# Feed view
######################################
class SyndicationFeedTest(FeedTestCase):
"""
Tests for the high-level syndication feed framework.
"""
urls = 'regressiontests.syndication.urls'
def test_rss2_feed(self):
"""
Test the structure and content of feeds generated by Rss201rev2Feed.
"""
response = self.client.get('/syndication/rss2/')
doc = minidom.parseString(response.content)
# Making sure there's only 1 `rss` element and that the correct
# RSS version was specified.
feed_elem = doc.getElementsByTagName('rss')
self.assertEqual(len(feed_elem), 1)
feed = feed_elem[0]
self.assertEqual(feed.getAttribute('version'), '2.0')
# Making sure there's only one `channel` element w/in the
# `rss` element.
chan_elem = feed.getElementsByTagName('channel')
self.assertEqual(len(chan_elem), 1)
chan = chan_elem[0]
# Find the last build date
d = Entry.objects.latest('date').date
ltz = tzinfo.LocalTimezone(d)
last_build_date = rfc2822_date(d.replace(tzinfo=ltz))
self.assertChildNodes(chan, ['title', 'link', 'description', 'language', 'lastBuildDate', 'item', 'atom:link', 'ttl', 'copyright', 'category'])
self.assertChildNodeContent(chan, {
'title': 'My blog',
'description': 'A more thorough description of my blog.',
'link': 'http://example.com/blog/',
'language': 'en',
'lastBuildDate': last_build_date,
#'atom:link': '',
'ttl': '600',
'copyright': 'Copyright (c) 2007, Sally Smith',
})
self.assertCategories(chan, ['python', 'django'])
# Ensure the content of the channel is correct
self.assertChildNodeContent(chan, {
'title': 'My blog',
'link': 'http://example.com/blog/',
})
# Check feed_url is passed
self.assertEqual(
chan.getElementsByTagName('atom:link')[0].getAttribute('href'),
'http://example.com/syndication/rss2/'
)
# Find the pubdate of the first feed item
d = Entry.objects.get(pk=1).date
ltz = tzinfo.LocalTimezone(d)
pub_date = rfc2822_date(d.replace(tzinfo=ltz))
items = chan.getElementsByTagName('item')
self.assertEqual(len(items), Entry.objects.count())
self.assertChildNodeContent(items[0], {
'title': 'My first entry',
'description': 'Overridden description: My first entry',
'link': 'http://example.com/blog/1/',
'guid': 'http://example.com/blog/1/',
'pubDate': pub_date,
'author': 'test@example.com (Sally Smith)',
})
self.assertCategories(items[0], ['python', 'testing'])
for item in items:
self.assertChildNodes(item, ['title', 'link', 'description', 'guid', 'category', 'pubDate', 'author'])
def test_rss091_feed(self):
"""
Test the structure and content of feeds generated by RssUserland091Feed.
"""
response = self.client.get('/syndication/rss091/')
doc = minidom.parseString(response.content)
# Making sure there's only 1 `rss` element and that the correct
# RSS version was specified.
feed_elem = doc.getElementsByTagName('rss')
self.assertEqual(len(feed_elem), 1)
feed = feed_elem[0]
self.assertEqual(feed.getAttribute('version'), '0.91')
# Making sure there's only one `channel` element w/in the
# `rss` element.
chan_elem = feed.getElementsByTagName('channel')
self.assertEqual(len(chan_elem), 1)
chan = chan_elem[0]
self.assertChildNodes(chan, ['title', 'link', 'description', 'language', 'lastBuildDate', 'item', 'atom:link', 'ttl', 'copyright', 'category'])
# Ensure the content of the channel is correct
self.assertChildNodeContent(chan, {
'title': 'My blog',
'link': 'http://example.com/blog/',
})
self.assertCategories(chan, ['python', 'django'])
# Check feed_url is passed
self.assertEqual(
chan.getElementsByTagName('atom:link')[0].getAttribute('href'),
'http://example.com/syndication/rss091/'
)
items = chan.getElementsByTagName('item')
self.assertEqual(len(items), Entry.objects.count())
self.assertChildNodeContent(items[0], {
'title': 'My first entry',
'description': 'Overridden description: My first entry',
'link': 'http://example.com/blog/1/',
})
for item in items:
self.assertChildNodes(item, ['title', 'link', 'description'])
self.assertCategories(item, [])
def test_atom_feed(self):
"""
Test the structure and content of feeds generated by Atom1Feed.
"""
response = self.client.get('/syndication/atom/')
feed = minidom.parseString(response.content).firstChild
self.assertEqual(feed.nodeName, 'feed')
self.assertEqual(feed.getAttribute('xmlns'), 'http://www.w3.org/2005/Atom')
self.assertChildNodes(feed, ['title', 'subtitle', 'link', 'id', 'updated', 'entry', 'rights', 'category', 'author'])
for link in feed.getElementsByTagName('link'):
if link.getAttribute('rel') == 'self':
self.assertEqual(link.getAttribute('href'), 'http://example.com/syndication/atom/')
entries = feed.getElementsByTagName('entry')
self.assertEqual(len(entries), Entry.objects.count())
for entry in entries:
self.assertChildNodes(entry, ['title', 'link', 'id', 'summary', 'category', 'updated', 'rights', 'author'])
summary = entry.getElementsByTagName('summary')[0]
self.assertEqual(summary.getAttribute('type'), 'html')
def test_custom_feed_generator(self):
response = self.client.get('/syndication/custom/')
feed = minidom.parseString(response.content).firstChild
self.assertEqual(feed.nodeName, 'feed')
self.assertEqual(feed.getAttribute('django'), 'rocks')
self.assertChildNodes(feed, ['title', 'subtitle', 'link', 'id', 'updated', 'entry', 'spam', 'rights', 'category', 'author'])
entries = feed.getElementsByTagName('entry')
self.assertEqual(len(entries), Entry.objects.count())
for entry in entries:
self.assertEqual(entry.getAttribute('bacon'), 'yum')
self.assertChildNodes(entry, ['title', 'link', 'id', 'summary', 'ministry', 'rights', 'author', 'updated', 'category'])
summary = entry.getElementsByTagName('summary')[0]
self.assertEqual(summary.getAttribute('type'), 'html')
def test_title_escaping(self):
"""
Tests that titles are escaped correctly in RSS feeds.
"""
response = self.client.get('/syndication/rss2/')
doc = minidom.parseString(response.content)
for item in doc.getElementsByTagName('item'):
link = item.getElementsByTagName('link')[0]
if link.firstChild.wholeText == 'http://example.com/blog/4/':
title = item.getElementsByTagName('title')[0]
self.assertEqual(title.firstChild.wholeText, u'A & B < C > D')
def test_naive_datetime_conversion(self):
"""
Test that datetimes are correctly converted to the local time zone.
"""
        # Naive datetimes passed in get converted to the local time zone, so
        # check the received zone offset against the local offset.
response = self.client.get('/syndication/naive-dates/')
doc = minidom.parseString(response.content)
updated = doc.getElementsByTagName('updated')[0].firstChild.wholeText
d = Entry.objects.latest('date').date
ltz = tzinfo.LocalTimezone(d)
latest = rfc3339_date(d.replace(tzinfo=ltz))
self.assertEqual(updated, latest)
def test_aware_datetime_conversion(self):
"""
Test that datetimes with timezones don't get trodden on.
"""
response = self.client.get('/syndication/aware-dates/')
doc = minidom.parseString(response.content)
updated = doc.getElementsByTagName('updated')[0].firstChild.wholeText
self.assertEqual(updated[-6:], '+00:42')
def test_feed_url(self):
"""
Test that the feed_url can be overridden.
"""
response = self.client.get('/syndication/feedurl/')
doc = minidom.parseString(response.content)
for link in doc.getElementsByTagName('link'):
if link.getAttribute('rel') == 'self':
self.assertEqual(link.getAttribute('href'), 'http://example.com/customfeedurl/')
def test_secure_urls(self):
"""
Test URLs are prefixed with https:// when feed is requested over HTTPS.
"""
response = self.client.get('/syndication/rss2/', **{
'wsgi.url_scheme': 'https',
})
doc = minidom.parseString(response.content)
chan = doc.getElementsByTagName('channel')[0]
self.assertEqual(
chan.getElementsByTagName('link')[0].firstChild.wholeText[0:5],
'https'
)
atom_link = chan.getElementsByTagName('atom:link')[0]
self.assertEqual(atom_link.getAttribute('href')[0:5], 'https')
for link in doc.getElementsByTagName('link'):
if link.getAttribute('rel') == 'self':
self.assertEqual(link.getAttribute('href')[0:5], 'https')
def test_item_link_error(self):
"""
        Test that an ImproperlyConfigured is raised if no link could be found
for the item(s).
"""
self.assertRaises(ImproperlyConfigured,
self.client.get,
'/syndication/articles/')
def test_template_feed(self):
"""
Test that the item title and description can be overridden with
templates.
"""
response = self.client.get('/syndication/template/')
doc = minidom.parseString(response.content)
feed = doc.getElementsByTagName('rss')[0]
chan = feed.getElementsByTagName('channel')[0]
items = chan.getElementsByTagName('item')
self.assertChildNodeContent(items[0], {
'title': 'Title in your templates: My first entry',
'description': 'Description in your templates: My first entry',
'link': 'http://example.com/blog/1/',
})
def test_add_domain(self):
"""
Test add_domain() prefixes domains onto the correct URLs.
"""
self.assertEqual(
views.add_domain('example.com', '/foo/?arg=value'),
'http://example.com/foo/?arg=value'
)
self.assertEqual(
views.add_domain('example.com', '/foo/?arg=value', True),
'https://example.com/foo/?arg=value'
)
self.assertEqual(
views.add_domain('example.com', 'http://djangoproject.com/doc/'),
'http://djangoproject.com/doc/'
)
self.assertEqual(
views.add_domain('example.com', 'https://djangoproject.com/doc/'),
'https://djangoproject.com/doc/'
)
self.assertEqual(
views.add_domain('example.com', 'mailto:uhoh@djangoproject.com'),
'mailto:uhoh@djangoproject.com'
)
| |
import numpy
class ApproximateInferenceEngine(object):
def __init__(self, network):
"""Defines an engine for performing approximate inference in a discrete Bayesian network.
Keyword arguments:
network -- A 'libpgm.discretebayesiannetwork.DiscreteBayesianNetwork' object representing a discrete Bayesian network.
"""
self.network = network
#a topological order will be needed in the process of generating variable assignments
self.network.toporder()
def perform_rs_inference(self, query_variable, evidence_variables, number_of_samples):
"""Calculates the probability distribution P(query_variable|evidence_variables)
using rejection sampling. Assumes that we have only one query variable.
Keyword arguments:
query_variable -- The name of the query variable (as called in the network).
evidence_variables -- A dictionary containing variable names as keys and observed values as values.
number_of_samples -- The number of samples that should be used in the sampling process.
Returns:
distribution -- A dictionary containing the values of the query variable as keys and the probabilities as values.
"""
evidence_supporting_sample_counter = 0.
distribution = dict()
number_of_variable_values = len(self.network.Vdata[query_variable]['vals'])
for i in xrange(number_of_variable_values):
distribution[self.network.Vdata[query_variable]['vals'][i]] = 0.
for i in xrange(number_of_samples):
sample_assignments = self.generate_rs_sample()
supports_evidence = True
for key in evidence_variables.keys():
if evidence_variables[key] != sample_assignments[key]:
supports_evidence = False
break
if supports_evidence:
evidence_supporting_sample_counter = evidence_supporting_sample_counter + 1.
distribution[sample_assignments[query_variable]] = distribution[sample_assignments[query_variable]] + 1.
if evidence_supporting_sample_counter > 1e-10:
for key in distribution.keys():
distribution[key] = distribution[key] / evidence_supporting_sample_counter
return distribution
def perform_lw_inference(self, query_variable, evidence_variables, number_of_samples):
"""Calculates the probability distribution P(query_variable|evidence_variables)
using likelihood weighting. Assumes that we have only one query variable.
Keyword arguments:
query_variable -- The name of the query variable (as called in the network).
evidence_variables -- A dictionary containing variable names as keys and observed values as values.
number_of_samples -- The number of samples that should be used in the sampling process.
Returns:
distribution -- A dictionary containing the values of the query variable as keys and the probabilities as values.
"""
distribution = dict()
number_of_variable_values = len(self.network.Vdata[query_variable]['vals'])
for i in xrange(number_of_variable_values):
distribution[self.network.Vdata[query_variable]['vals'][i]] = 0.
for i in xrange(number_of_samples):
sample_assignments, weight = self.generate_lw_sample(evidence_variables)
distribution[sample_assignments[query_variable]] = distribution[sample_assignments[query_variable]] + weight
normaliser = 0.
for key in distribution.keys():
normaliser = normaliser + distribution[key]
if normaliser > 1e-10:
for key in distribution.keys():
distribution[key] = distribution[key] / normaliser
return distribution
def perform_gibbs_inference(self, query_variable, evidence_variables, number_of_samples):
"""Calculates the probability distribution P(query_variable|evidence_variables)
using Gibbs sampling. Assumes that we have only one query variable.
Keyword arguments:
query_variable -- The name of the query variable (as called in the network).
evidence_variables -- A dictionary containing variable names as keys and observed values as values.
number_of_samples -- The number of samples that should be used in the sampling process.
Returns:
distribution -- A dictionary containing the values of the query variable as keys and the probabilities as values.
"""
distribution = dict()
number_of_variable_values = len(self.network.Vdata[query_variable]['vals'])
for i in xrange(number_of_variable_values):
distribution[self.network.Vdata[query_variable]['vals'][i]] = 0.
variable_assignments = dict()
#we initialise the variables randomly before generating samples
for _,variable in enumerate(self.network.V):
if variable in evidence_variables.keys():
variable_assignments[variable] = evidence_variables[variable]
else:
value_index = numpy.random.randint(0,len(self.network.Vdata[variable]['vals']))
variable_assignments[variable] = self.network.Vdata[variable]['vals'][value_index]
for i in xrange(number_of_samples):
variable_assignments = self.generate_gibbs_sample(variable_assignments, evidence_variables)
distribution[variable_assignments[query_variable]] = distribution[variable_assignments[query_variable]] + 1.
normaliser = 0.
for key in distribution.keys():
normaliser = normaliser + distribution[key]
if normaliser > 1e-10:
for key in distribution.keys():
distribution[key] = distribution[key] / normaliser
return distribution
def generate_rs_sample(self):
"""Generates a random assignment for the variables in the network.
The assignment respects the conditional probabilities in the network.
Returns:
assigned_values -- A dictionary containing variable names and their assigned values.
"""
number_of_variables = len(self.network.V)
assigned_values = dict()
for i in xrange(number_of_variables):
assigned_values[self.network.V[i]] = ''
for i in xrange(number_of_variables):
current_variable = self.network.V[i]
parent_values = self.get_parent_values(current_variable, assigned_values)
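            #build the cumulative distribution over the variable's values so a single uniform
            #random draw can select a value (inverse-CDF sampling)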
cumulative_distribution = [0.]
            if parent_values == None:
                number_of_values = len(self.network.Vdata[current_variable]['cprob'])
                for j in xrange(number_of_values):
                    cumulative_distribution.append(cumulative_distribution[j] + self.network.Vdata[current_variable]['cprob'][j])
            else:
                number_of_values = len(self.network.Vdata[current_variable]['cprob'][parent_values])
                for j in xrange(number_of_values):
                    cumulative_distribution.append(cumulative_distribution[j] + self.network.Vdata[current_variable]['cprob'][parent_values][j])
value_index = 1
number_of_values = len(cumulative_distribution)
random_number = numpy.random.rand()
while value_index < number_of_values and random_number > cumulative_distribution[value_index]:
value_index = value_index + 1
#we decrease the index by 1 because of the additional value in the cumulative distribution array
value_index = value_index - 1
assigned_values[current_variable] = self.network.Vdata[current_variable]['vals'][value_index]
return assigned_values
def generate_lw_sample(self, evidence_variables):
"""Generates a random assignment for the variables in the network.
The assignment respects the conditional probabilities in the network.
Keyword arguments:
evidence_variables -- A dictionary containing variable names as keys and observed values as values.
Returns:
assigned_values -- A dictionary containing variable names and their assigned values.
"""
number_of_variables = len(self.network.V)
assigned_values = dict()
for i in xrange(number_of_variables):
if self.network.V[i] in evidence_variables.keys():
assigned_values[self.network.V[i]] = evidence_variables[self.network.V[i]]
else:
assigned_values[self.network.V[i]] = ''
weight = 1.
for i in xrange(number_of_variables):
current_variable = self.network.V[i]
parent_values = self.get_parent_values(current_variable, assigned_values)
#we update the weight if we are sampling an evidence variable
if self.network.V[i] in evidence_variables.keys():
value_index = self.network.Vdata[current_variable]['vals'].index(evidence_variables[self.network.V[i]])
if parent_values == None:
weight = weight * self.network.Vdata[current_variable]['cprob'][value_index]
else:
weight = weight * self.network.Vdata[current_variable]['cprob'][parent_values][value_index]
else:
cumulative_distribution = [0.]
                if parent_values == None:
                    number_of_values = len(self.network.Vdata[current_variable]['cprob'])
                    for j in xrange(number_of_values):
                        cumulative_distribution.append(cumulative_distribution[j] + self.network.Vdata[current_variable]['cprob'][j])
                else:
                    number_of_values = len(self.network.Vdata[current_variable]['cprob'][parent_values])
                    for j in xrange(number_of_values):
                        cumulative_distribution.append(cumulative_distribution[j] + self.network.Vdata[current_variable]['cprob'][parent_values][j])
value_index = 1
number_of_values = len(cumulative_distribution)
random_number = numpy.random.rand()
while value_index < number_of_values and random_number > cumulative_distribution[value_index]:
value_index = value_index + 1
#we decrease the index by 1 because of the additional value in the cumulative distribution array
value_index = value_index - 1
assigned_values[current_variable] = self.network.Vdata[current_variable]['vals'][value_index]
return assigned_values, weight
def generate_gibbs_sample(self, variable_assignments, evidence_variables):
"""Generates a random assignment for the non-evidence variables in the network,
sampling each of them given their Markov blanket.
Keyword arguments:
variable_assignments -- A dictionary containing variable names as keys and variable assignments as values.
evidence_variables -- A dictionary containing variable names as keys and observed values as values.
Returns:
variable_assignments -- A dictionary containing variable names as keys and variable assignments as values.
"""
variables_to_sample = list(set(self.network.V) - set(evidence_variables.keys()))
for _,variable in enumerate(variables_to_sample):
#we calculate the product of the probabilities of the children
#given their parents if the variable has any children
value_probabilities = dict()
for _,value in enumerate(self.network.Vdata[variable]['vals']):
value_probabilities[value] = 1.
if self.network.Vdata[variable]['children'] != None:
for _,value in enumerate(self.network.Vdata[variable]['vals']):
alternative_assignments = dict(variable_assignments)
alternative_assignments[variable] = value
children_probability_product = 1.
for _,child in enumerate(self.network.Vdata[variable]['children']):
value_index = self.network.Vdata[child]['vals'].index(variable_assignments[child])
parent_values = self.get_parent_values(child, alternative_assignments)
children_probability_product = children_probability_product * self.network.Vdata[child]['cprob'][parent_values][value_index]
value_probabilities[value] = children_probability_product
parent_values = self.get_parent_values(variable, variable_assignments)
normaliser = 0.
#we multiply the children probability by the probability of the
#current variable given its parents (or by its prior if it has no parents)
if parent_values == None:
for i,value in enumerate(self.network.Vdata[variable]['vals']):
value_probabilities[value] = value_probabilities[value] * self.network.Vdata[variable]['cprob'][i]
normaliser = normaliser + value_probabilities[value]
else:
for i,value in enumerate(self.network.Vdata[variable]['vals']):
value_probabilities[value] = value_probabilities[value] * self.network.Vdata[variable]['cprob'][parent_values][i]
normaliser = normaliser + value_probabilities[value]
for _,key in enumerate(value_probabilities.keys()):
value_probabilities[key] = value_probabilities[key] / normaliser
cumulative_distribution = [0.]
for i,value in enumerate(self.network.Vdata[variable]['vals']):
cumulative_distribution.append(cumulative_distribution[i] + value_probabilities[value])
value_index = 1
number_of_values = len(cumulative_distribution)
random_number = numpy.random.rand()
while value_index < number_of_values and random_number > cumulative_distribution[value_index]:
value_index = value_index + 1
#we decrease the index by 1 because of the additional value in the cumulative distribution array
value_index = value_index - 1
variable_assignments[variable] = self.network.Vdata[variable]['vals'][value_index]
return variable_assignments
def get_parent_values(self, variable, assigned_values):
"""Returns the assigned values to the parent variables of a given variable.
Keyword arguments:
variable -- A string representing a variable in the network.
assigned_values -- A dictionary containing value assignments to variables.
Returns:
parent_values_string -- A string representing the values assigned to the parents of the given variable.
The string is in a format compatible with the network representation.
"""
if self.network.Vdata[variable]['parents'] == None:
return None
else:
number_of_parents = len(self.network.Vdata[variable]['parents'])
parent_values = []
for i in xrange(number_of_parents):
current_parent = self.network.Vdata[variable]['parents'][i]
parent_values.append(assigned_values[current_parent])
parent_values_string = "[" + ", ".join("'" + x + "'" for x in parent_values) + "]"
return parent_values_string
| |
#!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# copy from {@link https://github.com/apache/kafka/blob/trunk/kafka-patch-review.py}
import argparse
import sys
import os
import time
import datetime
import tempfile
import commands
import getpass
from jira.client import JIRA
def get_jira_config():
# read the config file
    home = os.getenv('HOME')
    home = home.rstrip('/')
if not (os.path.isfile(home + '/jira.ini')):
jira_user=raw_input('JIRA user :')
jira_pass=getpass.getpass('JIRA password :')
jira_config = {'user':jira_user, 'password':jira_pass}
return jira_config
else:
jira_config = dict(line.strip().split('=') for line in open(home + '/jira.ini'))
return jira_config
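# Example ~/jira.ini (parsed above as one key=value pair per line; values are hypothetical):
#   user=jdoe
#   password=secret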
def get_jira(jira_config):
options = {
'server': 'https://issues.apache.org/jira'
}
jira = JIRA(options=options,basic_auth=(jira_config['user'], jira_config['password']))
# (Force) verify the auth was really done
jira_session=jira.session()
if (jira_session is None):
raise Exception("Failed to login to the JIRA instance")
return jira
def cmd_exists(cmd):
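    # returns the raw exit status of the command, so 0 means the tool exists and ran successfully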
status, result = commands.getstatusoutput(cmd)
return status
def main():
''' main(), shut up, pylint '''
popt = argparse.ArgumentParser(description='BookKeeper patch review tool')
popt.add_argument('-b', '--branch', action='store', dest='branch', required=True, help='Tracking branch to create diff against')
popt.add_argument('-j', '--jira', action='store', dest='jira', required=True, help='JIRA corresponding to the reviewboard')
popt.add_argument('-s', '--summary', action='store', dest='summary', required=False, help='Summary for the reviewboard')
popt.add_argument('-d', '--description', action='store', dest='description', required=False, help='Description for reviewboard')
popt.add_argument('-r', '--rb', action='store', dest='reviewboard', required=False, help='Review board that needs to be updated')
popt.add_argument('-t', '--testing-done', action='store', dest='testing', required=False, help='Text for the Testing Done section of the reviewboard')
popt.add_argument('-db', '--debug', action='store_true', required=False, help='Enable debug mode')
opt = popt.parse_args()
post_review_tool = None
if (cmd_exists("post-review") == 0):
post_review_tool = "post-review"
elif (cmd_exists("rbt") == 0):
post_review_tool = "rbt post"
else:
print "please install RBTools. See https://www.reviewboard.org/docs/rbtools/dev/ for details."
sys.exit(1)
patch_file=tempfile.gettempdir() + "/" + opt.jira + ".patch"
if opt.reviewboard:
ts = time.time()
st = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d_%H:%M:%S')
patch_file=tempfile.gettempdir() + "/" + opt.jira + '_' + st + '.patch'
# first check if rebase is needed
git_branch_hash="git rev-parse " + opt.branch
p_now=os.popen(git_branch_hash)
branch_now=p_now.read()
p_now.close()
git_common_ancestor="git merge-base " + opt.branch + " HEAD"
p_then=os.popen(git_common_ancestor)
branch_then=p_then.read()
p_then.close()
if branch_now != branch_then:
print 'ERROR: Your current working branch is from an older version of ' + opt.branch + '. Please rebase first by using git pull --rebase'
sys.exit(1)
git_configure_reviewboard="git config reviewboard.url https://reviews.apache.org"
print "Configuring reviewboard url to https://reviews.apache.org"
p=os.popen(git_configure_reviewboard)
p.close()
git_remote_update="git remote update"
print "Updating your remote branches to pull the latest changes"
p=os.popen(git_remote_update)
p.close()
# Get JIRA configuration and login to JIRA to ensure the credentials work, before publishing the patch to the review board
print "Verifying JIRA connection configurations"
try:
jira_config=get_jira_config()
jira=get_jira(jira_config)
except:
print "Failed to login to the JIRA instance", sys.exc_info()[0], sys.exc_info()[1]
sys.exit(1)
git_command="git diff --no-prefix " + opt.branch + " > " + patch_file
if opt.debug:
print git_command
p=os.popen(git_command)
p.close()
print 'Getting latest patch attached to the JIRA'
tmp_dir = tempfile.mkdtemp()
get_latest_patch_command="""
PATCHFILE={0}/{1}.patch
jiraPage={0}/jira.txt
curl "https://issues.apache.org/jira/browse/{1}" > {0}/jira.txt
if [[ `grep -c 'Patch Available' {0}/jira.txt` == 0 ]] ; then
echo "{1} is not \"Patch Available\". Exiting."
echo
exit 1
fi
relativePatchURL=`grep -o '"/jira/secure/attachment/[0-9]*/[^"]*' {0}/jira.txt \
| grep -v -e 'htm[l]*$' | sort | tail -1 \
| grep -o '/jira/secure/attachment/[0-9]*/[^"]*'`
patchURL="https://issues.apache.org$relativePatchURL"
curl $patchURL > {0}/{1}.patch
""".format(tmp_dir, opt.jira)
p=os.popen(get_latest_patch_command)
p.close()
previous_patch=tmp_dir + "/" + opt.jira + ".patch"
diff_file=tmp_dir + "/" + opt.jira + ".diff"
if os.path.isfile(previous_patch) and os.stat(previous_patch).st_size > 0:
print 'Creating diff with previous version of patch uploaded to JIRA'
diff_command = "diff " + previous_patch+ " " + patch_file + " > " + diff_file
try:
p=os.popen(diff_command)
sys.stdout.flush()
p.close()
except:
pass
print 'Diff with previous version of patch uploaded to JIRA is saved to ' + diff_file
    print 'Checking if there are changes that need to be pushed'
if os.stat(diff_file).st_size == 0:
print 'No changes found on top of changes uploaded to JIRA'
print 'Aborting'
sys.exit(1)
rb_command= post_review_tool + " --publish --tracking-branch " + opt.branch + " --target-groups=bookkeeper --bugs-closed=" + opt.jira
if opt.debug:
rb_command=rb_command + " --debug"
summary="Patch for " + opt.jira
if opt.summary:
summary=opt.summary
rb_command=rb_command + " --summary \"" + summary + "\""
if opt.description:
rb_command=rb_command + " --description \"" + opt.description + "\""
if opt.reviewboard:
rb_command=rb_command + " -r " + opt.reviewboard
if opt.testing:
rb_command=rb_command + " --testing-done=" + opt.testing
if opt.debug:
print rb_command
p=os.popen(rb_command)
rb_url=""
for line in p:
print line
if line.startswith('http'):
rb_url = line
elif line.startswith("There don't seem to be any diffs"):
print 'ERROR: Your reviewboard was not created/updated since there was no diff to upload. The reasons that can cause this issue are 1) Your diff is not checked into your local branch. Please check in the diff to the local branch and retry 2) You are not specifying the local branch name as part of the --branch option. Please specify the remote branch name obtained from git branch -r'
p.close()
sys.exit(1)
elif line.startswith("Your review request still exists, but the diff is not attached") and not opt.debug:
print 'ERROR: Your reviewboard was not created/updated. Please run the script with the --debug option to troubleshoot the problem'
p.close()
sys.exit(1)
if p.close() != None:
print 'ERROR: reviewboard update failed. Exiting.'
sys.exit(1)
if opt.debug:
print 'rb url=',rb_url
print 'Creating diff against', opt.branch, 'and uploading patch to JIRA',opt.jira
issue = jira.issue(opt.jira)
attachment=open(patch_file)
jira.add_attachment(issue,attachment)
attachment.close()
comment="Created reviewboard "
if not opt.reviewboard:
print 'Created a new reviewboard',rb_url,
else:
print 'Updated reviewboard',rb_url
comment="Updated reviewboard "
comment = comment + rb_url + ' against branch ' + opt.branch
jira.add_comment(opt.jira, comment)
#update the JIRA status to PATCH AVAILABLE
transitions = jira.transitions(issue)
    transitionsMap = {}
for t in transitions:
transitionsMap[t['name']] = t['id']
if('Submit Patch' in transitionsMap):
        jira.transition_issue(issue, transitionsMap['Submit Patch'], assignee={'name': jira_config['user']})
if __name__ == '__main__':
sys.exit(main())
| |
# -*- coding: utf-8 -*-
""" Sahana Eden Delphi Decision Maker Model
@copyright: 2009-2012 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ["S3DelphiModel",
"S3DelphiUser",
]
from gluon import *
from gluon.storage import Storage
from ..s3 import *
# =============================================================================
class S3DelphiModel(S3Model):
"""
Delphi Decision Maker
"""
names = ["delphi_group",
"delphi_membership",
"delphi_problem",
"delphi_solution",
"delphi_vote",
"delphi_comment",
"delphi_solution_represent",
]
def model(self):
T = current.T
db = current.db
UNKNOWN_OPT = current.messages.UNKNOWN_OPT
add_component = self.add_component
configure = self.configure
crud_strings = current.response.s3.crud_strings
define_table = self.define_table
# ---------------------------------------------------------------------
# Groups
# ---------------------------------------------------------------------
tablename = "delphi_group"
table = define_table(tablename,
Field("name", notnull=True, unique=True,
label = T("Group Title")),
Field("description", "text",
label = T("Description")),
Field("active", "boolean", default=True,
label = T("Active")),
*s3_meta_fields()
)
# CRUD Strings
ADD_GROUP = T("Add Group")
crud_strings[tablename] = Storage(
title_create = ADD_GROUP,
title_display = T("Group Details"),
title_list = T("Groups"),
title_update = T("Edit Group"),
title_search = T("Search Groups"),
subtitle_create = T("Add New Group"),
label_list_button = T("List Groups"),
label_create_button = ADD_GROUP,
label_delete_button = T("Delete Group"),
msg_record_created = T("Group added"),
msg_record_modified = T("Group updated"),
msg_record_deleted = T("Group deleted"),
msg_list_empty = T("No Groups currently defined"))
configure(tablename,
list_fields=["id",
"name",
"description"])
group_id = S3ReusableField("group_id", table, notnull=True,
label = T("Problem Group"),
requires = IS_ONE_OF(db, "delphi_group.id",
self.delphi_group_represent
),
represent = self.delphi_group_represent)
user_id = S3ReusableField("user_id", current.auth.settings.table_user,
notnull=True,
label = T("User"),
requires = IS_ONE_OF(db, "auth_user.id",
s3_auth_user_represent),
represent = s3_auth_user_represent)
# Memberships as component of Groups
add_component("delphi_membership",
delphi_group="group_id")
# Problems as component of Groups
add_component("delphi_problem",
delphi_group="group_id")
configure("delphi_group",
deduplicate=self.group_duplicate)
# ---------------------------------------------------------------------
# Group Membership
# ---------------------------------------------------------------------
delphi_role_opts = {
1:T("Guest"),
2:T("Contributor"),
3:T("Participant"),
4:T("Moderator")
}
tablename = "delphi_membership"
table = define_table(tablename,
group_id(),
user_id(),
Field("description",
label = T("Description")),
# @ToDo: Change how Membership Requests work
Field("req", "boolean", default=False,
label = T("Request")), # Membership Request
Field("status", "integer", default=3,
label = T("Status"),
requires = IS_IN_SET(delphi_role_opts,
zero=None),
represent = lambda opt: \
delphi_role_opts.get(opt, UNKNOWN_OPT),
comment = DIV(_class="tooltip",
_title="%s|%s|%s|%s|%s" % (T("Status"),
T("Guests can view all details"),
T("A Contributor can additionally Post comments to the proposed Solutions & add alternative Solutions"),
T("A Participant can additionally Vote"),
T("A Moderator can additionally create Problems & control Memberships")))
),
*s3_meta_fields()
)
# CRUD Strings
ADD_MEMBERSHIP = T("Add Membership")
crud_strings[tablename] = Storage(
title_create = ADD_MEMBERSHIP,
title_display = T("Membership Details"),
title_list = T("Memberships"),
title_update = T("Edit Membership"),
title_search = T("Search Memberships"),
subtitle_create = T("Add New Membership"),
label_list_button = T("List Memberships"),
label_create_button = ADD_MEMBERSHIP,
label_delete_button = T("Remove Membership"),
msg_record_created = T("Membership added"),
msg_record_modified = T("Membership updated"),
msg_record_deleted = T("Membership deleted"),
msg_list_empty = T("No Memberships currently defined"))
configure(tablename,
list_fields=["id",
"group_id",
"user_id",
"status",
"req"])
# ---------------------------------------------------------------------
# Problems
# ---------------------------------------------------------------------
tablename = "delphi_problem"
table = define_table(tablename,
group_id(),
Field("name", notnull=True, unique=True,
label = T("Problem Title")),
Field("description", "text",
represent = s3_comments_represent,
label = T("Description")),
Field("criteria", "text", notnull=True,
label = T("Criteria")),
Field("active", "boolean", default=True,
label = T("Active")),
*s3_meta_fields()
)
table.modified_on.label = T("Last Modification")
# CRUD Strings
ADD_PROBLEM = T("Add Problem")
crud_strings[tablename] = Storage(
title_create = ADD_PROBLEM,
title_display = T("Problem Details"),
title_list = T("Problems"),
title_update = T("Edit Problem"),
title_search = T("Search Problems"),
subtitle_create = T("Add New Problem"),
label_list_button = T("List Problems"),
label_create_button = ADD_PROBLEM,
label_delete_button = T("Delete Problem"),
msg_record_created = T("Problem added"),
msg_record_modified = T("Problem updated"),
msg_record_deleted = T("Problem deleted"),
msg_list_empty = T("No Problems currently defined"))
configure(tablename,
list_fields=["id",
"group_id",
"name",
"description",
"created_by",
"modified_on"])
problem_id = S3ReusableField("problem_id", table, notnull=True,
label = T("Problem"),
requires = IS_ONE_OF(db, "delphi_problem.id",
self.delphi_problem_represent
),
represent = self.delphi_problem_represent)
# Solutions as component of Problems
add_component("delphi_solution",
delphi_problem="problem_id")
configure("delphi_problem",
deduplicate=self.problem_duplicate)
# ---------------------------------------------------------------------
# Solutions
# ---------------------------------------------------------------------
tablename = "delphi_solution"
table = define_table(tablename,
problem_id(),
Field("name",
label = T("Title"),
requires = IS_NOT_EMPTY()),
Field("description", "text",
represent = s3_comments_represent,
label = T("Description")),
Field("changes", "integer",
default = 0,
writable = False,
label = T("Changes")),
*s3_meta_fields()
)
table.created_by.label = T("Suggested By")
table.modified_on.label = T("Last Modification")
# CRUD Strings
ADD_SOLUTION = T("Add Solution")
crud_strings[tablename] = Storage(
title_create = ADD_SOLUTION,
title_display = T("Solution Details"),
title_list = T("Solutions"),
title_update = T("Edit Solution"),
title_search = T("Search Solutions"),
subtitle_create = T("Add New Solution"),
label_list_button = T("List Solutions"),
label_create_button = ADD_SOLUTION,
label_delete_button = T("Delete Solution"),
msg_record_created = T("Solution added"),
msg_record_modified = T("Solution updated"),
msg_record_deleted = T("Solution deleted"),
msg_list_empty = T("No Solutions currently defined"))
table.virtualfields.append(solution_virtualfields())
configure(tablename,
list_fields=["id",
#"problem_id",
"name",
"description",
"created_by",
"modified_on",
(T("Voted on"), "votes"),
(T("Comments"), "comments"),
])
solution_id = S3ReusableField("solution_id", table,
label = T("Solution"),
requires = IS_EMPTY_OR(
IS_ONE_OF(db, "delphi_solution.id",
self.delphi_solution_represent
)),
represent = self.delphi_solution_represent)
# ---------------------------------------------------------------------
# Votes
# ---------------------------------------------------------------------
tablename = "delphi_vote"
table = define_table(tablename,
problem_id(),
solution_id(empty=False),
Field("rank", "integer",
label = T("Rank")),
*s3_meta_fields()
)
# ---------------------------------------------------------------------
# Comments
# @ToDo: Attachments?
#
# Parent field allows us to:
# * easily filter for top-level threads
# * easily filter for next level of threading
# * hook a new reply into the correct location in the hierarchy
#
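# For illustration (hypothetical queries, assuming the usual db handle):
# top_level = db(db.delphi_comment.parent == None).select()
# replies = db(db.delphi_comment.parent == comment_id).select()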
# ---------------------------------------------------------------------
tablename = "delphi_comment"
table = define_table(tablename,
Field("parent", "reference delphi_comment",
requires = IS_EMPTY_OR(
IS_ONE_OF_EMPTY(db, "delphi_comment.id")),
readable=False),
problem_id(),
# @ToDo: Tag to 1+ Solutions
#solution_multi_id(),
solution_id(),
Field("body", "text", notnull=True,
label = T("Comment")),
*s3_meta_fields()
)
configure(tablename,
list_fields=["id",
"problem_id",
"solution_id",
"created_by",
"modified_on"])
# ---------------------------------------------------------------------
# Pass variables back to global scope (s3db.*)
return Storage(
delphi_solution_represent = self.delphi_solution_represent,
)
# -------------------------------------------------------------------------
@staticmethod
def delphi_group_represent(id, row=None):
""" FK representation """
if not row:
db = current.db
table = db.delphi_group
row = db(table.id == id).select(table.id,
table.name,
limitby = (0, 1)).first()
elif not id:
return current.messages.NONE
try:
return A(row.name,
_href=URL(c="delphi",
f="group",
args=[row.id]))
except:
return current.messages.UNKNOWN_OPT
# ---------------------------------------------------------------------
@staticmethod
def delphi_problem_represent(id, row=None, showlink=False,
solutions=True):
""" FK representation """
if not row:
db = current.db
table = db.delphi_problem
row = db(table.id == id).select(table.id,
table.name,
limitby = (0, 1)).first()
elif not id:
return current.messages.NONE
try:
if showlink:
if solutions:
url = URL(c="delphi", f="problem", args=[row.id, "solution"])
else:
url = URL(c="delphi", f="problem", args=[row.id])
return A(row.name, _href=url)
else:
return row.name
except:
return current.messages.UNKNOWN_OPT
# -------------------------------------------------------------------------
@staticmethod
def delphi_solution_represent(id, row=None):
""" FK representation """
if row:
return row.name
if not id:
return current.messages.NONE
db = current.db
table = db.delphi_solution
r = db(table.id == id).select(table.name,
limitby = (0, 1)).first()
try:
return r.name
except:
return current.messages.UNKNOWN_OPT
# ---------------------------------------------------------------------
@staticmethod
def group_duplicate(job):
"""
This callback will be called when importing records;
it will check whether the record being imported is a duplicate.
@param job: An S3ImportJob object which includes all the details
of the record being imported
If the record is a duplicate then it will set the job method to update
Rules for finding a duplicate:
- Look for a record with the same name, ignoring case
"""
if job.tablename == "delphi_group":
table = job.table
data = job.data
name = "name" in data and data.name
query = (table.name.lower() == name.lower())
_duplicate = current.db(query).select(table.id,
limitby=(0, 1)).first()
if _duplicate:
job.id = _duplicate.id
job.data.id = _duplicate.id
job.method = job.METHOD.UPDATE
# ---------------------------------------------------------------------
@staticmethod
def problem_duplicate(job):
"""
This callback will be called when importing records;
it will check whether the record being imported is a duplicate.
@param job: An S3ImportJob object which includes all the details
of the record being imported
If the record is a duplicate then it will set the job method to update
Rules for finding a duplicate:
- Look for a record with the same name, ignoring case
"""
if job.tablename == "delphi_problem":
table = job.table
name = "name" in job.data and job.data.name
query = (table.name.lower() == name.lower())
_duplicate = current.db(query).select(table.id,
limitby=(0, 1)).first()
if _duplicate:
job.id = _duplicate.id
job.data.id = _duplicate.id
job.method = job.METHOD.UPDATE
# =============================================================================
class solution_virtualfields(dict, object):
""" Virtual Fields for Solutions """
# Fields to be loaded by sqltable as qfields
# without them being list_fields
# (These cannot contain VirtualFields)
extra_fields = [
"problem_id"
]
def comments(self):
ctable = current.s3db.delphi_comment
# Prevent recursive queries
try:
query = (ctable.solution_id == self.delphi_solution.id)
except AttributeError:
# We are being instantiated inside one of the other methods
return None
comments = current.db(query).count()
url = URL(c="delphi", f="problem",
args=["solution", self.delphi_solution.id, "discuss"])
output = A(comments,
_href=url)
return output
def votes(self):
vtable = current.s3db.delphi_vote
# Prevent recursive queries
try:
query = (vtable.solution_id == self.delphi_solution.id)
except AttributeError:
# We are being instantiated inside one of the other methods
return None
votes = current.db(query).count()
url = URL(c="delphi", f="problem",
args=[self.delphi_solution.problem_id, "results"])
output = A(votes,
_href=url)
return output
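# Usage note (a sketch, relying on web2py virtual-field semantics): once
# table.virtualfields.append(solution_virtualfields()) has run in
# S3DelphiModel.model(), each selected delphi_solution Row exposes
# row.comments and row.votes as computed link widgets, which is how the
# (T("Comments"), "comments") and (T("Voted on"), "votes") list_fields
# entries above get rendered.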
# -----------------------------------------------------------------------------
class S3DelphiUser:
""" Delphi User class """
def user(self):
""" Used by Discuss() (& summary()) """
return current.s3db.auth_user[self.user_id]
def __init__(self, group_id=None):
auth = current.auth
user_id = auth.user.id if auth.is_logged_in() else None
status = 1 # guest
membership = None
if auth.s3_has_role("DelphiAdmin"):
# DelphiAdmin is Moderator for every Group
status = 4
elif user_id != None and group_id != None:
table = current.s3db.delphi_membership
query = (table.group_id == group_id) & \
(table.user_id == user_id)
membership = current.db(query).select()
if membership:
membership = membership[0]
status = membership.status
self.authorised = (status == 4)
# Only Moderators & Participants can Vote
self.can_vote = status in (3, 4)
# All but Guests can add Solutions & Discuss
self.can_add_item = status != 1
self.can_post = status != 1
self.membership = membership
self.status = status
self.user_id = user_id
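# Usage sketch (hypothetical controller code):
# duser = S3DelphiUser(group_id)
# if duser.can_vote:
# ...record or update a delphi_vote...
# The status values 1-4 correspond to Guest/Contributor/Participant/Moderator,
# matching delphi_role_opts in S3DelphiModel.model().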
# END =========================================================================
| |
#!/usr/bin/env python
# Copyright 2015 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import json
import mmap
import os
import re
import sys
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("filenames", help="list of files to check, all files if unspecified", nargs='*')
parser.add_argument("-e", "--skip-exceptions", help="ignore hack/verify-flags/exceptions.txt and print all output", action="store_true")
args = parser.parse_args()
# Cargo culted from http://stackoverflow.com/questions/898669/how-can-i-detect-if-a-file-is-binary-non-text-in-python
def is_binary(pathname):
"""Return true if the given filename is binary.
@raise EnvironmentError: if the file does not exist or cannot be accessed.
@attention: found @ http://bytes.com/topic/python/answers/21222-determine-file-type-binary-text on 6/08/2010
@author: Trent Mick <TrentM@ActiveState.com>
@author: Jorge Orpinel <jorge@orpinel.com>"""
try:
with open(pathname, 'r') as f:
CHUNKSIZE = 1024
while 1:
chunk = f.read(CHUNKSIZE)
if '\0' in chunk: # found null byte
return True
if len(chunk) < CHUNKSIZE:
break # done
except:
return True
return False
def get_all_files(rootdir):
all_files = []
for root, dirs, files in os.walk(rootdir):
# don't visit certain dirs
if 'vendor' in dirs:
dirs.remove('vendor')
if '_output' in dirs:
dirs.remove('_output')
if '_gopath' in dirs:
dirs.remove('_gopath')
if 'third_party' in dirs:
dirs.remove('third_party')
if '.git' in dirs:
dirs.remove('.git')
if 'exceptions.txt' in files:
files.remove('exceptions.txt')
if 'known-flags.txt' in files:
files.remove('known-flags.txt')
for name in files:
pathname = os.path.join(root, name)
if is_binary(pathname):
continue
all_files.append(pathname)
return all_files
def normalize_files(rootdir, files):
newfiles = []
a = ['Godeps', '_gopath', 'third_party', '.git', 'exceptions.txt', 'known-flags.txt']
for f in files:
if any(x in f for x in a):
continue
if f.endswith(".svg"):
continue
if f.endswith(".gliffy"):
continue
if f.endswith(".md"):
continue
if f.endswith(".yaml"):
continue
newfiles.append(f)
for i, f in enumerate(newfiles):
if not os.path.isabs(f):
newfiles[i] = os.path.join(rootdir, f)
return newfiles
def line_has_bad_flag(line, flagre):
results = flagre.findall(line)
for result in results:
if not "_" in result:
return False
# this should exclude many cases where jinja2 templates use kube flags
# as variables, except it uses _ for the variable name
if "{% set" + result + "= \"" in line:
return False
if "pillar[" + result + "]" in line:
return False
if "grains" + result in line:
return False
# something common in juju variables...
if "template_data[" + result + "]" in line:
return False
return True
return False
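# Note (a reading of the code above, not a behaviour change): the pattern
# built by flags_to_re() below has no capture groups, so each entry in
# `results` is the full match including one non-word character on either
# side of the flag name; the substring checks above rely on that context.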
# The list of files might not be the whole repo. If someone only changed a
# couple of files we don't want to scan all of the golang files looking for
# flags. Instead load the list of flags from hack/verify-flags/known-flags.txt
# If scanning the golang files finds a new flag not in that file, return an
# error and tell the user to add the flag to the flag list.
def get_flags(rootdir, files):
# preload the 'known' flags
pathname = os.path.join(rootdir, "hack/verify-flags/known-flags.txt")
f = open(pathname, 'r')
flags = set(f.read().splitlines())
f.close()
# preload the 'known' flags which don't follow the - standard
pathname = os.path.join(rootdir, "hack/verify-flags/excluded-flags.txt")
f = open(pathname, 'r')
excluded_flags = set(f.read().splitlines())
f.close()
regexs = [ re.compile('Var[P]?\([^,]*, "([^"]*)"'),
re.compile('.String[P]?\("([^"]*)",[^,]+,[^)]+\)'),
re.compile('.Int[P]?\("([^"]*)",[^,]+,[^)]+\)'),
re.compile('.Bool[P]?\("([^"]*)",[^,]+,[^)]+\)'),
re.compile('.Duration[P]?\("([^"]*)",[^,]+,[^)]+\)'),
re.compile('.StringSlice[P]?\("([^"]*)",[^,]+,[^)]+\)') ]
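# Illustrative Go declarations these patterns pick up (names hypothetical):
# fs.StringVar(&o.ClusterName, "cluster-name", "", "usage text")
# fs.Bool("enable-foo", false, "usage text")
# In each case the quoted flag name is the captured group.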
new_flags = set()
new_excluded_flags = set()
# walk all the files looking for any flags being declared
for pathname in files:
if not pathname.endswith(".go"):
continue
f = open(pathname, 'r')
data = f.read()
f.close()
matches = []
for regex in regexs:
matches = matches + regex.findall(data)
for flag in matches:
if any(x in flag for x in excluded_flags):
continue
if "_" in flag:
new_excluded_flags.add(flag)
if not "-" in flag:
continue
if flag not in flags:
new_flags.add(flag)
if len(new_excluded_flags) != 0:
print("Found a flag declared with an _ but which is not explicitly listed as a valid flag name in hack/verify-flags/excluded-flags.txt")
print("Are you certain this flag should not have been declared with an - instead?")
l = list(new_excluded_flags)
l.sort()
print("%s" % "\n".join(l))
sys.exit(1)
if len(new_flags) != 0:
print("Found flags in golang files not in the list of known flags. Please add these to hack/verify-flags/known-flags.txt")
l = list(new_flags)
l.sort()
print("%s" % "\n".join(l))
sys.exit(1)
return list(flags)
def flags_to_re(flags):
"""turn the list of all flags we found into a regex find both - and _ versions"""
dashRE = re.compile('[-_]')
flagREs = []
for flag in flags:
# turn all flag names into regexs which will find both types
newre = dashRE.sub('[-_]', flag)
# only match if there is not a leading or trailing alphanumeric character
flagREs.append("[^\w${]" + newre + "[^\w]")
# turn that list of regex strings into a single large RE
flagRE = "|".join(flagREs)
flagRE = re.compile(flagRE)
return flagRE
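# For example (flag name illustrative), "cluster-name" becomes the fragment
# "[^\w${]cluster[-_]name[^\w]", so both "cluster-name" and the disallowed
# "cluster_name" spelling are matched, but only when the name is not preceded
# by a word character, '$' or '{', nor followed by a word character.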
def load_exceptions(rootdir):
exceptions = set()
if args.skip_exceptions:
return exceptions
exception_filename = os.path.join(rootdir, "hack/verify-flags/exceptions.txt")
exception_file = open(exception_filename, 'r')
for exception in exception_file.read().splitlines():
out = exception.split(":", 1)
if len(out) != 2:
print("Invalid line in exceptions file: %s" % exception)
continue
filename = out[0]
line = out[1]
exceptions.add((filename, line))
return exceptions
def main():
rootdir = os.path.dirname(__file__) + "/../"
rootdir = os.path.abspath(rootdir)
exceptions = load_exceptions(rootdir)
if len(args.filenames) > 0:
files = args.filenames
else:
files = get_all_files(rootdir)
files = normalize_files(rootdir, files)
flags = get_flags(rootdir, files)
flagRE = flags_to_re(flags)
bad_lines = []
# walk all the files looking for any flag that was declared and now has an _
for pathname in files:
relname = os.path.relpath(pathname, rootdir)
f = open(pathname, 'r')
for line in f.read().splitlines():
if line_has_bad_flag(line, flagRE):
if (relname, line) not in exceptions:
bad_lines.append((relname, line))
f.close()
if len(bad_lines) != 0:
if not args.skip_exceptions:
print("Found illegal 'flag' usage. If these are false negatives you should run `hack/verify-flags-underscore.py -e > hack/verify-flags/exceptions.txt` to update the list.")
bad_lines.sort()
for (relname, line) in bad_lines:
print("%s:%s" % (relname, line))
return 1
if __name__ == "__main__":
sys.exit(main())
| |
#!/usr/bin/env python3
import gc
import logging
import time
import asyncio
import struct
import socket
from concurrent.futures import Executor, ThreadPoolExecutor
from phasortoolbox.message import Command
from phasortoolbox import Parser
LOG=logging.getLogger('phasortoolbox.client')
class Client():
"""A synchrophasor protocol connection client.
This class automates communication with any device that follows IEEE Std C37.118.2-2011. The remote device could be a PMU or a PDC. This class automatically connects to the remote device, sends commands if necessary, receives data, parses the received messages, and calls the callback() function with each parsed data message.
There are four connection methods defined in C37.118.2-2011:
F.2.1 TCP-only method:
"The client needs to know only the server address and port. "
Example:
>>> pmu_client = Client(remote_ip='10.0.0.1',remote_port=4712, idcode=1, mode='TCP')
>>> pmu_client.run()
F.2.2 UDP-only method:
"The client must know the server address and port number. The server can respond to the client port or a different port by prior arrangement."
local_port is optional if not configured.
Example:
>>> pmu_client = Client(remote_ip='10.0.0.2',remote_port=4713, local_port=4713, idcode=2, mode='UDP')
>>> pmu_client.run()
F.2.3 TCP/UDP method:
"The server address and port must be known to the client, and the client port UDP port must be known to the server (PMU)."
Example:
>>> pmu_client = Client(remote_ip='10.0.0.3',remote_port=4712, local_port=4713 , idcode=3, mode='TCP_UDP')
>>> pmu_client.run()
F.2.4 Spontaneous data transmission method:
"The drawback to this method is lack of ability to turn the data stream on and off, ... "
remote_ip and remote_port are optional if not known.
>>> pmu_client = Client(remote_ip='10.0.0.4',local_port=4713, idcode=4, mode='UDP_S')
>>> pmu_client.run()
You need to define the callback function. Once a data message is received, the callback function will be called.
Example:
>>> f = lambda message: print(message)
>>> pmu_client.callback = f
>>> pmu_client.run()
"""
def __init__(self, idcode, remote_ip=None, remote_port=None, local_port=None, mode='TCP', callback=None, process_pool=False):
"""docstring for __init__
Args:
idcode (int): The idcode of the remote device. This argument must be provided.
remote_ip (str): The IP address of the remote device. e.g. '10.0.0.1'. This argument is optional under "UDP_S" mode.
remote_port (int): The port number of the remote device. e.g. 4712. This argument is optional under "UDP_S" mode.
local_port (int): The local port number. e.g. 4712. This argument is optional under "TCP", and "UDP" mode.
mode (str): The operation mode. Options are: "TCP" for TCP-only method; "UDP" for UDP-only method; "TCP_UDP" for TCP/UDP method; "UDP_S" for Spontaneous data transmission method.
callback (function): The function called when a data message is received.
"""
self.remote_ip = remote_ip # '10.0.0.1'
self.remote_port = remote_port # 4712
self.local_port = local_port # 4712
self.idcode = idcode # 1
if callback is not None:
self.callback = callback # lambda x: None
self.mode = mode
self.receive_counter = 0
self.process_pool = process_pool
self._parser = Parser()
self._transport = None
self._protocol = None
self._pdc_callbacks = {}
self._garbage_collection = True
self.set_loop()
def callback(self, data):
"""Called when a data message is received.
This is an empty function.
Args:
data (PhasorMessage): This is the parsed data message
"""
pass
def run(self, c=0, loop=None, executor=None):
"""An event loop warper.
This function creates a new event loop and schedule the coro_run() then let the event loop run_forever(). When stopped, do some clean up.
Args:
c (int): defines the number of data messages received before stop. The default value is 0, which means run forever.
"""
self.set_loop(loop, executor)
self.loop.create_task(self.coro_run(c))
try:
self.loop.run_forever()
except KeyboardInterrupt:
pass
finally:
self.loop.run_until_complete(self.coro_close())
self.checkreceive_counter()
self.loop.close()
async def coro_run(self, c=0):
"""Make the connection.
This is a coroutine. After a connection is made, send command messages to request configuration messages and to start transmission, if necessary, according to the running mode.
Args:
c (int): defines the number of data messages received before stop. The default value is 0, which means run forever.
"""
self.receive_counter = 0
self.c = c
if self.mode == 'TCP':
LOG.info('Connecting to: (\'{}\', {}) ...'.format(self.remote_ip, self.remote_port))
self._transport, self._protocol = await self.loop.create_connection(lambda: _TCPOnly(self.idcode, self._data_received), self.remote_ip, self.remote_port)
elif self.mode == 'UDP':
self._transport, self._protocol = await self.loop.create_datagram_endpoint(lambda: _UDPOnly(self.idcode, self._data_received), local_addr=('0.0.0.0', self.local_port) if self.local_port else None, remote_addr=(self.remote_ip, self.remote_port))
elif self.mode == 'TCP_UDP':
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM,
socket.IPPROTO_UDP)
#sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
#sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
sock.bind(('0.0.0.0', self.local_port))
LOG.info('Connecting to: (\'{}\', {}) ...'.format(self.remote_ip, self.remote_port))
await self.loop.create_datagram_endpoint(
lambda: _UDP_Spontaneous(self.remote_ip, None, self._data_received), sock=sock)
self._transport, self._protocol = await self.loop.create_connection(lambda: _TCPOnly(self.idcode, self._data_received),
self.remote_ip, self.remote_port)
elif self.mode == 'UDP_S':
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM,
socket.IPPROTO_UDP)
#sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
#sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
sock.bind(('0.0.0.0', self.local_port))
LOG.warning('Waiting for configuration packet, this may last a minute ...')
self._transport, self._protocol = await self.loop.create_datagram_endpoint(
lambda: _UDP_Spontaneous(self.remote_ip, self.remote_port, self._data_received), sock=sock)
def _data_received(self, data, perf_counter, arr_time, addr=None):
if self.process_pool:
future = self.executor.submit(self._parser.parse, data)
if future.exception():
raise future.exception()
msgs = future.result()
else:
msgs = self._parser.parse(data)
if len(msgs)>0:
parse_time = (time.perf_counter() - perf_counter)/len(msgs)
for msg in msgs:
if msg.sync.frame_type.name == 'data':
msg.perf_counter = perf_counter
msg.arr_time = arr_time
msg.parse_time = parse_time
self.receive_counter += 1
#if self.process_pool:
# future = self.executor.submit(self.callback, msg)
# if future.exception():
# raise future.exception()
#else:
self.callback(msg)
for pdc_id in self._pdc_callbacks:
self._pdc_callbacks[pdc_id](msg)
if self._garbage_collection:
gc.collect()
if self.c == 0:
return
elif self.c > 1:
self.c -= 1
return
elif self.c == 1:
self.loop.stop()
else:
LOG.warning('"{}" message received from: {}.'.format(msg.sync.frame_type.name, self._transport.get_extra_info('peername') if addr is None else addr))
def checkreceive_counter(self):
"""Print the number of data messages received in the last run
"""
LOG.warning('{} data messages received from device "idcode {}".'.format(self.receive_counter, self.idcode))
async def coro_close(self):
"""Close the connection.
This is a coroutine. Before a connection is closed, send command messages to stop transmission if necessary according to the running mode.
"""
if self._transport:
if not self._transport.is_closing():
if self.mode != 'UDP_S':
self._protocol.close()
self._transport.close()
def set_loop(self, loop=None, executor=None):
"""Assign an event loop and and executor to the instance.
Call this function if you want the instance to run on external event loop and executor.
Args:
loop (asyncio.AbstractEventLoop): Default value is asyncio.new_event_loop()
executor (concurrent.futures.Executor): Default value is ThreadPoolExecutor()
"""
self.loop = loop if loop is not None else asyncio.new_event_loop()
self.executor = executor if executor is not None else ThreadPoolExecutor()
def _add_pdc(self, _pdc_id, _pdc_callback, loop, executor):
self._pdc_callbacks[_pdc_id] = _pdc_callback
self._garbage_collection = False
self.set_loop(loop, executor)
def _remove_pdc(self, _pdc_id):
del(self._pdc_callbacks[_pdc_id])
if self._pdc_callbacks == {}:
self._garbage_collection = True
self.set_loop()
class _TCPOnly(asyncio.Protocol):
def __init__(self, idcode, callback=lambda data: None):
self.idcode = idcode
self.buf = _stream_buffer(callback)
def data_received(self, data):
perf_counter = time.perf_counter()
arr_time = time.time()
self.buf.add_bytes(data, perf_counter, arr_time)
def connection_made(self, transport):
self.transport = transport
self.peername = transport.get_extra_info('peername')
LOG.warning('Connected to: {}.'.format(str(self.peername)))
self.transport.write(Command(self.idcode, 'off'))
LOG.info('Command "data off" sent to: {}.'.format(str(self.peername)))
self.transport.write(Command(self.idcode, 'cfg2'))
LOG.info('Command "send configuration2" sent to: {}.'.format(str(self.peername)))
self.transport.write(Command(self.idcode, 'on'))
LOG.info('Command "data on" sent to: {}.'.format(str(self.peername)))
def close(self):
self.transport.write(Command(self.idcode, 'off'))
LOG.info('Command "data off" sent to: {}.'.format(str(self.peername)))
def connection_lost(self, exc):
LOG.warning('Connection {} closed.'.format(str(self.peername)))
class _UDPOnly(asyncio.DatagramProtocol):
def __init__(self, idcode, callback=lambda data: None):
self.idcode = idcode
self.buf = _stream_buffer(callback)
def datagram_received(self, data, addr):
perf_counter = time.perf_counter()
arr_time = time.time()
self.buf.add_bytes(data, perf_counter, arr_time, addr)
def connection_made(self, transport):
self.transport = transport
self.peername = transport.get_extra_info('peername')
LOG.warning('Connected to: {}.'.format(str(self.peername)))
self.transport.sendto(Command(self.idcode, 'off'))
LOG.info('Command "data off" sent to: {}.'.format(str(self.peername)))
self.transport.sendto(Command(self.idcode, 'cfg2'))
LOG.info('Command "send configuration2" sent to: {}.'.format(str(self.peername)))
self.transport.sendto(Command(self.idcode, 'on'))
LOG.info('Command "data on" sent to: {}.'.format(str(self.peername)))
def close(self):
self.transport.sendto(Command(self.idcode, 'off'))
LOG.info('Command "data off" sent to: {}.'.format(str(self.peername)))
def connection_lost(self, exc):
LOG.warning('Connection {} closed.'.format(str(self.peername)))
class _UDP_Spontaneous(asyncio.DatagramProtocol):
def __init__(self, remote_ip=None, remote_port=None,
callback=lambda data, addr: None):
self.remote_ip = remote_ip
self.remote_port = remote_port
self._pass_score = (self.remote_ip is not None) + (self.remote_port is not None)
self.buf = _stream_buffer(callback)
def datagram_received(self, data, addr):
perf_counter = time.perf_counter()
arr_time = time.time()
#print('ha',addr[0],self.remote_ip,addr[1],self.remote_port,self._pass_score)
if (addr[0] == self.remote_ip) + (addr[1] == self.remote_port) == self._pass_score:
self.buf.add_bytes(data, perf_counter, arr_time, addr)
class _stream_buffer():
def __init__(self, callback):
self.data = b''
self.l = 1 # 1 means waiting for header, larger than 1 means waiting to process l bytes
self.callback = callback
def add_bytes(self, bytes_, perf_counter, arr_time, addr=None):
self.data += bytes_
while len(self.data) >= self.l:
if self.l == 1:
self.find_header()
if self.l == 4 and len(self.data)>=4:
self.l = struct.unpack('>H',self.data[2:4])[0]
if self.l > 4 and self.l <= len(self.data):
self.callback(self.data[:self.l], perf_counter, arr_time, addr)
self.data=self.data[self.l:]
self.l = 1
def find_header(self):
i = 0
for b in self.data:
if b == 170:
self.l = 4 # waiting for length info
break
i += 1
self.data = self.data[i:]
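# Framing assumed above (per IEEE C37.118.2): byte 0 of every frame is the
# sync byte 0xAA (170 decimal) and bytes 2-3 carry the big-endian frame size,
# which is why add_bytes() switches l to 4, reads
# struct.unpack('>H', self.data[2:4]) and then hands exactly that many bytes
# to the callback.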
| |
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""GAX wrapper for Logging API requests."""
import functools
from google.api.core import page_iterator
from google.cloud.gapic.logging.v2.config_service_v2_client import (
ConfigServiceV2Client)
from google.cloud.gapic.logging.v2.logging_service_v2_client import (
LoggingServiceV2Client)
from google.cloud.gapic.logging.v2.metrics_service_v2_client import (
MetricsServiceV2Client)
from google.gax import CallOptions
from google.gax import INITIAL_PAGE
from google.gax.errors import GaxError
from google.gax.grpc import exc_to_code
from google.cloud.proto.logging.v2.logging_config_pb2 import LogSink
from google.cloud.proto.logging.v2.logging_metrics_pb2 import LogMetric
from google.cloud.proto.logging.v2.log_entry_pb2 import LogEntry
from google.protobuf.json_format import MessageToDict
from google.protobuf.json_format import ParseDict
from grpc import StatusCode
from google.cloud._helpers import make_secure_channel
from google.cloud._http import DEFAULT_USER_AGENT
from google.cloud.exceptions import Conflict
from google.cloud.exceptions import NotFound
from google.cloud.logging import __version__
from google.cloud.logging._helpers import entry_from_resource
from google.cloud.logging.sink import Sink
from google.cloud.logging.metric import Metric
class _LoggingAPI(object):
"""Helper mapping logging-related APIs.
:type gax_api:
:class:`.logging_service_v2_client.LoggingServiceV2Client`
:param gax_api: API object used to make GAX requests.
:type client: :class:`~google.cloud.logging.client.Client`
:param client: The client that owns this API object.
"""
def __init__(self, gax_api, client):
self._gax_api = gax_api
self._client = client
def list_entries(self, projects, filter_='', order_by='',
page_size=0, page_token=None):
"""Return a page of log entry resources.
:type projects: list of strings
:param projects: project IDs to include. If not passed,
defaults to the project bound to the API's client.
:type filter_: str
:param filter_:
a filter expression. See
https://cloud.google.com/logging/docs/view/advanced_filters
:type order_by: str
:param order_by: One of :data:`~google.cloud.logging.ASCENDING`
or :data:`~google.cloud.logging.DESCENDING`.
:type page_size: int
:param page_size: maximum number of entries to return. If not passed,
defaults to a value set by the API.
:type page_token: str
:param page_token: opaque marker for the next "page" of entries. If not
passed, the API will return the first page of
entries.
:rtype: :class:`~google.api.core.page_iterator.Iterator`
:returns: Iterator of :class:`~google.cloud.logging.entries._BaseEntry`
accessible to the current API.
"""
if page_token is None:
page_token = INITIAL_PAGE
options = CallOptions(page_token=page_token)
page_iter = self._gax_api.list_log_entries(
[], project_ids=projects, filter_=filter_, order_by=order_by,
page_size=page_size, options=options)
# We attach a mutable loggers dictionary so that as Logger
# objects are created by entry_from_resource, they can be
# re-used by other log entries from the same logger.
loggers = {}
item_to_value = functools.partial(
_item_to_entry, loggers=loggers)
return page_iterator._GAXIterator(
self._client, page_iter, item_to_value)
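# Illustrative use (project name and filter hypothetical), given
# api = _LoggingAPI(gax_api, client): iterating the returned iterator yields
# entry objects, e.g.
# for entry in api.list_entries(['my-project'], filter_='severity>=ERROR'):
# process(entry)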
def write_entries(self, entries, logger_name=None, resource=None,
labels=None):
"""API call: log an entry resource via a POST request
:type entries: sequence of mapping
:param entries: the log entry resources to log.
:type logger_name: str
:param logger_name: name of default logger to which to log the entries;
individual entries may override.
:type resource: mapping
:param resource: default resource to associate with entries;
individual entries may override.
:type labels: mapping
:param labels: default labels to associate with entries;
individual entries may override.
"""
options = None
partial_success = False
entry_pbs = [_log_entry_mapping_to_pb(entry) for entry in entries]
self._gax_api.write_log_entries(
entry_pbs, log_name=logger_name, resource=resource, labels=labels,
partial_success=partial_success, options=options)
def logger_delete(self, project, logger_name):
"""API call: delete all entries in a logger via a DELETE request
:type project: str
:param project: ID of project containing the log entries to delete
:type logger_name: str
:param logger_name: name of logger containing the log entries to delete
"""
options = None
path = 'projects/%s/logs/%s' % (project, logger_name)
try:
self._gax_api.delete_log(path, options=options)
except GaxError as exc:
if exc_to_code(exc.cause) == StatusCode.NOT_FOUND:
raise NotFound(path)
raise
class _SinksAPI(object):
"""Helper mapping sink-related APIs.
:type gax_api:
:class:`.config_service_v2_client.ConfigServiceV2Client`
:param gax_api: API object used to make GAX requests.
:type client: :class:`~google.cloud.logging.client.Client`
:param client: The client that owns this API object.
"""
def __init__(self, gax_api, client):
self._gax_api = gax_api
self._client = client
def list_sinks(self, project, page_size=0, page_token=None):
"""List sinks for the project associated with this client.
:type project: str
:param project: ID of the project whose sinks are to be listed.
:type page_size: int
:param page_size: maximum number of sinks to return. If not passed,
defaults to a value set by the API.
:type page_token: str
:param page_token: opaque marker for the next "page" of sinks. If not
passed, the API will return the first page of
sinks.
:rtype: tuple, (list, str)
:returns: list of mappings, plus a "next page token" string:
if not None, indicates that more sinks can be retrieved
with another call (pass that value as ``page_token``).
"""
if page_token is None:
page_token = INITIAL_PAGE
options = CallOptions(page_token=page_token)
path = 'projects/%s' % (project,)
page_iter = self._gax_api.list_sinks(path, page_size=page_size,
options=options)
return page_iterator._GAXIterator(
self._client, page_iter, _item_to_sink)
def sink_create(self, project, sink_name, filter_, destination):
"""API call: create a sink resource.
See
https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.sinks/create
:type project: str
:param project: ID of the project in which to create the sink.
:type sink_name: str
:param sink_name: the name of the sink
:type filter_: str
:param filter_: the advanced logs filter expression defining the
entries exported by the sink.
:type destination: str
:param destination: destination URI for the entries exported by
the sink.
"""
options = None
parent = 'projects/%s' % (project,)
sink_pb = LogSink(name=sink_name, filter=filter_,
destination=destination)
try:
self._gax_api.create_sink(parent, sink_pb, options=options)
except GaxError as exc:
if exc_to_code(exc.cause) == StatusCode.FAILED_PRECONDITION:
path = 'projects/%s/sinks/%s' % (project, sink_name)
raise Conflict(path)
raise
def sink_get(self, project, sink_name):
"""API call: retrieve a sink resource.
:type project: str
:param project: ID of the project containing the sink.
:type sink_name: str
:param sink_name: the name of the sink
:rtype: dict
:returns: The sink object returned from the API (converted from a
protobuf to a dictionary).
"""
options = None
path = 'projects/%s/sinks/%s' % (project, sink_name)
try:
sink_pb = self._gax_api.get_sink(path, options=options)
except GaxError as exc:
if exc_to_code(exc.cause) == StatusCode.NOT_FOUND:
raise NotFound(path)
raise
# NOTE: LogSink message type does not have an ``Any`` field
# so ``MessageToDict`` can safely be used.
return MessageToDict(sink_pb)
def sink_update(self, project, sink_name, filter_, destination):
"""API call: update a sink resource.
:type project: str
:param project: ID of the project containing the sink.
:type sink_name: str
:param sink_name: the name of the sink
:type filter_: str
:param filter_: the advanced logs filter expression defining the
entries exported by the sink.
:type destination: str
:param destination: destination URI for the entries exported by
the sink.
:rtype: dict
:returns: The sink object returned from the API (converted from a
protobuf to a dictionary).
"""
options = None
path = 'projects/%s/sinks/%s' % (project, sink_name)
sink_pb = LogSink(name=path, filter=filter_, destination=destination)
try:
sink_pb = self._gax_api.update_sink(path, sink_pb, options=options)
except GaxError as exc:
if exc_to_code(exc.cause) == StatusCode.NOT_FOUND:
raise NotFound(path)
raise
# NOTE: LogSink message type does not have an ``Any`` field
# so ``MessageToDict`` can safely be used.
return MessageToDict(sink_pb)
def sink_delete(self, project, sink_name):
"""API call: delete a sink resource.
:type project: str
:param project: ID of the project containing the sink.
:type sink_name: str
:param sink_name: the name of the sink
"""
options = None
path = 'projects/%s/sinks/%s' % (project, sink_name)
try:
self._gax_api.delete_sink(path, options=options)
except GaxError as exc:
if exc_to_code(exc.cause) == StatusCode.NOT_FOUND:
raise NotFound(path)
raise
class _MetricsAPI(object):
"""Helper mapping sink-related APIs.
:type gax_api:
:class:`.metrics_service_v2_client.MetricsServiceV2Client`
:param gax_api: API object used to make GAX requests.
:type client: :class:`~google.cloud.logging.client.Client`
:param client: The client that owns this API object.
"""
def __init__(self, gax_api, client):
self._gax_api = gax_api
self._client = client
def list_metrics(self, project, page_size=0, page_token=None):
"""List metrics for the project associated with this client.
:type project: str
:param project: ID of the project whose metrics are to be listed.
:type page_size: int
:param page_size: maximum number of metrics to return. If not passed,
defaults to a value set by the API.
:type page_token: str
:param page_token: opaque marker for the next "page" of metrics. If not
passed, the API will return the first page of
metrics.
:rtype: :class:`~google.api.core.page_iterator.Iterator`
:returns: Iterator of
:class:`~google.cloud.logging.metric.Metric`
accessible to the current API.
"""
if page_token is None:
page_token = INITIAL_PAGE
options = CallOptions(page_token=page_token)
path = 'projects/%s' % (project,)
page_iter = self._gax_api.list_log_metrics(
path, page_size=page_size, options=options)
return page_iterator._GAXIterator(
self._client, page_iter, _item_to_metric)
def metric_create(self, project, metric_name, filter_, description):
"""API call: create a metric resource.
See
https://cloud.google.com/logging/docs/reference/v2/rest/v2/projects.metrics/create
:type project: str
:param project: ID of the project in which to create the metric.
:type metric_name: str
:param metric_name: the name of the metric
:type filter_: str
:param filter_: the advanced logs filter expression defining the
entries exported by the metric.
:type description: str
:param description: description of the metric.
"""
options = None
parent = 'projects/%s' % (project,)
metric_pb = LogMetric(name=metric_name, filter=filter_,
description=description)
try:
self._gax_api.create_log_metric(parent, metric_pb, options=options)
except GaxError as exc:
if exc_to_code(exc.cause) == StatusCode.FAILED_PRECONDITION:
path = 'projects/%s/metrics/%s' % (project, metric_name)
raise Conflict(path)
raise
def metric_get(self, project, metric_name):
"""API call: retrieve a metric resource.
:type project: str
:param project: ID of the project containing the metric.
:type metric_name: str
:param metric_name: the name of the metric
:rtype: dict
:returns: The metric object returned from the API (converted from a
protobuf to a dictionary).
"""
options = None
path = 'projects/%s/metrics/%s' % (project, metric_name)
try:
metric_pb = self._gax_api.get_log_metric(path, options=options)
except GaxError as exc:
if exc_to_code(exc.cause) == StatusCode.NOT_FOUND:
raise NotFound(path)
raise
# NOTE: LogMetric message type does not have an ``Any`` field
# so ``MessageToDict`` can safely be used.
return MessageToDict(metric_pb)
def metric_update(self, project, metric_name, filter_, description):
"""API call: update a metric resource.
:type project: str
:param project: ID of the project containing the metric.
:type metric_name: str
:param metric_name: the name of the metric
:type filter_: str
:param filter_: the advanced logs filter expression defining the
entries exported by the metric.
:type description: str
:param description: description of the metric.
:rtype: dict
:returns: The metric object returned from the API (converted from a
protobuf to a dictionary).
"""
options = None
path = 'projects/%s/metrics/%s' % (project, metric_name)
metric_pb = LogMetric(name=path, filter=filter_,
description=description)
try:
metric_pb = self._gax_api.update_log_metric(
path, metric_pb, options=options)
except GaxError as exc:
if exc_to_code(exc.cause) == StatusCode.NOT_FOUND:
raise NotFound(path)
raise
# NOTE: LogMetric message type does not have an ``Any`` field
# so ``MessageToDict`` can safely be used.
return MessageToDict(metric_pb)
def metric_delete(self, project, metric_name):
"""API call: delete a metric resource.
:type project: str
:param project: ID of the project containing the metric.
:type metric_name: str
:param metric_name: the name of the metric
"""
options = None
path = 'projects/%s/metrics/%s' % (project, metric_name)
try:
self._gax_api.delete_log_metric(path, options=options)
except GaxError as exc:
if exc_to_code(exc.cause) == StatusCode.NOT_FOUND:
raise NotFound(path)
raise
def _parse_log_entry(entry_pb):
"""Special helper to parse ``LogEntry`` protobuf into a dictionary.
The ``proto_payload`` field in ``LogEntry`` is of type ``Any``. This
can be problematic if the type URL in the payload isn't in the
``google.protobuf`` registry. To help with parsing unregistered types,
this function will remove ``proto_payload`` before parsing.
:type entry_pb: :class:`.log_entry_pb2.LogEntry`
:param entry_pb: Log entry protobuf.
:rtype: dict
:returns: The parsed log entry. The ``protoPayload`` key may contain
the raw ``Any`` protobuf from ``entry_pb.proto_payload`` if
it could not be parsed.
"""
try:
return MessageToDict(entry_pb)
except TypeError:
if entry_pb.HasField('proto_payload'):
proto_payload = entry_pb.proto_payload
entry_pb.ClearField('proto_payload')
entry_mapping = MessageToDict(entry_pb)
entry_mapping['protoPayload'] = proto_payload
return entry_mapping
else:
raise
def _log_entry_mapping_to_pb(mapping):
"""Helper for :meth:`write_entries`, et aliae
Performs "impedance matching" between the protobuf attrs and
the keys expected in the JSON API.
"""
entry_pb = LogEntry()
# NOTE: We assume ``mapping`` was created in ``Batch.commit``
# or ``Logger._make_entry_resource``. In either case, if
# the ``protoPayload`` key is present, we assume that the
# type URL is registered with ``google.protobuf`` and will
# not cause any issues in the JSON->protobuf conversion
# of the corresponding ``proto_payload`` in the log entry
# (it is an ``Any`` field).
ParseDict(mapping, entry_pb)
return entry_pb
def _item_to_entry(iterator, entry_pb, loggers):
"""Convert a log entry protobuf to the native object.
.. note::
This method does not have the correct signature to be used as
the ``item_to_value`` argument to
:class:`~google.api.core.page_iterator.Iterator`. It is intended to be
patched with a mutable ``loggers`` argument that can be updated
on subsequent calls. For an example, see how the method is
used above in :meth:`_LoggingAPI.list_entries`.
:type iterator: :class:`~google.api.core.page_iterator.Iterator`
:param iterator: The iterator that is currently in use.
:type entry_pb: :class:`.log_entry_pb2.LogEntry`
:param entry_pb: Log entry protobuf returned from the API.
:type loggers: dict
:param loggers:
A mapping of logger fullnames -> loggers. If the logger
that owns the entry is not in ``loggers``, the entry
will have a newly-created logger.
:rtype: :class:`~google.cloud.logging.entries._BaseEntry`
:returns: The next log entry in the page.
"""
resource = _parse_log_entry(entry_pb)
return entry_from_resource(resource, iterator.client, loggers)
def _item_to_sink(iterator, log_sink_pb):
"""Convert a sink protobuf to the native object.
:type iterator: :class:`~google.api.core.page_iterator.Iterator`
:param iterator: The iterator that is currently in use.
:type log_sink_pb:
:class:`.logging_config_pb2.LogSink`
:param log_sink_pb: Sink protobuf returned from the API.
:rtype: :class:`~google.cloud.logging.sink.Sink`
:returns: The next sink in the page.
"""
# NOTE: LogSink message type does not have an ``Any`` field
# so ``MessageToDict`` can safely be used.
resource = MessageToDict(log_sink_pb)
return Sink.from_api_repr(resource, iterator.client)
def _item_to_metric(iterator, log_metric_pb):
"""Convert a metric protobuf to the native object.
:type iterator: :class:`~google.api.core.page_iterator.Iterator`
:param iterator: The iterator that is currently in use.
:type log_metric_pb:
:class:`.logging_metrics_pb2.LogMetric`
:param log_metric_pb: Metric protobuf returned from the API.
:rtype: :class:`~google.cloud.logging.metric.Metric`
:returns: The next metric in the page.
"""
# NOTE: LogMetric message type does not have an ``Any`` field
# so ``MessageToDict`` can safely be used.
resource = MessageToDict(log_metric_pb)
return Metric.from_api_repr(resource, iterator.client)
def make_gax_logging_api(client):
"""Create an instance of the GAX Logging API.
:type client: :class:`~google.cloud.logging.client.Client`
:param client: The client that holds configuration details.
:rtype: :class:`_LoggingAPI`
:returns: A logging API instance with the proper credentials.
"""
channel = make_secure_channel(
client._credentials, DEFAULT_USER_AGENT,
LoggingServiceV2Client.SERVICE_ADDRESS)
generated = LoggingServiceV2Client(
channel=channel, lib_name='gccl', lib_version=__version__)
return _LoggingAPI(generated, client)
def make_gax_metrics_api(client):
"""Create an instance of the GAX Metrics API.
:type client: :class:`~google.cloud.logging.client.Client`
:param client: The client that holds configuration details.
:rtype: :class:`_MetricsAPI`
:returns: A metrics API instance with the proper credentials.
"""
channel = make_secure_channel(
client._credentials, DEFAULT_USER_AGENT,
MetricsServiceV2Client.SERVICE_ADDRESS)
generated = MetricsServiceV2Client(
channel=channel, lib_name='gccl', lib_version=__version__)
return _MetricsAPI(generated, client)
def make_gax_sinks_api(client):
"""Create an instance of the GAX Sinks API.
:type client: :class:`~google.cloud.logging.client.Client`
:param client: The client that holds configuration details.
:rtype: :class:`_SinksAPI`
:returns: A sinks API instance with the proper credentials.
"""
channel = make_secure_channel(
client._credentials, DEFAULT_USER_AGENT,
ConfigServiceV2Client.SERVICE_ADDRESS)
generated = ConfigServiceV2Client(
channel=channel, lib_name='gccl', lib_version=__version__)
return _SinksAPI(generated, client)
| |
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Functions for interacting with llvm-profdata"""
import logging
import multiprocessing
import os
import re
import shutil
import subprocess
_DIR_SOURCE_ROOT = os.path.normpath(
os.path.join(os.path.dirname(__file__), '..', '..', '..'))
_JAVA_PATH = os.path.join(_DIR_SOURCE_ROOT, 'third_party', 'jdk', 'current',
'bin', 'java')
logging.basicConfig(
format='[%(asctime)s %(levelname)s] %(message)s', level=logging.DEBUG)
def _call_profdata_tool(profile_input_file_paths,
profile_output_file_path,
profdata_tool_path,
sparse=False):
"""Calls the llvm-profdata tool.
Args:
profile_input_file_paths: A list of relative paths to the files that
are to be merged.
profile_output_file_path: The path to the merged file to write.
profdata_tool_path: The path to the llvm-profdata executable.
sparse (bool): flag to indicate whether to run llvm-profdata with --sparse.
Doc: https://llvm.org/docs/CommandGuide/llvm-profdata.html#profdata-merge
Returns:
A list of paths to profiles that had to be excluded to get the merge to
succeed, suspected of being corrupted or malformed.
Raises:
CalledProcessError: An error occurred merging profiles.
"""
try:
subprocess_cmd = [
profdata_tool_path, 'merge', '-o', profile_output_file_path,
]
if sparse:
subprocess_cmd += ['-sparse=true',]
subprocess_cmd.extend(profile_input_file_paths)
logging.info('profdata command: %r', subprocess_cmd)
# Redirecting stderr is required because when error happens, llvm-profdata
# writes the error output to stderr and our error handling logic relies on
# that output.
subprocess.check_call(subprocess_cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as error:
logging.error('Failed to merge profiles, return code (%d), output: %r' %
(error.returncode, error.output))
raise error
logging.info('Profile data is created as: "%r".', profile_output_file_path)
return []
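# The assembled command looks like the following (paths illustrative):
#   llvm-profdata merge -o merged.profdata -sparse=true a.profraw b.profraw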
def _get_profile_paths(input_dir,
input_extension,
input_filename_pattern='.*'):
"""Finds all the profiles in the given directory (recursively)."""
paths = []
for dir_path, _sub_dirs, file_names in os.walk(input_dir):
paths.extend([
os.path.join(dir_path, fn)
for fn in file_names
if fn.endswith(input_extension) and re.search(input_filename_pattern,fn)
])
return paths
def _validate_and_convert_profraws(profraw_files,
profdata_tool_path,
sparse=False):
"""Validates and converts profraws to profdatas.
For each given .profraw file in the input, this method first validates it by
trying to convert it to an indexed .profdata file, and if the validation and
conversion succeeds, the generated .profdata file will be included in the
output, otherwise, won't.
This method is mainly used to filter out invalid profraw files.
Args:
profraw_files: A list of .profraw paths.
profdata_tool_path: The path to the llvm-profdata executable.
sparse (bool): flag to indicate whether to run llvm-profdata with --sparse.
Doc: https://llvm.org/docs/CommandGuide/llvm-profdata.html#profdata-merge
Returns:
A tuple:
A list of converted .profdata files of *valid* profraw files.
A list of *invalid* profraw files.
A list of profraw files that have counter overflows.
"""
for profraw_file in profraw_files:
if not profraw_file.endswith('.profraw'):
raise RuntimeError('%r is expected to be a .profraw file.' % profraw_file)
cpu_count = multiprocessing.cpu_count()
counts = max(10, cpu_count - 5) # Use 10+ processes, but leave 5 cpu cores.
pool = multiprocessing.Pool(counts)
output_profdata_files = multiprocessing.Manager().list()
invalid_profraw_files = multiprocessing.Manager().list()
counter_overflows = multiprocessing.Manager().list()
for profraw_file in profraw_files:
pool.apply_async(
_validate_and_convert_profraw,
(profraw_file, output_profdata_files, invalid_profraw_files,
counter_overflows, profdata_tool_path, sparse))
pool.close()
pool.join()
# Remove inputs, as they won't be needed and they can be pretty large.
for input_file in profraw_files:
os.remove(input_file)
return list(output_profdata_files), list(invalid_profraw_files), list(
counter_overflows)
def _validate_and_convert_profraw(profraw_file, output_profdata_files,
invalid_profraw_files, counter_overflows,
profdata_tool_path, sparse=False):
output_profdata_file = profraw_file.replace('.profraw', '.profdata')
subprocess_cmd = [
profdata_tool_path,
'merge',
'-o',
output_profdata_file,
]
if sparse:
subprocess_cmd.append('--sparse')
subprocess_cmd.append(profraw_file)
logging.info('profdata command: %r', subprocess_cmd)
profile_valid = False
counter_overflow = False
validation_output = None
# 1. Determine if the profile is valid.
try:
# Redirecting stderr is required because when error happens, llvm-profdata
# writes the error output to stderr and our error handling logic relies on
# that output.
validation_output = subprocess.check_output(
subprocess_cmd, stderr=subprocess.STDOUT)
if 'Counter overflow' in validation_output:
counter_overflow = True
else:
profile_valid = True
except subprocess.CalledProcessError as error:
logging.warning('Validating and converting %r to %r failed with output: %r',
profraw_file, output_profdata_file, error.output)
validation_output = error.output
# 2. Add the profile to the appropriate list(s).
if profile_valid:
output_profdata_files.append(output_profdata_file)
else:
invalid_profraw_files.append(profraw_file)
if counter_overflow:
counter_overflows.append(profraw_file)
# 3. Log appropriate message
if not profile_valid:
template = 'Bad profile: %r, output: %r'
if counter_overflow:
template = 'Counter overflow: %r, output: %r'
logging.warning(template, profraw_file, validation_output)
# 4. Delete profdata for invalid profiles if present.
if os.path.exists(output_profdata_file):
# The output file may be created before llvm-profdata determines the
# input is invalid. Delete it so that it does not leak and affect other
# merge scripts.
os.remove(output_profdata_file)
def merge_java_exec_files(input_dir, output_path, jacococli_path):
"""Merges generated .exec files to output_path.
Args:
input_dir (str): The path to traverse to find input files.
output_path (str): Where to write the merged .exec file.
jacococli_path: The path to jacococli.jar.
Raises:
CalledProcessError: merge command failed.
"""
exec_input_file_paths = _get_profile_paths(input_dir, '.exec')
if not exec_input_file_paths:
logging.info('No exec file found under %s', input_dir)
return
cmd = [_JAVA_PATH, '-jar', jacococli_path, 'merge']
cmd.extend(exec_input_file_paths)
cmd.extend(['--destfile', output_path])
subprocess.check_call(cmd, stderr=subprocess.STDOUT)
def merge_profiles(input_dir,
output_file,
input_extension,
profdata_tool_path,
input_filename_pattern='.*',
sparse=False,
skip_validation=False):
"""Merges the profiles produced by the shards using llvm-profdata.
Args:
input_dir (str): The path to traverse to find input profiles.
output_file (str): Where to write the merged profile.
input_extension (str): File extension to look for in the input_dir.
e.g. '.profdata' or '.profraw'
profdata_tool_path: The path to the llvm-profdata executable.
input_filename_pattern (str): The regex pattern of input filename. Should be
a valid regex pattern if present.
sparse (bool): flag to indicate whether to run llvm-profdata with --sparse.
Doc: https://llvm.org/docs/CommandGuide/llvm-profdata.html#profdata-merge
skip_validation (bool): flag to skip the _validate_and_convert_profraws
invocation. only applicable when input_extension is .profraw.
Returns:
The list of profiles that had to be excluded to get the merge to
succeed and a list of profiles that had a counter overflow.
"""
profile_input_file_paths = _get_profile_paths(input_dir,
input_extension,
input_filename_pattern)
invalid_profraw_files = []
counter_overflows = []
if skip_validation:
    logging.warning('--skip-validation has been enabled. Skipping the conversion '
                    'step that ensures profiles are valid.')
if input_extension == '.profraw' and not skip_validation:
profile_input_file_paths, invalid_profraw_files, counter_overflows = (
_validate_and_convert_profraws(profile_input_file_paths,
profdata_tool_path,
sparse=sparse))
logging.info((
'List of invalid .profraw files that failed to validate and convert: %r'
), invalid_profraw_files)
if counter_overflows:
logging.warning('There were %d profiles with counter overflows',
len(counter_overflows))
# The list of input files could be empty in the following scenarios:
  # 1. The test target is a pure Python test which doesn't execute any
  #    C/C++ binaries, such as devtools_type_check.
  # 2. The test target executes binaries and dumps coverage profile data
  #    files, however, all of them turned out to be invalid.
if not profile_input_file_paths:
    logging.info('There are no valid profraw/profdata files to merge, skipping '
                 'invocation of the profdata tool.')
return invalid_profraw_files, counter_overflows
invalid_profdata_files = _call_profdata_tool(
profile_input_file_paths=profile_input_file_paths,
profile_output_file_path=output_file,
profdata_tool_path=profdata_tool_path,
sparse=sparse)
# Remove inputs when merging profraws as they won't be needed and they can be
# pretty large. If the inputs are profdata files, do not remove them as they
# might be used again for multiple test types coverage.
if input_extension == '.profraw':
for input_file in profile_input_file_paths:
os.remove(input_file)
return invalid_profraw_files + invalid_profdata_files, counter_overflows
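# Minimal usage sketch (not part of the original tooling; all paths are
# hypothetical). Merges every .profraw found under a directory into one indexed
# .profdata, validating each raw profile first because skip_validation is False.
def _example_merge_shard_profraws():
  return merge_profiles(
      input_dir='/tmp/coverage_profiles',            # hypothetical directory
      output_file='/tmp/merged.profdata',            # hypothetical output
      input_extension='.profraw',
      profdata_tool_path='/usr/bin/llvm-profdata',   # hypothetical tool location
      sparse=True)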
# We want to retry shards that contain one or more profiles that cannot be
# merged (typically due to corruption described in crbug.com/937521).
def get_shards_to_retry(bad_profiles):
bad_shard_ids = set()
def is_task_id(s):
# Swarming task ids are 16 hex chars. The pythonic way to validate this is
# to cast to int and catch a value error.
try:
      assert len(s) == 16, 'Swarming task IDs are expected to be of length 16'
_int_id = int(s, 16)
return True
except (AssertionError, ValueError):
return False
for profile in bad_profiles:
# E.g. /b/s/w/ir/tmp/t/tmpSvBRii/44b643576cf39f10/profraw/default-1.profraw
_base_path, task_id, _profraw, _filename = os.path.normpath(profile).rsplit(
os.path.sep, 3)
# Since we are getting a task_id from a file path, which is less than ideal,
# do some checking to at least verify that the snippet looks like a valid
# task id.
assert is_task_id(task_id)
bad_shard_ids.add(task_id)
return bad_shard_ids
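# Illustrative sketch (hypothetical path, POSIX separators assumed): a bad profile
# under .../<swarming task id>/profraw/<file>.profraw yields its 16-hex-char task
# id, which identifies the shard to retry.
def _example_shards_to_retry():
  bad = ['/b/s/w/ir/tmp/t/tmpSvBRii/44b643576cf39f10/profraw/default-1.profraw']
  return get_shards_to_retry(bad)  # -> set(['44b643576cf39f10'])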
| |
# encoding: utf-8
__author__ = "Nils Tobias Schmidt"
__email__ = "schmidt89 at informatik.uni-marburg.de"
from androguard.decompiler.dad import decompile
import re
from androlyze.log.Log import log
############################################################
#---Checks
############################################################
def check_own_method_implementation(apk, encoded_method):
    ''' Check if the `encoded_method` is implemented in the APK's own package (i.e. not in a third-party package) '''
# package name separated with "."
apk_pn = apk.package_name.lower()
method_pn = convert_dalvik_pn_to_java_pn(encoded_method.get_class_name().lower())
    # does the method's package contain the APK's package name?
    return method_pn.find(apk_pn) != -1
def check_method_contains_string(encoded_method, regexp, lowercase = True, all_findings = False):
'''
    Check if the instruction output of the `encoded_method` matches the `regexp`.
Parameters
----------
    encoded_method: androguard.core.bytecodes.dvm.EncodedMethod
regexp: str
lowercase: boolean, optional (default is True)
Convert the string on which shall be matched beforehand to lowercase
all_findings : bool, optional (default is False)
If true, return a list of all match objects
Returns
-------
re match object
If not `all_findings`.
list<re match object>
Else
'''
res = []
    for instr in encoded_method.get_instructions():
match_on = instr.get_output()
if lowercase:
match_on = match_on.lower()
match_object = re.search(regexp, match_on)
if match_object:
if not all_findings:
return match_object
else:
res.append(match_object)
return res
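# Usage sketch (assumption: `encoded_method` is an androguard EncodedMethod obtained
# elsewhere, e.g. from DalvikVMFormat.get_methods(); the regex is only an example).
# Returns the first match object for an http:// URL in the method's instructions.
def _example_find_url_in_method(encoded_method):
    return check_method_contains_string(encoded_method, r"http://", lowercase = True)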
def check_instructions_one(instructions, func):
'''
Check if at least one instruction matches with `func`.
Parameters
----------
instructions: iterable<androguard.core.bytecodes.dvm.Instruction>
func: Instruction -> Bool
Returns
-------
bool
'''
    for instruction in instructions:
        if func(instruction):
            return True
    return False
############################################################
#---Decompilation
############################################################
def decompile_pathp(pathp, dalvik_vm_format, vm_analysis, caller = True, show_class = True):
'''
Decompile either the caller or callee
Parameters
----------
pathp: androguard.androguard.core.analysis.analysis.PathP
Edge in method call graph.
dalvik_vm_format: DalvikVMFormat
Parsed .dex file.
vm_analysis: uVMAnalysis
Dex analyzer.
caller: bool, optional (default is True)
Use the src of the `pathp`, hence decompile the caller.
Otherwise the dst is used.
show_class : bool, optional (default is True)
Include the package name in the decompilation.
Returns
-------
str
The decompiled method
    None
        If the method referenced by `pathp` could not be resolved.
Example
-------
>>> decompile_pathp(...)
protected varargs String doInBackground(Void[] p14)
{
org.apache.http.client.methods.HttpGet v6_1 = new org.apache.http.client.methods.HttpGet("http://10.10.0.134:8080/index.html");
v6_1.addHeader("Authorization", new StringBuilder().append("Basic ").append(android.util.Base64.encodeToString(this.CREDENTIALS.getBytes(), 2)).toString());
try {
java.io.InputStream v9 = new org.apache.http.impl.client.DefaultHttpClient().execute(v6_1).getEntity().getContent();
int v7 = de.uni_marburg.ipcinetcallee.InetActivity.inputStream2String(v9);
v9.close();
} catch (org.apache.http.client.ClientProtocolException v2) {
android.util.Log.e("HTTPGetTask", "msg", v2);
v7 = 0;
} catch (java.io.IOException v5) {
android.util.Log.e("HTTPGetTask", "msg", v5);
}
return v7;
}
'''
idx = pathp.src_idx if caller else pathp.dst_idx
encoded_method = dalvik_vm_format.get_method_by_idx(idx)
if encoded_method is not None:
method_analysis = vm_analysis.get_method(encoded_method)
res = ""
if show_class:
res += "class %s\n" % encoded_method.get_class_name()
res += decompile_method_analysis(method_analysis)
return res
# TODO: ADD MORE DECOMPILERS!
def decompile_method_analysis(method_analysis):
'''
Decompile the `method_analysis` object
Parameters
----------
method_analysis: androguard.androguard.core.analysis.analysis.MethodAnalysis
Returns
-------
str
The decompiled method
Example
-------
>>> decompile_method_analysis(...)
protected varargs String doInBackground(Void[] p14)
{
org.apache.http.client.methods.HttpGet v6_1 = new org.apache.http.client.methods.HttpGet("http://10.10.0.134:8080/index.html");
v6_1.addHeader("Authorization", new StringBuilder().append("Basic ").append(android.util.Base64.encodeToString(this.CREDENTIALS.getBytes(), 2)).toString());
try {
java.io.InputStream v9 = new org.apache.http.impl.client.DefaultHttpClient().execute(v6_1).getEntity().getContent();
int v7 = de.uni_marburg.ipcinetcallee.InetActivity.inputStream2String(v9);
v9.close();
} catch (org.apache.http.client.ClientProtocolException v2) {
android.util.Log.e("HTTPGetTask", "msg", v2);
v7 = 0;
} catch (java.io.IOException v5) {
android.util.Log.e("HTTPGetTask", "msg", v5);
}
return v7;
}
'''
dv_method = decompile.DvMethod(method_analysis)
dv_method.process()
return dv_method.get_source()
############################################################
#---Disassembly
############################################################
def disassemble_encoded_method(encoded_method):
'''
Create the disassemble of the `encoded_method`
Parameters
----------
encoded_method : androguard.androguard.core.bytecodes.dvm.EncodedMethod
Returns
-------
str
The disassembled method
Example
-------
>>> disassemble_encoded_method(...)
Lde/uni_marburg/ipcinetcallee/InetActivity$HTTPGetTask; doInBackground ([Ljava/lang/Void;)Ljava/lang/String;
0 new-instance v6, Lorg/apache/http/client/methods/HttpGet;
4 const-string v10, 'http://10.10.0.134:8080/index.html'
8 invoke-direct v6, v10, Lorg/apache/http/client/methods/HttpGet;-><init>(Ljava/lang/String;)V
e iget-object v10, v13, Lde/uni_marburg/ipcinetcallee/InetActivity$HTTPGetTask;->CREDENTIALS Ljava/lang/String;
12 invoke-virtual v10, Ljava/lang/String;->getBytes()[B
18 move-result-object v10
1a const/4 v11, 2
1c invoke-static v10, v11, Landroid/util/Base64;->encodeToString([B I)Ljava/lang/String;
22 move-result-object v0
24 const-string v10, 'Authorization'
28 new-instance v11, Ljava/lang/StringBuilder;
2c invoke-direct v11, Ljava/lang/StringBuilder;-><init>()V
32 const-string v12, 'Basic '
36 invoke-virtual v11, v12, Ljava/lang/StringBuilder;->append(Ljava/lang/String;)Ljava/lang/StringBuilder;
3c move-result-object v11
3e invoke-virtual v11, v0, Ljava/lang/StringBuilder;->append(Ljava/lang/String;)Ljava/lang/StringBuilder;
44 move-result-object v11
46 invoke-virtual v11, Ljava/lang/StringBuilder;->toString()Ljava/lang/String;
4c move-result-object v11
4e invoke-interface v6, v10, v11, Lorg/apache/http/client/methods/HttpUriRequest;->addHeader(Ljava/lang/String; Ljava/lang/String;)V
54 new-instance v4, Lorg/apache/http/impl/client/DefaultHttpClient;
58 invoke-direct v4, Lorg/apache/http/impl/client/DefaultHttpClient;-><init>()V
5e const-string v3, ''
62 const/16 v10, 8192
66 new-array v1, v10, [B
6a invoke-interface v4, v6, Lorg/apache/http/client/HttpClient;->execute(Lorg/apache/http/client/methods/HttpUriRequest;)Lorg/apache/http/HttpResponse;
70 move-result-object v8
72 invoke-interface v8, Lorg/apache/http/HttpResponse;->getEntity()Lorg/apache/http/HttpEntity;
78 move-result-object v10
7a invoke-interface v10, Lorg/apache/http/HttpEntity;->getContent()Ljava/io/InputStream;
80 move-result-object v9
82 invoke-static v9, Lde/uni_marburg/ipcinetcallee/InetActivity;->inputStream2String(Ljava/io/InputStream;)Ljava/lang/String;
88 move-result-object v7
8a invoke-virtual v9, Ljava/io/InputStream;->close()V
90 return-object v7
92 move-exception v2
94 const-string v10, 'HTTPGetTask'
98 const-string v11, 'msg'
9c invoke-static v10, v11, v2, Landroid/util/Log;->e(Ljava/lang/String; Ljava/lang/String; Ljava/lang/Throwable;)I
a2 const/4 v7, 0
a4 goto -a
a6 move-exception v5
a8 const-string v10, 'HTTPGetTask'
ac const-string v11, 'msg'
b0 invoke-static v10, v11, v5, Landroid/util/Log;->e(Ljava/lang/String; Ljava/lang/String; Ljava/lang/Throwable;)I
b6 goto -a
'''
# add method signature
disassembly = fmt_encoded_method(encoded_method) + "\n"
idx = 0
for i in encoded_method.get_instructions():
disassembly += "%x %s %s\n" % (idx, i.get_name(), i.get_output())
idx += i.get_length()
return disassembly
def disassemble_pathp(pathp, dalvik_vm_format, caller = True):
'''
Disassemble either the caller or callee.
Parameters
----------
pathp: androguard.androguard.core.analysis.analysis.PathP
Edge in method call graph.
dalvik_vm_format: DalvikVMFormat
Parsed .dex file.
caller: bool, optional (default is True)
Use the src of the `pathp`, hence disassemble the caller.
Otherwise the dst is used.
Returns
-------
str
The disassembled method.
    None
        If the method referenced by `pathp` could not be resolved.
    See :py:meth:`.disassemble_encoded_method`
'''
idx = pathp.src_idx if caller else pathp.dst_idx
encoded_method = dalvik_vm_format.get_method_by_idx(idx)
if encoded_method is not None:
return disassemble_encoded_method(encoded_method)
############################################################
#---Filtering
############################################################
def filter_own_implementations(apk, dalvik_vm_format, pathp_list):
'''
Filter the `PathP` objects which are inside the apk package
Parameters
----------
apk: Apk
The apk representation
dalvik_vm_format: DalvikVMFormat
Parsed .dex file.
pathp_list: list<androguard.androguard.core.analysis.analysis.PathP>
Returns
-------
list<androguard.androguard.core.analysis.analysis.PathP>
'''
pathp_list_check = []
for pathp in pathp_list:
encoded_method = dalvik_vm_format.get_method_by_idx(pathp.src_idx)
# package name separated with "."
apk_pn = apk.package_name.lower()
method_pn = convert_dalvik_pn_to_java_pn(encoded_method.get_class_name().lower())
# package names equal ??
if method_pn.find(apk_pn) != -1:
pathp_list_check.append(pathp)
return pathp_list_check
############################################################
#---Abstract Syntax Tree (AST)
############################################################
def ast_for_pathp(pathp, dalvik_vm_format, vm_analysis, caller = True):
'''
    Create the abstract syntax tree (AST) of either the caller or callee.
Parameters
----------
pathp: androguard.androguard.core.analysis.analysis.PathP
Edge in method call graph.
dalvik_vm_format: DalvikVMFormat
Parsed .dex file.
vm_analysis: VMAnalysis
Dex analyzer.
caller: bool, optional (default is True)
        Use the src of the `pathp`, hence build the AST of the caller.
Otherwise the dst is used.
Returns
-------
    dict
        The abstract syntax tree of the method.
    None
        If the method referenced by `pathp` could not be resolved.
'''
idx = pathp.src_idx if caller else pathp.dst_idx
encoded_method = dalvik_vm_format.get_method_by_idx(idx)
if encoded_method is not None:
return ast_for_method_analysis(vm_analysis.get_method(encoded_method))
def ast_for_method_analysis(method_analysis):
'''
Create the abstract syntax tree.
Parameters
----------
method_analysis: androguard.androguard.core.analysis.analysis.MethodAnalysis
Returns
-------
dict
The abstract syntax tree of the `method_analysis`.
'''
dv_method = decompile.DvMethod(method_analysis)
dv_method.process(doAST = True)
return dv_method.ast
def ast_get_containing_collection(iterable, pattern):
'''
Check the structure recursive for matches with the regex `pattern` and return the collection that contains the match.
Parameters
----------
iterable: iterable
pattern: str
Regex
Returns
-------
iterable
'''
def ast_get_containing_collection_inner(iterable, pattern, containing_collection, findings):
def do_check(on, containing_collection):
if isinstance(on, (str, unicode)):
mo = re.search(pattern, on)
if mo:
findings.append(containing_collection)
return mo
# dict
if isinstance(iterable, dict):
# recursively check all keys and values
for k, v in iterable.items():
# do check (method has side-effect!)
do_check(k, iterable)
# recursively check value too
ast_get_containing_collection_inner(v, pattern, iterable, findings)
# lists, sets
elif isinstance(iterable, (tuple, list, set)):
for it in iterable:
ast_get_containing_collection_inner(it, pattern, iterable, findings)
# do_check on value
else:
do_check(iterable, containing_collection)
return findings
return ast_get_containing_collection_inner(iterable, pattern, iterable, [])
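# Usage sketch (toy nested structure, not a real androguard AST): returns the
# collection that directly contains the string matching the regex.
def _example_ast_lookup():
    toy_ast = {'MethodInvocation': ['Landroid/util/Log;', ['e', 'HTTPGetTask']]}
    return ast_get_containing_collection(toy_ast, r"Landroid/util/Log;")
    # -> [['Landroid/util/Log;', ['e', 'HTTPGetTask']]]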
############################################################
#---Formatting
############################################################
def fmt_encoded_method(encoded_method):
'''
    Return a string representation of `encoded_method`.
Parameters
----------
encoded_method : androguard.androguard.core.bytecodes.dvm.EncodedMethod
Returns
-------
str
Example
-------
>>> fmt_encoded_method(...)
Lde/uni_marburg/ipcinetcallee/InetActivity$HTTPGetTask; doInBackground ([Ljava/lang/Void;)Ljava/lang/String;
'''
return "%s %s %s" % (encoded_method.get_class_name(), encoded_method.get_name(), encoded_method.get_descriptor())
############################################################
#---Converting
############################################################
def convert_dalvik_pn_to_java_pn(dalvik_pn, ignore_inner_class = True):
    ''' Convert e.g. "Lde/uni_marburg/ipcinetcallee/InetActivity$HTTPGetTask;" to "de.uni_marburg.ipcinetcallee.InetActivity"
    Parameters
    ----------
    dalvik_pn : str
        Dalvik class name, e.g. "Lde/uni_marburg/ipcinetcallee/InetActivity;"
    ignore_inner_class : bool, optional (default is True)
        Strip inner class names like $HTTPGetTask
Returns
-------
str
Example
-------
>>> print convert_dalvik_pn_to_java_pn('Lde/uni_marburg/ipcinetcallee/InetActivity$HTTPGetTask;', True)
"de.uni_marburg.ipcinetcallee.InetActivity"
>>> print convert_dalvik_pn_to_java_pn('Lde/uni_marburg/ipcinetcallee/InetActivity$HTTPGetTask;', False)
"de.uni_marburg.ipcinetcallee.InetActivity.HTTPGetTask"
'''
package_name = dalvik_pn[1:]
package_name = package_name.replace("/", ".")
package_name = package_name[:-1]
if not ignore_inner_class:
        # replace the inner-class separator "$" with "."
        package_name = re.sub(r"\$", ".", package_name)
else:
dollar_idx = package_name.find("$")
if dollar_idx != -1:
package_name = package_name[:dollar_idx]
return package_name
def convert_java_pn_to_dalvik(java_pn):
    ''' Convert e.g. "de.uni_marburg.ipcinetcallee.InetActivity" to "Lde/uni_marburg/ipcinetcallee/InetActivity;"
Parameters
----------
java_pn: str
Package name separated with "."
Returns
-------
str
'''
java_pn = java_pn.replace(".", "/")
return 'L%s;' % java_pn
if __name__ == '__main__':
print convert_dalvik_pn_to_java_pn('Lde/uni_marburg/ipcinetcallee/InetActivity$HTTPGetTask;', True)
print convert_dalvik_pn_to_java_pn('Lde/uni_marburg/ipcinetcallee/InetActivity$HTTPGetTask;', False)
print convert_java_pn_to_dalvik('de.uni_marburg.ipcinetcallee.InetActivity')
| |
# Copyright (c) 2015 Hitachi Data Systems.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Helper class for Data Service operations."""
import os
from oslo_config import cfg
from oslo_log import log
from manila.common import constants
from manila import exception
from manila.i18n import _, _LW
from manila.share import rpcapi as share_rpc
from manila import utils
LOG = log.getLogger(__name__)
data_helper_opts = [
cfg.IntOpt(
'data_access_wait_access_rules_timeout',
default=180,
help="Time to wait for access rules to be allowed/denied on backends "
"when migrating a share (seconds)."),
cfg.StrOpt(
'data_node_access_ip',
help="The IP of the node interface connected to the admin network. "
"Used for allowing access to the mounting shares."),
cfg.StrOpt(
'data_node_access_cert',
help="The certificate installed in the data node in order to "
"allow access to certificate authentication-based shares."),
cfg.StrOpt(
'data_node_access_admin_user',
help="The admin user name registered in the security service in order "
"to allow access to user authentication-based shares."),
cfg.DictOpt(
'data_node_mount_options',
default={},
help="Mount options to be included in the mount command for share "
"protocols. Use dictionary format, example: "
"{'nfs': '-o nfsvers=3', 'cifs': '-o user=foo,pass=bar'}"),
]
CONF = cfg.CONF
CONF.register_opts(data_helper_opts)
class DataServiceHelper(object):
def __init__(self, context, db, share):
self.db = db
self.share = share
self.context = context
self.share_rpc = share_rpc.ShareAPI()
self.wait_access_rules_timeout = (
CONF.data_access_wait_access_rules_timeout)
def deny_access_to_data_service(self, access_ref_list, share_instance):
for access_ref in access_ref_list:
self._change_data_access_to_instance(
share_instance, access_ref, allow=False)
    # NOTE(ganso): Cleanup methods do not throw exceptions; the exceptions worth
    # raising are those from the failed operations that triggered the cleanup.
def cleanup_data_access(self, access_ref_list, share_instance_id):
try:
self.deny_access_to_data_service(
access_ref_list, share_instance_id)
except Exception:
LOG.warning(_LW("Could not cleanup access rule of share %s."),
self.share['id'])
def cleanup_temp_folder(self, instance_id, mount_path):
try:
path = os.path.join(mount_path, instance_id)
if os.path.exists(path):
os.rmdir(path)
self._check_dir_not_exists(path)
except Exception:
LOG.warning(_LW("Could not cleanup instance %(instance_id)s "
"temporary folders for data copy of "
"share %(share_id)s."), {
'instance_id': instance_id,
'share_id': self.share['id']})
def cleanup_unmount_temp_folder(self, unmount_template, mount_path,
share_instance_id):
try:
self.unmount_share_instance(unmount_template, mount_path,
share_instance_id)
except Exception:
LOG.warning(_LW("Could not unmount folder of instance"
" %(instance_id)s for data copy of "
"share %(share_id)s."), {
'instance_id': share_instance_id,
'share_id': self.share['id']})
def _change_data_access_to_instance(
self, instance, access_ref, allow=False):
self.db.share_instance_update_access_status(
self.context, instance['id'], constants.STATUS_OUT_OF_SYNC)
if allow:
self.share_rpc.allow_access(self.context, instance, access_ref)
else:
self.share_rpc.deny_access(self.context, instance, access_ref)
utils.wait_for_access_update(
self.context, self.db, instance, self.wait_access_rules_timeout)
def allow_access_to_data_service(
self, share_instance, connection_info_src,
dest_share_instance=None, connection_info_dest=None):
allow_access_to_destination_instance = (dest_share_instance and
connection_info_dest)
        # NOTE(ganso): intersect the access types and protocols compatible with
        # both instances
if allow_access_to_destination_instance:
access_mapping = {}
for a_type, protocols in (
connection_info_src['access_mapping'].items()):
for proto in protocols:
if (a_type in connection_info_dest['access_mapping'] and
proto in
connection_info_dest['access_mapping'][a_type]):
access_mapping[a_type] = access_mapping.get(a_type, [])
access_mapping[a_type].append(proto)
else:
access_mapping = connection_info_src['access_mapping']
access_list = self._get_access_entries_according_to_mapping(
access_mapping)
access_ref_list = []
for access in access_list:
values = {
'share_id': self.share['id'],
'access_type': access['access_type'],
'access_level': access['access_level'],
'access_to': access['access_to'],
}
old_access_list = self.db.share_access_get_all_by_type_and_access(
self.context, self.share['id'], access['access_type'],
access['access_to'])
for old_access in old_access_list:
self._change_data_access_to_instance(
share_instance, old_access, allow=False)
access_ref = self.db.share_instance_access_create(
self.context, values, share_instance['id'])
self._change_data_access_to_instance(
share_instance, access_ref, allow=True)
if allow_access_to_destination_instance:
access_ref = self.db.share_instance_access_create(
self.context, values, dest_share_instance['id'])
self._change_data_access_to_instance(
dest_share_instance, access_ref, allow=True)
access_ref_list.append(access_ref)
return access_ref_list
def _get_access_entries_according_to_mapping(self, access_mapping):
access_list = []
for access_type, protocols in access_mapping.items():
if access_type.lower() == 'cert':
access_to = CONF.data_node_access_cert
elif access_type.lower() == 'ip':
access_to = CONF.data_node_access_ip
elif access_type.lower() == 'user':
access_to = CONF.data_node_access_admin_user
else:
msg = _("Unsupported access type provided: %s.") % access_type
raise exception.ShareDataCopyFailed(reason=msg)
if not access_to:
msg = _("Configuration for Data node mounting access type %s "
"has not been set.") % access_type
raise exception.ShareDataCopyFailed(reason=msg)
access = {
'access_type': access_type,
'access_level': constants.ACCESS_LEVEL_RW,
'access_to': access_to,
}
access_list.append(access)
return access_list
@utils.retry(exception.NotFound, 0.1, 10, 0.1)
def _check_dir_exists(self, path):
if not os.path.exists(path):
raise exception.NotFound("Folder %s could not be found." % path)
@utils.retry(exception.Found, 0.1, 10, 0.1)
def _check_dir_not_exists(self, path):
if os.path.exists(path):
raise exception.Found("Folder %s was found." % path)
def mount_share_instance(self, mount_template, mount_path,
share_instance):
path = os.path.join(mount_path, share_instance['id'])
options = CONF.data_node_mount_options
options = {k.lower(): v for k, v in options.items()}
proto_options = options.get(share_instance['share_proto'].lower())
if not proto_options:
proto_options = ''
if not os.path.exists(path):
os.makedirs(path)
self._check_dir_exists(path)
mount_command = mount_template % {'path': path,
'options': proto_options}
utils.execute(*(mount_command.split()), run_as_root=True)
def unmount_share_instance(self, unmount_template, mount_path,
share_instance_id):
path = os.path.join(mount_path, share_instance_id)
unmount_command = unmount_template % {'path': path}
utils.execute(*(unmount_command.split()), run_as_root=True)
try:
if os.path.exists(path):
os.rmdir(path)
self._check_dir_not_exists(path)
except Exception:
LOG.warning(_LW("Folder %s could not be removed."), path)
| |
#!/usr/bin/python
#
# Hey, here's a thing:
#
# You can use this bit of python script to generate GCode to drill a PCB based on an image file that you used
# to etch the board.
#
# This script makes GCode to drill the center of sections of an image that are a given color or brightness.
#
# All you need to do is load the image file that you used to etch and color the things you want drilled.
# This should be easy since all of your drills are probably surrounded by traces and all of your traces are
# probably colored black. Just use your favorite graphic editor (such as gimp) to flood fill parts of the board
# that aren't traces or drills, leaving the drills as the only thing that are white.
#
# Run this script on your edited image and you'll get some GCode.
#
# Before you run the GCode, jog the spindle over where you want the topmost, leftmost hole to be drilled and
# zero your machine.
# The GCode will begin by moving over where the bottommost, rightmost hole would be drilled.
# Move your workpiece, return to zero, rewind, and restart the GCode until your machine lines up with both drills,
# then you can allow the machine to continue to drill your board.
#
from __future__ import print_function
import sys
import math
from PIL import Image
import subprocess
import re
import argparse
class BoundingBox:
def __init__(self):
self.coord = [[0, 0], [0, 0]]
self.empty = 1
def intersects(self, box):
return (((1 ^ self.empty) and (1 ^ box.empty)) and
((self.coord[0][0] < box.coord[1][0]) and
(self.coord[0][1] < box.coord[1][1]) and
(self.coord[1][0] > box.coord[0][0]) and
(self.coord[1][1] > box.coord[0][1])))
def center(self):
return [self.coord[0][0] + ((self.coord[1][0] - self.coord[0][0]) / 2),
self.coord[0][1] + ((self.coord[1][1] - self.coord[0][1]) / 2)]
def boundCoord(self, coord):
if (self.empty):
self.coord[0][0] = coord[0]
self.coord[0][1] = coord[1]
self.coord[1][0] = coord[0]
self.coord[1][1] = coord[1]
self.empty = 0
else:
if (coord[0] < self.coord[0][0]):
self.coord[0][0] = coord[0]
if (coord[1] < self.coord[0][1]):
self.coord[0][1] = coord[1]
if (coord[0] > self.coord[1][0]):
self.coord[1][0] = coord[0]
if (coord[1] > self.coord[1][1]):
self.coord[1][1] = coord[1]
class BoundingBoxList:
def __init__(self):
self.boxes = []
def addBox(self, box):
for oldBox in self.boxes:
if (oldBox.intersects(box)):
return
self.boxes.append(box)
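# Quick sketch (toy pixel coordinates): growing a BoundingBox around scanned pixels
# and reading back its center, as the DrillMap scanner below does for each blob of
# "on" pixels.
def exampleBoundingBoxCenter():
    box = BoundingBox()
    for coord in ([10, 10], [14, 10], [12, 13]):
        box.boundCoord(coord)
    return box.center()  # -> [12, 11] (integer division under Python 2)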
# use ImageMagick to figure out how many pixels per inch or cm in the image file
def getDensity(filename, units = "PixelsPerInch"):
pipe = subprocess.Popen(["identify", "-format", "%x,%y", "-units", units, filename],
stdout=subprocess.PIPE)
res = re.sub('[\t\r\n"]', '', pipe.communicate()[0]).split(',')
xres = float(res[0].split(' ')[0])
yres = float(res[1].split(' ')[0])
return [xres, yres]
# make a list of drill points from an image map
class DrillMap:
def __init__(self, filename, units = 'Inches', density = [], rgbThresh = 127 * 3):
self.image = Image.open(filename)
self.pixmap = self.image.load()
if (len(density) == 0):
if (units == 'Inches'):
self.density = getDensity(filename)
else:
cmDensity = getDensity(filename, units = 'PixelsPerCentimeter')
self.density = [float(cmDensity[0]) / 10, float(cmDensity[1]) / 10]
else:
            self.density = density
        self.rgbThresh = rgbThresh
self.boxlist = BoundingBoxList()
self.drillList = []
self.findBoxes()
self.makeDrillList()
def coordOffset(self, coord):
return [float(coord[0]) / float(self.density[0]), float(coord[1]) / float(self.density[1])]
def isCoordOn(self, coord):
pixel = self.pixmap[coord[0], coord[1]]
if (self.image.mode == "RGB"):
sum = pixel[0] + pixel[1] + pixel[2]
return (sum > self.rgbThresh)
if (self.image.mode == "1"):
return pixel
def scanLeftToBox(self, coord, box):
y = coord[1]
x = coord[0]
while ((x >= 0) and self.isCoordOn([x, y])):
box.boundCoord([x, y])
x = x - 1
return (x != coord[0])
def scanRightToBox(self, coord, box):
y = coord[1]
x = coord[0]
        # bound x by the image width (size[0]), not the height
        while ((x <= self.image.size[0] - 1) and self.isCoordOn([x, y])):
box.boundCoord([x, y])
x = x + 1
return (x != coord[0])
def scanLineToBox(self, coord, box):
return (self.scanLeftToBox(coord, box) or self.scanRightToBox(coord, box))
def scanUpperLineToBox(self, coord, box):
if (coord[1] > 0):
upperCoord = [int(box.center()[0]), coord[1] - 1]
if (self.scanLineToBox(upperCoord, box)):
self.scanUpperLineToBox(upperCoord, box)
def scanLowerLineToBox(self, coord, box):
if (coord[1] < self.image.size[1] - 1):
lowerCoord = [box.center()[0], coord[1] + 1]
if (self.scanLineToBox(lowerCoord, box)):
self.scanLowerLineToBox(lowerCoord, box)
def scanToBox(self, coord):
box = BoundingBox()
if (self.scanRightToBox(coord, box)):
self.scanUpperLineToBox(coord, box)
self.scanLowerLineToBox(coord, box)
return box
def findBoxes(self):
y = 0
while (y < self.image.size[1] - 1):
x = 0
while (x < self.image.size[0] - 1):
if (self.isCoordOn([x, y])):
newBox = self.scanToBox([x, y])
if (not newBox.empty):
self.boxlist.addBox(newBox)
x = newBox.coord[1][0] + 1
else:
x += 1
else:
x += 1
y += 1
def makeDrillList(self):
for eachBox in self.boxlist.boxes:
self.drillList.append(self.coordOffset(eachBox.center()))
class GCode:
GCodeCommands = {'Mach3': {
'Message': '(',
'Stop': 'M0',
'Sleep': 'M01',
'SpindleCW': 'M03',
'SpindleCCW': 'M04',
'SpindleStop': 'M05',
'ToolChange': 'M06',
'Pause': 'M60',
'FastMove': 'G0',
'SlowMove': 'G1',
'Dwell': 'G4',
'InchesMode': 'G20',
'MillimetersMode': 'G21',
'MoveToOrigin': 'G28',
        'ClearToolOffset': 'G49',
'Drill': 'G81',
'DrillWithDwell': 'G82',
'AbsoluteMode': 'G90',
'RelativeMode': 'G91',
'SetPosition': 'G92',
},
'EMC': {
'Message': '(MSG,',
'Stop': 'M0',
'Sleep': 'M01',
'SpindleCW': 'M03',
'SpindleCCW': 'M04',
'SpindleStop': 'M05',
'ToolChange': 'M06',
'Pause': 'M60',
'FastMove': 'G0',
'SlowMove': 'G1',
'Dwell': 'G4',
'InchesMode': 'G20',
'MillimetersMode': 'G21',
'MoveToOrigin': 'G28',
        'ClearToolOffset': 'G49',
'Drill': 'G81',
'DrillWithDwell': 'G82',
'AbsoluteMode': 'G90',
'RelativeMode': 'G91',
'SetPosition': 'G92',
}}
def __init__(self, theGCodeType):
self.variant = theGCodeType
def Comment(self, string):
return " ; " + string
def Message(self, string):
return self.GCodeCommands[self.variant]['Message'] + string + " )"
def Pause(self):
return self.GCodeCommands[self.variant]['Pause']
def Spindle(self, Mode):
SpindleModes = {'Stop': 'SpindleStop', 'CW': 'SpindleCW', 'CCW': 'SpindleCCW'}
return self.GCodeCommands[self.variant][SpindleModes[Mode]]
def Units(self, theUnits):
if (theUnits == 'Inches'):
return self.GCodeCommands[self.variant]['InchesMode']
else:
return self.GCodeCommands[self.variant]['MillimetersMode']
def Absolute(self, isAbsolute = True):
if (isAbsolute):
return self.GCodeCommands[self.variant]['AbsoluteMode']
else:
return self.GCodeCommands[self.variant]['RelativeMode']
def _CommonArgs(self, X = None, Y = None, Z = None, rate = None):
OutStr = ''
if (X != None):
OutStr += ' X' + format(X, ".4f")
if (Y != None):
OutStr += ' Y' + format(Y, ".4f")
if (Z != None):
OutStr += ' Z' + format(Z, ".4f")
if (rate != None):
OutStr += ' F' + format(rate, ".4f")
return OutStr
def Move(self, X = None, Y = None, Z = None, rate = None, speed='Fast'):
OutStr = self.GCodeCommands[self.variant][speed + 'Move']
OutStr += self._CommonArgs(X = X, Y = Y, Z = Z, rate = rate)
return OutStr
def Dwell(self, seconds = 1):
        OutStr = self.GCodeCommands[self.variant]['Dwell'] + ' P' + str(seconds)
return OutStr
def Drill(self, X = None, Y = None, Z = None, retract = None, seconds = None, rate = None):
if (seconds != None):
OutStr = self.GCodeCommands[self.variant]['DrillWithDwell']
            OutStr += ' P' + str(seconds)
else:
OutStr = self.GCodeCommands[self.variant]['Drill']
OutStr += self._CommonArgs(X = X, Y = Y, Z = Z, rate = rate)
if (retract != None):
            OutStr += ' R' + str(retract)
return OutStr
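# Quick sketch (hypothetical coordinates): one drill cycle line produced by the
# class above for EMC-flavoured GCode.
def exampleDrillLine():
    gc = GCode('EMC')
    # -> 'G82 P0.5 X0.1000 Y0.2000 Z-0.2000 R0.25'
    return gc.Drill(X = 0.1, Y = 0.2, Z = -0.2, retract = 0.25, seconds = 0.5)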
# -------- execution starts here
# parse parameters
# TODO: add density parameter & drill color parameter & check for ImageMagick
parser = argparse.ArgumentParser()
parser.add_argument('-v', '--verbose', action='store_true', help='spew possibly useless output')
parser.add_argument('-m', '--millimeters',
action='store_const', dest='units', const='Millimeters', help='set units to millimeters')
parser.add_argument('-i', '--inches',
action='store_const', dest='units', const='Inches', help='set units to inches')
parser.add_argument('-a', '--mach3',
action='store_const', dest='gcode', const='Mach3', help='set gcode type to mach3')
parser.add_argument('-e', '--emc',
action='store_const', dest='gcode', const='EMC', help='set gcode type to emc')
parser.add_argument('-s', '--safe',
                    default=0.25, type=float, help='safe height')
parser.add_argument('-d', '--drill',
                    default=-0.2, type=float, help='drill depth')
parser.add_argument('-p', '--dwell',
                    default=0.5, type=float, help='drill dwell')
parser.add_argument('-f', '--feed',
                    default=100.0, type=float, help='feed rate')
parser.add_argument('input')
args = parser.parse_args()
if (args.gcode == None):
args.gcode = 'Mach3'
if (args.units == None):
args.units = 'Inches'
theMap = DrillMap(args.input, args.units)
# make drill coordinates relative to first drill
if (theMap.drillList):
firstCoord = theMap.drillList[0]
relativeDrillList = []
for drill in theMap.drillList:
newCoord = [drill[0] - firstCoord[0], drill[1] - firstCoord[1]]
relativeDrillList.append(newCoord)
# output gcode for the list of drills
# init machine, set units, zero axes
gc = GCode(args.gcode)
print(gc.Spindle('Stop'))
print(gc.Units(args.units))
print(gc.Absolute())
print(gc.Pause(), gc.Comment('Check that tool is aligned with first drill'))
print(gc.Move(Z = args.safe))
# move to last drill position and pause
lastDrill = len(relativeDrillList) - 1
print(gc.Move(X = relativeDrillList[lastDrill][0], Y = relativeDrillList[lastDrill][1]))
print(gc.Pause())
print(gc.Pause(), gc.Comment('Check that tool is aligned with last drill'))
print(gc.Spindle('CW'))
print(gc.Dwell(3))
print(gc.Message('Drilling'))
# move to each drill position and drill
for eachDrill in relativeDrillList:
print(gc.Drill(X = eachDrill[0], Y = eachDrill[1], Z = args.drill, retract = args.safe, seconds = args.dwell))
# end of GCode program
print(gc.Spindle('Stop'))
print(gc.Pause())
| |
import wx
import sys
from sas.sasgui.perspectives.calculator import calculator_widgets as widget
from sas.sasgui.perspectives.file_converter.converter_widgets import VectorInput
from wx.lib.scrolledpanel import ScrolledPanel
from sas.sasgui.guiframe.panel_base import PanelBase
from sas.sascalc.dataloader.data_info import Detector
from sas.sasgui.guiframe.events import StatusEvent
from sas.sasgui.guiframe.utils import check_float
if sys.platform.count("win32") > 0:
PANEL_TOP = 0
_STATICBOX_WIDTH = 350
PANEL_SIZE = 440
FONT_VARIANT = 0
else:
PANEL_TOP = 60
_STATICBOX_WIDTH = 380
PANEL_SIZE = 470
FONT_VARIANT = 1
class MetadataPanel(ScrolledPanel, PanelBase):
"""
A common base class to be extended by panels that deal with metadata input.
Handles input validation and passing inputted data back to ConverterPanel.
"""
def __init__(self, parent, metadata, base=None, *args, **kwargs):
ScrolledPanel.__init__(self, parent, *args, **kwargs)
PanelBase.__init__(self)
self.SetupScrolling()
self.SetWindowVariant(variant=FONT_VARIANT)
self.base = base
self.parent = parent
        self._to_validate = [] # A list of inputs that should contain floats
self._vectors = [] # A list of VectorInputs to be validated
self.metadata = metadata
def get_property_string(self, name, is_float=False):
value = getattr(self.metadata, name)
if value is None or value == []:
value = ''
is_float = False
if isinstance(value, list):
value = value[0]
value = str(value)
        if is_float and '.' not in value: value += '.0'
return value
def on_change(self, event):
ctrl = event.GetEventObject()
value = ctrl.GetValue()
name = ctrl.GetName()
old_value = getattr(self.metadata, name)
if value == '': value = None
if isinstance(old_value, list): value = [value]
setattr(self.metadata, name, value)
def on_close(self, event=None):
for ctrl in self._to_validate:
ctrl.SetBackgroundColour(wx.WHITE)
if ctrl.GetValue() == '': continue
if not check_float(ctrl):
msg = "{} must be a valid float".format(
ctrl.GetName().replace("_", " "))
wx.PostEvent(self.parent.manager.parent.manager.parent,
StatusEvent(status=msg, info='error'))
return False
for vector_in in self._vectors:
is_valid, invalid_ctrl = vector_in.Validate()
if not is_valid:
msg = "{} must be a valid float".format(
invalid_ctrl.GetName().replace("_", " "))
wx.PostEvent(self.parent.manager.parent.manager.parent,
StatusEvent(status=msg, info='error'))
return False
setattr(self.metadata, vector_in.GetName(), vector_in.GetValue())
return True
class DetectorPanel(MetadataPanel):
def __init__(self, parent, detector, base=None, *args, **kwargs):
if detector.name is None:
detector.name = ''
MetadataPanel.__init__(self, parent, detector, base, *args, **kwargs)
self._do_layout()
self.SetAutoLayout(True)
self.Layout()
def on_close(self, event=None):
if not MetadataPanel.on_close(self, event):
return
self.parent.manager.detector = self.metadata
self.parent.on_close(event)
def _do_layout(self):
vbox = wx.BoxSizer(wx.VERTICAL)
section = wx.StaticBox(self, -1, "Detector")
section_sizer = wx.StaticBoxSizer(section, wx.VERTICAL)
section_sizer.SetMinSize((_STATICBOX_WIDTH, -1))
input_grid = wx.GridBagSizer(5, 5)
y = 0
name_label = wx.StaticText(self, -1, "Name: ")
input_grid.Add(name_label, (y,0), (1,1), wx.ALL, 5)
name_input = wx.TextCtrl(self, -1, name="name")
input_grid.Add(name_input, (y,1), (1,1))
name_input.Bind(wx.EVT_TEXT, self.on_change)
y += 1
distance_label = wx.StaticText(self, -1,
"Distance (mm): ")
input_grid.Add(distance_label, (y, 0), (1,1), wx.ALL, 5)
distance_input = wx.TextCtrl(self, -1,
name="distance", size=(50,-1))
input_grid.Add(distance_input, (y,1), (1,1))
distance_input.Bind(wx.EVT_TEXT, self.on_change)
self._to_validate.append(distance_input)
y += 1
offset_label = wx.StaticText(self, -1, "Offset (mm): ")
input_grid.Add(offset_label, (y,0), (1,1), wx.ALL, 5)
offset_input = VectorInput(self, "offset")
input_grid.Add(offset_input.GetSizer(), (y,1), (1,1))
self._vectors.append(offset_input)
y += 1
orientation_label = wx.StaticText(self, -1, "Orientation (\xb0): ")
input_grid.Add(orientation_label, (y,0), (1,1), wx.ALL, 5)
orientation_input = VectorInput(self, "orientation", z_enabled=True,
labels=["Roll: ", "Pitch: ", "Yaw: "])
input_grid.Add(orientation_input.GetSizer(), (y,1), (1,1))
self._vectors.append(orientation_input)
y += 1
pixel_label = wx.StaticText(self, -1, "Pixel Size (mm): ")
input_grid.Add(pixel_label, (y,0), (1,1), wx.ALL, 5)
pixel_input = VectorInput(self, "pixel_size")
input_grid.Add(pixel_input.GetSizer(), (y,1), (1,1))
self._vectors.append(pixel_input)
y += 1
beam_label = wx.StaticText(self, -1, "Beam Center (mm): ")
input_grid.Add(beam_label, (y,0), (1,1), wx.ALL, 5)
beam_input = VectorInput(self, "beam_center")
input_grid.Add(beam_input.GetSizer(), (y,1), (1,1))
self._vectors.append(beam_input)
y += 1
slit_label = wx.StaticText(self, -1, "Slit Length (mm): ")
input_grid.Add(slit_label, (y,0), (1,1), wx.ALL, 5)
slit_input = wx.TextCtrl(self, -1, name="slit_length", size=(50,-1))
input_grid.Add(slit_input, (y,1), (1,1))
slit_input.Bind(wx.EVT_TEXT, self.on_change)
self._to_validate.append(slit_input)
y += 1
done_btn = wx.Button(self, -1, "Done")
input_grid.Add(done_btn, (y,0), (1,1), wx.ALL, 5)
done_btn.Bind(wx.EVT_BUTTON, self.on_close)
section_sizer.Add(input_grid)
vbox.Add(section_sizer, flag=wx.ALL, border=10)
name_input.SetValue(self.metadata.name)
distance = self.get_property_string("distance", is_float=True)
distance_input.SetValue(distance)
offset_input.SetValue(self.metadata.offset)
orientation_input.SetValue(self.metadata.orientation)
pixel_input.SetValue(self.metadata.pixel_size)
beam_input.SetValue(self.metadata.beam_center)
slit_len = self.get_property_string("slit_length", is_float=True)
slit_input.SetValue(slit_len)
vbox.Fit(self)
self.SetSizer(vbox)
class SamplePanel(MetadataPanel):
def __init__(self, parent, sample, base=None, *args, **kwargs):
MetadataPanel.__init__(self, parent, sample, base, *args, **kwargs)
if sample.name is None:
sample.name = ''
self._do_layout()
self.SetAutoLayout(True)
self.Layout()
def on_close(self, event=None):
if not MetadataPanel.on_close(self, event):
return
self.parent.manager.sample = self.metadata
self.parent.on_close(event)
def _do_layout(self):
vbox = wx.BoxSizer(wx.VERTICAL)
section = wx.StaticBox(self, -1, "Sample")
section_sizer = wx.StaticBoxSizer(section, wx.VERTICAL)
section_sizer.SetMinSize((_STATICBOX_WIDTH, -1))
input_grid = wx.GridBagSizer(5, 5)
y = 0
name_label = wx.StaticText(self, -1, "Name: ")
input_grid.Add(name_label, (y,0), (1,1), wx.ALL, 5)
name_input = wx.TextCtrl(self, -1, name="name")
input_grid.Add(name_input, (y,1), (1,1))
name_input.Bind(wx.EVT_TEXT, self.on_change)
y += 1
thickness_label = wx.StaticText(self, -1, "Thickness (mm): ")
input_grid.Add(thickness_label, (y,0), (1,1), wx.ALL, 5)
thickness_input = wx.TextCtrl(self, -1, name="thickness")
input_grid.Add(thickness_input, (y,1), (1,1))
thickness_input.Bind(wx.EVT_TEXT, self.on_change)
self._to_validate.append(thickness_input)
y += 1
transmission_label = wx.StaticText(self, -1, "Transmission: ")
input_grid.Add(transmission_label, (y,0), (1,1), wx.ALL, 5)
transmission_input = wx.TextCtrl(self, -1, name="transmission")
input_grid.Add(transmission_input, (y,1), (1,1))
transmission_input.Bind(wx.EVT_TEXT, self.on_change)
self._to_validate.append(transmission_input)
y += 1
temperature_label = wx.StaticText(self, -1, "Temperature: ")
input_grid.Add(temperature_label, (y,0), (1,1), wx.ALL, 5)
temperature_input = wx.TextCtrl(self, -1, name="temperature")
temperature_input.Bind(wx.EVT_TEXT, self.on_change)
self._to_validate.append(temperature_input)
input_grid.Add(temperature_input, (y,1), (1,1))
temp_unit_label = wx.StaticText(self, -1, "Unit: ")
input_grid.Add(temp_unit_label, (y,2), (1,1))
temp_unit_input = wx.TextCtrl(self, -1, name="temperature_unit",
size=(50,-1))
temp_unit_input.Bind(wx.EVT_TEXT, self.on_change)
input_grid.Add(temp_unit_input, (y,3), (1,1))
y += 1
position_label = wx.StaticText(self, -1, "Position (mm): ")
input_grid.Add(position_label, (y,0), (1,1), wx.ALL, 5)
position_input = VectorInput(self, "position")
self._vectors.append(position_input)
input_grid.Add(position_input.GetSizer(), (y,1), (1,2))
y += 1
orientation_label = wx.StaticText(self, -1, "Orientation (\xb0): ")
input_grid.Add(orientation_label, (y,0), (1,1), wx.ALL, 5)
orientation_input = VectorInput(self, "orientation",
labels=["Roll: ", "Pitch: ", "Yaw: "], z_enabled=True)
self._vectors.append(orientation_input)
input_grid.Add(orientation_input.GetSizer(), (y,1), (1,3))
y += 1
details_label = wx.StaticText(self, -1, "Details: ")
input_grid.Add(details_label, (y,0), (1,1), wx.ALL, 5)
details_input = wx.TextCtrl(self, -1, name="details",
style=wx.TE_MULTILINE)
input_grid.Add(details_input, (y,1), (3,3), wx.EXPAND)
y += 3
name_input.SetValue(self.metadata.name)
thickness_input.SetValue(
self.get_property_string("thickness", is_float=True))
transmission_input.SetValue(
self.get_property_string("transmission", is_float=True))
temperature_input.SetValue(
self.get_property_string("temperature", is_float=True))
temp_unit_input.SetValue(self.get_property_string("temperature_unit"))
position_input.SetValue(self.metadata.position)
orientation_input.SetValue(self.metadata.orientation)
details_input.SetValue(self.get_property_string("details"))
details_input.Bind(wx.EVT_TEXT, self.on_change)
done_btn = wx.Button(self, -1, "Done")
input_grid.Add(done_btn, (y,0), (1,1), wx.ALL, 5)
done_btn.Bind(wx.EVT_BUTTON, self.on_close)
section_sizer.Add(input_grid)
vbox.Add(section_sizer, flag=wx.ALL, border=10)
vbox.Fit(self)
self.SetSizer(vbox)
class SourcePanel(MetadataPanel):
def __init__(self, parent, source, base=None, *args, **kwargs):
MetadataPanel.__init__(self, parent, source, base, *args, **kwargs)
if source.name is None:
source.name = ''
source.wavelength_unit = 'nm'
self._do_layout()
self.SetAutoLayout(True)
self.Layout()
def on_close(self, event=None):
if not MetadataPanel.on_close(self, event):
return
self.parent.manager.source = self.metadata
self.parent.on_close(event)
def _do_layout(self):
vbox = wx.BoxSizer(wx.VERTICAL)
section = wx.StaticBox(self, -1, "Source")
section_sizer = wx.StaticBoxSizer(section, wx.VERTICAL)
section_sizer.SetMinSize((_STATICBOX_WIDTH, -1))
input_grid = wx.GridBagSizer(5, 5)
y = 0
name_label = wx.StaticText(self, -1, "Name: ")
input_grid.Add(name_label, (y,0), (1,1), wx.ALL, 5)
name_input = wx.TextCtrl(self, -1, name="name")
input_grid.Add(name_input, (y,1), (1,1))
name_input.Bind(wx.EVT_TEXT, self.on_change)
y += 1
size_label = wx.StaticText(self, -1, "Beam Size (mm): ")
input_grid.Add(size_label, (y,0), (1,1), wx.ALL, 5)
size_input = VectorInput(self, "beam_size")
self._vectors.append(size_input)
input_grid.Add(size_input.GetSizer(), (y,1), (1,1))
y += 1
shape_label = wx.StaticText(self, -1, "Beam Shape: ")
input_grid.Add(shape_label, (y,0), (1,1), wx.ALL, 5)
shape_input = wx.TextCtrl(self, -1, name="beam_shape")
shape_input.Bind(wx.EVT_TEXT, self.on_change)
input_grid.Add(shape_input, (y,1), (1,1))
y += 1
wavelength_label = wx.StaticText(self, -1, "Wavelength (nm): ")
input_grid.Add(wavelength_label, (y,0), (1,1), wx.ALL, 5)
wavelength_input = wx.TextCtrl(self, -1, name="wavelength",
size=(50,-1))
wavelength_input.Bind(wx.EVT_TEXT, self.on_change)
self._to_validate.append(wavelength_input)
input_grid.Add(wavelength_input, (y,1), (1,1))
y += 1
min_wavelength_label = wx.StaticText(self, -1, "Min. Wavelength (nm): ")
input_grid.Add(min_wavelength_label, (y,0), (1,1), wx.ALL, 5)
min_wavelength_input = wx.TextCtrl(self, -1, name="wavelength_min",
size=(50,-1))
min_wavelength_input.Bind(wx.EVT_TEXT, self.on_change)
self._to_validate.append(min_wavelength_input)
input_grid.Add(min_wavelength_input, (y,1), (1,1))
y += 1
max_wavelength_label = wx.StaticText(self, -1, "Max. Wavelength (nm): ")
input_grid.Add(max_wavelength_label, (y,0), (1,1), wx.ALL, 5)
max_wavelength_input = wx.TextCtrl(self, -1, name="wavelength_max",
size=(50,-1))
max_wavelength_input.Bind(wx.EVT_TEXT, self.on_change)
self._to_validate.append(max_wavelength_input)
input_grid.Add(max_wavelength_input, (y,1), (1,1))
y += 1
wavelength_spread_label = wx.StaticText(self, -1,
"Wavelength Spread (%): ")
input_grid.Add(wavelength_spread_label, (y,0), (1,1), wx.ALL, 5)
wavelength_spread_input = wx.TextCtrl(self, -1,
name="wavelength_spread", size=(50,-1))
wavelength_spread_input.Bind(wx.EVT_TEXT, self.on_change)
self._to_validate.append(wavelength_spread_input)
input_grid.Add(wavelength_spread_input, (y,1), (1,1))
y += 1
name_input.SetValue(self.get_property_string("name"))
size_input.SetValue(self.metadata.beam_size)
shape_input.SetValue(self.get_property_string("beam_shape"))
wavelength_input.SetValue(
self.get_property_string("wavelength", is_float=True))
min_wavelength_input.SetValue(
self.get_property_string("wavelength_min", is_float=True))
max_wavelength_input.SetValue(
self.get_property_string("wavelength_max", is_float=True))
done_btn = wx.Button(self, -1, "Done")
input_grid.Add(done_btn, (y,0), (1,1), wx.ALL, 5)
done_btn.Bind(wx.EVT_BUTTON, self.on_close)
section_sizer.Add(input_grid)
vbox.Add(section_sizer, flag=wx.ALL, border=10)
vbox.Fit(self)
self.SetSizer(vbox)
class MetadataWindow(widget.CHILD_FRAME):
def __init__(self, PanelClass, parent=None, title='', base=None,
manager=None, size=(PANEL_SIZE, PANEL_SIZE*0.8), metadata=None,
*args, **kwargs):
kwargs['title'] = title
kwargs['size'] = size
widget.CHILD_FRAME.__init__(self, parent, *args, **kwargs)
self.manager = manager
self.panel = PanelClass(self, metadata, base=None)
self.Bind(wx.EVT_CLOSE, self.on_close)
def on_close(self, event):
if self.manager is not None:
self.manager.meta_frames.remove(self)
self.Destroy()
| |
# Copyright 2015 - Mirantis, Inc.
# Copyright 2015 - StackStorm, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mistral.lang import parser as spec_parser
from mistral.lang.v2 import tasks
from mistral.workflow import states
class WorkflowCommand(object):
"""Workflow command.
    A set of workflow commands forms a communication protocol between the
    workflow controller and its clients. When the workflow controller makes a
    decision about how to continue a workflow, it returns a set of commands so
    that a caller knows what to do next.
"""
def __init__(self, wf_ex, wf_spec, task_spec, ctx, triggered_by=None):
self.wf_ex = wf_ex
self.wf_spec = wf_spec
self.task_spec = task_spec
self.ctx = ctx or {}
self.triggered_by = triggered_by
class Noop(WorkflowCommand):
"""No-operation command."""
def __repr__(self):
return "NOOP [workflow=%s]" % self.wf_ex.name
class RunTask(WorkflowCommand):
"""Instruction to run a workflow task."""
def __init__(self, wf_ex, wf_spec, task_spec, ctx, triggered_by=None):
super(RunTask, self).__init__(
wf_ex,
wf_spec,
task_spec,
ctx,
triggered_by=triggered_by
)
self.wait = False
self.unique_key = None
def is_waiting(self):
return self.wait
def get_unique_key(self):
return self.unique_key
def __repr__(self):
return (
"Run task [workflow=%s, task=%s, waif_flag=%s, triggered_by=%s]" %
(
self.wf_ex.name,
self.task_spec.get_name(),
self.wait,
self.triggered_by
)
)
class RunExistingTask(WorkflowCommand):
"""Command for running already existent task."""
def __init__(self, wf_ex, wf_spec, task_ex, reset=True, triggered_by=None):
super(RunExistingTask, self).__init__(
wf_ex,
wf_spec,
spec_parser.get_task_spec(task_ex.spec),
task_ex.in_context,
triggered_by=triggered_by
)
self.task_ex = task_ex
self.reset = reset
self.unique_key = task_ex.unique_key
class SetWorkflowState(WorkflowCommand):
"""Instruction to change a workflow state."""
def __init__(self, wf_ex, wf_spec, task_spec, ctx, new_state, msg=None,
triggered_by=None):
super(SetWorkflowState, self).__init__(
wf_ex,
wf_spec,
task_spec,
ctx,
triggered_by=triggered_by
)
self.new_state = new_state
self.msg = msg
class FailWorkflow(SetWorkflowState):
"""Instruction to fail a workflow."""
def __init__(self, wf_ex, wf_spec, task_spec, ctx, msg=None,
triggered_by=None):
super(FailWorkflow, self).__init__(
wf_ex,
wf_spec,
task_spec,
ctx,
states.ERROR,
msg=msg,
triggered_by=triggered_by
)
def __repr__(self):
return "Fail [workflow=%s]" % self.wf_ex.name
class SucceedWorkflow(SetWorkflowState):
"""Instruction to succeed a workflow."""
def __init__(self, wf_ex, wf_spec, task_spec, ctx, msg=None,
triggered_by=None):
super(SucceedWorkflow, self).__init__(
wf_ex,
wf_spec,
task_spec,
ctx,
states.SUCCESS,
msg=msg,
triggered_by=triggered_by
)
def __repr__(self):
return "Succeed [workflow=%s]" % self.wf_ex.name
class PauseWorkflow(SetWorkflowState):
"""Instruction to pause a workflow."""
def __init__(self, wf_ex, wf_spec, task_spec, ctx, msg=None,
triggered_by=None):
super(PauseWorkflow, self).__init__(
wf_ex,
wf_spec,
task_spec,
ctx,
states.PAUSED,
msg=msg,
triggered_by=triggered_by
)
def __repr__(self):
return "Pause [workflow=%s]" % self.wf_ex.name
RESERVED_CMDS = dict(zip(
tasks.RESERVED_TASK_NAMES, [
Noop,
FailWorkflow,
SucceedWorkflow,
PauseWorkflow
]
))
def get_command_class(cmd_name):
return RESERVED_CMDS[cmd_name] if cmd_name in RESERVED_CMDS else None
def create_command(cmd_name, wf_ex, wf_spec, task_spec, ctx,
params=None, triggered_by=None):
cmd_cls = get_command_class(cmd_name) or RunTask
if issubclass(cmd_cls, SetWorkflowState):
return cmd_cls(
wf_ex,
wf_spec,
task_spec,
ctx,
msg=params.get('msg'),
triggered_by=triggered_by
)
else:
return cmd_cls(
wf_ex,
wf_spec,
task_spec,
ctx,
triggered_by=triggered_by
)
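# Dispatch sketch (no workflow objects are built here; assumes 'fail' is one of
# tasks.RESERVED_TASK_NAMES): reserved names resolve to their command classes,
# anything else returns None so create_command() falls back to RunTask.
def _example_command_dispatch():
    return get_command_class('fail'), get_command_class('a_regular_task')
    # -> (FailWorkflow, None)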
| |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2017, Ansible by Red Hat, inc
#
# This file is part of Ansible by Red Hat
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = """
---
module: vyos_linkagg
version_added: "2.4"
author: "Ricardo Carrillo Cruz (@rcarrillocruz)"
short_description: Manage link aggregation groups on VyOS network devices
description:
- This module provides declarative management of link aggregation groups
on VyOS network devices.
notes:
- Tested against VYOS 1.1.7
options:
name:
description:
- Name of the link aggregation group.
required: true
mode:
description:
- Mode of the link aggregation group.
choices: ['802.3ad', 'active-backup', 'broadcast',
'round-robin', 'transmit-load-balance',
'adaptive-load-balance', 'xor-hash', 'on']
members:
description:
- List of members of the link aggregation group.
aggregate:
description: List of link aggregation definitions.
state:
description:
- State of the link aggregation group.
default: present
choices: ['present', 'absent', 'up', 'down']
"""
EXAMPLES = """
- name: configure link aggregation group
vyos_linkagg:
name: bond0
members:
- eth0
- eth1
- name: remove configuration
vyos_linkagg:
name: bond0
state: absent
- name: Create aggregate of linkagg definitions
vyos_linkagg:
aggregate:
- { name: bond0, members: [eth1] }
- { name: bond1, members: [eth2] }
- name: Remove aggregate of linkagg definitions
vyos_linkagg:
aggregate:
- name: bond0
- name: bond1
state: absent
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always, except for the platforms that use Netconf transport to manage the device.
type: list
sample:
- set interfaces bonding bond0
- set interfaces ethernet eth0 bond-group 'bond0'
- set interfaces ethernet eth1 bond-group 'bond0'
"""
from copy import deepcopy
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network_common import remove_default_spec
from ansible.module_utils.vyos import load_config, run_commands
from ansible.module_utils.vyos import vyos_argument_spec, check_args
def search_obj_in_list(name, lst):
for o in lst:
if o['name'] == name:
return o
return None
def map_obj_to_commands(updates, module):
commands = list()
want, have = updates
for w in want:
name = w['name']
members = w.get('members') or []
mode = w['mode']
if mode == 'on':
mode = '802.3ad'
state = w['state']
obj_in_have = search_obj_in_list(name, have)
if state == 'absent':
if obj_in_have:
for m in obj_in_have['members']:
commands.append('delete interfaces ethernet ' + m + ' bond-group')
commands.append('delete interfaces bonding ' + name)
else:
if not obj_in_have:
commands.append('set interfaces bonding ' + name + ' mode ' + mode)
for m in members:
commands.append('set interfaces ethernet ' + m + ' bond-group ' + name)
if state == 'down':
commands.append('set interfaces bonding ' + name + ' disable')
else:
if mode != obj_in_have['mode']:
commands.append('set interfaces bonding ' + name + ' mode ' + mode)
missing_members = list(set(members) - set(obj_in_have['members']))
for m in missing_members:
commands.append('set interfaces ethernet ' + m + ' bond-group ' + name)
if state == 'down' and obj_in_have['state'] == 'up':
commands.append('set interfaces bonding ' + name + ' disable')
elif state == 'up' and obj_in_have['state'] == 'down':
commands.append('delete interfaces bonding ' + name + ' disable')
return commands
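# Illustrative note (added): with an empty device configuration, a desired bond
# such as {'name': 'bond0', 'mode': '802.3ad', 'members': ['eth0'], 'state': 'present'}
# maps to the following commands:
#
#     set interfaces bonding bond0 mode 802.3ad
#     set interfaces ethernet eth0 bond-group bond0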
def map_config_to_obj(module):
obj = []
output = run_commands(module, ['show interfaces bonding slaves'])
lines = output[0].splitlines()
if len(lines) > 1:
for line in lines[1:]:
splitted_line = line.split()
name = splitted_line[0]
mode = splitted_line[1]
state = splitted_line[2]
if len(splitted_line) > 4:
members = splitted_line[4:]
else:
members = []
obj.append({'name': name,
'mode': mode,
'members': members,
'state': state})
return obj
def map_params_to_obj(module):
obj = []
aggregate = module.params.get('aggregate')
if aggregate:
for item in aggregate:
for key in item:
if item.get(key) is None:
item[key] = module.params[key]
obj.append(item.copy())
else:
obj.append({
'name': module.params['name'],
'mode': module.params['mode'],
'members': module.params['members'],
'state': module.params['state']
})
return obj
def main():
""" main entry point for module execution
"""
element_spec = dict(
name=dict(),
mode=dict(choices=['802.3ad', 'active-backup', 'broadcast',
'round-robin', 'transmit-load-balance',
'adaptive-load-balance', 'xor-hash', 'on'],
default='802.3ad'),
members=dict(type='list'),
state=dict(default='present',
choices=['present', 'absent', 'up', 'down'])
)
aggregate_spec = deepcopy(element_spec)
aggregate_spec['name'] = dict(required=True)
# remove default in aggregate spec, to handle common arguments
remove_default_spec(aggregate_spec)
argument_spec = dict(
aggregate=dict(type='list', elements='dict', options=aggregate_spec),
)
argument_spec.update(element_spec)
argument_spec.update(vyos_argument_spec)
required_one_of = [['name', 'aggregate']]
mutually_exclusive = [['name', 'aggregate']]
module = AnsibleModule(argument_spec=argument_spec,
required_one_of=required_one_of,
mutually_exclusive=mutually_exclusive,
supports_check_mode=True)
warnings = list()
check_args(module, warnings)
result = {'changed': False}
if warnings:
result['warnings'] = warnings
want = map_params_to_obj(module)
have = map_config_to_obj(module)
commands = map_obj_to_commands((want, have), module)
result['commands'] = commands
if commands:
commit = not module.check_mode
load_config(module, commands, commit=commit)
result['changed'] = True
module.exit_json(**result)
if __name__ == '__main__':
main()
| |
from unittest import TestCase
from chatterbot.storage import MongoDatabaseAdapter
from chatterbot.conversation import Statement
class MongoAdapterTestCase(TestCase):
@classmethod
def setUpClass(cls):
"""
Instantiate the adapter before any tests in the test case run.
"""
from pymongo.errors import ServerSelectionTimeoutError
from pymongo import MongoClient
cls.has_mongo_connection = False
try:
client = MongoClient(
serverSelectionTimeoutMS=0.1
)
client.server_info()
cls.adapter = MongoDatabaseAdapter(
database_uri='mongodb://localhost:27017/chatterbot_test_database'
)
cls.has_mongo_connection = True
except ServerSelectionTimeoutError:
pass
def setUp(self):
"""
        Skip these tests if a mongo server is not available.
"""
if not self.has_mongo_connection:
self.skipTest('Unable to connect to mongo database.')
def tearDown(self):
"""
Remove the test database.
"""
self.adapter.drop()
class MongoDatabaseAdapterTestCase(MongoAdapterTestCase):
def test_count_returns_zero(self):
"""
The count method should return a value of 0
when nothing has been saved to the database.
"""
self.assertEqual(self.adapter.count(), 0)
def test_count_returns_value(self):
"""
The count method should return a value of 1
when one item has been saved to the database.
"""
self.adapter.create(text="Test statement")
self.assertEqual(self.adapter.count(), 1)
def test_filter_text_statement_not_found(self):
"""
        Test that no results are returned by the filter method
        when a matching statement is not found.
        """
        results = list(self.adapter.filter(text='Non-existent'))
self.assertEqual(len(results), 0)
def test_filter_text_statement_found(self):
"""
Test that a matching statement is returned
when it exists in the database.
"""
self.adapter.create(text='New statement')
results = list(self.adapter.filter(text='New statement'))
self.assertEqual(len(results), 1)
self.assertEqual(results[0].text, 'New statement')
def test_update_adds_new_statement(self):
self.adapter.create(text='New statement')
results = list(self.adapter.filter(text='New statement'))
self.assertEqual(len(results), 1)
self.assertEqual(results[0].text, 'New statement')
def test_update_modifies_existing_statement(self):
statement = Statement(text="New statement")
self.adapter.update(statement)
# Check the initial values
results = list(self.adapter.filter(text=statement.text))
self.assertEqual(len(results), 1)
self.assertEqual(results[0].in_response_to, None)
# Update the statement value
statement.in_response_to = "New response"
self.adapter.update(statement)
# Check that the values have changed
results = list(self.adapter.filter(text=statement.text))
self.assertEqual(len(results), 1)
self.assertEqual(results[0].in_response_to, "New response")
def test_get_random_returns_statement(self):
text = "New statement"
self.adapter.create(text=text)
random_statement = self.adapter.get_random()
self.assertEqual(random_statement.text, text)
def test_get_random_no_data(self):
from chatterbot.storage import StorageAdapter
with self.assertRaises(StorageAdapter.EmptyDatabaseException):
self.adapter.get_random()
def test_mongo_to_object(self):
self.adapter.create(text='Hello', in_response_to='Hi')
statement_data = self.adapter.statements.find_one({'text': 'Hello'})
obj = self.adapter.mongo_to_object(statement_data)
self.assertEqual(type(obj), Statement)
self.assertEqual(obj.text, 'Hello')
self.assertEqual(obj.in_response_to, 'Hi')
self.assertEqual(obj.id, statement_data['_id'])
def test_remove(self):
text = "Sometimes you have to run before you can walk."
self.adapter.create(text=text)
self.adapter.remove(text)
results = list(self.adapter.filter(text=text))
self.assertEqual(results, [])
def test_remove_response(self):
text = "Sometimes you have to run before you can walk."
self.adapter.create(text='', in_response_to=text)
self.adapter.remove(text)
results = list(self.adapter.filter(text=text))
self.assertEqual(results, [])
class MongoAdapterFilterTestCase(MongoAdapterTestCase):
def test_filter_text_no_matches(self):
self.adapter.create(
text='Testing...',
in_response_to='Why are you counting?'
)
results = list(self.adapter.filter(text='Howdy'))
self.assertEqual(len(results), 0)
def test_filter_in_response_to_no_matches(self):
self.adapter.create(
text='Testing...',
in_response_to='Why are you counting?'
)
results = list(self.adapter.filter(in_response_to='Maybe'))
self.assertEqual(len(results), 0)
def test_filter_equal_results(self):
statement1 = Statement(
text="Testing...",
in_response_to=[]
)
statement2 = Statement(
text="Testing one, two, three.",
in_response_to=[]
)
self.adapter.update(statement1)
self.adapter.update(statement2)
results = list(self.adapter.filter(in_response_to=[]))
results_text = [
result.text for result in results
]
self.assertEqual(len(results), 2)
self.assertIn(statement1.text, results_text)
self.assertIn(statement2.text, results_text)
def test_filter_no_parameters(self):
"""
If no parameters are passed to the filter,
then all statements should be returned.
"""
self.adapter.create(text="Testing...")
self.adapter.create(text="Testing one, two, three.")
results = list(self.adapter.filter())
self.assertEqual(len(results), 2)
def test_filter_in_response_to(self):
self.adapter.create(text="A", in_response_to="Yes")
self.adapter.create(text="B", in_response_to="No")
results = list(self.adapter.filter(
in_response_to="Yes"
))
# Get the first response
response = results[0]
self.assertEqual(len(results), 1)
self.assertEqual(response.in_response_to, "Yes")
def test_filter_by_tag(self):
self.adapter.create(text="Hello!", tags=["greeting", "salutation"])
self.adapter.create(text="Hi everyone!", tags=["greeting", "exclamation"])
self.adapter.create(text="The air contains Oxygen.", tags=["fact"])
results = list(self.adapter.filter(tags=["greeting"]))
results_text_list = [statement.text for statement in results]
self.assertEqual(len(results_text_list), 2)
self.assertIn("Hello!", results_text_list)
self.assertIn("Hi everyone!", results_text_list)
def test_filter_by_tags(self):
self.adapter.create(text="Hello!", tags=["greeting", "salutation"])
self.adapter.create(text="Hi everyone!", tags=["greeting", "exclamation"])
self.adapter.create(text="The air contains Oxygen.", tags=["fact"])
results = list(self.adapter.filter(
tags=["exclamation", "fact"]
))
results_text_list = [statement.text for statement in results]
self.assertEqual(len(results_text_list), 2)
self.assertIn("Hi everyone!", results_text_list)
self.assertIn("The air contains Oxygen.", results_text_list)
def test_filter_page_size(self):
self.adapter.create(text='A')
self.adapter.create(text='B')
self.adapter.create(text='C')
results = self.adapter.filter(page_size=2)
results_text_list = [statement.text for statement in results]
self.assertEqual(len(results_text_list), 3)
self.assertIn('A', results_text_list)
self.assertIn('B', results_text_list)
self.assertIn('C', results_text_list)
def test_exclude_text(self):
self.adapter.create(text='Hello!')
self.adapter.create(text='Hi everyone!')
results = list(self.adapter.filter(
exclude_text=[
'Hello!'
]
))
self.assertEqual(len(results), 1)
self.assertEqual(results[0].text, 'Hi everyone!')
def test_exclude_text_words(self):
self.adapter.create(text='This is a good example.')
self.adapter.create(text='This is a bad example.')
self.adapter.create(text='This is a worse example.')
results = list(self.adapter.filter(
exclude_text_words=[
'bad', 'worse'
]
))
self.assertEqual(len(results), 1)
self.assertEqual(results[0].text, 'This is a good example.')
def test_persona_not_startswith(self):
self.adapter.create(text='Hello!', persona='bot:tester')
self.adapter.create(text='Hi everyone!', persona='user:person')
results = list(self.adapter.filter(
persona_not_startswith='bot:'
))
self.assertEqual(len(results), 1)
self.assertEqual(results[0].text, 'Hi everyone!')
def test_search_text_contains(self):
self.adapter.create(text='Hello!', search_text='hello exclamation')
self.adapter.create(text='Hi everyone!', search_text='hi everyone')
results = list(self.adapter.filter(
search_text_contains='everyone'
))
self.assertEqual(len(results), 1)
self.assertEqual(results[0].text, 'Hi everyone!')
def test_search_text_contains_multiple_matches(self):
self.adapter.create(text='Hello!', search_text='hello exclamation')
self.adapter.create(text='Hi everyone!', search_text='hi everyone')
results = list(self.adapter.filter(
search_text_contains='hello everyone'
))
self.assertEqual(len(results), 2)
class MongoOrderingTestCase(MongoAdapterTestCase):
"""
Test cases for the ordering of sets of statements.
"""
def test_order_by_text(self):
statement_a = Statement(text='A is the first letter of the alphabet.')
statement_b = Statement(text='B is the second letter of the alphabet.')
self.adapter.update(statement_b)
self.adapter.update(statement_a)
results = list(self.adapter.filter(order_by=['text']))
self.assertEqual(len(results), 2)
self.assertEqual(statement_a.text, results[0].text)
self.assertEqual(statement_b.text, results[1].text)
def test_order_by_created_at(self):
from datetime import datetime, timedelta
today = datetime.now()
yesterday = datetime.now() - timedelta(days=1)
statement_a = Statement(
text='A is the first letter of the alphabet.',
created_at=today
)
statement_b = Statement(
text='B is the second letter of the alphabet.',
created_at=yesterday
)
self.adapter.update(statement_b)
self.adapter.update(statement_a)
results = list(self.adapter.filter(order_by=['created_at']))
self.assertEqual(len(results), 2)
self.assertEqual(statement_a.text, results[0].text)
self.assertEqual(statement_b.text, results[1].text)
class StorageAdapterCreateTestCase(MongoAdapterTestCase):
"""
Tests for the create function of the storage adapter.
"""
def test_create_text(self):
self.adapter.create(text='testing')
results = list(self.adapter.filter())
self.assertEqual(len(results), 1)
self.assertEqual(results[0].text, 'testing')
def test_create_search_text(self):
self.adapter.create(
text='testing',
search_text='test'
)
results = list(self.adapter.filter())
self.assertEqual(len(results), 1)
self.assertEqual(results[0].search_text, 'test')
def test_create_search_in_response_to(self):
self.adapter.create(
text='testing',
search_in_response_to='test'
)
results = list(self.adapter.filter())
self.assertEqual(len(results), 1)
self.assertEqual(results[0].search_in_response_to, 'test')
def test_create_tags(self):
self.adapter.create(text='testing', tags=['a', 'b'])
results = list(self.adapter.filter())
self.assertEqual(len(results), 1)
self.assertIn('a', results[0].get_tags())
self.assertIn('b', results[0].get_tags())
def test_create_duplicate_tags(self):
"""
The storage adapter should not create a statement with tags
that are duplicates.
"""
self.adapter.create(text='testing', tags=['ab', 'ab'])
results = list(self.adapter.filter())
self.assertEqual(len(results), 1)
self.assertEqual(len(results[0].get_tags()), 1)
self.assertEqual(results[0].get_tags(), ['ab'])
def test_create_many_text(self):
self.adapter.create_many([
Statement(text='A'),
Statement(text='B')
])
results = list(self.adapter.filter())
self.assertEqual(len(results), 2)
self.assertEqual(results[0].text, 'A')
self.assertEqual(results[1].text, 'B')
def test_create_many_search_text(self):
self.adapter.create_many([
Statement(text='A', search_text='a'),
Statement(text='B', search_text='b')
])
results = list(self.adapter.filter())
self.assertEqual(len(results), 2)
self.assertEqual(results[0].search_text, 'a')
self.assertEqual(results[1].search_text, 'b')
def test_create_many_search_in_response_to(self):
self.adapter.create_many([
Statement(text='A', search_in_response_to='a'),
Statement(text='B', search_in_response_to='b')
])
results = list(self.adapter.filter())
self.assertEqual(len(results), 2)
self.assertEqual(results[0].search_in_response_to, 'a')
self.assertEqual(results[1].search_in_response_to, 'b')
def test_create_many_tags(self):
self.adapter.create_many([
Statement(text='A', tags=['first', 'letter']),
Statement(text='B', tags=['second', 'letter'])
])
results = list(self.adapter.filter())
self.assertEqual(len(results), 2)
self.assertIn('letter', results[0].get_tags())
self.assertIn('letter', results[1].get_tags())
self.assertIn('first', results[0].get_tags())
self.assertIn('second', results[1].get_tags())
def test_create_many_duplicate_tags(self):
"""
The storage adapter should not create a statement with tags
that are duplicates.
"""
self.adapter.create_many([
Statement(text='testing', tags=['ab', 'ab'])
])
results = list(self.adapter.filter())
self.assertEqual(len(results), 1)
self.assertEqual(len(results[0].get_tags()), 1)
self.assertEqual(results[0].get_tags(), ['ab'])
class StorageAdapterUpdateTestCase(MongoAdapterTestCase):
"""
Tests for the update function of the storage adapter.
"""
def test_update_adds_tags(self):
statement = self.adapter.create(text='Testing')
statement.add_tags('a', 'b')
self.adapter.update(statement)
statements = list(self.adapter.filter())
self.assertEqual(len(statements), 1)
self.assertIn('a', statements[0].get_tags())
self.assertIn('b', statements[0].get_tags())
def test_update_duplicate_tags(self):
"""
The storage adapter should not update a statement with tags
that are duplicates.
"""
statement = self.adapter.create(text='Testing', tags=['ab'])
statement.add_tags('ab')
self.adapter.update(statement)
statements = list(self.adapter.filter())
self.assertEqual(len(statements), 1)
self.assertEqual(len(statements[0].get_tags()), 1)
self.assertEqual(statements[0].get_tags(), ['ab'])
| |
import sys, time, socketserver
import emailinfo, regrules, TCGMain
from modules import pyrand, pyemail, pyhash
from modules.pyqueue import Queue
from threading import Thread
from os import walk
class SimpleServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
daemon_threads = True
allow_reuse_address = True
def __init__(self, server_address, request_handler):
socketserver.TCPServer.__init__(self, server_address, request_handler)
class UserHandler(socketserver.BaseRequestHandler):
message = {
'regusername': 'Min 4 characters, max 16 characters.\nEnter desired username: ',
'regpassword': '\nMin 8 characters, max 32 characters. Must have at least 1 letter and number.\nCannot contain symbols.\nEnter password: ',
'regemail': '\nYour activation code will be sent to this email.\nEnter a valid email: ',
'actusername': 'Enter the username of the account you wish to activate: ',
'actpassword': 'Enter the password of the account you wish to activate: ',
'actemail': 'Enter the email you used to register this account: ',
'actcode': 'Enter the activation code found in your email: ',
'act_success': 'Your account has been successfully activated.',
'invalid_act': 'Invalid Username, Password or Activation Code',
'not_activated': 'This account has not been activated yet.',
'alreadyact': 'That account has already been activated. ',
'registered': 'Your account has been registered and an activation code has been sent to your email.',
'login_success': 'Successfully logged in.',
'invalid_up': 'Invalid Username or Password.',
'log/act/reg': '(L)ogin, (A)ctivate, or (R)egister: '
}
def handle(self):
while True:
response = send_receive(self.request, self.message['log/act/reg']).lower()
if response == 'l':
self.login()
elif response == 'a':
self.activate()
elif response == 'r':
self.register()
elif response == '~':
break
else:
send_receive(self.request, 'Invalid choice: '+response, 'p')
send_receive(self.request, 'Thank you for choosing pyTCG!', 'p')
self.request.close()
def login(self):
socket = self.request
username = send_receive(socket, 'Username: ', recvsize=16)
passhash = pyhash.Sha384(send_receive(socket, 'Password: ', recvsize=32)).hexdigest
activated, actcode, user_passhash, user_emailhash = read_user(username)
activated = int(activated)
if passhash == user_passhash:
if activated:
send_receive(socket, self.message['login_success'], 'p')
else:
send_receive(socket, self.message['not_activated'], 'p')
self.activate(username, passhash)
else:
send_receive(socket, self.message['invalid_up'], 'p')
def activate(self, username=None, passhash=None):
socket = self.request
if not (username and passhash):
username = send_receive(socket, self.message['actusername'], recvsize=16)
passhash = pyhash.Sha384(send_receive(socket, self.message['actpassword'], recvsize=32)).hexdigest
user_activated, user_actcode, user_passhash, user_emailhash = read_user(username)
user_activated = int(user_activated)
del user_emailhash
if user_activated:
send_receive(socket, self.message['alreadyact'], 'p')
else:
activation_code = send_receive(socket, self.message['actcode'], recvsize=8)
if passhash == user_passhash and activation_code == user_actcode:
queues['activation'][0].put(username)
send_receive(socket, self.message['act_success'], 'p')
else:
send_receive(socket, self.message['invalid_act'], 'p')
def register(self):
try:
socket = self.request
passed = False
useremail, password, username = ('', '', '')
paramchecks = {}
while not passed:
if len(paramchecks):
estring = err_str(paramchecks, ['Username', 'Password', 'Email'])
send_receive(socket, estring, 'p', 1)
del estring
username = send_receive(socket, self.message['regusername'], recvsize=16)
password = send_receive(socket, self.message['regpassword'], recvsize=32)
useremail = send_receive(socket, self.message['regemail'], recvsize=64)
paramchecks = check_details(username, password, useremail)
passhash = pyhash.Sha384(password).hexdigest
del password
if type(paramchecks) == bool:
passed = True
del paramchecks, passed
ehash = pyhash.Sha384(useremail.lower()).hexdigest
activation_code = pyhash.Md5(pyrand.randstring(16)).hexdigest[::4]
queues['register'][0].put((username, (0, activation_code, passhash, ehash)))
emessage = 'Dear {0}, Thank you for registering your account with pyTCG! Your activation code is:\n{1}\n'.format(username, activation_code)
email_params = (useremail, emessage, 'pyTCG activation code', email, emailpass, smtpaddr, False)
queues['email'][0].put(email_params)
del username, activation_code, passhash, ehash,
send_receive(socket, self.message['registered'], 'p', 1)
except Exception as e:
print(e)
class LoginContainer(Thread):
    def __init__(self):
        Thread.__init__(self)
        # Thread objects do not support item assignment; keep sessions in a dict.
        self.sessions = {}
    def add_sess(self, sessname):
        self.sessions[sessname] = pyhash.Md5(sessname + pyrand.randstring(2)).hexdigest[:16]
    def del_sess(self, sessname):
        del self.sessions[sessname]
class Session:
def __init__(self, socket):
self.socket = socket
class QueueWorker(Thread):
def __init__(self, params):
Thread.__init__(self)
self.queue, self.funct = params
    def run(self):
        while True:
            if not self.queue.empty():
                parts = self.queue.get()
                try:
                    self.funct(parts)
                except Exception:
                    # Put the item back so a failed handler call is retried later.
                    self.queue.put(parts)
            else:
                # Avoid a tight busy-wait while the queue is empty.
                time.sleep(0.05)
def send_receive(socket, sendmsg, stype='i', recvsize=1):
# Sends encoded data + command, returns decoded receive data
# p, 0x00 = no input
# i, 0x01 = input
commands = {'p': 0x00, 'i': 0x01}
send_data = str(commands[stype])+sendmsg
socket.send(send_data.encode())
if stype == 'i':
recv_data = socket.recv(64).decode()[:recvsize]
return recv_data
    # 'p' messages expect no reply; still consume the client's acknowledgement byte.
    socket.recv(64)
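# Illustrative sketch (added; not part of the original server): how a client could
# interpret the one-byte command prefix that send_receive() puts in front of every
# message. The helper name is hypothetical.
def _example_client_handle(frame):
    command, text = frame[:1], frame[1:]
    if command == '1':   # 'i' messages expect the user to type a reply
        return text, True
    return text, False   # 'p' messages are print-only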
def err_str(errdict, paramorder=()):
estring = ''
for param in paramorder if len(paramorder) else errdict.keys():
if len(errdict[param]):
estring += '\n'+param+': '
for error in errdict[param]:
estring += error+', '
return estring[:-2]+'\n'
def check_details(username=None, password=None, email=None):
faults = {'Username': [], 'Password': [], 'Email': []}
full_pass = True
if password:
passwordc = regrules.check_password(password)
del password
if len(passwordc):
full_pass = False
faults['Password'].extend(passwordc)
if username:
usernamec = regrules.check_username(username)
if len(usernamec):
full_pass = False
faults['Username'].extend(usernamec)
if username.lower() in read_usernames():
full_pass = False
faults['Username'].append('username taken')
if email:
emailc = regrules.check_email(email)
del email
if type(emailc) != bool:
full_pass = False
faults['Email'].append(emailc)
if full_pass:
return True
return faults
def read_usernames(userdir='users'):
return [username[:-4] for username in walk(userdir).__next__()[2]]
def write_user(details, userdir='users/'):
username, details = details
username += '.usr'
with open(userdir+username.lower(), 'w') as ufile:
for detail in details:
ufile.write(str(detail)+'\n')
return True
def read_user(username, userdir='users/'):
username += '.usr'
with open(userdir+username.lower(), 'r') as ufile:
details = tuple([detail.strip() for detail in ufile.readlines()])
return details
def is_activated(username, userdir='users/'):
    # The activation flag is stored as a string ('0' or '1'), so cast before testing.
    if int(read_user(username, userdir)[0]):
        return True
    return False
def activate_user(username, userdir='users/'):
user_details = list(read_user(username, userdir))
user_details[0] = 1
write_user((username, user_details), userdir)
return True
incloginlimit = 5
inclogintimeout = 600
email, emailpass, smtpaddr = emailinfo.info
HOST = ''
PORT = 1337
queues = {
'register': [Queue(qtype='l'), write_user],
'activation': [Queue(qtype='l'), activate_user],
'email': [Queue(qtype='l'), pyemail.send_email],
}
meow = True
workers = {queue: QueueWorker(queues[queue]) for queue in queues}
if __name__ == "__main__":
server = SimpleServer((HOST, PORT), UserHandler)
try:
for queue in queues:
workers[queue].start()
server.serve_forever()
except KeyboardInterrupt:
sys.exit(0)
except Exception as e:
print(e)
sys.exit(0)
| |
"""
pyshtools subpackage that includes all Python wrapped Fortran routines.
"""
from __future__ import absolute_import as _absolute_import
from __future__ import division as _division
from __future__ import print_function as _print_function
import os as _os
import numpy as _np
# ---- Import all wrapped SHTOOLS functions
# legendre
from .._SHTOOLS import PlmBar
from .._SHTOOLS import PlmBar_d1
from .._SHTOOLS import PlBar
from .._SHTOOLS import PlBar_d1
from .._SHTOOLS import PlmON
from .._SHTOOLS import PlmON_d1
from .._SHTOOLS import PlON
from .._SHTOOLS import PlON_d1
from .._SHTOOLS import PlmSchmidt
from .._SHTOOLS import PlmSchmidt_d1
from .._SHTOOLS import PlSchmidt
from .._SHTOOLS import PlSchmidt_d1
from .._SHTOOLS import PLegendreA
from .._SHTOOLS import PLegendreA_d1
from .._SHTOOLS import PLegendre
from .._SHTOOLS import PLegendre_d1
# expand
from .._SHTOOLS import SHExpandDH
from .._SHTOOLS import MakeGridDH
from .._SHTOOLS import SHExpandDHC
from .._SHTOOLS import MakeGridDHC
from .._SHTOOLS import SHGLQ
from .._SHTOOLS import SHExpandGLQ
from .._SHTOOLS import MakeGridGLQ
from .._SHTOOLS import SHExpandGLQC
from .._SHTOOLS import MakeGridGLQC
from .._SHTOOLS import GLQGridCoord
from .._SHTOOLS import SHExpandLSQ
from .._SHTOOLS import MakeGrid2D
from .._SHTOOLS import MakeGridPoint
from .._SHTOOLS import MakeGridPointC
from .._SHTOOLS import SHMultiply
# shio
from .._SHTOOLS import SHRead
from .._SHTOOLS import SHReadH
from .._SHTOOLS import SHReadError
from .._SHTOOLS import SHReadErrorH
from .._SHTOOLS import SHRead2
from .._SHTOOLS import SHRead2Error
from .._SHTOOLS import SHReadJPL
from .._SHTOOLS import SHReadJPLError
from .._SHTOOLS import SHCilmToCindex
from .._SHTOOLS import SHCindexToCilm
from .._SHTOOLS import SHCilmToVector
from .._SHTOOLS import SHVectorToCilm
from .._SHTOOLS import SHrtoc
from .._SHTOOLS import SHctor
# spectralanalysis
from .._SHTOOLS import SHAdmitCorr
from .._SHTOOLS import SHConfidence
from .._SHTOOLS import SHMultiTaperSE
from .._SHTOOLS import SHMultiTaperCSE
from .._SHTOOLS import SHLocalizedAdmitCorr
from .._SHTOOLS import SHReturnTapers
from .._SHTOOLS import SHReturnTapersM
from .._SHTOOLS import ComputeDm
from .._SHTOOLS import ComputeDG82
from .._SHTOOLS import SHFindLWin
from .._SHTOOLS import SHBiasK
from .._SHTOOLS import SHMTCouplingMatrix
from .._SHTOOLS import SHBiasAdmitCorr
from .._SHTOOLS import SHMTDebias
from .._SHTOOLS import SHMTVarOpt
from .._SHTOOLS import SHSjkPG
from .._SHTOOLS import SHMultiTaperMaskSE
from .._SHTOOLS import SHMultiTaperMaskCSE
from .._SHTOOLS import SHReturnTapersMap
from .._SHTOOLS import SHBiasKMask
from .._SHTOOLS import ComputeDMap
from .._SHTOOLS import Curve2Mask
from .._SHTOOLS import SHBias
from .._SHTOOLS import SphericalCapCoef
# rotate
from .._SHTOOLS import djpi2
from .._SHTOOLS import SHRotateCoef
from .._SHTOOLS import SHRotateRealCoef
# gravmag
from .._SHTOOLS import MakeGravGridDH
from .._SHTOOLS import MakeGravGradGridDH
from .._SHTOOLS import MakeGeoidGridDH
from .._SHTOOLS import CilmPlusDH
from .._SHTOOLS import CilmMinusDH
from .._SHTOOLS import CilmPlusRhoHDH
from .._SHTOOLS import CilmMinusRhoHDH
from .._SHTOOLS import BAtoHilmDH
from .._SHTOOLS import BAtoHilmRhoHDH
from .._SHTOOLS import DownContFilterMA
from .._SHTOOLS import DownContFilterMC
from .._SHTOOLS import NormalGravity
from .._SHTOOLS import MakeMagGridDH
from .._SHTOOLS import SHMagPowerSpectrum
from .._SHTOOLS import SHMagPowerL
# utils
from .._SHTOOLS import MakeCircleCoord
from .._SHTOOLS import MakeEllipseCoord
from .._SHTOOLS import Wigner3j
from .._SHTOOLS import DHaj
__all__ = ['PlmBar', 'PlmBar_d1', 'PlBar', 'PlBar_d1', 'PlmON', 'PlmON_d1',
'PlON', 'PlON_d1', 'PlmSchmidt', 'PlmSchmidt_d1', 'PlSchmidt',
'PlSchmidt_d1', 'PLegendreA', 'PLegendreA_d1', 'PLegendre',
'PLegendre_d1', 'SHExpandDH', 'MakeGridDH', 'SHExpandDHC',
'MakeGridDHC', 'SHGLQ', 'SHExpandGLQ', 'MakeGridGLQ',
'SHExpandGLQC', 'MakeGridGLQC', 'GLQGridCoord', 'SHExpandLSQ',
'MakeGrid2D', 'MakeGridPoint', 'MakeGridPointC', 'SHMultiply',
'SHRead', 'SHReadH', 'SHReadError', 'SHReadErrorH', 'SHRead2',
'SHRead2Error', 'SHReadJPL', 'SHReadJPLError', 'SHCilmToVector',
'SHVectorToCilm', 'SHCilmToCindex', 'SHCindexToCilm', 'SHrtoc',
'SHctor', 'SHAdmitCorr', 'SHConfidence', 'SHMultiTaperSE',
'SHMultiTaperCSE', 'SHLocalizedAdmitCorr', 'SHReturnTapers',
'SHReturnTapersM', 'ComputeDm', 'ComputeDG82', 'SHFindLWin',
'SHBiasK', 'SHMTCouplingMatrix', 'SHBiasAdmitCorr', 'SHMTDebias',
'SHMTVarOpt', 'SHSjkPG', 'SHMultiTaperMaskSE',
'SHMultiTaperMaskCSE', 'SHReturnTapersMap', 'SHBiasKMask',
'ComputeDMap', 'Curve2Mask', 'SHBias', 'SphericalCapCoef',
'djpi2', 'SHRotateCoef', 'SHRotateRealCoef',
'MakeGravGridDH', 'MakeGravGradGridDH', 'MakeGeoidGridDH',
'CilmPlusDH', 'CilmMinusDH', 'CilmPlusRhoHDH', 'CilmMinusRhoHDH',
'BAtoHilmDH', 'BAtoHilmRhoHDH', 'DownContFilterMA',
'DownContFilterMC', 'NormalGravity', 'MakeMagGridDH',
'SHMagPowerSpectrum', 'SHMagPowerL',
'MakeCircleCoord', 'MakeEllipseCoord', 'Wigner3j', 'DHaj']
_fortran_functions = ['MakeGridPoint', 'MakeGridPointC', 'DownContFilterMA',
'DownContFilterMC', 'SHFindLWin', 'SHSjkPG',
'NormalGravity', 'SHConfidence', 'SHMagPowerL']
_fortran_subroutines = list(set(__all__) - set(_fortran_functions))
# ---------------------------------------------------------------------
# ---- Fill the module doc strings with documentation from external
# ---- files. The doc files are generated during initial compilation of
# ---- pyshtools from md formatted text files.
# ---------------------------------------------------------------------
_pydocfolder = _os.path.abspath(_os.path.join(
_os.path.split(_os.path.dirname(__file__))[0], 'doc'))
for _name in __all__:
try:
_path = _os.path.join(_pydocfolder, _name.lower() + '.doc')
with open(_path) as _pydocfile:
_pydoc = _pydocfile.read()
setattr(locals()[_name], '__doc__', _pydoc)
except IOError as msg:
print(msg)
# ---- Check the exit status of Fortran routines, raise exceptions, and
# ---- strip exitstatus from the Python return values.
class SHToolsError(Exception):
pass
def _shtools_status_message(status):
'''
Determine error message to print when a SHTOOLS Fortran 95 routine exits
improperly.
'''
if (status == 1):
errmsg = 'Improper dimensions of input array.'
elif (status == 2):
errmsg = 'Improper bounds for input variable.'
elif (status == 3):
errmsg = 'Error allocating memory.'
elif (status == 4):
errmsg = 'File IO error.'
else:
errmsg = 'Unhandled Fortran 95 error.'
return errmsg
def _raise_errors(func):
def wrapped_func(*args, **kwargs):
returned_values = func(*args, **kwargs)
if returned_values[0] != 0:
raise SHToolsError(_shtools_status_message(returned_values[0]))
elif len(returned_values) == 2:
return returned_values[1]
else:
return returned_values[1:]
wrapped_func.__doc__ = func.__doc__
return wrapped_func
for _func in _fortran_subroutines:
locals()[_func] = _raise_errors(locals()[_func])
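# Illustrative sketch (added; not part of pyshtools): how the wrapper above handles
# the exit status that wrapped routines return as their first value. The helper and
# fake routine names are hypothetical.
def _example_wrapped_call():
    def _fake_subroutine():
        return (0, 'result')  # (exitstatus, value); status 0 means success
    # The decorated call strips the status and returns only the value;
    # a non-zero status would raise SHToolsError instead.
    return _raise_errors(_fake_subroutine)()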
| |
# -*- coding: utf-8 -*-
"""
edacc.forms
-----------
Various WTForms used by the web frontend.
:copyright: (c) 2010 by Daniel Diepold.
:license: MIT, see LICENSE for details.
"""
from wtforms import Form, TextField, PasswordField, TextAreaField, RadioField, DecimalField, FloatField
from wtforms import FileField, SelectField, IntegerField
from wtforms import ValidationError, BooleanField, validators
from wtforms.validators import Required, Length, Email, EqualTo
from wtforms.ext.sqlalchemy.fields import QuerySelectMultipleField, \
QuerySelectField
from edacc import constants
ERROR_REQUIRED = 'This field is required.'
MAX_SC_LEN = 100 # maximum length of solver config names to display before truncating
def truncate_name(s, l):
    if len(s) > l:
        # Integer division so the slices also work on Python 3.
        return s[:l // 2] + " [..] " + s[-l // 2:]
return s
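# Illustrative example (added): truncate_name('abcdefghij', 6) -> 'abc [..] hij'.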
class EmptyQuery(list):
""" Helper class that extends the builtin list class to always evaluate to
True.
    WTForms tries to iterate over field.query or field.query_factory(). But when
    field.query is an empty list it evaluates to False, so WTForms falls back to
    field.query_factory(), which returns None and causes an exception. """
def __nonzero__(self):
""" for Python 2.x """
return True
def __bool__(self):
""" for Python 3.x """
return True
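# Illustrative note (added; hypothetical usage): EmptyQuery is meant to wrap
# possibly-empty query results so WTForms never falls back to query_factory(),
# e.g. field.query = EmptyQuery(some_query.all()).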
class RegistrationForm(Form):
lastname = TextField('Last Name',
[Required(ERROR_REQUIRED),
Length(max=255)])
firstname = TextField('Given Name',
[Required(ERROR_REQUIRED),
Length(max=255)])
email = TextField('e-mail',
[Required(ERROR_REQUIRED),
Length(max=255),
Email(message='Invalid e-mail address.')])
password = PasswordField('Password',
[Required()])
password_confirm = PasswordField('Confirm Password',
[EqualTo('password',
message='Passwords must match.')])
address = TextAreaField('Postal Address')
affiliation = TextAreaField('Affiliation')
affiliation_type = SelectField('Type of affiliation', [Required()],
choices=[('company', 'Company'), ('public_institution', 'Public institution')],
default='public_institution')
country = SelectField('Country', [Required()], choices=sorted(constants.COUNTRIES, key=lambda x: x[1]))
accepted_terms = BooleanField('I have read, understood and accepted the terms and conditions.',
[Required(ERROR_REQUIRED)])
captcha = TextField()
class LoginForm(Form):
email = TextField('e-mail', [Required(ERROR_REQUIRED)])
password = PasswordField('Password',
[Required(ERROR_REQUIRED)])
permanent_login = BooleanField('Remember me')
class ChangePasswordForm(Form):
password = PasswordField('Password',
[Required()])
password_confirm = PasswordField('Confirm Password',
[EqualTo('password',
message='Passwords must match.')])
class ResetPasswordForm(Form):
email = TextField('e-mail', [Required(ERROR_REQUIRED)])
class SolverForm(Form):
name = TextField('Name', [Required(ERROR_REQUIRED)])
binary = FileField('Binary')
code = FileField('Code')
description = TextAreaField('Description')
description_pdf = FileField('Description (PDF)')
version = TextField('Version', [Required(ERROR_REQUIRED)])
run_path = TextField('Binary name')
run_command = TextField('Run command')
authors = TextField('Authors', [Required(ERROR_REQUIRED)])
parameters = TextField('Parameters', [Required(ERROR_REQUIRED)])
competition_categories = QuerySelectMultipleField(
'Competition Categories',
query_factory=lambda: [],
        validators=[Required('Please choose one or more categories '
                             'for your solver to compete in.')])
def validate_parameters(self, field):
if not 'INSTANCE' in field.data:
raise ValidationError('You have to specify INSTANCE as a parameter.')
def validate_code(self, field):
if field.data and not field.file.filename.endswith('.zip'):
raise ValidationError('The code archive has to be a .zip file.')
def validate_description_pdf(self, field):
if field.data and not field.file.filename.endswith('.pdf'):
raise ValidationError('Please provide a .pdf file.')
class UpdateDescriptionForm(Form):
description_pdf = FileField('Description (PDF)')
def validate_description_pdf(self, field):
if field.data and not field.file.filename.endswith('.pdf'):
raise ValidationError('Please provide a .pdf file.')
class BenchmarkForm(Form):
instance = FileField('File')
name = TextField('Name')
new_benchmark_type = TextField('New Type')
benchmark_type = QuerySelectField('Existing Type', allow_blank=True,
query_factory=lambda: [],
blank_text='Create a new type')
new_source_class = TextField('New Source Class')
new_source_class_description = TextField('New Source Class Description')
    source_class = QuerySelectField('Existing Source Class', allow_blank=True,
query_factory=lambda: [],
blank_text='Create a new source class')
def validate_new_benchmark_type(self, field):
if self.benchmark_type.data is None and field.data.strip() == '':
            raise ValidationError('Please specify a new benchmark type or '
                                  'choose an existing one.')
def validate_new_source_class(self, field):
if self.source_class.data is None and field.data.strip() == '':
            raise ValidationError('Please specify a new source class or '
                                  'choose an existing one.')
def validate_instance(self, field):
if not field.file.filename:
raise ValidationError(ERROR_REQUIRED)
class BenchmarksForm(Form):
#benchmarks = FileField('File')
category = SelectField('Category', [Required(ERROR_REQUIRED)],
choices=[('random', 'Random SAT+UNSAT'), ('random_sat', 'Random SAT'),
('random_unsat', 'Random UNSAT'),
('application', 'Application SAT+UNSAT'), ('application_sat', 'Application SAT'),
('application_unsat', 'Application UNSAT'),
('combinatorial', 'Hard Combinatorial SAT+UNSAT'),
('combinatorial_sat', 'Hard Combinatorial SAT'),
('combinatorial_unsat', 'Hard Combinatorial UNSAT')],
default='random')
#def validate_benchmarks(self, field):
# if not field.file.filename:
# raise ValidationError(ERROR_REQUIRED)
# filename = field.file.filename
# if not (filename.endswith('.zip') or filename.endswith('.7z') or filename.endswith('.tar.gz') \
# or filename.endswith('.tar.bz2') or filename.endswith('.rar')):
# raise ValidationError("Please submit one of the supported archive types.")
class ResultBySolverForm(Form):
solver_config = QuerySelectField('Solver Configuration', get_label=lambda sc: truncate_name(sc.name, MAX_SC_LEN))
cost = SelectField('Cost', choices=[('resultTime', 'CPU Time'), ('wallTime', 'Walltime'), ('cost', 'Cost')])
class ResultByInstanceForm(Form):
instance = QuerySelectField('Instance', get_pk=lambda i: i.idInstance)
cost = SelectField('Cost', choices=[('resultTime', 'CPU Time'), ('wallTime', 'Walltime'), ('cost', 'Cost')])
class TwoSolversOnePropertyScatterPlotForm(Form):
solver_config1 = QuerySelectField('First Solver Configuration',
get_label=lambda sc: truncate_name(sc.name, MAX_SC_LEN))
solver_config2 = QuerySelectField('Second Solver Configuration',
get_label=lambda sc: truncate_name(sc.name, MAX_SC_LEN))
instance_filter = TextField('Filter Instances')
result_property = SelectField('Property')
i = QuerySelectMultipleField('Instances', get_pk=lambda i: i.idInstance, allow_blank=True)
xscale = RadioField('X-axis scale', choices=[('', 'linear'), ('log', 'log')], default='log')
yscale = RadioField('Y-axis scale', choices=[('', 'linear'), ('log', 'log')], default='log')
run = SelectField('Plot for run')
class OneSolverTwoResultPropertiesPlotForm(Form):
solver_config = QuerySelectField('Solver Configuration', get_label=lambda sc: truncate_name(sc.name, MAX_SC_LEN))
result_property1 = SelectField('First Result Property')
result_property2 = SelectField('Second Result Property')
instance_filter = TextField('Filter Instances')
i = QuerySelectMultipleField('Instances', get_pk=lambda i: i.idInstance, allow_blank=True)
xscale = RadioField('X-axis scale', choices=[('', 'linear'), ('log', 'log')], default='log')
yscale = RadioField('Y-axis scale', choices=[('', 'linear'), ('log', 'log')], default='log')
run = SelectField('Plot for run')
class OneSolverInstanceAgainstResultPropertyPlotForm(Form):
solver_config = QuerySelectField('Solver Configuration', get_label=lambda sc: truncate_name(sc.name, MAX_SC_LEN))
result_property = SelectField('Result Property')
instance_property = SelectField('Instance Property')
instance_filter = TextField('Filter Instances')
i = QuerySelectMultipleField('Instances', get_pk=lambda i: i.idInstance, allow_blank=True)
xscale = RadioField('X-axis scale', choices=[('', 'linear'), ('log', 'log')], default='log')
yscale = RadioField('Y-axis scale', choices=[('', 'linear'), ('log', 'log')], default='log')
run = SelectField('Plot for run')
class CactusPlotForm(Form):
result_property = SelectField('Property')
sc = QuerySelectMultipleField('Solver Configurations')
instance_filter = TextField('Filter Instances')
run = SelectField('Plot for run')
flip_axes = BooleanField("Swap axes", default=True)
log_property = BooleanField("Logarithmic property-axis", default=True)
i = QuerySelectMultipleField('Instances (Group 0)', get_pk=lambda i: i.idInstance, allow_blank=True)
class RTDComparisonForm(Form):
solver_config1 = QuerySelectField('First Solver Configuration',
get_label=lambda sc: truncate_name(sc.name, MAX_SC_LEN))
solver_config2 = QuerySelectField('Second Solver Configuration',
get_label=lambda sc: truncate_name(sc.name, MAX_SC_LEN))
result_property = SelectField('Property')
log_property = BooleanField("Logarithmic property-axis", default=True)
i = QuerySelectMultipleField('Instances', get_pk=lambda i: i.idInstance, allow_blank=True)
instance_filter = TextField('Filter Instances')
class RTDPlotsForm(Form):
sc = QuerySelectMultipleField('Solver Configurations', get_label=lambda sc: truncate_name(sc.name, MAX_SC_LEN))
result_property = SelectField('Property')
log_property = BooleanField("Logarithmic property-axis", default=True)
instance = QuerySelectField('Instance', get_pk=lambda i: i.idInstance, allow_blank=True)
instance_filter = TextField('Filter Instances')
class RTDPlotForm(Form):
#solver_config = QuerySelectField('Solver Configuration', get_label=lambda sc: truncate_name(sc.name, MAX_SC_LEN))
sc = QuerySelectMultipleField('Solver Configurations', get_label=lambda sc: truncate_name(sc.name, MAX_SC_LEN))
result_property = SelectField('Property')
log_property = BooleanField("Logarithmic property-axis", default=True)
restart_strategy = BooleanField(
u"Show restart strategy (t_rs, green=original mean, blue=mean with restarts, red=restart at)")
i = QuerySelectMultipleField('Instances', get_pk=lambda i: i.idInstance, allow_blank=True)
instance_filter = TextField('Filter Instances')
class ProbabilisticDominationForm(Form):
result_property = SelectField('Property')
solver_config1 = QuerySelectField('First Solver Configuration',
get_label=lambda sc: truncate_name(sc.name, MAX_SC_LEN))
solver_config2 = QuerySelectField('Second Solver Configuration',
get_label=lambda sc: truncate_name(sc.name, MAX_SC_LEN))
instance_filter = TextField('Filter Instances')
i = QuerySelectMultipleField('Instances', get_pk=lambda i: i.idInstance, allow_blank=True)
class BoxPlotForm(Form):
solver_configs = QuerySelectMultipleField('Solver Configurations',
get_label=lambda sc: truncate_name(sc.name, MAX_SC_LEN))
result_property = SelectField('Property')
instances = QuerySelectMultipleField('Instances')
instance_filter = TextField('Filter Instances')
i = QuerySelectMultipleField('Instances', get_pk=lambda i: i.idInstance, allow_blank=True)
class RankingForm(Form):
i = QuerySelectMultipleField('Instances', get_label=lambda i: i.get_name(), get_pk=lambda i: i.idInstance,
allow_blank=True)
sc = QuerySelectMultipleField('Solver Configurations', get_label=lambda sc: truncate_name(sc.name, MAX_SC_LEN))
calculate_average_dev = BooleanField('Calculate dispersion measures', default=False)
penalized_average_runtime = BooleanField('Calculate penalized average cost', default=False)
median_runtime = BooleanField('Calculate penalized median cost', default=False)
par_factor = IntegerField('Penalty factor', default=1)
fixed_limit = FloatField('Fixed limit')
careful_ranking = BooleanField("Calculate careful ranking", default=False)
careful_ranking_noise = FloatField("Noise", default=1.0, validators=[validators.required()])
survnoise = FloatField("Noise", default=0.0, validators=[validators.required()])
survival_ranking_alpha = FloatField("alpha", default=0.05, validators=[validators.required()])
survival_ranking = BooleanField("Calculate survival ranking", default=False)
break_careful_ties = BooleanField('Break careful ranking ties', default=False)
instance_filter = TextField('Filter Instances')
cost = SelectField('Cost', choices=[('resultTime', 'CPU Time'), ('wallTime', 'Walltime'), ('cost', 'Cost')])
property_limit = FloatField("Property limit (for result properties)", default=1.0,
validators=[validators.required()])
show_top = IntegerField("Maximum number of configurations displayed", default=1000)
class SOTAForm(Form):
sc = QuerySelectMultipleField('Solver Configurations', get_label=lambda sc: truncate_name(sc.name, MAX_SC_LEN))
i = QuerySelectMultipleField('Instances', get_label=lambda i: i.get_name(), get_pk=lambda i: i.idInstance,
allow_blank=True)
cost = SelectField('Cost', choices=[('resultTime', 'CPU Time'), ('wallTime', 'Walltime'), ('cost', 'Cost')])
instance_filter = TextField('Filter Instances')
class ResultsBySolverAndInstanceForm(Form):
solver_configs = QuerySelectMultipleField('Solver Configurations',
get_label=lambda sc: truncate_name(sc.name, MAX_SC_LEN))
cost = SelectField('Cost', choices=[('resultTime', 'CPU Time'), ('wallTime', 'Walltime'), ('cost', 'Cost')])
display_measure = SelectField('Display measure', default='par10',
choices=[('mean', 'mean'), ('median', 'median'),
('par10', 'par10'), ('min', 'min'), ('max', 'max'), ('par1', 'par1')])
i = QuerySelectMultipleField('Instances', get_pk=lambda i: i.idInstance, allow_blank=True)
instance_filter = TextField('Filter Instances')
calculate_dispersion = BooleanField('Calculate dispersion measures', default=False)
class RuntimeMatrixPlotForm(Form):
measure = SelectField('Measure', default='par10',
choices=[('mean', 'mean'),
('par10', 'par10'), ('min', 'min'), ('max', 'max')])
result_property = SelectField('Result property',
choices=[('resultTime', 'CPU Time'), ('wallTime', 'Walltime'), ('cost', 'Cost')])
class ParameterPlot2DForm(Form):
parameter1 = SelectField('First parameter')
parameter2 = SelectField('Second parameter')
log_x = BooleanField("Logarithmic x-axis")
log_y = BooleanField("Logarithmic y-axis")
log_cost = BooleanField("Logarithmic cost")
measure = SelectField('Measure', default='par10',
choices=[('mean', 'mean'), ('median', 'median'),
('par10', 'par10'), ('min', 'min'), ('max', 'max')])
i = QuerySelectMultipleField('Instances', get_pk=lambda i: i.idInstance, allow_blank=True)
instance_filter = TextField('Filter Instances')
surface_interpolation = BooleanField("Interpolate surface", default=True)
class ParameterPlot1DForm(Form):
parameter = SelectField('First parameter')
log_x = BooleanField("Logarithmic parameter-axis")
log_y = BooleanField("Logarithmic cost-axis")
measure = SelectField('Measure', default='par10',
choices=[('mean', 'mean'), ('median', 'median'),
('par10', 'par10'), ('min', 'min'), ('max', 'max')])
i = QuerySelectMultipleField('Instances', get_pk=lambda i: i.idInstance, allow_blank=True)
instance_filter = TextField('Filter Instances')
class MonitorForm(Form):
experiments = QuerySelectMultipleField('Experiments', get_label=lambda e: e.name)
status = QuerySelectMultipleField('Status', get_label=lambda e: e.description)
class ClientForm(Form):
experiments = QuerySelectMultipleField('Experiments', get_label=lambda e: e.name)
| |
"""
sourcemap.decoder
~~~~~~~~~~~~~~~~~
Includes source from:
https://github.com/martine/python-sourcemap
Original source under Apache license, see:
https://github.com/martine/python-sourcemap/blob/master/COPYING
:copyright: (c) 2013 by Matt Robenolt
:license: BSD, see LICENSE for more details.
"""
import os
import sys
from functools import partial
from .exceptions import SourceMapDecodeError
from .objects import Token, SourceMapIndex
try:
import simplejson as json
except ImportError:
import json # NOQA
__all__ = ('SourceMapDecoder',)
if sys.version_info[0] == 2:
from itertools import imap as map
text_type = unicode
else:
text_type = str
class SourceMapDecoder(object):
def parse_vlq(self, segment):
"""
Parse a string of VLQ-encoded data.
Returns:
a list of integers.
"""
values = []
cur, shift = 0, 0
for c in segment:
val = B64[ord(c)]
# Each character is 6 bits:
# 5 of value and the high bit is the continuation.
val, cont = val & 0b11111, val >> 5
cur += val << shift
shift += 5
if not cont:
# The low bit of the unpacked value is the sign.
cur, sign = cur >> 1, cur & 1
if sign:
cur = -cur
values.append(cur)
cur, shift = 0, 0
if cur or shift:
raise SourceMapDecodeError('leftover cur/shift in vlq decode')
return values
def decode(self, source):
"""Decode a source map object into a SourceMapIndex.
The index is keyed on (dst_line, dst_column) for lookups,
and a per row index is kept to help calculate which Token to retrieve.
For example:
A minified source file has two rows and two tokens per row.
# All parsed tokens
tokens = [
Token(dst_row=0, dst_col=0),
Token(dst_row=0, dst_col=5),
Token(dst_row=1, dst_col=0),
Token(dst_row=1, dst_col=12),
]
        Two-dimensional array of columns, indexed by row
rows = [
[0, 5],
[0, 12],
]
Token lookup, based on location
index = {
(0, 0): tokens[0],
(0, 5): tokens[1],
(1, 0): tokens[2],
(1, 12): tokens[3],
}
To find the token at (1, 20):
- Check if there's a direct hit on the index (1, 20) => False
- Pull rows[1] => [0, 12]
- bisect_right to find the closest match:
bisect_right([0, 12], 20) => 2
- Fetch the column number before, since we want the column
lte to the bisect_right: 2-1 => row[2-1] => 12
- At this point, we know the token location, (1, 12)
- Pull (1, 12) from index => tokens[3]
"""
# According to spec (https://docs.google.com/document/d/1U1RGAehQwRypUTovF1KRlpiOFze0b-_2gc6fAH0KY0k/edit#heading=h.h7yy76c5il9v)
        # A SourceMap may be prepended with ")]}'" to cause a JavaScript error.
# If the file starts with that string, ignore the entire first line.
if source[:4] == ")]}'" or source[:3] == ")]}":
source = source.split('\n', 1)[1]
smap = json.loads(source)
sources = smap['sources']
sourceRoot = smap.get('sourceRoot')
names = list(map(text_type, smap['names']))
mappings = smap['mappings']
lines = mappings.split(';')
if sourceRoot is not None:
sources = list(map(partial(os.path.join, sourceRoot), sources))
# List of all tokens
tokens = []
# line_index is used to identify the closest column when looking up a token
line_index = []
# Main index of all tokens
# The index is keyed on (line, column)
index = {}
dst_col, src_id, src_line, src_col, name_id = 0, 0, 0, 0, 0
for dst_line, line in enumerate(lines):
# Create list for columns in index
line_index.append([])
segments = line.split(',')
dst_col = 0
for segment in segments:
if not segment:
continue
parse = self.parse_vlq(segment)
dst_col += parse[0]
src = None
name = None
if len(parse) > 1:
try:
src_id += parse[1]
if not 0 <= src_id < len(sources):
raise SourceMapDecodeError(
"Segment %s references source %d; there are "
"%d sources" % (segment, src_id, len(sources))
)
src = sources[src_id]
src_line += parse[2]
src_col += parse[3]
if len(parse) > 4:
name_id += parse[4]
if not 0 <= name_id < len(names):
raise SourceMapDecodeError(
"Segment %s references name %d; there are "
"%d names" % (segment, name_id, len(names))
)
name = names[name_id]
except IndexError:
raise SourceMapDecodeError(
"Invalid segment %s, parsed as %r"
% (segment, parse)
)
try:
assert dst_line >= 0, ('dst_line', dst_line)
assert dst_col >= 0, ('dst_col', dst_col)
assert src_line >= 0, ('src_line', src_line)
assert src_col >= 0, ('src_col', src_col)
                except AssertionError as e:
                    # e.args[0] is the ('name', value) tuple passed to the failing assert.
                    name, value = e.args[0]
                    raise SourceMapDecodeError(
                        "Segment %s has negative %s (%d), in file %s"
                        % (segment, name, value, src)
                    )
token = Token(dst_line, dst_col, src, src_line, src_col, name)
tokens.append(token)
# Insert into main index
index[(dst_line, dst_col)] = token
# Insert into specific line index
line_index[dst_line].append(dst_col)
return SourceMapIndex(smap, tokens, line_index, index, sources)
# Mapping of base64 letter -> integer value.
# This weird list is being allocated for faster lookups
B64 = [-1] * 123
for i, c in enumerate('ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'):
B64[ord(c)] = i
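# Illustrative sketch (added; not part of the original module): a lookup helper that
# follows the algorithm described in SourceMapDecoder.decode's docstring. It assumes
# `index` is the {(line, col): Token} dict and `rows` the per-line list of columns,
# exactly as decode() builds them; the helper name is hypothetical.
from bisect import bisect_right as _bisect_right

def _example_token_lookup(index, rows, line, col):
    # Direct hit first.
    if (line, col) in index:
        return index[(line, col)]
    # Otherwise take the rightmost known column at or before `col` on this line.
    columns = rows[line]
    pos = _bisect_right(columns, col)
    if pos == 0:
        return None  # nothing starts at or before this column
    return index[(line, columns[pos - 1])]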
| |
from distutils.command.build_ext import build_ext as _du_build_ext
try:
# Attempt to use Pyrex for building extensions, if available
from Pyrex.Distutils.build_ext import build_ext as _build_ext
except ImportError:
_build_ext = _du_build_ext
import os, sys
from distutils.file_util import copy_file
from setuptools.extension import Library
from distutils.ccompiler import new_compiler
from distutils.sysconfig import customize_compiler, get_config_var
get_config_var("LDSHARED") # make sure _config_vars is initialized
from distutils.sysconfig import _config_vars
from distutils import log
from distutils.errors import *
have_rtld = False
use_stubs = False
libtype = 'shared'
if sys.platform == "darwin":
use_stubs = True
elif os.name != 'nt':
try:
from dl import RTLD_NOW
have_rtld = True
use_stubs = True
except ImportError:
pass
def if_dl(s):
if have_rtld:
return s
return ''
class build_ext(_build_ext):
def run(self):
"""Build extensions in build directory, then copy if --inplace"""
old_inplace, self.inplace = self.inplace, 0
_build_ext.run(self)
self.inplace = old_inplace
if old_inplace:
self.copy_extensions_to_source()
def copy_extensions_to_source(self):
build_py = self.get_finalized_command('build_py')
for ext in self.extensions:
fullname = self.get_ext_fullname(ext.name)
filename = self.get_ext_filename(fullname)
modpath = fullname.split('.')
package = '.'.join(modpath[:-1])
package_dir = build_py.get_package_dir(package)
dest_filename = os.path.join(package_dir,os.path.basename(filename))
src_filename = os.path.join(self.build_lib,filename)
# Always copy, even if source is older than destination, to ensure
# that the right extensions for the current Python/platform are
# used.
copy_file(
src_filename, dest_filename, verbose=self.verbose,
dry_run=self.dry_run
)
if ext._needs_stub:
self.write_stub(package_dir or os.curdir, ext, True)
if _build_ext is not _du_build_ext and \
not hasattr(_build_ext,'pyrex_sources'):
# Workaround for problems using some Pyrex versions w/SWIG and/or 2.4
def swig_sources(self, sources, *otherargs):
# first do any Pyrex processing
sources = _build_ext.swig_sources(self, sources) or sources
# Then do any actual SWIG stuff on the remainder
return _du_build_ext.swig_sources(self, sources, *otherargs)
def get_ext_filename(self, fullname):
filename = _build_ext.get_ext_filename(self,fullname)
ext = self.ext_map[fullname]
if isinstance(ext,Library):
fn, ext = os.path.splitext(filename)
return self.shlib_compiler.library_filename(fn,libtype)
elif use_stubs and ext._links_to_dynamic:
d,fn = os.path.split(filename)
return os.path.join(d,'dl-'+fn)
else:
return filename
def initialize_options(self):
_build_ext.initialize_options(self)
self.shlib_compiler = None
self.shlibs = []
self.ext_map = {}
def finalize_options(self):
_build_ext.finalize_options(self)
self.extensions = self.extensions or []
self.check_extensions_list(self.extensions)
self.shlibs = [ext for ext in self.extensions
if isinstance(ext,Library)]
if self.shlibs:
self.setup_shlib_compiler()
for ext in self.extensions:
ext._full_name = self.get_ext_fullname(ext.name)
for ext in self.extensions:
fullname = ext._full_name
self.ext_map[fullname] = ext
ltd = ext._links_to_dynamic = \
self.shlibs and self.links_to_dynamic(ext) or False
ext._needs_stub = ltd and use_stubs and not isinstance(ext,Library)
filename = ext._file_name = self.get_ext_filename(fullname)
libdir = os.path.dirname(os.path.join(self.build_lib,filename))
if ltd and libdir not in ext.library_dirs:
ext.library_dirs.append(libdir)
if ltd and use_stubs and os.curdir not in ext.runtime_library_dirs:
ext.runtime_library_dirs.append(os.curdir)
def setup_shlib_compiler(self):
compiler = self.shlib_compiler = new_compiler(
compiler=self.compiler, dry_run=self.dry_run, force=self.force
)
if sys.platform == "darwin":
tmp = _config_vars.copy()
try:
# XXX Help! I don't have any idea whether these are right...
_config_vars['LDSHARED'] = \
"gcc -Wl,-x -dynamiclib -undefined dynamic_lookup"
_config_vars['CCSHARED'] = " -dynamiclib"
_config_vars['SO'] = ".dylib"
customize_compiler(compiler)
finally:
_config_vars.clear()
_config_vars.update(tmp)
else:
customize_compiler(compiler)
if self.include_dirs is not None:
compiler.set_include_dirs(self.include_dirs)
if self.define is not None:
# 'define' option is a list of (name,value) tuples
for (name,value) in self.define:
compiler.define_macro(name, value)
if self.undef is not None:
for macro in self.undef:
compiler.undefine_macro(macro)
if self.libraries is not None:
compiler.set_libraries(self.libraries)
if self.library_dirs is not None:
compiler.set_library_dirs(self.library_dirs)
if self.rpath is not None:
compiler.set_runtime_library_dirs(self.rpath)
if self.link_objects is not None:
compiler.set_link_objects(self.link_objects)
# hack so distutils' build_extension() builds a library instead
compiler.link_shared_object = link_shared_object.__get__(compiler)
def get_export_symbols(self, ext):
if isinstance(ext,Library):
return ext.export_symbols
return _build_ext.get_export_symbols(self,ext)
def build_extension(self, ext):
_compiler = self.compiler
try:
if isinstance(ext,Library):
self.compiler = self.shlib_compiler
_build_ext.build_extension(self,ext)
if ext._needs_stub:
self.write_stub(
self.get_finalized_command('build_py').build_lib, ext
)
finally:
self.compiler = _compiler
def links_to_dynamic(self, ext):
"""Return true if 'ext' links to a dynamic lib in the same package"""
# XXX this should check to ensure the lib is actually being built
# XXX as dynamic, and not just using a locally-found version or a
# XXX static-compiled version
libnames = dict.fromkeys([lib._full_name for lib in self.shlibs])
pkg = '.'.join(ext._full_name.split('.')[:-1]+[''])
for libname in ext.libraries:
if pkg+libname in libnames: return True
return False
def get_outputs(self):
outputs = _build_ext.get_outputs(self)
optimize = self.get_finalized_command('build_py').optimize
for ext in self.extensions:
if ext._needs_stub:
base = os.path.join(self.build_lib, *ext._full_name.split('.'))
outputs.append(base+'.py')
outputs.append(base+'.pyc')
if optimize:
outputs.append(base+'.pyo')
return outputs
def write_stub(self, output_dir, ext, compile=False):
log.info("writing stub loader for %s to %s",ext._full_name, output_dir)
stub_file = os.path.join(output_dir, *ext._full_name.split('.'))+'.py'
if compile and os.path.exists(stub_file):
raise DistutilsError(stub_file+" already exists! Please delete.")
if not self.dry_run:
f = open(stub_file,'w')
f.write('\n'.join([
"def __bootstrap__():",
" global __bootstrap__, __file__, __loader__",
" import sys, os, pkg_resources, imp"+if_dl(", dl"),
" __file__ = pkg_resources.resource_filename(__name__,%r)"
% os.path.basename(ext._file_name),
" del __bootstrap__",
" if '__loader__' in globals():",
" del __loader__",
if_dl(" old_flags = sys.getdlopenflags()"),
" old_dir = os.getcwd()",
" try:",
" os.chdir(os.path.dirname(__file__))",
if_dl(" sys.setdlopenflags(dl.RTLD_NOW)"),
" imp.load_dynamic(__name__,__file__)",
" finally:",
if_dl(" sys.setdlopenflags(old_flags)"),
" os.chdir(old_dir)",
"__bootstrap__()",
"" # terminal \n
]))
f.close()
if compile:
from distutils.util import byte_compile
byte_compile([stub_file], optimize=0,
force=True, dry_run=self.dry_run)
optimize = self.get_finalized_command('install_lib').optimize
if optimize > 0:
byte_compile([stub_file], optimize=optimize,
force=True, dry_run=self.dry_run)
if os.path.exists(stub_file) and not self.dry_run:
os.unlink(stub_file)
if use_stubs or os.name=='nt':
# Build shared libraries
#
def link_shared_object(self, objects, output_libname, output_dir=None,
libraries=None, library_dirs=None,
runtime_library_dirs=None, export_symbols=None,
debug=0, extra_preargs=None, extra_postargs=None,
build_temp=None, target_lang=None):
self.link(
self.SHARED_LIBRARY, objects, output_libname,
output_dir, libraries, library_dirs, runtime_library_dirs,
export_symbols, debug, extra_preargs, extra_postargs,
build_temp, target_lang
)
else:
# Build static libraries everywhere else
libtype = 'static'
def link_shared_object(self, objects, output_libname, output_dir=None,
libraries=None, library_dirs=None,
runtime_library_dirs=None, export_symbols=None,
debug=0, extra_preargs=None, extra_postargs=None,
build_temp=None, target_lang=None):
# XXX we need to either disallow these attrs on Library instances,
# or warn/abort here if set, or something...
#libraries=None, library_dirs=None, runtime_library_dirs=None,
#export_symbols=None, extra_preargs=None, extra_postargs=None,
#build_temp=None
assert output_dir is None # distutils build_ext doesn't pass this
output_dir, filename = os.path.split(output_libname)
basename, ext = os.path.splitext(filename)
if self.library_filename("x").startswith('lib'):
# strip 'lib' prefix; this is kludgy if some platform uses
# a different prefix
basename = basename[3:]
self.create_static_lib(
objects, basename, output_dir, debug, target_lang)
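
# Hedged sketch, not part of the original setuptools module: the uncalled
# helper below shows how the build_ext subclass and the Library extension
# type handled above might be wired into a setup() call.  All package and
# source file names are hypothetical.
def _example_setup_py():
    from setuptools import setup
    from setuptools.extension import Extension, Library
    setup(
        name='examplepkg',
        version='0.1',
        packages=['examplepkg'],
        ext_modules=[
            # A shared library that other extensions in the package may link
            # against; build_ext compiles it with the shlib compiler.
            Library('examplepkg.libshared', sources=['examplepkg/libshared.c']),
            # A regular extension; listing 'libshared' in libraries makes
            # links_to_dynamic() treat it as linking to the in-package library.
            Extension('examplepkg._fast', sources=['examplepkg/_fast.c'],
                      libraries=['libshared']),
        ],
        cmdclass={'build_ext': build_ext},
    )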
| |
# Copyright (c) 2010-2014 openpyxl
import pytest
# package imports
from openpyxl.compat import safe_string
from openpyxl.reader.excel import load_workbook
from openpyxl.reader.style import read_style_table
from openpyxl.styles import (
numbers,
Color,
Font,
PatternFill,
GradientFill,
Border,
Side,
Alignment
)
from openpyxl.styles import borders
from openpyxl.xml.functions import Element
@pytest.fixture
def StyleReader():
from ..style import SharedStylesParser
return SharedStylesParser
@pytest.mark.parametrize("value, expected",
[
('f', False),
('0', False),
('false', False),
('1', True),
('t', True),
('true', True),
('anyvalue', True),
])
def test_bool_attrib(value, expected):
    from ..style import bool_attrib
el = Element("root", value=value)
assert bool_attrib(el, "value") is expected
def test_read_pattern_fill(StyleReader, datadir):
datadir.chdir()
expected = [
PatternFill(),
PatternFill(fill_type='gray125'),
PatternFill(fill_type='solid',
start_color=Color(theme=0, tint=-0.14999847407452621),
end_color=Color(indexed=64)
),
PatternFill(fill_type='solid',
start_color=Color(theme=0),
end_color=Color(indexed=64)
),
PatternFill(fill_type='solid',
start_color=Color(indexed=62),
end_color=Color(indexed=64)
)
]
with open("bug311-styles.xml") as src:
reader = StyleReader(src.read())
for val, exp in zip(reader.parse_fills(), expected):
assert val == exp
def test_read_gradient_fill(StyleReader, datadir):
datadir.chdir()
expected = [
GradientFill(degree=90, stop=(Color(theme=0), Color(theme=4)))
]
with open("bug284-styles.xml") as src:
reader = StyleReader(src.read())
assert list(reader.parse_fills()) == expected
def test_unprotected_cell(StyleReader, datadir):
datadir.chdir()
    with open("worksheet_unprotected_style.xml") as src:
reader = StyleReader(src.read())
from openpyxl.styles import Font
reader.font_list = [Font(), Font(), Font(), Font(), Font()]
reader.parse_cell_xfs()
assert len(reader.shared_styles) == 3
# default is cells are locked
style = reader.shared_styles[0]
assert style.protection.locked is True
style = reader.shared_styles[2]
assert style.protection.locked is False
def test_read_cell_style(datadir):
datadir.chdir()
with open("empty-workbook-styles.xml") as content:
style_properties = read_style_table(content.read())
assert len(style_properties) == 3
def test_read_xf_no_number_format(datadir, StyleReader):
datadir.chdir()
with open("no_number_format.xml") as src:
reader = StyleReader(src.read())
from openpyxl.styles import Font
reader.font_list = [Font(), Font()]
reader.parse_cell_xfs()
styles = reader.shared_styles
assert len(styles) == 3
assert styles[0].number_format == 'General'
assert styles[1].number_format == 'General'
assert styles[2].number_format == 'mm-dd-yy'
def test_read_simple_style_mappings(datadir):
datadir.chdir()
with open("simple-styles.xml") as content:
style_properties = read_style_table(content.read())[0]
assert len(style_properties) == 4
assert numbers.BUILTIN_FORMATS[9] == style_properties[1].number_format
assert 'yyyy-mm-dd' == style_properties[2].number_format
def test_read_complex_style_mappings(datadir):
datadir.chdir()
with open("complex-styles.xml") as content:
style_properties = read_style_table(content.read())[0]
assert len(style_properties) == 29
assert style_properties[-1].font.bold is False
def test_read_complex_style(datadir):
datadir.chdir()
wb = load_workbook("complex-styles.xlsx")
ws = wb.get_active_sheet()
assert ws.column_dimensions['A'].width == 31.1640625
assert ws.column_dimensions['I'].style.font == Font(sz=12.0, color='FF3300FF')
assert ws.column_dimensions['I'].style.fill == PatternFill(patternType='solid', fgColor='FF006600', bgColor=Color(indexed=64))
assert ws['A2'].font == Font(sz=10, name='Arial', color=Color(theme=1))
assert ws['A3'].font == Font(sz=12, name='Arial', bold=True, color=Color(theme=1))
assert ws['A4'].font == Font(sz=14, name='Arial', italic=True, color=Color(theme=1))
assert ws['A5'].font.color.value == 'FF3300FF'
assert ws['A6'].font.color.value == 9
assert ws['A7'].fill.start_color.value == 'FFFFFF66'
assert ws['A8'].fill.start_color.value == 8
assert ws['A9'].alignment.horizontal == 'left'
assert ws['A10'].alignment.horizontal == 'right'
assert ws['A11'].alignment.horizontal == 'center'
assert ws['A12'].alignment.vertical == 'top'
assert ws['A13'].alignment.vertical == 'center'
assert ws['A14'].alignment.vertical == 'bottom'
assert ws['A15'].number_format == '0.00'
assert ws['A16'].number_format == 'mm-dd-yy'
assert ws['A17'].number_format == '0.00%'
assert 'A18:B18' in ws._merged_cells
assert ws['A19'].border == Border(
left=Side(style='thin', color='FF006600'),
top=Side(style='thin', color='FF006600'),
right=Side(style='thin', color='FF006600'),
bottom=Side(style='thin', color='FF006600'),
)
assert ws['A21'].border == Border(
left=Side(style='double', color=Color(theme=7)),
top=Side(style='double', color=Color(theme=7)),
right=Side(style='double', color=Color(theme=7)),
bottom=Side(style='double', color=Color(theme=7)),
)
assert ws['A23'].fill == PatternFill(patternType='solid', start_color='FFCCCCFF', end_color=(Color(indexed=64)))
assert ws['A23'].border.top == Side(style='mediumDashed', color=Color(theme=6))
assert 'A23:B24' in ws._merged_cells
assert ws['A25'].alignment == Alignment(wrapText=True)
assert ws['A26'].alignment == Alignment(shrinkToFit=True)
def test_change_existing_styles(datadir):
    datadir.chdir()
    wb = load_workbook("complex-styles.xlsx")
ws = wb.get_active_sheet()
ws.column_dimensions['A'].width = 20
i_style = ws.column_dimensions['I'].style
ws.column_dimensions['I'].style = i_style.copy(fill=PatternFill(fill_type='solid',
start_color=Color('FF442200')),
font=Font(color=Color('FF002244')))
assert ws.column_dimensions['I'].style.fill.start_color.value == 'FF442200'
assert ws.column_dimensions['I'].style.font.color.value == 'FF002244'
ws.cell('A2').style = ws.cell('A2').style.copy(font=Font(name='Times New Roman',
size=12,
bold=True,
italic=True))
assert ws['A2'].font == Font(name='Times New Roman', size=12, bold=True,
italic=True)
ws.cell('A3').style = ws.cell('A3').style.copy(font=Font(name='Times New Roman',
size=14,
bold=False,
italic=True))
assert ws['A3'].font == Font(name='Times New Roman', size=14,
bold=False, italic=True)
ws.cell('A4').style = ws.cell('A4').style.copy(font=Font(name='Times New Roman',
size=16,
bold=True,
italic=False))
assert ws['A4'].font == Font(name='Times New Roman', size=16, bold=True,
italic=False)
ws.cell('A5').style = ws.cell('A5').style.copy(font=Font(color=Color('FF66FF66')))
assert ws['A5'].font == Font(color='FF66FF66')
ws.cell('A6').style = ws.cell('A6').style.copy(font=Font(color=Color(theme='1')))
assert ws['A6'].font == Font(color=Color(theme='1'))
ws.cell('A7').style = ws.cell('A7').style.copy(fill=PatternFill(fill_type='solid',
start_color=Color('FF330066')))
assert ws['A7'].fill == PatternFill(fill_type='solid',
start_color=Color('FF330066'))
ws.cell('A8').style = ws.cell('A8').style.copy(fill=PatternFill(fill_type='solid',
start_color=Color(theme='2')))
assert ws['A8'].fill == PatternFill(fill_type='solid',
start_color=Color(theme='2'))
ws.cell('A9').style = ws.cell('A9').style.copy(alignment=Alignment(horizontal='center'))
assert ws['A9'].alignment == Alignment(horizontal='center')
ws.cell('A10').style = ws.cell('A10').style.copy(alignment=Alignment(horizontal='left'))
assert ws['A10'].alignment == Alignment(horizontal='left')
ws.cell('A11').style = ws.cell('A11').style.copy(alignment=Alignment(horizontal='right'))
assert ws['A11'].alignment == Alignment(horizontal='right')
ws.cell('A12').style = ws.cell('A12').style.copy(alignment=Alignment(vertical='bottom'))
assert ws['A12'].alignment == Alignment(vertical='bottom')
ws.cell('A13').style = ws.cell('A13').style.copy(alignment=Alignment(vertical='top'))
assert ws['A13'].alignment == Alignment(vertical='top')
ws.cell('A14').style = ws.cell('A14').style.copy(alignment=Alignment(vertical='center'))
assert ws['A14'].alignment == Alignment(vertical='center')
ws.cell('A15').style = ws.cell('A15').style.copy(number_format='0.00%')
assert ws['A15'].number_format == '0.00%'
ws.cell('A16').style = ws.cell('A16').style.copy(number_format='0.00')
assert ws['A16'].number_format == '0.00'
ws.cell('A17').style = ws.cell('A17').style.copy(number_format='mm-dd-yy')
assert ws['A17'].number_format == 'mm-dd-yy'
ws.unmerge_cells('A18:B18')
ws.cell('A19').style = ws.cell('A19').style.copy(border=Border(top=Side(border_style=borders.BORDER_THIN,
color=Color('FF006600')),
bottom=Side(border_style=borders.BORDER_THIN,
color=Color('FF006600')),
left=Side(border_style=borders.BORDER_THIN,
color=Color('FF006600')),
right=Side(border_style=borders.BORDER_THIN,
color=Color('FF006600'))))
assert ws['A19'].border == Border(
top=Side(border_style=borders.BORDER_THIN, color='FF006600'),
bottom=Side(border_style=borders.BORDER_THIN, color='FF006600'),
left=Side(border_style=borders.BORDER_THIN, color='FF006600'),
right=Side(border_style=borders.BORDER_THIN, color='FF006600'))
ws.cell('A21').style = ws.cell('A21').style.copy(border=Border(top=Side(border_style=borders.BORDER_THIN,
color=Color(theme=7)),
bottom=Side(border_style=borders.BORDER_THIN,
color=Color(theme=7)),
left=Side(border_style=borders.BORDER_THIN,
color=Color(theme=7)),
right=Side(border_style=borders.BORDER_THIN,
color=Color(theme=7))))
assert ws['A21'].border == Border(
top=Side(border_style=borders.BORDER_THIN, color=Color(theme=7)),
bottom=Side(border_style=borders.BORDER_THIN, color=Color(theme=7)),
left=Side(border_style=borders.BORDER_THIN, color=Color(theme=7)),
right=Side(border_style=borders.BORDER_THIN, color=Color(theme=7)))
ws.cell('A23').style = ws.cell('A23').style.copy(border=Border(top=Side(border_style=borders.BORDER_THIN,
color=Color(theme=6))),
fill=PatternFill(fill_type='solid',
start_color=Color('FFCCCCFF')))
assert ws['A23'].border == Border(
top=Side(style=borders.BORDER_THIN, color=Color(theme=6))
)
ws.unmerge_cells('A23:B24')
ws.cell('A25').style = ws.cell('A25').style.copy(alignment=Alignment(wrap_text=False))
assert ws['A25'].alignment == Alignment(wrap_text=False)
ws.cell('A26').style = ws.cell('A26').style.copy(alignment=Alignment(shrink_to_fit=False))
assert ws['A26'].alignment == Alignment(shrink_to_fit=False)
assert ws.column_dimensions['A'].width == 20.0
def test_none_values(datadir, StyleReader):
datadir.chdir()
with open("none_value_styles.xml") as src:
reader = StyleReader(src.read())
fonts = tuple(reader.parse_fonts())
assert fonts[0].scheme is None
assert fonts[0].vertAlign is None
assert fonts[1].u is None
def test_alignment(datadir, StyleReader):
datadir.chdir()
with open("alignment_styles.xml") as src:
reader = StyleReader(src.read())
reader.parse_cell_xfs()
st1 = reader.shared_styles[2]
assert st1.alignment.textRotation == 255
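
def _example_parse_styles(styles_xml_text):
    # Hedged sketch, not one of the original tests: mirrors the pattern the
    # tests above use with the StyleReader fixture.  ``..style`` resolves to
    # openpyxl.reader.style in this test package; the argument is the raw
    # text of a styles.xml part.
    from openpyxl.reader.style import SharedStylesParser
    reader = SharedStylesParser(styles_xml_text)
    fills = list(reader.parse_fills())
    reader.font_list = list(reader.parse_fonts())
    reader.parse_cell_xfs()
    return fills, reader.font_list, reader.shared_styles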
| |
#!/usr/bin/env python
#
# Copyright (c) 2010 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Chromoting Directory API client implementation. Used for testing/debugging
# purposes. Requires Python 2.6: json module is not available in earlier
# versions.
import os
import httplib
import json
import urllib
import urllib2
import random
import sys
DEFAULT_DIRECTORY_SERVER = 'www.googleapis.com'
auth_filepath = os.path.join(os.path.expanduser('~'),
'.chromotingDirectoryAuthToken')
def random_uuid():
return ("%04x%04x-%04x-%04x-%04x-%04x%04x%04x" %
tuple(map(lambda x: random.randrange(0,65536), range(8))))
class Host:
def __init__(self, parameters=None):
if parameters != None:
self.host_id = parameters[u"hostId"]
self.host_name = parameters[u"hostName"]
self.public_key = parameters[u"publicKey"]
# Following fields may be missing, use get() for them.
self.jabber_id = parameters.get(u"jabberId")
self.created_time = parameters.get(u"createdTime")
self.updated_time = parameters.get(u"updatedTime")
self.status = parameters.get(u"status")
else:
self.host_id = random_uuid()
import socket
self.host_name = socket.gethostname()
self.public_key = None
self.jabber_id = None
self.created_time = None
self.updated_time = None
self.status = None
class HostDirectoryError(Exception):
def __init__(self, message, response):
Exception.__init__(self, message)
print response
self._response = response
class HostDirectory:
def __init__(self, username, auth_token, server=DEFAULT_DIRECTORY_SERVER):
self._username = username
self._auth_token = auth_token
self._base_url = '/chromoting/v1/@me/hosts'
self._http = httplib.HTTPSConnection(server)
self._headers = {"Authorization": "GoogleLogin auth=" + self._auth_token,
"Content-Type": "application/json" }
def add_host(self, host):
host_json = { 'data':
{ 'hostId': host.host_id,
'hostName': host.host_name,
'publicKey': host.public_key,
}
}
if host.jabber_id:
host_json['data']['jabberId'] = host.jabber_id
post_data = json.dumps(host_json)
self._http.request("POST", self._base_url, post_data, self._headers)
response = self._http.getresponse()
if response.status != 200:
raise HostDirectoryError(response.reason, response.read())
data = response.read()
def get_hosts(self):
self._http.request("GET", self._base_url, headers=self._headers)
response = self._http.getresponse()
if response.status != 200:
raise HostDirectoryError(response.reason, response.read())
data = response.read()
data = json.loads(data)[u'data']
results = []
if data.has_key(u'items'):
for item in data[u'items']:
results.append(Host(item))
return results
def delete_host(self, host_id):
url = self._base_url + '/' + host_id
self._http.request("DELETE", url, headers=self._headers)
response = self._http.getresponse()
if response.status / 100 != 2: # Normally 204 is returned
raise HostDirectoryError(response.reason, response.read())
data = response.read()
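
def _example_directory_usage():
  # Hedged sketch, not part of the original tool: shows how Host and
  # HostDirectory above fit together.  Credentials come from the auth file
  # written by the 'login' command; the public key value is a placeholder.
  username, token = load_auth_token()
  client = HostDirectory(username, token)
  host = Host()                    # random hostId, local hostname
  host.public_key = 'PLACEHOLDER_PUBLIC_KEY'
  client.add_host(host)
  for registered in client.get_hosts():
    print registered.host_id, registered.host_name, registered.jabber_id
  client.delete_host(host.host_id)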
def usage():
sys.stderr.write(
("Usage:\n" +
" Login: \t\t%(cmd)s login\n" +
" Register host: \t%(cmd)s insert --hostId=<hostId>" +
" --hostName=<hostName> \\\n" +
"\t\t\t --publicKey=<publicKey> --jabberId=<jabberId>\n" +
" List hosts: \t\t%(cmd)s list\n" +
" Delete a host: \t%(cmd)s delete <host_id>\n")
% {"cmd" : sys.argv[0]})
return 1
class CommandError(Exception):
def __init__(self, message):
Exception.__init__(self, message)
def load_auth_token():
try:
lines = open(auth_filepath).readlines()
except IOError as e:
raise CommandError("Can't open file (%s). Please run " +
"'%s login' and try again." %
(auth_filepath, sys.argv[0]))
if len(lines) != 2:
raise CommandError("Invalid auth file (%s). Please run " +
"'%s login' and try again." %
(auth_filepath, sys.argv[0]))
return map(lambda x: x.strip(), lines)
def login_cmd(args):
"""login command"""
if len(args) != 0:
return usage()
import getpass
import gaia_auth
print "Email:",
email = raw_input()
passwd = getpass.getpass("Password: ")
  authenticator = gaia_auth.GaiaAuthenticator('chromoting')
auth_token = authenticator.authenticate(email, passwd)
# Set permission mask for created file.
os.umask(0066)
auth_file = open(auth_filepath, 'w')
auth_file.write(email)
auth_file.write('\n')
auth_file.write(auth_token)
auth_file.close()
print 'Auth token: ', auth_token
print '...saved in', auth_filepath
def list_cmd(args):
"""list command"""
if len(args) != 0:
return usage()
(username, token) = load_auth_token()
client = HostDirectory(username, token)
print '%36s %30s %s' % ("HOST ID", "HOST NAME", "JABBER ID")
for host in client.get_hosts():
print '%36s %30s %s' % (host.host_id, host.host_name, host.jabber_id)
return 0
def insert_cmd(args):
"""insert command"""
(username, token) = load_auth_token()
client = HostDirectory(username, token)
host = Host()
for arg in args:
if arg.startswith("--hostId="):
host.host_id = arg[len("--hostId="):]
elif arg.startswith("--hostName="):
host.host_name = arg[len("--hostName="):]
elif arg.startswith("--publicKey="):
host.public_key = arg[len("--publicKey="):]
elif arg.startswith("--jabberId="):
host.jabber_id = arg[len("--jabberId="):]
else:
return usage()
client.add_host(host)
return 0
def delete_cmd(args):
"""delete command"""
if len(args) != 1:
return usage()
host_id = args[0]
(username, token) = load_auth_token()
client = HostDirectory(username, token)
client.delete_host(host_id)
return 0
def main():
import sys
args = sys.argv[1:]
if len(args) == 0:
return usage()
command = args[0]
try:
if command == "help":
usage()
elif command == "login":
return login_cmd(args[1:])
elif command == "list":
return list_cmd(args[1:])
elif command == "insert":
return insert_cmd(args[1:])
elif command == "delete":
return delete_cmd(args[1:])
else:
raise CommandError("Unknown command: %s" % command);
except CommandError as e:
sys.stderr.write("%s\n" % e.args[0])
return 1
return 0
if __name__ == '__main__':
sys.exit(main())
| |
# This file is part of Indico.
# Copyright (C) 2002 - 2019 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import print_function, unicode_literals
import logging
import os
from operator import itemgetter
from celery import Celery
from celery.app.log import Logging
from celery.beat import PersistentScheduler
from contextlib2 import ExitStack
from flask_pluginengine import current_plugin, plugin_context
from sqlalchemy import inspect
from terminaltables import AsciiTable
from indico.core.celery.util import locked_task
from indico.core.config import config
from indico.core.db import db
from indico.core.notifications import flush_email_queue, init_email_queue
from indico.core.plugins import plugin_engine
from indico.util.console import cformat
from indico.util.fossilize import clearCache
from indico.util.string import return_ascii
from indico.web.flask.stats import request_stats_request_started
class IndicoCelery(Celery):
"""Celery sweetened with some Indico/Flask-related sugar
The following extra params are available on the `task` decorator:
- `request_context` -- if True, the task will run inside a Flask
`test_request_context`
- `plugin` -- if set to a plugin name or class, the task will run
inside a plugin context for that plugin. This will
override whatever plugin context is active when
sending the task.
"""
def __init__(self, *args, **kwargs):
kwargs.setdefault('log', IndicoCeleryLogging)
super(IndicoCelery, self).__init__(*args, **kwargs)
self.flask_app = None
self._patch_task()
def init_app(self, app):
if not config.CELERY_BROKER and not app.config['TESTING']:
raise ValueError('Celery broker URL is not set')
self.conf['broker_url'] = config.CELERY_BROKER
self.conf['result_backend'] = config.CELERY_RESULT_BACKEND or config.CELERY_BROKER
self.conf['beat_scheduler'] = IndicoPersistentScheduler
self.conf['beat_schedule_filename'] = os.path.join(config.TEMP_DIR, 'celerybeat-schedule')
self.conf['worker_hijack_root_logger'] = False
self.conf['timezone'] = config.DEFAULT_TIMEZONE
self.conf['task_ignore_result'] = True
self.conf['task_store_errors_even_if_ignored'] = True
self.conf['worker_redirect_stdouts'] = not app.debug
# Pickle isn't pretty but that way we can pass along all types (tz-aware datetimes, sets, etc.)
self.conf['result_serializer'] = 'pickle'
self.conf['task_serializer'] = 'pickle'
self.conf['accept_content'] = ['json', 'yaml', 'pickle']
# Allow indico.conf to override settings
self.conf.update(config.CELERY_CONFIG)
assert self.flask_app is None or self.flask_app is app
self.flask_app = app
def periodic_task(self, *args, **kwargs):
"""Decorator to register a periodic task.
This behaves like the :meth:`task` decorator, but automatically
schedules the task to execute periodically, using extra kwargs
as described in the Celery documentation:
http://celery.readthedocs.org/en/latest/userguide/periodic-tasks.html#available-fields
:param locked: Set this to ``False`` if you want to allow the
task to run more than once at the same time.
"""
def decorator(f):
if kwargs.pop('locked', True):
f = locked_task(f)
entry = {
'schedule': kwargs.pop('run_every'),
'args': kwargs.pop('args', ()),
'kwargs': kwargs.pop('kwargs', {}),
'options': kwargs.pop('options', {}),
'relative': kwargs.pop('relative', False)
}
kwargs.setdefault('ignore_result', True)
task = self.task(f, *args, **kwargs)
entry['task'] = task.name
self.conf['beat_schedule'][task.name] = entry
return task
return decorator
def _patch_task(self):
"""Patches the `task` decorator to run tasks inside the indico environment"""
class IndicoTask(self.Task):
abstract = True
def apply_async(s, args=None, kwargs=None, task_id=None, producer=None,
link=None, link_error=None, shadow=None, **options):
if args is not None:
args = _CelerySAWrapper.wrap_args(args)
if kwargs is not None:
kwargs = _CelerySAWrapper.wrap_kwargs(kwargs)
if current_plugin:
options['headers'] = options.get('headers') or {} # None in a retry
options['headers']['indico_plugin'] = current_plugin.name
return super(IndicoTask, s).apply_async(args=args, kwargs=kwargs, task_id=task_id, producer=producer,
link=link, link_error=link_error, shadow=shadow, **options)
def __call__(s, *args, **kwargs):
stack = ExitStack()
stack.enter_context(self.flask_app.app_context())
if getattr(s, 'request_context', False):
stack.enter_context(self.flask_app.test_request_context(base_url=config.BASE_URL))
args = _CelerySAWrapper.unwrap_args(args)
kwargs = _CelerySAWrapper.unwrap_kwargs(kwargs)
plugin = getattr(s, 'plugin', s.request.get('indico_plugin'))
if isinstance(plugin, basestring):
plugin_name = plugin
plugin = plugin_engine.get_plugin(plugin)
if plugin is None:
stack.close()
raise ValueError('Plugin not active: ' + plugin_name)
                if plugin:
                    stack.enter_context(plugin_context(plugin))
clearCache()
with stack:
request_stats_request_started()
init_email_queue()
rv = super(IndicoTask, s).__call__(*args, **kwargs)
flush_email_queue()
return rv
self.Task = IndicoTask
class IndicoCeleryLogging(Logging):
def _configure_logger(self, logger, *args, **kwargs):
# don't let celery mess with the root logger
if logger is logging.getLogger():
return
super(IndicoCeleryLogging, self)._configure_logger(logger, *args, **kwargs)
class IndicoPersistentScheduler(PersistentScheduler):
"""Celery scheduler that allows indico.conf to override specific entries"""
def setup_schedule(self):
deleted = set()
for task_name, entry in config.SCHEDULED_TASK_OVERRIDE.iteritems():
if task_name not in self.app.conf['beat_schedule']:
self.logger.error('Invalid entry in ScheduledTaskOverride: %s', task_name)
continue
if not entry:
deleted.add(task_name)
del self.app.conf['beat_schedule'][task_name]
elif isinstance(entry, dict):
assert entry.get('task') in {None, task_name} # make sure the task name is not changed
self.app.conf['beat_schedule'][task_name].update(entry)
else:
self.app.conf['beat_schedule'][task_name]['schedule'] = entry
super(IndicoPersistentScheduler, self).setup_schedule()
if not self.app.conf['worker_redirect_stdouts']:
# print the schedule unless we are in production where
# this output would get redirected to a logger which is
# not pretty, especially with the colors
self._print_schedule(deleted)
def _print_schedule(self, deleted):
table_data = [['Name', 'Schedule']]
for entry in sorted(self.app.conf['beat_schedule'].itervalues(), key=itemgetter('task')):
table_data.append([cformat('%{yellow!}{}%{reset}').format(entry['task']),
cformat('%{green}{!r}%{reset}').format(entry['schedule'])])
for task_name in sorted(deleted):
table_data.append([cformat('%{yellow}{}%{reset}').format(task_name),
cformat('%{red!}Disabled%{reset}')])
print(AsciiTable(table_data, cformat('%{white!}Periodic Tasks%{reset}')).table)
class _CelerySAWrapper(object):
"""Wrapper to safely pass SQLAlchemy objects to tasks.
This is achieved by passing only the model name and its PK values
through the Celery serializer and then fetching the actual objects
again when executing the task.
"""
__slots__ = ('identity_key',)
def __init__(self, obj):
identity_key = inspect(obj).identity_key
if identity_key is None:
raise ValueError('Cannot pass non-persistent object to Celery. Did you forget to flush?')
self.identity_key = identity_key[:2]
@property
def object(self):
obj = self.identity_key[0].get(self.identity_key[1])
if obj is None:
raise ValueError('Object not in DB: {}'.format(self))
return obj
@return_ascii
def __repr__(self):
model, args = self.identity_key[:2]
return '<{}: {}>'.format(model.__name__, ','.join(map(repr, args)))
@classmethod
def wrap_args(cls, args):
return tuple(cls(x) if isinstance(x, db.Model) else x for x in args)
@classmethod
def wrap_kwargs(cls, kwargs):
return {k: cls(v) if isinstance(v, db.Model) else v for k, v in kwargs.iteritems()}
@classmethod
def unwrap_args(cls, args):
return tuple(x.object if isinstance(x, cls) else x for x in args)
@classmethod
def unwrap_kwargs(cls, kwargs):
return {k: v.object if isinstance(v, cls) else v for k, v in kwargs.iteritems()}
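
def _example_periodic_task(celery_app):
    # Hedged sketch, not part of the original module: registers a periodic
    # task on an IndicoCelery instance supplied by the caller (Indico creates
    # its shared instance elsewhere).  The schedule and task body are made up;
    # ``locked=True`` (the default) prevents overlapping runs of the task.
    from celery.schedules import crontab

    @celery_app.periodic_task(run_every=crontab(minute=0, hour=3))
    def _nightly_noop():
        pass

    return _nightly_noop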
| |
# Copyright (c) 2015-2018. Mount Sinai School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division, absolute_import
import gzip
import logging
import os
import subprocess
from shutil import move
from tempfile import NamedTemporaryFile
import zipfile
import requests
import pandas as pd
from six.moves import urllib
from .common import build_path, build_local_filename
logger = logging.getLogger(__name__)
def _download(download_url, timeout=None):
if download_url.startswith("http"):
response = requests.get(download_url, timeout=timeout)
response.raise_for_status()
return response.content
else:
req = urllib.request.Request(download_url)
response = urllib.request.urlopen(req, data=None, timeout=timeout)
return response.read()
def _download_to_temp_file(
download_url,
timeout=None,
base_name="download",
ext="tmp",
use_wget_if_available=False):
if not download_url:
raise ValueError("URL not provided")
with NamedTemporaryFile(
suffix='.' + ext,
prefix=base_name,
delete=False) as tmp:
tmp_path = tmp.name
def download_using_python():
with open(tmp_path, mode="w+b") as tmp_file:
tmp_file.write(
_download(download_url, timeout=timeout))
if not use_wget_if_available:
download_using_python()
else:
try:
# first try using wget to download since this works on Travis
# even when FTP otherwise fails
wget_command_list = [
"wget",
download_url,
"-O", tmp_path,
"--no-verbose",
]
if download_url.startswith("ftp"):
wget_command_list.extend(["--passive-ftp"])
if timeout:
wget_command_list.extend(["-T", "%s" % timeout])
logger.info("Running: %s" % (" ".join(wget_command_list)))
subprocess.call(wget_command_list)
except OSError as e:
if e.errno == os.errno.ENOENT:
# wget not found
download_using_python()
else:
raise
return tmp_path
def _download_and_decompress_if_necessary(
full_path,
download_url,
timeout=None,
use_wget_if_available=False):
"""
Downloads remote file at `download_url` to local file at `full_path`
"""
logger.info("Downloading %s to %s", download_url, full_path)
filename = os.path.split(full_path)[1]
base_name, ext = os.path.splitext(filename)
tmp_path = _download_to_temp_file(
download_url=download_url,
timeout=timeout,
base_name=base_name,
ext=ext,
use_wget_if_available=use_wget_if_available)
if download_url.endswith("zip") and not filename.endswith("zip"):
logger.info("Decompressing zip into %s...", filename)
with zipfile.ZipFile(tmp_path) as z:
names = z.namelist()
assert len(names) > 0, "Empty zip archive"
if filename in names:
chosen_filename = filename
else:
# If zip archive contains multiple files, choose the biggest.
biggest_size = 0
chosen_filename = names[0]
for info in z.infolist():
if info.file_size > biggest_size:
chosen_filename = info.filename
biggest_size = info.file_size
extract_path = z.extract(chosen_filename)
move(extract_path, full_path)
os.remove(tmp_path)
elif download_url.endswith("gz") and not filename.endswith("gz"):
logger.info("Decompressing gzip into %s...", filename)
with gzip.GzipFile(tmp_path) as src:
contents = src.read()
os.remove(tmp_path)
with open(full_path, 'wb') as dst:
dst.write(contents)
elif download_url.endswith(("html", "htm")) and full_path.endswith(".csv"):
logger.info("Extracting HTML table into CSV %s...", filename)
df = pd.read_html(tmp_path, header=0)[0]
df.to_csv(full_path, sep=',', index=False, encoding='utf-8')
else:
move(tmp_path, full_path)
def file_exists(
download_url,
filename=None,
decompress=False,
subdir=None):
"""
Return True if a local file corresponding to these arguments
exists.
"""
filename = build_local_filename(download_url, filename, decompress)
full_path = build_path(filename, subdir)
return os.path.exists(full_path)
def fetch_file(
download_url,
filename=None,
decompress=False,
subdir=None,
force=False,
timeout=None,
use_wget_if_available=False):
"""
Download a remote file and store it locally in a cache directory. Don't
download it again if it's already present (unless `force` is True.)
Parameters
----------
download_url : str
Remote URL of file to download.
filename : str, optional
Local filename, used as cache key. If omitted, then determine the local
filename from the URL.
decompress : bool, optional
        By default any file whose remote extension is one of (".zip", ".gz")
and whose local filename lacks this suffix is decompressed. If a local
filename wasn't provided but you still want to decompress the stored
data then set this option to True.
subdir : str, optional
Group downloads in a single subdirectory.
force : bool, optional
By default, a remote file is not downloaded if it's already present.
However, with this argument set to True, it will be overwritten.
timeout : float, optional
Timeout for download in seconds, default is None which uses
global timeout.
use_wget_if_available: bool, optional
If the `wget` command is available, use that for download instead
        of Python libraries (default False).
Returns the full path of the local file.
"""
filename = build_local_filename(download_url, filename, decompress)
full_path = build_path(filename, subdir)
if not os.path.exists(full_path) or force:
logger.info("Fetching %s from URL %s", filename, download_url)
_download_and_decompress_if_necessary(
full_path=full_path,
download_url=download_url,
timeout=timeout,
use_wget_if_available=use_wget_if_available)
else:
logger.info("Cached file %s from URL %s", filename, download_url)
return full_path
def fetch_and_transform(
transformed_filename,
transformer,
loader,
source_filename,
source_url,
subdir=None):
"""
Fetch a remote file from `source_url`, save it locally as `source_filename` and then use
the `loader` and `transformer` function arguments to turn this saved data into an in-memory
object.
"""
transformed_path = build_path(transformed_filename, subdir)
if not os.path.exists(transformed_path):
        source_path = fetch_file(source_url, source_filename, subdir=subdir)
logger.info("Generating data file %s from %s", transformed_path, source_path)
result = transformer(source_path, transformed_path)
else:
logger.info("Cached data file: %s", transformed_path)
result = loader(transformed_path)
assert os.path.exists(transformed_path)
return result
def fetch_csv_dataframe(
download_url,
filename=None,
subdir=None,
**pandas_kwargs):
"""
Download a remote file from `download_url` and save it locally as `filename`.
Load that local file as a CSV into Pandas using extra keyword arguments such as sep='\t'.
"""
path = fetch_file(
download_url=download_url,
filename=filename,
decompress=True,
subdir=subdir)
return pd.read_csv(path, **pandas_kwargs)
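
def _example_fetch():
    # Hedged sketch, not part of the original module: typical use of the
    # helpers above.  The URL is made up.  fetch_file() caches the download
    # and, because the remote name ends in ".gz" while the local name does
    # not, decompresses it; fetch_csv_dataframe() loads the cached CSV.
    url = "https://example.com/annotations.csv.gz"
    local_path = fetch_file(url, filename="annotations.csv", subdir="example-cache")
    df = fetch_csv_dataframe(url, filename="annotations.csv",
                             subdir="example-cache", sep=",")
    return local_path, df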
| |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test RangeDataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.python.data.kernel_tests import test_base
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_dataset_ops
from tensorflow.python.ops import io_ops
from tensorflow.python.ops import parsing_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import test
class RangeDatasetTest(test_base.DatasetTestBase):
def tearDown(self):
# Remove all checkpoint files.
prefix = self._iterator_checkpoint_prefix()
pattern = prefix + "*"
files = gfile.Glob(pattern)
    for f in files:
      gfile.Remove(f)
def testStop(self):
stop = array_ops.placeholder(dtypes.int64, shape=[])
iterator = dataset_ops.Dataset.range(stop).make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(init_op, feed_dict={stop: 5})
for i in range(5):
self.assertEqual(i, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testStartStop(self):
start = array_ops.placeholder(dtypes.int64, shape=[])
stop = array_ops.placeholder(dtypes.int64, shape=[])
iterator = dataset_ops.Dataset.range(start,
stop).make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(init_op, feed_dict={start: 2, stop: 5})
for i in range(2, 5):
self.assertEqual(i, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testStartStopStep(self):
start = array_ops.placeholder(dtypes.int64, shape=[])
stop = array_ops.placeholder(dtypes.int64, shape=[])
step = array_ops.placeholder(dtypes.int64, shape=[])
iterator = dataset_ops.Dataset.range(start, stop,
step).make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(init_op, feed_dict={start: 2, stop: 10, step: 2})
for i in range(2, 10, 2):
self.assertEqual(i, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testZeroStep(self):
start = array_ops.placeholder(dtypes.int64, shape=[])
stop = array_ops.placeholder(dtypes.int64, shape=[])
step = array_ops.placeholder(dtypes.int64, shape=[])
iterator = dataset_ops.Dataset.range(start, stop,
step).make_initializable_iterator()
init_op = iterator.initializer
with self.cached_session() as sess:
with self.assertRaises(errors.InvalidArgumentError):
sess.run(init_op, feed_dict={start: 2, stop: 10, step: 0})
def testNegativeStep(self):
start = array_ops.placeholder(dtypes.int64, shape=[])
stop = array_ops.placeholder(dtypes.int64, shape=[])
step = array_ops.placeholder(dtypes.int64, shape=[])
iterator = dataset_ops.Dataset.range(start, stop,
step).make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(init_op, feed_dict={start: 2, stop: 10, step: -1})
# This for loop is a no-op but will ensure that the implementation is
# consistent with range if it ever changes.
for i in range(2, 10, -1):
self.assertEqual(i, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testStopLessThanStart(self):
start = array_ops.placeholder(dtypes.int64, shape=[])
stop = array_ops.placeholder(dtypes.int64, shape=[])
iterator = dataset_ops.Dataset.range(start,
stop).make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(init_op, feed_dict={start: 10, stop: 2})
# This for loop is a no-op but will ensure that the implementation is
# consistent with range if it ever changes.
for i in range(10, 2):
self.assertEqual(i, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testStopLessThanStartWithPositiveStep(self):
start = array_ops.placeholder(dtypes.int64, shape=[])
stop = array_ops.placeholder(dtypes.int64, shape=[])
step = array_ops.placeholder(dtypes.int64, shape=[])
iterator = dataset_ops.Dataset.range(start, stop,
step).make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(init_op, feed_dict={start: 10, stop: 2, step: 2})
# This for loop is a no-op but will ensure that the implementation is
# consistent with range if it ever changes.
for i in range(10, 2, 2):
self.assertEqual(i, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testStopLessThanStartWithNegativeStep(self):
start = array_ops.placeholder(dtypes.int64, shape=[])
stop = array_ops.placeholder(dtypes.int64, shape=[])
step = array_ops.placeholder(dtypes.int64, shape=[])
iterator = dataset_ops.Dataset.range(start, stop,
step).make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
with self.cached_session() as sess:
sess.run(init_op, feed_dict={start: 10, stop: 2, step: -1})
for i in range(10, 2, -1):
self.assertEqual(i, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def _iterator_checkpoint_prefix(self):
return os.path.join(self.get_temp_dir(), "iterator")
def _save_op(self, iterator_resource):
iterator_state_variant = gen_dataset_ops.serialize_iterator(
iterator_resource)
save_op = io_ops.write_file(
self._iterator_checkpoint_prefix(),
parsing_ops.serialize_tensor(iterator_state_variant))
return save_op
def _restore_op(self, iterator_resource):
iterator_state_variant = parsing_ops.parse_tensor(
io_ops.read_file(self._iterator_checkpoint_prefix()), dtypes.variant)
restore_op = gen_dataset_ops.deserialize_iterator(iterator_resource,
iterator_state_variant)
return restore_op
def testSaveRestore(self):
def _build_graph(start, stop):
iterator = dataset_ops.Dataset.range(start,
stop).make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
save_op = self._save_op(iterator._iterator_resource)
restore_op = self._restore_op(iterator._iterator_resource)
return init_op, get_next, save_op, restore_op
# Saving and restoring in different sessions.
start = 2
stop = 10
break_point = 5
with ops.Graph().as_default() as g:
init_op, get_next, save_op, _ = _build_graph(start, stop)
with self.session(graph=g) as sess:
sess.run(variables.global_variables_initializer())
sess.run(init_op)
for i in range(start, break_point):
self.assertEqual(i, sess.run(get_next))
sess.run(save_op)
with ops.Graph().as_default() as g:
init_op, get_next, _, restore_op = _build_graph(start, stop)
with self.session(graph=g) as sess:
sess.run(init_op)
sess.run(restore_op)
for i in range(break_point, stop):
self.assertEqual(i, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
# Saving and restoring in same session.
with ops.Graph().as_default() as g:
init_op, get_next, save_op, restore_op = _build_graph(start, stop)
with self.session(graph=g) as sess:
sess.run(variables.global_variables_initializer())
sess.run(init_op)
for i in range(start, break_point):
self.assertEqual(i, sess.run(get_next))
sess.run(save_op)
sess.run(restore_op)
for i in range(break_point, stop):
self.assertEqual(i, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testRestoreWithoutBuildingDatasetGraph(self):
def _build_graph(start, stop, num_epochs):
dataset = dataset_ops.Dataset.range(start, stop).repeat(num_epochs)
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
save_op = self._save_op(iterator._iterator_resource)
restore_op = self._restore_op(iterator._iterator_resource)
return init_op, get_next, save_op, restore_op
# Saving and restoring in different sessions.
start = 2
stop = 10
num_epochs = 5
break_point = 5
break_epoch = 3
with ops.Graph().as_default() as g:
init_op, get_next, save_op, _ = _build_graph(start, stop, num_epochs)
with self.session(graph=g) as sess:
sess.run(variables.global_variables_initializer())
sess.run(init_op)
for _ in range(break_epoch):
for i in range(start, stop):
self.assertEqual(i, sess.run(get_next))
for i in range(start, break_point):
self.assertEqual(i, sess.run(get_next))
sess.run(save_op)
with ops.Graph().as_default() as g:
# Create an empty IteratorResource and restore the Iterator into it.
output_types = dtypes.int64
output_shapes = tensor_shape.scalar()
iterator = iterator_ops.Iterator.from_structure(output_types,
output_shapes)
restore_op = self._restore_op(iterator._iterator_resource)
get_next = iterator.get_next()
with self.session(graph=g) as sess:
sess.run(restore_op)
for i in range(break_point, stop):
self.assertEqual(i, sess.run(get_next))
for _ in range(break_epoch + 1, num_epochs):
for i in range(start, stop):
self.assertEqual(i, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testRestoreInModifiedGraph(self):
def _build_graph(start, stop):
dataset = dataset_ops.Dataset.range(start, stop)
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
save_op = self._save_op(iterator._iterator_resource)
restore_op = self._restore_op(iterator._iterator_resource)
return init_op, get_next, save_op, restore_op
# Saving and restoring in different sessions.
start = 2
stop = 10
stop_1 = 8
break_point = 5
with ops.Graph().as_default() as g:
init_op, get_next, save_op, _ = _build_graph(start, stop)
with self.session(graph=g) as sess:
sess.run(variables.global_variables_initializer())
sess.run(init_op)
for i in range(start, break_point):
self.assertEqual(i, sess.run(get_next))
sess.run(save_op)
with ops.Graph().as_default() as g:
# Intentionally build a graph with a different value for stop to make sure
# the original dataset graph is actually getting loaded.
init_op, get_next, _, restore_op = _build_graph(start, stop_1)
with self.session(graph=g) as sess:
sess.run(restore_op)
for i in range(break_point, stop):
self.assertEqual(i, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testInitThenRestore(self):
# Note: Calling init_op before restore_op is redundant. This test just makes
# sure we do not fail if restore is called on an already initialized
# iterator resource.
def _build_graph(start, stop):
dataset = dataset_ops.Dataset.range(start, stop)
iterator = dataset.make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
save_op = self._save_op(iterator._iterator_resource)
restore_op = self._restore_op(iterator._iterator_resource)
return init_op, get_next, save_op, restore_op
# Saving and restoring in different sessions.
start = 2
stop = 10
break_point = 5
with ops.Graph().as_default() as g:
init_op, get_next, save_op, _ = _build_graph(start, stop)
with self.session(graph=g) as sess:
sess.run(variables.global_variables_initializer())
sess.run(init_op)
for i in range(start, break_point):
self.assertEqual(i, sess.run(get_next))
sess.run(save_op)
with ops.Graph().as_default() as g:
init_op, get_next, _, restore_op = _build_graph(start, stop)
with self.session(graph=g) as sess:
sess.run(init_op)
sess.run(restore_op)
for i in range(break_point, stop):
self.assertEqual(i, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testMultipleSaves(self):
def _build_graph(start, stop):
iterator = dataset_ops.Dataset.range(start,
stop).make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
save_op = self._save_op(iterator._iterator_resource)
restore_op = self._restore_op(iterator._iterator_resource)
return init_op, get_next, save_op, restore_op
start = 2
stop = 10
break_point1 = 5
break_point2 = 7
with ops.Graph().as_default() as g:
init_op, get_next, save_op, _ = _build_graph(start, stop)
with self.session(graph=g) as sess:
sess.run(variables.global_variables_initializer())
sess.run(init_op)
for i in range(start, break_point1):
self.assertEqual(i, sess.run(get_next))
sess.run(save_op)
with ops.Graph().as_default() as g:
init_op, get_next, save_op, restore_op = _build_graph(start, stop)
with self.session(graph=g) as sess:
sess.run(restore_op)
for i in range(break_point1, break_point2):
self.assertEqual(i, sess.run(get_next))
sess.run(save_op)
break_point2 = 7
with ops.Graph().as_default() as g:
init_op, get_next, save_op, restore_op = _build_graph(start, stop)
with self.session(graph=g) as sess:
sess.run(restore_op)
for i in range(break_point2, stop):
self.assertEqual(i, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testSaveRestoreWithRepeat(self):
def _build_graph(start, stop, num_epochs):
iterator = dataset_ops.Dataset.range(
start, stop).repeat(num_epochs).make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
save_op = self._save_op(iterator._iterator_resource)
restore_op = self._restore_op(iterator._iterator_resource)
return init_op, get_next, save_op, restore_op
start = 2
stop = 10
num_epochs = 5
break_range = 5
break_epoch = 3
with ops.Graph().as_default() as g:
init_op, get_next, save_op, restore_op = _build_graph(
start, stop, num_epochs)
with self.session(graph=g) as sess:
sess.run(variables.global_variables_initializer())
sess.run(init_op)
# Note: There is no checkpoint saved currently so a NotFoundError is
# raised.
with self.assertRaises(errors.NotFoundError):
sess.run(restore_op)
for _ in range(break_epoch - 1):
for i in range(start, stop):
self.assertEqual(i, sess.run(get_next))
for i in range(start, break_range):
self.assertEqual(i, sess.run(get_next))
sess.run(save_op)
with ops.Graph().as_default() as g:
init_op, get_next, _, restore_op = _build_graph(start, stop, num_epochs)
with self.session(graph=g) as sess:
sess.run(restore_op)
for i in range(break_range, stop):
self.assertEqual(i, sess.run(get_next))
for _ in range(break_epoch, num_epochs):
for i in range(start, stop):
self.assertEqual(i, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
def testSaveRestoreExhaustedIterator(self):
def _build_graph(start, stop, num_epochs):
iterator = dataset_ops.Dataset.range(
start, stop).repeat(num_epochs).make_initializable_iterator()
init_op = iterator.initializer
get_next = iterator.get_next()
save_op = self._save_op(iterator._iterator_resource)
restore_op = self._restore_op(iterator._iterator_resource)
return init_op, get_next, save_op, restore_op
start = 2
stop = 10
num_epochs = 5
with ops.Graph().as_default() as g:
init_op, get_next, save_op, restore_op = _build_graph(
start, stop, num_epochs)
with self.session(graph=g) as sess:
sess.run(variables.global_variables_initializer())
sess.run(init_op)
# Note: There is no checkpoint saved currently so a NotFoundError is
# raised.
with self.assertRaises(errors.NotFoundError):
sess.run(restore_op)
for _ in range(num_epochs):
for i in range(start, stop):
self.assertEqual(i, sess.run(get_next))
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
sess.run(save_op)
with ops.Graph().as_default() as g:
init_op, get_next, _, restore_op = _build_graph(start, stop, num_epochs)
with self.session(graph=g) as sess:
sess.run(restore_op)
with self.assertRaises(errors.OutOfRangeError):
sess.run(get_next)
if __name__ == "__main__":
test.main()
| |
from __future__ import unicode_literals
import mimetypes
import os
import random
import sys
import time
from email import (charset as Charset, encoders as Encoders,
message_from_string, generator)
from email.message import Message
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.mime.base import MIMEBase
from email.mime.message import MIMEMessage
from email.header import Header
from email.utils import formatdate, getaddresses, formataddr, parseaddr
from django.conf import settings
from django.core.mail.utils import DNS_NAME
from django.utils.encoding import force_text
from django.utils import six
# Don't BASE64-encode UTF-8 messages so that we avoid unwanted attention from
# some spam filters.
utf8_charset = Charset.Charset('utf-8')
utf8_charset.body_encoding = None # Python defaults to BASE64
# Default MIME type to use on attachments (if it is not explicitly given
# and cannot be guessed).
DEFAULT_ATTACHMENT_MIME_TYPE = 'application/octet-stream'
class BadHeaderError(ValueError):
pass
# Copied from Python 3.2+ standard library, with the following modifications:
# * Used cached hostname for performance.
# TODO: replace with email.utils.make_msgid(.., domain=DNS_NAME) when dropping
# Python 2 (Python 2's version doesn't have domain parameter) (#23905).
def make_msgid(idstring=None, domain=None):
"""Returns a string suitable for RFC 2822 compliant Message-ID, e.g:
<20020201195627.33539.96671@nightshade.la.mastaler.com>
Optional idstring if given is a string used to strengthen the
uniqueness of the message id. Optional domain if given provides the
portion of the message id after the '@'. It defaults to the locally
defined hostname.
"""
timeval = time.time()
utcdate = time.strftime('%Y%m%d%H%M%S', time.gmtime(timeval))
pid = os.getpid()
randint = random.randrange(100000)
if idstring is None:
idstring = ''
else:
idstring = '.' + idstring
if domain is None:
# stdlib uses socket.getfqdn() here instead
domain = DNS_NAME
msgid = '<%s.%s.%s%s@%s>' % (utcdate, pid, randint, idstring, domain)
return msgid
# Header names that contain structured address data (RFC #5322)
ADDRESS_HEADERS = {
'from',
'sender',
'reply-to',
'to',
'cc',
'bcc',
'resent-from',
'resent-sender',
'resent-to',
'resent-cc',
'resent-bcc',
}
def forbid_multi_line_headers(name, val, encoding):
"""Forbids multi-line headers, to prevent header injection."""
encoding = encoding or settings.DEFAULT_CHARSET
val = force_text(val)
if '\n' in val or '\r' in val:
raise BadHeaderError("Header values can't contain newlines (got %r for header %r)" % (val, name))
try:
val.encode('ascii')
except UnicodeEncodeError:
if name.lower() in ADDRESS_HEADERS:
val = ', '.join(sanitize_address(addr, encoding)
for addr in getaddresses((val,)))
else:
val = Header(val, encoding).encode()
else:
if name.lower() == 'subject':
val = Header(val).encode()
return str(name), val
def sanitize_address(addr, encoding):
if isinstance(addr, six.string_types):
addr = parseaddr(force_text(addr))
nm, addr = addr
nm = Header(nm, encoding).encode()
try:
addr.encode('ascii')
except UnicodeEncodeError: # IDN
if '@' in addr:
localpart, domain = addr.split('@', 1)
localpart = str(Header(localpart, encoding))
domain = domain.encode('idna').decode('ascii')
addr = '@'.join([localpart, domain])
else:
addr = Header(addr, encoding).encode()
return formataddr((nm, addr))
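
def _example_sanitize_address():
    # Hedged sketch, not part of Django: illustrates sanitize_address() above.
    # Plain ASCII addresses pass through, non-ASCII display names are RFC 2047
    # encoded and IDN domains are punycoded.  The addresses (with escaped
    # umlauts) are made up.
    ascii_addr = sanitize_address('Jane Doe <jane@example.com>', 'utf-8')
    idn_addr = sanitize_address(('Bj\xf6rn', 'bjorn@ex\xe4mple.com'), 'utf-8')
    return ascii_addr, idn_addr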
class MIMEMixin():
def as_string(self, unixfrom=False, linesep='\n'):
"""Return the entire formatted message as a string.
Optional `unixfrom' when True, means include the Unix From_ envelope
header.
This overrides the default as_string() implementation to not mangle
lines that begin with 'From '. See bug #13433 for details.
"""
fp = six.StringIO()
g = generator.Generator(fp, mangle_from_=False)
if six.PY2:
g.flatten(self, unixfrom=unixfrom)
else:
g.flatten(self, unixfrom=unixfrom, linesep=linesep)
return fp.getvalue()
if six.PY2:
as_bytes = as_string
else:
def as_bytes(self, unixfrom=False, linesep='\n'):
"""Return the entire formatted message as bytes.
Optional `unixfrom' when True, means include the Unix From_ envelope
header.
This overrides the default as_bytes() implementation to not mangle
lines that begin with 'From '. See bug #13433 for details.
"""
fp = six.BytesIO()
g = generator.BytesGenerator(fp, mangle_from_=False)
g.flatten(self, unixfrom=unixfrom, linesep=linesep)
return fp.getvalue()
class SafeMIMEMessage(MIMEMixin, MIMEMessage):
def __setitem__(self, name, val):
# message/rfc822 attachments must be ASCII
name, val = forbid_multi_line_headers(name, val, 'ascii')
MIMEMessage.__setitem__(self, name, val)
class SafeMIMEText(MIMEMixin, MIMEText):
def __init__(self, _text, _subtype='plain', _charset=None):
self.encoding = _charset
if _charset == 'utf-8':
# Unfortunately, Python < 3.5 doesn't support setting a Charset instance
# as MIMEText init parameter (http://bugs.python.org/issue16324).
# We do it manually and trigger re-encoding of the payload.
MIMEText.__init__(self, _text, _subtype, None)
del self['Content-Transfer-Encoding']
# Workaround for versions without http://bugs.python.org/issue19063
if (3, 2) < sys.version_info < (3, 3, 4):
payload = _text.encode(utf8_charset.output_charset)
self._payload = payload.decode('ascii', 'surrogateescape')
self.set_charset(utf8_charset)
else:
self.set_payload(_text, utf8_charset)
self.replace_header('Content-Type', 'text/%s; charset="%s"' % (_subtype, _charset))
elif _charset is None:
# the default value of '_charset' is 'us-ascii' on Python 2
MIMEText.__init__(self, _text, _subtype)
else:
MIMEText.__init__(self, _text, _subtype, _charset)
def __setitem__(self, name, val):
name, val = forbid_multi_line_headers(name, val, self.encoding)
MIMEText.__setitem__(self, name, val)
class SafeMIMEMultipart(MIMEMixin, MIMEMultipart):
def __init__(self, _subtype='mixed', boundary=None, _subparts=None, encoding=None, **_params):
self.encoding = encoding
MIMEMultipart.__init__(self, _subtype, boundary, _subparts, **_params)
def __setitem__(self, name, val):
name, val = forbid_multi_line_headers(name, val, self.encoding)
MIMEMultipart.__setitem__(self, name, val)
class EmailMessage(object):
"""
A container for email information.
"""
content_subtype = 'plain'
mixed_subtype = 'mixed'
encoding = None # None => use settings default
def __init__(self, subject='', body='', from_email=None, to=None, bcc=None,
connection=None, attachments=None, headers=None, cc=None,
reply_to=None):
"""
Initialize a single email message (which can be sent to multiple
recipients).
All strings used to create the message can be unicode strings
(or UTF-8 bytestrings). The SafeMIMEText class will handle any
necessary encoding conversions.
"""
if to:
if isinstance(to, six.string_types):
raise TypeError('"to" argument must be a list or tuple')
self.to = list(to)
else:
self.to = []
if cc:
if isinstance(cc, six.string_types):
raise TypeError('"cc" argument must be a list or tuple')
self.cc = list(cc)
else:
self.cc = []
if bcc:
if isinstance(bcc, six.string_types):
raise TypeError('"bcc" argument must be a list or tuple')
self.bcc = list(bcc)
else:
self.bcc = []
if reply_to:
if isinstance(reply_to, six.string_types):
raise TypeError('"reply_to" argument must be a list or tuple')
self.reply_to = list(reply_to)
else:
self.reply_to = []
self.from_email = from_email or settings.DEFAULT_FROM_EMAIL
self.subject = subject
self.body = body
self.attachments = attachments or []
self.extra_headers = headers or {}
self.connection = connection
def get_connection(self, fail_silently=False):
from django.core.mail import get_connection
if not self.connection:
self.connection = get_connection(fail_silently=fail_silently)
return self.connection
def message(self):
encoding = self.encoding or settings.DEFAULT_CHARSET
msg = SafeMIMEText(self.body, self.content_subtype, encoding)
msg = self._create_message(msg)
msg['Subject'] = self.subject
msg['From'] = self.extra_headers.get('From', self.from_email)
msg['To'] = self.extra_headers.get('To', ', '.join(self.to))
if self.cc:
msg['Cc'] = ', '.join(self.cc)
if self.reply_to:
msg['Reply-To'] = self.extra_headers.get('Reply-To', ', '.join(self.reply_to))
# Email header names are case-insensitive (RFC 2045), so we have to
# accommodate that when doing comparisons.
header_names = [key.lower() for key in self.extra_headers]
if 'date' not in header_names:
msg['Date'] = formatdate()
if 'message-id' not in header_names:
# Use cached DNS_NAME for performance
msg['Message-ID'] = make_msgid(domain=DNS_NAME)
for name, value in self.extra_headers.items():
if name.lower() in ('from', 'to'): # From and To are already handled
continue
msg[name] = value
return msg
def recipients(self):
"""
Returns a list of all recipients of the email (includes direct
addressees as well as Cc and Bcc entries).
"""
return self.to + self.cc + self.bcc
def send(self, fail_silently=False):
"""Sends the email message."""
if not self.recipients():
# Don't bother creating the network connection if there's nobody to
# send to.
return 0
return self.get_connection(fail_silently).send_messages([self])
def attach(self, filename=None, content=None, mimetype=None):
"""
Attaches a file with the given filename and content. The filename can
be omitted and the mimetype is guessed, if not provided.
If the first parameter is a MIMEBase subclass it is inserted directly
into the resulting message attachments.
"""
if isinstance(filename, MIMEBase):
assert content is None
assert mimetype is None
self.attachments.append(filename)
else:
assert content is not None
self.attachments.append((filename, content, mimetype))
def attach_file(self, path, mimetype=None):
"""Attaches a file from the filesystem."""
filename = os.path.basename(path)
with open(path, 'rb') as f:
content = f.read()
self.attach(filename, content, mimetype)
def _create_message(self, msg):
return self._create_attachments(msg)
def _create_attachments(self, msg):
if self.attachments:
encoding = self.encoding or settings.DEFAULT_CHARSET
body_msg = msg
msg = SafeMIMEMultipart(_subtype=self.mixed_subtype, encoding=encoding)
if self.body:
msg.attach(body_msg)
for attachment in self.attachments:
if isinstance(attachment, MIMEBase):
msg.attach(attachment)
else:
msg.attach(self._create_attachment(*attachment))
return msg
def _create_mime_attachment(self, content, mimetype):
"""
Converts the content, mimetype pair into a MIME attachment object.
If the mimetype is message/rfc822, content may be an
email.Message or EmailMessage object, as well as a str.
"""
basetype, subtype = mimetype.split('/', 1)
if basetype == 'text':
encoding = self.encoding or settings.DEFAULT_CHARSET
attachment = SafeMIMEText(content, subtype, encoding)
elif basetype == 'message' and subtype == 'rfc822':
# Bug #18967: per RFC2046 s5.2.1, message/rfc822 attachments
# must not be base64 encoded.
if isinstance(content, EmailMessage):
# convert content into an email.Message first
content = content.message()
elif not isinstance(content, Message):
# For compatibility with existing code, parse the message
# into an email.Message object if it is not one already.
content = message_from_string(content)
attachment = SafeMIMEMessage(content, subtype)
else:
# Encode non-text attachments with base64.
attachment = MIMEBase(basetype, subtype)
attachment.set_payload(content)
Encoders.encode_base64(attachment)
return attachment
def _create_attachment(self, filename, content, mimetype=None):
"""
Converts the filename, content, mimetype triple into a MIME attachment
object.
"""
if mimetype is None:
mimetype, _ = mimetypes.guess_type(filename)
if mimetype is None:
mimetype = DEFAULT_ATTACHMENT_MIME_TYPE
attachment = self._create_mime_attachment(content, mimetype)
if filename:
try:
filename.encode('ascii')
except UnicodeEncodeError:
if six.PY2:
filename = filename.encode('utf-8')
filename = ('utf-8', '', filename)
attachment.add_header('Content-Disposition', 'attachment',
filename=filename)
return attachment
class EmailMultiAlternatives(EmailMessage):
"""
A version of EmailMessage that makes it easy to send multipart/alternative
messages. For example, including text and HTML versions of the text is
made easier.
"""
alternative_subtype = 'alternative'
def __init__(self, subject='', body='', from_email=None, to=None, bcc=None,
connection=None, attachments=None, headers=None, alternatives=None,
cc=None, reply_to=None):
"""
Initialize a single email message (which can be sent to multiple
recipients).
All strings used to create the message can be unicode strings (or UTF-8
bytestrings). The SafeMIMEText class will handle any necessary encoding
conversions.
"""
super(EmailMultiAlternatives, self).__init__(
subject, body, from_email, to, bcc, connection, attachments,
headers, cc, reply_to,
)
self.alternatives = alternatives or []
def attach_alternative(self, content, mimetype):
"""Attach an alternative content representation."""
assert content is not None
assert mimetype is not None
self.alternatives.append((content, mimetype))
def _create_message(self, msg):
return self._create_attachments(self._create_alternatives(msg))
def _create_alternatives(self, msg):
encoding = self.encoding or settings.DEFAULT_CHARSET
if self.alternatives:
body_msg = msg
msg = SafeMIMEMultipart(_subtype=self.alternative_subtype, encoding=encoding)
if self.body:
msg.attach(body_msg)
for alternative in self.alternatives:
msg.attach(self._create_mime_attachment(*alternative))
return msg
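# Illustrative usage sketch (not part of this module): building a text+HTML
# message with a CSV attachment. Assumes DEFAULT_FROM_EMAIL and an email
# backend are configured in settings; addresses and content are examples.
def _example_multipart_message():
    msg = EmailMultiAlternatives(
        subject='Monthly report',
        body='Plain-text body',
        to=['someone@example.com'],
        reply_to=['replies@example.com'],
    )
    msg.attach_alternative('<p>HTML body</p>', 'text/html')
    msg.attach('report.csv', 'a,b\n1,2\n', 'text/csv')
    return msg.message()  # assembled MIME message; msg.send() would deliver it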
| |
import datetime
import itertools
from django.db import connection
from django.db.models import Max, Sum
from elasticsearch.helpers import bulk as bulk_index
import olympia.core.logger
from olympia import amo
from olympia.addons.models import Addon
from olympia.amo import search as amo_search
from olympia.amo.celery import task
from olympia.bandwagon.models import Collection
from olympia.ratings.models import Rating
from olympia.users.models import UserProfile
from olympia.versions.models import Version
from . import search
from .models import (
AddonCollectionCount, CollectionCount, CollectionStats, DownloadCount,
ThemeUserCount, UpdateCount)
log = olympia.core.logger.getLogger('z.task')
@task
def update_addons_collections_downloads(data, **kw):
log.info("[%s] Updating addons+collections download totals." %
(len(data)))
query = (
"UPDATE addons_collections SET downloads=%s WHERE addon_id=%s "
"AND collection_id=%s;" * len(data))
with connection.cursor() as cursor:
cursor.execute(
query,
list(itertools.chain.from_iterable(
[var['sum'], var['addon'], var['collection']]
for var in data)))
@task
def update_collections_total(data, **kw):
log.info("[%s] Updating collections' download totals." %
(len(data)))
for var in data:
(Collection.objects.filter(pk=var['collection_id'])
.update(downloads=var['sum']))
@task
def update_global_totals(job, date, **kw):
log.info('Updating global statistics totals (%s) for (%s)' % (job, date))
jobs = _get_daily_jobs(date)
jobs.update(_get_metrics_jobs(date))
num = jobs[job]()
q = """REPLACE INTO global_stats (`name`, `count`, `date`)
VALUES (%s, %s, %s)"""
p = [job, num or 0, date]
try:
cursor = connection.cursor()
cursor.execute(q, p)
except Exception as e:
log.critical('Failed to update global stats: (%s): %s' % (p, e))
else:
log.debug('Committed global stats details: (%s) has (%s) for (%s)'
% tuple(p))
finally:
cursor.close()
def _get_daily_jobs(date=None):
"""Return a dictionary of statistics queries.
If a date is specified and applies to the job it will be used. Otherwise
the date will default to the previous day.
"""
if not date:
date = datetime.date.today() - datetime.timedelta(days=1)
# Passing through a datetime would not generate an error,
# but would pass and give incorrect values.
if isinstance(date, datetime.datetime):
raise ValueError('This requires a valid date, not a datetime')
# Filtering on a created date with lte doesn't get you today's date; you need
# to filter on less than the next date, because 2012-1-1 becomes 2012-1-1 00:00
next_date = date + datetime.timedelta(days=1)
date_str = date.strftime('%Y-%m-%d')
extra = dict(where=['DATE(created)=%s'], params=[date_str])
# If you're editing these, note that you are returning a function! This
# cheesy hackery was done so that we could pass the queries to celery
# lazily and not hammer the db with a ton of these all at once.
stats = {
# Add-on Downloads
'addon_total_downloads': lambda: DownloadCount.objects.filter(
date__lt=next_date).aggregate(sum=Sum('count'))['sum'],
'addon_downloads_new': lambda: DownloadCount.objects.filter(
date=date).aggregate(sum=Sum('count'))['sum'],
# Listed Add-on counts
'addon_count_new': Addon.objects.valid().extra(**extra).count,
# Listed Version counts
'version_count_new': Version.objects.filter(
channel=amo.RELEASE_CHANNEL_LISTED).extra(**extra).count,
# User counts
'user_count_total': UserProfile.objects.filter(
created__lt=next_date).count,
'user_count_new': UserProfile.objects.extra(**extra).count,
# Rating counts
'review_count_total': Rating.objects.filter(created__lte=date,
editorreview=0).count,
# We can't use "**extra" here, because this query joins on reviews
# itself, and thus raises the following error:
# "Column 'created' in where clause is ambiguous".
'review_count_new': Rating.objects.filter(editorreview=0).extra(
where=['DATE(reviews.created)=%s'], params=[date_str]).count,
# Collection counts
'collection_count_total': Collection.objects.filter(
created__lt=next_date).count,
'collection_count_new': Collection.objects.extra(**extra).count,
'collection_addon_downloads': (
lambda: AddonCollectionCount.objects.filter(
date__lte=date).aggregate(sum=Sum('count'))['sum']),
}
# If we're processing today's stats, we'll do some extras. We don't do
# these for re-processed stats because they change over time (e.g. add-ons
# move from sandbox -> public).
if date == (datetime.date.today() - datetime.timedelta(days=1)):
stats.update({
'addon_count_nominated': Addon.objects.filter(
created__lte=date, status=amo.STATUS_NOMINATED,
disabled_by_user=0).count,
'addon_count_public': Addon.objects.filter(
created__lte=date, status=amo.STATUS_PUBLIC,
disabled_by_user=0).count,
'addon_count_pending': Version.objects.filter(
created__lte=date, files__status=amo.STATUS_PENDING).count,
'collection_count_private': Collection.objects.filter(
created__lte=date, listed=0).count,
'collection_count_public': Collection.objects.filter(
created__lte=date, listed=1).count,
'collection_count_editorspicks': Collection.objects.filter(
created__lte=date, type=amo.COLLECTION_FEATURED).count,
'collection_count_normal': Collection.objects.filter(
created__lte=date, type=amo.COLLECTION_NORMAL).count,
})
return stats
def _get_metrics_jobs(date=None):
"""Return a dictionary of statistics queries.
If a date is specified and applies to the job it will be used. Otherwise
the date will default to the last date metrics put something in the db.
"""
if not date:
date = UpdateCount.objects.aggregate(max=Max('date'))['max']
# If you're editing these, note that you are returning a function!
stats = {
'addon_total_updatepings': lambda: UpdateCount.objects.filter(
date=date).aggregate(sum=Sum('count'))['sum'],
}
return stats
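# Illustrative sketch (not part of the task code): both helpers above return
# zero-argument callables rather than results, so the underlying queries only
# run when invoked, as update_global_totals() does via jobs[job]().
def _example_run_single_job(job_name, date):
    jobs = _get_daily_jobs(date)
    jobs.update(_get_metrics_jobs(date))
    return jobs[job_name]()  # the query executes only at this point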
@task
def index_update_counts(ids, index=None, **kw):
index = index or search.get_alias()
es = amo_search.get_es()
qs = UpdateCount.objects.filter(id__in=ids)
if qs.exists():
log.info('Indexing %s updates for %s.' % (qs.count(), qs[0].date))
data = []
try:
for update in qs:
data.append(search.extract_update_count(update))
bulk_index(es, data, index=index,
doc_type=UpdateCount.get_mapping_type(), refresh=True)
except Exception as exc:
index_update_counts.retry(args=[ids, index], exc=exc, **kw)
raise
@task
def index_download_counts(ids, index=None, **kw):
index = index or search.get_alias()
es = amo_search.get_es()
qs = DownloadCount.objects.filter(id__in=ids)
if qs.exists():
log.info('Indexing %s downloads for %s.' % (qs.count(), qs[0].date))
try:
data = []
for dl in qs:
data.append(search.extract_download_count(dl))
bulk_index(es, data, index=index,
doc_type=DownloadCount.get_mapping_type(), refresh=True)
except Exception as exc:
index_download_counts.retry(args=[ids, index], exc=exc)
raise
@task
def index_collection_counts(ids, index=None, **kw):
index = index or search.get_alias()
es = amo_search.get_es()
qs = CollectionCount.objects.filter(collection__in=ids)
if qs.exists():
log.info('Indexing %s addon collection counts: %s'
% (qs.count(), qs[0].date))
data = []
try:
for collection_count in qs:
collection = collection_count.collection_id
filters = dict(collection=collection,
date=collection_count.date)
data.append(search.extract_addon_collection(
collection_count,
AddonCollectionCount.objects.filter(**filters),
CollectionStats.objects.filter(**filters)))
bulk_index(es, data, index=index,
doc_type=CollectionCount.get_mapping_type(),
refresh=True)
except Exception as exc:
index_collection_counts.retry(args=[ids], exc=exc)
raise
@task
def index_theme_user_counts(ids, index=None, **kw):
index = index or search.get_alias()
es = amo_search.get_es()
qs = ThemeUserCount.objects.filter(id__in=ids)
if qs.exists():
log.info('Indexing %s theme user counts for %s.'
% (qs.count(), qs[0].date))
data = []
try:
for user_count in qs:
data.append(search.extract_theme_user_count(user_count))
bulk_index(es, data, index=index,
doc_type=ThemeUserCount.get_mapping_type(), refresh=True)
except Exception as exc:
index_theme_user_counts.retry(args=[ids], exc=exc, **kw)
raise
| |
# toontown.fishing.FishermanGUI
from panda3d.core import TextNode
from direct.gui.DirectGui import *
from toontown.toontowngui import TTDialog
from toontown.toonbase import ToontownGlobals, TTLocalizer
from toontown.toon import NPCToons
from FishSellGUI import FishSellGUI
from FishBrowser import FishBrowser
import FishGlobals, math
class BaitGUI:
def __init__(self, buyCallback, cancelCallback):
self.loaded = False
self.buyCallback = buyCallback
self.cancelCallback = cancelCallback
def load(self):
if self.loaded:
return
else:
self.frame = DirectFrame(relief=None, geom=DGG.getDefaultDialogGeom(), geom_color=ToontownGlobals.GlobalDialogColor, geom_scale=(1.3, 1, 1.5), pos=(0, 0, 0.14))
self.browser = FishBrowser(self.frame, pos=(0, 0, 0.15), scale=0.8, command=self.updateFish)
self.title = DirectLabel(self.frame, relief=None, text=TTLocalizer.BaitBuyTitle, text_scale=0.08, pos=(0, 0, 0.65))
self.slider = DirectSlider(self.frame, state=DGG.NORMAL, thumb_relief=None, pageSize=5, thumb_geom=Preloaded['circle'], thumb_geom_scale=2, scale=0.4, pos=(0, 0, -0.35), range=(1, 1.01), command=self.updateSliderLabel)
self.sliderLabel = DirectLabel(self.frame, relief=None, text_scale=0.045, text=TTLocalizer.BaitBuyUnknownText, pos=(0, 0, -0.44))
self.buyButton = DirectButton(self.frame, state=DGG.NORMAL, relief=None, geom=Preloaded['yellowButton'], geom_scale=(0.6, 1, 1), text=TTLocalizer.BaitBuyText, text_scale=0.06, text_pos=(0, -0.01), pos=(-0.3, 0, -0.65), command=self.__buy)
self.cancelButton = DirectButton(self.frame, relief=None, geom=Preloaded['yellowButton'], geom_scale=(0.6, 1, 1), text=TTLocalizer.lCancel, text_scale=0.06, text_pos=(0, -0.01), pos=(0.3, 0, -0.65), command=self.__cancel)
self.beanBank = DirectLabel(self.frame, relief=None, pos=(-0.3, 0, -0.62), scale=0.5, image=Preloaded['beanBank'], text='', text_align=TextNode.ARight, text_scale=0.11, text_fg=(0.95, 0.95, 0, 1), text_shadow=(0, 0, 0, 1), text_pos=(0.75, -0.81), text_font=ToontownGlobals.getSignFont())
self.loaded = True
return
def destroy(self):
if not self.loaded:
return
self.frame.destroy()
self.browser.destroy()
del self.frame
del self.browser
del self.title
del self.slider
del self.sliderLabel
del self.buyButton
del self.cancelButton
del self.beanBank
self.loaded = False
def getBaitNum(self):
return int(math.floor(self.slider['value']))
def getCurrentBaitNum(self, bait):
return base.localAvatar.fishingBaits.get(bait, 0)
def getBait(self):
if not hasattr(self, 'browser'):
return 0
return self.browser.index * 2
def isUnlocked(self, genus):
return len(FishGlobals.getSpecies(genus)) == 1 or base.localAvatar.fishCollection.hasGenus(genus)
def disableGui(self, text):
self.slider['state'] = DGG.DISABLED
self.buyButton['state'] = DGG.DISABLED
self.sliderLabel['text'] = text
self.beanBank['text'] = str(base.localAvatar.getBankMoney())
def updateFish(self):
if not hasattr(self, 'slider'):
return
bait = self.getBait()
if not self.isUnlocked(bait):
self.disableGui(TTLocalizer.BaitBuyUnknownText)
return
maxValue = min(65535 - self.getCurrentBaitNum(bait), FishGlobals.MaxBaitBuy)
maxValue = min(maxValue, int(math.floor(base.localAvatar.getBankMoney() / FishGlobals.BaitPrice[bait])))
if maxValue <= 0:
self.disableGui(TTLocalizer.BaitBuyFullText)
return
self.slider['range'] = (1, maxValue + 0.01)
self.slider['value'] = 1
self.slider['state'] = DGG.NORMAL
self.buyButton['state'] = DGG.NORMAL
self.updateSliderLabel()
def enter(self):
self.updateFish()
def updateSliderLabel(self):
bait = self.getBait()
baitNum = self.getBaitNum()
name = TTLocalizer.FishGenusNames[bait]
currentBait = self.getCurrentBaitNum(bait)
jellybeanCost = int(math.ceil(baitNum * FishGlobals.BaitPrice[bait]))
if self.isUnlocked(bait):
self.sliderLabel['text'] = TTLocalizer.BaitBuyHelpText % (baitNum,
name,
jellybeanCost,
currentBait)
self.beanBank['text'] = str(base.localAvatar.getBankMoney() - jellybeanCost)
def __buy(self):
self.buyCallback(self.getBait(), self.getBaitNum())
self.destroy()
def __cancel(self):
self.destroy()
self.cancelCallback()
class RepairGUI:
def __init__(self, repairCallback, cancelCallback):
self.loaded = False
self.repairCallback = repairCallback
self.cancelCallback = cancelCallback
def load(self):
if self.loaded:
return
else:
self.frame = DirectFrame(relief=None, geom=DGG.getDefaultDialogGeom(), geom_color=ToontownGlobals.GlobalDialogColor, geom_scale=(1, 1, 0.6))
self.title = DirectLabel(self.frame, relief=None, text='', text_scale=0.07, text_wordwrap=13, pos=(0, 0, 0.2))
self.slider = DirectSlider(self.frame, thumb_relief=None, pageSize=5, thumb_geom=Preloaded['circle'], thumb_geom_scale=2, scale=0.4, pos=(0, 0, 0.025), command=self.updateSliderLabel)
self.sliderLabel = DirectLabel(self.frame, relief=None, text_scale=0.045, text='', pos=(0, 0, -0.065))
self.repairButton = DirectButton(self.frame, relief=None, geom=Preloaded['yellowButton'], geom_scale=(0.6, 1, 1), text=TTLocalizer.RodRepairText, text_scale=0.06, text_pos=(0, -0.01), pos=(-0.3, 0, -0.2), command=self.__repair)
self.cancelButton = DirectButton(self.frame, relief=None, geom=Preloaded['yellowButton'], geom_scale=(0.6, 1, 1), text=TTLocalizer.lCancel, text_scale=0.06, text_pos=(0, -0.01), pos=(0.3, 0, -0.2), command=self.__cancel)
self.beanBank = DirectLabel(self.frame, relief=None, pos=(-0.35, 0, -0.12), scale=0.6, image=Preloaded['beanBank'], text='', text_align=TextNode.ARight, text_scale=0.11, text_fg=(0.95, 0.95, 0, 1), text_shadow=(0, 0, 0, 1), text_pos=(0.75, -0.81), text_font=ToontownGlobals.getSignFont())
self.loaded = True
return
def destroy(self):
if not self.loaded:
return
self.frame.destroy()
del self.frame
del self.title
del self.slider
del self.sliderLabel
del self.repairButton
del self.cancelButton
del self.beanBank
self.loaded = False
def getDurability(self, rod):
return (base.localAvatar.getFishingRodDurability()[rod], FishGlobals.Rod2Durability[rod], FishGlobals.Rod2RepairJellybean[rod])
def enter(self):
rod = base.localAvatar.getFishingRod()
durability, maxDurability, jellybeanCost = self.getDurability(rod)
durabilityCeil = min(maxDurability - durability, int(math.floor(base.localAvatar.getBankMoney() / jellybeanCost)))
self.title['text'] = TTLocalizer.RodRepairTitle % TTLocalizer.FishingRodNameDict[rod]
self.slider['range'] = (1, durabilityCeil + 0.01)
self.slider['value'] = 1
self.updateSliderLabel()
def getDurabilityRecover(self):
return int(math.floor(self.slider['value']))
def updateSliderLabel(self):
rod = base.localAvatar.getFishingRod()
durability, maxDurability, jellybeanCost = self.getDurability(rod)
durabilityRecover = self.getDurabilityRecover()
jellybeanCost = int(math.ceil(durabilityRecover * jellybeanCost))
self.sliderLabel['text'] = '%s/%s\n%s' % (durability + durabilityRecover, maxDurability, TTLocalizer.PaintGUINotice % jellybeanCost)
self.beanBank['text'] = str(base.localAvatar.getBankMoney() - jellybeanCost)
def __repair(self):
self.repairCallback(self.getDurabilityRecover())
self.destroy()
def __cancel(self):
self.destroy()
self.cancelCallback()
class FishermanGUI:
def __init__(self, saleCallback, repairCallback, upgradeCallback, buyBaitCallback, pos = (0, 0, 0)):
self.loaded = False
self.saleCallback = saleCallback
self.repairCallback = repairCallback
self.upgradeCallback = upgradeCallback
self.buyBaitCallback = buyBaitCallback
self.pos = pos
def load(self):
if self.loaded:
return
else:
self.frame = DirectFrame(relief=None, geom=DGG.getDefaultDialogGeom(), geom_color=ToontownGlobals.GlobalDialogColor, geom_scale=(0.9, 1, 1), pos=self.pos)
i = 0.375
self.repairButton = DirectButton(self.frame, relief=None, state=DGG.NORMAL, geom=Preloaded['squareBox'], geom_scale=(0.75, 1, 0.1), geom_color=(0, 0.5, 1, 1), text=TTLocalizer.FishermanRepair, text_scale=0.08, text_pos=(0, -0.02), text_shadow=(0, 0, 0, 1), text_fg=(1, 1, 1, 1), pos=(0, 0, i), command=self.__repair)
self.sellButton = DirectButton(self.frame, relief=None, state=DGG.NORMAL, geom=Preloaded['squareBox'], geom_scale=(0.75, 1, 0.1), geom_color=(0, 0.5, 1, 1), text=TTLocalizer.PetshopSell, text_scale=0.08, text_pos=(0, -0.02), text_shadow=(0, 0, 0, 1), text_fg=(1, 1, 1, 1), pos=(0, 0, i - 0.12), command=self.__sell)
self.buyRodButton = DirectButton(self.frame, relief=None, state=DGG.NORMAL, geom=Preloaded['squareBox'], geom_scale=(0.75, 1, 0.1), geom_color=(0, 0.5, 1, 1), text=TTLocalizer.FishermanBuyRod, text_scale=0.08, text_pos=(0, -0.02), text_shadow=(0, 0, 0, 1), text_fg=(1, 1, 1, 1), pos=(0, 0, i - 0.24), command=self.__buyRod)
self.upgradeTankButton = DirectButton(self.frame, relief=None, state=DGG.NORMAL, geom=Preloaded['squareBox'], geom_scale=(0.75, 1, 0.1), geom_color=(0, 0.5, 1, 1), text=TTLocalizer.FishermanUpgradeTank, text_scale=0.08, text_pos=(0, -0.02), text_shadow=(0, 0, 0, 1), text_fg=(1, 1, 1, 1), pos=(0, 0, i - 0.36), command=self.__upgradeTank)
self.buyLureButton = DirectButton(self.frame, relief=None, state=DGG.NORMAL, geom=Preloaded['squareBox'], geom_scale=(0.75, 1, 0.1), geom_color=(0, 0.5, 1, 1), text=TTLocalizer.FishermanBuyLure, text_scale=0.08, text_pos=(0, -0.02), text_shadow=(0, 0, 0, 1), text_fg=(1, 1, 1, 1), pos=(0, 0, i - 0.48), command=self.__buyLure)
self.buyBaitButton = DirectButton(self.frame, relief=None, state=DGG.NORMAL, geom=Preloaded['squareBox'], geom_scale=(0.75, 1, 0.1), geom_color=(0, 0.5, 1, 1), text=TTLocalizer.FishermanBuyBaits, text_scale=0.08, text_pos=(0, -0.02), text_shadow=(0, 0, 0, 1), text_fg=(1, 1, 1, 1), pos=(0, 0, i - 0.6), command=self.__buyBait)
self.cancelButton = DirectButton(self.frame, relief=None, state=DGG.NORMAL, geom=Preloaded['squareBox'], geom_scale=(0.75, 1, 0.1), geom_color=(1, 0, 0, 1), text=TTLocalizer.lCancel, text_scale=0.08, text_pos=(0, -0.02), text_shadow=(0, 0, 0, 1), text_fg=(1, 1, 1, 1), pos=(0, 0, i - 0.72), command=self.__cancel)
self.gui = None
self.loaded = True
return
def destroy(self):
if self.gui:
self.gui.destroy()
self.gui = None
if not self.loaded:
return
else:
self.frame.destroy()
del self.frame
del self.repairButton
del self.sellButton
del self.buyRodButton
del self.upgradeTankButton
del self.buyLureButton
del self.cancelButton
self.loaded = False
return
def enter(self):
rod = base.localAvatar.getFishingRod()
self.setButtonState(self.repairButton, base.localAvatar.getBankMoney() >= 1 and base.localAvatar.getFishingRodDurability()[rod] < FishGlobals.Rod2Durability[rod])
self.setButtonState(self.sellButton, base.localAvatar.fishTank.getTotalValue() > 0)
self.setButtonState(self.buyRodButton, base.localAvatar.getMaxFishingRod() < FishGlobals.MaxRodId)
self.setButtonState(self.upgradeTankButton, base.localAvatar.getMaxFishTank() < FishGlobals.MaxTank)
self.setButtonState(self.buyLureButton, base.localAvatar.getMaxFishingLure() < FishGlobals.MaxLureId)
self.setButtonState(self.buyBaitButton, base.localAvatar.getBankMoney() >= 3)
def setButtonState(self, button, state):
state = DGG.NORMAL if state else DGG.DISABLED
button['geom_color'] = (0, 0.5, 1, 1) if state == DGG.NORMAL else (0.5, 0.5, 0.5, 1)
button['state'] = state
def __repair(self):
self.destroy()
self.gui = RepairGUI(self.repairCallback, lambda : self.saleCallback(False))
self.gui.load()
self.gui.enter()
def __buyBait(self):
self.destroy()
self.gui = BaitGUI(self.buyBaitCallback, lambda : self.saleCallback(False))
self.gui.load()
self.gui.enter()
def __dialogAnswer(self, upgradeType, answer):
if answer < 0:
self.__cancel()
else:
self.upgradeCallback(upgradeType)
self.destroy()
def __buyRod(self):
next = base.localAvatar.getMaxFishingRod() + 1
self.__buy(next, FishGlobals.Rod2RequiredFish, TTLocalizer.FishermanRod, FishGlobals.RodPriceDict, '%s %s' % (TTLocalizer.FishingRodNameDict[next], TTLocalizer.FishermanRod), NPCToons.SELL_MOVIE_NEWROD)
def __upgradeTank(self):
priceDict = sorted(FishGlobals.TankPriceDict.keys())
next = priceDict[priceDict.index(base.localAvatar.getMaxFishTank()) + 1]
self.__buy(next, FishGlobals.Tank2RequiredFish, TTLocalizer.TankTypeName, FishGlobals.TankPriceDict, TTLocalizer.FishTank % next, NPCToons.SELL_MOVIE_NEWTANK)
def __buyLure(self):
next = base.localAvatar.getMaxFishingLure() + 1
self.__buy(next, FishGlobals.Lure2RequiredFish, TTLocalizer.LureTypeName, FishGlobals.LurePriceDict, '%s %s' % (TTLocalizer.FishingLureColors[next], TTLocalizer.FishermanLure), NPCToons.SELL_MOVIE_NEWLURE)
def __buy(self, next, requiredFishDict, name, priceDict, fullName, upgradeType):
self.destroy()
currentFish = len(base.localAvatar.fishCollection)
requiredFish = requiredFishDict[next]
if currentFish < requiredFish:
self.gui = TTDialog.TTDialog(style=TTDialog.Acknowledge, fadeScreen=True, text_wordwrap=15, text=TTLocalizer.FishermanUnlockMessage % (name.lower(), requiredFish, requiredFish - currentFish), command=self.__cancel)
return
price = priceDict[next]
bankMoney = base.localAvatar.getBankMoney()
if bankMoney < price:
self.gui = TTDialog.TTDialog(style=TTDialog.Acknowledge, fadeScreen=True, text_wordwrap=15, text=TTLocalizer.FishermanNotEnough % (price, bankMoney), command=self.__cancel)
return
self.gui = TTDialog.TTDialog(style=TTDialog.TwoChoice, fadeScreen=True, text_wordwrap=15, text=TTLocalizer.FishermanBuyQuestion % (fullName, price, bankMoney), command=lambda answer: self.__dialogAnswer(upgradeType, answer))
def __sell(self):
self.destroy()
self.gui = FishSellGUI(self.saleCallback)
def __cancel(self, _ = None):
self.saleCallback(False)
self.destroy()
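# Illustrative sketch (not part of the original module): the GUI classes above
# are driven entirely through callbacks supplied by the caller. The callback
# names below are examples, not part of the game code.
def _exampleOpenFishermanGui():
    def onSale(sold):
        pass  # called with False when the menu is cancelled
    def onRepair(durabilityRecover):
        pass
    def onUpgrade(upgradeType):
        pass
    def onBuyBait(bait, amount):
        pass
    gui = FishermanGUI(onSale, onRepair, onUpgrade, onBuyBait)
    gui.load()
    gui.enter()
    return gui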
| |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import os
import subprocess
from django.conf import settings
from django.test import TestCase, override_settings
from typing import Any, Dict, List
from zproject.settings import DEPLOY_ROOT
from zerver.lib.integrations import INTEGRATIONS, HUBOT_LOZENGES
from zerver.lib.test_classes import ZulipTestCase
from zerver.lib.test_helpers import HostRequestMock
from zerver.lib.utils import split_by
from zerver.views.integrations import (
add_api_uri_context,
add_integrations_context,
)
class DocPageTest(ZulipTestCase):
def _test(self, url, expected_content, extra_strings=[]):
# type: (str, str, List[str]) -> None
result = self.client_get(url)
self.assertEqual(result.status_code, 200)
self.assertIn(expected_content, str(result.content))
for s in extra_strings:
self.assertIn(s, str(result.content))
def test_doc_endpoints(self):
# type: () -> None
self._test('/api/', 'We hear you like APIs')
self._test('/api/endpoints/', 'pre-built API bindings for')
self._test('/about/', 'Cambridge, Massachusetts')
# Test the i18n version of one of these pages.
self._test('/en/about/', 'Cambridge, Massachusetts')
self._test('/apps/', 'Apps for every platform.')
self._test('/features/', 'Beautiful messaging')
self._test('/hello/', 'productive group chat')
self._test('/why-zulip/', 'all stakeholders can see and')
self._test('/for/open-source/', 'for open source projects')
self._test('/for/companies/', 'in a company')
self._test('/for/working-groups-and-communities/', 'standards bodies')
self._test('/integrations/',
'Over 60 native integrations.',
extra_strings=[
'And hundreds more through',
'Hubot',
'Zapier',
'IFTTT'
])
self._test('/integrations/doc-html/travis', 'Your Travis CI notifications may look like:')
self._test('/devlogin/', 'Normal users')
self._test('/devtools/', 'Useful development URLs')
self._test('/errors/404/', 'Page not found')
self._test('/errors/5xx/', 'Internal server error')
self._test('/emails/', 'Road Runner invited you to join Acme Corporation')
self._test('/register/', 'Sign up for Zulip')
result = self.client_get('/integrations/doc-html/nonexistent_integration', follow=True)
self.assertEqual(result.status_code, 404)
result = self.client_get('/new-user/')
self.assertEqual(result.status_code, 301)
self.assertIn('hello', result['Location'])
result = self.client_get('/robots.txt')
self.assertEqual(result.status_code, 301)
self.assertIn('static/robots.txt', result['Location'])
result = self.client_get('/static/robots.txt')
self.assertEqual(result.status_code, 200)
self.assertIn(
'Disallow: /',
''.join(str(x) for x in list(result.streaming_content))
)
class IntegrationTest(TestCase):
def test_check_if_every_integration_has_logo_that_exists(self):
# type: () -> None
for integration in INTEGRATIONS.values():
self.assertTrue(os.path.isfile(os.path.join(DEPLOY_ROOT, integration.logo)))
def test_check_if_every_hubot_lozenges_has_logo_that_exists(self):
# type: () -> None
for integration in HUBOT_LOZENGES.values():
self.assertTrue(os.path.isfile(os.path.join(DEPLOY_ROOT, integration.logo)))
@override_settings(REALMS_HAVE_SUBDOMAINS=False)
def test_api_url_view_base(self):
# type: () -> None
context = dict() # type: Dict[str, Any]
add_api_uri_context(context, HostRequestMock())
self.assertEqual(context["external_api_path_subdomain"], "testserver/api")
self.assertEqual(context["external_api_uri_subdomain"], "http://testserver/api")
self.assertTrue(context["html_settings_links"])
@override_settings(REALMS_HAVE_SUBDOMAINS=True)
def test_api_url_view_subdomains_base(self):
# type: () -> None
context = dict() # type: Dict[str, Any]
add_api_uri_context(context, HostRequestMock())
self.assertEqual(context["external_api_path_subdomain"], "testserver/api")
self.assertEqual(context["external_api_uri_subdomain"], "http://testserver/api")
self.assertTrue(context["html_settings_links"])
@override_settings(REALMS_HAVE_SUBDOMAINS=True)
@override_settings(SUBDOMAINS_HOMEPAGE=True)
def test_api_url_view_subdomains_homepage_base(self):
# type: () -> None
context = dict() # type: Dict[str, Any]
add_api_uri_context(context, HostRequestMock())
self.assertEqual(context["external_api_path_subdomain"], "yourZulipDomain.testserver/api")
self.assertEqual(context["external_api_uri_subdomain"], "http://yourZulipDomain.testserver/api")
self.assertFalse(context["html_settings_links"])
@override_settings(REALMS_HAVE_SUBDOMAINS=True)
def test_api_url_view_subdomains_full(self):
# type: () -> None
context = dict() # type: Dict[str, Any]
request = HostRequestMock(host="mysubdomain.testserver")
add_api_uri_context(context, request)
self.assertEqual(context["external_api_path_subdomain"], "mysubdomain.testserver/api")
self.assertEqual(context["external_api_uri_subdomain"], "http://mysubdomain.testserver/api")
self.assertTrue(context["html_settings_links"])
def test_integration_view_html_settings_links(self):
# type: () -> None
context = dict()
context['html_settings_links'] = False
add_integrations_context(context)
self.assertEqual(
context['settings_html'],
'Zulip settings page')
self.assertEqual(
context['subscriptions_html'],
'streams page')
context = dict()
context['html_settings_links'] = True
add_integrations_context(context)
self.assertEqual(
context['settings_html'],
'<a href="../../#settings">Zulip settings page</a>')
self.assertEqual(
context['subscriptions_html'],
'<a target="_blank" href="../../#streams">streams page</a>')
class AboutPageTest(ZulipTestCase):
def setUp(self):
# type: () -> None
""" Manual installation which did not execute `tools/provision`
would not have the `static/generated/github-contributors.json` fixture
file.
"""
# This block has unreliable test coverage due to the implicit
# caching here, so we exclude it from coverage.
if not os.path.exists(settings.CONTRIBUTORS_DATA):
# Copy the fixture file in `zerver/fixtures` to `static/generated`
update_script = os.path.join(os.path.dirname(__file__),
'../../tools/update-authors-json') # nocoverage
subprocess.check_call([update_script, '--use-fixture']) # nocoverage
def test_endpoint(self):
# type: () -> None
result = self.client_get('/about/')
self.assert_in_success_response(
['Contributors', 'commits', '@timabbott'],
result
)
def test_split_by(self):
# type: () -> None
"""Utility function primarily used in authors page"""
flat_list = [1, 2, 3, 4, 5, 6, 7, 8, 9]
expected_result = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
self.assertEqual(split_by(flat_list, 3, None), expected_result)
class ConfigErrorTest(ZulipTestCase):
@override_settings(GOOGLE_OAUTH2_CLIENT_ID=None)
def test_google(self):
# type: () -> None
result = self.client_get("/accounts/login/google/")
self.assertEqual(result.status_code, 302)
self.assertEqual(result.url, '/config-error/google')
result = self.client_get(result.url)
self.assert_in_success_response(["GOOGLE_OAUTH2_CLIENT_ID"], result)
@override_settings(SOCIAL_AUTH_GITHUB_KEY=None)
def test_github(self):
# type: () -> None
result = self.client_get("/accounts/login/social/github")
self.assertEqual(result.status_code, 302)
self.assertEqual(result.url, '/config-error/github')
result = self.client_get(result.url)
self.assert_in_success_response(["SOCIAL_AUTH_GITHUB_KEY"], result)
@override_settings(SOCIAL_AUTH_GITHUB_KEY=None)
@override_settings(DEVELOPMENT=False)
def test_github_production_error(self):
# type: () -> None
"""Test the !DEVELOPMENT code path of config-error."""
result = self.client_get("/accounts/login/social/github")
self.assertEqual(result.status_code, 302)
self.assertEqual(result.url, '/config-error/github')
result = self.client_get(result.url)
self.assert_in_success_response(["/etc/zulip/zulip-secrets.conf"], result)
def test_smtp_error(self):
# type: () -> None
result = self.client_get("/config-error/smtp")
self.assertEqual(result.status_code, 200)
self.assert_in_success_response(["/var/log/zulip"], result)
| |
# coding: utf-8
import pytest
import pandas as pd
import numpy as np
from pandas.testing import assert_series_equal
from functools import partial
from dgp.lib.transform.graph import Graph, TransformGraph, GraphError
from dgp.lib.transform.gravity import eotvos_correction, latitude_correction, free_air_correction
import dgp.lib.trajectory_ingestor as ti
from tests import sample_dir
import csv
class TestGraph:
@pytest.mark.parametrize('test_input', ['some_string',
[[1, 2], [3], "hello"],
{'a': [1, 2], 'b': 3},
{'a': [1, 2], 'b': "hello"},
])
def test_init_raises(self, test_input):
pytest.raises(TypeError, Graph, test_input)
def test_topo_sort_raises(self):
test_input = {'a': [],
'b': ['c'],
'c': ['a', 'b'],
'd': ['a', 'b', 'c']}
g = Graph(test_input)
with pytest.raises(GraphError, message='Cycle detected'):
g.topo_sort()
def add(a, b):
return a + b
class TestTransformGraph:
@pytest.fixture
def test_input(self):
graph = {'a': 1,
'b': 2,
'c': (add, 'a', 'b'),
'd': (sum, ['a', 'b', 'c'])
}
return graph
def test_init(self, test_input):
g = TransformGraph(graph=test_input)
assert g.order == ['d', 'c', 'b', 'a']
def test_execute(self, test_input):
g = TransformGraph(graph=test_input)
res = g.execute()
expected = {'a': 1, 'b': 2, 'c': 3, 'd': 6}
assert res == expected
def test_graph_setter(self, test_input):
g = TransformGraph(graph=test_input)
g.execute()
new_graph = {'a': 1,
'b': 2,
'c': (add, 'a', 'b'),
'd': (sum, ['a', 'b', 'c']),
'e': (add, 'd', 'b')
}
g.graph = new_graph
res = g.execute()
expected = {'a': 1, 'b': 2, 'c': 3, 'd': 6, 'e': 8}
assert res == expected
def test_subclass_noargs(self, test_input):
class NewTransformGraph(TransformGraph):
transform_graph = test_input
g = NewTransformGraph()
res = g.execute()
expected = {'a': 1, 'b': 2, 'c': 3, 'd': 6}
assert res == expected
def test_subclass_args(self):
class NewTransformGraph(TransformGraph):
def __init__(self, in1, in2):
self.transform_graph = {'a': in1,
'b': in2,
'c': (add, 'a', 'b'),
'd': (sum, ['a', 'b', 'c'])
}
super().__init__()
g = NewTransformGraph(1, 2)
res = g.execute()
expected = {'a': 1, 'b': 2, 'c': 3, 'd': 6}
assert res == expected
class TestCorrections:
@pytest.fixture
def trajectory_data(self):
# Ensure gps_fields are ordered correctly relative to test file
gps_fields = ['mdy', 'hms', 'lat', 'long', 'ortho_ht', 'ell_ht',
'num_stats', 'pdop']
data = ti.import_trajectory(
'tests/sample_data/eotvos_short_input.txt',
columns=gps_fields,
skiprows=1,
timeformat='hms'
)
return data
def test_eotvos(self, trajectory_data):
# TODO: More complete test that spans the range of possible inputs
result_eotvos = []
with sample_dir.joinpath('eotvos_short_result.csv').open() as fd:
test_data = csv.DictReader(fd)
for line in test_data:
result_eotvos.append(float(line['Eotvos_full']))
transform_graph = {'trajectory': trajectory_data,
'eotvos': (eotvos_correction, 'trajectory'),
}
g = TransformGraph(graph=transform_graph)
eotvos_a = g.execute()
for i, value in enumerate(eotvos_a['eotvos']):
if 1 < i < len(result_eotvos) - 2:
try:
assert value == pytest.approx(result_eotvos[i], rel=1e-2)
except AssertionError:
print("Invalid assertion at data line: {}".format(i))
raise  # re-raise the original assertion so the pytest.approx details are kept
@pytest.mark.skip(reason="Error on my workstation")
def test_free_air_correction(self, trajectory_data):
# TODO: More complete test that spans the range of possible inputs
s1 = pd.Series([39.9148595446, 39.9148624273], name='lat')
s2 = pd.Series([1599.197, 1599.147], name='ell_ht')
test_input = pd.concat([s1, s2], axis=1)
test_input.index = pd.Index([trajectory_data.index[0], trajectory_data.index[-1]])
expected = pd.Series([-493.308594971815, -493.293177069581],
index=pd.Index([trajectory_data.index[0],
trajectory_data.index[-1]]),
name='fac'
)
transform_graph = {'trajectory': test_input,
'fac': (free_air_correction, 'trajectory'),
}
g = TransformGraph(graph=transform_graph)
res = g.execute()
assert_series_equal(expected, res['fac'])
# assert expected == pytest.approx(res['fac'], rel=1e-8)
# check that the indices are equal
assert test_input.index.identical(res['fac'].index)
def test_latitude_correction(self, trajectory_data):
test_input = pd.DataFrame([39.9148595446, 39.9148624273])
test_input.columns = ['lat']
test_input.index = pd.Index([trajectory_data.index[0], trajectory_data.index[-1]])
expected = pd.Series([-980162.105035777, -980162.105292394],
index=pd.Index([trajectory_data.index[0],
trajectory_data.index[-1]]),
name='lat_corr'
)
transform_graph = {'trajectory': test_input,
'lat_corr': (latitude_correction, 'trajectory'),
}
g = TransformGraph(graph=transform_graph)
res = g.execute()
assert_series_equal(expected, res['lat_corr'], check_less_precise=8)
# assert expected == pytest.approx(res['lat_corr'], rel=1e-8)
# check that the indexes are equal
assert test_input.index.identical(res['lat_corr'].index)
def test_partial(self):
input_A = pd.Series(np.arange(0, 5), index=['A', 'B', 'C', 'D', 'E'])
input_B = pd.Series(np.arange(2, 7), index=['A', 'B', 'C', 'D', 'E'])
expected = pd.concat([input_A, input_B], axis=1)
concat = partial(pd.concat, join='outer', axis=1)
transform_graph = {'input_a': input_A,
'input_b': input_B,
'result': (concat, ['input_a', 'input_b'])
}
g = TransformGraph(graph=transform_graph)
result = g.execute()
res = result['result']
assert res.equals(expected)
# check that the indexes are equal
assert input_A.index.identical(res.index)
assert input_B.index.identical(res.index)
def test_graph_chaining(self):
class Graph1(TransformGraph):
def __init__(self, in1, in2):
self.transform_graph = {'a': in1,
'b': in2,
'c': (add, 'a', 'b'),
}
super().__init__()
graph2 = {'a': 1,
'b': 2,
'c': (Graph1.run(item='c'), 'a', 'b'),
'd': (sum, ['a', 'b', 'c'])
}
g = TransformGraph(graph=graph2)
result = g.execute()
expected = {'a': 1, 'b': 2, 'c': 3, 'd': 6}
assert result == expected
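# Illustrative sketch (not part of the test module): a transform graph maps a
# node name to either a plain value or a (callable, *input_names) tuple, and
# execute() resolves nodes in dependency order. Node names here are examples.
def example_average_graph():
    def mean(a, b):
        return (a + b) / 2
    graph = {'x': 4,
             'y': 10,
             'avg': (mean, 'x', 'y')}
    return TransformGraph(graph=graph).execute()  # 'avg' holds the mean of x and y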
| |
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Version-independent api tests"""
import httplib2
from oslo_serialization import jsonutils
from glance.tests import functional
class TestApiVersions(functional.FunctionalTest):
def test_version_configurations(self):
"""Test that versioning is handled properly through all channels"""
# v1 and v2 api enabled
self.start_servers(**self.__dict__.copy())
url = 'http://127.0.0.1:%d/v%%s/' % self.api_port
versions = {'versions': [
{
'status': 'EXPERIMENTAL',
'id': 'v3.0',
'links': [{'href': url % '3', "rel": "self"}],
},
{
'id': 'v2.3',
'status': 'CURRENT',
'links': [{'rel': 'self', 'href': url % '2'}],
},
{
'id': 'v2.2',
'status': 'SUPPORTED',
'links': [{'rel': 'self', 'href': url % '2'}],
},
{
'id': 'v2.1',
'status': 'SUPPORTED',
'links': [{'rel': 'self', 'href': url % '2'}],
},
{
'id': 'v2.0',
'status': 'SUPPORTED',
'links': [{'rel': 'self', 'href': url % '2'}],
},
{
'id': 'v1.1',
'status': 'SUPPORTED',
'links': [{'rel': 'self', 'href': url % '1'}],
},
{
'id': 'v1.0',
'status': 'SUPPORTED',
'links': [{'rel': 'self', 'href': url % '1'}],
},
]}
versions_json = jsonutils.dumps(versions)
# Verify version choices returned.
path = 'http://%s:%d' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(300, response.status)
self.assertEqual(versions_json, content)
def test_v2_api_configuration(self):
self.api_server.enable_v1_api = False
self.api_server.enable_v2_api = True
self.start_servers(**self.__dict__.copy())
url = 'http://127.0.0.1:%d/v%%s/' % self.api_port
versions = {'versions': [
{
'status': 'EXPERIMENTAL',
'id': 'v3.0',
'links': [{'href': url % '3', "rel": "self"}],
},
{
'id': 'v2.3',
'status': 'CURRENT',
'links': [{'rel': 'self', 'href': url % '2'}],
},
{
'id': 'v2.2',
'status': 'SUPPORTED',
'links': [{'rel': 'self', 'href': url % '2'}],
},
{
'id': 'v2.1',
'status': 'SUPPORTED',
'links': [{'rel': 'self', 'href': url % '2'}],
},
{
'id': 'v2.0',
'status': 'SUPPORTED',
'links': [{'rel': 'self', 'href': url % '2'}],
},
]}
versions_json = jsonutils.dumps(versions)
# Verify version choices returned.
path = 'http://%s:%d' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(300, response.status)
self.assertEqual(versions_json, content)
def test_v1_api_configuration(self):
self.api_server.enable_v1_api = True
self.api_server.enable_v2_api = False
self.api_server.enable_v3_api = False
self.start_servers(**self.__dict__.copy())
url = 'http://127.0.0.1:%d/v%%s/' % self.api_port
versions = {'versions': [
{
'id': 'v1.1',
'status': 'SUPPORTED',
'links': [{'rel': 'self', 'href': url % '1'}],
},
{
'id': 'v1.0',
'status': 'SUPPORTED',
'links': [{'rel': 'self', 'href': url % '1'}],
},
]}
versions_json = jsonutils.dumps(versions)
# Verify version choices returned.
path = 'http://%s:%d' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(300, response.status)
self.assertEqual(versions_json, content)
class TestApiPaths(functional.FunctionalTest):
def setUp(self):
super(TestApiPaths, self).setUp()
self.start_servers(**self.__dict__.copy())
url = 'http://127.0.0.1:%d/v%%s/' % self.api_port
versions = {'versions': [
{
'status': 'EXPERIMENTAL',
'id': 'v3.0',
'links': [{'href': url % '3', "rel": "self"}],
},
{
'id': 'v2.3',
'status': 'CURRENT',
'links': [{'rel': 'self', 'href': url % '2'}],
},
{
'id': 'v2.2',
'status': 'SUPPORTED',
'links': [{'rel': 'self', 'href': url % '2'}],
},
{
'id': 'v2.1',
'status': 'SUPPORTED',
'links': [{'rel': 'self', 'href': url % '2'}],
},
{
'id': 'v2.0',
'status': 'SUPPORTED',
'links': [{'rel': 'self', 'href': url % '2'}],
},
{
'id': 'v1.1',
'status': 'SUPPORTED',
'links': [{'rel': 'self', 'href': url % '1'}],
},
{
'id': 'v1.0',
'status': 'SUPPORTED',
'links': [{'rel': 'self', 'href': url % '1'}],
},
]}
self.versions_json = jsonutils.dumps(versions)
images = {'images': []}
self.images_json = jsonutils.dumps(images)
def test_get_root_path(self):
"""Assert GET / with `no Accept:` header.
Verify version choices returned.
Bug lp:803260 no Accept header causes a 500 in glance-api
"""
path = 'http://%s:%d' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(300, response.status)
self.assertEqual(self.versions_json, content)
def test_get_images_path(self):
"""Assert GET /images with `no Accept:` header.
Verify version choices returned.
"""
path = 'http://%s:%d/images' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(300, response.status)
self.assertEqual(self.versions_json, content)
def test_get_v1_images_path(self):
"""GET /v1/images with `no Accept:` header.
Verify empty images list returned.
"""
path = 'http://%s:%d/v1/images' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(200, response.status)
def test_get_root_path_with_unknown_header(self):
"""Assert GET / with Accept: unknown header
Verify version choices returned. Verify message in API log about
unknown accept header.
"""
path = 'http://%s:%d/' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
headers = {'Accept': 'unknown'}
response, content = http.request(path, 'GET', headers=headers)
self.assertEqual(300, response.status)
self.assertEqual(self.versions_json, content)
def test_get_root_path_with_openstack_header(self):
"""Assert GET / with an Accept: application/vnd.openstack.images-v1
Verify empty image list returned
"""
path = 'http://%s:%d/images' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
headers = {'Accept': 'application/vnd.openstack.images-v1'}
response, content = http.request(path, 'GET', headers=headers)
self.assertEqual(200, response.status)
self.assertEqual(self.images_json, content)
def test_get_images_path_with_openstack_header(self):
"""Assert GET /images with a
`Accept: application/vnd.openstack.compute-v1` header.
Verify version choices returned. Verify message in API log
about unknown accept header.
"""
path = 'http://%s:%d/images' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
headers = {'Accept': 'application/vnd.openstack.compute-v1'}
response, content = http.request(path, 'GET', headers=headers)
self.assertEqual(300, response.status)
self.assertEqual(self.versions_json, content)
def test_get_v10_images_path(self):
"""Assert GET /v1.0/images with no Accept: header
Verify version choices returned
"""
path = 'http://%s:%d/v1.0/images' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(300, response.status)
def test_get_v1a_images_path(self):
"""Assert GET /v1.a/images with no Accept: header
Verify version choices returned
"""
path = 'http://%s:%d/v1.a/images' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(300, response.status)
def test_get_va1_images_path(self):
"""Assert GET /va.1/images with no Accept: header
Verify version choices returned
"""
path = 'http://%s:%d/va.1/images' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(300, response.status)
self.assertEqual(self.versions_json, content)
def test_get_versions_path(self):
"""Assert GET /versions with no Accept: header
Verify version choices returned
"""
path = 'http://%s:%d/versions' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(300, response.status)
self.assertEqual(self.versions_json, content)
def test_get_versions_path_with_openstack_header(self):
"""Assert GET /versions with the
`Accept: application/vnd.openstack.images-v1` header.
Verify version choices returned.
"""
path = 'http://%s:%d/versions' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
headers = {'Accept': 'application/vnd.openstack.images-v1'}
response, content = http.request(path, 'GET', headers=headers)
self.assertEqual(300, response.status)
self.assertEqual(self.versions_json, content)
def test_get_v1_versions_path(self):
"""Assert GET /v1/versions with `no Accept:` header
Verify 404 returned
"""
path = 'http://%s:%d/v1/versions' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(404, response.status)
def test_get_versions_choices(self):
"""Verify version choices returned"""
path = 'http://%s:%d/v10' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(300, response.status)
self.assertEqual(self.versions_json, content)
def test_get_images_path_with_openstack_v2_header(self):
"""Assert GET /images with a
`Accept: application/vnd.openstack.compute-v2` header.
Verify version choices returned. Verify message in API log
about unknown version in accept header.
"""
path = 'http://%s:%d/images' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
headers = {'Accept': 'application/vnd.openstack.images-v10'}
response, content = http.request(path, 'GET', headers=headers)
self.assertEqual(300, response.status)
self.assertEqual(self.versions_json, content)
def test_get_v12_images_path(self):
"""Assert GET /v1.2/images with `no Accept:` header
Verify version choices returned
"""
path = 'http://%s:%d/v1.2/images' % ('127.0.0.1', self.api_port)
http = httplib2.Http()
response, content = http.request(path, 'GET')
self.assertEqual(300, response.status)
self.assertEqual(self.versions_json, content)
| |
"""
Copyright (C) 2010 - 2013 TopCoder Inc., All Rights Reserved.
This file contains the claim file classes.
ClaimFile provides access to the underlying file.
@version 1.0 (Healthcare Fraud Prevention - Partner Database Appliance - Assembly)
@author: zeadom, TCSASSEMBLER
@since 2013-12-10
"""
import os, sys
import abc
import csv
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
from partner_database_appliance_exception import PartnerDatabaseApplianceException
from claim_type import ClaimTypeFactory
class ClaimFile(object):
"""
Claim file abstract class
This class provides the access to the file
Not meant to be called outside the module, so there is no parameter check in the methods.
Attributes:
fileObject: Represents the current file object. This will be set in openFile, closed in
closeFile, and used in the business methods.
fileType: Represents the file type being used. Will be set by concrete implementations.
defaultFilename: Represents the default name of the file that will contain the output.
"""
__metaclass__ = abc.ABCMeta
READ = "read"
WRITE = "write"
def __init__(self):
""" Init ClaimFile """
self.fileObject = None
self.fileType = ""
self.defaultFilename = "output"
def openFile(self, filename, mode):
"""
Opens the file for processing
Args:
filename: the name of the file to open
mode: the mode of the operation (read, or write)
Raises:
PartnerDatabaseApplianceException: There is an error during the execution of this method.
"""
try:
if not filename:
filename = "%s.%s" % (self.defaultFilename, self.fileType)
if mode == self.READ:
self.fileObject = open(filename, "r")
elif mode == self.WRITE:
self.fileObject = open(filename, "w")
except Exception as e:
raise PartnerDatabaseApplianceException(*e.args)
def closeFile(self):
"""
Closes the file
Raises:
PartnerDatabaseApplianceException: There is an error during the execution of this method.
"""
try:
if self.fileObject:
self.fileObject.close()
self.fileObject = None
except Exception as e:
raise PartnerDatabaseApplianceException(*e.args)
@abc.abstractmethod
def readNextLine(self, claimType):
"""
Read next line
This is an abstract method and should be implemented in a subclass.
Reads the next line from the open file of the given claim type and returns the field values
Args:
claimType: the claimType
Returns:
The next record
Raises:
PartnerDatabaseApplianceException: There is an error during the execution of this method.
"""
return
@abc.abstractmethod
def write(self, claimType, records):
"""
Writes the records into the open file
This is an abstract method and should be implemented in a subclass.
Args:
claimType: the claimType
records: the records to write
Raises:
PartnerDatabaseApplianceException: There is an error during the execution of this method.
"""
return
def findFilenames(self, dirPath):
"""
Find filenames
Helper method to recursively get all files of the requested file type in the given directory
Args:
dirPath: the path to the directory
Returns:
A list of filenames for the requested file type.
example:
["dir/a.csv", "dir/b.csv", "dir/anotherdir/c.csv"]
"""
filenames = os.listdir(dirPath)
ret = []
for filename in filenames:
absfilename = os.path.join(dirPath, filename)
if os.path.isdir(absfilename):
ret.extend(self.findFilenames(absfilename))
elif filename.endswith("." + self.fileType):
ret.append(absfilename)
return ret
class CSVClaimFile(ClaimFile):
"""
CSV Claim File Class
This class provides access to the CSV file.
Uses the csv module from the standard library.
Not meant to be used outside the module, so the methods perform no parameter checks.
Attributes:
fileObject: Represents the current file object. This will be set in openFile, closed in closeFile,
and used in the business methods.
fileType: Represents the file type being used. Will be set by concrete implementations.
defaultFilename: Represents the default name of the file that will contain the output.
fileReader: Represents the current file reader object. Will be used in readNextLine
"""
def __init__(self):
""" Init CSVClaimFile """
super(CSVClaimFile, self).__init__()
self.fileType = "csv"
self.fileReader = None
def openFile(self, filename, mode):
"""
Opens the file for processing
Override the superclass's openFile method
Args:
filename: the name of the file to open
mode: the mode of the operation (read, or write)
Raises:
PartnerDatabaseApplianceException: There is an error during the execution of this method.
"""
try:
if not filename:
filename = "%s.%s" % (self.defaultFilename, self.fileType)
if mode == self.READ:
self.fileObject = open(filename, "r")
# fileReader will be used in readNextLine
self.fileReader = csv.reader(self.fileObject, delimiter=",")
elif mode == self.WRITE:
self.fileObject = open(filename, "w", newline="")
except Exception as e:
raise PartnerDatabaseApplianceException(*e.args)
def readNextLine(self, claimType):
"""
Read next line
Reads the next line from the open file of the given claim type and returns the field values
Args:
claimType: the claimType
Returns:
The next record
Raises:
PartnerDatabaseApplianceException: There is an error during the execution of this method.
"""
try:
return next(self.fileReader)
except StopIteration:
return None
except Exception as e:
raise PartnerDatabaseApplianceException(*e.args)
def write(self, claimType, records):
"""
Writes the records into the open file
Args:
claimType: the claimType
records: the records to write
Raises:
PartnerDatabaseApplianceException: There is an error during the execution of this method.
"""
try:
fileWriter = csv.writer(self.fileObject, delimiter=",")
claimType = ClaimTypeFactory.load(claimType)
# Remove LAST_MODIFY
fileWriter.writerow(claimType.columns[0:-1])
fileWriter.writerows(records)
except Exception as e:
raise PartnerDatabaseApplianceException(*e.args)
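# A minimal usage sketch, guarded so it never runs on import. The claim type
# name "inpatient" and the input file "claims.csv" are assumptions; claim
# types are resolved through ClaimTypeFactory inside write().
if __name__ == "__main__":
    claimFile = CSVClaimFile()
    claimFile.openFile("claims.csv", CSVClaimFile.READ)
    record = claimFile.readNextLine("inpatient")
    while record is not None:  # readNextLine returns None at end of file
        print(record)
        record = claimFile.readNextLine("inpatient")
    claimFile.closeFile()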
| |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A library of helpers for use with SamplingDecoders.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import abc
import six
from tensorflow.contrib.distributions.python.ops import bernoulli
from tensorflow.contrib.distributions.python.ops import categorical
from tensorflow.contrib.seq2seq.python.ops import decoder
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.layers import base as layers_base
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import tensor_array_ops
from tensorflow.python.util import nest
__all__ = [
"Helper",
"TrainingHelper",
"GreedyEmbeddingHelper",
"CustomHelper",
"ScheduledEmbeddingTrainingHelper",
"ScheduledOutputTrainingHelper",
]
_transpose_batch_time = decoder._transpose_batch_time # pylint: disable=protected-access
def _unstack_ta(inp):
return tensor_array_ops.TensorArray(
dtype=inp.dtype, size=array_ops.shape(inp)[0],
element_shape=inp.get_shape()[1:]).unstack(inp)
@six.add_metaclass(abc.ABCMeta)
class Helper(object):
"""Helper interface. Helper instances are used by SamplingDecoder."""
@abc.abstractproperty
def batch_size(self):
"""Returns a scalar int32 tensor."""
raise NotImplementedError("batch_size has not been implemented")
@abc.abstractmethod
def initialize(self, name=None):
"""Returns `(initial_finished, initial_inputs)`."""
pass
@abc.abstractmethod
def sample(self, time, outputs, state, name=None):
"""Returns `sample_ids`."""
pass
@abc.abstractmethod
def next_inputs(self, time, outputs, state, sample_ids, name=None):
"""Returns `(finished, next_inputs, next_state)`."""
pass
class CustomHelper(Helper):
"""Base abstract class that allows the user to customize sampling."""
def __init__(self, initialize_fn, sample_fn, next_inputs_fn):
"""Initializer.
Args:
initialize_fn: callable that returns `(finished, next_inputs)`
for the first iteration.
sample_fn: callable that takes `(time, outputs, state)`
and emits tensor `sample_ids`.
next_inputs_fn: callable that takes `(time, outputs, state, sample_ids)`
and emits `(finished, next_inputs, next_state)`.
"""
self._initialize_fn = initialize_fn
self._sample_fn = sample_fn
self._next_inputs_fn = next_inputs_fn
self._batch_size = None
@property
def batch_size(self):
if self._batch_size is None:
raise ValueError("batch_size accessed before initialize was called")
return self._batch_size
def initialize(self, name=None):
with ops.name_scope(name, "%sInitialize" % type(self).__name__):
(finished, next_inputs) = self._initialize_fn()
if self._batch_size is None:
self._batch_size = array_ops.size(finished)
return (finished, next_inputs)
def sample(self, time, outputs, state, name=None):
with ops.name_scope(
name, "%sSample" % type(self).__name__, (time, outputs, state)):
return self._sample_fn(time=time, outputs=outputs, state=state)
def next_inputs(self, time, outputs, state, sample_ids, name=None):
with ops.name_scope(
name, "%sNextInputs" % type(self).__name__, (time, outputs, state)):
return self._next_inputs_fn(
time=time, outputs=outputs, state=state, sample_ids=sample_ids)
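# A minimal sketch (hypothetical, not part of the library) of the three
# callables CustomHelper expects; `batch_size`, `zero_inputs` and `max_steps`
# are assumptions supplied by the caller.
def _example_custom_helper(batch_size, zero_inputs, max_steps):
  def initialize_fn():
    finished = array_ops.tile([False], [batch_size])
    return (finished, zero_inputs)
  def sample_fn(time, outputs, state):
    # Greedy sampling: treat outputs as logits and take the argmax id.
    return math_ops.cast(math_ops.argmax(outputs, axis=-1), dtypes.int32)
  def next_inputs_fn(time, outputs, state, sample_ids):
    # Stop after max_steps; feed the raw outputs back as next inputs
    # (a real model would embed sample_ids instead).
    finished = array_ops.tile(
        [math_ops.greater_equal(time + 1, max_steps)], [batch_size])
    return (finished, outputs, state)
  return CustomHelper(initialize_fn, sample_fn, next_inputs_fn)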
class TrainingHelper(Helper):
"""A helper for use during training. Only reads inputs.
Returned sample_ids are the argmax of the RNN output logits.
"""
def __init__(self, inputs, sequence_length, time_major=False, name=None):
"""Initializer.
Args:
inputs: A (structure of) input tensors.
sequence_length: An int32 vector tensor.
time_major: Python bool. Whether the tensors in `inputs` are time major.
If `False` (default), they are assumed to be batch major.
name: Name scope for any created operations.
Raises:
ValueError: if `sequence_length` is not a 1D tensor.
"""
with ops.name_scope(name, "TrainingHelper", [inputs, sequence_length]):
inputs = ops.convert_to_tensor(inputs, name="inputs")
if not time_major:
inputs = nest.map_structure(_transpose_batch_time, inputs)
self._input_tas = nest.map_structure(_unstack_ta, inputs)
self._sequence_length = ops.convert_to_tensor(
sequence_length, name="sequence_length")
if self._sequence_length.get_shape().ndims != 1:
raise ValueError(
"Expected sequence_length to be a vector, but received shape: %s" %
self._sequence_length.get_shape())
self._zero_inputs = nest.map_structure(
lambda inp: array_ops.zeros_like(inp[0, :]), inputs)
self._batch_size = array_ops.size(sequence_length)
@property
def batch_size(self):
return self._batch_size
def initialize(self, name=None):
with ops.name_scope(name, "TrainingHelperInitialize"):
finished = math_ops.equal(0, self._sequence_length)
all_finished = math_ops.reduce_all(finished)
next_inputs = control_flow_ops.cond(
all_finished, lambda: self._zero_inputs,
lambda: nest.map_structure(lambda inp: inp.read(0), self._input_tas))
return (finished, next_inputs)
def sample(self, time, outputs, name=None, **unused_kwargs):
with ops.name_scope(name, "TrainingHelperSample", [time, outputs]):
sample_ids = math_ops.cast(
math_ops.argmax(outputs, axis=-1), dtypes.int32)
return sample_ids
def next_inputs(self, time, outputs, state, name=None, **unused_kwargs):
"""next_inputs_fn for TrainingHelper."""
with ops.name_scope(name, "TrainingHelperNextInputs",
[time, outputs, state]):
next_time = time + 1
finished = (next_time >= self._sequence_length)
all_finished = math_ops.reduce_all(finished)
def read_from_ta(inp):
return inp.read(next_time)
next_inputs = control_flow_ops.cond(
all_finished, lambda: self._zero_inputs,
lambda: nest.map_structure(read_from_ta, self._input_tas))
return (finished, next_inputs, state)
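# A brief usage sketch: at training time the helper simply replays the
# ground-truth inputs step by step. `decoder_inputs` (shaped
# [batch, time, dim]) and the int32 vector `lengths` are assumptions.
def _example_training_helper(decoder_inputs, lengths):
  helper = TrainingHelper(inputs=decoder_inputs, sequence_length=lengths)
  (finished, first_inputs) = helper.initialize()
  # A decoder then alternates helper.sample(...) and helper.next_inputs(...);
  # sample ids are simply the argmax over the RNN output logits.
  return helper, finished, first_inputs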
class ScheduledEmbeddingTrainingHelper(TrainingHelper):
"""A training helper that adds scheduled sampling.
Returns -1s for sample_ids where no sampling took place; valid sample id
values elsewhere.
"""
def __init__(self, inputs, sequence_length, embedding, sampling_probability,
time_major=False, seed=None, scheduling_seed=None, name=None):
"""Initializer.
Args:
inputs: A (structure of) input tensors.
sequence_length: An int32 vector tensor.
embedding: A callable that takes a vector tensor of `ids` (argmax ids),
or the `params` argument for `embedding_lookup`.
sampling_probability: A 0D `float32` tensor: the probability of sampling
categorically from the output ids instead of reading directly from the
inputs.
time_major: Python bool. Whether the tensors in `inputs` are time major.
If `False` (default), they are assumed to be batch major.
seed: The sampling seed.
scheduling_seed: The schedule decision rule sampling seed.
name: Name scope for any created operations.
Raises:
ValueError: if `sampling_probability` is not a scalar or vector.
"""
with ops.name_scope(name, "ScheduledEmbeddingSamplingWrapper",
[embedding, sampling_probability]):
if callable(embedding):
self._embedding_fn = embedding
else:
self._embedding_fn = (
lambda ids: embedding_ops.embedding_lookup(embedding, ids))
self._sampling_probability = ops.convert_to_tensor(
sampling_probability, name="sampling_probability")
if self._sampling_probability.get_shape().ndims not in (0, 1):
raise ValueError(
"sampling_probability must be either a scalar or a vector. "
"saw shape: %s" % (self._sampling_probability.get_shape()))
self._seed = seed
self._scheduling_seed = scheduling_seed
super(ScheduledEmbeddingTrainingHelper, self).__init__(
inputs=inputs,
sequence_length=sequence_length,
time_major=time_major,
name=name)
def initialize(self, name=None):
return super(ScheduledEmbeddingTrainingHelper, self).initialize(name=name)
def sample(self, time, outputs, state, name=None):
with ops.name_scope(name, "ScheduledEmbeddingTrainingHelperSample",
[time, outputs, state]):
# Return -1s where we did not sample, and sample_ids elsewhere
select_sample_noise = random_ops.random_uniform(
[self.batch_size], seed=self._scheduling_seed)
select_sample = (self._sampling_probability > select_sample_noise)
sample_id_sampler = categorical.Categorical(logits=outputs)
return array_ops.where(
select_sample,
sample_id_sampler.sample(seed=self._seed),
array_ops.tile([-1], [self.batch_size]))
def next_inputs(self, time, outputs, state, sample_ids, name=None):
with ops.name_scope(name, "ScheduledEmbeddingTrainingHelperSample",
[time, outputs, state, sample_ids]):
(finished, base_next_inputs, state) = (
super(ScheduledEmbeddingTrainingHelper, self).next_inputs(
time=time,
outputs=outputs,
state=state,
sample_ids=sample_ids,
name=name))
def maybe_sample():
"""Perform scheduled sampling."""
where_sampling = math_ops.cast(
array_ops.where(sample_ids > -1), dtypes.int32)
where_not_sampling = math_ops.cast(
array_ops.where(sample_ids <= -1), dtypes.int32)
where_sampling_flat = array_ops.reshape(where_sampling, [-1])
where_not_sampling_flat = array_ops.reshape(where_not_sampling, [-1])
sample_ids_sampling = array_ops.gather(sample_ids, where_sampling_flat)
inputs_not_sampling = array_ops.gather(
base_next_inputs, where_not_sampling_flat)
sampled_next_inputs = self._embedding_fn(sample_ids_sampling)
base_shape = array_ops.shape(base_next_inputs)
return (array_ops.scatter_nd(indices=where_sampling,
updates=sampled_next_inputs,
shape=base_shape)
+ array_ops.scatter_nd(indices=where_not_sampling,
updates=inputs_not_sampling,
shape=base_shape))
all_finished = math_ops.reduce_all(finished)
next_inputs = control_flow_ops.cond(
all_finished, lambda: base_next_inputs, maybe_sample)
return (finished, next_inputs, state)
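# Sketch (all argument names are assumptions): with probability 0.25 a step's
# input is replaced by the embedding of an id sampled from the previous
# output logits, instead of the ground-truth input.
def _example_scheduled_embedding(decoder_inputs, lengths, embedding_matrix):
  return ScheduledEmbeddingTrainingHelper(
      inputs=decoder_inputs,
      sequence_length=lengths,
      embedding=embedding_matrix,  # a params matrix or a callable ids->vectors
      sampling_probability=0.25)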
class ScheduledOutputTrainingHelper(TrainingHelper):
"""A training helper that adds scheduled sampling directly to outputs.
Returns False for sample_ids where no sampling took place; True elsewhere.
"""
def __init__(self, inputs, sequence_length, sampling_probability,
time_major=False, seed=None, next_input_layer=None,
auxiliary_inputs=None, name=None):
"""Initializer.
Args:
inputs: A (structure of) input tensors.
sequence_length: An int32 vector tensor.
sampling_probability: A 0D `float32` tensor: the probability of sampling
from the outputs instead of reading directly from the inputs.
time_major: Python bool. Whether the tensors in `inputs` are time major.
If `False` (default), they are assumed to be batch major.
seed: The sampling seed.
next_input_layer: (Optional) An instance of `tf.layers.Layer`, i.e.,
`tf.layers.Dense`. Optional layer to apply to the RNN output to create
the next input.
auxiliary_inputs: An optional (structure of) auxiliary input tensors with
a shape that matches `inputs` in all but (potentially) the final
dimension. These tensors will be concatenated to the sampled output or
the `inputs` when not sampling for use as the next input.
name: Name scope for any created operations.
Raises:
ValueError: if `sampling_probability` is not a scalar or vector.
"""
with ops.name_scope(name, "ScheduledOutputTrainingHelper",
[inputs, auxiliary_inputs, sampling_probability]):
self._sampling_probability = ops.convert_to_tensor(
sampling_probability, name="sampling_probability")
if self._sampling_probability.get_shape().ndims not in (0, 1):
raise ValueError(
"sampling_probability must be either a scalar or a vector. "
"saw shape: %s" % (self._sampling_probability.get_shape()))
if auxiliary_inputs is None:
maybe_concatenated_inputs = inputs
else:
inputs = ops.convert_to_tensor(inputs, name="inputs")
auxiliary_inputs = ops.convert_to_tensor(
auxiliary_inputs, name="auxiliary_inputs")
maybe_concatenated_inputs = nest.map_structure(
lambda x, y: array_ops.concat((x, y), -1),
inputs, auxiliary_inputs)
if not time_major:
auxiliary_inputs = nest.map_structure(
_transpose_batch_time, auxiliary_inputs)
self._auxiliary_input_tas = (
nest.map_structure(_unstack_ta, auxiliary_inputs)
if auxiliary_inputs is not None else None)
self._seed = seed
if (next_input_layer is not None and not isinstance(next_input_layer,
layers_base._Layer)): # pylint: disable=protected-access
raise TypeError("next_input_layer must be a Layer, received: %s" %
type(next_input_layer))
self._next_input_layer = next_input_layer
super(ScheduledOutputTrainingHelper, self).__init__(
inputs=maybe_concatenated_inputs,
sequence_length=sequence_length,
time_major=time_major,
name=name)
def initialize(self, name=None):
return super(ScheduledOutputTrainingHelper, self).initialize(name=name)
def sample(self, time, outputs, state, name=None):
with ops.name_scope(name, "ScheduledOutputTrainingHelperSample",
[time, outputs, state]):
sampler = bernoulli.Bernoulli(probs=self._sampling_probability)
return math_ops.cast(
sampler.sample(sample_shape=self.batch_size, seed=self._seed),
dtypes.bool)
def next_inputs(self, time, outputs, state, sample_ids, name=None):
with ops.name_scope(name, "ScheduledOutputTrainingHelperNextInputs",
[time, outputs, state, sample_ids]):
(finished, base_next_inputs, state) = (
super(ScheduledOutputTrainingHelper, self).next_inputs(
time=time,
outputs=outputs,
state=state,
sample_ids=sample_ids,
name=name))
def maybe_sample():
"""Perform scheduled sampling."""
def maybe_concatenate_auxiliary_inputs(outputs_, indices=None):
"""Concatenate outputs with auxiliary inputs, if they exist."""
if self._auxiliary_input_tas is None:
return outputs_
next_time = time + 1
auxiliary_inputs = nest.map_structure(
lambda ta: ta.read(next_time), self._auxiliary_input_tas)
if indices is not None:
auxiliary_inputs = array_ops.gather_nd(auxiliary_inputs, indices)
return nest.map_structure(
lambda x, y: array_ops.concat((x, y), -1),
outputs_, auxiliary_inputs)
if self._next_input_layer is None:
return array_ops.where(
sample_ids, maybe_concatenate_auxiliary_inputs(outputs),
base_next_inputs)
where_sampling = math_ops.cast(
array_ops.where(sample_ids), dtypes.int32)
where_not_sampling = math_ops.cast(
array_ops.where(math_ops.logical_not(sample_ids)), dtypes.int32)
outputs_sampling = array_ops.gather_nd(outputs, where_sampling)
inputs_not_sampling = array_ops.gather_nd(base_next_inputs,
where_not_sampling)
sampled_next_inputs = maybe_concatenate_auxiliary_inputs(
self._next_input_layer(outputs_sampling), where_sampling)
base_shape = array_ops.shape(base_next_inputs)
return (array_ops.scatter_nd(indices=where_sampling,
updates=sampled_next_inputs,
shape=base_shape)
+ array_ops.scatter_nd(indices=where_not_sampling,
updates=inputs_not_sampling,
shape=base_shape))
all_finished = math_ops.reduce_all(finished)
next_inputs = control_flow_ops.cond(
all_finished, lambda: base_next_inputs, maybe_sample)
return (finished, next_inputs, state)
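# Sketch (argument names are assumptions): with probability 0.1 the raw RNN
# output, optionally passed through `projection_layer`, becomes the next input.
def _example_scheduled_output(decoder_inputs, lengths, projection_layer=None):
  return ScheduledOutputTrainingHelper(
      inputs=decoder_inputs,
      sequence_length=lengths,
      sampling_probability=0.1,
      next_input_layer=projection_layer)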
class GreedyEmbeddingHelper(Helper):
"""A helper for use during inference.
Uses the argmax of the output (treated as logits) and passes the
result through an embedding layer to get the next input.
"""
def __init__(self, embedding, start_tokens, end_token):
"""Initializer.
Args:
embedding: A callable that takes a vector tensor of `ids` (argmax ids),
or the `params` argument for `embedding_lookup`.
start_tokens: `int32` vector shaped `[batch_size]`, the start tokens.
end_token: `int32` scalar, the token that marks end of decoding.
Raises:
ValueError: if `sequence_length` is not a 1D tensor.
"""
if callable(embedding):
self._embedding_fn = embedding
else:
self._embedding_fn = (
lambda ids: embedding_ops.embedding_lookup(embedding, ids))
self._start_tokens = ops.convert_to_tensor(
start_tokens, dtype=dtypes.int32, name="start_tokens")
self._end_token = ops.convert_to_tensor(
end_token, dtype=dtypes.int32, name="end_token")
if self._start_tokens.get_shape().ndims != 1:
raise ValueError("start_tokens must be a vector")
self._batch_size = array_ops.size(start_tokens)
if self._end_token.get_shape().ndims != 0:
raise ValueError("end_token must be a scalar")
self._start_inputs = self._embedding_fn(self._start_tokens)
@property
def batch_size(self):
return self._batch_size
def initialize(self, name=None):
finished = array_ops.tile([False], [self._batch_size])
return (finished, self._start_inputs)
def sample(self, time, outputs, state, name=None):
"""sample for GreedyEmbeddingHelper."""
del time, state # unused by sample_fn
# Outputs are logits, use argmax to get the most probable id
if not isinstance(outputs, ops.Tensor):
raise TypeError("Expected outputs to be a single Tensor, got: %s" %
type(outputs))
sample_ids = math_ops.cast(
math_ops.argmax(outputs, axis=-1), dtypes.int32)
return sample_ids
def next_inputs(self, time, outputs, state, sample_ids, name=None):
"""next_inputs_fn for GreedyEmbeddingHelper."""
del time, outputs # unused by next_inputs_fn
finished = math_ops.equal(sample_ids, self._end_token)
all_finished = math_ops.reduce_all(finished)
next_inputs = control_flow_ops.cond(
all_finished,
# If we're finished, the next_inputs value doesn't matter
lambda: self._start_inputs,
lambda: self._embedding_fn(sample_ids))
return (finished, next_inputs, state)
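# Inference-time sketch; the GO/EOS ids below are assumptions, not library
# constants. Decoding starts from GO for every batch element and a sequence
# is finished once it emits EOS.
def _example_greedy_inference(embedding_matrix, batch_size):
  start_tokens = array_ops.fill([batch_size], 1)  # assumed GO id
  return GreedyEmbeddingHelper(embedding_matrix, start_tokens, end_token=2)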
| |
from pygame.math import Vector3 as V3
import pygame
import thorpy
from core3d import Object3D, Path3D
from light import Light, Material
from camera import Camera
import primitivemeshes
import parameters
import drawing
import vessel
import random
from scene import Scene
import levelgen
import gamelogic
import garage
import trackdecorator
import obstacle
import scenario
from core3d import ManualObject3D
################################################################################
#music / sounds (other players' too, as a function of distance)
#when the category changes:
# -crowd noise (during the starting lights) changes
#statistics
def create_vessel(color):
quality = parameters.CURRENT_QUALITY
glass = Material((0,0,0),M=(120,120,120))
rest = Material(color)
t,n,c = garage.generate_vessel(rest, glass)
w = garage.random_wing(rest)
v = vessel.Vessel(None,more_triangles=[])
#quality = power*friction
#quality = turn+max_fuel
power_rand = random.random()+0.000001
power = parameters.MIN_POWER + power_rand*(parameters.MAX_POWER-parameters.MIN_POWER)
friction = power_rand
power *= parameters.ENGINE_POWER
mass = parameters.MIN_MASS + random.random()*(parameters.MAX_MASS-parameters.MIN_MASS)
turn = parameters.MIN_TURN + random.random()*(parameters.MAX_TURN-parameters.MIN_TURN)
max_fuel = quality - turn
max_fuel = parameters.MIN_FUEL + int(max_fuel*(parameters.MAX_FUEL-parameters.MIN_FUEL))
#
v.tail = vessel.Part(t.triangles, turn, friction, mass)
v.nose = vessel.Part(n.triangles, turn, friction, mass)
v.cockpit = vessel.Part(c.triangles, turn, friction, mass)
v.lwing = vessel.Part(w[0].triangles, turn/2., friction/2., mass/2.)
v.rwing = vessel.Part(w[1].triangles, turn/2., friction/2., mass/2.)
v.engine= vessel.Engine(max_fuel, power)
v.engine.mass = mass
v.engine.turn = turn
v.engine.friction = friction
#
v.refresh_mesh()
v.rotate_around_center_y(-90)
v.compute_box3D()
v.compute_dynamics()
v.from_init_rot = V3()
v.color = rest
#
return v
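# Illustrative sketch (numbers are assumptions) of the stat trade-off encoded
# above: turn and max_fuel share the quality budget, so agile vessels carry
# less fuel.
def _example_stat_budget(quality=1.0, turn=0.7):
    return quality - turn  # fraction of the budget left for max_fuel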
def init_game(hero):
parameters.players = [gamelogic.Player() for i in range(parameters.NPLAYERS-1)]
hero_color = parameters.HERO_COLOR
hero_player = gamelogic.Player(parameters.HERO_NAME,Material(hero_color))
hero_player.points = 0
parameters.player = hero_player
parameters.players += [hero_player]
if hero is None:
hero = create_vessel(hero_color)
hero.is_hero = True
hero.mass /= 2.
hero.compute_dynamics()
hero.name = "Hero" #!!
hero.attach_to_player(hero_player,reset_color=False)
def init_scene():
## random.seed(0)
#
gara = garage.Garage()
gara.play()
gara.derotate()
#
parameters.scene = Scene()
scene = parameters.scene
scene.cam = Camera(scene.screen, fov=512, d=2, objs=[])
scene.cam.set_aa(True)
#
light_pos = V3(0,1000,-1000)
light_m = V3(20,20,20)
light_M = V3(200,200,200)
light = Light(light_pos, light_m, light_M)
scene.light = light
##hero = hero.get_splitted_copy(threshold=-2.5)
scene.hero = parameters.player.vessel
hero = scene.hero
scene.objs.append(hero)
#track
nx = random.randint(3,4)
ny = random.randint(2,4)
print("nx,ny",nx,ny)
lg = levelgen.LevelGenerator(parameters.ZFINISH,nx,ny)
rw,rh = parameters.RAILW,parameters.RAILH
possible_obstacles = [primitivemeshes.p_rectangle(0.8*rw,0.8*rh,(0,0,255),(0,0,0))]
lg.random_gen(nparts=4,objects=possible_obstacles,min_density=0.1,max_density=0.8)
track = scene.track
for o in track.obstacles:
if random.random() < 0.4:
if random.random() < 0.5:
o.rotation_x = random.randint(2,5)* (2*random.randint(0,1) - 1)
else:
o.rotation_y = random.randint(2,5)* (2*random.randint(0,1) - 1)
o.obj.set_color(Material(parameters.COLOR_ROTATING))
if random.random() < 0.5:
r = random.random()
if r < 0.1:
o.movement_x = 1
elif r < 0.2:
o.movement_y = 1
elif r < 0.25:
o.movement_x = 1
o.movement_y = 1
if o.movement_x or o.movement_y:
o.obj.set_color(Material(parameters.COLOR_MOVING))
#
deco = trackdecorator.Decorator(track,track.zfinish//500) #500
#
finish = primitivemeshes.p_rectangle(track.railw,track.railh,(0,0,0))
## for pos in track.rail_centers():
for x in range(track.nx):
for y in range(track.ny):
pos = V3(track.rails[x,y].middlepos)
pos.z = track.zfinish+5
finish.set_pos(pos)
if x%2 == 0:
if y%2 == 0:
color = (0,0,0)
else:
color = (255,255,255)
else:
if y%2 == 0:
color = (255,255,255)
else:
color = (0,0,0)
finish.set_color(Material(color))
scene.objs.append(finish.get_copy())
scene.track = track
scene.opponents = [create_vessel(random.choice(drawing.colors)) for i in range(2)]
scene.objs += scene.opponents
## fin = Object3D("finish.stl")
## triangles = []
## for t in fin.triangles:
## isok = True
## for v in t.vertices():
## if v.y >= 0:
## isok = False
## if isok:
## triangles.append(t)
## fin = ManualObject3D(triangles)
## fin.rotate_around_center_x(-90)
## fin.scale(30.)
## fin.set_color(Material((255,255,0)))
## fin.move(V3(0,40,track.zfinish))
## track.things_objects.append(fin)
## scene.objs += [fin]
#
scene.refresh_cam()
scene.players = [parameters.player]
near = parameters.player.get_nearest_players()
for i,o in enumerate(scene.opponents):
player = near[i]
scene.put_opponent_on_rail(o,i+1,0,25)
o.attach_to_player(player)
scene.players.append(player)
o.set_ia(100, 0.01)
hero.reinit_orientation()
hero.set_pos(parameters.HERO_POS)
scene.put_hero_on_rail(0,0)
print("end main")
scene.refresh_vessels()
scene.hud.refresh_attributes()
g = gamelogic.ShowRanking("Start list", "Go to race", scene.players)
return scene, g.goback
if __name__ == "__main__":
app = thorpy.Application((parameters.W,parameters.H))
thorpy.set_theme(parameters.THEME)
## thorpy.application.SHOW_FPS = True
screen = thorpy.get_screen()
import dialog
def launch_about():
dialog.launch_blocking_alert("Credits",
"Author: Yann Thorimbert\nLibraries used: Pygame, ThorPy (www.thorpy.org)",
transp=False)
e_bckgr.unblit_and_reblit()
DEBUG = False
def play():
## if not DEBUG:
if True:
name = thorpy.Inserter.make("Choose your name",value="Hero")
box = thorpy.make_ok_box([name])
thorpy.auto_ok(box)
box.center()
## scenario.launch(box)
thorpy.launch_blocking(box,e_bckgr)
parameters.HERO_NAME = name.get_value()
tit = thorpy.make_text("Choose vessel color")
color = thorpy.ColorSetter.make("Choose vessel color")
box = thorpy.make_ok_box([tit,color])
thorpy.auto_ok(box)
box.center()
## scenario.launch(box)
thorpy.launch_blocking(box)
parameters.HERO_COLOR = color.get_value()
print("setting", parameters.HERO_COLOR)
#
vc = gamelogic.ShowRanking("Choose a vessel", "Continue", [], False, True)
vc.derotate()
thorpy.set_theme("classic")
if not DEBUG:
scenario.launch_intro_text()
scenario.launch_intro_text2()
scenario.launch_help()
thorpy.set_theme(parameters.THEME)
init_game(vc.vessels[0])
parameters.AA = vs.get_value("aa")
parameters.VISIBILITY = vs.get_value("visibility")
while True:
parameters.flush()
while True:
scene, goback = init_scene()
if not goback:
break
reac = thorpy.ConstantReaction(thorpy.THORPY_EVENT,scene.func_time,
{"id":thorpy.constants.EVENT_TIME})
g = thorpy.Ghost.make()
parameters.ghost = g
g.add_reaction(reac)
thorpy.functions.playing(30,1000//parameters.FPS)
m = thorpy.Menu(g,fps=parameters.FPS)
m.play()
gamelogic.refresh_ranking()
cat_before,c1 = gamelogic.get_category(parameters.player.ranking-1)
sr = gamelogic.ShowRanking("Ranking", "Go to garage",
scene.get_current_ranking_players(),results=True)
gamelogic.refresh_ranking()
sr.derotate()
cat_after,c2 = gamelogic.get_category(parameters.player.ranking-1)
if c2 > c1:
thorpy.launch_blocking_alert("Your category is now "+cat_after+\
"!\nCongratulations, "+parameters.HERO_NAME+".\nYou earned an extra bonus of 500 $."+\
"\n\nThe track length in this category is 1000m longer.")
parameters.player.money += 500
parameters.ZFINISH += 1000
## parameters.ENGINE_POWER += 0.005
parameters.CURRENT_QUALITY += 0.5
elif c2<c1:
thorpy.launch_blocking_alert("Your category is now "+cat_after+\
"!\nThis is very deceptive, "+parameters.HERO_NAME+".\n\n"+\
"The track length in this category is 1000m shorter.")
parameters.ZFINISH -= 1000
parameters.CURRENT_QUALITY -= 0.5
parameters.flush()
if parameters.player.ranking == parameters.players[0].ranking:
scenario.launch_end()
if parameters.player.vessel.life <=0:
parameters.player.vessel.life = 1
parameters.player.vessel.visible = True
if random.random() < parameters.MERCHANT_PROBABILITY:
garage.buy_part(None)
e_title = thorpy.make_text("The Phantom Racer", 25, (255,0,0))
e_play = thorpy.make_button("Start new game", play)
e_disp,vs = gamelogic.get_display_options()
e_font = thorpy.make_font_options_setter("./metadata", "Font options")
e_about = thorpy.make_button("About", launch_about)
e_quit = thorpy.make_button("Quit", thorpy.functions.quit_menu_func)
elements = [e_title,e_play,e_disp,e_font,e_about,e_quit]
background = thorpy.load_image("PaulinaRiva.png")
background = thorpy.get_resized_image(background,
(parameters.W,parameters.H//2),
type_=max)
e_bckgr = thorpy.Background.make(image=background,elements=elements)
thorpy.store(e_bckgr)
e_title.move((0,-50))
m = thorpy.Menu(e_bckgr)
m.play()
app.quit()
#if other display bugs show up: use if len(p) == len(thing.points): in draw...
#be careful:
# cam needs to know everything!!!! (for moving objects)
##OverflowError: signed short integer is greater than maximum
# ==> if it happens again, do as for Object3D with a check on absolute values
#check whether Object3D's refresh() would be better off using the GRU version (cf refresh)
| |
"""Composable matchers for HTTP headers."""
import re
RE_TYPE = type(re.compile(r''))
class BaseMatcher:
"""Matcher base class."""
def match(self, request):
"""
Check HTTP request headers against some criteria.
This method checks whether the request headers satisfy some
criteria or not. Subclasses should override it and provide a
specialized implementation.
The default implementation just returns False.
`request`: a Django request.
returns: a boolean.
"""
return False
def __invert__(self):
return Not(self)
def __and__(self, other):
return And(self, other)
def __or__(self, other):
return Or(self, other)
def __xor__(self, other):
return Xor(self, other)
class And(BaseMatcher):
"""Composite matcher that implements the bitwise AND operation."""
def __init__(self, matcher1, matcher2):
"""
Initialize the instance.
`matcher1`, `matcher2`: matchers of any type.
"""
self._matchers = (matcher1, matcher2)
def match(self, request):
"""
Compute the bitwise AND between the results of two matchers.
`request`: a Django request.
returns: a boolean.
"""
return all(matcher.match(request) for matcher in self._matchers)
def __repr__(self):
return '({!r} & {!r})'.format(*self._matchers)
class Or(BaseMatcher):
"""Composite matcher that implements the bitwise OR operation."""
def __init__(self, matcher1, matcher2):
"""
Initialize the instance.
`matcher1`, `matcher2`: matchers of any type.
"""
self._matchers = (matcher1, matcher2)
def match(self, request):
"""
Compute the bitwise OR between the results of two matchers.
`request`: a Django request.
returns: a boolean.
"""
return any(matcher.match(request) for matcher in self._matchers)
def __repr__(self):
return '({!r} | {!r})'.format(*self._matchers)
class Xor(BaseMatcher):
"""Composite matcher that implements the bitwise XOR operation."""
def __init__(self, matcher1, matcher2):
"""
Initialize the instance.
`matcher1`, `matcher2`: matchers of any type.
"""
self._matcher1 = matcher1
self._matcher2 = matcher2
def match(self, request):
"""
Compute the bitwise XOR between the results of two matchers.
`request`: a Django request.
returns: a boolean.
"""
return self._matcher1.match(request) is not self._matcher2.match(request)
def __repr__(self):
return '({!r} ^ {!r})'.format(self._matcher1, self._matcher2)
class Not(BaseMatcher):
"""Composite matcher that implements the bitwise NOT operation."""
def __init__(self, matcher):
"""
Initialize the instance.
`matcher`: a matcher of any type.
"""
self._matcher = matcher
def match(self, request):
"""
Compute the bitwise NOT of the result of a matcher.
`request`: a Django request.
returns: a boolean.
"""
return not self._matcher.match(request)
def __repr__(self):
return '~{!r}'.format(self._matcher)
class Header(BaseMatcher):
"""HTTP header matcher."""
def __init__(self, name, value):
"""
Initialize the instance.
`name`: a header name, as string.
`value`: a header value, as string, compiled regular expression
object, or iterable of strings.
"""
self._name = name
self._value = value
self._compare_value = self._get_value_comparison_method()
def _get_value_comparison_method(self):
if isinstance(self._value, RE_TYPE):
return self._compare_value_to_re_object
if isinstance(self._value, str):
return self._compare_value_to_str
return self._compare_value_to_iterable
def _compare_value_to_re_object(self, request_value):
return bool(self._value.fullmatch(request_value))
def _compare_value_to_str(self, request_value):
return request_value == self._value
def _compare_value_to_iterable(self, request_value):
return request_value in set(self._value)
def match(self, request):
"""
Inspect a request for headers with given name and value.
This method checks whether:
a) the request contains a header with the same exact name the
matcher has been initialized with, and
b) the header value is equal, matches, or belongs to the value
the matcher has been initialized with, depending on that being
respectively a string, a compiled regexp object, or an iterable
of strings.
`request`: a Django request.
returns: a boolean.
"""
try:
request_value = request.META[self._name]
except KeyError:
return False
else:
return self._compare_value(request_value)
def __repr__(self):
return '{}({!r}, {!r})'.format(self.__class__.__name__, self._name, self._value)
class HeaderRegexp(BaseMatcher):
"""HTTP header matcher based on regular expressions."""
def __init__(self, name_re, value_re):
"""
Initialize the instance.
`name_re`: a header name, as regexp string or compiled regexp
object.
`value_re`: a header value, as regexp string or compiled regexp
object.
"""
self._name_re = re.compile(name_re)
self._value_re = re.compile(value_re)
def match(self, request):
"""
Inspect a request for headers that match regular expressions.
This method checks whether the request contains at least one
header whose name and value match the respective regexps the
matcher has been initialized with.
`request`: a Django request.
returns: a boolean.
"""
for name, value in request.META.items():
if self._name_re.fullmatch(name) and self._value_re.fullmatch(value):
return True
return False
def __repr__(self):
return '{}({!r}, {!r})'.format(self.__class__.__name__, self._name_re, self._value_re)
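# A minimal usage sketch, guarded so it never runs on import. The header
# names are ordinary WSGI-style request.META keys; the fake request class
# below is an assumption used only for demonstration.
if __name__ == '__main__':
    json_api = Header('HTTP_ACCEPT', 'application/json')
    ajax = Header('HTTP_X_REQUESTED_WITH', 'XMLHttpRequest')
    token = HeaderRegexp(r'HTTP_X_.*', r'.*token.*')
    matcher = (json_api & ajax) | ~token

    class FakeRequest:
        META = {'HTTP_ACCEPT': 'application/json',
                'HTTP_X_REQUESTED_WITH': 'XMLHttpRequest'}

    print(matcher)                        # composed repr of the expression
    print(matcher.match(FakeRequest()))   # -> True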