repo_name
stringlengths
5
100
ref
stringlengths
12
67
path
stringlengths
4
244
copies
stringlengths
1
8
content
stringlengths
0
1.05M
supertylerc/trigger
refs/heads/develop
trigger/packages/tftpy/TftpServer.py
17
"""This module implements the TFTP Server functionality. Instantiate an instance of the server, and then run the listen() method to listen for client requests. Logging is performed via a standard logging object set in TftpShared.""" import socket, os, time import select from TftpShared import * from TftpPacketTypes import * from TftpPacketFactory import TftpPacketFactory from TftpContexts import TftpContextServer class TftpServer(TftpSession): """This class implements a tftp server object. Run the listen() method to listen for client requests. It takes two optional arguments. tftproot is the path to the tftproot directory to serve files from and/or write them to. dyn_file_func is a callable that must return a file-like object to read from during downloads. This permits the serving of dynamic content.""" def __init__(self, tftproot='/tftpboot', dyn_file_func=None): self.listenip = None self.listenport = None self.sock = None # FIXME: What about multiple roots? self.root = os.path.abspath(tftproot) self.dyn_file_func = dyn_file_func # A dict of sessions, where each session is keyed by a string like # ip:tid for the remote end. self.sessions = {} if os.path.exists(self.root): log.debug("tftproot %s does exist" % self.root) if not os.path.isdir(self.root): raise TftpException, "The tftproot must be a directory." else: log.debug("tftproot %s is a directory" % self.root) if os.access(self.root, os.R_OK): log.debug("tftproot %s is readable" % self.root) else: raise TftpException, "The tftproot must be readable" if os.access(self.root, os.W_OK): log.debug("tftproot %s is writable" % self.root) else: log.warning("The tftproot %s is not writable" % self.root) else: raise TftpException, "The tftproot does not exist." def listen(self, listenip="", listenport=DEF_TFTP_PORT, timeout=SOCK_TIMEOUT): """Start a server listening on the supplied interface and port. This defaults to INADDR_ANY (all interfaces) and UDP port 69. 
You can also supply a different socket timeout value, if desired.""" tftp_factory = TftpPacketFactory() # Don't use new 2.5 ternary operator yet # listenip = listenip if listenip else '0.0.0.0' if not listenip: listenip = '0.0.0.0' log.info("Server requested on ip %s, port %s" % (listenip, listenport)) try: # FIXME - sockets should be non-blocking self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) self.sock.bind((listenip, listenport)) except socket.error, err: # Reraise it for now. raise log.info("Starting receive loop...") while True: # Build the inputlist array of sockets to select() on. inputlist = [] inputlist.append(self.sock) for key in self.sessions: inputlist.append(self.sessions[key].sock) # Block until some socket has input on it. log.debug("Performing select on this inputlist: %s" % inputlist) readyinput, readyoutput, readyspecial = select.select(inputlist, [], [], SOCK_TIMEOUT) deletion_list = [] # Handle the available data, if any. Maybe we timed-out. for readysock in readyinput: # Is the traffic on the main server socket? ie. new session? if readysock == self.sock: log.debug("Data ready on our main socket") buffer, (raddress, rport) = self.sock.recvfrom(MAX_BLKSIZE) log.debug("Read %d bytes" % len(buffer)) # Forge a session key based on the client's IP and port, # which should safely work through NAT. key = "%s:%s" % (raddress, rport) if not self.sessions.has_key(key): log.debug("Creating new server context for " "session key = %s" % key) self.sessions[key] = TftpContextServer(raddress, rport, timeout, self.root, self.dyn_file_func) try: self.sessions[key].start(buffer) except TftpException, err: deletion_list.append(key) log.error("Fatal exception thrown from " "session %s: %s" % (key, str(err))) else: log.warn("received traffic on main socket for " "existing session??") log.info("Currently handling these sessions:") for session_key, session in self.sessions.items(): log.info(" %s" % session) else: # Must find the owner of this traffic. 
for key in self.sessions: if readysock == self.sessions[key].sock: log.info("Matched input to session key %s" % key) try: self.sessions[key].cycle() if self.sessions[key].state == None: log.info("Successful transfer.") deletion_list.append(key) except TftpException, err: deletion_list.append(key) log.error("Fatal exception thrown from " "session %s: %s" % (key, str(err))) # Break out of for loop since we found the correct # session. break else: log.error("Can't find the owner for this packet. " "Discarding.") log.debug("Looping on all sessions to check for timeouts") now = time.time() for key in self.sessions: try: self.sessions[key].checkTimeout(now) except TftpTimeout, err: log.error(str(err)) self.sessions[key].retry_count += 1 if self.sessions[key].retry_count >= TIMEOUT_RETRIES: log.debug("hit max retries on %s, giving up" % self.sessions[key]) deletion_list.append(key) else: log.debug("resending on session %s" % self.sessions[key]) self.sessions[key].state.resendLast() log.debug("Iterating deletion list.") for key in deletion_list: log.info('') log.info("Session %s complete" % key) if self.sessions.has_key(key): log.debug("Gathering up metrics from session before deleting") self.sessions[key].end() metrics = self.sessions[key].metrics if metrics.duration == 0: log.info("Duration too short, rate undetermined") else: log.info("Transferred %d bytes in %.2f seconds" % (metrics.bytes, metrics.duration)) log.info("Average rate: %.2f kbps" % metrics.kbps) log.info("%.2f bytes in resent data" % metrics.resent_bytes) log.info("%d duplicate packets" % metrics.dupcount) log.debug("Deleting session %s" % key) del self.sessions[key] log.debug("Session list is now %s" % self.sessions) else: log.warn("Strange, session %s is not on the deletion list" % key)
zxtstarry/src
refs/heads/master
trip/test/asg/config.py
20
CC = 'mpicc' CCFLAGS = '-O3 -pedantic -Wunused -Wno-long-long -D_FILE_OFFSET_BITS=64 -D_LARGEFILE_SOURCE -D_GNU_SOURCE -DIWAVE_USE_MPI' CFLAGS = '-std=c99 -Wimplicit' CXX = 'mpicxx'
tejasnikumbh/ThesisCode
refs/heads/master
lib/python2.7/site-packages/numpy/lib/tests/test_function_base.py
27
from __future__ import division, absolute_import, print_function import warnings import sys import numpy as np from numpy.testing import ( run_module_suite, TestCase, assert_, assert_equal, assert_array_equal, assert_almost_equal, assert_array_almost_equal, assert_raises, assert_allclose, assert_array_max_ulp, assert_warns, assert_raises_regex, dec ) from numpy.random import rand from numpy.lib import * from numpy.compat import long class TestAny(TestCase): def test_basic(self): y1 = [0, 0, 1, 0] y2 = [0, 0, 0, 0] y3 = [1, 0, 1, 0] assert_(np.any(y1)) assert_(np.any(y3)) assert_(not np.any(y2)) def test_nd(self): y1 = [[0, 0, 0], [0, 1, 0], [1, 1, 0]] assert_(np.any(y1)) assert_array_equal(np.sometrue(y1, axis=0), [1, 1, 0]) assert_array_equal(np.sometrue(y1, axis=1), [0, 1, 1]) class TestAll(TestCase): def test_basic(self): y1 = [0, 1, 1, 0] y2 = [0, 0, 0, 0] y3 = [1, 1, 1, 1] assert_(not np.all(y1)) assert_(np.all(y3)) assert_(not np.all(y2)) assert_(np.all(~np.array(y2))) def test_nd(self): y1 = [[0, 0, 1], [0, 1, 1], [1, 1, 1]] assert_(not np.all(y1)) assert_array_equal(np.alltrue(y1, axis=0), [0, 0, 1]) assert_array_equal(np.alltrue(y1, axis=1), [0, 0, 1]) class TestCopy(TestCase): def test_basic(self): a = np.array([[1, 2], [3, 4]]) a_copy = np.copy(a) assert_array_equal(a, a_copy) a_copy[0, 0] = 10 assert_equal(a[0, 0], 1) assert_equal(a_copy[0, 0], 10) def test_order(self): # It turns out that people rely on np.copy() preserving order by # default; changing this broke scikit-learn: # https://github.com/scikit-learn/scikit-learn/commit/7842748cf777412c506a8c0ed28090711d3a3783 a = np.array([[1, 2], [3, 4]]) assert_(a.flags.c_contiguous) assert_(not a.flags.f_contiguous) a_fort = np.array([[1, 2], [3, 4]], order="F") assert_(not a_fort.flags.c_contiguous) assert_(a_fort.flags.f_contiguous) a_copy = np.copy(a) assert_(a_copy.flags.c_contiguous) assert_(not a_copy.flags.f_contiguous) a_fort_copy = np.copy(a_fort) assert_(not a_fort_copy.flags.c_contiguous) 
assert_(a_fort_copy.flags.f_contiguous) class TestAverage(TestCase): def test_basic(self): y1 = np.array([1, 2, 3]) assert_(average(y1, axis=0) == 2.) y2 = np.array([1., 2., 3.]) assert_(average(y2, axis=0) == 2.) y3 = [0., 0., 0.] assert_(average(y3, axis=0) == 0.) y4 = np.ones((4, 4)) y4[0, 1] = 0 y4[1, 0] = 2 assert_almost_equal(y4.mean(0), average(y4, 0)) assert_almost_equal(y4.mean(1), average(y4, 1)) y5 = rand(5, 5) assert_almost_equal(y5.mean(0), average(y5, 0)) assert_almost_equal(y5.mean(1), average(y5, 1)) y6 = np.matrix(rand(5, 5)) assert_array_equal(y6.mean(0), average(y6, 0)) def test_weights(self): y = np.arange(10) w = np.arange(10) actual = average(y, weights=w) desired = (np.arange(10) ** 2).sum()*1. / np.arange(10).sum() assert_almost_equal(actual, desired) y1 = np.array([[1, 2, 3], [4, 5, 6]]) w0 = [1, 2] actual = average(y1, weights=w0, axis=0) desired = np.array([3., 4., 5.]) assert_almost_equal(actual, desired) w1 = [0, 0, 1] actual = average(y1, weights=w1, axis=1) desired = np.array([3., 6.]) assert_almost_equal(actual, desired) # This should raise an error. Can we test for that ? # assert_equal(average(y1, weights=w1), 9./2.) # 2D Case w2 = [[0, 0, 1], [0, 0, 2]] desired = np.array([3., 6.]) assert_array_equal(average(y1, weights=w2, axis=1), desired) assert_equal(average(y1, weights=w2), 5.) def test_returned(self): y = np.array([[1, 2, 3], [4, 5, 6]]) # No weights avg, scl = average(y, returned=True) assert_equal(scl, 6.) 
avg, scl = average(y, 0, returned=True) assert_array_equal(scl, np.array([2., 2., 2.])) avg, scl = average(y, 1, returned=True) assert_array_equal(scl, np.array([3., 3.])) # With weights w0 = [1, 2] avg, scl = average(y, weights=w0, axis=0, returned=True) assert_array_equal(scl, np.array([3., 3., 3.])) w1 = [1, 2, 3] avg, scl = average(y, weights=w1, axis=1, returned=True) assert_array_equal(scl, np.array([6., 6.])) w2 = [[0, 0, 1], [1, 2, 3]] avg, scl = average(y, weights=w2, axis=1, returned=True) assert_array_equal(scl, np.array([1., 6.])) class TestSelect(TestCase): choices = [np.array([1, 2, 3]), np.array([4, 5, 6]), np.array([7, 8, 9])] conditions = [np.array([False, False, False]), np.array([False, True, False]), np.array([False, False, True])] def _select(self, cond, values, default=0): output = [] for m in range(len(cond)): output += [V[m] for V, C in zip(values, cond) if C[m]] or [default] return output def test_basic(self): choices = self.choices conditions = self.conditions assert_array_equal(select(conditions, choices, default=15), self._select(conditions, choices, default=15)) assert_equal(len(choices), 3) assert_equal(len(conditions), 3) def test_broadcasting(self): conditions = [np.array(True), np.array([False, True, False])] choices = [1, np.arange(12).reshape(4, 3)] assert_array_equal(select(conditions, choices), np.ones((4, 3))) # default can broadcast too: assert_equal(select([True], [0], default=[0]).shape, (1,)) def test_return_dtype(self): assert_equal(select(self.conditions, self.choices, 1j).dtype, np.complex_) # But the conditions need to be stronger then the scalar default # if it is scalar. 
choices = [choice.astype(np.int8) for choice in self.choices] assert_equal(select(self.conditions, choices).dtype, np.int8) d = np.array([1, 2, 3, np.nan, 5, 7]) m = np.isnan(d) assert_equal(select([m], [d]), [0, 0, 0, np.nan, 0, 0]) def test_deprecated_empty(self): with warnings.catch_warnings(record=True): warnings.simplefilter("always") assert_equal(select([], [], 3j), 3j) with warnings.catch_warnings(): warnings.simplefilter("always") assert_warns(DeprecationWarning, select, [], []) warnings.simplefilter("error") assert_raises(DeprecationWarning, select, [], []) def test_non_bool_deprecation(self): choices = self.choices conditions = self.conditions[:] with warnings.catch_warnings(): warnings.filterwarnings("always") conditions[0] = conditions[0].astype(np.int_) assert_warns(DeprecationWarning, select, conditions, choices) conditions[0] = conditions[0].astype(np.uint8) assert_warns(DeprecationWarning, select, conditions, choices) warnings.filterwarnings("error") assert_raises(DeprecationWarning, select, conditions, choices) def test_many_arguments(self): # This used to be limited by NPY_MAXARGS == 32 conditions = [np.array([False])] * 100 choices = [np.array([1])] * 100 select(conditions, choices) class TestInsert(TestCase): def test_basic(self): a = [1, 2, 3] assert_equal(insert(a, 0, 1), [1, 1, 2, 3]) assert_equal(insert(a, 3, 1), [1, 2, 3, 1]) assert_equal(insert(a, [1, 1, 1], [1, 2, 3]), [1, 1, 2, 3, 2, 3]) assert_equal(insert(a, 1, [1, 2, 3]), [1, 1, 2, 3, 2, 3]) assert_equal(insert(a, [1, -1, 3], 9), [1, 9, 2, 9, 3, 9]) assert_equal(insert(a, slice(-1, None, -1), 9), [9, 1, 9, 2, 9, 3]) assert_equal(insert(a, [-1, 1, 3], [7, 8, 9]), [1, 8, 2, 7, 3, 9]) b = np.array([0, 1], dtype=np.float64) assert_equal(insert(b, 0, b[0]), [0., 0., 1.]) assert_equal(insert(b, [], []), b) # Bools will be treated differently in the future: #assert_equal(insert(a, np.array([True]*4), 9), [9,1,9,2,9,3,9]) with warnings.catch_warnings(record=True) as w: 
warnings.filterwarnings('always', '', FutureWarning) assert_equal( insert(a, np.array([True]*4), 9), [1, 9, 9, 9, 9, 2, 3]) assert_(w[0].category is FutureWarning) def test_multidim(self): a = [[1, 1, 1]] r = [[2, 2, 2], [1, 1, 1]] assert_equal(insert(a, 0, [1]), [1, 1, 1, 1]) assert_equal(insert(a, 0, [2, 2, 2], axis=0), r) assert_equal(insert(a, 0, 2, axis=0), r) assert_equal(insert(a, 2, 2, axis=1), [[1, 1, 2, 1]]) a = np.array([[1, 1], [2, 2], [3, 3]]) b = np.arange(1, 4).repeat(3).reshape(3, 3) c = np.concatenate( (a[:, 0:1], np.arange(1, 4).repeat(3).reshape(3, 3).T, a[:, 1:2]), axis=1) assert_equal(insert(a, [1], [[1], [2], [3]], axis=1), b) assert_equal(insert(a, [1], [1, 2, 3], axis=1), c) # scalars behave differently, in this case exactly opposite: assert_equal(insert(a, 1, [1, 2, 3], axis=1), b) assert_equal(insert(a, 1, [[1], [2], [3]], axis=1), c) a = np.arange(4).reshape(2, 2) assert_equal(insert(a[:, :1], 1, a[:, 1], axis=1), a) assert_equal(insert(a[:1, :], 1, a[1, :], axis=0), a) # negative axis value a = np.arange(24).reshape((2, 3, 4)) assert_equal(insert(a, 1, a[:, :, 3], axis=-1), insert(a, 1, a[:, :, 3], axis=2)) assert_equal(insert(a, 1, a[:, 2, :], axis=-2), insert(a, 1, a[:, 2, :], axis=1)) # invalid axis value assert_raises(IndexError, insert, a, 1, a[:, 2, :], axis=3) assert_raises(IndexError, insert, a, 1, a[:, 2, :], axis=-4) # negative axis value a = np.arange(24).reshape((2,3,4)) assert_equal(insert(a, 1, a[:,:,3], axis=-1), insert(a, 1, a[:,:,3], axis=2)) assert_equal(insert(a, 1, a[:,2,:], axis=-2), insert(a, 1, a[:,2,:], axis=1)) def test_0d(self): # This is an error in the future a = np.array(1) with warnings.catch_warnings(record=True) as w: warnings.filterwarnings('always', '', DeprecationWarning) assert_equal(insert(a, [], 2, axis=0), np.array(2)) assert_(w[0].category is DeprecationWarning) def test_subclass(self): class SubClass(np.ndarray): pass a = np.arange(10).view(SubClass) assert_(isinstance(np.insert(a, 0, [0]), 
SubClass)) assert_(isinstance(np.insert(a, [], []), SubClass)) assert_(isinstance(np.insert(a, [0, 1], [1, 2]), SubClass)) assert_(isinstance(np.insert(a, slice(1, 2), [1, 2]), SubClass)) assert_(isinstance(np.insert(a, slice(1, -2, -1), []), SubClass)) # This is an error in the future: a = np.array(1).view(SubClass) assert_(isinstance(np.insert(a, 0, [0]), SubClass)) def test_index_array_copied(self): x = np.array([1, 1, 1]) np.insert([0, 1, 2], x, [3, 4, 5]) assert_equal(x, np.array([1, 1, 1])) def test_structured_array(self): a = np.array([(1, 'a'), (2, 'b'), (3, 'c')], dtype=[('foo', 'i'), ('bar', 'a1')]) val = (4, 'd') b = np.insert(a, 0, val) assert_array_equal(b[0], np.array(val, dtype=b.dtype)) val = [(4, 'd')] * 2 b = np.insert(a, [0, 2], val) assert_array_equal(b[[0, 3]], np.array(val, dtype=b.dtype)) class TestAmax(TestCase): def test_basic(self): a = [3, 4, 5, 10, -3, -5, 6.0] assert_equal(np.amax(a), 10.0) b = [[3, 6.0, 9.0], [4, 10.0, 5.0], [8, 3.0, 2.0]] assert_equal(np.amax(b, axis=0), [8.0, 10.0, 9.0]) assert_equal(np.amax(b, axis=1), [9.0, 10.0, 8.0]) class TestAmin(TestCase): def test_basic(self): a = [3, 4, 5, 10, -3, -5, 6.0] assert_equal(np.amin(a), -5.0) b = [[3, 6.0, 9.0], [4, 10.0, 5.0], [8, 3.0, 2.0]] assert_equal(np.amin(b, axis=0), [3.0, 3.0, 2.0]) assert_equal(np.amin(b, axis=1), [3.0, 4.0, 2.0]) class TestPtp(TestCase): def test_basic(self): a = [3, 4, 5, 10, -3, -5, 6.0] assert_equal(np.ptp(a, axis=0), 15.0) b = [[3, 6.0, 9.0], [4, 10.0, 5.0], [8, 3.0, 2.0]] assert_equal(np.ptp(b, axis=0), [5.0, 7.0, 7.0]) assert_equal(np.ptp(b, axis=-1), [6.0, 6.0, 6.0]) class TestCumsum(TestCase): def test_basic(self): ba = [1, 2, 10, 11, 6, 5, 4] ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]] for ctype in [np.int8, np.uint8, np.int16, np.uint16, np.int32, np.uint32, np.float32, np.float64, np.complex64, np.complex128]: a = np.array(ba, ctype) a2 = np.array(ba2, ctype) tgt = np.array([1, 3, 13, 24, 30, 35, 39], ctype) 
assert_array_equal(np.cumsum(a, axis=0), tgt) tgt = np.array( [[1, 2, 3, 4], [6, 8, 10, 13], [16, 11, 14, 18]], ctype) assert_array_equal(np.cumsum(a2, axis=0), tgt) tgt = np.array( [[1, 3, 6, 10], [5, 11, 18, 27], [10, 13, 17, 22]], ctype) assert_array_equal(np.cumsum(a2, axis=1), tgt) class TestProd(TestCase): def test_basic(self): ba = [1, 2, 10, 11, 6, 5, 4] ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]] for ctype in [np.int16, np.uint16, np.int32, np.uint32, np.float32, np.float64, np.complex64, np.complex128]: a = np.array(ba, ctype) a2 = np.array(ba2, ctype) if ctype in ['1', 'b']: self.assertRaises(ArithmeticError, prod, a) self.assertRaises(ArithmeticError, prod, a2, 1) self.assertRaises(ArithmeticError, prod, a) else: assert_equal(np.prod(a, axis=0), 26400) assert_array_equal(np.prod(a2, axis=0), np.array([50, 36, 84, 180], ctype)) assert_array_equal(np.prod(a2, axis=-1), np.array([24, 1890, 600], ctype)) class TestCumprod(TestCase): def test_basic(self): ba = [1, 2, 10, 11, 6, 5, 4] ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]] for ctype in [np.int16, np.uint16, np.int32, np.uint32, np.float32, np.float64, np.complex64, np.complex128]: a = np.array(ba, ctype) a2 = np.array(ba2, ctype) if ctype in ['1', 'b']: self.assertRaises(ArithmeticError, cumprod, a) self.assertRaises(ArithmeticError, cumprod, a2, 1) self.assertRaises(ArithmeticError, cumprod, a) else: assert_array_equal(np.cumprod(a, axis=-1), np.array([1, 2, 20, 220, 1320, 6600, 26400], ctype)) assert_array_equal(np.cumprod(a2, axis=0), np.array([[1, 2, 3, 4], [5, 12, 21, 36], [50, 36, 84, 180]], ctype)) assert_array_equal(np.cumprod(a2, axis=-1), np.array([[1, 2, 6, 24], [5, 30, 210, 1890], [10, 30, 120, 600]], ctype)) class TestDiff(TestCase): def test_basic(self): x = [1, 4, 6, 7, 12] out = np.array([3, 2, 1, 5]) out2 = np.array([-1, -1, 4]) out3 = np.array([0, 5]) assert_array_equal(diff(x), out) assert_array_equal(diff(x, n=2), out2) assert_array_equal(diff(x, n=3), out3) def 
test_nd(self): x = 20 * rand(10, 20, 30) out1 = x[:, :, 1:] - x[:, :, :-1] out2 = out1[:, :, 1:] - out1[:, :, :-1] out3 = x[1:, :, :] - x[:-1, :, :] out4 = out3[1:, :, :] - out3[:-1, :, :] assert_array_equal(diff(x), out1) assert_array_equal(diff(x, n=2), out2) assert_array_equal(diff(x, axis=0), out3) assert_array_equal(diff(x, n=2, axis=0), out4) class TestDelete(TestCase): def setUp(self): self.a = np.arange(5) self.nd_a = np.arange(5).repeat(2).reshape(1, 5, 2) def _check_inverse_of_slicing(self, indices): a_del = delete(self.a, indices) nd_a_del = delete(self.nd_a, indices, axis=1) msg = 'Delete failed for obj: %r' % indices # NOTE: The cast should be removed after warning phase for bools if not isinstance(indices, (slice, int, long, np.integer)): indices = np.asarray(indices, dtype=np.intp) indices = indices[(indices >= 0) & (indices < 5)] assert_array_equal(setxor1d(a_del, self.a[indices, ]), self.a, err_msg=msg) xor = setxor1d(nd_a_del[0, :, 0], self.nd_a[0, indices, 0]) assert_array_equal(xor, self.nd_a[0, :, 0], err_msg=msg) def test_slices(self): lims = [-6, -2, 0, 1, 2, 4, 5] steps = [-3, -1, 1, 3] for start in lims: for stop in lims: for step in steps: s = slice(start, stop, step) self._check_inverse_of_slicing(s) def test_fancy(self): # Deprecation/FutureWarning tests should be kept after change. 
self._check_inverse_of_slicing(np.array([[0, 1], [2, 1]])) with warnings.catch_warnings(): warnings.filterwarnings('error', category=DeprecationWarning) assert_raises(DeprecationWarning, delete, self.a, [100]) assert_raises(DeprecationWarning, delete, self.a, [-100]) with warnings.catch_warnings(record=True) as w: warnings.filterwarnings('always', category=FutureWarning) self._check_inverse_of_slicing([0, -1, 2, 2]) obj = np.array([True, False, False], dtype=bool) self._check_inverse_of_slicing(obj) assert_(w[0].category is FutureWarning) assert_(w[1].category is FutureWarning) def test_single(self): self._check_inverse_of_slicing(0) self._check_inverse_of_slicing(-4) def test_0d(self): a = np.array(1) with warnings.catch_warnings(record=True) as w: warnings.filterwarnings('always', '', DeprecationWarning) assert_equal(delete(a, [], axis=0), a) assert_(w[0].category is DeprecationWarning) def test_subclass(self): class SubClass(np.ndarray): pass a = self.a.view(SubClass) assert_(isinstance(delete(a, 0), SubClass)) assert_(isinstance(delete(a, []), SubClass)) assert_(isinstance(delete(a, [0, 1]), SubClass)) assert_(isinstance(delete(a, slice(1, 2)), SubClass)) assert_(isinstance(delete(a, slice(1, -2)), SubClass)) class TestGradient(TestCase): def test_basic(self): v = [[1, 1], [3, 4]] x = np.array(v) dx = [np.array([[2., 3.], [2., 3.]]), np.array([[0., 0.], [1., 1.]])] assert_array_equal(gradient(x), dx) assert_array_equal(gradient(v), dx) def test_badargs(self): # for 2D array, gradient can take 0, 1, or 2 extra args x = np.array([[1, 1], [3, 4]]) assert_raises(SyntaxError, gradient, x, np.array([1., 1.]), np.array([1., 1.]), np.array([1., 1.])) def test_masked(self): # Make sure that gradient supports subclasses like masked arrays x = np.ma.array([[1, 1], [3, 4]], mask=[[False, False], [False, False]]) out = gradient(x)[0] assert_equal(type(out), type(x)) # And make sure that the output and input don't have aliased mask # arrays assert_(x.mask is not out.mask) # 
        # Also check that edge_order=2 doesn't alter the original mask
        x2 = np.ma.arange(5)
        x2[2] = np.ma.masked
        np.gradient(x2, edge_order=2)
        assert_array_equal(x2.mask, [False, False, True, False, False])

    def test_datetime64(self):
        # Make sure gradient() can handle special types like datetime64
        x = np.array(
            ['1910-08-16', '1910-08-11', '1910-08-10', '1910-08-12',
             '1910-10-12', '1910-12-12', '1912-12-12'],
            dtype='datetime64[D]')
        dx = np.array(
            [-5, -3, 0, 31, 61, 396, 731],
            dtype='timedelta64[D]')
        assert_array_equal(gradient(x), dx)
        assert_(dx.dtype == np.dtype('timedelta64[D]'))

    def test_timedelta64(self):
        # Make sure gradient() can handle special types like timedelta64
        x = np.array(
            [-5, -3, 10, 12, 61, 321, 300],
            dtype='timedelta64[D]')
        dx = np.array(
            [2, 7, 7, 25, 154, 119, -21],
            dtype='timedelta64[D]')
        assert_array_equal(gradient(x), dx)
        assert_(dx.dtype == np.dtype('timedelta64[D]'))

    def test_second_order_accurate(self):
        # Testing that the relative numerical error is less that 3% for
        # this example problem. This corresponds to second order
        # accurate finite differences for all interior and boundary
        # points.
        x = np.linspace(0, 1, 10)
        dx = x[1] - x[0]
        y = 2 * x ** 3 + 4 * x ** 2 + 2 * x
        analytical = 6 * x ** 2 + 8 * x + 2
        num_error = np.abs((np.gradient(y, dx, edge_order=2) / analytical) - 1)
        assert_(np.all(num_error < 0.03) == True)


class TestAngle(TestCase):
    # Tests np.angle against hand-computed arctan values, in radians
    # (default) and degrees (deg=1).

    def test_basic(self):
        x = [1 + 3j, np.sqrt(2) / 2.0 + 1j * np.sqrt(2) / 2,
             1, 1j, -1, -1j, 1 - 3j, -1 + 3j]
        y = angle(x)
        yo = [
            np.arctan(3.0 / 1.0),
            np.arctan(1.0), 0, np.pi / 2, np.pi, -np.pi / 2.0,
            -np.arctan(3.0 / 1.0), np.pi - np.arctan(3.0 / 1.0)]
        z = angle(x, deg=1)
        zo = np.array(yo) * 180 / np.pi
        assert_array_almost_equal(y, yo, 11)
        assert_array_almost_equal(z, zo, 11)


class TestTrimZeros(TestCase):

    """
    only testing for integer splits.

    """

    def test_basic(self):
        a = np.array([0, 0, 1, 2, 3, 4, 0])
        res = trim_zeros(a)
        assert_array_equal(res, np.array([1, 2, 3, 4]))

    def test_leading_skip(self):
        # An interior zero just after the first nonzero must be kept.
        a = np.array([0, 0, 1, 0, 2, 3, 4, 0])
        res = trim_zeros(a)
        assert_array_equal(res, np.array([1, 0, 2, 3, 4]))

    def test_trailing_skip(self):
        # An interior zero just before the last nonzero must be kept.
        a = np.array([0, 0, 1, 0, 2, 3, 0, 4, 0])
        res = trim_zeros(a)
        assert_array_equal(res, np.array([1, 0, 2, 3, 0, 4]))


class TestExtins(TestCase):

    def test_basic(self):
        a = np.array([1, 3, 2, 1, 2, 3, 3])
        b = extract(a > 1, a)
        assert_array_equal(b, [3, 2, 2, 3, 3])

    def test_place(self):
        a = np.array([1, 4, 3, 2, 5, 8, 7])
        place(a, [0, 1, 0, 1, 0, 1, 0], [2, 4, 6])
        assert_array_equal(a, [1, 2, 3, 4, 5, 6, 7])

    def test_both(self):
        # extract() followed by place() with the extracted values must
        # restore the original array.
        a = rand(10)
        mask = a > 0.5
        ac = a.copy()
        c = extract(mask, a)
        place(a, mask, 0)
        place(a, mask, c)
        assert_array_equal(a, ac)


class TestVectorize(TestCase):

    def test_simple(self):
        def addsubtract(a, b):
            if a > b:
                return a - b
            else:
                return a + b
        f = vectorize(addsubtract)
        r = f([0, 3, 6, 9], [1, 3, 5, 7])
        assert_array_equal(r, [1, 6, 1, 2])

    def test_scalar(self):
        # Scalar second argument is broadcast against the array first one.
        def addsubtract(a, b):
            if a > b:
                return a - b
            else:
                return a + b
        f = vectorize(addsubtract)
        r = f([0, 3, 6, 9], 5)
        assert_array_equal(r, [5, 8, 1, 4])

    def test_large(self):
        x = np.linspace(-3, 2, 10000)
        f = vectorize(lambda x: x)
        y = f(x)
        assert_array_equal(y, x)

    def test_ufunc(self):
        import math
        f = vectorize(math.cos)
        args = np.array([0, 0.5*np.pi, np.pi, 1.5*np.pi, 2*np.pi])
        r1 = f(args)
        r2 = np.cos(args)
        assert_array_equal(r1, r2)

    def test_keywords(self):
        import math

        def foo(a, b=1):
            return a + b
        f = vectorize(foo)
        args = np.array([1, 2, 3])
        r1 = f(args)
        r2 = np.array([2, 3, 4])
        assert_array_equal(r1, r2)
        r1 = f(args, 2)
        r2 = np.array([3, 4, 5])
        assert_array_equal(r1, r2)

    def test_keywords_no_func_code(self):
        # This needs to test a function that has keywords but
        # no func_code attribute, since otherwise vectorize will
        # inspect the func_code.
        import random
        try:
            f = vectorize(random.randrange)
        except:
            raise AssertionError()

    def test_keywords2_ticket_2100(self):
        r"""Test kwarg support: enhancement ticket 2100"""
        import math

        def foo(a, b=1):
            return a + b
        f = vectorize(foo)
        args = np.array([1, 2, 3])
        r1 = f(a=args)
        r2 = np.array([2, 3, 4])
        assert_array_equal(r1, r2)
        r1 = f(b=1, a=args)
        assert_array_equal(r1, r2)
        r1 = f(args, b=2)
        r2 = np.array([3, 4, 5])
        assert_array_equal(r1, r2)

    def test_keywords3_ticket_2100(self):
        """Test excluded with mixed positional and kwargs: ticket 2100"""
        def mypolyval(x, p):
            _p = list(p)
            res = _p.pop(0)
            while _p:
                res = res*x + _p.pop(0)
            return res
        # 'p' is excluded both by name and by position (index 1).
        vpolyval = np.vectorize(mypolyval, excluded=['p', 1])
        ans = [3, 6]
        assert_array_equal(ans, vpolyval(x=[0, 1], p=[1, 2, 3]))
        assert_array_equal(ans, vpolyval([0, 1], p=[1, 2, 3]))
        assert_array_equal(ans, vpolyval([0, 1], [1, 2, 3]))

    def test_keywords4_ticket_2100(self):
        """Test vectorizing function with no positional args."""
        @vectorize
        def f(**kw):
            res = 1.0
            for _k in kw:
                res *= kw[_k]
            return res
        assert_array_equal(f(a=[1, 2], b=[3, 4]), [3, 8])

    def test_keywords5_ticket_2100(self):
        """Test vectorizing function with no kwargs args."""
        @vectorize
        def f(*v):
            return np.prod(v)
        assert_array_equal(f([1, 2], [3, 4]), [3, 8])

    def test_coverage1_ticket_2100(self):
        def foo():
            return 1
        f = vectorize(foo)
        assert_array_equal(f(), 1)

    def test_assigning_docstring(self):
        def foo(x):
            return x
        doc = "Provided documentation"
        f = vectorize(foo, doc=doc)
        assert_equal(f.__doc__, doc)

    def test_UnboundMethod_ticket_1156(self):
        """Regression test for issue 1156"""
        class Foo:
            b = 2

            def bar(self, a):
                return a**self.b
        assert_array_equal(vectorize(Foo().bar)(np.arange(9)),
                           np.arange(9)**2)
        assert_array_equal(vectorize(Foo.bar)(Foo(), np.arange(9)),
                           np.arange(9)**2)

    def test_execution_order_ticket_1487(self):
        """Regression test for dependence on execution order: issue 1487"""
        # Two identical vectorized lambdas called with int/float inputs
        # in opposite orders must agree.
        f1 = vectorize(lambda x: x)
        res1a = f1(np.arange(3))
        res1b = f1(np.arange(0.1, 3))
        f2 = vectorize(lambda x: x)
        res2b = f2(np.arange(0.1, 3))
        res2a = f2(np.arange(3))
        assert_equal(res1a, res2a)
        assert_equal(res1b, res2b)

    def test_string_ticket_1892(self):
        """Test vectorization over strings: issue 1892."""
        f = np.vectorize(lambda x: x)
        s = '0123456789'*10
        assert_equal(s, f(s))
        #z = f(np.array([s,s]))
        #assert_array_equal([s,s], f(s))

    def test_cache(self):
        """Ensure that vectorized func called exactly once per argument."""
        _calls = [0]

        @vectorize
        def f(x):
            _calls[0] += 1
            return x**2
        f.cache = True
        x = np.arange(5)
        assert_array_equal(f(x), x*x)
        assert_equal(_calls[0], len(x))

    def test_otypes(self):
        f = np.vectorize(lambda x: x)
        f.otypes = 'i'
        x = np.arange(5)
        assert_array_equal(f(x), x)


class TestDigitize(TestCase):

    def test_forward(self):
        x = np.arange(-6, 5)
        bins = np.arange(-5, 5)
        assert_array_equal(digitize(x, bins), np.arange(11))

    def test_reverse(self):
        # Monotonically decreasing bins are also supported.
        x = np.arange(5, -6, -1)
        bins = np.arange(5, -5, -1)
        assert_array_equal(digitize(x, bins), np.arange(11))

    def test_random(self):
        x = rand(10)
        bin = np.linspace(x.min(), x.max(), 10)
        assert_(np.all(digitize(x, bin) != 0))

    def test_right_basic(self):
        x = [1, 5, 4, 10, 8, 11, 0]
        bins = [1, 5, 10]
        default_answer = [1, 2, 1, 3, 2, 3, 0]
        assert_array_equal(digitize(x, bins), default_answer)
        right_answer = [0, 1, 1, 2, 2, 3, 0]
        assert_array_equal(digitize(x, bins, True), right_answer)

    def test_right_open(self):
        x = np.arange(-6, 5)
        bins = np.arange(-6, 4)
        assert_array_equal(digitize(x, bins, True), np.arange(11))

    def test_right_open_reverse(self):
        x = np.arange(5, -6, -1)
        bins = np.arange(4, -6, -1)
        assert_array_equal(digitize(x, bins, True), np.arange(11))

    def test_right_open_random(self):
        x = rand(10)
        bins = np.linspace(x.min(), x.max(), 10)
        assert_(np.all(digitize(x, bins, True) != 10))

    def test_monotonic(self):
        x = [-1, 0, 1, 2]
        bins = [0, 0, 1]
        assert_array_equal(digitize(x, bins, False), [0, 2, 3, 3])
        assert_array_equal(digitize(x, bins, True), [0, 0, 2, 3])
        bins = [1, 1, 0]
        assert_array_equal(digitize(x, bins, False), [3, 2, 0, 0])
        assert_array_equal(digitize(x, bins, True), [3, 3, 2, 0])
        bins = [1, 1, 1, 1]
        assert_array_equal(digitize(x, bins, False), [0, 0, 4, 4])
        assert_array_equal(digitize(x, bins, True), [0, 0, 0, 4])
        # Non-monotonic bin edges must be rejected.
        bins = [0, 0, 1, 0]
        assert_raises(ValueError, digitize, x, bins)
        bins = [1, 1, 0, 1]
        assert_raises(ValueError, digitize, x, bins)


class TestUnwrap(TestCase):

    def test_simple(self):
        #check that unwrap removes jumps greather that 2*pi
        assert_array_equal(unwrap([1, 1 + 2 * np.pi]), [1, 1])
        #check that unwrap maintans continuity
        assert_(np.all(diff(unwrap(rand(10) * 100)) < np.pi))


class TestFilterwindows(TestCase):

    def test_hanning(self):
        #check symmetry
        w = hanning(10)
        assert_array_almost_equal(w, flipud(w), 7)
        #check known value
        assert_almost_equal(np.sum(w, axis=0), 4.500, 4)

    def test_hamming(self):
        #check symmetry
        w = hamming(10)
        assert_array_almost_equal(w, flipud(w), 7)
        #check known value
        assert_almost_equal(np.sum(w, axis=0), 4.9400, 4)

    def test_bartlett(self):
        #check symmetry
        w = bartlett(10)
        assert_array_almost_equal(w, flipud(w), 7)
        #check known value
        assert_almost_equal(np.sum(w, axis=0), 4.4444, 4)

    def test_blackman(self):
        #check symmetry
        w = blackman(10)
        assert_array_almost_equal(w, flipud(w), 7)
        #check known value
        assert_almost_equal(np.sum(w, axis=0), 3.7800, 4)


class TestTrapz(TestCase):

    def test_simple(self):
        x = np.arange(-10, 10, .1)
        r = trapz(np.exp(-.5*x**2) / np.sqrt(2*np.pi), dx=0.1)
        #check integral of normal equals 1
        assert_almost_equal(r, 1, 7)

    def test_ndim(self):
        x = np.linspace(0, 1, 3)
        y = np.linspace(0, 2, 8)
        z = np.linspace(0, 3, 13)

        # Trapezoid-rule weights along each axis: full spacing for
        # interior points, half at the two end points.
        wx = np.ones_like(x) * (x[1] - x[0])
        wx[0] /= 2
        wx[-1] /= 2
        wy = np.ones_like(y) * (y[1] - y[0])
        wy[0] /= 2
        wy[-1] /= 2
        wz = np.ones_like(z) * (z[1] - z[0])
        wz[0] /= 2
        wz[-1] /= 2

        q = x[:, None, None] + y[None, :, None] + z[None, None, :]

        qx = (q * wx[:, None, None]).sum(axis=0)
        qy = (q * wy[None, :, None]).sum(axis=1)
        qz = (q * wz[None, None, :]).sum(axis=2)

        # n-d `x`
        r = trapz(q, x=x[:, None, None], axis=0)
        assert_almost_equal(r, qx)
        r = trapz(q, x=y[None, :, None], axis=1)
        assert_almost_equal(r, qy)
        r = trapz(q, x=z[None, None, :], axis=2)
        assert_almost_equal(r, qz)

        # 1-d `x`
        r = trapz(q, x=x, axis=0)
        assert_almost_equal(r, qx)
        r = trapz(q, x=y, axis=1)
        assert_almost_equal(r, qy)
        r = trapz(q, x=z, axis=2)
        assert_almost_equal(r, qz)

    def test_masked(self):
        #Testing that masked arrays behave as if the function is 0 where
        #masked
        x = np.arange(5)
        y = x * x
        mask = x == 2
        ym = np.ma.array(y, mask=mask)
        r = 13.0  # sum(0.5 * (0 + 1) * 1.0 + 0.5 * (9 + 16))
        assert_almost_equal(trapz(ym, x), r)

        xm = np.ma.array(x, mask=mask)
        assert_almost_equal(trapz(ym, xm), r)

        xm = np.ma.array(x, mask=mask)
        assert_almost_equal(trapz(y, xm), r)

    def test_matrix(self):
        #Test to make sure matrices give the same answer as ndarrays
        x = np.linspace(0, 5)
        y = x * x
        r = trapz(y, x)
        mx = np.matrix(x)
        my = np.matrix(y)
        mr = trapz(my, mx)
        assert_almost_equal(mr, r)


class TestSinc(TestCase):

    def test_simple(self):
        assert_(sinc(0) == 1)
        w = sinc(np.linspace(-1, 1, 100))
        #check symmetry
        assert_array_almost_equal(w, flipud(w), 7)

    def test_array_like(self):
        # ndarray, list and tuple inputs must all give the same result.
        x = [0, 0.5]
        y1 = sinc(np.array(x))
        y2 = sinc(list(x))
        y3 = sinc(tuple(x))
        assert_array_equal(y1, y2)
        assert_array_equal(y1, y3)


class TestHistogram(TestCase):

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_simple(self):
        n = 100
        v = rand(n)
        (a, b) = histogram(v)
        #check if the sum of the bins equals the number of samples
        assert_equal(np.sum(a, axis=0), n)
        #check that the bin counts are evenly spaced when the data is from a
        # linear function
        (a, b) = histogram(np.linspace(0, 10, 100))
        assert_array_equal(a, 10)

    def test_one_bin(self):
        # Ticket 632
        hist, edges = histogram([1, 2, 3, 4], [1, 2])
        assert_array_equal(hist, [2, ])
        assert_array_equal(edges, [1, 2])
        assert_raises(ValueError, histogram, [1, 2], bins=0)
        h, e = histogram([1, 2], bins=1)
        assert_equal(h, np.array([2]))
        assert_allclose(e, np.array([1., 2.]))

    def test_normed(self):
        # Check that the integral of the density equals 1.
        n = 100
        v = rand(n)
        a, b = histogram(v, normed=True)
        area = np.sum(a * diff(b))
        assert_almost_equal(area, 1)

        # Check with non-constant bin widths (buggy but backwards compatible)
        v = np.arange(10)
        bins = [0, 1, 5, 9, 10]
        a, b = histogram(v, bins, normed=True)
        area = np.sum(a * diff(b))
        assert_almost_equal(area, 1)

    def test_density(self):
        # Check that the integral of the density equals 1.
        n = 100
        v = rand(n)
        a, b = histogram(v, density=True)
        area = np.sum(a * diff(b))
        assert_almost_equal(area, 1)

        # Check with non-constant bin widths
        v = np.arange(10)
        bins = [0, 1, 3, 6, 10]
        a, b = histogram(v, bins, density=True)
        assert_array_equal(a, .1)
        assert_equal(np.sum(a*diff(b)), 1)

        # Variale bin widths are especially useful to deal with
        # infinities.
        v = np.arange(10)
        bins = [0, 1, 3, 6, np.inf]
        a, b = histogram(v, bins, density=True)
        assert_array_equal(a, [.1, .1, .1, 0.])

        # Taken from a bug report from N. Becker on the numpy-discussion
        # mailing list Aug. 6, 2010.
        counts, dmy = np.histogram(
            [1, 2, 3, 4], [0.5, 1.5, np.inf], density=True)
        assert_equal(counts, [.25, 0])

    def test_outliers(self):
        # Check that outliers are not tallied
        a = np.arange(10) + .5

        # Lower outliers
        h, b = histogram(a, range=[0, 9])
        assert_equal(h.sum(), 9)

        # Upper outliers
        h, b = histogram(a, range=[1, 10])
        assert_equal(h.sum(), 9)

        # Normalization
        h, b = histogram(a, range=[1, 9], normed=True)
        assert_almost_equal((h * diff(b)).sum(), 1, decimal=15)

        # Weights
        w = np.arange(10) + .5
        h, b = histogram(a, range=[1, 9], weights=w, normed=True)
        assert_equal((h * diff(b)).sum(), 1)

        h, b = histogram(a, bins=8, range=[1, 9], weights=w)
        assert_equal(h, w[1:-1])

    def test_type(self):
        # Check the type of the returned histogram
        a = np.arange(10) + .5
        h, b = histogram(a)
        assert_(issubdtype(h.dtype, int))

        h, b = histogram(a, normed=True)
        assert_(issubdtype(h.dtype, float))

        h, b = histogram(a, weights=np.ones(10, int))
        assert_(issubdtype(h.dtype, int))

        h, b = histogram(a, weights=np.ones(10, float))
        assert_(issubdtype(h.dtype, float))

    def test_f32_rounding(self):
        # gh-4799, check that the rounding of the edges works with float32
        x = np.array([276.318359, -69.593948, 21.329449], dtype=np.float32)
        y = np.array([5005.689453, 4481.327637, 6010.369629],
                     dtype=np.float32)
        counts_hist, xedges, yedges = np.histogram2d(x, y, bins=100)
        assert_equal(counts_hist.sum(), 3.)

    def test_weights(self):
        v = rand(100)
        w = np.ones(100) * 5
        a, b = histogram(v)
        na, nb = histogram(v, normed=True)
        wa, wb = histogram(v, weights=w)
        nwa, nwb = histogram(v, weights=w, normed=True)
        assert_array_almost_equal(a * 5, wa)
        assert_array_almost_equal(na, nwa)

        # Check weights are properly applied.
        v = np.linspace(0, 10, 10)
        w = np.concatenate((np.zeros(5), np.ones(5)))
        wa, wb = histogram(v, bins=np.arange(11), weights=w)
        assert_array_almost_equal(wa, w)

        # Check with integer weights
        wa, wb = histogram([1, 2, 2, 4], bins=4, weights=[4, 3, 2, 1])
        assert_array_equal(wa, [4, 5, 0, 1])
        wa, wb = histogram(
            [1, 2, 2, 4], bins=4, weights=[4, 3, 2, 1], normed=True)
        assert_array_almost_equal(wa, np.array([4, 5, 0, 1]) / 10. / 3. * 4)

        # Check weights with non-uniform bin widths
        a, b = histogram(
            np.arange(9), [0, 1, 3, 6, 10],
            weights=[2, 1, 1, 1, 1, 1, 1, 1, 1], density=True)
        assert_almost_equal(a, [.2, .1, .1, .075])

    def test_empty(self):
        a, b = histogram([], bins=([0, 1]))
        assert_array_equal(a, np.array([0]))
        assert_array_equal(b, np.array([0, 1]))


class TestHistogramdd(TestCase):

    def test_simple(self):
        x = np.array([[-.5, .5, 1.5], [-.5, 1.5, 2.5], [-.5, 2.5, .5],
                      [.5, .5, 1.5], [.5, 1.5, 2.5], [.5, 2.5, 2.5]])
        H, edges = histogramdd(x, (2, 3, 3),
                               range=[[-1, 1], [0, 3], [0, 3]])
        answer = np.array([[[0, 1, 0], [0, 0, 1], [1, 0, 0]],
                           [[0, 1, 0], [0, 0, 1], [0, 0, 1]]])
        assert_array_equal(H, answer)

        # Check normalization
        ed = [[-2, 0, 2], [0, 1, 2, 3], [0, 1, 2, 3]]
        H, edges = histogramdd(x, bins=ed, normed=True)
        assert_(np.all(H == answer / 12.))

        # Check that H has the correct shape.
        H, edges = histogramdd(x, (2, 3, 4),
                               range=[[-1, 1], [0, 3], [0, 4]],
                               normed=True)
        answer = np.array([[[0, 1, 0, 0], [0, 0, 1, 0], [1, 0, 0, 0]],
                           [[0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 1, 0]]])
        assert_array_almost_equal(H, answer / 6., 4)

        # Check that a sequence of arrays is accepted and H has the correct
        # shape.
        z = [np.squeeze(y) for y in split(x, 3, axis=1)]
        H, edges = histogramdd(
            z, bins=(4, 3, 2), range=[[-2, 2], [0, 3], [0, 2]])
        answer = np.array([[[0, 0], [0, 0], [0, 0]],
                           [[0, 1], [0, 0], [1, 0]],
                           [[0, 1], [0, 0], [0, 0]],
                           [[0, 0], [0, 0], [0, 0]]])
        assert_array_equal(H, answer)

        # Identical coordinates along the diagonal: expect the identity
        # pattern in the 5x5x5 histogram.
        Z = np.zeros((5, 5, 5))
        Z[list(range(5)), list(range(5)), list(range(5))] = 1.
        H, edges = histogramdd([np.arange(5), np.arange(5), np.arange(5)], 5)
        assert_array_equal(H, Z)

    def test_shape_3d(self):
        # All possible permutations for bins of different lengths in 3D.
        bins = ((5, 4, 6), (6, 4, 5), (5, 6, 4), (4, 6, 5), (6, 5, 4),
                (4, 5, 6))
        r = rand(10, 3)
        for b in bins:
            H, edges = histogramdd(r, b)
            assert_(H.shape == b)

    def test_shape_4d(self):
        # All possible permutations for bins of different lengths in 4D.
        bins = ((7, 4, 5, 6), (4, 5, 7, 6), (5, 6, 4, 7), (7, 6, 5, 4),
                (5, 7, 6, 4), (4, 6, 7, 5), (6, 5, 7, 4), (7, 5, 4, 6),
                (7, 4, 6, 5), (6, 4, 7, 5), (6, 7, 5, 4), (4, 6, 5, 7),
                (4, 7, 5, 6), (5, 4, 6, 7), (5, 7, 4, 6), (6, 7, 4, 5),
                (6, 5, 4, 7), (4, 7, 6, 5), (4, 5, 6, 7), (7, 6, 4, 5),
                (5, 4, 7, 6), (5, 6, 7, 4), (6, 4, 5, 7), (7, 5, 6, 4))
        r = rand(10, 4)
        for b in bins:
            H, edges = histogramdd(r, b)
            assert_(H.shape == b)

    def test_weights(self):
        v = rand(100, 2)
        hist, edges = histogramdd(v)
        n_hist, edges = histogramdd(v, normed=True)
        w_hist, edges = histogramdd(v, weights=np.ones(100))
        assert_array_equal(w_hist, hist)
        w_hist, edges = histogramdd(v, weights=np.ones(100) * 2, normed=True)
        assert_array_equal(w_hist, n_hist)
        w_hist, edges = histogramdd(v, weights=np.ones(100, int) * 2)
        assert_array_equal(w_hist, 2 * hist)

    def test_identical_samples(self):
        x = np.zeros((10, 2), int)
        hist, edges = histogramdd(x, bins=2)
        assert_array_equal(edges[0], np.array([-0.5, 0., 0.5]))

    def test_empty(self):
        a, b = histogramdd([[], []], bins=([0, 1], [0, 1]))
        assert_array_max_ulp(a, np.array([[0.]]))
        a, b = np.histogramdd([[], [], []], bins=2)
        assert_array_max_ulp(a, np.zeros((2, 2, 2)))

    def test_bins_errors(self):
        """There are two ways to specify bins.

        Check for the right errors when mixing those."""
        x = np.arange(8).reshape(2, 4)
        assert_raises(ValueError, np.histogramdd, x, bins=[-1, 2, 4, 5])
        assert_raises(ValueError, np.histogramdd, x, bins=[1, 0.99, 1, 1])
        assert_raises(
            ValueError, np.histogramdd, x, bins=[1, 1, 1, [1, 2, 2, 3]])
        assert_raises(
            ValueError, np.histogramdd, x, bins=[1, 1, 1, [1, 2, 3, -3]])
        assert_(np.histogramdd(x, bins=[1, 1, 1, [1, 2, 3, 4]]))

    def test_inf_edges(self):
        """Test using +/-inf bin edges works. See #1788."""
        with np.errstate(invalid='ignore'):
            x = np.arange(6).reshape(3, 2)
            expected = np.array([[1, 0], [0, 1], [0, 1]])
            h, e = np.histogramdd(x, bins=[3, [-np.inf, 2, 10]])
            assert_allclose(h, expected)
            h, e = np.histogramdd(x, bins=[3, np.array([-1, 2, np.inf])])
            assert_allclose(h, expected)
            h, e = np.histogramdd(x, bins=[3, [-np.inf, 3, np.inf]])
            assert_allclose(h, expected)

    def test_rightmost_binedge(self):
        """Test event very close to rightmost binedge.

        See Github issue #4266"""
        x = [0.9999999995]
        bins = [[0., 0.5, 1.0]]
        hist, _ = histogramdd(x, bins=bins)
        assert_(hist[0] == 0.0)
        assert_(hist[1] == 1.)
        x = [1.0]
        bins = [[0., 0.5, 1.0]]
        hist, _ = histogramdd(x, bins=bins)
        assert_(hist[0] == 0.0)
        assert_(hist[1] == 1.)
        x = [1.0000000001]
        bins = [[0., 0.5, 1.0]]
        hist, _ = histogramdd(x, bins=bins)
        assert_(hist[0] == 0.0)
        assert_(hist[1] == 1.)
        x = [1.0001]
        bins = [[0., 0.5, 1.0]]
        hist, _ = histogramdd(x, bins=bins)
        assert_(hist[0] == 0.0)
        assert_(hist[1] == 0.0)


class TestUnique(TestCase):

    def test_simple(self):
        x = np.array([4, 3, 2, 1, 1, 2, 3, 4, 0])
        assert_(np.all(unique(x) == [0, 1, 2, 3, 4]))
        assert_(unique(np.array([1, 1, 1, 1, 1])) == np.array([1]))
        x = ['widget', 'ham', 'foo', 'bar', 'foo', 'ham']
        assert_(np.all(unique(x) == ['bar', 'foo', 'ham', 'widget']))
        x = np.array([5 + 6j, 1 + 1j, 1 + 10j, 10, 5 + 6j])
        assert_(np.all(unique(x) == [1 + 1j, 1 + 10j, 5 + 6j, 10]))


class TestCheckFinite(TestCase):

    def test_simple(self):
        a = [1, 2, 3]
        b = [1, 2, np.inf]
        c = [1, 2, np.nan]
        np.lib.asarray_chkfinite(a)
        assert_raises(ValueError, np.lib.asarray_chkfinite, b)
        assert_raises(ValueError, np.lib.asarray_chkfinite, c)

    def test_dtype_order(self):
        """Regression test for missing dtype and order arguments"""
        a = [1, 2, 3]
        a = np.lib.asarray_chkfinite(a, order='F', dtype=np.float64)
        assert_(a.dtype == np.float64)


class TestCorrCoef(TestCase):
    # Fixture matrices and their expected correlation coefficients.
    A = np.array(
        [[0.15391142, 0.18045767, 0.14197213],
         [0.70461506, 0.96474128, 0.27906989],
         [0.9297531, 0.32296769, 0.19267156]])
    B = np.array(
        [[0.10377691, 0.5417086, 0.49807457],
         [0.82872117, 0.77801674, 0.39226705],
         [0.9314666, 0.66800209, 0.03538394]])
    res1 = np.array(
        [[1., 0.9379533, -0.04931983],
         [0.9379533, 1., 0.30007991],
         [-0.04931983, 0.30007991, 1.]])
    res2 = np.array(
        [[1., 0.9379533, -0.04931983, 0.30151751, 0.66318558, 0.51532523],
         [0.9379533, 1., 0.30007991, -0.04781421, 0.88157256, 0.78052386],
         [-0.04931983, 0.30007991, 1., -0.96717111, 0.71483595, 0.83053601],
         [0.30151751, -0.04781421, -0.96717111, 1., -0.51366032,
          -0.66173113],
         [0.66318558, 0.88157256, 0.71483595, -0.51366032, 1., 0.98317823],
         [0.51532523, 0.78052386, 0.83053601, -0.66173113, 0.98317823, 1.]])

    def test_non_array(self):
        assert_almost_equal(np.corrcoef([0, 1, 0], [1, 0, 1]),
                            [[1., -1.], [-1., 1.]])

    def test_simple(self):
        assert_almost_equal(corrcoef(self.A), self.res1)
        assert_almost_equal(corrcoef(self.A, self.B), self.res2)

    def test_ddof(self):
        assert_almost_equal(corrcoef(self.A, ddof=-1), self.res1)
        assert_almost_equal(corrcoef(self.A, self.B, ddof=-1), self.res2)

    def test_complex(self):
        x = np.array([[1, 2, 3], [1j, 2j, 3j]])
        assert_allclose(corrcoef(x), np.array([[1., -1.j], [1.j, 1.]]))

    def test_xy(self):
        x = np.array([[1, 2, 3]])
        y = np.array([[1j, 2j, 3j]])
        assert_allclose(np.corrcoef(x, y), np.array([[1., -1.j], [1.j, 1.]]))

    def test_empty(self):
        with warnings.catch_warnings(record=True):
            warnings.simplefilter('always', RuntimeWarning)
            assert_array_equal(corrcoef(np.array([])), np.nan)
            assert_array_equal(corrcoef(np.array([]).reshape(0, 2)),
                               np.array([]).reshape(0, 0))
            assert_array_equal(corrcoef(np.array([]).reshape(2, 0)),
                               np.array([[np.nan, np.nan],
                                         [np.nan, np.nan]]))

    def test_wrong_ddof(self):
        x = np.array([[0, 2], [1, 1], [2, 0]]).T
        with warnings.catch_warnings(record=True):
            warnings.simplefilter('always', RuntimeWarning)
            assert_array_equal(corrcoef(x, ddof=5),
                               np.array([[np.nan, np.nan],
                                         [np.nan, np.nan]]))


class TestCov(TestCase):

    def test_basic(self):
        x = np.array([[0, 2], [1, 1], [2, 0]]).T
        assert_allclose(cov(x), np.array([[1., -1.], [-1., 1.]]))

    def test_complex(self):
        x = np.array([[1, 2, 3], [1j, 2j, 3j]])
        assert_allclose(cov(x), np.array([[1., -1.j], [1.j, 1.]]))

    def test_xy(self):
        x = np.array([[1, 2, 3]])
        y = np.array([[1j, 2j, 3j]])
        assert_allclose(cov(x, y), np.array([[1., -1.j], [1.j, 1.]]))

    def test_empty(self):
        with warnings.catch_warnings(record=True):
            warnings.simplefilter('always', RuntimeWarning)
            assert_array_equal(cov(np.array([])), np.nan)
            assert_array_equal(cov(np.array([]).reshape(0, 2)),
                               np.array([]).reshape(0, 0))
            assert_array_equal(cov(np.array([]).reshape(2, 0)),
                               np.array([[np.nan, np.nan],
                                         [np.nan, np.nan]]))

    def test_wrong_ddof(self):
        x = np.array([[0, 2], [1, 1], [2, 0]]).T
        with warnings.catch_warnings(record=True):
            warnings.simplefilter('always', RuntimeWarning)
            assert_array_equal(cov(x, ddof=5),
                               np.array([[np.inf, -np.inf],
                                         [-np.inf, np.inf]]))


class Test_I0(TestCase):

    def test_simple(self):
        assert_almost_equal(
            i0(0.5),
            np.array(1.0634833707413234))

        A = np.array([0.49842636, 0.6969809, 0.22011976, 0.0155549])
        assert_almost_equal(
            i0(A),
            np.array([1.06307822, 1.12518299, 1.01214991, 1.00006049]))

        B = np.array([[0.827002, 0.99959078],
                      [0.89694769, 0.39298162],
                      [0.37954418, 0.05206293],
                      [0.36465447, 0.72446427],
                      [0.48164949, 0.50324519]])
        assert_almost_equal(
            i0(B),
            np.array([[1.17843223, 1.26583466],
                      [1.21147086, 1.03898290],
                      [1.03633899, 1.00067775],
                      [1.03352052, 1.13557954],
                      [1.05884290, 1.06432317]]))


class TestKaiser(TestCase):

    def test_simple(self):
        assert_(np.isfinite(kaiser(1, 1.0)))
        assert_almost_equal(kaiser(0, 1.0),
                            np.array([]))
        assert_almost_equal(kaiser(2, 1.0),
                            np.array([0.78984831, 0.78984831]))
        assert_almost_equal(kaiser(5, 1.0),
                            np.array([0.78984831, 0.94503323, 1.,
                                      0.94503323, 0.78984831]))
        assert_almost_equal(kaiser(5, 1.56789),
                            np.array([0.58285404, 0.88409679, 1.,
                                      0.88409679, 0.58285404]))

    def test_int_beta(self):
        # Integer beta must be accepted (smoke test, no expected value).
        kaiser(3, 4)


class TestMsort(TestCase):

    def test_simple(self):
        A = np.array([[0.44567325, 0.79115165, 0.54900530],
                      [0.36844147, 0.37325583, 0.96098397],
                      [0.64864341, 0.52929049, 0.39172155]])
        assert_almost_equal(
            msort(A),
            np.array([[0.36844147, 0.37325583, 0.39172155],
                      [0.44567325, 0.52929049, 0.54900530],
                      [0.64864341, 0.79115165, 0.96098397]]))


class TestMeshgrid(TestCase):

    def test_simple(self):
        [X, Y] = meshgrid([1, 2, 3], [4, 5, 6, 7])
        assert_array_equal(X, np.array([[1, 2, 3],
                                        [1, 2, 3],
                                        [1, 2, 3],
                                        [1, 2, 3]]))
        assert_array_equal(Y, np.array([[4, 4, 4],
                                        [5, 5, 5],
                                        [6, 6, 6],
                                        [7, 7, 7]]))

    def test_single_input(self):
        [X] = meshgrid([1, 2, 3, 4])
        assert_array_equal(X, np.array([1, 2, 3, 4]))

    def test_no_input(self):
        args = []
        assert_array_equal([], meshgrid(*args))

    def test_indexing(self):
        x = [1, 2, 3]
        y = [4, 5, 6, 7]
        [X, Y] = meshgrid(x, y, indexing='ij')
        assert_array_equal(X, np.array([[1, 1, 1, 1],
                                        [2, 2, 2, 2],
                                        [3, 3, 3, 3]]))
        assert_array_equal(Y, np.array([[4, 5, 6, 7],
                                        [4, 5, 6, 7],
                                        [4, 5, 6, 7]]))

        # Test expected shapes:
        z = [8, 9]
        assert_(meshgrid(x, y)[0].shape == (4, 3))
        assert_(meshgrid(x, y, indexing='ij')[0].shape == (3, 4))
        assert_(meshgrid(x, y, z)[0].shape == (4, 3, 2))
        assert_(meshgrid(x, y, z, indexing='ij')[0].shape == (3, 4, 2))

        assert_raises(ValueError, meshgrid, x, y, indexing='notvalid')

    def test_sparse(self):
        [X, Y] = meshgrid([1, 2, 3], [4, 5, 6, 7], sparse=True)
        assert_array_equal(X, np.array([[1, 2, 3]]))
        assert_array_equal(Y, np.array([[4], [5], [6], [7]]))

    def test_invalid_arguments(self):
        # Test that meshgrid complains about invalid arguments
        # Regression test for issue #4755:
        # https://github.com/numpy/numpy/issues/4755
        assert_raises(TypeError, meshgrid,
                      [1, 2, 3], [4, 5, 6, 7], indices='ij')


class TestPiecewise(TestCase):

    def test_simple(self):
        # Condition is single bool list
        x = piecewise([0, 0], [True, False], [1])
        assert_array_equal(x, [1, 0])

        # List of conditions: single bool list
        x = piecewise([0, 0], [[True, False]], [1])
        assert_array_equal(x, [1, 0])

        # Conditions is single bool array
        x = piecewise([0, 0], np.array([True, False]), [1])
        assert_array_equal(x, [1, 0])

        # Condition is single int array
        x = piecewise([0, 0], np.array([1, 0]), [1])
        assert_array_equal(x, [1, 0])

        # List of conditions: int array
        x = piecewise([0, 0], [np.array([1, 0])], [1])
        assert_array_equal(x, [1, 0])

        x = piecewise([0, 0], [[False, True]], [lambda x:-1])
        assert_array_equal(x, [0, -1])

    def test_two_conditions(self):
        x = piecewise([1, 2], [[True, False], [False, True]], [3, 4])
        assert_array_equal(x, [3, 4])

    def test_default(self):
        # No value specified for x[1], should be 0
        x = piecewise([1, 2], [True, False], [2])
        assert_array_equal(x, [2, 0])

        # Should set x[1] to 3
        x = piecewise([1, 2], [True, False], [2, 3])
        assert_array_equal(x, [2, 3])

    def test_0d(self):
        x = np.array(3)
        y = piecewise(x, x > 3, [4, 0])
        assert_(y.ndim == 0)
        assert_(y == 0)

        x = 5
        y = piecewise(x, [[True], [False]], [1, 0])
        assert_(y.ndim == 0)
        assert_(y == 1)

    def test_0d_comparison(self):
        x = 3
        # NOTE(review): the result is never asserted — as written this only
        # verifies the call does not raise; presumably it should also check
        # y == 4. TODO confirm intent.
        y = piecewise(x, [x <= 3, x > 3], [4, 0])


class TestBincount(TestCase):

    def test_simple(self):
        y = np.bincount(np.arange(4))
        assert_array_equal(y, np.ones(4))

    def test_simple2(self):
        y = np.bincount(np.array([1, 5, 2, 4, 1]))
        assert_array_equal(y, np.array([0, 2, 1, 0, 1, 1]))

    def test_simple_weight(self):
        x = np.arange(4)
        w = np.array([0.2, 0.3, 0.5, 0.1])
        y = np.bincount(x, w)
        assert_array_equal(y, w)

    def test_simple_weight2(self):
        x = np.array([1, 2, 4, 5, 2])
        w = np.array([0.2, 0.3, 0.5, 0.1, 0.2])
        y = np.bincount(x, w)
        assert_array_equal(y, np.array([0, 0.2, 0.5, 0, 0.5, 0.1]))

    def test_with_minlength(self):
        x = np.array([0, 1, 0, 1, 1])
        y = np.bincount(x, minlength=3)
        assert_array_equal(y, np.array([2, 3, 0]))

    def test_with_minlength_smaller_than_maxvalue(self):
        x = np.array([0, 1, 1, 2, 2, 3, 3])
        y = np.bincount(x, minlength=2)
        assert_array_equal(y, np.array([1, 2, 2, 2]))

    def test_with_minlength_and_weights(self):
        x = np.array([1, 2, 4, 5, 2])
        w = np.array([0.2, 0.3, 0.5, 0.1, 0.2])
        y = np.bincount(x, w, 8)
        assert_array_equal(y, np.array([0, 0.2, 0.5, 0, 0.5, 0.1, 0, 0]))

    def test_empty(self):
        x = np.array([], dtype=int)
        y = np.bincount(x)
        assert_array_equal(x, y)

    def test_empty_with_minlength(self):
        x = np.array([], dtype=int)
        y = np.bincount(x, minlength=5)
        assert_array_equal(y, np.zeros(5, dtype=int))

    def test_with_incorrect_minlength(self):
        x = np.array([], dtype=int)
        assert_raises_regex(TypeError, "an integer is required",
                            lambda: np.bincount(x, minlength="foobar"))
        assert_raises_regex(ValueError, "must be positive",
                            lambda: np.bincount(x, minlength=-1))
        assert_raises_regex(ValueError, "must be positive",
                            lambda: np.bincount(x, minlength=0))

        x = np.arange(5)
        assert_raises_regex(TypeError, "an integer is required",
                            lambda: np.bincount(x, minlength="foobar"))
        assert_raises_regex(ValueError, "minlength must be positive",
                            lambda: np.bincount(x, minlength=-1))
        assert_raises_regex(ValueError, "minlength must be positive",
                            lambda: np.bincount(x, minlength=0))


class TestInterp(TestCase):

    def test_exceptions(self):
        assert_raises(ValueError, interp, 0, [], [])
        assert_raises(ValueError, interp, 0, [0], [1, 2])

    def test_basic(self):
        x = np.linspace(0, 1, 5)
        y = np.linspace(0, 1, 5)
        x0 = np.linspace(0, 1, 50)
        assert_almost_equal(np.interp(x0, x, y), x0)

    def test_right_left_behavior(self):
        assert_equal(interp([-1, 0, 1], [0], [1]), [1, 1, 1])
        assert_equal(interp([-1, 0, 1], [0], [1], left=0), [0, 1, 1])
        assert_equal(interp([-1, 0, 1], [0], [1], right=0), [1, 1, 0])
        assert_equal(interp([-1, 0, 1], [0], [1], left=0, right=0),
                     [0, 1, 0])

    def test_scalar_interpolation_point(self):
        x = np.linspace(0, 1, 5)
        y = np.linspace(0, 1, 5)
        x0 = 0
        assert_almost_equal(np.interp(x0, x, y), x0)
        x0 = .3
        assert_almost_equal(np.interp(x0, x, y), x0)
        x0 = np.float32(.3)
        assert_almost_equal(np.interp(x0, x, y), x0)
        x0 = np.float64(.3)
        assert_almost_equal(np.interp(x0, x, y), x0)
        x0 = np.nan
        assert_almost_equal(np.interp(x0, x, y), x0)

    def test_zero_dimensional_interpolation_point(self):
        x = np.linspace(0, 1, 5)
        y = np.linspace(0, 1, 5)
        x0 = np.array(.3)
        assert_almost_equal(np.interp(x0, x, y), x0)
        x0 = np.array(.3, dtype=object)
        assert_almost_equal(np.interp(x0, x, y), .3)

    def test_if_len_x_is_small(self):
        xp = np.arange(0, 10, 0.0001)
        fp = np.sin(xp)
        assert_almost_equal(np.interp(np.pi, xp, fp), 0.0)


def compare_results(res, desired):
    # Element-wise comparison of two same-length sequences of arrays.
    for i in range(len(desired)):
        assert_array_equal(res[i], desired[i])


class TestScoreatpercentile(TestCase):

    def test_basic(self):
        x = np.arange(8) * 0.5
        assert_equal(np.percentile(x, 0), 0.)
        assert_equal(np.percentile(x, 100), 3.5)
        assert_equal(np.percentile(x, 50), 1.75)

    def test_api(self):
        # Positional-argument smoke test of the percentile signature
        # (a, q, axis, out, overwrite_input, interpolation).
        d = np.ones(5)
        np.percentile(d, 5, None, None, False)
        np.percentile(d, 5, None, None, False, 'linear')
        o = np.ones((1,))
        np.percentile(d, 5, None, o, False, 'linear')

    def test_2D(self):
        x = np.array([[1, 1, 1],
                      [1, 1, 1],
                      [4, 4, 3],
                      [1, 1, 1],
                      [1, 1, 1]])
        assert_array_equal(np.percentile(x, 50, axis=0), [1, 1, 1])

    def test_linear(self):
        # Test defaults
        assert_equal(np.percentile(range(10), 50), 4.5)

        # explicitly specify interpolation_method 'fraction' (the default)
        assert_equal(np.percentile(range(10), 50,
                                   interpolation='linear'), 4.5)

    def test_lower_higher(self):
        # interpolation_method 'lower'/'higher'
        assert_equal(np.percentile(range(10), 50,
                                   interpolation='lower'), 4)
        assert_equal(np.percentile(range(10), 50,
                                   interpolation='higher'), 5)

    def test_midpoint(self):
        assert_equal(np.percentile(range(10), 51,
                                   interpolation='midpoint'), 4.5)

    def test_nearest(self):
        assert_equal(np.percentile(range(10), 51,
                                   interpolation='nearest'), 5)
        assert_equal(np.percentile(range(10), 49,
                                   interpolation='nearest'), 4)

    def test_sequence(self):
        x = np.arange(8) * 0.5
        assert_equal(np.percentile(x, [0, 100, 50]), [0, 3.5, 1.75])

    def test_axis(self):
        x = np.arange(12).reshape(3, 4)

        assert_equal(np.percentile(x, (25, 50, 100)), [2.75, 5.5, 11.0])

        r0 = [[2, 3, 4, 5], [4, 5, 6, 7], [8, 9, 10, 11]]
        assert_equal(np.percentile(x, (25, 50, 100), axis=0), r0)

        r1 = [[0.75, 1.5, 3], [4.75, 5.5, 7], [8.75, 9.5, 11]]
        assert_equal(np.percentile(x, (25, 50, 100), axis=1), np.array(r1).T)

        # ensure qth axis is always first as with np.array(old_percentile(..))
        x = np.arange(3 * 4 * 5 * 6).reshape(3, 4, 5, 6)
        assert_equal(np.percentile(x, (25, 50)).shape, (2,))
        assert_equal(np.percentile(x, (25, 50, 75)).shape, (3,))
        assert_equal(np.percentile(x, (25, 50), axis=0).shape, (2, 4, 5, 6))
        assert_equal(np.percentile(x, (25, 50), axis=1).shape, (2, 3, 5, 6))
        assert_equal(np.percentile(x, (25, 50), axis=2).shape, (2, 3, 4, 6))
        assert_equal(np.percentile(x, (25, 50), axis=3).shape, (2, 3, 4, 5))
        assert_equal(np.percentile(x, (25, 50, 75), axis=1).shape,
                     (3, 3, 5, 6))
        assert_equal(np.percentile(x, (25, 50),
                                   interpolation="higher").shape, (2,))
        assert_equal(np.percentile(x, (25, 50, 75),
                                   interpolation="higher").shape, (3,))
        assert_equal(np.percentile(x, (25, 50), axis=0,
                                   interpolation="higher").shape,
                     (2, 4, 5, 6))
        assert_equal(np.percentile(x, (25, 50), axis=1,
                                   interpolation="higher").shape,
                     (2, 3, 5, 6))
        assert_equal(np.percentile(x, (25, 50), axis=2,
                                   interpolation="higher").shape,
                     (2, 3, 4, 6))
        assert_equal(np.percentile(x, (25, 50), axis=3,
                                   interpolation="higher").shape,
                     (2, 3, 4, 5))
        assert_equal(np.percentile(x, (25, 50, 75), axis=1,
                                   interpolation="higher").shape,
                     (3, 3, 5, 6))

    def test_scalar_q(self):
        # test for no empty dimensions for compatiblity with old percentile
        x = np.arange(12).reshape(3, 4)
        assert_equal(np.percentile(x, 50), 5.5)
        self.assertTrue(np.isscalar(np.percentile(x, 50)))
        r0 = np.array([4., 5., 6., 7.])
        assert_equal(np.percentile(x, 50, axis=0), r0)
        assert_equal(np.percentile(x, 50, axis=0).shape, r0.shape)
        r1 = np.array([1.5, 5.5, 9.5])
        assert_almost_equal(np.percentile(x, 50, axis=1), r1)
        assert_equal(np.percentile(x, 50, axis=1).shape, r1.shape)

        out = np.empty(1)
        assert_equal(np.percentile(x, 50, out=out), 5.5)
        assert_equal(out, 5.5)
        out = np.empty(4)
        assert_equal(np.percentile(x, 50, axis=0, out=out), r0)
        assert_equal(out, r0)
        out = np.empty(3)
        assert_equal(np.percentile(x, 50, axis=1, out=out), r1)
        assert_equal(out, r1)

        # test for no empty dimensions for compatiblity with old percentile
        x = np.arange(12).reshape(3, 4)
        assert_equal(np.percentile(x, 50, interpolation='lower'), 5.)
        self.assertTrue(np.isscalar(np.percentile(x, 50)))
        r0 = np.array([4., 5., 6., 7.])
        c0 = np.percentile(x, 50, interpolation='lower', axis=0)
        assert_equal(c0, r0)
        assert_equal(c0.shape, r0.shape)
        r1 = np.array([1., 5., 9.])
        c1 = np.percentile(x, 50, interpolation='lower', axis=1)
        assert_almost_equal(c1, r1)
        assert_equal(c1.shape, r1.shape)

        out = np.empty((), dtype=x.dtype)
        c = np.percentile(x, 50, interpolation='lower', out=out)
        assert_equal(c, 5)
        assert_equal(out, 5)
        out = np.empty(4, dtype=x.dtype)
        c = np.percentile(x, 50, interpolation='lower', axis=0, out=out)
        assert_equal(c, r0)
        assert_equal(out, r0)
        out = np.empty(3, dtype=x.dtype)
        c = np.percentile(x, 50, interpolation='lower', axis=1, out=out)
        assert_equal(c, r1)
        assert_equal(out, r1)

    def test_exception(self):
        assert_raises(ValueError, np.percentile, [1, 2], 56,
                      interpolation='foobar')
        assert_raises(ValueError, np.percentile, [1], 101)
        assert_raises(ValueError, np.percentile, [1], -1)
        assert_raises(
            ValueError, np.percentile, [1], list(range(50)) + [101])
        assert_raises(
            ValueError, np.percentile, [1], list(range(50)) + [-0.1])

    def test_percentile_list(self):
        assert_equal(np.percentile([1, 2, 3], 0), 1)

    def test_percentile_out(self):
        x = np.array([1, 2, 3])
        y = np.zeros((3,))
        p = (1, 2, 3)
        np.percentile(x, p, out=y)
        assert_equal(y, np.percentile(x, p))

        x = np.array([[1, 2, 3],
                      [4, 5, 6]])

        y = np.zeros((3, 3))
        np.percentile(x, p, axis=0, out=y)
        assert_equal(y, np.percentile(x, p, axis=0))

        y = np.zeros((3, 2))
        np.percentile(x, p, axis=1, out=y)
        assert_equal(y, np.percentile(x, p, axis=1))

        x = np.arange(12).reshape(3, 4)
        # q.dim > 1, float
        r0 = np.array([[2., 3., 4., 5.], [4., 5., 6., 7.]])
        out = np.empty((2, 4))
        assert_equal(np.percentile(x, (25, 50), axis=0, out=out), r0)
        assert_equal(out, r0)
        r1 = np.array([[0.75, 4.75, 8.75], [1.5, 5.5, 9.5]])
        out = np.empty((2, 3))
        assert_equal(np.percentile(x, (25, 50), axis=1, out=out), r1)
        assert_equal(out, r1)

        # q.dim > 1, int
        r0 = np.array([[0, 1, 2, 3], [4, 5, 6, 7]])
        out = np.empty((2, 4), dtype=x.dtype)
        c = np.percentile(x, (25, 50), interpolation='lower',
                          axis=0, out=out)
        assert_equal(c, r0)
        assert_equal(out, r0)
        r1 = np.array([[0, 4, 8], [1, 5, 9]])
        out = np.empty((2, 3), dtype=x.dtype)
        c = np.percentile(x, (25, 50), interpolation='lower',
                          axis=1, out=out)
        assert_equal(c, r1)
        assert_equal(out, r1)

    def test_percentile_empty_dim(self):
        # empty dims are preserved
        d = np.arange(11*2).reshape(11, 1, 2, 1)
        assert_array_equal(np.percentile(d, 50, axis=0).shape, (1, 2, 1))
        assert_array_equal(np.percentile(d, 50, axis=1).shape, (11, 2, 1))
        assert_array_equal(np.percentile(d, 50, axis=2).shape, (11, 1, 1))
        assert_array_equal(np.percentile(d, 50, axis=3).shape, (11, 1, 2))
        assert_array_equal(np.percentile(d, 50, axis=-1).shape, (11, 1, 2))
        assert_array_equal(np.percentile(d, 50, axis=-2).shape, (11, 1, 1))
        assert_array_equal(np.percentile(d, 50, axis=-3).shape, (11, 2, 1))
        assert_array_equal(np.percentile(d, 50, axis=-4).shape, (1, 2, 1))

        assert_array_equal(np.percentile(d, 50, axis=2,
                                         interpolation='midpoint').shape,
                           (11, 1, 1))
        assert_array_equal(np.percentile(d, 50, axis=-2,
                                         interpolation='midpoint').shape,
                           (11, 1, 1))

        assert_array_equal(np.array(np.percentile(d, [10, 50],
                                                  axis=0)).shape,
                           (2, 1, 2, 1))
        assert_array_equal(np.array(np.percentile(d, [10, 50],
                                                  axis=1)).shape,
                           (2, 11, 2, 1))
        assert_array_equal(np.array(np.percentile(d, [10, 50],
                                                  axis=2)).shape,
                           (2, 11, 1, 1))
        assert_array_equal(np.array(np.percentile(d, [10, 50],
                                                  axis=3)).shape,
                           (2, 11, 1, 2))

    def test_percentile_no_overwrite(self):
        a = np.array([2, 3, 4, 1])
        np.percentile(a, [50], overwrite_input=False)
        assert_equal(a, np.array([2, 3, 4, 1]))

        a = np.array([2, 3, 4, 1])
        np.percentile(a, [50])
        assert_equal(a, np.array([2, 3, 4, 1]))

    def test_no_p_overwrite(self):
        # The q (percentile) argument itself must not be modified.
        p = np.linspace(0., 100., num=5)
        np.percentile(np.arange(100.), p, interpolation="midpoint")
        assert_array_equal(p, np.linspace(0., 100., num=5))
        p = np.linspace(0., 100., num=5).tolist()
        np.percentile(np.arange(100.), p, interpolation="midpoint")
        assert_array_equal(p, np.linspace(0., 100., num=5).tolist())

    def test_percentile_overwrite(self):
        a = np.array([2, 3, 4, 1])
        b = np.percentile(a, [50], overwrite_input=True)
        assert_equal(b, np.array([2.5]))

        b = np.percentile([2, 3, 4, 1], [50], overwrite_input=True)
        assert_equal(b, np.array([2.5]))

    def test_extended_axis(self):
        # Tuple-of-axes reduction must match reducing the flattened slice.
        o = np.random.normal(size=(71, 23))
        x = np.dstack([o] * 10)
        assert_equal(np.percentile(x, 30, axis=(0, 1)), np.percentile(o, 30))
        x = np.rollaxis(x, -1, 0)
        assert_equal(np.percentile(x, 30, axis=(-2, -1)),
                     np.percentile(o, 30))
        x = x.swapaxes(0, 1).copy()
        assert_equal(np.percentile(x, 30, axis=(0, -1)),
                     np.percentile(o, 30))
        x = x.swapaxes(0, 1).copy()

        assert_equal(np.percentile(x, [25, 60], axis=(0, 1, 2)),
                     np.percentile(x, [25, 60], axis=None))
        assert_equal(np.percentile(x, [25, 60], axis=(0,)),
                     np.percentile(x, [25, 60], axis=0))

        d = np.arange(3 * 5 * 7 * 11).reshape(3, 5, 7, 11)
        np.random.shuffle(d)
        assert_equal(np.percentile(d, 25, axis=(0, 1, 2))[0],
                     np.percentile(d[:, :, :, 0].flatten(), 25))
        assert_equal(np.percentile(d, [10, 90], axis=(0, 1, 3))[:, 1],
                     np.percentile(d[:, :, 1, :].flatten(), [10, 90]))
        assert_equal(np.percentile(d, 25, axis=(3, 1, -4))[2],
                     np.percentile(d[:, :, 2, :].flatten(), 25))
        assert_equal(np.percentile(d, 25, axis=(3, 1, 2))[2],
                     np.percentile(d[2, :, :, :].flatten(), 25))
        assert_equal(np.percentile(d, 25, axis=(3, 2))[2, 1],
                     np.percentile(d[2, 1, :, :].flatten(), 25))
        assert_equal(np.percentile(d, 25, axis=(1, -2))[2, 1],
                     np.percentile(d[2, :, :, 1].flatten(), 25))
        assert_equal(np.percentile(d, 25, axis=(1, 3))[2, 2],
                     np.percentile(d[2, :, 2, :].flatten(), 25))

    def test_extended_axis_invalid(self):
        # Out-of-range axes (positive or negative) must raise IndexError.
        d = np.ones((3, 5, 7, 11))
        assert_raises(IndexError, np.percentile, d, axis=-5, q=25)
        assert_raises(IndexError, np.percentile, d, axis=(0, -5), q=25)
        assert_raises(IndexError, np.percentile, d, axis=4, q=25)
        assert_raises(IndexError, np.percentile, d,
axis=(0, 4), q=25) assert_raises(ValueError, np.percentile, d, axis=(1, 1), q=25) def test_keepdims(self): d = np.ones((3, 5, 7, 11)) assert_equal(np.percentile(d, 7, axis=None, keepdims=True).shape, (1, 1, 1, 1)) assert_equal(np.percentile(d, 7, axis=(0, 1), keepdims=True).shape, (1, 1, 7, 11)) assert_equal(np.percentile(d, 7, axis=(0, 3), keepdims=True).shape, (1, 5, 7, 1)) assert_equal(np.percentile(d, 7, axis=(1,), keepdims=True).shape, (3, 1, 7, 11)) assert_equal(np.percentile(d, 7, (0, 1, 2, 3), keepdims=True).shape, (1, 1, 1, 1)) assert_equal(np.percentile(d, 7, axis=(0, 1, 3), keepdims=True).shape, (1, 1, 7, 1)) assert_equal(np.percentile(d, [1, 7], axis=(0, 1, 3), keepdims=True).shape, (2, 1, 1, 7, 1)) assert_equal(np.percentile(d, [1, 7], axis=(0, 3), keepdims=True).shape, (2, 1, 5, 7, 1)) class TestMedian(TestCase): def test_basic(self): a0 = np.array(1) a1 = np.arange(2) a2 = np.arange(6).reshape(2, 3) assert_equal(np.median(a0), 1) assert_allclose(np.median(a1), 0.5) assert_allclose(np.median(a2), 2.5) assert_allclose(np.median(a2, axis=0), [1.5, 2.5, 3.5]) assert_equal(np.median(a2, axis=1), [1, 4]) assert_allclose(np.median(a2, axis=None), 2.5) a = np.array([0.0444502, 0.0463301, 0.141249, 0.0606775]) assert_almost_equal((a[1] + a[3]) / 2., np.median(a)) a = np.array([0.0463301, 0.0444502, 0.141249]) assert_equal(a[0], np.median(a)) a = np.array([0.0444502, 0.141249, 0.0463301]) assert_equal(a[-1], np.median(a)) # check array scalar result assert_equal(np.median(a).ndim, 0) a[1] = np.nan assert_equal(np.median(a).ndim, 0) def test_axis_keyword(self): a3 = np.array([[2, 3], [0, 1], [6, 7], [4, 5]]) for a in [a3, np.random.randint(0, 100, size=(2, 3, 4))]: orig = a.copy() np.median(a, axis=None) for ax in range(a.ndim): np.median(a, axis=ax) assert_array_equal(a, orig) assert_allclose(np.median(a3, axis=0), [3, 4]) assert_allclose(np.median(a3.T, axis=1), [3, 4]) assert_allclose(np.median(a3), 3.5) assert_allclose(np.median(a3, axis=None), 3.5) 
assert_allclose(np.median(a3.T), 3.5) def test_overwrite_keyword(self): a3 = np.array([[2, 3], [0, 1], [6, 7], [4, 5]]) a0 = np.array(1) a1 = np.arange(2) a2 = np.arange(6).reshape(2, 3) assert_allclose(np.median(a0.copy(), overwrite_input=True), 1) assert_allclose(np.median(a1.copy(), overwrite_input=True), 0.5) assert_allclose(np.median(a2.copy(), overwrite_input=True), 2.5) assert_allclose(np.median(a2.copy(), overwrite_input=True, axis=0), [1.5, 2.5, 3.5]) assert_allclose( np.median(a2.copy(), overwrite_input=True, axis=1), [1, 4]) assert_allclose( np.median(a2.copy(), overwrite_input=True, axis=None), 2.5) assert_allclose( np.median(a3.copy(), overwrite_input=True, axis=0), [3, 4]) assert_allclose(np.median(a3.T.copy(), overwrite_input=True, axis=1), [3, 4]) a4 = np.arange(3 * 4 * 5, dtype=np.float32).reshape((3, 4, 5)) map(np.random.shuffle, a4) assert_allclose(np.median(a4, axis=None), np.median(a4.copy(), axis=None, overwrite_input=True)) assert_allclose(np.median(a4, axis=0), np.median(a4.copy(), axis=0, overwrite_input=True)) assert_allclose(np.median(a4, axis=1), np.median(a4.copy(), axis=1, overwrite_input=True)) assert_allclose(np.median(a4, axis=2), np.median(a4.copy(), axis=2, overwrite_input=True)) def test_array_like(self): x = [1, 2, 3] assert_almost_equal(np.median(x), 2) x2 = [x] assert_almost_equal(np.median(x2), 2) assert_allclose(np.median(x2, axis=0), x) def test_subclass(self): # gh-3846 class MySubClass(np.ndarray): def __new__(cls, input_array, info=None): obj = np.asarray(input_array).view(cls) obj.info = info return obj def mean(self, axis=None, dtype=None, out=None): return -7 a = MySubClass([1,2,3]) assert_equal(np.median(a), -7) def test_object(self): o = np.arange(7.); assert_(type(np.median(o.astype(object))), float) o[2] = np.nan assert_(type(np.median(o.astype(object))), float) def test_extended_axis(self): o = np.random.normal(size=(71, 23)) x = np.dstack([o] * 10) assert_equal(np.median(x, axis=(0, 1)), np.median(o)) x = 
np.rollaxis(x, -1, 0) assert_equal(np.median(x, axis=(-2, -1)), np.median(o)) x = x.swapaxes(0, 1).copy() assert_equal(np.median(x, axis=(0, -1)), np.median(o)) assert_equal(np.median(x, axis=(0, 1, 2)), np.median(x, axis=None)) assert_equal(np.median(x, axis=(0, )), np.median(x, axis=0)) assert_equal(np.median(x, axis=(-1, )), np.median(x, axis=-1)) d = np.arange(3 * 5 * 7 * 11).reshape(3, 5, 7, 11) np.random.shuffle(d) assert_equal(np.median(d, axis=(0, 1, 2))[0], np.median(d[:, :, :, 0].flatten())) assert_equal(np.median(d, axis=(0, 1, 3))[1], np.median(d[:, :, 1, :].flatten())) assert_equal(np.median(d, axis=(3, 1, -4))[2], np.median(d[:, :, 2, :].flatten())) assert_equal(np.median(d, axis=(3, 1, 2))[2], np.median(d[2, :, :, :].flatten())) assert_equal(np.median(d, axis=(3, 2))[2, 1], np.median(d[2, 1, :, :].flatten())) assert_equal(np.median(d, axis=(1, -2))[2, 1], np.median(d[2, :, :, 1].flatten())) assert_equal(np.median(d, axis=(1, 3))[2, 2], np.median(d[2, :, 2, :].flatten())) def test_extended_axis_invalid(self): d = np.ones((3, 5, 7, 11)) assert_raises(IndexError, np.median, d, axis=-5) assert_raises(IndexError, np.median, d, axis=(0, -5)) assert_raises(IndexError, np.median, d, axis=4) assert_raises(IndexError, np.median, d, axis=(0, 4)) assert_raises(ValueError, np.median, d, axis=(1, 1)) def test_keepdims(self): d = np.ones((3, 5, 7, 11)) assert_equal(np.median(d, axis=None, keepdims=True).shape, (1, 1, 1, 1)) assert_equal(np.median(d, axis=(0, 1), keepdims=True).shape, (1, 1, 7, 11)) assert_equal(np.median(d, axis=(0, 3), keepdims=True).shape, (1, 5, 7, 1)) assert_equal(np.median(d, axis=(1,), keepdims=True).shape, (3, 1, 7, 11)) assert_equal(np.median(d, axis=(0, 1, 2, 3), keepdims=True).shape, (1, 1, 1, 1)) assert_equal(np.median(d, axis=(0, 1, 3), keepdims=True).shape, (1, 1, 7, 1)) class TestAdd_newdoc_ufunc(TestCase): def test_ufunc_arg(self): assert_raises(TypeError, add_newdoc_ufunc, 2, "blah") assert_raises(ValueError, add_newdoc_ufunc, 
np.add, "blah") def test_string_arg(self): assert_raises(TypeError, add_newdoc_ufunc, np.add, 3) class TestAdd_newdoc(TestCase): @dec.skipif(sys.flags.optimize == 2) def test_add_doc(self): # test np.add_newdoc tgt = "Current flat index into the array." self.assertEqual(np.core.flatiter.index.__doc__[:len(tgt)], tgt) self.assertTrue(len(np.core.ufunc.identity.__doc__) > 300) self.assertTrue(len(np.lib.index_tricks.mgrid.__doc__) > 300) if __name__ == "__main__": run_module_suite()
Koheron/zynq-sdk
refs/heads/master
python/koheron/cli.py
2
import click


# --------------------------------------------
# Call koheron-server
# --------------------------------------------

class ConnectionType(object):
    """Connection parameters shared by all CLI sub-commands.

    Parameters
    ----------
    host : str
        IP address or hostname of the koheron server ('' when unset).
    unixsock : str
        Path to a Unix domain socket ('' when unset).
    """
    def __init__(self, host="", unixsock=""):
        self.host = host
        # Bug fix: 'unixsock' was accepted but silently discarded, so a
        # Unix-socket connection could never actually be configured.
        self.unixsock = unixsock


@click.group()
@click.option('--host', default='', help='Host ip address', envvar='HOST')
@click.pass_context
def cli(ctx, host):
    """Entry point for the koheron command-line interface."""
    # Stash the connection parameters on the click context so sub-commands
    # decorated with @click.pass_obj receive them as 'conn_type'.
    if host != "":
        ctx.obj = ConnectionType(host=str(host))


@cli.command()
def version():
    ''' Get the version of koheron python library '''
    # Imported lazily so 'koheron version' works without a server connection.
    from .version import __version__
    click.echo(__version__)


@cli.command()
@click.pass_obj
def devices(conn_type):
    ''' Get the list of devices '''
    from .koheron import KoheronClient
    client = KoheronClient(host=conn_type.host)
    click.echo(client.devices_idx)


@cli.command()
@click.pass_obj
@click.option('--device', default=None)
def commands(conn_type, device):
    ''' Get the list of commands for a specified device '''
    from .koheron import KoheronClient
    client = KoheronClient(host=conn_type.host)
    if device is None:
        # No device given: dump the full command table.
        click.echo(client.commands)
    else:
        device_idx = client.devices_idx[device]
        click.echo(client.commands[device_idx])


# --------------------------------------------
# Call HTTP API
# --------------------------------------------

@cli.command()
@click.pass_obj
@click.argument('instrument_zip')
@click.option('--run', is_flag=True)
def upload(conn_type, instrument_zip, run):
    ''' Upload instrument.zip '''
    from .koheron import upload_instrument
    upload_instrument(conn_type.host, instrument_zip, run=run)


@cli.command()
@click.pass_obj
@click.argument('instrument_name', required=False)
@click.option('--restart', is_flag=True)
def run(conn_type, instrument_name, restart):
    ''' Run a given instrument '''
    from .koheron import run_instrument
    run_instrument(conn_type.host, instrument_name, restart=restart)
Nikoala/CouchPotatoServer
refs/heads/develop
couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/ku6.py
147
from __future__ import unicode_literals

from .common import InfoExtractor


class Ku6IE(InfoExtractor):
    """Extractor for videos hosted on v.ku6.com."""

    _VALID_URL = r'http://v\.ku6\.com/show/(?P<id>[a-zA-Z0-9\-\_]+)(?:\.)*html'
    _TEST = {
        'url': 'http://v.ku6.com/show/JG-8yS14xzBr4bCn1pu0xw...html',
        'md5': '01203549b9efbb45f4b87d55bdea1ed1',
        'info_dict': {
            'id': 'JG-8yS14xzBr4bCn1pu0xw',
            'ext': 'f4v',
            'title': 'techniques test',
        }
    }

    def _real_extract(self, url):
        # Resolve the page id from the URL, then scrape the page title.
        video_id = self._match_id(url)
        webpage = self._download_webpage(url, video_id)
        title = self._html_search_regex(
            r'<h1 title=.*>(.*?)</h1>', webpage, 'title')

        # The player endpoint returns JSON whose data.f field holds the
        # direct media URL.
        player_info = self._download_json(
            'http://v.ku6.com/fetchVideo4Player/%s.html' % video_id, video_id)
        media_url = player_info['data']['f']

        return {
            'id': video_id,
            'title': title,
            'url': media_url,
        }
wang1986one/pyo
refs/heads/master
examples/fft/02_fft_cross.py
12
#! /usr/bin/env python # encoding: utf-8 """ Performs the cross-synthesis of two sounds. """ from pyo import * s = Server(duplex=0).boot() snd1 = SfPlayer("../snds/baseballmajeur_m.aif", loop=True).mix(2) snd2 = FM(carrier=[75,100,125,150], ratio=[.999,.5005], index=20, mul=.4).mix(2) size = 1024 olaps = 4 fin1 = FFT(snd1, size=size, overlaps=olaps) fin2 = FFT(snd2, size=size, overlaps=olaps) # get the magnitude of the first sound mag = Sqrt(fin1["real"]*fin1["real"] + fin1["imag"]*fin1["imag"], mul=10) # scale `real` and `imag` parts of the second sound by the magnitude of the first one real = fin2["real"] * mag imag = fin2["imag"] * mag fout = IFFT(real, imag, size=size, overlaps=olaps) ffout = fout.mix(2).out() # change of fft size must be done on all fft and ifft objects at the same time! def setSize(x): fin1.size = x fin2.size = x fout.size = x s.gui(locals())
analogbyte/ansible-modules-extras
refs/heads/devel
notification/twilio.py
39
#!/usr/bin/python # -*- coding: utf-8 -*- # (c) 2015, Matt Makai <matthew.makai@gmail.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. DOCUMENTATION = ''' --- version_added: "1.6" module: twilio short_description: Sends a text message to a mobile phone through Twilio. description: - Sends a text message to a phone number through the Twilio messaging API. notes: - This module is non-idempotent because it sends an email through the external API. It is idempotent only in the case that the module fails. - Like the other notification modules, this one requires an external dependency to work. In this case, you'll need a Twilio account with a purchased or verified phone number to send the text message. 
options: account_sid: description: user's Twilio account token found on the account page required: true auth_token: description: user's Twilio authentication token required: true msg: description: the body of the text message required: true to_number: description: one or more phone numbers to send the text message to, format +15551112222 required: true from_number: description: the Twilio number to send the text message from, format +15551112222 required: true media_url: description: a URL with a picture, video or sound clip to send with an MMS (multimedia message) instead of a plain SMS required: false author: "Matt Makai (@makaimc)" ''' EXAMPLES = ''' # send an SMS about the build status to (555) 303 5681 # note: replace account_sid and auth_token values with your credentials # and you have to have the 'from_number' on your Twilio account - twilio: msg: "All servers with webserver role are now configured." account_sid: "ACXXXXXXXXXXXXXXXXX" auth_token: "ACXXXXXXXXXXXXXXXXX" from_number: "+15552014545" to_number: "+15553035681" delegate_to: localhost # send an SMS to multiple phone numbers about the deployment # note: replace account_sid and auth_token values with your credentials # and you have to have the 'from_number' on your Twilio account - twilio: msg: "This server's configuration is now complete." account_sid: "ACXXXXXXXXXXXXXXXXX" auth_token: "ACXXXXXXXXXXXXXXXXX" from_number: "+15553258899" to_number: - "+15551113232" - "+12025551235" - "+19735559010" delegate_to: localhost # send an MMS to a single recipient with an update on the deployment # and an image of the results # note: replace account_sid and auth_token values with your credentials # and you have to have the 'from_number' on your Twilio account - twilio: msg: "Deployment complete!" 
account_sid: "ACXXXXXXXXXXXXXXXXX" auth_token: "ACXXXXXXXXXXXXXXXXX" from_number: "+15552014545" to_number: "+15553035681" media_url: "https://demo.twilio.com/logo.png" delegate_to: localhost ''' # ======================================= # twilio module support methods # import urllib def post_twilio_api(module, account_sid, auth_token, msg, from_number, to_number, media_url=None): URI = "https://api.twilio.com/2010-04-01/Accounts/%s/Messages.json" \ % (account_sid,) AGENT = "Ansible" data = {'From':from_number, 'To':to_number, 'Body':msg} if media_url: data['MediaUrl'] = media_url encoded_data = urllib.urlencode(data) headers = {'User-Agent': AGENT, 'Content-type': 'application/x-www-form-urlencoded', 'Accept': 'application/json', } # Hack module params to have the Basic auth params that fetch_url expects module.params['url_username'] = account_sid.replace('\n', '') module.params['url_password'] = auth_token.replace('\n', '') return fetch_url(module, URI, data=encoded_data, headers=headers) # ======================================= # Main # def main(): module = AnsibleModule( argument_spec=dict( account_sid=dict(required=True), auth_token=dict(required=True, no_log=True), msg=dict(required=True), from_number=dict(required=True), to_number=dict(required=True), media_url=dict(default=None, required=False), ), supports_check_mode=True ) account_sid = module.params['account_sid'] auth_token = module.params['auth_token'] msg = module.params['msg'] from_number = module.params['from_number'] to_number = module.params['to_number'] media_url = module.params['media_url'] if not isinstance(to_number, list): to_number = [to_number] for number in to_number: r, info = post_twilio_api(module, account_sid, auth_token, msg, from_number, number, media_url) if info['status'] != 200: module.fail_json(msg="unable to send message to %s" % number) module.exit_json(msg=msg, changed=False) # import module snippets from ansible.module_utils.basic import * from ansible.module_utils.urls 
import * if __name__ == '__main__': main()
Akoten/django-import-export
refs/heads/master
tests/core/tests/__init__.py
17
from .test import *
mrquim/repository.mrquim
refs/heads/master
repo/plugin.video.castaway/resources/lib/sources/live_tv/lmshows.py
4
from __future__ import unicode_literals
from resources.lib.modules import client,control
import re,sys,xbmcgui,os


class info():
    """Static metadata describing this source for the addon's source registry."""
    def __init__(self):
        self.mode = 'lmshows'
        self.name = 'LMShows.com'
        self.icon = 'lmshows.jpg'
        self.paginated = False
        self.categorized = False
        self.multilink = False


class main():
    """Scraper for the LMShows live-TV channel listing."""

    def __init__(self):
        self.base = 'http://lmshows.se/'

    def channels(self):
        """Return channels as (url, title, image) tuples, sorted by title."""
        html = client.request(self.base)
        found = re.findall('href=[\"\']([^\"\']+)[\"\']><img src=[\"\']([^\"\']+)[\"\'] alt=[\"\']([^\"\']+)[\"\'].+?class=[\"\']ch-cover', html)

        # Build (url, title, image) entries, skipping the tracker page.
        events = [(self.base + path, title, self.base + image)
                  for (path, image, title) in found if path != 'tr.php']

        # Hand-curated extra entry, added before de-duplication.
        events.append(('http://lmshows.se/sb.php','SpongeBob SquarePants','http://vignette2.wikia.nocookie.net/spongebobtv/images/0/0b/SpongeBob-Logo.jpg/revision/latest?cb=20100716014643'))
        events = list(set(events))
        # This entry is appended after de-duplication, so it always survives.
        events.append(('http://www.ustream.tv/embed/19964595','Toonami Aftermath','http://66.media.tumblr.com/tumblr_lnfu9bYqaH1qa0xnuo1_500.png'))
        events.sort(key=lambda entry: entry[1])
        return events

    def resolve(self, url):
        """Resolve a channel page URL to a playable stream via liveresolver."""
        import liveresolver
        return liveresolver.resolve(url, cache_timeout=0)
prophile/bong
refs/heads/master
bong/notify.py
1
import subprocess
import sys


def notify(message, sys=sys):
    """Show *message* via the platform's desktop notifier.

    Parameters
    ----------
    message : str
        Text to display.
    sys : module-like, optional
        Injection point for testing; anything with a ``platform``
        attribute works.

    Falls back to printing the message when no notifier is known for the
    current platform.  Raises ``subprocess.CalledProcessError`` if the
    external notifier command exits non-zero.
    """
    # Bug fix: startswith() also matches 'linux2', which Python 2 and some
    # older Python 3 builds report -- a plain equality test misses those.
    if sys.platform.startswith('linux'):
        subprocess.check_call(['notify-send', message])
    elif sys.platform == 'darwin':
        subprocess.check_call(['terminal-notifier', '-message', message])
    else:
        print(message)
johankaito/fufuka
refs/heads/master
microblog/old-flask/lib/python2.7/site-packages/pip/_vendor/html5lib/filters/inject_meta_charset.py
1730
from __future__ import absolute_import, division, unicode_literals

from . import _base


class Filter(_base.Filter):
    """Token-stream filter that injects or rewrites a <meta charset> tag.

    Wraps an html5lib tree-walker token stream.  If ``encoding`` is None
    the stream passes through unchanged; otherwise the filter either
    rewrites an existing meta charset/content-type token or inserts a new
    <meta charset=...> as the first child of <head>.
    """

    def __init__(self, source, encoding):
        _base.Filter.__init__(self, source)
        # Target encoding; None disables injection (meta_found starts True).
        self.encoding = encoding

    def __iter__(self):
        # State machine: "pre_head" -> "in_head" -> "post_head".
        # While in_head, tokens are buffered in 'pending' so a meta tag can
        # be emitted right after the head StartTag if none was found.
        state = "pre_head"
        meta_found = (self.encoding is None)
        pending = []

        for token in _base.Filter.__iter__(self):
            type = token["type"]  # NOTE: shadows the builtin 'type'
            if type == "StartTag":
                if token["name"].lower() == "head":
                    state = "in_head"

            elif type == "EmptyTag":
                if token["name"].lower() == "meta":
                    # replace charset with actual encoding
                    has_http_equiv_content_type = False
                    for (namespace, name), value in token["data"].items():
                        if namespace is not None:
                            continue
                        elif name.lower() == 'charset':
                            # Existing charset attribute: overwrite in place.
                            token["data"][(namespace, name)] = self.encoding
                            meta_found = True
                            break
                        elif name == 'http-equiv' and value.lower() == 'content-type':
                            has_http_equiv_content_type = True
                    else:
                        # for/else: runs only when no charset attr broke out.
                        # Rewrite the content attr of a content-type meta.
                        if has_http_equiv_content_type and (None, "content") in token["data"]:
                            token["data"][(None, "content")] = 'text/html; charset=%s' % self.encoding
                            meta_found = True

                elif token["name"].lower() == "head" and not meta_found:
                    # insert meta into empty head: expand the self-closing
                    # <head/> into StartTag + meta + EndTag.
                    yield {"type": "StartTag", "name": "head",
                           "data": token["data"]}
                    yield {"type": "EmptyTag", "name": "meta",
                           "data": {(None, "charset"): self.encoding}}
                    yield {"type": "EndTag", "name": "head"}
                    meta_found = True
                    continue

            elif type == "EndTag":
                if token["name"].lower() == "head" and pending:
                    # insert meta into head (if necessary) and flush pending queue
                    yield pending.pop(0)
                    if not meta_found:
                        yield {"type": "EmptyTag", "name": "meta",
                               "data": {(None, "charset"): self.encoding}}
                    while pending:
                        yield pending.pop(0)
                    meta_found = True
                    state = "post_head"

            if state == "in_head":
                # Buffer head content (including the head StartTag itself).
                pending.append(token)
            else:
                yield token
webdev1001/ansible
refs/heads/devel
v2/ansible/playbook/playbook_include.py
2
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import os

from ansible.parsing.splitter import split_args, parse_kv
from ansible.parsing.yaml.objects import AnsibleBaseYAMLObject, AnsibleMapping
from ansible.playbook.attribute import FieldAttribute
from ansible.playbook.base import Base
from ansible.playbook.conditional import Conditional
from ansible.playbook.taggable import Taggable


class PlaybookInclude(Base):
    """Playbook-level ``- include: other.yml`` entry.

    Loading one of these does not produce a PlaybookInclude at all: it
    parses the referenced file and returns a Playbook object (see
    load_data below).
    """

    # Field attributes parsed from the YAML entry.
    # NOTE(review): default=dict() is a shared mutable default across
    # FieldAttribute instances -- confirm FieldAttribute copies it.
    _name = FieldAttribute(isa='string')
    _include = FieldAttribute(isa='string')
    _vars = FieldAttribute(isa='dict', default=dict())

    @staticmethod
    def load(data, basedir, variable_manager=None, loader=None):
        # Convenience constructor: delegate straight to load_data.
        return PlaybookInclude().load_data(ds=data, basedir=basedir, variable_manager=variable_manager, loader=loader)

    def load_data(self, ds, basedir, variable_manager=None, loader=None):
        '''
        Overrides the base load_data(), as we're actually going to return a new
        Playbook() object rather than a PlaybookInclude object
        '''

        # import here to avoid a dependency loop
        from ansible.playbook import Playbook

        # first, we use the original parent method to correctly load the object
        # via the munge/load_data system we normally use for other playbook objects
        new_obj = super(PlaybookInclude, self).load_data(ds, variable_manager, loader)

        # then we use the object to load a Playbook
        pb = Playbook(loader=loader)

        # Relative include paths are resolved against the including
        # playbook's directory.
        file_name = new_obj.include
        if not os.path.isabs(file_name):
            file_name = os.path.join(basedir, file_name)

        pb._load_playbook_data(file_name=file_name, variable_manager=variable_manager)

        # finally, playbook includes can specify a list of variables, which are simply
        # used to update the vars of each play in the playbook
        for entry in pb._entries:
            entry.vars.update(new_obj.vars)

        return pb

    def munge(self, ds):
        '''
        Regorganizes the data for a PlaybookInclude datastructure to line up with
        what we expect the proper attributes to be
        '''

        assert isinstance(ds, dict)

        # the new, cleaned datastructure, which will have legacy
        # items reduced to a standard structure
        new_ds = AnsibleMapping()
        if isinstance(ds, AnsibleBaseYAMLObject):
            # Preserve file/line info for error reporting.
            new_ds.copy_position_info(ds)

        # NOTE(review): iteritems() is Python 2 only; this module will not
        # run under Python 3 as written.
        for (k,v) in ds.iteritems():
            if k == 'include':
                self._munge_include(ds, new_ds, k, v)
            elif k.replace("with_", "") in lookup_loader:
                # NOTE(review): neither 'lookup_loader' nor '_munge_loop'
                # is imported/defined in this file -- this branch would
                # raise NameError/AttributeError if reached.  Confirm the
                # intended imports.
                self._munge_loop(ds, new_ds, k, v)
            else:
                # some basic error checking, to make sure vars are properly
                # formatted and do not conflict with k=v parameters
                # FIXME: we could merge these instead, but controlling the order
                #        in which they're encountered could be difficult
                # NOTE(review): AnsibleParserError is not imported here
                # either -- raising it would itself raise NameError.
                if k == 'vars':
                    if 'vars' in new_ds:
                        raise AnsibleParserError("include parameters cannot be mixed with 'vars' entries for include statements", obj=ds)
                    elif not isinstance(v, dict):
                        raise AnsibleParserError("vars for include statements must be specified as a dictionary", obj=ds)
                new_ds[k] = v

        return super(PlaybookInclude, self).munge(new_ds)

    def _munge_include(self, ds, new_ds, k, v):
        '''
        Splits the include line up into filename and parameters
        '''

        # The include line must include at least one item, which is the filename
        # to include. Anything after that should be regarded as a parameter to the include
        items = split_args(v)
        if len(items) == 0:
            raise AnsibleParserError("include statements must specify the file name to include", obj=ds)
        else:
            # FIXME/TODO: validate that items[0] is a file, which also
            # exists and is readable
            new_ds['include'] = items[0]
            if len(items) > 1:
                # rejoin the parameter portion of the arguments and
                # then use parse_kv() to get a dict of params back
                params = parse_kv(" ".join(items[1:]))
                if 'vars' in new_ds:
                    # FIXME: see fixme above regarding merging vars
                    raise AnsibleParserError("include parameters cannot be mixed with 'vars' entries for include statements", obj=ds)
                new_ds['vars'] = params
stacywsmith/ansible
refs/heads/devel
test/units/module_utils/test_facts.py
60
# This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # Make coding more python3-ish from __future__ import (absolute_import, division) __metaclass__ = type import os # for testing from ansible.compat.tests import unittest from ansible.compat.tests.mock import Mock, patch from ansible.module_utils import facts class BaseTestFactsPlatform(unittest.TestCase): platform_id = 'Generic' fact_class = facts.Hardware """Verify that the automagic in Hardware.__new__ selects the right subclass.""" @patch('platform.system') def test_new(self, mock_platform): mock_platform.return_value = self.platform_id inst = self.fact_class(module=Mock(), load_on_init=False) self.assertIsInstance(inst, self.fact_class) self.assertEqual(inst.platform, self.platform_id) def test_subclass(self): # 'Generic' will try to map to platform.system() that we are not mocking here if self.platform_id == 'Generic': return inst = self.fact_class(module=Mock(), load_on_init=False) self.assertIsInstance(inst, self.fact_class) self.assertEqual(inst.platform, self.platform_id) class TestLinuxFactsPlatform(BaseTestFactsPlatform): platform_id = 'Linux' fact_class = facts.LinuxHardware class TestSunOSHardware(BaseTestFactsPlatform): platform_id = 'SunOS' fact_class = facts.SunOSHardware class TestOpenBSDHardware(BaseTestFactsPlatform): platform_id = 'OpenBSD' fact_class = facts.OpenBSDHardware class 
TestFreeBSDHardware(BaseTestFactsPlatform): platform_id = 'FreeBSD' fact_class = facts.FreeBSDHardware class TestDragonFlyHardware(BaseTestFactsPlatform): platform_id = 'DragonFly' fact_class = facts.DragonFlyHardware class TestNetBSDHardware(BaseTestFactsPlatform): platform_id = 'NetBSD' fact_class = facts.NetBSDHardware class TestAIXHardware(BaseTestFactsPlatform): platform_id = 'AIX' fact_class = facts.AIX class TestHPUXHardware(BaseTestFactsPlatform): platform_id = 'HP-UX' fact_class = facts.HPUX class TestDarwinHardware(BaseTestFactsPlatform): platform_id = 'Darwin' fact_class = facts.Darwin class TestGenericNetwork(BaseTestFactsPlatform): platform_id = 'Generic' fact_class = facts.Network class TestLinuxNetwork(BaseTestFactsPlatform): platform_id = 'Generic' fact_class = facts.Network class TestGenericBsdIfconfigNetwork(BaseTestFactsPlatform): platform_id = 'Generic_BSD_Ifconfig' fact_class = facts.GenericBsdIfconfigNetwork class TestHPUXNetwork(BaseTestFactsPlatform): platform_id = 'HP-UX' fact_class = facts.HPUXNetwork class TestDarwinNetwork(BaseTestFactsPlatform): platform_id = 'Darwin' fact_class = facts.DarwinNetwork class TestFreeBSDNetwork(BaseTestFactsPlatform): platform_id = 'FreeBSD' fact_class = facts.FreeBSDNetwork class TestDragonFlyNetwork(BaseTestFactsPlatform): platform_id = 'DragonFly' fact_class = facts.DragonFlyNetwork class TestAIXNetwork(BaseTestFactsPlatform): platform_id = 'AIX' fact_class = facts.AIXNetwork class TestOpenBSDNetwork(BaseTestFactsPlatform): platform_id = 'OpenBSD' fact_class = facts.OpenBSDNetwork class TestSunOSNetwork(BaseTestFactsPlatform): platform_id = 'SunOS' fact_class = facts.SunOSNetwork class TestLinuxVirtual(BaseTestFactsPlatform): platform_id = 'Linux' fact_class = facts.LinuxVirtual class TestFreeBSDVirtual(BaseTestFactsPlatform): platform_id = 'FreeBSD' fact_class = facts.FreeBSDNetwork class TestDragonFlyVirtual(BaseTestFactsPlatform): platform_id = 'DragonFly' fact_class = facts.DragonFlyNetwork class 
TestOpenBSDVirtual(BaseTestFactsPlatform): platform_id = 'OpenBSD' fact_class = facts.OpenBSDVirtual class TestHPUXVirtual(BaseTestFactsPlatform): platform_id = 'HP-UX' fact_class = facts.HPUXVirtual class TestSunOSVirtual(BaseTestFactsPlatform): platform_id = 'SunOS' fact_class = facts.SunOSVirtual LSBLK_OUTPUT = b""" /dev/sda /dev/sda1 32caaec3-ef40-4691-a3b6-438c3f9bc1c0 /dev/sda2 66Ojcd-ULtu-1cZa-Tywo-mx0d-RF4O-ysA9jK /dev/mapper/fedora_dhcp129--186-swap eae6059d-2fbe-4d1c-920d-a80bbeb1ac6d /dev/mapper/fedora_dhcp129--186-root d34cf5e3-3449-4a6c-8179-a1feb2bca6ce /dev/mapper/fedora_dhcp129--186-home 2d3e4853-fa69-4ccf-8a6a-77b05ab0a42d /dev/sr0 /dev/loop0 0f031512-ab15-497d-9abd-3a512b4a9390 /dev/loop1 7c1b0f30-cf34-459f-9a70-2612f82b870a /dev/loop9 0f031512-ab15-497d-9abd-3a512b4a9390 /dev/loop9 7c1b4444-cf34-459f-9a70-2612f82b870a /dev/mapper/docker-253:1-1050967-pool /dev/loop2 /dev/mapper/docker-253:1-1050967-pool """ LSBLK_OUTPUT_2 = b""" /dev/sda /dev/sda1 32caaec3-ef40-4691-a3b6-438c3f9bc1c0 /dev/sda2 66Ojcd-ULtu-1cZa-Tywo-mx0d-RF4O-ysA9jK /dev/mapper/fedora_dhcp129--186-swap eae6059d-2fbe-4d1c-920d-a80bbeb1ac6d /dev/mapper/fedora_dhcp129--186-root d34cf5e3-3449-4a6c-8179-a1feb2bca6ce /dev/mapper/fedora_dhcp129--186-home 2d3e4853-fa69-4ccf-8a6a-77b05ab0a42d /dev/mapper/an-example-mapper with a space in the name 84639acb-013f-4d2f-9392-526a572b4373 /dev/sr0 /dev/loop0 0f031512-ab15-497d-9abd-3a512b4a9390 """ LSBLK_UUIDS = {'/dev/sda1': '66Ojcd-ULtu-1cZa-Tywo-mx0d-RF4O-ysA9jK'} MTAB = """ sysfs /sys sysfs rw,seclabel,nosuid,nodev,noexec,relatime 0 0 proc /proc proc rw,nosuid,nodev,noexec,relatime 0 0 devtmpfs /dev devtmpfs rw,seclabel,nosuid,size=8044400k,nr_inodes=2011100,mode=755 0 0 securityfs /sys/kernel/security securityfs rw,nosuid,nodev,noexec,relatime 0 0 tmpfs /dev/shm tmpfs rw,seclabel,nosuid,nodev 0 0 devpts /dev/pts devpts rw,seclabel,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=000 0 0 tmpfs /run tmpfs rw,seclabel,nosuid,nodev,mode=755 0 0 
tmpfs /sys/fs/cgroup tmpfs ro,seclabel,nosuid,nodev,noexec,mode=755 0 0 cgroup /sys/fs/cgroup/systemd cgroup rw,nosuid,nodev,noexec,relatime,xattr,release_agent=/usr/lib/systemd/systemd-cgroups-agent,name=systemd 0 0 pstore /sys/fs/pstore pstore rw,seclabel,nosuid,nodev,noexec,relatime 0 0 cgroup /sys/fs/cgroup/devices cgroup rw,nosuid,nodev,noexec,relatime,devices 0 0 cgroup /sys/fs/cgroup/freezer cgroup rw,nosuid,nodev,noexec,relatime,freezer 0 0 cgroup /sys/fs/cgroup/memory cgroup rw,nosuid,nodev,noexec,relatime,memory 0 0 cgroup /sys/fs/cgroup/pids cgroup rw,nosuid,nodev,noexec,relatime,pids 0 0 cgroup /sys/fs/cgroup/blkio cgroup rw,nosuid,nodev,noexec,relatime,blkio 0 0 cgroup /sys/fs/cgroup/cpuset cgroup rw,nosuid,nodev,noexec,relatime,cpuset 0 0 cgroup /sys/fs/cgroup/cpu,cpuacct cgroup rw,nosuid,nodev,noexec,relatime,cpu,cpuacct 0 0 cgroup /sys/fs/cgroup/hugetlb cgroup rw,nosuid,nodev,noexec,relatime,hugetlb 0 0 cgroup /sys/fs/cgroup/perf_event cgroup rw,nosuid,nodev,noexec,relatime,perf_event 0 0 cgroup /sys/fs/cgroup/net_cls,net_prio cgroup rw,nosuid,nodev,noexec,relatime,net_cls,net_prio 0 0 configfs /sys/kernel/config configfs rw,relatime 0 0 /dev/mapper/fedora_dhcp129--186-root / ext4 rw,seclabel,relatime,data=ordered 0 0 selinuxfs /sys/fs/selinux selinuxfs rw,relatime 0 0 systemd-1 /proc/sys/fs/binfmt_misc autofs rw,relatime,fd=24,pgrp=1,timeout=0,minproto=5,maxproto=5,direct 0 0 debugfs /sys/kernel/debug debugfs rw,seclabel,relatime 0 0 hugetlbfs /dev/hugepages hugetlbfs rw,seclabel,relatime 0 0 tmpfs /tmp tmpfs rw,seclabel 0 0 mqueue /dev/mqueue mqueue rw,seclabel,relatime 0 0 /dev/loop0 /var/lib/machines btrfs rw,seclabel,relatime,space_cache,subvolid=5,subvol=/ 0 0 /dev/sda1 /boot ext4 rw,seclabel,relatime,data=ordered 0 0 /dev/mapper/fedora_dhcp129--186-home /home ext4 rw,seclabel,relatime,data=ordered 0 0 tmpfs /run/user/1000 tmpfs rw,seclabel,nosuid,nodev,relatime,size=1611044k,mode=700,uid=1000,gid=1000 0 0 gvfsd-fuse /run/user/1000/gvfs 
fuse.gvfsd-fuse rw,nosuid,nodev,relatime,user_id=1000,group_id=1000 0 0 fusectl /sys/fs/fuse/connections fusectl rw,relatime 0 0 grimlock.g.a: /home/adrian/sshfs-grimlock fuse.sshfs rw,nosuid,nodev,relatime,user_id=1000,group_id=1000 0 0 grimlock.g.a:test_path/path_with'single_quotes /home/adrian/sshfs-grimlock-single-quote fuse.sshfs rw,nosuid,nodev,relatime,user_id=1000,group_id=1000 0 0 grimlock.g.a:path_with'single_quotes /home/adrian/sshfs-grimlock-single-quote-2 fuse.sshfs rw,nosuid,nodev,relatime,user_id=1000,group_id=1000 0 0 grimlock.g.a:/mnt/data/foto's /home/adrian/fotos fuse.sshfs rw,nosuid,nodev,relatime,user_id=1000,group_id=1000 0 0 """ MTAB_ENTRIES = \ [ ['sysfs', '/sys', 'sysfs', 'rw,seclabel,nosuid,nodev,noexec,relatime', '0', '0'], ['proc', '/proc', 'proc', 'rw,nosuid,nodev,noexec,relatime', '0', '0'], ['devtmpfs', '/dev', 'devtmpfs', 'rw,seclabel,nosuid,size=8044400k,nr_inodes=2011100,mode=755', '0', '0'], ['securityfs', '/sys/kernel/security', 'securityfs', 'rw,nosuid,nodev,noexec,relatime', '0', '0'], ['tmpfs', '/dev/shm', 'tmpfs', 'rw,seclabel,nosuid,nodev', '0', '0'], ['devpts', '/dev/pts', 'devpts', 'rw,seclabel,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=000', '0', '0'], ['tmpfs', '/run', 'tmpfs', 'rw,seclabel,nosuid,nodev,mode=755', '0', '0'], ['tmpfs', '/sys/fs/cgroup', 'tmpfs', 'ro,seclabel,nosuid,nodev,noexec,mode=755', '0', '0'], ['cgroup', '/sys/fs/cgroup/systemd', 'cgroup', 'rw,nosuid,nodev,noexec,relatime,xattr,release_agent=/usr/lib/systemd/systemd-cgroups-agent,name=systemd', '0', '0'], ['pstore', '/sys/fs/pstore', 'pstore', 'rw,seclabel,nosuid,nodev,noexec,relatime', '0', '0'], ['cgroup', '/sys/fs/cgroup/devices', 'cgroup', 'rw,nosuid,nodev,noexec,relatime,devices', '0', '0'], ['cgroup', '/sys/fs/cgroup/freezer', 'cgroup', 'rw,nosuid,nodev,noexec,relatime,freezer', '0', '0'], ['cgroup', '/sys/fs/cgroup/memory', 'cgroup', 'rw,nosuid,nodev,noexec,relatime,memory', '0', '0'], ['cgroup', '/sys/fs/cgroup/pids', 'cgroup', 
'rw,nosuid,nodev,noexec,relatime,pids', '0', '0'], ['cgroup', '/sys/fs/cgroup/blkio', 'cgroup', 'rw,nosuid,nodev,noexec,relatime,blkio', '0', '0'], ['cgroup', '/sys/fs/cgroup/cpuset', 'cgroup', 'rw,nosuid,nodev,noexec,relatime,cpuset', '0', '0'], ['cgroup', '/sys/fs/cgroup/cpu,cpuacct', 'cgroup', 'rw,nosuid,nodev,noexec,relatime,cpu,cpuacct', '0', '0'], ['cgroup', '/sys/fs/cgroup/hugetlb', 'cgroup', 'rw,nosuid,nodev,noexec,relatime,hugetlb', '0', '0'], ['cgroup', '/sys/fs/cgroup/perf_event', 'cgroup', 'rw,nosuid,nodev,noexec,relatime,perf_event', '0', '0'], ['cgroup', '/sys/fs/cgroup/net_cls,net_prio', 'cgroup', 'rw,nosuid,nodev,noexec,relatime,net_cls,net_prio', '0', '0'], ['configfs', '/sys/kernel/config', 'configfs', 'rw,relatime', '0', '0'], ['/dev/mapper/fedora_dhcp129--186-root', '/', 'ext4', 'rw,seclabel,relatime,data=ordered', '0', '0'], ['selinuxfs', '/sys/fs/selinux', 'selinuxfs', 'rw,relatime', '0', '0'], ['systemd-1', '/proc/sys/fs/binfmt_misc', 'autofs', 'rw,relatime,fd=24,pgrp=1,timeout=0,minproto=5,maxproto=5,direct', '0', '0'], ['debugfs', '/sys/kernel/debug', 'debugfs', 'rw,seclabel,relatime', '0', '0'], ['hugetlbfs', '/dev/hugepages', 'hugetlbfs', 'rw,seclabel,relatime', '0', '0'], ['tmpfs', '/tmp', 'tmpfs', 'rw,seclabel', '0', '0'], ['mqueue', '/dev/mqueue', 'mqueue', 'rw,seclabel,relatime', '0', '0'], ['/dev/loop0', '/var/lib/machines', 'btrfs', 'rw,seclabel,relatime,space_cache,subvolid=5,subvol=/', '0', '0'], ['/dev/sda1', '/boot', 'ext4', 'rw,seclabel,relatime,data=ordered', '0', '0'], # A 'none' fstype ['/dev/sdz3', '/not/a/real/device', 'none', 'rw,seclabel,relatime,data=ordered', '0', '0'], # lets assume this is a bindmount ['/dev/sdz4', '/not/a/real/bind_mount', 'ext4', 'rw,seclabel,relatime,data=ordered', '0', '0'], ['/dev/mapper/fedora_dhcp129--186-home', '/home', 'ext4', 'rw,seclabel,relatime,data=ordered', '0', '0'], ['tmpfs', '/run/user/1000', 'tmpfs', 'rw,seclabel,nosuid,nodev,relatime,size=1611044k,mode=700,uid=1000,gid=1000', '0', 
'0'], ['gvfsd-fuse', '/run/user/1000/gvfs', 'fuse.gvfsd-fuse', 'rw,nosuid,nodev,relatime,user_id=1000,group_id=1000', '0', '0'], ['fusectl', '/sys/fs/fuse/connections', 'fusectl', 'rw,relatime', '0', '0']] BIND_MOUNTS = ['/not/a/real/bind_mount'] with open(os.path.join(os.path.dirname(__file__), 'fixtures/findmount_output.txt')) as f: FINDMNT_OUTPUT = f.read() class TestFactsLinuxHardwareGetMountFacts(unittest.TestCase): # FIXME: mock.patch instead def setUp(self): # The @timeout tracebacks if there isn't a GATHER_TIMEOUT is None (the default until get_all_facts sets it via global) facts.GATHER_TIMEOUT = 10 def tearDown(self): facts.GATHER_TIMEOUT = None # The Hardware subclasses freakout if instaniated directly, so # mock platform.system and inst Hardware() so we get a LinuxHardware() # we can test. @patch('ansible.module_utils.facts.LinuxHardware._mtab_entries', return_value=MTAB_ENTRIES) @patch('ansible.module_utils.facts.LinuxHardware._find_bind_mounts', return_value=BIND_MOUNTS) @patch('ansible.module_utils.facts.LinuxHardware._lsblk_uuid', return_value=LSBLK_UUIDS) def test_get_mount_facts(self, mock_lsblk_uuid, mock_find_bind_mounts, mock_mtab_entries): module = Mock() # Returns a LinuxHardware-ish lh = facts.LinuxHardware(module=module, load_on_init=False) # Nothing returned, just self.facts modified as a side effect lh.get_mount_facts() self.assertIsInstance(lh.facts, dict) self.assertIn('mounts', lh.facts) self.assertIsInstance(lh.facts['mounts'], list) self.assertIsInstance(lh.facts['mounts'][0], dict) @patch('ansible.module_utils.facts.get_file_content', return_value=MTAB) def test_get_mtab_entries(self, mock_get_file_content): module = Mock() lh = facts.LinuxHardware(module=module, load_on_init=False) mtab_entries = lh._mtab_entries() self.assertIsInstance(mtab_entries, list) self.assertIsInstance(mtab_entries[0], list) self.assertEqual(len(mtab_entries), 38) @patch('ansible.module_utils.facts.LinuxHardware._run_findmnt', return_value=(0, 
FINDMNT_OUTPUT, '')) def test_find_bind_mounts(self, mock_run_findmnt): module = Mock() lh = facts.LinuxHardware(module=module, load_on_init=False) bind_mounts = lh._find_bind_mounts() # If bind_mounts becomes another seq type, feel free to change self.assertIsInstance(bind_mounts, set) self.assertEqual(len(bind_mounts), 1) self.assertIn('/not/a/real/bind_mount', bind_mounts) @patch('ansible.module_utils.facts.LinuxHardware._run_findmnt', return_value=(37, '', '')) def test_find_bind_mounts_non_zero(self, mock_run_findmnt): module = Mock() lh = facts.LinuxHardware(module=module, load_on_init=False) bind_mounts = lh._find_bind_mounts() self.assertIsInstance(bind_mounts, set) self.assertEqual(len(bind_mounts), 0) def test_find_bind_mounts_no_findmnts(self): module = Mock() module.get_bin_path = Mock(return_value=None) lh = facts.LinuxHardware(module=module, load_on_init=False) bind_mounts = lh._find_bind_mounts() self.assertIsInstance(bind_mounts, set) self.assertEqual(len(bind_mounts), 0) @patch('ansible.module_utils.facts.LinuxHardware._run_lsblk', return_value=(0, LSBLK_OUTPUT,'')) def test_lsblk_uuid(self, mock_run_lsblk): module = Mock() lh = facts.LinuxHardware(module=module, load_on_init=False) lsblk_uuids = lh._lsblk_uuid() self.assertIsInstance(lsblk_uuids, dict) self.assertIn(b'/dev/loop9', lsblk_uuids) self.assertIn(b'/dev/sda1', lsblk_uuids) self.assertEquals(lsblk_uuids[b'/dev/sda1'], b'32caaec3-ef40-4691-a3b6-438c3f9bc1c0') @patch('ansible.module_utils.facts.LinuxHardware._run_lsblk', return_value=(37, LSBLK_OUTPUT,'')) def test_lsblk_uuid_non_zero(self, mock_run_lsblk): module = Mock() lh = facts.LinuxHardware(module=module, load_on_init=False) lsblk_uuids = lh._lsblk_uuid() self.assertIsInstance(lsblk_uuids, dict) self.assertEquals(len(lsblk_uuids), 0) def test_lsblk_uuid_no_lsblk(self): module = Mock() module.get_bin_path = Mock(return_value=None) lh = facts.LinuxHardware(module=module, load_on_init=False) lsblk_uuids = lh._lsblk_uuid() 
self.assertIsInstance(lsblk_uuids, dict) self.assertEquals(len(lsblk_uuids), 0) @patch('ansible.module_utils.facts.LinuxHardware._run_lsblk', return_value=(0, LSBLK_OUTPUT_2,'')) def test_lsblk_uuid_dev_with_space_in_name(self, mock_run_lsblk): module = Mock() lh = facts.LinuxHardware(module=module, load_on_init=False) lsblk_uuids = lh._lsblk_uuid() self.assertIsInstance(lsblk_uuids, dict) self.assertIn(b'/dev/loop0', lsblk_uuids) self.assertIn(b'/dev/sda1', lsblk_uuids) self.assertEquals(lsblk_uuids[b'/dev/mapper/an-example-mapper with a space in the name'], b'84639acb-013f-4d2f-9392-526a572b4373') self.assertEquals(lsblk_uuids[b'/dev/sda1'], b'32caaec3-ef40-4691-a3b6-438c3f9bc1c0')
PriceChild/ansible
refs/heads/devel
lib/ansible/inventory/vars_plugins/__init__.py
145
# Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type
newswangerd/ansible
refs/heads/devel
examples/scripts/my_test_info.py
29
#!/usr/bin/python # Copyright: (c) 2020, Your Name <YourName@example.org> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import (absolute_import, division, print_function) __metaclass__ = type DOCUMENTATION = r''' --- module: my_test_info short_description: This is my test info module version_added: "1.0.0" description: This is my longer description explaining my test info module. options: name: description: This is the message to send to the test module. required: true type: str author: - Your Name (@yourGitHubHandle) ''' EXAMPLES = r''' # Pass in a message - name: Test with a message my_namespace.my_collection.my_test_info: name: hello world ''' RETURN = r''' # These are examples of possible return values, and in general should use other names for return values. original_message: description: The original name param that was passed in. type: str returned: always sample: 'hello world' message: description: The output message that the test module generates. type: str returned: always sample: 'goodbye' my_useful_info: description: The dictionary containing information about your system. 
type: dict returned: always sample: { 'foo': 'bar', 'answer': 42, } ''' from ansible.module_utils.basic import AnsibleModule def run_module(): # define available arguments/parameters a user can pass to the module module_args = dict( name=dict(type='str', required=True), ) # seed the result dict in the object # we primarily care about changed and state # changed is if this module effectively modified the target # state will include any data that you want your module to pass back # for consumption, for example, in a subsequent task result = dict( changed=False, original_message='', message='', my_useful_info={}, ) # the AnsibleModule object will be our abstraction working with Ansible # this includes instantiation, a couple of common attr would be the # args/params passed to the execution, as well as if the module # supports check mode module = AnsibleModule( argument_spec=module_args, supports_check_mode=True ) # if the user is working with this module in only check mode we do not # want to make any changes to the environment, just return the current # state with no modifications if module.check_mode: module.exit_json(**result) # manipulate or modify the state as needed (this is going to be the # part where your module will do what it needs to do) result['original_message'] = module.params['name'] result['message'] = 'goodbye' result['my_useful_info'] = { 'foo': 'bar', 'answer': 42, } # in the event of a successful module execution, you will want to # simple AnsibleModule.exit_json(), passing the key/value results module.exit_json(**result) def main(): run_module() if __name__ == '__main__': main()
pkilambi/ceilometer
refs/heads/master
ceilometer/network/services/base.py
3
# # Copyright 2014 Cisco Systems,Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from oslo_log import log from ceilometer.agent import plugin_base from ceilometer import neutron_client LOG = log.getLogger(__name__) # status map for converting metric status to volume int STATUS = { 'inactive': 0, 'active': 1, 'pending_create': 2, } class BaseServicesPollster(plugin_base.PollsterBase): FIELDS = [] nc = neutron_client.Client() @staticmethod def _iter_cache(cache, meter_name, method): if meter_name not in cache: cache[meter_name] = list(method()) return iter(cache[meter_name]) def extract_metadata(self, metric): return dict((k, metric[k]) for k in self.FIELDS) @staticmethod def get_status_id(value): status = value.lower() return STATUS.get(status, -1)
gi0baro/weppy-assets
refs/heads/master
weppy_assets/webassets/loaders.py
1
"""Loaders are helper classes which will read environments and/or bundles from a source, like a configuration file. This can be used as an alternative to an imperative setup. """ import os, sys from os import path import glob, fnmatch import inspect import types from . import six try: import yaml except ImportError: pass from . import Environment from .bundle import Bundle from .exceptions import EnvironmentError from .filter import register_filter from .importlib import import_module __all__ = ('Loader', 'LoaderError', 'PythonLoader', 'YAMLLoader', 'GlobLoader',) class LoaderError(Exception): """Loaders should raise this when they can't deal with a given file. """ class YAMLLoader(object): """Will load an environment or a set of bundles from `YAML <http://en.wikipedia.org/wiki/YAML>`_ files. """ def __init__(self, file_or_filename): try: yaml except NameError: raise EnvironmentError('PyYAML is not installed') else: self.yaml = yaml self.file_or_filename = file_or_filename def _yield_bundle_contents(self, data): """Yield bundle contents from the given dict. 
Each item yielded will be either a string representing a file path or a bundle.""" contents = data.get('contents', []) if isinstance(contents, six.string_types): contents = contents, for content in contents: if isinstance(content, dict): content = self._get_bundle(content) yield content def _get_bundle(self, data): """Return a bundle initialised by the given dict.""" kwargs = dict( filters=data.get('filters', None), output=data.get('output', None), debug=data.get('debug', None), extra=data.get('extra', {}), config=data.get('config', {}), depends=data.get('depends', None)) return Bundle(*list(self._yield_bundle_contents(data)), **kwargs) def _get_bundles(self, obj, known_bundles=None): """Return a dict that keys bundle names to bundles.""" bundles = {} for key, data in six.iteritems(obj): if data is None: data = {} bundles[key] = self._get_bundle(data) # now we need to recurse through the bundles and get any that # are included in each other. for bundle_name, bundle in bundles.items(): # copy contents contents = list(bundle.contents) for i, item in enumerate(bundle.contents): if item in bundles: contents[i] = bundles[item] elif known_bundles and item in known_bundles: contents[i] = known_bundles[item] # cast back to a tuple contents = tuple(contents) if contents != bundle.contents: bundle.contents = contents return bundles def _open(self): """Returns a (fileobj, filename) tuple. The filename can be False if it is unknown. """ if isinstance(self.file_or_filename, six.string_types): return open(self.file_or_filename), self.file_or_filename file = self.file_or_filename return file, getattr(file, 'name', False) @classmethod def _get_import_resolver(cls): """ method that can be overridden in tests """ from zope.dottedname.resolve import resolve as resolve_dotted return resolve_dotted def load_bundles(self, environment=None): """Load a list of :class:`Bundle` instances defined in the YAML file. Expects the following format: .. 
code-block:: yaml bundle-name: filters: sass,cssutils output: cache/default.css contents: - css/jquery.ui.calendar.css - css/jquery.ui.slider.css another-bundle: # ... Bundles may reference each other: .. code-block:: yaml js-all: contents: - jquery.js - jquery-ui # This is a bundle reference jquery-ui: contents: jqueryui/*.js If an ``environment`` argument is given, it's bundles may be referenced as well. Note that you may pass any compatibly dict-like object. Finally, you may also use nesting: .. code-block:: yaml js-all: contents: - jquery.js # This is a nested bundle - contents: "*.coffee" filters: coffeescript """ # TODO: Support a "consider paths relative to YAML location, return # as absolute paths" option? f, _ = self._open() try: obj = self.yaml.load(f) or {} return self._get_bundles(obj, environment) finally: f.close() def load_environment(self): """Load an :class:`Environment` instance defined in the YAML file. Expects the following format: .. code-block:: yaml directory: ../static url: /media debug: True updater: timestamp filters: - my_custom_package.my_filter config: compass_bin: /opt/compass another_custom_config_value: foo bundles: # ... All values, including ``directory`` and ``url`` are optional. The syntax for defining bundles is the same as for :meth:`~.YAMLLoader.load_bundles`. Sample usage:: from webassets.loaders import YAMLLoader loader = YAMLLoader('asset.yml') env = loader.load_environment() env['some-bundle'].urls() """ f, filename = self._open() try: obj = self.yaml.load(f) or {} env = Environment() # Load environment settings for setting in ('debug', 'cache', 'versions', 'url_expire', 'auto_build', 'url', 'directory', 'manifest', 'load_path', 'cache_file_mode', # TODO: The deprecated values; remove at some point 'expire', 'updater'): if setting in obj: setattr(env, setting, obj[setting]) # Treat the 'directory' option special, make it relative to the # path of the YAML file, if we know it. 
if filename and 'directory' in env.config: env.directory = path.normpath( path.join(path.dirname(filename), env.config['directory'])) # Treat the 'filters' option special, it should resolve the # entries as classes and register them to the environment if 'filters' in obj: try: resolve_dotted = self._get_import_resolver() except ImportError: raise EnvironmentError( "In order to use custom filters in the YAMLLoader " "you must install the zope.dottedname package") for filter_class in obj['filters']: try: cls = resolve_dotted(filter_class) except ImportError: raise LoaderError("Unable to resolve class %s" % filter_class) if inspect.isclass(cls): register_filter(cls) else: raise LoaderError("Custom filters must be classes " "not modules or functions") # Load custom config options if 'config' in obj: env.config.update(obj['config']) # Load bundles bundles = self._get_bundles(obj.get('bundles', {})) for name, bundle in six.iteritems(bundles): env.register(name, bundle) return env finally: f.close() class PythonLoader(object): """Basically just a simple helper to import a Python file and retrieve the bundles defined there. """ environment = "environment" def __init__(self, module_name): if isinstance(module_name, types.ModuleType): self.module = module_name else: sys.path.insert(0, '') # Ensure the current directory is on the path try: try: if ":" in module_name: module_name, env = module_name.split(":") self.environment = env self.module = import_module(module_name) except ImportError as e: raise LoaderError(e) finally: sys.path.pop(0) def load_bundles(self): """Load ``Bundle`` objects defined in the Python module. Collects all bundles in the global namespace. """ bundles = {} for name in dir(self.module): value = getattr(self.module, name) if isinstance(value, Bundle): bundles[name] = value return bundles def load_environment(self): """Load an ``Environment`` defined in the Python module. 
Expects as default a global name ``environment`` to be defined, or overriden by passing a string ``module:environent`` to the constructor. """ try: return getattr(self.module, self.environment) except AttributeError as e: raise LoaderError(e) def recursive_glob(treeroot, pattern): """ From: http://stackoverflow.com/questions/2186525/2186639#2186639 """ results = [] for base, dirs, files in os.walk(treeroot): goodfiles = fnmatch.filter(files, pattern) results.extend(os.path.join(base, f) for f in goodfiles) return results class GlobLoader(object): """Base class with some helpers for loaders which need to search for files. """ def glob_files(self, f, recursive=False): if isinstance(f, tuple): return iter(recursive_glob(f[0], f[1])) else: return iter(glob.glob(f)) def with_file(self, filename, then_run): """Call ``then_run`` with the file contents. """ file = open(filename, 'rb') try: contents = file.read() try: return then_run(filename, contents) except LoaderError: # We can't handle this file. pass finally: file.close()
rhattersley/iris
refs/heads/master
lib/iris/io/__init__.py
4
# (C) British Crown Copyright 2010 - 2016, Met Office # # This file is part of Iris. # # Iris is free software: you can redistribute it and/or modify it under # the terms of the GNU Lesser General Public License as published by the # Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Iris is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with Iris. If not, see <http://www.gnu.org/licenses/>. """ Provides an interface to manage URI scheme support in iris. """ from __future__ import (absolute_import, division, print_function) from six.moves import (filter, input, map, range, zip) # noqa import six import glob import os.path import types import re import collections import iris.fileformats import iris.cube import iris.exceptions # Saving routines, indexed by file extension. class _SaversDict(dict): """A dictionary that can only have string keys with no overlap.""" def __setitem__(self, key, value): if not isinstance(key, six.string_types): raise ValueError("key is not a string") if key in self: raise ValueError("A saver already exists for", key) for k in self.keys(): if k.endswith(key) or key.endswith(k): raise ValueError("key %s conflicts with existing key %s" % (key, k)) dict.__setitem__(self, key, value) _savers = _SaversDict() def run_callback(callback, cube, field, filename): """ Runs the callback mechanism given the appropriate arguments. Args: * callback: A function to add metadata from the originating field and/or URI which obeys the following rules: 1. Function signature must be: ``(cube, field, filename)``. 2. Modifies the given cube inplace, unless a new cube is returned by the function. 3. 
If the cube is to be rejected the callback must raise an :class:`iris.exceptions.IgnoreCubeException`. .. note:: It is possible that this function returns None for certain callbacks, the caller of this function should handle this case. """ if callback is None: return cube # Call the callback function on the cube, generally the function will # operate on the cube in place, but it is also possible that the function # will return a completely new cube instance. try: result = callback(cube, field, filename) except iris.exceptions.IgnoreCubeException: result = None else: if result is None: result = cube elif not isinstance(result, iris.cube.Cube): raise TypeError("Callback function returned an " "unhandled data type.") return result def decode_uri(uri, default='file'): r''' Decodes a single URI into scheme and scheme-specific parts. In addition to well-formed URIs, it also supports bare file paths. Both Windows and UNIX style paths are accepted. .. testsetup:: from iris.io import * Examples: >>> from iris.io import decode_uri >>> print(decode_uri('http://www.thing.com:8080/resource?id=a:b')) ('http', '//www.thing.com:8080/resource?id=a:b') >>> print(decode_uri('file:///data/local/dataZoo/...')) ('file', '///data/local/dataZoo/...') >>> print(decode_uri('/data/local/dataZoo/...')) ('file', '/data/local/dataZoo/...') >>> print(decode_uri('file:///C:\data\local\dataZoo\...')) ('file', '///C:\\data\\local\\dataZoo\\...') >>> print(decode_uri('C:\data\local\dataZoo\...')) ('file', 'C:\\data\\local\\dataZoo\\...') >>> print(decode_uri('dataZoo/...')) ('file', 'dataZoo/...') ''' # make sure scheme has at least 2 letters to avoid windows drives # put - last in the brackets so it refers to the character, not a range # reference on valid schemes: http://tools.ietf.org/html/std66#section-3.1 match = re.match(r"^([a-zA-Z][a-zA-Z0-9+.-]+):(.+)", uri) if match: scheme = match.group(1) part = match.group(2) else: # Catch bare UNIX and Windows paths scheme = default part = uri return 
scheme, part def expand_filespecs(file_specs): """ Find all matching file paths from a list of file-specs. Args: * file_specs (iterable of string): File paths which may contain '~' elements or wildcards. Returns: A list of matching file paths. If any of the file-specs matches no existing files, an exception is raised. """ # Remove any hostname component - currently unused filenames = [os.path.expanduser(fn[2:] if fn.startswith('//') else fn) for fn in file_specs] # Try to expand all filenames as globs glob_expanded = {fn : sorted(glob.glob(fn)) for fn in filenames} # If any of the specs expanded to an empty list then raise an error value_lists = glob_expanded.values() if not all(value_lists): raise IOError("One or more of the files specified did not exist %s." % ["%s expanded to %s" % (pattern, expanded if expanded else "empty") for pattern, expanded in six.iteritems(glob_expanded)]) return sum(value_lists, []) def load_files(filenames, callback, constraints=None): """ Takes a list of filenames which may also be globs, and optionally a constraint set and a callback function, and returns a generator of Cubes from the given files. .. note:: Typically, this function should not be called directly; instead, the intended interface for loading is :func:`iris.load`. 
""" all_file_paths = expand_filespecs(filenames) # Create default dict mapping iris format handler to its associated filenames handler_map = collections.defaultdict(list) for fn in all_file_paths: with open(fn, 'rb') as fh: handling_format_spec = iris.fileformats.FORMAT_AGENT.get_spec(os.path.basename(fn), fh) handler_map[handling_format_spec].append(fn) # Call each iris format handler with the approriate filenames for handling_format_spec in sorted(handler_map): fnames = handler_map[handling_format_spec] if handling_format_spec.constraint_aware_handler: for cube in handling_format_spec.handler(fnames, callback, constraints): yield cube else: for cube in handling_format_spec.handler(fnames, callback): yield cube def load_http(urls, callback): """ Takes a list of urls and a callback function, and returns a generator of Cubes from the given URLs. .. note:: Typically, this function should not be called directly; instead, the intended interface for loading is :func:`iris.load`. """ # Create default dict mapping iris format handler to its associated filenames handler_map = collections.defaultdict(list) for url in urls: handling_format_spec = iris.fileformats.FORMAT_AGENT.get_spec(url, None) handler_map[handling_format_spec].append(url) # Call each iris format handler with the appropriate filenames for handling_format_spec in sorted(handler_map): fnames = handler_map[handling_format_spec] for cube in handling_format_spec.handler(fnames, callback): yield cube def _dot_save(cube, target): # A simple wrapper for `iris.fileformats.dot.save` which allows the # saver to be registered without triggering the import of # `iris.fileformats.dot`. import iris.fileformats.dot return iris.fileformats.dot.save(cube, target) def _dot_save_png(cube, target, **kwargs): # A simple wrapper for `iris.fileformats.dot.save_png` which allows the # saver to be registered without triggering the import of # `iris.fileformats.dot`. 
import iris.fileformats.dot return iris.fileformats.dot.save_png(cube, target, **kwargs) def _grib_save(cube, target, append=False, **kwargs): # A simple wrapper for `iris.fileformats.grib.save_grib2` which # allows the saver to be registered without having `gribapi` # installed. try: import gribapi except ImportError: raise RuntimeError('Unable to save GRIB file - the ECMWF ' '`gribapi` package is not installed.') return iris.fileformats.grib.save_grib2(cube, target, append, **kwargs) def _check_init_savers(): # TODO: Raise a ticket to resolve the cyclic import error that requires # us to initialise this on first use. Probably merge io and fileformats. if "pp" not in _savers: _savers.update({"pp": iris.fileformats.pp.save, "nc": iris.fileformats.netcdf.save, "dot": _dot_save, "dotpng": _dot_save_png, "grib2": _grib_save}) def add_saver(file_extension, new_saver): """ Add a custom saver to the Iris session. Args: * file_extension - A string such as "pp" or "my_format". * new_saver - A function of the form ``my_saver(cube, target)``. See also :func:`iris.io.save` """ # Make sure it's a func with 2+ args if not hasattr(new_saver, "__call__") or new_saver.__code__.co_argcount < 2: raise ValueError("Saver routines must be callable with 2+ arguments.") # Try to add this saver. Invalid keys will be rejected. _savers[file_extension] = new_saver def find_saver(filespec): """ Find the saver function appropriate to the given filename or extension. Args: * filespec - A string such as "my_file.pp" or "PP". Returns: A save function or None. Save functions can be passed to :func:`iris.io.save`. """ _check_init_savers() matches = [ext for ext in _savers if filespec.lower().endswith('.' + ext) or filespec.lower() == ext] # Multiple matches could occur if one of the savers included a '.': # e.g. 
_savers = {'.dot.png': dot_png_saver, '.png': png_saver} if len(matches) > 1: fmt = "Multiple savers found for %r: %s" matches = ', '.join(map(repr, matches)) raise ValueError(fmt % (filespec, matches)) return _savers[matches[0]] if matches else None def save(source, target, saver=None, **kwargs): """ Save one or more Cubes to file (or other writable). Iris currently supports three file formats for saving, which it can recognise by filename extension: * netCDF - the Unidata network Common Data Format: * see :func:`iris.fileformats.netcdf.save` * GRIB2 - the WMO GRIdded Binary data format; * see :func:`iris.fileformats.grib.save_grib2` * PP - the Met Office UM Post Processing Format. * see :func:`iris.fileformats.pp.save` A custom saver can be provided to the function to write to a different file format. Args: * source - A :class:`iris.cube.Cube`, :class:`iris.cube.CubeList` or sequence of cubes. * target - A filename (or writable, depending on file format). When given a filename or file, Iris can determine the file format. Kwargs: * saver - Optional. Specifies the save function to use. If omitted, Iris will attempt to determine the format. This keyword can be used to implement a custom save format. Function form must be: ``my_saver(cube, target)`` plus any custom keywords. It is assumed that a saver will accept an ``append`` keyword if it's file format can handle multiple cubes. See also :func:`iris.io.add_saver`. All other keywords are passed through to the saver function; see the relevant saver documentation for more information on keyword arguments. 
Examples:: # Save a cube to PP iris.save(my_cube, "myfile.pp") # Save a cube list to a PP file, appending to the contents of the file # if it already exists iris.save(my_cube_list, "myfile.pp", append=True) # Save a cube to netCDF, defaults to NETCDF4 file format iris.save(my_cube, "myfile.nc") # Save a cube list to netCDF, using the NETCDF3_CLASSIC storage option iris.save(my_cube_list, "myfile.nc", netcdf_format="NETCDF3_CLASSIC") .. warning:: Saving a cube whose data has been loaded lazily (if `cube.has_lazy_data()` returns `True`) to the same file it expects to load data from will cause both the data in-memory and the data on disk to be lost. .. code-block:: python cube = iris.load_cube('somefile.nc') # The next line causes data loss in 'somefile.nc' and the cube. iris.save(cube, 'somefile.nc') In general, overwriting a file which is the source for any lazily loaded data can result in corruption. Users should proceed with caution when attempting to overwrite an existing file. """ # Determine format from filename if isinstance(target, six.string_types) and saver is None: saver = find_saver(target) elif hasattr(target, 'name') and saver is None: saver = find_saver(target.name) elif isinstance(saver, six.string_types): saver = find_saver(saver) if saver is None: raise ValueError("Cannot save; no saver") # Single cube? if isinstance(source, iris.cube.Cube): saver(source, target, **kwargs) # CubeList or sequence of cubes? elif (isinstance(source, iris.cube.CubeList) or (isinstance(source, (list, tuple)) and all([isinstance(i, iris.cube.Cube) for i in source]))): # Only allow cubelist saving for those fileformats that are capable. if not 'iris.fileformats.netcdf' in saver.__module__: # Make sure the saver accepts an append keyword if not "append" in saver.__code__.co_varnames: raise ValueError("Cannot append cubes using saver function " "'%s' in '%s'" % (saver.__code__.co_name, saver.__code__.co_filename)) # Force append=True for the tail cubes. 
Don't modify the incoming # kwargs. kwargs = kwargs.copy() for i, cube in enumerate(source): if i != 0: kwargs['append'] = True saver(cube, target, **kwargs) # Netcdf saver. else: saver(source, target, **kwargs) else: raise ValueError("Cannot save; non Cube found in source")
jaxkodex/odoo
refs/heads/8.0
addons/stock_landed_costs/stock_landed_costs.py
77
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp.osv import fields, osv import openerp.addons.decimal_precision as dp from openerp.exceptions import Warning from openerp.tools import float_compare, float_round from openerp.tools.translate import _ import product class stock_landed_cost(osv.osv): _name = 'stock.landed.cost' _description = 'Stock Landed Cost' _inherit = 'mail.thread' _track = { 'state': { 'stock_landed_costs.mt_stock_landed_cost_open': lambda self, cr, uid, obj, ctx=None: obj['state'] == 'done', }, } def _total_amount(self, cr, uid, ids, name, args, context=None): result = {} for cost in self.browse(cr, uid, ids, context=context): total = 0.0 for line in cost.cost_lines: total += line.price_unit result[cost.id] = total return result def _get_cost_line(self, cr, uid, ids, context=None): cost_to_recompute = [] for line in self.pool.get('stock.landed.cost.lines').browse(cr, uid, ids, context=context): cost_to_recompute.append(line.cost_id.id) return cost_to_recompute def get_valuation_lines(self, cr, uid, ids, picking_ids=None, context=None): picking_obj = 
self.pool.get('stock.picking') lines = [] if not picking_ids: return lines for picking in picking_obj.browse(cr, uid, picking_ids): for move in picking.move_lines: #it doesn't make sense to make a landed cost for a product that isn't set as being valuated in real time at real cost if move.product_id.valuation != 'real_time' or move.product_id.cost_method != 'real': continue total_cost = 0.0 total_qty = move.product_qty weight = move.product_id and move.product_id.weight * move.product_qty volume = move.product_id and move.product_id.volume * move.product_qty for quant in move.quant_ids: total_cost += quant.cost vals = dict(product_id=move.product_id.id, move_id=move.id, quantity=move.product_uom_qty, former_cost=total_cost * total_qty, weight=weight, volume=volume) lines.append(vals) if not lines: raise osv.except_osv(_('Error!'), _('The selected picking does not contain any move that would be impacted by landed costs. Landed costs are only possible for products configured in real time valuation with real price costing method. 
Please make sure it is the case, or you selected the correct picking')) return lines _columns = { 'name': fields.char('Name', track_visibility='always', readonly=True, copy=False), 'date': fields.date('Date', required=True, states={'done': [('readonly', True)]}, track_visibility='onchange', copy=False), 'picking_ids': fields.many2many('stock.picking', string='Pickings', states={'done': [('readonly', True)]}, copy=False), 'cost_lines': fields.one2many('stock.landed.cost.lines', 'cost_id', 'Cost Lines', states={'done': [('readonly', True)]}, copy=True), 'valuation_adjustment_lines': fields.one2many('stock.valuation.adjustment.lines', 'cost_id', 'Valuation Adjustments', states={'done': [('readonly', True)]}), 'description': fields.text('Item Description', states={'done': [('readonly', True)]}), 'amount_total': fields.function(_total_amount, type='float', string='Total', digits_compute=dp.get_precision('Account'), store={ 'stock.landed.cost': (lambda self, cr, uid, ids, c={}: ids, ['cost_lines'], 20), 'stock.landed.cost.lines': (_get_cost_line, ['price_unit', 'quantity', 'cost_id'], 20), }, track_visibility='always' ), 'state': fields.selection([('draft', 'Draft'), ('done', 'Posted'), ('cancel', 'Cancelled')], 'State', readonly=True, track_visibility='onchange', copy=False), 'account_move_id': fields.many2one('account.move', 'Journal Entry', readonly=True, copy=False), 'account_journal_id': fields.many2one('account.journal', 'Account Journal', required=True, states={'done': [('readonly', True)]}), } _defaults = { 'name': lambda obj, cr, uid, context: obj.pool.get('ir.sequence').get(cr, uid, 'stock.landed.cost'), 'state': 'draft', 'date': fields.date.context_today, } def _create_accounting_entries(self, cr, uid, line, move_id, qty_out, context=None): product_obj = self.pool.get('product.template') cost_product = line.cost_line_id and line.cost_line_id.product_id if not cost_product: return False accounts = product_obj.get_product_accounts(cr, uid, 
line.product_id.product_tmpl_id.id, context=context) debit_account_id = accounts['property_stock_valuation_account_id'] already_out_account_id = accounts['stock_account_output'] credit_account_id = line.cost_line_id.account_id.id or cost_product.property_account_expense.id or cost_product.categ_id.property_account_expense_categ.id if not credit_account_id: raise osv.except_osv(_('Error!'), _('Please configure Stock Expense Account for product: %s.') % (cost_product.name)) return self._create_account_move_line(cr, uid, line, move_id, credit_account_id, debit_account_id, qty_out, already_out_account_id, context=context) def _create_account_move_line(self, cr, uid, line, move_id, credit_account_id, debit_account_id, qty_out, already_out_account_id, context=None): """ Generate the account.move.line values to track the landed cost. Afterwards, for the goods that are already out of stock, we should create the out moves """ aml_obj = self.pool.get('account.move.line') base_line = { 'name': line.name, 'move_id': move_id, 'product_id': line.product_id.id, 'quantity': line.quantity, } debit_line = dict(base_line, account_id=debit_account_id) credit_line = dict(base_line, account_id=credit_account_id) diff = line.additional_landed_cost if diff > 0: debit_line['debit'] = diff credit_line['credit'] = diff else: # negative cost, reverse the entry debit_line['credit'] = -diff credit_line['debit'] = -diff aml_obj.create(cr, uid, debit_line, context=context) aml_obj.create(cr, uid, credit_line, context=context) #Create account move lines for quants already out of stock if qty_out > 0: debit_line = dict(debit_line, name=(line.name + ": " + str(qty_out) + _(' already out')), quantity=qty_out, account_id=already_out_account_id) credit_line = dict(credit_line, name=(line.name + ": " + str(qty_out) + _(' already out')), quantity=qty_out, account_id=debit_account_id) diff = diff * qty_out / line.quantity if diff > 0: debit_line['debit'] = diff credit_line['credit'] = diff else: # 
negative cost, reverse the entry debit_line['credit'] = -diff credit_line['debit'] = -diff aml_obj.create(cr, uid, debit_line, context=context) aml_obj.create(cr, uid, credit_line, context=context) return True def _create_account_move(self, cr, uid, cost, context=None): vals = { 'journal_id': cost.account_journal_id.id, 'period_id': self.pool.get('account.period').find(cr, uid, cost.date, context=context)[0], 'date': cost.date, 'ref': cost.name } return self.pool.get('account.move').create(cr, uid, vals, context=context) def _check_sum(self, cr, uid, landed_cost, context=None): """ Will check if each cost line its valuation lines sum to the correct amount and if the overall total amount is correct also """ costcor = {} tot = 0 for valuation_line in landed_cost.valuation_adjustment_lines: if costcor.get(valuation_line.cost_line_id): costcor[valuation_line.cost_line_id] += valuation_line.additional_landed_cost else: costcor[valuation_line.cost_line_id] = valuation_line.additional_landed_cost tot += valuation_line.additional_landed_cost prec = self.pool['decimal.precision'].precision_get(cr, uid, 'Account') # float_compare returns 0 for equal amounts res = not bool(float_compare(tot, landed_cost.amount_total, precision_digits=prec)) for costl in costcor.keys(): if float_compare(costcor[costl], costl.price_unit, precision_digits=prec): res = False return res def button_validate(self, cr, uid, ids, context=None): quant_obj = self.pool.get('stock.quant') for cost in self.browse(cr, uid, ids, context=context): if cost.state != 'draft': raise Warning(_('Only draft landed costs can be validated')) if not cost.valuation_adjustment_lines or not self._check_sum(cr, uid, cost, context=context): raise osv.except_osv(_('Error!'), _('You cannot validate a landed cost which has no valid valuation lines.')) move_id = self._create_account_move(cr, uid, cost, context=context) quant_dict = {} for line in cost.valuation_adjustment_lines: if not line.move_id: continue per_unit = 
line.final_cost / line.quantity diff = per_unit - line.former_cost_per_unit quants = [quant for quant in line.move_id.quant_ids] for quant in quants: if quant.id not in quant_dict: quant_dict[quant.id] = quant.cost + diff else: quant_dict[quant.id] += diff for key, value in quant_dict.items(): print value quant_obj.write(cr, uid, key, {'cost': value}, context=context) qty_out = 0 for quant in line.move_id.quant_ids: if quant.location_id.usage != 'internal': qty_out += quant.qty self._create_accounting_entries(cr, uid, line, move_id, qty_out, context=context) self.write(cr, uid, cost.id, {'state': 'done', 'account_move_id': move_id}, context=context) return True def button_cancel(self, cr, uid, ids, context=None): cost = self.browse(cr, uid, ids, context=context) if cost.state == 'done': raise Warning(_('Validated landed costs cannot be cancelled, ' 'but you could create negative landed costs to reverse them')) return cost.write({'state': 'cancel'}) def unlink(self, cr, uid, ids, context=None): # cancel or raise first self.button_cancel(cr, uid, ids, context) return super(stock_landed_cost, self).unlink(cr, uid, ids, context=context) def compute_landed_cost(self, cr, uid, ids, context=None): line_obj = self.pool.get('stock.valuation.adjustment.lines') unlink_ids = line_obj.search(cr, uid, [('cost_id', 'in', ids)], context=context) line_obj.unlink(cr, uid, unlink_ids, context=context) digits = dp.get_precision('Product Price')(cr) towrite_dict = {} for cost in self.browse(cr, uid, ids, context=None): if not cost.picking_ids: continue picking_ids = [p.id for p in cost.picking_ids] total_qty = 0.0 total_cost = 0.0 total_weight = 0.0 total_volume = 0.0 total_line = 0.0 vals = self.get_valuation_lines(cr, uid, [cost.id], picking_ids=picking_ids, context=context) for v in vals: for line in cost.cost_lines: v.update({'cost_id': cost.id, 'cost_line_id': line.id}) self.pool.get('stock.valuation.adjustment.lines').create(cr, uid, v, context=context) total_qty += 
v.get('quantity', 0.0) total_cost += v.get('former_cost', 0.0) total_weight += v.get('weight', 0.0) total_volume += v.get('volume', 0.0) total_line += 1 for line in cost.cost_lines: value_split = 0.0 for valuation in cost.valuation_adjustment_lines: value = 0.0 if valuation.cost_line_id and valuation.cost_line_id.id == line.id: if line.split_method == 'by_quantity' and total_qty: per_unit = (line.price_unit / total_qty) value = valuation.quantity * per_unit elif line.split_method == 'by_weight' and total_weight: per_unit = (line.price_unit / total_weight) value = valuation.weight * per_unit elif line.split_method == 'by_volume' and total_volume: per_unit = (line.price_unit / total_volume) value = valuation.volume * per_unit elif line.split_method == 'equal': value = (line.price_unit / total_line) elif line.split_method == 'by_current_cost_price' and total_cost: per_unit = (line.price_unit / total_cost) value = valuation.former_cost * per_unit else: value = (line.price_unit / total_line) if digits: value = float_round(value, precision_digits=digits[1], rounding_method='UP') value = min(value, line.price_unit - value_split) value_split += value if valuation.id not in towrite_dict: towrite_dict[valuation.id] = value else: towrite_dict[valuation.id] += value if towrite_dict: for key, value in towrite_dict.items(): line_obj.write(cr, uid, key, {'additional_landed_cost': value}, context=context) return True class stock_landed_cost_lines(osv.osv): _name = 'stock.landed.cost.lines' _description = 'Stock Landed Cost Lines' def onchange_product_id(self, cr, uid, ids, product_id=False, context=None): result = {} if not product_id: return {'value': {'quantity': 0.0, 'price_unit': 0.0}} product = self.pool.get('product.product').browse(cr, uid, product_id, context=context) result['name'] = product.name result['split_method'] = product.split_method result['price_unit'] = product.standard_price result['account_id'] = product.property_account_expense and 
product.property_account_expense.id or product.categ_id.property_account_expense_categ.id return {'value': result} _columns = { 'name': fields.char('Description'), 'cost_id': fields.many2one('stock.landed.cost', 'Landed Cost', required=True, ondelete='cascade'), 'product_id': fields.many2one('product.product', 'Product', required=True), 'price_unit': fields.float('Cost', required=True, digits_compute=dp.get_precision('Product Price')), 'split_method': fields.selection(product.SPLIT_METHOD, string='Split Method', required=True), 'account_id': fields.many2one('account.account', 'Account', domain=[('type', '<>', 'view'), ('type', '<>', 'closed')]), } class stock_valuation_adjustment_lines(osv.osv): _name = 'stock.valuation.adjustment.lines' _description = 'Stock Valuation Adjustment Lines' def _amount_final(self, cr, uid, ids, name, args, context=None): result = {} for line in self.browse(cr, uid, ids, context=context): result[line.id] = { 'former_cost_per_unit': 0.0, 'final_cost': 0.0, } result[line.id]['former_cost_per_unit'] = (line.former_cost / line.quantity if line.quantity else 1.0) result[line.id]['final_cost'] = (line.former_cost + line.additional_landed_cost) return result def _get_name(self, cr, uid, ids, name, arg, context=None): res = {} for line in self.browse(cr, uid, ids, context=context): res[line.id] = line.product_id.code or line.product_id.name or '' if line.cost_line_id: res[line.id] += ' - ' + line.cost_line_id.name return res _columns = { 'name': fields.function(_get_name, type='char', string='Description', store=True), 'cost_id': fields.many2one('stock.landed.cost', 'Landed Cost', required=True, ondelete='cascade'), 'cost_line_id': fields.many2one('stock.landed.cost.lines', 'Cost Line', readonly=True), 'move_id': fields.many2one('stock.move', 'Stock Move', readonly=True), 'product_id': fields.many2one('product.product', 'Product', required=True), 'quantity': fields.float('Quantity', digits_compute=dp.get_precision('Product Unit of Measure'), 
required=True), 'weight': fields.float('Weight', digits_compute=dp.get_precision('Product Unit of Measure')), 'volume': fields.float('Volume', digits_compute=dp.get_precision('Product Unit of Measure')), 'former_cost': fields.float('Former Cost', digits_compute=dp.get_precision('Product Price')), 'former_cost_per_unit': fields.function(_amount_final, multi='cost', string='Former Cost(Per Unit)', type='float', digits_compute=dp.get_precision('Account'), store=True), 'additional_landed_cost': fields.float('Additional Landed Cost', digits_compute=dp.get_precision('Product Price')), 'final_cost': fields.function(_amount_final, multi='cost', string='Final Cost', type='float', digits_compute=dp.get_precision('Account'), store=True), } _defaults = { 'quantity': 1.0, 'weight': 1.0, 'volume': 1.0, } # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
Carmezim/tensorflow
refs/heads/master
tensorflow/python/feature_column/feature_column.py
5
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """This API defines FeatureColumn abstraction. FeatureColumns provide a high level abstraction for ingesting and representing features. FeatureColumns are also the primary way of encoding features for canned ${tf.estimator.Estimator}s. When using FeatureColumns with `Estimators`, the type of feature column you should choose depends on (1) the feature type and (2) the model type. (1) Feature type: * Continuous features can be represented by `numeric_column`. * Categorical features can be represented by any `categorical_column_with_*` column: - `categorical_column_with_keys` - `categorical_column_with_vocabulary_file` - `categorical_column_with_hash_bucket` - `categorical_column_with_integerized_feature` (2) Model type: * Deep neural network models (`DNNClassifier`, `DNNRegressor`). Continuous features can be directly fed into deep neural network models. age_column = numeric_column("age") To feed sparse features into DNN models, wrap the column with `embedding_column` or `indicator_column`. `indicator_column` is recommended for features with only a few possible values. For features with many possible values, `embedding_column` is recommended. 
embedded_dept_column = embedding_column( categorical_column_with_keys("department", ["math", "philosphy", ...]), dimension=10) * Wide (aka linear) models (`LinearClassifier`, `LinearRegressor`). Sparse features can be fed directly into linear models. They behave like an indicator column but with an efficient implementation. dept_column = categorical_column_with_keys("department", ["math", "philosophy", "english"]) It is recommended that continuous features be bucketized before being fed into linear models. bucketized_age_column = bucketized_column( source_column=age_column, boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65]) Sparse features can be crossed (also known as conjuncted or combined) in order to form non-linearities, and then fed into linear models. cross_dept_age_column = crossed_column( columns=[department_column, bucketized_age_column], hash_bucket_size=1000) Example of building canned `Estimator`s using FeatureColumns: # Define features and transformations deep_feature_columns = [age_column, embedded_dept_column] wide_feature_columns = [dept_column, bucketized_age_column, cross_dept_age_column] # Build deep model estimator = DNNClassifier( feature_columns=deep_feature_columns, hidden_units=[500, 250, 50]) estimator.train(...) # Or build a wide model estimator = LinearClassifier( feature_columns=wide_feature_columns) estimator.train(...) # Or build a wide and deep model! estimator = DNNLinearCombinedClassifier( linear_feature_columns=wide_feature_columns, dnn_feature_columns=deep_feature_columns, dnn_hidden_units=[500, 250, 50]) estimator.train(...) FeatureColumns can also be transformed into a generic input layer for custom models using `input_from_feature_columns`. 
Example of building model using FeatureColumns, this can be used in a `model_fn` which is given to the {tf.estimator.Estimator}: # Building model via layers deep_feature_columns = [age_column, embedded_dept_column] columns_to_tensor = parse_feature_columns_from_examples( serialized=my_data, feature_columns=deep_feature_columns) first_layer = input_from_feature_columns( columns_to_tensors=columns_to_tensor, feature_columns=deep_feature_columns) second_layer = fully_connected(first_layer, ...) NOTE: Functions prefixed with "_" indicate experimental or private parts of the API subject to change, and should not be relied upon! """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import abc import collections import math import numpy as np import six from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import sparse_tensor as sparse_tensor_lib from tensorflow.python.framework import tensor_shape from tensorflow.python.ops import array_ops from tensorflow.python.ops import check_ops from tensorflow.python.ops import embedding_ops from tensorflow.python.ops import init_ops from tensorflow.python.ops import lookup_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import nn_ops from tensorflow.python.ops import parsing_ops from tensorflow.python.ops import sparse_ops from tensorflow.python.ops import string_ops from tensorflow.python.ops import variable_scope from tensorflow.python.ops import variables from tensorflow.python.platform import tf_logging as logging from tensorflow.python.training import checkpoint_utils from tensorflow.python.util import nest def input_layer(features, feature_columns, weight_collections=None, trainable=True): """Returns a dense `Tensor` as input layer based on given `feature_columns`. Generally a single example in training data is described with FeatureColumns. 
At the first layer of the model, this column oriented data should be converted to a single `Tensor`. Example: ```python price = numeric_column('price') keywords_embedded = embedding_column( categorical_column_with_hash_bucket("keywords", 10K), dimensions=16) all_feature_columns = [price, keywords_embedded, ...] dense_tensor = input_layer(features, all_feature_columns) for units in [128, 64, 32]: dense_tensor = tf.layers.dense(dense_tensor, units, tf.nn.relu) prediction = tf.layers.dense(dense_tensor, 1) ``` Args: features: A mapping from key to tensors. `FeatureColumn`s look up via these keys. For example `numeric_column('price') will look at 'price' key in this dict. Values can be a `SparseTensor` or a `Tensor` depends on corresponding `FeatureColumn`. feature_columns: An iterable containing all the `FeatureColumn`s. All items should be instances of classes derived from `_DenseColumn` such as `numeric_column`, `embedding_column`, `bucketized_column`, `indicator_column`. If you have categorical features, you can wrap them with with an `embedding_column` or `indicator_column`. weight_collections: A list of collection names to which the Variable will be added. Note that, variables will also be added to collections `tf.GraphKeys.GLOBAL_VARIABLES` and `ops.GraphKeys.MODEL_VARIABLES`. trainable: If `True` also add the variable to the graph collection `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`). Returns: A `Tensor` which represents input layer of a model. Its shape is (batch_size, first_layer_dimension) and its dtype is `float32`. first_layer_dimension is determined based on given `feature_columns`. Raises: ValueError: if an item in `feature_columns` is not a `_DenseColumn`. """ _check_feature_columns(feature_columns) for column in feature_columns: if not isinstance(column, _DenseColumn): raise ValueError( 'Items of feature_columns must be a _DenseColumn. ' 'You can wrap a categorical column with an ' 'embedding_column or indicator_column. 
Given: {}'.format(column))
  weight_collections = list(weight_collections or [])
  # Variables created below always land in GLOBAL_VARIABLES and
  # MODEL_VARIABLES in addition to any caller-supplied collections.
  if ops.GraphKeys.GLOBAL_VARIABLES not in weight_collections:
    weight_collections.append(ops.GraphKeys.GLOBAL_VARIABLES)
  if ops.GraphKeys.MODEL_VARIABLES not in weight_collections:
    weight_collections.append(ops.GraphKeys.MODEL_VARIABLES)
  with variable_scope.variable_scope(
      None, default_name='input_layer', values=features.values()):
    builder = _LazyBuilder(features)
    output_tensors = []
    ordered_columns = []
    # Iterate in name-sorted order so variable creation order is
    # deterministic across runs.
    for column in sorted(feature_columns, key=lambda x: x.name):
      ordered_columns.append(column)
      with variable_scope.variable_scope(None, default_name=column.name):
        tensor = column._get_dense_tensor(  # pylint: disable=protected-access
            builder,
            weight_collections=weight_collections,
            trainable=trainable)
        num_elements = column._variable_shape.num_elements()  # pylint: disable=protected-access
        batch_size = array_ops.shape(tensor)[0]
        # Flatten each column's output to (batch_size, num_elements) so all
        # columns can be concatenated along axis 1 below.
        tensor = array_ops.reshape(tensor, shape=(batch_size, num_elements))
        output_tensors.append(tensor)
    _verify_static_batch_size_equality(output_tensors, ordered_columns)
    return array_ops.concat(output_tensors, 1)


def linear_model(features,
                 feature_columns,
                 units=1,
                 sparse_combiner='sum',
                 weight_collections=None,
                 trainable=True):
  """Returns a linear prediction `Tensor` based on given `feature_columns`.

  This function generates a weighted sum based on output dimension `units`.
  Weighted sum refers to logits in classification problems. It refers to the
  prediction itself for linear regression problems.

  Note on supported columns: `linear_model` treats categorical columns as
  `indicator_column`s while `input_layer` explicitly requires wrapping each
  of them with an `embedding_column` or an `indicator_column`.

  Example:

  ```python
  price = numeric_column('price')
  price_buckets = bucketized_column(price, boundaries=[0., 10., 100., 1000.])
  keywords = categorical_column_with_hash_bucket("keywords", 10K)
  all_feature_columns = [price_buckets, keywords, ...]
  prediction = linear_model(features, all_feature_columns)
  ```

  Args:
    features: A mapping from key to tensors. `FeatureColumn`s look up via
      these keys. For example `numeric_column('price')` will look at 'price'
      key in this dict. Values are `Tensor` or `SparseTensor` depending on
      corresponding `FeatureColumn`.
    feature_columns: An iterable containing all the FeatureColumns. All items
      should be instances of classes derived from FeatureColumn.
    units: An integer, dimensionality of the output space. Default value is 1.
    sparse_combiner: A string specifying how to reduce if a sparse column is
      multivalent. Currently "mean", "sqrtn" and "sum" are supported, with
      "sum" the default. "sqrtn" often achieves good accuracy, in particular
      with bag-of-words columns. It combines each sparse columns
      independently.
        * "sum": do not normalize features in the column
        * "mean": do l1 normalization on features in the column
        * "sqrtn": do l2 normalization on features in the column
    weight_collections: A list of collection names to which the Variable will
      be added. Note that, variables will also be added to collections
      `tf.GraphKeys.GLOBAL_VARIABLES` and `ops.GraphKeys.MODEL_VARIABLES`.
    trainable: If `True` also add the variable to the graph collection
      `GraphKeys.TRAINABLE_VARIABLES` (see `tf.Variable`).

  Returns:
    A `Tensor` which represents predictions/logits of a linear model. Its
    shape is (batch_size, units) and its dtype is `float32`.

  Raises:
    ValueError: if an item in `feature_columns` is neither a `_DenseColumn`
      nor `_CategoricalColumn`.
  """
  _check_feature_columns(feature_columns)
  for column in feature_columns:
    if not isinstance(column, (_DenseColumn, _CategoricalColumn)):
      raise ValueError('Items of feature_columns must be either a _DenseColumn '
                       'or _CategoricalColumn. Given: {}'.format(column))
  weight_collections = list(weight_collections or [])
  if ops.GraphKeys.GLOBAL_VARIABLES not in weight_collections:
    weight_collections.append(ops.GraphKeys.GLOBAL_VARIABLES)
  if ops.GraphKeys.MODEL_VARIABLES not in weight_collections:
    weight_collections.append(ops.GraphKeys.MODEL_VARIABLES)
  with variable_scope.variable_scope(
      None, default_name='linear_model', values=features.values()):
    weighted_sums = []
    ordered_columns = []
    builder = _LazyBuilder(features)
    # Name-sorted iteration keeps variable creation order deterministic.
    for column in sorted(feature_columns, key=lambda x: x.name):
      with variable_scope.variable_scope(None, default_name=column.name):
        ordered_columns.append(column)
        # Categorical columns get a sparse weighted sum driven by
        # sparse_combiner; dense columns get a plain matmul against a
        # per-column weight variable.
        if isinstance(column, _CategoricalColumn):
          weighted_sums.append(_create_categorical_column_weighted_sum(
              column, builder, units, sparse_combiner, weight_collections,
              trainable))
        else:
          weighted_sums.append(_create_dense_column_weighted_sum(
              column, builder, units, weight_collections, trainable))
    _verify_static_batch_size_equality(weighted_sums, ordered_columns)
    predictions_no_bias = math_ops.add_n(
        weighted_sums, name='weighted_sum_no_bias')
    # Single shared bias of shape [units], broadcast-added to every example.
    bias = variable_scope.get_variable(
        'bias_weights',
        shape=[units],
        initializer=init_ops.zeros_initializer(),
        trainable=trainable,
        collections=weight_collections)
    predictions = nn_ops.bias_add(
        predictions_no_bias, bias, name='weighted_sum')
    return predictions


def _transform_features(features, feature_columns):
  """Returns transformed features based on features columns passed in.

  Please note that most probably you would not need to use this function.
  Please check `input_layer` and `linear_model` to see whether they will
  satisfy your use case or not.
  Example:

  ```python
  # Define features and transformations
  crosses_a_x_b = crossed_column(
      columns=["sparse_feature_a", "sparse_feature_b"], hash_bucket_size=10000)
  price_buckets = bucketized_column(
      source_column=numeric_column("price"), boundaries=[...])
  columns = [crosses_a_x_b, price_buckets]
  features = tf.parse_example(..., features=make_parse_example_spec(columns))
  transformed = transform_features(features=features, feature_columns=columns)

  assertCountEqual(columns, transformed.keys())
  ```

  Args:
    features: A mapping from key to tensors. `FeatureColumn`s look up via
      these keys. For example `numeric_column('price')` will look at 'price'
      key in this dict. Values can be a `SparseTensor` or a `Tensor` depends
      on corresponding `FeatureColumn`.
    feature_columns: An iterable containing all the `FeatureColumn`s.

  Returns:
    A `dict` mapping FeatureColumn to `Tensor` and `SparseTensor` values.
  """
  _check_feature_columns(feature_columns)
  outputs = {}
  with ops.name_scope(
      None, default_name='transform_features', values=features.values()):
    builder = _LazyBuilder(features)
    # Name-sorted iteration keeps op/name-scope creation deterministic.
    for column in sorted(feature_columns, key=lambda x: x.name):
      with ops.name_scope(None, default_name=column.name):
        outputs[column] = builder.get(column)
  return outputs


def make_parse_example_spec(feature_columns):
  """Creates parsing spec dictionary from input feature_columns.

  The returned dictionary can be used as arg 'features' in `tf.parse_example`.

  Typical usage example:

  ```python
  # Define features and transformations
  feature_a = categorical_column_with_vocabulary_file(...)
  feature_b = numeric_column(...)
  feature_c_bucketized = bucketized_column(numeric_column("feature_c"), ...)
  feature_a_x_feature_c = crossed_column(
      columns=[feature_a, feature_c_bucketized], ...)

  feature_columns = set(
      [feature_b, feature_c_bucketized, feature_a_x_feature_c])
  features = tf.parse_example(
      serialized=serialized_examples,
      features=make_parse_example_spec(feature_columns))
  ```

  For the above example, make_parse_example_spec would return the dict:

  {
      "feature_a": parsing_ops.VarLenFeature(tf.string),
      "feature_b": parsing_ops.FixedLenFeature([1], dtype=tf.float32),
      "feature_c": parsing_ops.FixedLenFeature([1], dtype=tf.float32)
  }

  Args:
    feature_columns: An iterable containing all feature columns. All items
      should be instances of classes derived from `_FeatureColumn`.

  Returns:
    A dict mapping each feature key to a `FixedLenFeature` or `VarLenFeature`
    value.

  Raises:
    ValueError: If any of the given `feature_columns` is not a
      `_FeatureColumn` instance.
  """
  result = {}
  for column in feature_columns:
    if not isinstance(column, _FeatureColumn):
      raise ValueError(
          'All feature_columns must be _FeatureColumn instances. '
          'Given: {}'.format(column))
    config = column._parse_example_spec  # pylint: disable=protected-access
    # Two columns may legitimately share a raw feature key (e.g. a numeric
    # column and a bucketized version of it) as long as their specs agree.
    for key, value in six.iteritems(config):
      if key in result and value != result[key]:
        raise ValueError(
            'feature_columns contain different parse_spec for key '
            '{}. Given {} and {}'.format(key, value, result[key]))
    result.update(config)
  return result


def embedding_column(
    categorical_column, dimension, combiner='mean', initializer=None,
    ckpt_to_load_from=None, tensor_name_in_ckpt=None, max_norm=None,
    trainable=True):
  """`_DenseColumn` that converts from sparse, categorical input.

  Use this when your inputs are sparse, but you want to convert them to a
  dense representation (e.g., to feed to a DNN).

  Inputs must be `SparseTensor` by way of the provided
  `categorical_column._get_sparse_tensors`. Any of the `categorical_column_*`
  columns can be provided as input.
  Here is an example embedding of an identity column for a DNN model:

  ```python
  video_id = categorical_column_with_identity(
      key='video_id', num_buckets=1000000, default_value=0)
  columns = [embedding_column(video_id, 9),...]
  features = tf.parse_example(..., features=make_parse_example_spec(columns))
  dense_tensor = input_layer(features, columns)
  ```

  Args:
    categorical_column: A `_CategoricalColumn` created by a
      `categorical_column_with_*` function. This column produces the sparse
      IDs that are inputs to the embedding lookup.
    dimension: An integer specifying dimension of the embedding, must be > 0.
    combiner: A string specifying how to reduce if there are multiple entries
      in a single row. Currently 'mean', 'sqrtn' and 'sum' are supported, with
      'mean' the default. 'sqrtn' often achieves good accuracy, in particular
      with bag-of-words columns. Each of this can be thought as example level
      normalizations on the column. For more information, see
      `tf.embedding_lookup_sparse`.
    initializer: A variable initializer function to be used in embedding
      variable initialization. If not specified, defaults to
      `tf.truncated_normal_initializer` with mean `0.0` and standard deviation
      `1/sqrt(categorical_column._num_buckets)`.
    ckpt_to_load_from: String representing checkpoint name/pattern from which
      to restore column weights. Required if `tensor_name_in_ckpt` is not
      `None`.
    tensor_name_in_ckpt: Name of the `Tensor` in `ckpt_to_load_from` from
      which to restore the column weights. Required if `ckpt_to_load_from` is
      not `None`.
    max_norm: If not `None`, embedding values are l2-normalized to this value.
    trainable: Whether or not the embedding is trainable. Default is True.

  Returns:
    `_DenseColumn` that converts from sparse input.

  Raises:
    ValueError: if `dimension` not > 0.
    ValueError: if exactly one of `ckpt_to_load_from` and
      `tensor_name_in_ckpt` is specified.
    ValueError: if `initializer` is specified and is not callable.
  """
  if (dimension is None) or (dimension < 1):
    raise ValueError('Invalid dimension {}.'.format(dimension))
  # ckpt_to_load_from and tensor_name_in_ckpt must be given together.
  if (ckpt_to_load_from is None) != (tensor_name_in_ckpt is None):
    raise ValueError('Must specify both `ckpt_to_load_from` and '
                     '`tensor_name_in_ckpt` or none of them.')
  if (initializer is not None) and (not callable(initializer)):
    raise ValueError('initializer must be callable if specified. '
                     'Embedding of column_name: {}'.format(
                         categorical_column.name))
  if initializer is None:
    # NOTE(review): code scales stddev by 1/sqrt(dimension), while the
    # docstring above says 1/sqrt(categorical_column._num_buckets) — confirm
    # which is intended.
    initializer = init_ops.truncated_normal_initializer(
        mean=0.0, stddev=1 / math.sqrt(dimension))
  return _EmbeddingColumn(
      categorical_column=categorical_column,
      dimension=dimension,
      combiner=combiner,
      initializer=initializer,
      ckpt_to_load_from=ckpt_to_load_from,
      tensor_name_in_ckpt=tensor_name_in_ckpt,
      max_norm=max_norm,
      trainable=trainable)


def numeric_column(key,
                   shape=(1,),
                   default_value=None,
                   dtype=dtypes.float32,
                   normalizer_fn=None):
  """Represents real valued or numerical features.

  Example:

  ```python
  price = numeric_column('price')
  columns = [price, ...]
  features = tf.parse_example(..., features=make_parse_example_spec(columns))
  dense_tensor = input_layer(features, columns)

  # or
  bucketized_price = bucketized_column(price, boundaries=[...])
  columns = [bucketized_price, ...]
  features = tf.parse_example(..., features=make_parse_example_spec(columns))
  linear_prediction = linear_model(features, columns)
  ```

  Args:
    key: A unique string identifying the input feature. It is used as the
      column name and the dictionary key for feature parsing configs, feature
      `Tensor` objects, and feature columns.
    shape: An iterable of integers specifies the shape of the `Tensor`. An
      integer can be given which means a single dimension `Tensor` with given
      width. The `Tensor` representing the column will have the shape of
      [batch_size] + `shape`.
    default_value: A single value compatible with `dtype` or an iterable of
      values compatible with `dtype` which the column takes on during
      `tf.Example` parsing if data is missing. A default value of `None` will
      cause `tf.parse_example` to fail if an example does not contain this
      column. If a single value is provided, the same value will be applied
      as the default value for every item. If an iterable of values is
      provided, the shape of the `default_value` should be equal to the given
      `shape`.
    dtype: defines the type of values. Default value is `tf.float32`. Must be
      a non-quantized, real integer or floating point type.
    normalizer_fn: If not `None`, a function that can be used to normalize
      the value of the tensor after `default_value` is applied for parsing.
      Normalizer function takes the input `Tensor` as its argument, and
      returns the output `Tensor`. (e.g. lambda x: (x - 3.0) / 4.2). Please
      note that even though most common use case of this function is
      normalization, it can be used for any kind of Tensorflow
      transformations.

  Returns:
    A _NumericColumn.

  Raises:
    TypeError: if any dimension in shape is not an int
    ValueError: if any dimension in shape is not a positive integer
    TypeError: if `default_value` is an iterable but not compatible with
      `shape`
    TypeError: if `default_value` is not compatible with `dtype`.
    ValueError: if `dtype` is not convertible to `tf.float32`.
  """
  shape = _check_shape(shape, key)
  if not (dtype.is_integer or dtype.is_floating):
    raise ValueError('dtype must be convertible to float. '
                     'dtype: {}, key: {}'.format(dtype, key))
  default_value = _check_default_value(shape, default_value, dtype, key)

  if normalizer_fn is not None and not callable(normalizer_fn):
    raise TypeError(
        'normalizer_fn must be a callable. Given: {}'.format(normalizer_fn))

  return _NumericColumn(
      key,
      shape=shape,
      default_value=default_value,
      dtype=dtype,
      normalizer_fn=normalizer_fn)


def bucketized_column(source_column, boundaries):
  """Represents discretized dense input.

  Buckets include the left boundary, and exclude the right boundary. Namely,
  `boundaries=[0., 1., 2.]` generates buckets `(-inf, 0.)`, `[0., 1.)`,
  `[1., 2.)`, and `[2., +inf)`.

  For example, if the inputs are

  `boundaries` = [0, 10, 100]
  input tensor = [[-5, 10000]
                  [150,   10]
                  [5,    100]]

  then the output will be

  output = [[0, 3]
            [3, 2]
            [1, 3]]

  Example:

  ```python
  price = numeric_column('price')
  bucketized_price = bucketized_column(price, boundaries=[...])
  columns = [bucketized_price, ...]
  features = tf.parse_example(..., features=make_parse_example_spec(columns))
  linear_prediction = linear_model(features, columns)

  # or
  columns = [bucketized_price, ...]
  features = tf.parse_example(..., features=make_parse_example_spec(columns))
  dense_tensor = input_layer(features, columns)
  ```

  `bucketized_column` can also be crossed with another categorical column
  using `crossed_column`:

  ```python
  price = numeric_column('price')
  # bucketized_column converts numerical feature to a categorical one.
  bucketized_price = bucketized_column(price, boundaries=[...])
  # 'keywords' is a string feature.
  price_x_keywords = crossed_column([bucketized_price, 'keywords'], 50K)
  all_feature_columns = [price_x_keywords, ...]
  linear_prediction = linear_model(features, all_feature_columns)
  ```

  Args:
    source_column: A one-dimensional dense column which is generated with
      `numeric_column`.
    boundaries: A sorted list or tuple of floats specifying the boundaries.

  Returns:
    A `_BucketizedColumn`.

  Raises:
    ValueError: If `source_column` is not a numeric column, or if it is not
      one-dimensional.
    ValueError: If `boundaries` is not a sorted list or tuple.
  """
  if not isinstance(source_column, _NumericColumn):
    raise ValueError(
        'source_column must be a column generated with numeric_column(). '
        'Given: {}'.format(source_column))
  if len(source_column.shape) > 1:
    raise ValueError(
        'source_column must be one-dimensional column. '
        'Given: {}'.format(source_column))
  if (not boundaries or
      not (isinstance(boundaries, list) or isinstance(boundaries, tuple))):
    raise ValueError('boundaries must be a sorted list.')
  # Strictly increasing check; equal adjacent boundaries are rejected too.
  for i in range(len(boundaries) - 1):
    if boundaries[i] >= boundaries[i + 1]:
      raise ValueError('boundaries must be a sorted list.')
  return _BucketizedColumn(source_column, tuple(boundaries))


def _assert_string_or_int(dtype, prefix):
  # Shared validation helper: categorical features only support string or
  # integer dtypes. `prefix` identifies the offending column in the error.
  if (dtype != dtypes.string) and (not dtype.is_integer):
    raise ValueError(
        '{} dtype must be string or integer. dtype: {}.'.format(prefix, dtype))


def categorical_column_with_hash_bucket(key,
                                        hash_bucket_size,
                                        dtype=dtypes.string):
  """Represents sparse feature where ids are set by hashing.

  Use this when your sparse features are in string or integer format, and you
  want to distribute your inputs into a finite number of buckets by hashing.
  output_id = Hash(input_feature_string) % bucket_size

  `features[key]` is either `Tensor` or `SparseTensor`. If `Tensor`, missing
  values can be represented by `-1` for int and `''` for string. Note that
  these values are independent of the `default_value` argument.

  Example:

  ```python
  keywords = categorical_column_with_hash_bucket("keywords", 10K)
  columns = [keywords, ...]
  features = tf.parse_example(..., features=make_parse_example_spec(columns))
  linear_prediction = linear_model(features, columns)

  # or
  keywords_embedded = embedding_column(keywords, 16)
  columns = [keywords_embedded, ...]
  features = tf.parse_example(..., features=make_parse_example_spec(columns))
  dense_tensor = input_layer(features, columns)
  ```

  Args:
    key: A unique string identifying the input feature. It is used as the
      column name and the dictionary key for feature parsing configs, feature
      `Tensor` objects, and feature columns.
    hash_bucket_size: An int > 1. The number of buckets.
    dtype: The type of features. Only string and integer types are supported.

  Returns:
    A `_HashedCategoricalColumn`.

  Raises:
    ValueError: `hash_bucket_size` is not greater than 1.
    ValueError: `dtype` is neither string nor integer.
  """
  if hash_bucket_size is None:
    raise ValueError('hash_bucket_size must be set. ' 'key: {}'.format(key))

  if hash_bucket_size < 1:
    # NOTE(review): the docstring advertises `hash_bucket_size` > 1, but this
    # check only rejects values < 1, so hash_bucket_size == 1 is accepted —
    # confirm which contract is intended.
    raise ValueError('hash_bucket_size must be at least 1. '
                     'hash_bucket_size: {}, key: {}'.format(
                         hash_bucket_size, key))

  _assert_string_or_int(dtype, prefix='column_name: {}'.format(key))

  return _HashedCategoricalColumn(key, hash_bucket_size, dtype)


def categorical_column_with_vocabulary_file(
    key, vocabulary_file, vocabulary_size, num_oov_buckets=0,
    default_value=None, dtype=dtypes.string):
  """A `_CategoricalColumn` with a vocabulary file.

  Use this when your inputs are in string or integer format, and you have a
  vocabulary file that maps each value to an integer ID. By default,
  out-of-vocabulary values are ignored. Use either (but not both) of
  `num_oov_buckets` and `default_value` to specify how to include
  out-of-vocabulary values.

  `features[key]` is either `Tensor` or `SparseTensor`. If `Tensor`, missing
  values can be represented by `-1` for int and `''` for string. Note that
  these values are independent of the `default_value` argument.

  Example with `num_oov_buckets`:
  File '/us/states.txt' contains 50 lines, each with a 2-character U.S. state
  abbreviation. All inputs with values in that file are assigned an ID 0-49,
  corresponding to its line number. All other values are hashed and assigned
  an ID 50-54.

  ```python
  states = categorical_column_with_vocabulary_file(
      key='states', vocabulary_file='/us/states.txt', vocabulary_size=50,
      num_oov_buckets=5)
  columns = [states, ...]
  features = tf.parse_example(..., features=make_parse_example_spec(columns))
  linear_prediction = linear_model(features, columns)
  ```

  Example with `default_value`:
  File '/us/states.txt' contains 51 lines - the first line is 'XX', and the
  other 50 each have a 2-character U.S. state abbreviation. Both a literal
  'XX' in input, and other values missing from the file, will be assigned
  ID 0. All others are assigned the corresponding line number 1-50.

  ```python
  states = categorical_column_with_vocabulary_file(
      key='states', vocabulary_file='/us/states.txt', vocabulary_size=51,
      default_value=0)
  columns = [states, ...]
  features = tf.parse_example(..., features=make_parse_example_spec(columns))
  linear_prediction = linear_model(features, columns)
  ```

  And to make an embedding with either:

  ```python
  columns = [embedding_column(states, 3),...]
  features = tf.parse_example(..., features=make_parse_example_spec(columns))
  dense_tensor = input_layer(features, columns)
  ```

  Args:
    key: A unique string identifying the input feature. It is used as the
      column name and the dictionary key for feature parsing configs, feature
      `Tensor` objects, and feature columns.
    vocabulary_file: The vocabulary file name.
    vocabulary_size: Number of the elements in the vocabulary. This must be no
      greater than length of `vocabulary_file`, if less than length, later
      values are ignored.
    num_oov_buckets: Non-negative integer, the number of out-of-vocabulary
      buckets. All out-of-vocabulary inputs will be assigned IDs in the range
      `[vocabulary_size, vocabulary_size+num_oov_buckets)` based on a hash of
      the input value. A positive `num_oov_buckets` can not be specified with
      `default_value`.
    default_value: The integer ID value to return for out-of-vocabulary
      feature values, defaults to -1. This can not be specified with a
      positive `num_oov_buckets`.
    dtype: The type of features. Only string and integer types are supported.

  Returns:
    A `_CategoricalColumn` with a vocabulary file.

  Raises:
    ValueError: `vocabulary_file` is missing.
    ValueError: `vocabulary_size` is missing or < 1.
    ValueError: `num_oov_buckets` is not a non-negative integer.
    ValueError: `dtype` is neither string nor integer.
  """
  if not vocabulary_file:
    raise ValueError('Missing vocabulary_file in {}.'.format(key))
  # `vocabulary_size` isn't required for lookup, but it is for `_num_buckets`.
  if (vocabulary_size is None) or (vocabulary_size < 1):
    raise ValueError('Invalid vocabulary_size in {}.'.format(key))
  if num_oov_buckets:
    # num_oov_buckets and default_value are mutually exclusive ways of
    # handling out-of-vocabulary values.
    if default_value is not None:
      raise ValueError(
          'Can\'t specify both num_oov_buckets and default_value in {}.'.format(
              key))
    if num_oov_buckets < 0:
      raise ValueError('Invalid num_oov_buckets {} in {}.'.format(
          num_oov_buckets, key))
  _assert_string_or_int(dtype, prefix='column_name: {}'.format(key))
  return _VocabularyFileCategoricalColumn(
      key=key,
      vocabulary_file=vocabulary_file,
      vocabulary_size=vocabulary_size,
      num_oov_buckets=0 if num_oov_buckets is None else num_oov_buckets,
      default_value=-1 if default_value is None else default_value,
      dtype=dtype)


def categorical_column_with_vocabulary_list(
    key, vocabulary_list, dtype=None, default_value=-1):
  """A `_CategoricalColumn` with in-memory vocabulary.

  Logic for feature f is:
  id = f in vocabulary_list ? vocabulary_list.index_of(f) : default_value

  Use this when your inputs are in string or integer format, and you have an
  in-memory vocabulary mapping each value to an integer ID. By default,
  out-of-vocabulary values are ignored. Use `default_value` to specify how to
  include out-of-vocabulary values.

  `features[key]` is either `Tensor` or `SparseTensor`. If `Tensor`, missing
  values can be represented by `-1` for int and `''` for string. Note that
  these values are independent of the `default_value` argument.

  In the following examples, each input in `vocabulary_list` is assigned an
  ID 0-4 corresponding to its index (e.g., input 'B' produces output 2). All
  other inputs are assigned `default_value` 0.

  Linear model:

  ```python
  colors = categorical_column_with_vocabulary_list(
      key='colors', vocabulary_list=('X', 'R', 'G', 'B', 'Y'),
      default_value=0)
  columns = [colors, ...]
  features = tf.parse_example(..., features=make_parse_example_spec(columns))
  linear_prediction = linear_model(features, columns)
  ```

  Embedding for a DNN model:

  ```python
  columns = [embedding_column(colors, 3),...]
  features = tf.parse_example(..., features=make_parse_example_spec(columns))
  dense_tensor = input_layer(features, columns)
  ```

  Args:
    key: A unique string identifying the input feature. It is used as the
      column name and the dictionary key for feature parsing configs, feature
      `Tensor` objects, and feature columns.
    vocabulary_list: An ordered iterable defining the vocabulary. Each
      feature is mapped to the index of its value (if present) in
      `vocabulary_list`. Must be castable to `dtype`.
    dtype: The type of features. Only string and integer types are supported.
      If `None`, it will be inferred from `vocabulary_list`.
    default_value: The value to use for values not in `vocabulary_list`.

  Returns:
    A `_CategoricalColumn` with in-memory vocabulary.

  Raises:
    ValueError: if `vocabulary_list` is empty, or contains duplicate keys.
    ValueError: if `dtype` is not integer or string.
  """
  if (vocabulary_list is None) or (len(vocabulary_list) < 1):
    raise ValueError(
        'vocabulary_list {} must be non-empty, column_name: {}'.format(
            vocabulary_list, key))
  if len(set(vocabulary_list)) != len(vocabulary_list):
    raise ValueError(
        'Duplicate keys in vocabulary_list {}, column_name: {}'.format(
            vocabulary_list, key))
  # Infer the vocabulary dtype via numpy, then check it is string or integer.
  vocabulary_dtype = dtypes.as_dtype(np.array(vocabulary_list).dtype)
  _assert_string_or_int(
      vocabulary_dtype, prefix='column_name: {} vocabulary'.format(key))
  if dtype is None:
    dtype = vocabulary_dtype
  elif dtype.is_integer != vocabulary_dtype.is_integer:
    raise ValueError(
        'dtype {} and vocabulary dtype {} do not match, column_name: {}'.format(
            dtype, vocabulary_dtype, key))
  _assert_string_or_int(dtype, prefix='column_name: {}'.format(key))
  return _VocabularyListCategoricalColumn(
      key=key,
      vocabulary_list=tuple(vocabulary_list),
      dtype=dtype,
      default_value=default_value)


def categorical_column_with_identity(key, num_buckets, default_value=None):
  """A `_CategoricalColumn` that returns identity values.
  Use this when your inputs are integers in the range `[0, num_buckets)`, and
  you want to use the input value itself as the categorical ID. Values
  outside this range will result in `default_value` if specified, otherwise
  it will fail.

  Typically, this is used for contiguous ranges of integer indexes, but it
  doesn't have to be. This might be inefficient, however, if many of IDs are
  unused. Consider `categorical_column_with_hash_bucket` in that case.

  `features[key]` is either `Tensor` or `SparseTensor`. If `Tensor`, missing
  values can be represented by `-1` for int and `''` for string. Note that
  these values are independent of the `default_value` argument.

  In the following examples, each input in the range `[0, 1000000)` is
  assigned the same value. All other inputs are assigned `default_value` 0.
  Note that a literal 0 in inputs will result in the same default ID.

  Linear model:

  ```python
  video_id = categorical_column_with_identity(
      key='video_id', num_buckets=1000000, default_value=0)
  columns = [video_id, ...]
  features = tf.parse_example(..., features=make_parse_example_spec(columns))
  linear_prediction = linear_model(features, columns)
  ```

  Embedding for a DNN model:

  ```python
  columns = [embedding_column(video_id, 9),...]
  features = tf.parse_example(..., features=make_parse_example_spec(columns))
  dense_tensor = input_layer(features, columns)
  ```

  Args:
    key: A unique string identifying the input feature. It is used as the
      column name and the dictionary key for feature parsing configs, feature
      `Tensor` objects, and feature columns.
    num_buckets: Range of inputs and outputs is `[0, num_buckets)`.
    default_value: If `None`, this column's graph operations will fail for
      out-of-range inputs. Otherwise, this value must be in the range
      `[0, num_buckets)`, and will replace inputs in that range.

  Returns:
    A `_CategoricalColumn` that returns identity values.

  Raises:
    ValueError: if `num_buckets` is less than one.
    ValueError: if `default_value` is not in range `[0, num_buckets)`.
  """
  if num_buckets < 1:
    raise ValueError(
        'num_buckets {} < 1, column_name {}'.format(num_buckets, key))
  if (default_value is not None) and (
      (default_value < 0) or (default_value >= num_buckets)):
    raise ValueError(
        'default_value {} not in range [0, {}), column_name {}'.format(
            default_value, num_buckets, key))
  return _IdentityCategoricalColumn(
      key=key, num_buckets=num_buckets, default_value=default_value)


def indicator_column(categorical_column):
  """Represents multi-hot representation of given categorical column.

  Used to wrap any `categorical_column_*`.

  ```python
  name = indicator_column(categorical_column_with_vocabulary_list(
      'name', ['bob', 'george', 'wanda']))
  all_feature_columns = [name, ...]
  dense_tensor = input_layer(features, all_feature_columns)

  dense_tensor == [[1, 0, 0]]  # If "name" bytes_list is ["bob"]
  dense_tensor == [[1, 0, 1]]  # If "name" bytes_list is ["bob", "wanda"]
  dense_tensor == [[2, 0, 0]]  # If "name" bytes_list is ["bob", "bob"]
  ```

  Args:
    categorical_column: A `_CategoricalColumn` which is created by
      `categorical_column_with_*` or crossed_column functions.

  Returns:
    An `_IndicatorColumn`.
  """
  return _IndicatorColumn(categorical_column)


def weighted_categorical_column(
    categorical_column, weight_feature_key, dtype=dtypes.float32):
  """Applies weight values to a `_CategoricalColumn`.

  Use this when each of your sparse inputs has both an ID and a value. For
  example, if you're representing text documents as a collection of word
  frequencies, you can provide 2 parallel sparse input features ('terms' and
  'frequencies' below).
  Example:

  Input `tf.Example` objects:

  [
    features {
      feature {
        key: "terms"
        value {bytes_list {value: "very" value: "model"}}
      }
      feature {
        key: "frequencies"
        value {float_list {value: 0.3 value: 0.1}}
      }
    },
    features {
      feature {
        key: "terms"
        value {bytes_list {value: "when" value: "course" value: "human"}}
      }
      feature {
        key: "frequencies"
        value {float_list {value: 0.4 value: 0.1 value: 0.2}}
      }
    }
  ]

  ```python
  categorical_column = categorical_column_with_hash_bucket(
      column_name='terms', hash_bucket_size=1000)
  weighted_column = weighted_categorical_column(
      categorical_column=categorical_column, weight_feature_key='frequencies')
  columns = [weighted_column, ...]
  features = tf.parse_example(..., features=make_parse_example_spec(columns))
  linear_prediction = linear_model(features, columns)
  ```

  This assumes the input dictionary contains a `SparseTensor` for key
  'terms', and a `SparseTensor` for key 'frequencies'. These 2 tensors must
  have the same indices and dense shape.

  Args:
    categorical_column: A `_CategoricalColumn` created by
      `categorical_column_with_*` functions.
    weight_feature_key: String key for weight values.
    dtype: Type of weights, such as `tf.float32`. Only float and integer
      weights are supported.

  Returns:
    A `_CategoricalColumn` composed of two sparse features: one represents
    id, the other represents weight (value) of the id feature in that
    example.

  Raises:
    ValueError: if `dtype` is not convertible to float.
  """
  if (dtype is None) or not (dtype.is_integer or dtype.is_floating):
    raise ValueError('dtype {} is not convertible to float.'.format(dtype))
  return _WeightedCategoricalColumn(
      categorical_column=categorical_column,
      weight_feature_key=weight_feature_key,
      dtype=dtype)


def crossed_column(keys, hash_bucket_size, hash_key=None):
  """Returns a column for performing crosses of categorical features.

  Crossed features will be hashed according to `hash_bucket_size`.
  Conceptually, the transformation can be thought of as:
    Hash(cartesian product of features) % `hash_bucket_size`

  For example, if the input features are:

  * SparseTensor referred by first key:

    shape = [2, 2]
    [0, 0]: "a"
    [1, 0]: "b"
    [1, 1]: "c"

  * SparseTensor referred by second key:

    shape = [2, 1]
    [0, 0]: "d"
    [1, 0]: "e"

  then crossed feature will look like:

    shape = [2, 2]
    [0, 0]: Hash64("d", Hash64("a")) % hash_bucket_size
    [1, 0]: Hash64("e", Hash64("b")) % hash_bucket_size
    [1, 1]: Hash64("e", Hash64("c")) % hash_bucket_size

  Here is an example to create a linear model with crosses of string
  features:

  ```python
  keywords_x_doc_terms = crossed_column(['keywords', 'doc_terms'], 50K)
  all_feature_columns = [keywords_x_doc_terms, ...]
  linear_prediction = linear_model(features, all_feature_columns)
  ```

  You could also use vocabulary lookup before crossing:

  ```python
  keywords = categorical_column_with_vocabulary_file(
      'keywords', '/path/to/vocabulary/file', vocabulary_size=1K)
  keywords_x_doc_terms = crossed_column([keywords, 'doc_terms'], 50K)
  all_feature_columns = [keywords_x_doc_terms, ...]
  linear_prediction = linear_model(features, all_feature_columns)
  ```

  If an input feature is of numeric type, you can use
  `categorical_column_with_identity`, or `bucketized_column`, as in the
  example:

  ```python
  # vertical_id is an integer categorical feature.
  vertical_id = categorical_column_with_identity('vertical_id', 10K)
  price = numeric_column('price')
  # bucketized_column converts numerical feature to a categorical one.
  bucketized_price = bucketized_column(price, boundaries=[...])
  vertical_id_x_price = crossed_column([vertical_id, bucketized_price], 50K)
  all_feature_columns = [vertical_id_x_price, ...]
  linear_prediction = linear_model(features, all_feature_columns)
  ```

  To use crossed column in DNN model, you need to add it in an embedding
  column as in this example:

  ```python
  vertical_id_x_price = crossed_column([vertical_id, bucketized_price], 50K)
  vertical_id_x_price_embedded = embedding_column(vertical_id_x_price, 10)
  dense_tensor = input_layer(features, [vertical_id_x_price_embedded, ...])
  ```

  Args:
    keys: An iterable identifying the features to be crossed. Each element
      can be either:
      * string: Will use the corresponding feature which must be of string
        type.
      * `_CategoricalColumn`: Will use the transformed tensor produced by
        this column. Does not support hashed categorical column.
    hash_bucket_size: An int > 1. The number of buckets.
    hash_key: Specify the hash_key that will be used by the `FingerprintCat64`
      function to combine the crosses fingerprints on SparseCrossOp
      (optional).

  Returns:
    A `_CrossedColumn`.

  Raises:
    ValueError: If `len(keys) < 2`.
    ValueError: If any of the keys is neither a string nor
      `_CategoricalColumn`.
    ValueError: If any of the keys is `_HashedCategoricalColumn`.
    ValueError: If `hash_bucket_size < 1`.
  """
  if not hash_bucket_size or hash_bucket_size < 1:
    # NOTE(review): the message says "> 1" but the check only rejects values
    # < 1 (or falsy), so hash_bucket_size == 1 is accepted — confirm intent.
    raise ValueError('hash_bucket_size must be > 1. '
                     'hash_bucket_size: {}'.format(hash_bucket_size))
  if not keys or len(keys) < 2:
    raise ValueError(
        'keys must be a list with length > 1. Given: {}'.format(keys))
  for key in keys:
    if (not isinstance(key, six.string_types) and
        not isinstance(key, _CategoricalColumn)):
      raise ValueError(
          'Unsupported key type. All keys must be either string, or '
          'categorical column except _HashedCategoricalColumn. '
          'Given: {}'.format(key))
    if isinstance(key, _HashedCategoricalColumn):
      raise ValueError(
          '_HashedCategoricalColumn is not supported. Instead, use the feature '
          'name as a string. Given: {}'.format(key))
  return _CrossedColumn(
      keys=tuple(keys),
      hash_bucket_size=hash_bucket_size,
      hash_key=hash_key)


class _FeatureColumn(object):
  """Represents a feature column abstraction.

  WARNING: Do not subclass this layer unless you know what you are doing:
  the API is subject to future changes.

  To distinguish the concept of a feature family and a specific binary
  feature within a family, we refer to a feature family like "country" as a
  feature column. Following is an example feature in a `tf.Example` format:
    {key: "country",  value: [ "US" ]}
  In this example the value of feature is "US" and "country" refers to the
  column of the feature.

  This class is an abstract class. User should not create instances of this.
  """
  __metaclass__ = abc.ABCMeta

  @abc.abstractproperty
  def name(self):
    """Returns string. Used for variable_scope and naming."""
    pass

  @abc.abstractmethod
  def _transform_feature(self, inputs):
    """Returns intermediate representation (usually a `Tensor`).

    Uses `inputs` to create an intermediate representation (usually a
    `Tensor`) that other feature columns can use.

    Example usage of `inputs`:
    Let's say a Feature column depends on raw feature ('raw') and another
    `_FeatureColumn` (input_fc). To access corresponding `Tensor`s, inputs
    will be used as follows:

    ```python
    raw_tensor = inputs.get('raw')
    fc_tensor = inputs.get(input_fc)
    ```

    Args:
      inputs: A `_LazyBuilder` object to access inputs.

    Returns:
      Transformed feature `Tensor`.
    """
    pass

  @abc.abstractproperty
  def _parse_example_spec(self):
    """Returns a `tf.Example` parsing spec as dict.

    It is used for get_parsing_spec for `tf.parse_example`. Returned spec is
    a dict from keys ('string') to `VarLenFeature`, `FixedLenFeature`, and
    other supported objects. Please check documentation of
    ${tf.parse_example} for all supported spec objects.

    Let's say a Feature column depends on raw feature ('raw') and another
    `_FeatureColumn` (input_fc).
class _DenseColumn(_FeatureColumn):
  """Represents a column which can be represented as `Tensor`.

  WARNING: Do not subclass this layer unless you know what you are doing:
  the API is subject to future changes.

  Some examples of this type are: numeric_column, embedding_column,
  indicator_column.
  """

  # NOTE(review): Python 2-only metaclass declaration; see _FeatureColumn.
  __metaclass__ = abc.ABCMeta

  @abc.abstractproperty
  def _variable_shape(self):
    """`TensorShape` of `_get_dense_tensor`, without batch dimension."""
    pass

  @abc.abstractmethod
  def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
    """Returns a `Tensor`.

    The output of this function will be used by model-builder-functions. For
    example the pseudo code of `input_layer` will be like:

    ```python
    def input_layer(features, feature_columns, ...):
      outputs = [fc._get_dense_tensor(...) for fc in feature_columns]
      return tf.concat(outputs)
    ```

    Args:
      inputs: A `_LazyBuilder` object to access inputs.
      weight_collections: List of graph collections to which Variables (if any
        will be created) are added.
      trainable: If `True` also add variables to the graph collection
        `GraphKeys.TRAINABLE_VARIABLES` (see ${tf.Variable}).

    Returns:
      `Tensor` of shape [batch_size] + `_variable_shape`.
    """
    pass
""" pass def _create_dense_column_weighted_sum( column, builder, units, weight_collections, trainable): """Create a weighted sum of a dense column for linear_model.""" tensor = column._get_dense_tensor( # pylint: disable=protected-access builder, weight_collections=weight_collections, trainable=trainable) num_elements = column._variable_shape.num_elements() # pylint: disable=protected-access batch_size = array_ops.shape(tensor)[0] tensor = array_ops.reshape(tensor, shape=(batch_size, num_elements)) weight = variable_scope.get_variable( name='weights', shape=[num_elements, units], initializer=init_ops.zeros_initializer(), trainable=trainable, collections=weight_collections) return math_ops.matmul(tensor, weight, name='weighted_sum') class _CategoricalColumn(_FeatureColumn): """Represents a categorical feature. WARNING: Do not subclass this layer unless you know what you are doing: the API is subject to future changes. A categorical feature typically handled with a ${tf.SparseTensor} of IDs. """ __metaclass__ = abc.ABCMeta IdWeightPair = collections.namedtuple( # pylint: disable=invalid-name 'IdWeightPair', ['id_tensor', 'weight_tensor']) @abc.abstractproperty def _num_buckets(self): """Returns number of buckets in this sparse feature.""" pass @abc.abstractmethod def _get_sparse_tensors(self, inputs, weight_collections=None, trainable=None): """Returns an IdWeightPair. `IdWeightPair` is a pair of `SparseTensor`s which represents ids and weights. `IdWeightPair.id_tensor` is typically a `batch_size` x `num_buckets` `SparseTensor` of `int64`. `IdWeightPair.weight_tensor` is either a `SparseTensor` of `float` or `None` to indicate all weights should be taken to be 1. If specified, `weight_tensor` must have exactly the same shape and indices as `sp_ids`. Expected `SparseTensor` is same as parsing output of a `VarLenFeature` which is a ragged matrix. Args: inputs: A `LazyBuilder` as a cache to get input tensors required to create `IdWeightPair`. 
def _create_categorical_column_weighted_sum(
    column, builder, units, sparse_combiner, weight_collections, trainable):
  """Create a weighted sum of a categorical column for linear_model."""
  sparse_tensors = column._get_sparse_tensors(  # pylint: disable=protected-access
      builder,
      weight_collections=weight_collections,
      trainable=trainable)
  # One weight row per bucket; zero-initialized so the initial prediction is 0.
  weight = variable_scope.get_variable(
      name='weight',
      shape=(column._num_buckets, units),  # pylint: disable=protected-access
      initializer=init_ops.zeros_initializer(),
      trainable=trainable,
      collections=weight_collections)
  # The "embedding lookup" over a (num_buckets, units) table with the given
  # combiner is exactly the per-example weighted sum for a linear model.
  return _safe_embedding_lookup_sparse(
      weight,
      sparse_tensors.id_tensor,
      sparse_weights=sparse_tensors.weight_tensor,
      combiner=sparse_combiner,
      name='weighted_sum')


class _LazyBuilder(object):
  """Handles caching of transformations while building the model.

  `FeatureColumn` specifies how to digest an input column to the network. Some
  feature columns require data transformations. This class caches those
  transformations.

  Some features may be used in more than one place. For example, one can use a
  bucketized feature by itself and a cross with it. In that case we
  should create only one bucketization op instead of creating ops for each
  feature column separately. To handle re-use of transformed columns,
  `_LazyBuilder` caches all previously transformed columns.

  Example:
  We're trying to use the following `FeatureColumns`:

  ```python
  bucketized_age = fc.bucketized_column(fc.numeric_column("age"), ...)
  keywords = fc.categorical_column_with_hash_buckets("keywords", ...)
  age_X_keywords = fc.crossed_column([bucketized_age, keywords])
  ... = linear_model(features,
                     [bucketized_age, keywords, age_X_keywords]
  ```

  If we transform each column independently, then we'll get duplication of
  bucketization (one for cross, one for bucketization itself).
  The `_LazyBuilder` eliminates this duplication.
  """

  def __init__(self, features):
    """Creates a `_LazyBuilder`.

    Args:
      features: A mapping from feature column to objects that are `Tensor` or
        `SparseTensor`, or can be converted to same via
        `sparse_tensor.convert_to_tensor_or_sparse_tensor`. A `string` key
        signifies a base feature (not-transformed). A `FeatureColumn` key
        means that this `Tensor` is the output of an existing `FeatureColumn`
        which can be reused.
    """
    # Shallow copy: protects against the caller mutating the dict itself, but
    # the contained tensors are shared, not copied.
    self._features = features.copy()
    self._feature_tensors = {}

  def get(self, key):
    """Returns a `Tensor` for the given key.

    A `str` key is used to access a base feature (not-transformed). When a
    `_FeatureColumn` is passed, the transformed feature is returned if it
    already exists, otherwise the given `_FeatureColumn` is asked to provide
    its transformed output, which is then cached.

    Args:
      key: a `str` or a `_FeatureColumn`.

    Returns:
      The transformed `Tensor` corresponding to the `key`.

    Raises:
      ValueError: if key is not found or a transformed `Tensor` cannot be
        computed.
    """
    if key in self._feature_tensors:
      # FeatureColumn is already transformed or converted.
      return self._feature_tensors[key]

    if key in self._features:
      # FeatureColumn is a raw feature.
      feature_tensor = sparse_tensor_lib.convert_to_tensor_or_sparse_tensor(
          self._features[key])
      self._feature_tensors[key] = feature_tensor
      return feature_tensor

    # NOTE(review): on Python 2, `unicode` keys would fail this `str` check —
    # presumably all keys are byte strings here; verify against callers.
    if not isinstance(key, (str, _FeatureColumn)):
      raise TypeError('"key" must be either a "str" or "_FeatureColumn". '
                      'Provided: {}'.format(key))

    # At this point a plain string key was not found among the raw features.
    if not isinstance(key, _FeatureColumn):
      raise ValueError('Feature {} is not in features dictionary.'.format(key))

    column = key
    logging.debug('Transforming feature_column %s.', column)
    transformed = column._transform_feature(self)  # pylint: disable=protected-access
    if transformed is None:
      raise ValueError('Column {} is not supported.'.format(column.name))
    # Cache so a column shared by several consumers is transformed only once.
    self._feature_tensors[column] = transformed
    return transformed
' 'Provided: {}'.format(key)) if not isinstance(key, _FeatureColumn): raise ValueError('Feature {} is not in features dictionary.'.format(key)) column = key logging.debug('Transforming feature_column %s.', column) transformed = column._transform_feature(self) # pylint: disable=protected-access if transformed is None: raise ValueError('Column {} is not supported.'.format(column.name)) self._feature_tensors[column] = transformed return transformed # TODO(ptucker): Move to third_party/tensorflow/python/ops/sparse_ops.py def _shape_offsets(shape): """Returns moving offset for each dimension given shape.""" offsets = [] for dim in reversed(shape): if offsets: offsets.append(dim * offsets[-1]) else: offsets.append(dim) offsets.reverse() return offsets # TODO(ptucker): Move to third_party/tensorflow/python/ops/sparse_ops.py def _to_sparse_input(input_tensor, ignore_value=None): """Converts a `Tensor` to a `SparseTensor`, dropping ignore_value cells. If `input_tensor` is already a `SparseTensor`, just return it. Args: input_tensor: A string or integer `Tensor`. ignore_value: Entries in `dense_tensor` equal to this value will be absent from the resulting `SparseTensor`. If `None`, default value of `dense_tensor`'s dtype will be used ('' for `str`, -1 for `int`). Returns: A `SparseTensor` with the same shape as `input_tensor`. Raises: ValueError: when `input_tensor`'s rank is `None`. """ input_tensor = sparse_tensor_lib.convert_to_tensor_or_sparse_tensor( input_tensor) if isinstance(input_tensor, sparse_tensor_lib.SparseTensor): return input_tensor with ops.name_scope(None, 'to_sparse_input', (input_tensor, ignore_value,)): input_rank = input_tensor.get_shape().ndims if input_rank is None: # TODO(b/32318825): Implement dense_to_sparse_tensor for undefined rank. 
raise ValueError('Undefined input_tensor shape.') if ignore_value is None: ignore_value = '' if input_tensor.dtype == dtypes.string else -1 dense_shape = math_ops.cast(array_ops.shape(input_tensor), dtypes.int64) indices = array_ops.where(math_ops.not_equal( input_tensor, math_ops.cast(ignore_value, input_tensor.dtype))) # Flattens the tensor and indices for use with gather. flat_tensor = array_ops.reshape(input_tensor, [-1]) flat_indices = indices[:, input_rank - 1] # Computes the correct flattened indices for 2d (or higher) tensors. if input_rank > 1: higher_dims = indices[:, :input_rank - 1] shape_offsets = array_ops.stack( _shape_offsets(array_ops.unstack(dense_shape)[1:])) offsets = math_ops.reduce_sum( math_ops.multiply(higher_dims, shape_offsets), reduction_indices=[1]) flat_indices = math_ops.add(flat_indices, offsets) values = array_ops.gather(flat_tensor, flat_indices) return sparse_tensor_lib.SparseTensor(indices, values, dense_shape) def _check_feature_columns(feature_columns): """Verifies feature_columns input.""" if isinstance(feature_columns, dict): raise ValueError('Expected feature_columns to be iterable, found dict.') for column in feature_columns: if not isinstance(column, _FeatureColumn): raise ValueError('Items of feature_columns must be a _FeatureColumn.' 'Given: {}.'.format(column)) if not feature_columns: raise ValueError('feature_columns must not be empty.') name_to_column = dict() for column in feature_columns: if column.name in name_to_column: raise ValueError('Duplicate feature column name found for columns: {} ' 'and {}. This usually means that these columns refer to ' 'same base feature. 
class _NumericColumn(_DenseColumn,
                     collections.namedtuple('_NumericColumn', [
                         'key', 'shape', 'default_value', 'dtype',
                         'normalizer_fn'
                     ])):
  """see `numeric_column`."""

  @property
  def name(self):
    # The raw feature key doubles as the column name.
    return self.key

  @property
  def _parse_example_spec(self):
    return {
        self.key:
            parsing_ops.FixedLenFeature(self.shape, self.dtype,
                                        self.default_value)
    }

  def _transform_feature(self, inputs):
    """Casts the raw dense feature to float, after optional normalization."""
    input_tensor = inputs.get(self.key)
    if isinstance(input_tensor, sparse_tensor_lib.SparseTensor):
      raise ValueError(
          'The corresponding Tensor of numerical column must be a Tensor. '
          'SparseTensor is not supported. key: {}'.format(self.key))
    if self.normalizer_fn is not None:
      # User-supplied normalization runs before the float cast.
      input_tensor = self.normalizer_fn(input_tensor)
    return math_ops.to_float(input_tensor)

  @property
  def _variable_shape(self):
    return tensor_shape.TensorShape(self.shape)

  def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
    """Returns dense `Tensor` representing numeric feature.

    Args:
      inputs: A `_LazyBuilder` object to access inputs.
      weight_collections: Unused `weight_collections` since no variables are
        created in this function.
      trainable: Unused `trainable` bool since no variables are created in
        this function.

    Returns:
      Dense `Tensor` created within `_transform_feature`.
    """
    # Do nothing with weight_collections and trainable since no variables are
    # created in this function.
    del weight_collections
    del trainable
    # Feature has been already transformed. Return the intermediate
    # representation created by _transform_feature.
    return inputs.get(self)
class _BucketizedColumn(_DenseColumn, _CategoricalColumn,
                        collections.namedtuple('_BucketizedColumn', [
                            'source_column', 'boundaries'])):
  """See `bucketized_column`."""

  @property
  def name(self):
    return '{}_bucketized'.format(self.source_column.name)

  @property
  def _parse_example_spec(self):
    # Parsing is delegated to the wrapped numeric source column.
    return self.source_column._parse_example_spec  # pylint: disable=protected-access

  def _transform_feature(self, inputs):
    source_tensor = inputs.get(self.source_column)
    return math_ops._bucketize(  # pylint: disable=protected-access
        source_tensor,
        boundaries=self.boundaries)

  @property
  def _variable_shape(self):
    # One extra bucket beyond the boundaries (values above the last boundary).
    return tensor_shape.TensorShape(
        tuple(self.source_column.shape) + (len(self.boundaries) + 1,))

  def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
    # No variables created here; the dense form is a one-hot of the bucket id.
    del weight_collections
    del trainable
    input_tensor = inputs.get(self)
    return array_ops.one_hot(
        indices=math_ops.to_int64(input_tensor),
        depth=len(self.boundaries) + 1,
        on_value=1.,
        off_value=0.)

  @property
  def _num_buckets(self):
    # By construction, source_column is always one-dimensional.
    return (len(self.boundaries) + 1) * self.source_column.shape[0]

  def _get_sparse_tensors(self, inputs, weight_collections=None,
                          trainable=None):
    """Converts bucket ids to a `SparseTensor` of distinct ids per dimension."""
    input_tensor = inputs.get(self)
    batch_size = array_ops.shape(input_tensor)[0]
    # By construction, source_column is always one-dimensional.
    source_dimension = self.source_column.shape[0]

    # i1: batch index repeated per source dimension; i2: source-dimension
    # index tiled per batch row. Together they enumerate every cell.
    i1 = array_ops.reshape(
        array_ops.tile(
            array_ops.expand_dims(math_ops.range(0, batch_size), 1),
            [1, source_dimension]),
        (-1,))
    i2 = array_ops.tile(math_ops.range(0, source_dimension), [batch_size])
    # Flatten the bucket indices and unique them across dimensions
    # E.g. 2nd dimension indices will range from k to 2*k-1 with k buckets
    bucket_indices = (
        array_ops.reshape(input_tensor, (-1,)) +
        (len(self.boundaries) + 1) * i2)

    indices = math_ops.to_int64(array_ops.transpose(array_ops.stack((i1, i2))))
    dense_shape = math_ops.to_int64(array_ops.stack(
        [batch_size, source_dimension]))
    sparse_tensor = sparse_tensor_lib.SparseTensor(
        indices=indices,
        values=bucket_indices,
        dense_shape=dense_shape)
    return _CategoricalColumn.IdWeightPair(sparse_tensor, None)
class _EmbeddingColumn(
    _DenseColumn,
    collections.namedtuple('_EmbeddingColumn', (
        'categorical_column', 'dimension', 'combiner', 'initializer',
        'ckpt_to_load_from', 'tensor_name_in_ckpt', 'max_norm', 'trainable'
    ))):
  """See `_embedding_column`."""

  @property
  def name(self):
    # Lazily cached on the instance (namedtuple subclasses still carry a
    # __dict__, so attribute assignment works).
    if not hasattr(self, '_name'):
      self._name = '{}_embedding'.format(self.categorical_column.name)
    return self._name

  @property
  def _parse_example_spec(self):
    # Parsing is delegated to the wrapped categorical column.
    return self.categorical_column._parse_example_spec  # pylint: disable=protected-access

  def _transform_feature(self, inputs):
    return inputs.get(self.categorical_column)

  @property
  def _variable_shape(self):
    if not hasattr(self, '_shape'):
      self._shape = tensor_shape.vector(self.dimension)
    return self._shape

  def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
    """Looks up (and combines) embeddings for the sparse ids of this column."""
    # Get sparse IDs and weights.
    sparse_tensors = self.categorical_column._get_sparse_tensors(  # pylint: disable=protected-access
        inputs, weight_collections=weight_collections, trainable=trainable)
    sparse_ids = sparse_tensors.id_tensor
    sparse_weights = sparse_tensors.weight_tensor

    # Create embedding weight, and restore from checkpoint if necessary.
    # Variable is trainable only when both the column and the caller ask
    # for it.
    embedding_weights = variable_scope.get_variable(
        name='embedding_weights',
        shape=(self.categorical_column._num_buckets, self.dimension),  # pylint: disable=protected-access
        dtype=dtypes.float32,
        initializer=self.initializer,
        trainable=self.trainable and trainable,
        collections=weight_collections)
    if self.ckpt_to_load_from is not None:
      to_restore = embedding_weights
      if isinstance(to_restore, variables.PartitionedVariable):
        to_restore = to_restore._get_variable_list()  # pylint: disable=protected-access
      checkpoint_utils.init_from_checkpoint(self.ckpt_to_load_from, {
          self.tensor_name_in_ckpt: to_restore
      })

    # Return embedding lookup result.
    return _safe_embedding_lookup_sparse(
        embedding_weights=embedding_weights,
        sparse_ids=sparse_ids,
        sparse_weights=sparse_weights,
        combiner=self.combiner,
        name='%s_weights' % self.name,
        max_norm=self.max_norm)
embedding_weights = variable_scope.get_variable( name='embedding_weights', shape=(self.categorical_column._num_buckets, self.dimension), # pylint: disable=protected-access dtype=dtypes.float32, initializer=self.initializer, trainable=self.trainable and trainable, collections=weight_collections) if self.ckpt_to_load_from is not None: to_restore = embedding_weights if isinstance(to_restore, variables.PartitionedVariable): to_restore = to_restore._get_variable_list() # pylint: disable=protected-access checkpoint_utils.init_from_checkpoint(self.ckpt_to_load_from, { self.tensor_name_in_ckpt: to_restore }) # Return embedding lookup result. return _safe_embedding_lookup_sparse( embedding_weights=embedding_weights, sparse_ids=sparse_ids, sparse_weights=sparse_weights, combiner=self.combiner, name='%s_weights' % self.name, max_norm=self.max_norm) def _create_tuple(shape, value): """Returns a tuple with given shape and filled with value.""" if shape: return tuple([_create_tuple(shape[1:], value) for _ in range(shape[0])]) return value def _as_tuple(value): if not nest.is_sequence(value): return value return tuple([_as_tuple(v) for v in value]) def _check_shape(shape, key): """Returns shape if it's valid, raises error otherwise.""" assert shape is not None if not nest.is_sequence(shape): shape = [shape] shape = tuple(shape) for dimension in shape: if not isinstance(dimension, int): raise TypeError('shape dimensions must be integer. ' 'shape: {}, key: {}'.format(shape, key)) if dimension < 1: raise ValueError('shape dimensions must be greater than 0. 
def _is_shape_and_default_value_compatible(default_value, shape):
  """Verifies compatibility of shape and default_value."""
  # Invalid condition:
  #  * if default_value is not a scalar and shape is empty
  #  * or if default_value is an iterable and shape is not empty
  if nest.is_sequence(default_value) != bool(shape):
    return False
  if not shape:
    return True
  if len(default_value) != shape[0]:
    return False
  # Recurse element-wise: every sub-value must match the remaining dims.
  for i in range(shape[0]):
    if not _is_shape_and_default_value_compatible(default_value[i], shape[1:]):
      return False
  return True


def _check_default_value(shape, default_value, dtype, key):
  """Returns default value as tuple if it's valid, otherwise raises errors.

  This function verifies that `default_value` is compatible with both `shape`
  and `dtype`. If it is not compatible, it raises an error. If it is
  compatible, it casts default_value to a tuple and returns it. `key` is used
  only for error message.

  Args:
    shape: An iterable of integers specifies the shape of the `Tensor`.
    default_value: If a single value is provided, the same value will be
      applied as the default value for every item. If an iterable of values is
      provided, the shape of the `default_value` should be equal to the given
      `shape`.
    dtype: defines the type of values. Default value is `tf.float32`. Must be
      a non-quantized, real integer or floating point type.
    key: Column name, used only for error messages.

  Returns:
    A tuple which will be used as default value.

  Raises:
    TypeError: if `default_value` is an iterable but not compatible with
      `shape`
    TypeError: if `default_value` is not compatible with `dtype`.
    ValueError: if `dtype` is not convertible to `tf.float32`.
  """
  if default_value is None:
    return None

  # Scalar defaults: ints are accepted for any dtype, floats only for
  # floating dtypes; the scalar is broadcast to the full shape.
  if isinstance(default_value, int):
    return _create_tuple(shape, default_value)

  if isinstance(default_value, float) and dtype.is_floating:
    return _create_tuple(shape, default_value)

  if callable(getattr(default_value, 'tolist', None)):  # Handles numpy arrays
    default_value = default_value.tolist()

  if nest.is_sequence(default_value):
    if not _is_shape_and_default_value_compatible(default_value, shape):
      raise ValueError(
          'The shape of default_value must be equal to given shape. '
          'default_value: {}, shape: {}, key: {}'.format(
              default_value, shape, key))
    # Check if the values in the list are all integers or are convertible to
    # floats.
    is_list_all_int = all(
        isinstance(v, int) for v in nest.flatten(default_value))
    is_list_has_float = any(
        isinstance(v, float) for v in nest.flatten(default_value))
    if is_list_all_int:
      return _as_tuple(default_value)
    if is_list_has_float and dtype.is_floating:
      return _as_tuple(default_value)
  raise TypeError('default_value must be compatible with dtype. '
                  'default_value: {}, dtype: {}, key: {}'.format(
                      default_value, dtype, key))
""" if default_value is None: return None if isinstance(default_value, int): return _create_tuple(shape, default_value) if isinstance(default_value, float) and dtype.is_floating: return _create_tuple(shape, default_value) if callable(getattr(default_value, 'tolist', None)): # Handles numpy arrays default_value = default_value.tolist() if nest.is_sequence(default_value): if not _is_shape_and_default_value_compatible(default_value, shape): raise ValueError( 'The shape of default_value must be equal to given shape. ' 'default_value: {}, shape: {}, key: {}'.format( default_value, shape, key)) # Check if the values in the list are all integers or are convertible to # floats. is_list_all_int = all( isinstance(v, int) for v in nest.flatten(default_value)) is_list_has_float = any( isinstance(v, float) for v in nest.flatten(default_value)) if is_list_all_int: return _as_tuple(default_value) if is_list_has_float and dtype.is_floating: return _as_tuple(default_value) raise TypeError('default_value must be compatible with dtype. ' 'default_value: {}, dtype: {}, key: {}'.format( default_value, dtype, key)) class _HashedCategoricalColumn( _CategoricalColumn, collections.namedtuple('_HashedCategoricalColumn', ['key', 'hash_bucket_size', 'dtype'])): """see `categorical_column_with_hash_bucket`.""" @property def name(self): return self.key @property def _parse_example_spec(self): return {self.key: parsing_ops.VarLenFeature(self.dtype)} def _transform_feature(self, inputs): input_tensor = _to_sparse_input(inputs.get(self.key)) if not isinstance(input_tensor, sparse_tensor_lib.SparseTensor): raise ValueError('SparseColumn input must be a SparseTensor.') _assert_string_or_int( input_tensor.dtype, prefix='column_name: {} input_tensor'.format(self.key)) if self.dtype.is_integer != input_tensor.dtype.is_integer: raise ValueError( 'Column dtype and SparseTensors dtype must be compatible. 
class _VocabularyFileCategoricalColumn(
    _CategoricalColumn,
    collections.namedtuple('_VocabularyFileCategoricalColumn', (
        'key', 'vocabulary_file', 'vocabulary_size', 'num_oov_buckets',
        'dtype', 'default_value'
    ))):
  """See `categorical_column_with_vocabulary_file`."""

  @property
  def name(self):
    return self.key

  @property
  def _parse_example_spec(self):
    return {self.key: parsing_ops.VarLenFeature(self.dtype)}

  def _transform_feature(self, inputs):
    """Maps sparse values to ids via a file-backed vocabulary table."""
    input_tensor = _to_sparse_input(inputs.get(self.key))

    if self.dtype.is_integer != input_tensor.dtype.is_integer:
      raise ValueError(
          'Column dtype and SparseTensors dtype must be compatible. '
          'key: {}, column dtype: {}, tensor dtype: {}'.format(
              self.key, self.dtype, input_tensor.dtype))

    _assert_string_or_int(
        input_tensor.dtype,
        prefix='column_name: {} input_tensor'.format(self.key))

    key_dtype = self.dtype
    if input_tensor.dtype.is_integer:
      # `index_table_from_file` requires 64-bit integer keys.
      key_dtype = dtypes.int64
      input_tensor = math_ops.to_int64(input_tensor)

    return lookup_ops.index_table_from_file(
        vocabulary_file=self.vocabulary_file,
        num_oov_buckets=self.num_oov_buckets,
        vocab_size=self.vocabulary_size,
        default_value=self.default_value,
        key_dtype=key_dtype,
        name='{}_lookup'.format(self.key)).lookup(input_tensor)

  @property
  def _num_buckets(self):
    """Returns number of buckets in this sparse feature."""
    # In-vocabulary ids plus the out-of-vocabulary overflow buckets.
    return self.vocabulary_size + self.num_oov_buckets

  def _get_sparse_tensors(
      self, inputs, weight_collections=None, trainable=None):
    return _CategoricalColumn.IdWeightPair(inputs.get(self), None)
class _VocabularyListCategoricalColumn(
    _CategoricalColumn,
    collections.namedtuple('_VocabularyListCategoricalColumn', (
        'key', 'vocabulary_list', 'dtype', 'default_value'
    ))):
  """See `categorical_column_with_vocabulary_list`."""

  @property
  def name(self):
    return self.key

  @property
  def _parse_example_spec(self):
    return {self.key: parsing_ops.VarLenFeature(self.dtype)}

  def _transform_feature(self, inputs):
    """Maps sparse values to ids via an in-memory vocabulary table."""
    input_tensor = _to_sparse_input(inputs.get(self.key))

    if self.dtype.is_integer != input_tensor.dtype.is_integer:
      raise ValueError(
          'Column dtype and SparseTensors dtype must be compatible. '
          'key: {}, column dtype: {}, tensor dtype: {}'.format(
              self.key, self.dtype, input_tensor.dtype))

    _assert_string_or_int(
        input_tensor.dtype,
        prefix='column_name: {} input_tensor'.format(self.key))

    key_dtype = self.dtype
    if input_tensor.dtype.is_integer:
      # `index_table_from_tensor` requires 64-bit integer keys.
      key_dtype = dtypes.int64
      input_tensor = math_ops.to_int64(input_tensor)

    return lookup_ops.index_table_from_tensor(
        vocabulary_list=tuple(self.vocabulary_list),
        default_value=self.default_value,
        dtype=key_dtype,
        name='{}_lookup'.format(self.key)).lookup(input_tensor)

  @property
  def _num_buckets(self):
    """Returns number of buckets in this sparse feature."""
    # NOTE: unlike the file-backed column, no OOV buckets are added here.
    return len(self.vocabulary_list)

  def _get_sparse_tensors(
      self, inputs, weight_collections=None, trainable=None):
    return _CategoricalColumn.IdWeightPair(inputs.get(self), None)
class _IdentityCategoricalColumn(
    _CategoricalColumn,
    collections.namedtuple('_IdentityCategoricalColumn', (
        'key', 'num_buckets', 'default_value'
    ))):
  """See `categorical_column_with_identity`."""

  @property
  def name(self):
    return self.key

  @property
  def _parse_example_spec(self):
    return {self.key: parsing_ops.VarLenFeature(dtypes.int64)}

  def _transform_feature(self, inputs):
    """Validates integer ids, asserting or defaulting out-of-range values."""
    input_tensor = _to_sparse_input(inputs.get(self.key))

    if not input_tensor.dtype.is_integer:
      raise ValueError(
          'Invalid input, not integer. key: {} dtype: {}'.format(
              self.key, input_tensor.dtype))

    values = math_ops.to_int64(input_tensor.values, name='values')
    num_buckets = math_ops.to_int64(self.num_buckets, name='num_buckets')
    zero = math_ops.to_int64(0, name='zero')
    if self.default_value is None:
      # Fail if values are out-of-range.
      assert_less = check_ops.assert_less(
          values, num_buckets, data=(values, num_buckets),
          name='assert_less_than_num_buckets')
      assert_greater = check_ops.assert_greater_equal(
          values, zero, data=(values,),
          name='assert_greater_or_equal_0')
      # Control dependencies guarantee the assertions run before the values
      # are consumed downstream.
      with ops.control_dependencies((assert_less, assert_greater)):
        values = array_ops.identity(values)
    else:
      # Assign default for out-of-range values.
      values = array_ops.where(
          math_ops.logical_or(
              values < zero, values >= num_buckets, name='out_of_range'),
          array_ops.fill(
              dims=array_ops.shape(values),
              value=math_ops.to_int64(self.default_value),
              name='default_values'),
          values)

    return sparse_tensor_lib.SparseTensor(
        indices=input_tensor.indices,
        values=values,
        dense_shape=input_tensor.dense_shape)

  @property
  def _num_buckets(self):
    """Returns number of buckets in this sparse feature."""
    return self.num_buckets

  def _get_sparse_tensors(
      self, inputs, weight_collections=None, trainable=None):
    return _CategoricalColumn.IdWeightPair(inputs.get(self), None)
class _WeightedCategoricalColumn(
    _CategoricalColumn,
    collections.namedtuple('_WeightedCategoricalColumn', (
        'categorical_column', 'weight_feature_key', 'dtype'
    ))):
  """See `weighted_categorical_column`."""

  @property
  def name(self):
    return '{}_weighted_by_{}'.format(
        self.categorical_column.name, self.weight_feature_key)

  @property
  def _parse_example_spec(self):
    # Extend the wrapped column's spec with the weight feature; the weight
    # key must not collide with an existing feature key.
    config = self.categorical_column._parse_example_spec  # pylint: disable=protected-access
    if self.weight_feature_key in config:
      raise ValueError('Parse config {} already exists for {}.'.format(
          config[self.weight_feature_key], self.weight_feature_key))
    config[self.weight_feature_key] = parsing_ops.VarLenFeature(self.dtype)
    return config

  @property
  def _num_buckets(self):
    return self.categorical_column._num_buckets  # pylint: disable=protected-access

  def _transform_feature(self, inputs):
    """Returns (id_tensor, float weight SparseTensor) for this column."""
    weight_tensor = inputs.get(self.weight_feature_key)
    if weight_tensor is None:
      raise ValueError('Missing weights {}.'.format(self.weight_feature_key))
    weight_tensor = sparse_tensor_lib.convert_to_tensor_or_sparse_tensor(
        weight_tensor)
    if self.dtype != weight_tensor.dtype.base_dtype:
      raise ValueError('Bad dtype, expected {}, but got {}.'.format(
          self.dtype, weight_tensor.dtype))
    if not isinstance(weight_tensor, sparse_tensor_lib.SparseTensor):
      # The weight tensor can be a regular Tensor. In this case, sparsify it.
      # Zero weights are treated as absent entries.
      weight_tensor = _to_sparse_input(weight_tensor, ignore_value=0.0)
    if not weight_tensor.dtype.is_floating:
      weight_tensor = math_ops.to_float(weight_tensor)
    return (inputs.get(self.categorical_column), weight_tensor)

  def _get_sparse_tensors(
      self, inputs, weight_collections=None, trainable=None):
    del weight_collections
    del trainable
    tensors = inputs.get(self)
    return _CategoricalColumn.IdWeightPair(tensors[0], tensors[1])
class _CrossedColumn(
    _CategoricalColumn,
    collections.namedtuple('_CrossedColumn',
                           ['keys', 'hash_bucket_size', 'hash_key'])):
  """See `crossed_column`."""

  @property
  def name(self):
    # Deterministic name: sorted leaf-level key names joined with '_X_'.
    feature_names = []
    for key in _collect_leaf_level_keys(self):
      if isinstance(key, _FeatureColumn):
        feature_names.append(key.name)
      else:  # key must be a string
        feature_names.append(key)
    return '_X_'.join(sorted(feature_names))

  @property
  def _parse_example_spec(self):
    config = {}
    for key in self.keys:
      if isinstance(key, _FeatureColumn):
        config.update(key._parse_example_spec)  # pylint: disable=protected-access
      else:  # key must be a string
        config.update({key: parsing_ops.VarLenFeature(dtypes.string)})
    return config

  def _transform_feature(self, inputs):
    """Hash-crosses the leaf-level features into one sparse id tensor."""
    feature_tensors = []
    for key in _collect_leaf_level_keys(self):
      if isinstance(key, six.string_types):
        feature_tensors.append(inputs.get(key))
      elif isinstance(key, _CategoricalColumn):
        ids_and_weights = key._get_sparse_tensors(inputs)  # pylint: disable=protected-access
        if ids_and_weights.weight_tensor is not None:
          raise ValueError(
              'crossed_column does not support weight_tensor, but the given '
              'column populates weight_tensor. '
              'Given column: {}'.format(key.name))
        feature_tensors.append(ids_and_weights.id_tensor)
      else:
        raise ValueError('Unsupported column type. Given: {}'.format(key))
    return sparse_ops._sparse_cross_hashed(  # pylint: disable=protected-access
        inputs=feature_tensors,
        num_buckets=self.hash_bucket_size,
        hash_key=self.hash_key)

  @property
  def _num_buckets(self):
    """Returns number of buckets in this sparse feature."""
    return self.hash_bucket_size

  def _get_sparse_tensors(self, inputs, weight_collections=None,
                          trainable=None):
    return _CategoricalColumn.IdWeightPair(inputs.get(self), None)
def _collect_leaf_level_keys(cross):
  """Collects base keys by expanding all nested crosses.

  Args:
    cross: A `_CrossedColumn`.

  Returns:
    A list of strings or `_CategoricalColumn` instances.
  """
  leaf_level_keys = []
  for k in cross.keys:
    if isinstance(k, _CrossedColumn):
      # Recurse: a cross of crosses flattens to its underlying leaf keys.
      leaf_level_keys.extend(_collect_leaf_level_keys(k))
    else:
      leaf_level_keys.append(k)
  return leaf_level_keys


# TODO(zakaria): Move this to embedding_ops and make it public.
def _safe_embedding_lookup_sparse(embedding_weights,
                                  sparse_ids,
                                  sparse_weights=None,
                                  combiner='mean',
                                  default_id=None,
                                  name=None,
                                  partition_strategy='div',
                                  max_norm=None):
  """Lookup embedding results, accounting for invalid IDs and empty features.

  The partitioned embedding in `embedding_weights` must all be the same shape
  except for the first dimension. The first dimension is allowed to vary as
  the vocabulary size is not necessarily a multiple of `P`.
  `embedding_weights` may be a `PartitionedVariable` as returned by using
  `tf.get_variable()` with a partitioner.

  Invalid IDs (< 0) are pruned from input IDs and weights, as well as any IDs
  with non-positive weight. For an entry with no features, the embedding
  vector for `default_id` is returned, or the 0-vector if `default_id` is not
  supplied.

  The ids and weights may be multi-dimensional. Embeddings are always
  aggregated along the last dimension.

  Args:
    embedding_weights: A list of `P` float `Tensor`s or values representing
      partitioned embedding `Tensor`s. Alternatively, a `PartitionedVariable`
      created by partitioning along dimension 0. The total unpartitioned shape
      should be `[e_0, e_1, ..., e_m]`, where `e_0` represents the vocab size
      and `e_1, ..., e_m` are the embedding dimensions.
    sparse_ids: `SparseTensor` of shape `[d_0, d_1, ..., d_n]` containing the
      ids. `d_0` is typically batch size.
    sparse_weights: `SparseTensor` of same shape as `sparse_ids`, containing
      float weights corresponding to `sparse_ids`, or `None` if all weights
      are be assumed to be 1.0.
    combiner: A string specifying how to combine embedding results for each
      entry. Currently "mean", "sqrtn" and "sum" are supported, with "mean"
      the default.
    default_id: The id to use for an entry with no features.
    name: A name for this operation (optional).
    partition_strategy: A string specifying the partitioning strategy.
      Currently `"div"` and `"mod"` are supported. Default is `"div"`.
    max_norm: If not `None`, all embeddings are l2-normalized to max_norm
      before combining.

  Returns:
    Dense `Tensor` of shape `[d_0, d_1, ..., d_{n-1}, e_1, ..., e_m]`.

  Raises:
    ValueError: if `embedding_weights` is empty.
  """
  if embedding_weights is None:
    raise ValueError('Missing embedding_weights %s.' % embedding_weights)
  if isinstance(embedding_weights, variables.PartitionedVariable):
    embedding_weights = list(embedding_weights)  # get underlying Variables.
  if not isinstance(embedding_weights, list):
    embedding_weights = [embedding_weights]
  if len(embedding_weights) < 1:
    raise ValueError('Missing embedding_weights %s.' % embedding_weights)

  dtype = sparse_weights.dtype if sparse_weights is not None else None
  embedding_weights = [
      ops.convert_to_tensor(w, dtype=dtype) for w in embedding_weights
  ]

  with ops.name_scope(name, 'embedding_lookup',
                      embedding_weights + [sparse_ids,
                                           sparse_weights]) as scope:
    # Reshape higher-rank sparse ids and weights to linear segment ids.
    original_shape = sparse_ids.dense_shape
    # NOTE(review): `.value` on a Dimension is TF1-era API; under TF2
    # `original_rank_dim` would be an int or None — confirm target version.
    original_rank_dim = sparse_ids.dense_shape.get_shape()[0]
    original_rank = (
        array_ops.size(original_shape)
        if original_rank_dim.value is None
        else original_rank_dim.value)
    # Collapse all leading dims into one so the lookup sees a rank-2 input.
    sparse_ids = sparse_ops.sparse_reshape(sparse_ids, [
        math_ops.reduce_prod(
            array_ops.slice(original_shape, [0], [original_rank - 1])),
        array_ops.gather(original_shape, original_rank - 1)])
    if sparse_weights is not None:
      sparse_weights = sparse_tensor_lib.SparseTensor(
          sparse_ids.indices,
          sparse_weights.values, sparse_ids.dense_shape)

    # Prune invalid ids and weights.
    sparse_ids, sparse_weights = _prune_invalid_ids(sparse_ids, sparse_weights)

    # Fill in dummy values for empty features, if necessary.
    sparse_ids, is_row_empty = sparse_ops.sparse_fill_empty_rows(sparse_ids,
                                                                 default_id or
                                                                 0)
    if sparse_weights is not None:
      sparse_weights, _ = sparse_ops.sparse_fill_empty_rows(sparse_weights, 1.0)

    result = embedding_ops.embedding_lookup_sparse(
        embedding_weights,
        sparse_ids,
        sparse_weights,
        combiner=combiner,
        partition_strategy=partition_strategy,
        name=None if default_id is None else scope,
        max_norm=max_norm)

    if default_id is None:
      # Broadcast is_row_empty to the same shape as embedding_lookup_result,
      # for use in Select.
      is_row_empty = array_ops.tile(
          array_ops.reshape(is_row_empty, [-1, 1]),
          array_ops.stack([1, array_ops.shape(result)[1]]))

      # Rows that had no ids were filled with dummy id 0 above; zero out their
      # looked-up embeddings here.
      result = array_ops.where(is_row_empty,
                               array_ops.zeros_like(result),
                               result,
                               name=scope)

    # Reshape back from linear ids back into higher-dimensional dense result.
    final_result = array_ops.reshape(
        result,
        array_ops.concat([
            array_ops.slice(
                math_ops.cast(original_shape, dtypes.int32), [0],
                [original_rank - 1]),
            array_ops.slice(array_ops.shape(result), [1], [-1])
        ], 0))
    final_result.set_shape(tensor_shape.unknown_shape(
        (original_rank_dim - 1).value).concatenate(result.get_shape()[1:]))
    return final_result
final_result = array_ops.reshape( result, array_ops.concat([ array_ops.slice( math_ops.cast(original_shape, dtypes.int32), [0], [original_rank - 1]), array_ops.slice(array_ops.shape(result), [1], [-1]) ], 0)) final_result.set_shape(tensor_shape.unknown_shape( (original_rank_dim - 1).value).concatenate(result.get_shape()[1:])) return final_result def _prune_invalid_ids(sparse_ids, sparse_weights): """Prune invalid IDs (< 0) from the input ids and weights.""" is_id_valid = math_ops.greater_equal(sparse_ids.values, 0) if sparse_weights is not None: is_id_valid = math_ops.logical_and( is_id_valid, math_ops.greater(sparse_weights.values, 0)) sparse_ids = sparse_ops.sparse_retain(sparse_ids, is_id_valid) if sparse_weights is not None: sparse_weights = sparse_ops.sparse_retain(sparse_weights, is_id_valid) return sparse_ids, sparse_weights class _IndicatorColumn(_DenseColumn, collections.namedtuple('_IndicatorColumn', ['categorical_column'])): """Represents a one-hot column for use in deep networks. Args: categorical_column: A `_CategoricalColumn` which is created by `categorical_column_with_*` function. """ @property def name(self): return '{}_indicator'.format(self.categorical_column.name) def _transform_feature(self, inputs): """Returns dense `Tensor` representing feature. Args: inputs: A `_LazyBuilder` object to access inputs. Returns: Transformed feature `Tensor`. """ id_weight_pair = self.categorical_column._get_sparse_tensors(inputs) # pylint: disable=protected-access id_tensor = id_weight_pair.id_tensor weight_tensor = id_weight_pair.weight_tensor # If the underlying column is weighted, return the input as a dense tensor. 
if weight_tensor is not None: weighted_column = sparse_ops.sparse_merge( sp_ids=id_tensor, sp_values=weight_tensor, vocab_size=self._variable_shape[-1]) return sparse_ops.sparse_tensor_to_dense(weighted_column) dense_id_tensor = sparse_ops.sparse_tensor_to_dense( id_tensor, default_value=-1) # One hot must be float for tf.concat reasons since all other inputs to # input_layer are float32. one_hot_id_tensor = array_ops.one_hot( dense_id_tensor, depth=self._variable_shape[-1], on_value=1.0, off_value=0.0) # Reduce to get a multi-hot per example. return math_ops.reduce_sum(one_hot_id_tensor, axis=[1]) @property def _parse_example_spec(self): return self.categorical_column._parse_example_spec # pylint: disable=protected-access @property def _variable_shape(self): """Returns a `TensorShape` representing the shape of the dense `Tensor`.""" return tensor_shape.TensorShape([1, self.categorical_column._num_buckets]) # pylint: disable=protected-access def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None): """Returns dense `Tensor` representing feature. Args: inputs: A `_LazyBuilder` object to access inputs. weight_collections: Unused `weight_collections` since no variables are created in this function. trainable: Unused `trainable` bool since no variables are created in this function. Returns: Dense `Tensor` created within `_transform_feature`. """ # Do nothing with weight_collections and trainable since no variables are # created in this function. del weight_collections del trainable # Feature has been already transformed. Return the intermediate # representation created by _transform_feature. return inputs.get(self) def _verify_static_batch_size_equality(tensors, columns): # bath_size is a tf.Dimension object. 
expected_batch_size = None for i in range(0, len(tensors)): if tensors[i].shape[0].value is not None: if expected_batch_size is None: bath_size_column_index = i expected_batch_size = tensors[i].shape[0] elif not expected_batch_size.is_compatible_with(tensors[i].shape[0]): raise ValueError( 'Batch size (first dimension) of each feature must be same. ' 'Batch size of columns ({}, {}): ({}, {})'.format( columns[bath_size_column_index].name, columns[i].name, expected_batch_size, tensors[i].shape[0]))
Bisaha/foundations-homeworks
refs/heads/master
01/intro.py
3
# Introductory Python exercises: printing, string concatenation, and input().
print ("Hello world!")
print ('Hello world!')
#can add notes
print (10)
print(10 + 10)
# String concatenation joins the operands with no separator: "helloworld!"
print("hello" + "world!")
# Numbers must be converted with str() before concatenating to a string.
print ("Hello" + str(10))
name = "Mr. Soma"
print ("Hello, " + name)
# input() always returns a string, even when the user types digits.
name = input("What's your name?")
year_of_birth = input("What year were you born?")
# Hard-coded reference year; int() converts the typed year for arithmetic.
age = 2016 - int(year_of_birth)
# NOTE(review): `age` is computed but never printed — presumably intentional
# for this exercise; confirm.
print ("Hello, " + name)
bwrsandman/OpenUpgrade
refs/heads/8.0
addons/procurement_jit_stock/__openerp__.py
241
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## { 'name': 'Just In Time Scheduling with Stock', 'version': '1.0', 'category': 'Base', 'description': """ If you install this module, it can make sure that not only the ship of pick-pack-ship will be created in batch, but the pick and the pack also. (which will dramatically improve performance) Will be removed from Saas-6 and will be put in procurement_jit over there, where procurement_jit will depend on stock """, 'author': 'OpenERP SA', 'website': 'https://www.odoo.com/page/manufacturing', 'depends': ['procurement_jit', 'stock'], 'data': [], 'demo': [], 'test': [], 'installable': True, 'auto_install': True, } # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
shinfan/api-client-staging
refs/heads/master
generated/python/gapic-google-cloud-spanner-admin-database-v1/setup.py
7
"""A setup module for the GAPIC Cloud Spanner Database Admin API library.

See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""

from setuptools import setup, find_packages
# NOTE(review): `sys` is unused in this file, but kept — removing a
# module-level import could break tools that exec this setup script.
import sys

# Runtime dependencies, pinned to the compatible GAX/proto release windows.
install_requires = [
    'google-gax>=0.15.7, <0.16dev',
    'oauth2client>=2.0.0, <4.0dev',
    'proto-google-cloud-spanner-admin-database-v1[grpc]>=0.15.4, <0.16dev',
    'googleapis-common-protos[grpc]>=1.5.2, <2.0dev',
    'grpc-google-iam-v1>=0.11.1, <0.12dev',
]

setup(
    name='gapic-google-cloud-spanner-admin-database-v1',
    version='0.15.4',
    author='Google Inc',
    author_email='googleapis-packages@google.com',
    classifiers=[
        # Fix: 'Intended Audience :: Developers' was listed twice; one copy
        # removed (duplicate trove classifiers are redundant on PyPI).
        'Intended Audience :: Developers',
        'Development Status :: 3 - Alpha',
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: Implementation :: CPython',
    ],
    description='GAPIC library for the Cloud Spanner Database Admin API',
    include_package_data=True,
    long_description=open('README.rst').read(),
    install_requires=install_requires,
    license='Apache-2.0',
    packages=find_packages(),
    # Namespace packages let the google.cloud.gapic.* tree be split across
    # multiple distributions.
    namespace_packages=[
        'google', 'google.cloud', 'google.cloud.gapic',
        'google.cloud.gapic.spanner_admin_database'
    ],
    url='https://github.com/googleapis/googleapis')
johnkit/vtk-dev
refs/heads/master
IO/EnSight/Testing/Python/EnSightSelectArrays.py
20
#!/usr/bin/env python import vtk from vtk.test import Testing from vtk.util.misc import vtkGetDataRoot VTK_DATA_ROOT = vtkGetDataRoot() # create a rendering window and renderer ren1 = vtk.vtkRenderer() renWin = vtk.vtkRenderWindow() renWin.AddRenderer(ren1) renWin.StereoCapableWindowOn() iren = vtk.vtkRenderWindowInteractor() iren.SetRenderWindow(renWin) reader = vtk.vtkGenericEnSightReader() # Make sure all algorithms use the composite data pipeline cdp = vtk.vtkCompositeDataPipeline() reader.SetDefaultExecutivePrototype(cdp) reader.SetCaseFileName("" + str(VTK_DATA_ROOT) + "/Data/EnSight/blow1_ascii.case") reader.SetTimeValue(1) reader.ReadAllVariablesOff() reader.SetPointArrayStatus("displacement",1) reader.SetCellArrayStatus("thickness",1) reader.SetCellArrayStatus("displacement",1) geom = vtk.vtkGeometryFilter() geom.SetInputConnection(reader.GetOutputPort()) mapper = vtk.vtkHierarchicalPolyDataMapper() mapper.SetInputConnection(geom.GetOutputPort()) mapper.SetScalarRange(0.5,1.0) actor = vtk.vtkActor() actor.SetMapper(mapper) # assign our actor to the renderer ren1.AddActor(actor) # enable user interface interactor iren.Initialize() ren1.GetActiveCamera().SetPosition(99.3932,17.6571,-22.6071) ren1.GetActiveCamera().SetFocalPoint(3.5,12,1.5) ren1.GetActiveCamera().SetViewAngle(30) ren1.GetActiveCamera().SetViewUp(0.239617,-0.01054,0.97081) ren1.ResetCameraClippingRange() renWin.Render() # prevent the tk window from showing up then start the event loop reader.SetDefaultExecutivePrototype(None) # --- end of script --
MartinHjelmare/home-assistant
refs/heads/dev
homeassistant/components/russound_rnet/__init__.py
36
"""The russound_rnet component."""
sovaa/sire
refs/heads/master
sire/shared.py
1307
pass
jeanlinux/calibre
refs/heads/master
src/calibre/ebooks/oeb/polish/embed.py
14
#!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:fdm=marker:ai
# Embeds the fonts actually used by a book's HTML into the book itself and
# generates the matching @font-face CSS.  Python 2 code (basestring,
# iteritems); NOTE(review): `icu_lower` and `_` appear to be calibre
# builtins injected globally — confirm against calibre's startup code.
from __future__ import (unicode_literals, division, absolute_import,
                        print_function)

__license__ = 'GPL v3'
__copyright__ = '2013, Kovid Goyal <kovid at kovidgoyal.net>'
__docformat__ = 'restructuredtext en'

import sys

from lxml import etree

from calibre import prints
from calibre.ebooks.oeb.base import XHTML
from calibre.ebooks.oeb.polish.stats import normalize_font_properties
from calibre.utils.filenames import ascii_filename

# Font properties (with their CSS defaults) from which @font-face rules are
# assembled; 'font-family' has no default and must always come from the font.
props = {'font-family':None, 'font-weight':'normal', 'font-style':'normal',
    'font-stretch':'normal'}


def matching_rule(font, rules):
    # Return the first rule in `rules` whose family, weight, style and
    # stretch all match `font`; falls through (returns None) when none match.
    ff = font['font-family']
    if not isinstance(ff, basestring):
        # font-family may be a list/tuple of families; compare on the first.
        ff = tuple(ff)[0]
    family = icu_lower(ff)
    wt = font['font-weight']
    style = font['font-style']
    stretch = font['font-stretch']
    for rule in rules:
        if rule['font-style'] == style and rule['font-stretch'] == stretch and rule['font-weight'] == wt:
            ff = rule['font-family']
            if not isinstance(ff, basestring):
                ff = tuple(ff)[0]
            if icu_lower(ff) == family:
                return rule


def embed_font(container, font, all_font_rules, report, warned):
    # Embed the font file matching `font` into `container` and return the
    # @font-face rule dict for it (or None on failure).  `warned` is a set
    # used to report each failure only once.
    rule = matching_rule(font, all_font_rules)
    ff = font['font-family']
    if not isinstance(ff, basestring):
        ff = ff[0]
    if rule is None:
        # Font not present in the book: look it up on the host system.
        from calibre.utils.fonts.scanner import font_scanner, NoFonts
        if ff in warned:
            return
        try:
            fonts = font_scanner.fonts_for_family(ff)
        except NoFonts:
            report(_('Failed to find fonts for family: %s, not embedding') % ff)
            warned.add(ff)
            return
        wt = int(font.get('font-weight', '400'))
        for f in fonts:
            if f['weight'] == wt and f['font-style'] == font.get('font-style', 'normal') and f['font-stretch'] == font.get('font-stretch', 'normal'):
                report('Embedding font %s from %s' % (f['full_name'], f['path']))
                data = font_scanner.get_font_data(f)
                fname = f['full_name']
                ext = 'otf' if f['is_otf'] else 'ttf'
                # Sanitize the face name into a safe file name.
                fname = ascii_filename(fname).replace(' ', '-').replace('(', '').replace(')', '')
                item = container.generate_item('fonts/%s.%s'%(fname, ext), id_prefix='font')
                name = container.href_to_name(item.get('href'), container.opf_name)
                with container.open(name, 'wb') as out:
                    out.write(data)
                href = container.name_to_href(name)
                rule = {k:f.get(k, v) for k, v in props.iteritems()}
                rule['src'] = 'url(%s)' % href
                rule['name'] = name
                return rule
        msg = _('Failed to find font matching: family: %(family)s; weight: %(weight)s; style: %(style)s; stretch: %(stretch)s') % dict(
            family=ff, weight=font['font-weight'], style=font['font-style'], stretch=font['font-stretch'])
        if msg not in warned:
            warned.add(msg)
            report(msg)
    else:
        # Font already embedded in the book: reuse the existing rule.
        # NOTE(review): 'src' here appears to hold the resource *name* from
        # the stats rules, which is then rewritten into a url() — confirm
        # against StatsCollector's rule format.
        name = rule['src']
        href = container.name_to_href(name)
        rule = {k:ff if k == 'font-family' else rule.get(k, v) for k, v in props.iteritems()}
        rule['src'] = 'url(%s)' % href
        rule['name'] = name
        return rule


def embed_all_fonts(container, stats, report):
    # Walk the spine, embed every used-but-not-embedded font, write a
    # fonts.css with the @font-face rules and link it from the modified
    # files.  Returns True if anything was embedded, False otherwise.
    all_font_rules = tuple(stats.all_font_rules.itervalues())
    warned = set()
    rules, nrules = [], []
    modified = set()
    for path in container.spine_items:
        name = container.abspath_to_name(path)
        fu = stats.font_usage_map.get(name, None)
        fs = stats.font_spec_map.get(name, None)
        fr = stats.font_rule_map.get(name, None)
        if None in (fs, fu, fr):
            continue
        fs = {icu_lower(x) for x in fs}
        for font in fu.itervalues():
            if icu_lower(font['font-family']) not in fs:
                continue
            rule = matching_rule(font, fr)
            if rule is None:
                # This font was not already embedded in this HTML file, before
                # processing started
                rule = matching_rule(font, nrules)
                if rule is None:
                    rule = embed_font(container, font, all_font_rules, report, warned)
                    if rule is not None:
                        rules.append(rule)
                        nrules.append(normalize_font_properties(rule.copy()))
                        modified.add(name)
                        stats.font_stats[rule['name']] = font['text']
                else:
                    # This font was previously embedded by this code, update its stats
                    stats.font_stats[rule['name']] |= font['text']
                    modified.add(name)
    if not rules:
        report(_('No embeddable fonts found'))
        return False

    # Write out CSS
    # Serialize each rule: keep 'src' plus every property that differs from
    # its default ('400' is treated as the default weight).
    rules = [';\n\t'.join('%s: %s' % (
        k, '"%s"' % v if k == 'font-family' else v) for k, v in rulel.iteritems() if (k in props and props[k] != v and v != '400') or k == 'src')
        for rulel in rules]
    css = '\n\n'.join(['@font-face {\n\t%s\n}' % r for r in rules])
    item = container.generate_item('fonts.css', id_prefix='font_embed')
    name = container.href_to_name(item.get('href'), container.opf_name)
    with container.open(name, 'wb') as out:
        out.write(css.encode('utf-8'))

    # Add link to CSS in all files that need it
    for spine_name in modified:
        root = container.parsed(spine_name)
        head = root.xpath('//*[local-name()="head"][1]')[0]
        href = container.name_to_href(name, spine_name)
        etree.SubElement(head, XHTML('link'), rel='stylesheet', type='text/css',
                         href=href).tail = '\n'
        container.dirty(spine_name)
    return True

if __name__ == '__main__':
    # CLI driver: embed fonts into the book given as the last argument and
    # write the result next to it with a '_subset' suffix.
    from calibre.ebooks.oeb.polish.container import get_container
    from calibre.ebooks.oeb.polish.stats import StatsCollector
    from calibre.utils.logging import default_log
    default_log.filter_level = default_log.DEBUG
    inbook = sys.argv[-1]
    ebook = get_container(inbook, default_log)
    report = []
    stats = StatsCollector(ebook, do_embed=True)
    embed_all_fonts(ebook, stats, report.append)
    outbook, ext = inbook.rpartition('.')[0::2]
    outbook += '_subset.'+ext
    ebook.commit(outbook)
    prints('\nReport:')
    for msg in report:
        prints(msg)
    print()
    prints('Output written to:', outbook)
voytekresearch/neurodsp
refs/heads/master
neurodsp/tests/sim/test_info.py
1
"""Tests for simulation info functions."""

from pytest import raises

from neurodsp.sim.info import *

###################################################################################################
###################################################################################################

def test_get_sim_funcs():

    for module in SIM_MODULES:
        funcs = get_sim_funcs(module)
        assert isinstance(funcs, dict)

    # Check the error for requesting a non-existing module.
    # Fix: this previously called `get_sim_func('bad_mod')`, so the error
    # path of `get_sim_funcs` (the function under test) was never exercised.
    with raises(ValueError):
        get_sim_funcs('bad_mod')

def test_get_sim_names():

    for module in SIM_MODULES:
        names = get_sim_names(module)
        assert isinstance(names, list)

def test_get_sim_func():

    # Check a successful function request — the result should be callable.
    # (Previously the return value was assigned but never checked.)
    func = get_sim_func('sim_oscillation')
    assert callable(func)

    # Check the error for requesting non-existing function
    with raises(ValueError):
        get_sim_func('bad_func')
apocalypsebg/odoo
refs/heads/8.0
addons/gamification/tests/__init__.py
268
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Business Applications # Copyright (c) 2013 OpenERP S.A. <http://openerp.com> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from . import test_challenge
bogdandrutu/grpc
refs/heads/master
src/python/grpcio_tests/tests/interop/_secure_interop_test.py
3
# Copyright 2015, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the name of Google Inc. nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
"""Secure client-server interoperability as a unit test."""

import unittest

from grpc.beta import implementations
from src.proto.grpc.testing import test_pb2

from tests.interop import _interop_test_case
from tests.interop import methods
from tests.interop import resources
from tests.unit.beta import test_utilities

# Host name asserted against the server's TLS certificate; the test
# certificates are presumably issued for this name rather than "localhost"
# (see tests.interop.resources — confirm).
_SERVER_HOST_OVERRIDE = 'foo.test.google.fr'


class SecureInteropTest(
    _interop_test_case.InteropTestCase,
    unittest.TestCase):
  """Runs the interop cases from InteropTestCase over a TLS-secured channel.

  This class defines no test methods of its own; it only supplies the
  secure server/stub fixture via setUp/tearDown.
  """

  def setUp(self):
    # Start an in-process TLS server on an OS-assigned port ('[::]:0').
    self.server = test_pb2.beta_create_TestService_server(methods.TestService())
    port = self.server.add_secure_port(
        '[::]:0', implementations.ssl_server_credentials(
            [(resources.private_key(), resources.certificate_chain())]))
    self.server.start()
    # Connect a stub that trusts the test root CA, overriding the target
    # name so certificate validation succeeds when dialing localhost.
    self.stub = test_pb2.beta_create_TestService_stub(
        test_utilities.not_really_secure_channel(
            'localhost', port, implementations.ssl_channel_credentials(
                resources.test_root_certificates()), _SERVER_HOST_OVERRIDE))

  def tearDown(self):
    # Stop immediately: grace period of 0 seconds.
    self.server.stop(0)


if __name__ == '__main__':
  unittest.main(verbosity=2)
bansalrajnikant/djime
refs/heads/master
djime/statistics/views.py
1
import datetime
import re  # was missing: user_billing() calls re.match and raised NameError
from math import floor

from django.contrib.auth.decorators import login_required
from django.http import *
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from djime.statistics.forms import DateSelectionForm
from teams.models import Team
from djime.models import TimeSlice
import djime.statistics.flashcharts as flashcharts
from django.utils.translation import ugettext as _
from django.contrib.auth.models import User
from exceptions import ValueError


def _forbid_non_member(request, team):
    """Return an 'Access denied' response if request.user is not a member of
    team, otherwise None."""
    member_ids = [member.id for member in team.members.all()]
    if request.user.id not in member_ids:
        return HttpResponseForbidden(_('Access denied'))
    return None


def _date_range_error(start_date, end_date):
    """Validate a yyyy-mm-dd string range of 1..59 days.

    Returns an error HttpResponse for a bad range, or None when valid.
    """
    s_date = start_date.split('-')
    e_date = end_date.split('-')
    try:
        date_diff = (datetime.date(int(e_date[0]), int(e_date[1]), int(e_date[2]))
                     - datetime.date(int(s_date[0]), int(s_date[1]), int(s_date[2])))
    except ValueError:
        return HttpResponse(_('Invalid date, must be yyyy-mm-dd'))
    if not (datetime.timedelta(days=0) < date_diff < datetime.timedelta(days=60)):
        return HttpResponse(_('Invalid date, min 1 day and max 60 days'))
    return None


def _aggregate_slices(slice_set):
    """Group time slices by project, then by slip, accumulating durations.

    Returns {project: {'project': project, 'duration': seconds,
                       'slips': {slip: [slip, seconds]}}}.
    """
    project_dict = {}
    for time_slice in slice_set:
        project = time_slice.slip.project
        if project not in project_dict:
            project_dict[project] = {
                'slips': {time_slice.slip: [time_slice.slip, time_slice.duration]},
                'duration': time_slice.duration,
                'project': project,
            }
        else:
            slips = project_dict[project]['slips']
            if time_slice.slip not in slips:
                slips[time_slice.slip] = [time_slice.slip, time_slice.duration]
            else:
                slips[time_slice.slip][1] += time_slice.duration
            project_dict[project]['duration'] += time_slice.duration
    return project_dict


def _format_durations(project_dict):
    """Convert the accumulated second counts in project_dict to 'HH:MM', in place."""
    def hhmm(seconds):
        return '%02i:%02i' % (floor(seconds / 3600), floor(seconds % 3600) / 60)
    for key in project_dict.keys():
        project_dict[key]['duration'] = hhmm(project_dict[key]['duration'])
        for key_slip in project_dict[key]['slips'].keys():
            project_dict[key]['slips'][key_slip][1] = hhmm(
                project_dict[key]['slips'][key_slip][1])


@login_required()
def index(request):
    """Landing page for the statistics section."""
    return render_to_response('statistics/index.html', {},
                              context_instance=RequestContext(request))


@login_required()
def display_user_week(request, user_id, year, week):
    """Weekly statistics page; users may only view their own statistics."""
    if int(request.user.id) != int(user_id):
        return HttpResponseForbidden(_('Access denied'))
    return render_to_response('statistics/display_user_week.html',
                              {'week': week, 'year': year, 'user_id': user_id},
                              context_instance=RequestContext(request))


@login_required()
def display_user_month(request, user_id, year, month):
    """Monthly statistics page; users may only view their own statistics."""
    if int(request.user.id) != int(user_id):
        return HttpResponseForbidden(_('Access denied'))
    return render_to_response('statistics/display_user_month.html',
                              {'month': month, 'year': year, 'user_id': user_id},
                              context_instance=RequestContext(request))


@login_required()
def user_date_selection_form(request, user_id):
    """Show (GET) or process (POST) the per-user date-range selection form."""
    if request.method not in ('POST', 'GET'):
        return HttpResponseNotAllowed('POST', 'GET')
    if request.method == 'GET':
        form = DateSelectionForm()
        return render_to_response('statistics/user_date_selection.html',
                                  {'user_id': user_id, 'form': form},
                                  context_instance=RequestContext(request))
    form = DateSelectionForm(request.POST)
    if form.is_valid():
        start = form.cleaned_data['start']
        end = form.cleaned_data['end']
        return HttpResponseRedirect('/statistics/user/%s/date/%s/%s/'
                                    % (user_id, start, end))
    return render_to_response('statistics/user_date_selection.html',
                              {'user_id': user_id, 'form': form},
                              context_instance=RequestContext(request))


@login_required()
def display_user_date_selection(request, user_id, start_date, end_date):
    """Statistics for an arbitrary validated date range of the current user."""
    if int(request.user.id) != int(user_id):
        return HttpResponseForbidden('Access denied')
    error = _date_range_error(start_date, end_date)
    if error is not None:
        return error
    return render_to_response('statistics/display_user_date.html',
                              {'user_id': user_id, 'start_date': start_date,
                               'end_date': end_date},
                              context_instance=RequestContext(request))


@login_required()
def display_team_week(request, team_id, year, week):
    """Weekly team statistics; only team members may view them."""
    team = get_object_or_404(Team, pk=int(team_id))
    forbidden = _forbid_non_member(request, team)
    if forbidden is not None:
        return forbidden
    return render_to_response('statistics/display_team_week.html',
                              {'week': week, 'year': year, 'team_id': team_id,
                               'team': team},
                              context_instance=RequestContext(request))


@login_required()
def display_team_month(request, team_id, year, month):
    """Monthly team statistics; only team members may view them."""
    team = get_object_or_404(Team, pk=int(team_id))
    forbidden = _forbid_non_member(request, team)
    if forbidden is not None:
        return forbidden
    return render_to_response('statistics/display_team_month.html',
                              {'month': month, 'year': year, 'team_id': team_id,
                               'team': team},
                              context_instance=RequestContext(request))


@login_required()
def team_date_selection_form(request, team_id):
    """Show (GET) or process (POST) the team date-range selection form.

    Fix: `team` was referenced in the template context but never fetched,
    which raised NameError on every request.
    """
    if request.method not in ('POST', 'GET'):
        return HttpResponseNotAllowed('POST', 'GET')
    team = get_object_or_404(Team, pk=int(team_id))
    if request.method == 'GET':
        form = DateSelectionForm()
        return render_to_response('statistics/team_date_selection.html',
                                  {'team_id': team_id, 'form': form, 'team': team},
                                  context_instance=RequestContext(request))
    form = DateSelectionForm(request.POST)
    if form.is_valid():
        start = form.cleaned_data['start']
        end = form.cleaned_data['end']
        return HttpResponseRedirect('/statistics/team/%s/date/%s/%s/'
                                    % (team_id, start, end))
    return render_to_response('statistics/team_date_selection.html',
                              {'team_id': team_id, 'form': form, 'team': team},
                              context_instance=RequestContext(request))


@login_required()
def display_team_date_selection(request, team_id, start_date, end_date):
    """Team statistics for an arbitrary validated date range (members only)."""
    team = get_object_or_404(Team, pk=int(team_id))
    forbidden = _forbid_non_member(request, team)
    if forbidden is not None:
        return forbidden
    error = _date_range_error(start_date, end_date)
    if error is not None:
        return error
    # (The original context listed 'team' twice; once is enough.)
    return render_to_response('statistics/display_team_date.html',
                              {'team_id': team_id, 'start_date': start_date,
                               'end_date': end_date, 'team': team},
                              context_instance=RequestContext(request))


@login_required()
def display_team_stat_week(request, team_id, week, year):
    """Weekly per-member team statistics; only team members may view them."""
    team = get_object_or_404(Team, pk=int(team_id))
    forbidden = _forbid_non_member(request, team)
    if forbidden is not None:
        return forbidden
    return render_to_response('statistics/display_team_stat_week.html',
                              {'week': week, 'year': year, 'team_id': team_id,
                               'team': team},
                              context_instance=RequestContext(request))


@login_required()
def display_team_stat_month(request, team_id, month, year):
    """Monthly per-member team statistics; only team members may view them."""
    team = get_object_or_404(Team, pk=int(team_id))
    forbidden = _forbid_non_member(request, team)
    if forbidden is not None:
        return forbidden
    return render_to_response('statistics/display_team_stat_month.html',
                              {'month': month, 'year': year, 'team_id': team_id,
                               'team': team},
                              context_instance=RequestContext(request))


@login_required()
def team_stat_date_selection_form(request, team_id):
    """Show (GET) or process (POST) the team-stat date-range selection form.

    Fix: `team` was referenced in the template context but never fetched,
    which raised NameError on every request.
    """
    if request.method not in ('POST', 'GET'):
        return HttpResponseNotAllowed('POST', 'GET')
    team = get_object_or_404(Team, pk=int(team_id))
    if request.method == 'GET':
        form = DateSelectionForm()
        return render_to_response('statistics/team_stat_date_selection.html',
                                  {'team_id': team_id, 'form': form, 'team': team},
                                  context_instance=RequestContext(request))
    form = DateSelectionForm(request.POST)
    if form.is_valid():
        start = form.cleaned_data['start']
        end = form.cleaned_data['end']
        return HttpResponseRedirect('/statistics/team_stat/%s/date/%s/%s/'
                                    % (team_id, start, end))
    return render_to_response('statistics/team_stat_date_selection.html',
                              {'team_id': team_id, 'form': form, 'team': team},
                              context_instance=RequestContext(request))


@login_required()
def display_team_stat_date_selection(request, team_id, start_date, end_date):
    """Per-member team statistics for a validated date range (members only)."""
    team = get_object_or_404(Team, pk=int(team_id))
    forbidden = _forbid_non_member(request, team)
    if forbidden is not None:
        return forbidden
    error = _date_range_error(start_date, end_date)
    if error is not None:
        return error
    return render_to_response('statistics/display_team_stat_date.html',
                              {'team_id': team_id, 'start_date': start_date,
                               'end_date': end_date, 'team': team},
                              context_instance=RequestContext(request))


def data_user_week(request, week, year, user_id):
    """JSON chart data for the current user's week."""
    if request.method != 'GET':
        return HttpResponseNotAllowed('GET')
    return HttpResponse(flashcharts.user_week_json(request.user, int(week), int(year)))


def data_user_month(request, month, year, user_id):
    """JSON chart data for the current user's month."""
    if request.method != 'GET':
        return HttpResponseNotAllowed('GET')
    return HttpResponse(flashcharts.user_month_json(request.user, int(month), int(year)))


def data_user_date(request, user_id, start_date, end_date):
    """JSON chart data for the current user's arbitrary date range."""
    return HttpResponse(flashcharts.user_date_json(request.user, start_date, end_date))


def data_team_week(request, week, year, team_id):
    """JSON chart data for a team's week."""
    if request.method != 'GET':
        return HttpResponseNotAllowed('GET')
    team = get_object_or_404(Team, pk=int(team_id))
    return HttpResponse(flashcharts.team_week_json(team, int(week), int(year)))


def data_team_month(request, month, year, team_id):
    """JSON chart data for a team's month."""
    if request.method != 'GET':
        return HttpResponseNotAllowed('GET')
    team = get_object_or_404(Team, pk=int(team_id))
    return HttpResponse(flashcharts.team_month_json(team, int(month), int(year)))


def data_team_date(request, team_id, start_date, end_date):
    """JSON chart data for a team's arbitrary date range."""
    team = get_object_or_404(Team, pk=int(team_id))
    return HttpResponse(flashcharts.team_date_json(team, start_date, end_date))


def data_team_stat_week(request, team_id, week, year):
    """JSON per-member chart data for a team's week."""
    team = get_object_or_404(Team, pk=int(team_id))
    return HttpResponse(flashcharts.team_stat_week_json(team, int(week), int(year)))


def data_team_stat_month(request, team_id, month, year):
    """JSON per-member chart data for a team's month."""
    team = get_object_or_404(Team, pk=int(team_id))
    return HttpResponse(flashcharts.team_stat_month_json(team, int(month), int(year)))


def data_team_stat_date(request, team_id, start_date, end_date):
    """JSON per-member chart data for a team's arbitrary date range."""
    team = get_object_or_404(Team, pk=int(team_id))
    return HttpResponse(flashcharts.team_stat_date_json(team, start_date, end_date))


@login_required()
def billing_index(request):
    """List all users for billing lookup."""
    users = User.objects.all()
    return render_to_response('statistics/billing_index.html', {'users': users},
                              context_instance=RequestContext(request))


@login_required()
def user_billing(request, user_id):
    """Ask for a billing period: a start date + week count, or an explicit range."""
    user = get_object_or_404(User, pk=user_id)
    if request.method == 'GET':
        return render_to_response('statistics/billing_time_page.html',
                                  {'sellected_user': user,
                                   'form': DateSelectionForm()},
                                  context_instance=RequestContext(request))
    elif request.method == 'POST':
        if request.POST.has_key('number-of-weeks'):
            date = request.POST['start-date']
            number_of_weeks = request.POST['number-of-weeks']
            if not re.match("[0-9]{4}[-]{1}[0-9]{2}[-]{1}[0-9]{2}$",
                            request.POST['start-date']):
                request.user.message_set.create(
                    message=_("Invalid date format, must be yyyy-mm-dd."))
                return render_to_response('statistics/billing_time_page.html',
                                          {'sellected_user': user,
                                           'form': DateSelectionForm()},
                                          context_instance=RequestContext(request))
            # (Removed an unreachable bare `raise` that followed this return.)
            return HttpResponseRedirect('/statistics/billing/%s/week/%s/%s/'
                                        % (user_id, date, number_of_weeks))
        elif request.POST.has_key('date'):
            form = DateSelectionForm(request.POST)
            if form.is_valid():
                start = form.cleaned_data['start']
                end = form.cleaned_data['end']
                return HttpResponseRedirect('/statistics/billing/%s/date/%s/%s/'
                                            % (user_id, start, end))
            return render_to_response('statistics/billing_time_page.html',
                                      {'sellected_user': user, 'form': form},
                                      context_instance=RequestContext(request))


@login_required()
def user_billing_weeks(request, user_id, date, number_of_weeks):
    """Billing summary for `number_of_weeks` weeks starting at `date` (yyyy-mm-dd)."""
    user = get_object_or_404(User, pk=user_id)
    # number_of_weeks arrives as a URL string; under Python 2 the old
    # `str > int` comparison was always true, forcing every request to 4 weeks.
    number_of_weeks = int(number_of_weeks)
    if number_of_weeks > 5:
        number_of_weeks = 4
    date_list = date.split('-')
    try:
        start_date = datetime.date(int(date_list[0]), int(date_list[1]),
                                   int(date_list[2]))
    except ValueError:
        return HttpResponse(_('Invalid date, must be yyyy-mm-dd'))
    end_date = start_date + datetime.timedelta(days=number_of_weeks * 7)
    slice_set = TimeSlice.objects.filter(user=user,
                                         begin__range=(start_date, end_date))
    project_dict = _aggregate_slices(slice_set)
    _format_durations(project_dict)
    return render_to_response('statistics/billing_page.html',
                              {'user': user, 'start_date': start_date,
                               'end_date': end_date, 'project_dict': project_dict},
                              context_instance=RequestContext(request))


def user_billing_date(request, user_id, start_date, end_date):
    """Billing summary for an explicit date range (yyyy-mm-dd strings)."""
    user = get_object_or_404(User, pk=user_id)
    try:
        start_date = datetime.datetime.strptime(start_date, '%Y-%m-%d').date()
        end_date = datetime.datetime.strptime(end_date, '%Y-%m-%d').date()
    except ValueError:
        return HttpResponse('Invalid dateformat, must be yyyy-mm-dd')
    slice_set = TimeSlice.objects.filter(user=user,
                                         begin__range=(start_date, end_date))
    project_dict = _aggregate_slices(slice_set)
    _format_durations(project_dict)
    return render_to_response('statistics/billing_page.html',
                              {'user': user, 'start_date': start_date,
                               'end_date': end_date, 'project_dict': project_dict},
                              context_instance=RequestContext(request))
christchron/or-tools
refs/heads/master
examples/python/subset_sum.py
32
# Copyright 2010 Hakan Kjellerstrand hakank@bonetmail.com # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Subset sum problem in Google CP Solver. From Katta G. Murty: 'Optimization Models for Decision Making', page 340 http://ioe.engin.umich.edu/people/fac/books/murty/opti_model/junior-7.pdf ''' Example 7.8.1 A bank van had several bags of coins, each containing either 16, 17, 23, 24, 39, or 40 coins. While the van was parked on the street, thieves stole some bags. A total of 100 coins were lost. It is required to find how many bags were stolen. ''' Compare with the following models: * Comet: http://www.hakank.org/comet/subset_sum.co * ECLiPSE: http://www.hakank.org/eclipse/subset_sum.ecl * Gecode: http://www.hakank.org/gecode/subset_sum.cpp * MiniZinc: http://www.hakank.org/minizinc/subset_sum.mzn * Tailor/Essence': http://www.hakank.org/tailor/subset_sum.py * SICStus: http://hakank.org/sicstus/subset_sum.pl This model was created by Hakan Kjellerstrand (hakank@bonetmail.com) Also see my other Google CP Solver models: http://www.hakank.org/google_or_tools/ """ import string import sys from ortools.constraint_solver import pywrapcp def subset_sum(solver, values, total): n = len(values) x = [solver.IntVar(0, n) for i in range(n)] ss = solver.IntVar(0, n) solver.Add(ss == solver.Sum(x)) solver.Add(total == solver.ScalProd(x, values)) return x, ss def main(coins, total): # Create the solver. 
solver = pywrapcp.Solver("n-queens") # # data # print "coins:", coins print "total:", total print # # declare variables # # # constraints # x, ss = subset_sum(solver, coins, total) # # solution and search # solution = solver.Assignment() solution.Add(x) solution.Add(ss) # db: DecisionBuilder db = solver.Phase(x, solver.CHOOSE_FIRST_UNBOUND, solver.ASSIGN_MIN_VALUE) solver.NewSearch(db) num_solutions = 0 while solver.NextSolution(): print "ss:", ss.Value() print "x: ", [x[i].Value() for i in range(len(x))] print num_solutions += 1 solver.EndSearch() print print "num_solutions:", num_solutions print "failures:", solver.Failures() print "branches:", solver.Branches() print "WallTime:", solver.WallTime() coins = [16, 17, 23, 24, 39, 40] total = 100 if __name__ == "__main__": if len(sys.argv) > 1: total = string.atoi(sys.argv[1]) main(coins, total)
neilpanchal/iPython-Notebook-Profile
refs/heads/master
profile_neil/ipython_notebook_config.py
1
# Configuration file for ipython-notebook. c = get_config() #------------------------------------------------------------------------------ # NotebookApp configuration #------------------------------------------------------------------------------ # NotebookApp will inherit config from: BaseIPythonApplication, Application # The IP address the notebook server will listen on. # c.NotebookApp.ip = 'localhost' # extra paths to look for Javascript notebook extensions # c.NotebookApp.extra_nbextensions_path = [] # Whether to open in a browser after starting. The specific browser used is # platform dependent and determined by the python standard library `webbrowser` # module, unless it is overridden using the --browser (NotebookApp.browser) # configuration option. # c.NotebookApp.open_browser = True # The cluster manager class to use. # c.NotebookApp.cluster_manager_class = <class 'IPython.html.services.clusters.clustermanager.ClusterManager'> # The base URL for websockets, if it differs from the HTTP server (hint: it # almost certainly doesn't). # # Should be in the form of an HTTP origin: ws[s]://hostname[:port] # c.NotebookApp.websocket_url = '' # The name of the IPython directory. This directory is used for logging # configuration (through profiles), history storage, etc. The default is usually # $HOME/.ipython. This option can also be specified through the environment # variable IPYTHONDIR. # c.NotebookApp.ipython_dir = '' # The notebook manager class to use. # c.NotebookApp.contents_manager_class = <class 'IPython.html.services.contents.filemanager.FileContentsManager'> # Supply extra arguments that will be passed to Jinja environment. # c.NotebookApp.jinja_environment_options = {} # The Logging format template # c.NotebookApp.log_format = '[%(name)s]%(highlevel)s %(message)s' # Path to an extra config file to load. # # If specified, load this config file in addition to any other IPython config. # c.NotebookApp.extra_config_file = '' # The IPython profile to use. 
# c.NotebookApp.profile = 'default' # Set the log level by value or name. # c.NotebookApp.log_level = 30 # Whether to overwrite existing config files when copying # c.NotebookApp.overwrite = False # The full path to a private key file for usage with SSL/TLS. # c.NotebookApp.keyfile = '' # The logout handler class to use. # c.NotebookApp.logout_handler_class = <class 'IPython.html.auth.logout.LogoutHandler'> # Create a massive crash report when IPython encounters what may be an internal # error. The default is to append a short message to the usual traceback # c.NotebookApp.verbose_crash = False # DEPRECATED, use tornado_settings # c.NotebookApp.webapp_settings = {} # Whether to trust or not X-Scheme/X-Forwarded-Proto and X-Real-Ip/X-Forwarded- # For headerssent by the upstream reverse proxy. Necessary if the proxy handles # SSL # c.NotebookApp.trust_xheaders = False # The kernel manager class to use. # c.NotebookApp.kernel_manager_class = <class 'IPython.html.services.kernels.kernelmanager.MappingKernelManager'> # Whether to enable MathJax for typesetting math/TeX # # MathJax is the javascript library IPython uses to render math/LaTeX. It is # very large, so you may want to disable it if you have a slow internet # connection, or for offline use of the notebook. # # When disabled, equations etc. will appear as their untransformed TeX source. # c.NotebookApp.enable_mathjax = True # The random bytes used to secure cookies. By default this is a new random # number every time you start the Notebook. Set it to a value in a config file # to enable logins to persist across server sessions. # # Note: Cookie secrets should be kept private, do not share config files with # cookie_secret stored in plaintext (you can read the value from a file). 
# c.NotebookApp.cookie_secret = b'' # The default URL to redirect to from `/` # c.NotebookApp.default_url = '/tree' # The config manager class to use # c.NotebookApp.config_manager_class = <class 'IPython.html.services.config.manager.ConfigManager'> # Whether to install the default config files into the profile dir. If a new # profile is being created, and IPython contains config files for that profile, # then they will be staged into the new directory. Otherwise, default config # files will be automatically generated. # c.NotebookApp.copy_config_files = False # The login handler class to use. # c.NotebookApp.login_handler_class = <class 'IPython.html.auth.login.LoginHandler'> # Specify what command to use to invoke a web browser when opening the notebook. # If not specified, the default browser will be determined by the `webbrowser` # standard library module, which allows setting of the BROWSER environment # variable to override it. # c.NotebookApp.browser = '' # The url for MathJax.js. # c.NotebookApp.mathjax_url = '' # Extra paths to search for serving static files. # # This allows adding javascript/css to be available from the notebook server # machine, or overriding individual files in the IPython # c.NotebookApp.extra_static_paths = [] # Hashed password to use for web authentication. # # To generate, type in a python/IPython shell: # # from IPython.lib import passwd; passwd() # # The string should be of the form type:salt:hashed-password. # c.NotebookApp.password = '' # The port the notebook server will listen on. # c.NotebookApp.port = 8888 # Supply overrides for the tornado.web.Application that the IPython notebook # uses. # c.NotebookApp.tornado_settings = {} # DEPRECATED use base_url # c.NotebookApp.base_project_url = '/' # The number of additional ports to try if the specified port is not available. # c.NotebookApp.port_retries = 50 # The directory to use for notebooks and kernels. # c.NotebookApp.notebook_dir = '' # The session manager class to use. 
# c.NotebookApp.session_manager_class = <class 'IPython.html.services.sessions.sessionmanager.SessionManager'> # Supply SSL options for the tornado HTTPServer. See the tornado docs for # details. # c.NotebookApp.ssl_options = {} # The kernel spec manager class to use. Should be a subclass of # `IPython.kernel.kernelspec.KernelSpecManager`. # # The Api of KernelSpecManager is provisional and might change without warning # between this version of IPython and the next stable one. # c.NotebookApp.kernel_spec_manager_class = <class 'IPython.kernel.kernelspec.KernelSpecManager'> # The base URL for the notebook server. # # Leading and trailing slashes can be omitted, and will automatically be added. # c.NotebookApp.base_url = '/' # The date format used by logging formatters for %(asctime)s # c.NotebookApp.log_datefmt = '%Y-%m-%d %H:%M:%S' # Set the Access-Control-Allow-Credentials: true header # c.NotebookApp.allow_credentials = False # Use a regular expression for the Access-Control-Allow-Origin header # # Requests from an origin matching the expression will get replies with: # # Access-Control-Allow-Origin: origin # # where `origin` is the origin of the request. # # Ignored if allow_origin is set. # c.NotebookApp.allow_origin_pat = '' # Reraise exceptions encountered loading server extensions? # c.NotebookApp.reraise_server_extension_failures = False # # c.NotebookApp.file_to_run = '' # Set the Access-Control-Allow-Origin header # # Use '*' to allow any origin to access your server. # # Takes precedence over allow_origin_pat. # c.NotebookApp.allow_origin = '' # The file where the cookie secret is stored. # c.NotebookApp.cookie_secret_file = '' # Extra paths to search for serving jinja templates. # # Can be used to override templates from IPython.html.templates. # c.NotebookApp.extra_template_paths = [] # The full path to an SSL/TLS certificate file. # c.NotebookApp.certfile = '' # Python modules to load as notebook server extensions. 
This is an experimental # API, and may change in future releases. # c.NotebookApp.server_extensions = [] # DISABLED: use %pylab or %matplotlib in the notebook to enable matplotlib. # c.NotebookApp.pylab = 'disabled' #------------------------------------------------------------------------------ # KernelManager configuration #------------------------------------------------------------------------------ # Manages a single kernel in a subprocess on this host. # # This version starts kernels with Popen. # KernelManager will inherit config from: ConnectionFileMixin # set the control (ROUTER) port [default: random] # c.KernelManager.control_port = 0 # set the shell (ROUTER) port [default: random] # c.KernelManager.shell_port = 0 # Set the kernel's IP address [default localhost]. If the IP address is # something other than localhost, then Consoles on other machines will be able # to connect to the Kernel, so be careful! # c.KernelManager.ip = '' # set the stdin (ROUTER) port [default: random] # c.KernelManager.stdin_port = 0 # set the heartbeat port [default: random] # c.KernelManager.hb_port = 0 # Should we autorestart the kernel if it dies. # c.KernelManager.autorestart = False # set the iopub (PUB) port [default: random] # c.KernelManager.iopub_port = 0 # JSON file in which to store connection info [default: kernel-<pid>.json] # # This file will contain the IP, ports, and authentication key needed to connect # clients to this kernel. By default, this file will be created in the security # dir of the current profile, but can be specified by absolute path. # c.KernelManager.connection_file = '' # # c.KernelManager.transport = 'tcp' # DEPRECATED: Use kernel_name instead. # # The Popen Command to launch the kernel. Override this if you have a custom # kernel. If kernel_cmd is specified in a configuration file, IPython does not # pass any arguments to the kernel, because it cannot make any assumptions about # the arguments that the kernel understands. 
In particular, this means that the # kernel does not receive the option --debug if it given on the IPython command # line. # c.KernelManager.kernel_cmd = [] #------------------------------------------------------------------------------ # ProfileDir configuration #------------------------------------------------------------------------------ # An object to manage the profile directory and its resources. # # The profile directory is used by all IPython applications, to manage # configuration, logging and security. # # This object knows how to find, create and manage these directories. This # should be used by any code that wants to handle profiles. # Set the profile location directly. This overrides the logic used by the # `profile` option. # c.ProfileDir.location = '' #------------------------------------------------------------------------------ # Session configuration #------------------------------------------------------------------------------ # Object for handling serialization and sending of messages. # # The Session object handles building messages and sending them with ZMQ sockets # or ZMQStream objects. Objects can communicate with each other over the # network via Session objects, and only need to work with the dict-based IPython # message spec. The Session will handle serialization/deserialization, security, # and metadata. # # Sessions support configurable serialization via packer/unpacker traits, and # signing with HMAC digests via the key/keyfile traits. # # Parameters ---------- # # debug : bool # whether to trigger extra debugging statements # packer/unpacker : str : 'json', 'pickle' or import_string # importstrings for methods to serialize message parts. If just # 'json' or 'pickle', predefined JSON and pickle packers will be used. # Otherwise, the entire importstring must be used. # # The functions must accept at least valid JSON input, and output *bytes*. 
# # For example, to use msgpack: # packer = 'msgpack.packb', unpacker='msgpack.unpackb' # pack/unpack : callables # You can also set the pack/unpack callables for serialization directly. # session : bytes # the ID of this Session object. The default is to generate a new UUID. # username : unicode # username added to message headers. The default is to ask the OS. # key : bytes # The key used to initialize an HMAC signature. If unset, messages # will not be signed or checked. # keyfile : filepath # The file containing a key. If this is set, `key` will be initialized # to the contents of the file. # Debug output in the Session # c.Session.debug = False # The name of the packer for serializing messages. Should be one of 'json', # 'pickle', or an import name for a custom callable serializer. # c.Session.packer = 'json' # Threshold (in bytes) beyond which an object's buffer should be extracted to # avoid pickling. # c.Session.buffer_threshold = 1024 # Username for the Session. Default is your system username. # c.Session.username = 'Neil' # The name of the unpacker for unserializing messages. Only used with custom # functions for `packer`. # c.Session.unpacker = 'json' # execution key, for signing messages. # c.Session.key = b'' # The digest scheme used to construct the message signatures. Must have the form # 'hmac-HASH'. # c.Session.signature_scheme = 'hmac-sha256' # Threshold (in bytes) beyond which a buffer should be sent without copying. # c.Session.copy_threshold = 65536 # path to file containing execution key. # c.Session.keyfile = '' # The UUID identifying this session. # c.Session.session = '' # The maximum number of items for a container to be introspected for custom # serialization. Containers larger than this are pickled outright. # c.Session.item_threshold = 64 # The maximum number of digests to remember. # # The digest history will be culled when it exceeds this value. 
# c.Session.digest_history_size = 65536 # Metadata dictionary, which serves as the default top-level metadata dict for # each message. # c.Session.metadata = {} #------------------------------------------------------------------------------ # MappingKernelManager configuration #------------------------------------------------------------------------------ # A KernelManager that handles notebook mapping and HTTP error handling # MappingKernelManager will inherit config from: MultiKernelManager # # c.MappingKernelManager.root_dir = '' # The name of the default kernel to start # c.MappingKernelManager.default_kernel_name = 'python3' # The kernel manager class. This is configurable to allow subclassing of the # KernelManager for customized behavior. # c.MappingKernelManager.kernel_manager_class = 'IPython.kernel.ioloop.IOLoopKernelManager' #------------------------------------------------------------------------------ # ContentsManager configuration #------------------------------------------------------------------------------ # Base class for serving files and directories. # # This serves any text or binary file, as well as directories, with special # handling for JSON notebook documents. # # Most APIs take a path argument, which is always an API-style unicode path, and # always refers to a directory. # # - unicode, not url-escaped # - '/'-separated # - leading and trailing '/' will be stripped # - if unspecified, path defaults to '', # indicating the root path. # The base name used when creating untitled files. # c.ContentsManager.untitled_file = 'untitled' # The base name used when creating untitled directories. # c.ContentsManager.untitled_directory = 'Untitled Folder' # # c.ContentsManager.checkpoints = None # Python callable or importstring thereof # # To be called on a contents model prior to save. # # This can be used to process the structure, such as removing notebook outputs # or other side effects that should not be saved. 
# # It will be called as (all arguments passed by keyword):: # # hook(path=path, model=model, contents_manager=self) # # - model: the model to be saved. Includes file contents. # Modifying this dict will affect the file that is stored. # - path: the API path of the save destination # - contents_manager: this ContentsManager instance # c.ContentsManager.pre_save_hook = None # # c.ContentsManager.checkpoints_class = <class 'IPython.html.services.contents.checkpoints.Checkpoints'> # Glob patterns to hide in file and directory listings. # c.ContentsManager.hide_globs = ['__pycache__', '*.pyc', '*.pyo', '.DS_Store', '*.so', '*.dylib', '*~'] # The base name used when creating untitled notebooks. # c.ContentsManager.untitled_notebook = 'Untitled' # # c.ContentsManager.checkpoints_kwargs = {} #------------------------------------------------------------------------------ # FileContentsManager configuration #------------------------------------------------------------------------------ # FileContentsManager will inherit config from: ContentsManager # # c.FileContentsManager.root_dir = '' # The base name used when creating untitled files. # c.FileContentsManager.untitled_file = 'untitled' # The base name used when creating untitled directories. # c.FileContentsManager.untitled_directory = 'Untitled Folder' # # c.FileContentsManager.checkpoints = None # Python callable or importstring thereof # # To be called on a contents model prior to save. # # This can be used to process the structure, such as removing notebook outputs # or other side effects that should not be saved. # # It will be called as (all arguments passed by keyword):: # # hook(path=path, model=model, contents_manager=self) # # - model: the model to be saved. Includes file contents. # Modifying this dict will affect the file that is stored. 
# - path: the API path of the save destination # - contents_manager: this ContentsManager instance # c.FileContentsManager.pre_save_hook = None # # c.FileContentsManager.checkpoints_class = <class 'IPython.html.services.contents.checkpoints.Checkpoints'> # Glob patterns to hide in file and directory listings. # c.FileContentsManager.hide_globs = ['__pycache__', '*.pyc', '*.pyo', '.DS_Store', '*.so', '*.dylib', '*~'] # The base name used when creating untitled notebooks. # c.FileContentsManager.untitled_notebook = 'Untitled' # Python callable or importstring thereof # # to be called on the path of a file just saved. # # This can be used to process the file on disk, such as converting the notebook # to a script or HTML via nbconvert. # # It will be called as (all arguments passed by keyword):: # # hook(os_path=os_path, model=model, contents_manager=instance) # # - path: the filesystem path to the file just written - model: the model # representing the file - contents_manager: this ContentsManager instance # c.FileContentsManager.post_save_hook = None # DEPRECATED, use post_save_hook # c.FileContentsManager.save_script = False # # c.FileContentsManager.checkpoints_kwargs = {} #------------------------------------------------------------------------------ # NotebookNotary configuration #------------------------------------------------------------------------------ # A class for computing and verifying notebook signatures. # The hashing algorithm used to sign notebooks. # c.NotebookNotary.algorithm = 'sha256' # The sqlite file in which to store notebook signatures. By default, this will # be in your IPython profile. You can set it to ':memory:' to disable sqlite # writing to the filesystem. # c.NotebookNotary.db_file = '' # The secret key with which notebooks are signed. # c.NotebookNotary.secret = b'' # The file where the secret key is stored. # c.NotebookNotary.secret_file = '' # The number of notebook signatures to cache. 
When the number of signatures # exceeds this value, the oldest 25% of signatures will be culled. # c.NotebookNotary.cache_size = 65535 #------------------------------------------------------------------------------ # KernelSpecManager configuration #------------------------------------------------------------------------------ # Whitelist of allowed kernel names. # # By default, all installed kernels are allowed. # c.KernelSpecManager.whitelist = set()
JRock007/boxxy
refs/heads/master
dist/Boxxy.app/Contents/Resources/lib/python2.7/numpy/f2py/capi_maps.py
58
#!/usr/bin/env python """ Copyright 1999,2000 Pearu Peterson all rights reserved, Pearu Peterson <pearu@ioc.ee> Permission to use, modify, and distribute this software is given under the terms of the NumPy License. NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. $Date: 2005/05/06 10:57:33 $ Pearu Peterson """ from __future__ import division, absolute_import, print_function __version__ = "$Revision: 1.60 $"[10:-1] from . import __version__ f2py_version = __version__.version import copy import re import os import sys from .auxfuncs import * from .crackfortran import markoutercomma from . import cb_rules # Numarray and Numeric users should set this False using_newcore = True depargs=[] lcb_map={} lcb2_map={} # forced casting: mainly caused by the fact that Python or Numeric # C/APIs do not support the corresponding C types. c2py_map={'double': 'float', 'float': 'float', # forced casting 'long_double': 'float', # forced casting 'char': 'int', # forced casting 'signed_char': 'int', # forced casting 'unsigned_char': 'int', # forced casting 'short': 'int', # forced casting 'unsigned_short': 'int', # forced casting 'int': 'int', # (forced casting) 'long': 'int', 'long_long': 'long', 'unsigned': 'int', # forced casting 'complex_float': 'complex', # forced casting 'complex_double': 'complex', 'complex_long_double': 'complex', # forced casting 'string': 'string', } c2capi_map={'double':'NPY_DOUBLE', 'float':'NPY_FLOAT', 'long_double':'NPY_DOUBLE', # forced casting 'char':'NPY_CHAR', 'unsigned_char':'NPY_UBYTE', 'signed_char':'NPY_BYTE', 'short':'NPY_SHORT', 'unsigned_short':'NPY_USHORT', 'int':'NPY_INT', 'unsigned':'NPY_UINT', 'long':'NPY_LONG', 'long_long':'NPY_LONG', # forced casting 'complex_float':'NPY_CFLOAT', 'complex_double':'NPY_CDOUBLE', 'complex_long_double':'NPY_CDOUBLE', # forced casting 'string':'NPY_CHAR'} #These new maps aren't used anyhere yet, but should be by default # unless building numeric or numarray extensions. 
if using_newcore: c2capi_map={'double': 'NPY_DOUBLE', 'float': 'NPY_FLOAT', 'long_double': 'NPY_LONGDOUBLE', 'char': 'NPY_BYTE', 'unsigned_char': 'NPY_UBYTE', 'signed_char': 'NPY_BYTE', 'short': 'NPY_SHORT', 'unsigned_short': 'NPY_USHORT', 'int': 'NPY_INT', 'unsigned': 'NPY_UINT', 'long': 'NPY_LONG', 'unsigned_long': 'NPY_ULONG', 'long_long': 'NPY_LONGLONG', 'unsigned_long_long': 'NPY_ULONGLONG', 'complex_float': 'NPY_CFLOAT', 'complex_double': 'NPY_CDOUBLE', 'complex_long_double': 'NPY_CDOUBLE', 'string': 'NPY_CHAR', # f2py 2e is not ready for NPY_STRING (must set itemisize etc) #'string':'NPY_STRING' } c2pycode_map={'double':'d', 'float':'f', 'long_double':'d', # forced casting 'char':'1', 'signed_char':'1', 'unsigned_char':'b', 'short':'s', 'unsigned_short':'w', 'int':'i', 'unsigned':'u', 'long':'l', 'long_long':'L', 'complex_float':'F', 'complex_double':'D', 'complex_long_double':'D', # forced casting 'string':'c' } if using_newcore: c2pycode_map={'double':'d', 'float':'f', 'long_double':'g', 'char':'b', 'unsigned_char':'B', 'signed_char':'b', 'short':'h', 'unsigned_short':'H', 'int':'i', 'unsigned':'I', 'long':'l', 'unsigned_long':'L', 'long_long':'q', 'unsigned_long_long':'Q', 'complex_float':'F', 'complex_double':'D', 'complex_long_double':'G', 'string':'S'} c2buildvalue_map={'double':'d', 'float':'f', 'char':'b', 'signed_char':'b', 'short':'h', 'int':'i', 'long':'l', 'long_long':'L', 'complex_float':'N', 'complex_double':'N', 'complex_long_double':'N', 'string':'z'} if sys.version_info[0] >= 3: # Bytes, not Unicode strings c2buildvalue_map['string'] = 'y' if using_newcore: #c2buildvalue_map=??? 
pass f2cmap_all={'real':{'':'float','4':'float','8':'double','12':'long_double','16':'long_double'}, 'integer':{'':'int','1':'signed_char','2':'short','4':'int','8':'long_long', '-1':'unsigned_char','-2':'unsigned_short','-4':'unsigned', '-8':'unsigned_long_long'}, 'complex':{'':'complex_float','8':'complex_float', '16':'complex_double','24':'complex_long_double', '32':'complex_long_double'}, 'complexkind':{'':'complex_float','4':'complex_float', '8':'complex_double','12':'complex_long_double', '16':'complex_long_double'}, 'logical':{'':'int','1':'char','2':'short','4':'int','8':'long_long'}, 'double complex':{'':'complex_double'}, 'double precision':{'':'double'}, 'byte':{'':'char'}, 'character':{'':'string'} } if os.path.isfile('.f2py_f2cmap'): # User defined additions to f2cmap_all. # .f2py_f2cmap must contain a dictionary of dictionaries, only. # For example, {'real':{'low':'float'}} means that Fortran 'real(low)' is # interpreted as C 'float'. # This feature is useful for F90/95 users if they use PARAMETERSs # in type specifications. try: outmess('Reading .f2py_f2cmap ...\n') f = open('.f2py_f2cmap', 'r') d = eval(f.read(), {}, {}) f.close() for k, d1 in d.items(): for k1 in d1.keys(): d1[k1.lower()] = d1[k1] d[k.lower()] = d[k] for k in d.keys(): if k not in f2cmap_all: f2cmap_all[k]={} for k1 in d[k].keys(): if d[k][k1] in c2py_map: if k1 in f2cmap_all[k]: outmess("\tWarning: redefinition of {'%s':{'%s':'%s'->'%s'}}\n"%(k, k1, f2cmap_all[k][k1], d[k][k1])) f2cmap_all[k][k1] = d[k][k1] outmess('\tMapping "%s(kind=%s)" to "%s"\n' % (k, k1, d[k][k1])) else: errmess("\tIgnoring map {'%s':{'%s':'%s'}}: '%s' must be in %s\n"%(k, k1, d[k][k1], d[k][k1], list(c2py_map.keys()))) outmess('Succesfully applied user defined changes from .f2py_f2cmap\n') except Exception as msg: errmess('Failed to apply user defined changes from .f2py_f2cmap: %s. 
Skipping.\n' % (msg)) cformat_map={'double': '%g', 'float': '%g', 'long_double': '%Lg', 'char': '%d', 'signed_char': '%d', 'unsigned_char': '%hhu', 'short': '%hd', 'unsigned_short': '%hu', 'int': '%d', 'unsigned': '%u', 'long': '%ld', 'unsigned_long': '%lu', 'long_long': '%ld', 'complex_float': '(%g,%g)', 'complex_double': '(%g,%g)', 'complex_long_double': '(%Lg,%Lg)', 'string': '%s', } ############### Auxiliary functions def getctype(var): """ Determines C type """ ctype='void' if isfunction(var): if 'result' in var: a=var['result'] else: a=var['name'] if a in var['vars']: return getctype(var['vars'][a]) else: errmess('getctype: function %s has no return value?!\n'%a) elif issubroutine(var): return ctype elif 'typespec' in var and var['typespec'].lower() in f2cmap_all: typespec = var['typespec'].lower() f2cmap=f2cmap_all[typespec] ctype=f2cmap[''] # default type if 'kindselector' in var: if '*' in var['kindselector']: try: ctype=f2cmap[var['kindselector']['*']] except KeyError: errmess('getctype: "%s %s %s" not supported.\n'%(var['typespec'], '*', var['kindselector']['*'])) elif 'kind' in var['kindselector']: if typespec+'kind' in f2cmap_all: f2cmap=f2cmap_all[typespec+'kind'] try: ctype=f2cmap[var['kindselector']['kind']] except KeyError: if typespec in f2cmap_all: f2cmap=f2cmap_all[typespec] try: ctype=f2cmap[str(var['kindselector']['kind'])] except KeyError: errmess('getctype: "%s(kind=%s)" is mapped to C "%s" (to override define dict(%s = dict(%s="<C typespec>")) in %s/.f2py_f2cmap file).\n'\ %(typespec, var['kindselector']['kind'], ctype, typespec, var['kindselector']['kind'], os.getcwd())) else: if not isexternal(var): errmess('getctype: No C-type found in "%s", assuming void.\n'%var) return ctype def getstrlength(var): if isstringfunction(var): if 'result' in var: a=var['result'] else: a=var['name'] if a in var['vars']: return getstrlength(var['vars'][a]) else: errmess('getstrlength: function %s has no return value?!\n'%a) if not isstring(var): 
errmess('getstrlength: expected a signature of a string but got: %s\n'%(repr(var))) len='1' if 'charselector' in var: a=var['charselector'] if '*' in a: len=a['*'] elif 'len' in a: len=a['len'] if re.match(r'\(\s*([*]|[:])\s*\)', len) or re.match(r'([*]|[:])', len): #if len in ['(*)','*','(:)',':']: if isintent_hide(var): errmess('getstrlength:intent(hide): expected a string with defined length but got: %s\n'%(repr(var))) len='-1' return len def getarrdims(a,var,verbose=0): global depargs ret={} if isstring(var) and not isarray(var): ret['dims']=getstrlength(var) ret['size']=ret['dims'] ret['rank']='1' elif isscalar(var): ret['size']='1' ret['rank']='0' ret['dims']='' elif isarray(var): # if not isintent_c(var): # var['dimension'].reverse() dim=copy.copy(var['dimension']) ret['size']='*'.join(dim) try: ret['size']=repr(eval(ret['size'])) except: pass ret['dims']=','.join(dim) ret['rank']=repr(len(dim)) ret['rank*[-1]']=repr(len(dim)*[-1])[1:-1] for i in range(len(dim)): # solve dim for dependecies v=[] if dim[i] in depargs: v=[dim[i]] else: for va in depargs: if re.match(r'.*?\b%s\b.*'%va, dim[i]): v.append(va) for va in v: if depargs.index(va)>depargs.index(a): dim[i]='*' break ret['setdims'], i='', -1 for d in dim: i=i+1 if d not in ['*', ':', '(*)', '(:)']: ret['setdims']='%s#varname#_Dims[%d]=%s,'%(ret['setdims'], i, d) if ret['setdims']: ret['setdims']=ret['setdims'][:-1] ret['cbsetdims'], i='', -1 for d in var['dimension']: i=i+1 if d not in ['*', ':', '(*)', '(:)']: ret['cbsetdims']='%s#varname#_Dims[%d]=%s,'%(ret['cbsetdims'], i, d) elif isintent_in(var): outmess('getarrdims:warning: assumed shape array, using 0 instead of %r\n' \ % (d)) ret['cbsetdims']='%s#varname#_Dims[%d]=%s,'%(ret['cbsetdims'], i, 0) elif verbose : errmess('getarrdims: If in call-back function: array argument %s must have bounded dimensions: got %s\n'%(repr(a), repr(d))) if ret['cbsetdims']: ret['cbsetdims']=ret['cbsetdims'][:-1] # if not isintent_c(var): # var['dimension'].reverse() 
return ret def getpydocsign(a, var): global lcb_map if isfunction(var): if 'result' in var: af=var['result'] else: af=var['name'] if af in var['vars']: return getpydocsign(af, var['vars'][af]) else: errmess('getctype: function %s has no return value?!\n'%af) return '', '' sig, sigout=a, a opt='' if isintent_in(var): opt='input' elif isintent_inout(var): opt='in/output' out_a = a if isintent_out(var): for k in var['intent']: if k[:4]=='out=': out_a = k[4:] break init='' ctype=getctype(var) if hasinitvalue(var): init, showinit=getinit(a, var) init = ', optional\\n Default: %s' % showinit if isscalar(var): if isintent_inout(var): sig='%s : %s rank-0 array(%s,\'%s\')%s'%(a, opt, c2py_map[ctype], c2pycode_map[ctype], init) else: sig='%s : %s %s%s'%(a, opt, c2py_map[ctype], init) sigout='%s : %s'%(out_a, c2py_map[ctype]) elif isstring(var): if isintent_inout(var): sig='%s : %s rank-0 array(string(len=%s),\'c\')%s'%(a, opt, getstrlength(var), init) else: sig='%s : %s string(len=%s)%s'%(a, opt, getstrlength(var), init) sigout='%s : string(len=%s)'%(out_a, getstrlength(var)) elif isarray(var): dim=var['dimension'] rank=repr(len(dim)) sig='%s : %s rank-%s array(\'%s\') with bounds (%s)%s'%(a, opt, rank, c2pycode_map[ctype], ','.join(dim), init) if a==out_a: sigout='%s : rank-%s array(\'%s\') with bounds (%s)'\ %(a, rank, c2pycode_map[ctype], ','.join(dim)) else: sigout='%s : rank-%s array(\'%s\') with bounds (%s) and %s storage'\ %(out_a, rank, c2pycode_map[ctype], ','.join(dim), a) elif isexternal(var): ua='' if a in lcb_map and lcb_map[a] in lcb2_map and 'argname' in lcb2_map[lcb_map[a]]: ua=lcb2_map[lcb_map[a]]['argname'] if not ua==a: ua=' => %s'%ua else: ua='' sig='%s : call-back function%s'%(a, ua) sigout=sig else: errmess('getpydocsign: Could not resolve docsignature for "%s".\\n'%a) return sig, sigout def getarrdocsign(a, var): ctype=getctype(var) if isstring(var) and (not isarray(var)): sig='%s : rank-0 array(string(len=%s),\'c\')'%(a, getstrlength(var)) elif 
isscalar(var): sig='%s : rank-0 array(%s,\'%s\')'%(a, c2py_map[ctype], c2pycode_map[ctype],) elif isarray(var): dim=var['dimension'] rank=repr(len(dim)) sig='%s : rank-%s array(\'%s\') with bounds (%s)'%(a, rank, c2pycode_map[ctype], ','.join(dim)) return sig def getinit(a, var): if isstring(var): init, showinit='""', "''" else: init, showinit='', '' if hasinitvalue(var): init=var['='] showinit=init if iscomplex(var) or iscomplexarray(var): ret={} try: v = var["="] if ',' in v: ret['init.r'], ret['init.i']=markoutercomma(v[1:-1]).split('@,@') else: v = eval(v, {}, {}) ret['init.r'], ret['init.i']=str(v.real), str(v.imag) except: raise ValueError('getinit: expected complex number `(r,i)\' but got `%s\' as initial value of %r.' % (init, a)) if isarray(var): init='(capi_c.r=%s,capi_c.i=%s,capi_c)'%(ret['init.r'], ret['init.i']) elif isstring(var): if not init: init, showinit='""', "''" if init[0]=="'": init='"%s"'%(init[1:-1].replace('"', '\\"')) if init[0]=='"': showinit="'%s'"%(init[1:-1]) return init, showinit def sign2map(a, var): """ varname,ctype,atype init,init.r,init.i,pytype vardebuginfo,vardebugshowvalue,varshowvalue varrfromat intent """ global lcb_map, cb_map out_a = a if isintent_out(var): for k in var['intent']: if k[:4]=='out=': out_a = k[4:] break ret={'varname':a,'outvarname':out_a} ret['ctype']=getctype(var) intent_flags = [] for f, s in isintent_dict.items(): if f(var): intent_flags.append('F2PY_%s'%s) if intent_flags: #XXX: Evaluate intent_flags here. 
ret['intent'] = '|'.join(intent_flags) else: ret['intent'] = 'F2PY_INTENT_IN' if isarray(var): ret['varrformat']='N' elif ret['ctype'] in c2buildvalue_map: ret['varrformat']=c2buildvalue_map[ret['ctype']] else: ret['varrformat']='O' ret['init'], ret['showinit']=getinit(a, var) if hasinitvalue(var) and iscomplex(var) and not isarray(var): ret['init.r'], ret['init.i'] = markoutercomma(ret['init'][1:-1]).split('@,@') if isexternal(var): ret['cbnamekey']=a if a in lcb_map: ret['cbname']=lcb_map[a] ret['maxnofargs']=lcb2_map[lcb_map[a]]['maxnofargs'] ret['nofoptargs']=lcb2_map[lcb_map[a]]['nofoptargs'] ret['cbdocstr']=lcb2_map[lcb_map[a]]['docstr'] ret['cblatexdocstr']=lcb2_map[lcb_map[a]]['latexdocstr'] else: ret['cbname']=a errmess('sign2map: Confused: external %s is not in lcb_map%s.\n'%(a, list(lcb_map.keys()))) if isstring(var): ret['length']=getstrlength(var) if isarray(var): ret=dictappend(ret, getarrdims(a, var)) dim=copy.copy(var['dimension']) if ret['ctype'] in c2capi_map: ret['atype']=c2capi_map[ret['ctype']] # Debug info if debugcapi(var): il=[isintent_in, 'input', isintent_out, 'output', isintent_inout, 'inoutput', isrequired, 'required', isoptional, 'optional', isintent_hide, 'hidden', iscomplex, 'complex scalar', l_and(isscalar, l_not(iscomplex)), 'scalar', isstring, 'string', isarray, 'array', iscomplexarray, 'complex array', isstringarray, 'string array', iscomplexfunction, 'complex function', l_and(isfunction, l_not(iscomplexfunction)), 'function', isexternal, 'callback', isintent_callback, 'callback', isintent_aux, 'auxiliary', #ismutable,'mutable',l_not(ismutable),'immutable', ] rl=[] for i in range(0, len(il), 2): if il[i](var): rl.append(il[i+1]) if isstring(var): rl.append('slen(%s)=%s'%(a, ret['length'])) if isarray(var): # if not isintent_c(var): # var['dimension'].reverse() ddim=','.join(map(lambda x, y:'%s|%s'%(x, y), var['dimension'], dim)) rl.append('dims(%s)'%ddim) # if not isintent_c(var): # var['dimension'].reverse() if isexternal(var): 
ret['vardebuginfo']='debug-capi:%s=>%s:%s'%(a, ret['cbname'], ','.join(rl)) else: ret['vardebuginfo']='debug-capi:%s %s=%s:%s'%(ret['ctype'], a, ret['showinit'], ','.join(rl)) if isscalar(var): if ret['ctype'] in cformat_map: ret['vardebugshowvalue']='debug-capi:%s=%s'%(a, cformat_map[ret['ctype']]) if isstring(var): ret['vardebugshowvalue']='debug-capi:slen(%s)=%%d %s=\\"%%s\\"'%(a, a) if isexternal(var): ret['vardebugshowvalue']='debug-capi:%s=%%p'%(a) if ret['ctype'] in cformat_map: ret['varshowvalue']='#name#:%s=%s'%(a, cformat_map[ret['ctype']]) ret['showvalueformat']='%s'%(cformat_map[ret['ctype']]) if isstring(var): ret['varshowvalue']='#name#:slen(%s)=%%d %s=\\"%%s\\"'%(a, a) ret['pydocsign'], ret['pydocsignout']=getpydocsign(a, var) if hasnote(var): ret['note']=var['note'] return ret def routsign2map(rout): """ name,NAME,begintitle,endtitle rname,ctype,rformat routdebugshowvalue """ global lcb_map name = rout['name'] fname = getfortranname(rout) ret={'name': name, 'texname': name.replace('_', '\\_'), 'name_lower': name.lower(), 'NAME': name.upper(), 'begintitle': gentitle(name), 'endtitle': gentitle('end of %s'%name), 'fortranname': fname, 'FORTRANNAME': fname.upper(), 'callstatement': getcallstatement(rout) or '', 'usercode': getusercode(rout) or '', 'usercode1': getusercode1(rout) or '', } if '_' in fname: ret['F_FUNC'] = 'F_FUNC_US' else: ret['F_FUNC'] = 'F_FUNC' if '_' in name: ret['F_WRAPPEDFUNC'] = 'F_WRAPPEDFUNC_US' else: ret['F_WRAPPEDFUNC'] = 'F_WRAPPEDFUNC' lcb_map={} if 'use' in rout: for u in rout['use'].keys(): if u in cb_rules.cb_map: for un in cb_rules.cb_map[u]: ln=un[0] if 'map' in rout['use'][u]: for k in rout['use'][u]['map'].keys(): if rout['use'][u]['map'][k]==un[0]: ln=k;break lcb_map[ln]=un[1] #else: # errmess('routsign2map: cb_map does not contain module "%s" used in "use" statement.\n'%(u)) elif 'externals' in rout and rout['externals']: errmess('routsign2map: Confused: function %s has externals %s but no "use" 
statement.\n'%(ret['name'], repr(rout['externals']))) ret['callprotoargument'] = getcallprotoargument(rout, lcb_map) or '' if isfunction(rout): if 'result' in rout: a=rout['result'] else: a=rout['name'] ret['rname']=a ret['pydocsign'], ret['pydocsignout']=getpydocsign(a, rout) ret['ctype']=getctype(rout['vars'][a]) if hasresultnote(rout): ret['resultnote']=rout['vars'][a]['note'] rout['vars'][a]['note']=['See elsewhere.'] if ret['ctype'] in c2buildvalue_map: ret['rformat']=c2buildvalue_map[ret['ctype']] else: ret['rformat']='O' errmess('routsign2map: no c2buildvalue key for type %s\n'%(repr(ret['ctype']))) if debugcapi(rout): if ret['ctype'] in cformat_map: ret['routdebugshowvalue']='debug-capi:%s=%s'%(a, cformat_map[ret['ctype']]) if isstringfunction(rout): ret['routdebugshowvalue']='debug-capi:slen(%s)=%%d %s=\\"%%s\\"'%(a, a) if isstringfunction(rout): ret['rlength']=getstrlength(rout['vars'][a]) if ret['rlength']=='-1': errmess('routsign2map: expected explicit specification of the length of the string returned by the fortran function %s; taking 10.\n'%(repr(rout['name']))) ret['rlength']='10' if hasnote(rout): ret['note']=rout['note'] rout['note']=['See elsewhere.'] return ret def modsign2map(m): """ modulename """ if ismodule(m): ret={'f90modulename':m['name'], 'F90MODULENAME':m['name'].upper(), 'texf90modulename':m['name'].replace('_', '\\_')} else: ret={'modulename':m['name'], 'MODULENAME':m['name'].upper(), 'texmodulename':m['name'].replace('_', '\\_')} ret['restdoc'] = getrestdoc(m) or [] if hasnote(m): ret['note']=m['note'] #m['note']=['See elsewhere.'] ret['usercode'] = getusercode(m) or '' ret['usercode1'] = getusercode1(m) or '' if m['body']: ret['interface_usercode'] = getusercode(m['body'][0]) or '' else: ret['interface_usercode'] = '' ret['pymethoddef'] = getpymethoddef(m) or '' if 'coutput' in m: ret['coutput'] = m['coutput'] if 'f2py_wrapper_output' in m: ret['f2py_wrapper_output'] = m['f2py_wrapper_output'] return ret def 
cb_sign2map(a,var,index=None): ret={'varname':a} if index is None or 1: # disable 7712 patch ret['varname_i'] = ret['varname'] else: ret['varname_i'] = ret['varname'] + '_' + str(index) ret['ctype']=getctype(var) if ret['ctype'] in c2capi_map: ret['atype']=c2capi_map[ret['ctype']] if ret['ctype'] in cformat_map: ret['showvalueformat']='%s'%(cformat_map[ret['ctype']]) if isarray(var): ret=dictappend(ret, getarrdims(a, var)) ret['pydocsign'], ret['pydocsignout']=getpydocsign(a, var) if hasnote(var): ret['note']=var['note'] var['note']=['See elsewhere.'] return ret def cb_routsign2map(rout, um): """ name,begintitle,endtitle,argname ctype,rctype,maxnofargs,nofoptargs,returncptr """ ret={'name':'cb_%s_in_%s'%(rout['name'], um), 'returncptr':''} if isintent_callback(rout): if '_' in rout['name']: F_FUNC='F_FUNC_US' else: F_FUNC='F_FUNC' ret['callbackname'] = '%s(%s,%s)' \ % (F_FUNC, rout['name'].lower(), rout['name'].upper(), ) ret['static'] = 'extern' else: ret['callbackname'] = ret['name'] ret['static'] = 'static' ret['argname']=rout['name'] ret['begintitle']=gentitle(ret['name']) ret['endtitle']=gentitle('end of %s'%ret['name']) ret['ctype']=getctype(rout) ret['rctype']='void' if ret['ctype']=='string': ret['rctype']='void' else: ret['rctype']=ret['ctype'] if ret['rctype']!='void': if iscomplexfunction(rout): ret['returncptr'] = """ #ifdef F2PY_CB_RETURNCOMPLEX return_value= #endif """ else: ret['returncptr'] = 'return_value=' if ret['ctype'] in cformat_map: ret['showvalueformat']='%s'%(cformat_map[ret['ctype']]) if isstringfunction(rout): ret['strlength']=getstrlength(rout) if isfunction(rout): if 'result' in rout: a=rout['result'] else: a=rout['name'] if hasnote(rout['vars'][a]): ret['note']=rout['vars'][a]['note'] rout['vars'][a]['note']=['See elsewhere.'] ret['rname']=a ret['pydocsign'], ret['pydocsignout']=getpydocsign(a, rout) if iscomplexfunction(rout): ret['rctype']=""" #ifdef F2PY_CB_RETURNCOMPLEX #ctype# #else void #endif """ else: if hasnote(rout): 
ret['note']=rout['note'] rout['note']=['See elsewhere.'] nofargs=0 nofoptargs=0 if 'args' in rout and 'vars' in rout: for a in rout['args']: var=rout['vars'][a] if l_or(isintent_in, isintent_inout)(var): nofargs=nofargs+1 if isoptional(var): nofoptargs=nofoptargs+1 ret['maxnofargs']=repr(nofargs) ret['nofoptargs']=repr(nofoptargs) if hasnote(rout) and isfunction(rout) and 'result' in rout: ret['routnote']=rout['note'] rout['note']=['See elsewhere.'] return ret def common_sign2map(a, var): # obsolute ret={'varname':a} ret['ctype']=getctype(var) if isstringarray(var): ret['ctype']='char' if ret['ctype'] in c2capi_map: ret['atype']=c2capi_map[ret['ctype']] if ret['ctype'] in cformat_map: ret['showvalueformat']='%s'%(cformat_map[ret['ctype']]) if isarray(var): ret=dictappend(ret, getarrdims(a, var)) elif isstring(var): ret['size']=getstrlength(var) ret['rank']='1' ret['pydocsign'], ret['pydocsignout']=getpydocsign(a, var) if hasnote(var): ret['note']=var['note'] var['note']=['See elsewhere.'] ret['arrdocstr']=getarrdocsign(a, var) # for strings this returns 0-rank but actually is 1-rank return ret
alajara/servo
refs/heads/master
tests/wpt/harness/wptrunner/tests/test_update.py
59
# This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this file, # You can obtain one at http://mozilla.org/MPL/2.0/. import unittest import StringIO import pytest from .. import metadata, manifestupdate from mozlog import structuredlog, handlers, formatters class TestExpectedUpdater(unittest.TestCase): def create_manifest(self, data, test_path="path/to/test.ini"): f = StringIO.StringIO(data) return manifestupdate.compile(f, test_path) def create_updater(self, data, **kwargs): expected_tree = {} id_path_map = {} for test_path, test_ids, manifest_str in data: if isinstance(test_ids, (str, unicode)): test_ids = [test_ids] expected_tree[test_path] = self.create_manifest(manifest_str, test_path) for test_id in test_ids: id_path_map[test_id] = test_path return metadata.ExpectedUpdater(expected_tree, id_path_map, **kwargs) def create_log(self, *args, **kwargs): logger = structuredlog.StructuredLogger("expected_test") data = StringIO.StringIO() handler = handlers.StreamHandler(data, formatters.JSONFormatter()) logger.add_handler(handler) log_entries = ([("suite_start", {"tests": [], "run_info": kwargs.get("run_info", {})})] + list(args) + [("suite_end", {})]) for item in log_entries: action, kwargs = item getattr(logger, action)(**kwargs) logger.remove_handler(handler) data.seek(0) return data def coalesce_results(self, trees): for tree in trees: for test in tree.iterchildren(): for subtest in test.iterchildren(): subtest.coalesce_expected() test.coalesce_expected() @pytest.mark.xfail def test_update_0(self): prev_data = [("path/to/test.htm.ini", ["/path/to/test.htm"], """[test.htm] type: testharness [test1] expected: FAIL""")] new_data = self.create_log(("test_start", {"test": "/path/to/test.htm"}), ("test_status", {"test": "/path/to/test.htm", "subtest": "test1", "status": "PASS", "expected": "FAIL"}), ("test_end", {"test": "/path/to/test.htm", "status": "OK"})) updater = 
self.create_updater(prev_data) updater.update_from_log(new_data) new_manifest = updater.expected_tree["path/to/test.htm.ini"] self.coalesce_results([new_manifest]) self.assertTrue(new_manifest.is_empty) @pytest.mark.xfail def test_update_1(self): test_id = "/path/to/test.htm" prev_data = [("path/to/test.htm.ini", [test_id], """[test.htm] type: testharness [test1] expected: ERROR""")] new_data = self.create_log(("test_start", {"test": test_id}), ("test_status", {"test": test_id, "subtest": "test1", "status": "FAIL", "expected": "ERROR"}), ("test_end", {"test": test_id, "status": "OK"})) updater = self.create_updater(prev_data) updater.update_from_log(new_data) new_manifest = updater.expected_tree["path/to/test.htm.ini"] self.coalesce_results([new_manifest]) self.assertFalse(new_manifest.is_empty) self.assertEquals(new_manifest.get_test(test_id).children[0].get("expected"), "FAIL") @pytest.mark.xfail def test_new_subtest(self): test_id = "/path/to/test.htm" prev_data = [("path/to/test.htm.ini", [test_id], """[test.htm] type: testharness [test1] expected: FAIL""")] new_data = self.create_log(("test_start", {"test": test_id}), ("test_status", {"test": test_id, "subtest": "test1", "status": "FAIL", "expected": "FAIL"}), ("test_status", {"test": test_id, "subtest": "test2", "status": "FAIL", "expected": "PASS"}), ("test_end", {"test": test_id, "status": "OK"})) updater = self.create_updater(prev_data) updater.update_from_log(new_data) new_manifest = updater.expected_tree["path/to/test.htm.ini"] self.coalesce_results([new_manifest]) self.assertFalse(new_manifest.is_empty) self.assertEquals(new_manifest.get_test(test_id).children[0].get("expected"), "FAIL") self.assertEquals(new_manifest.get_test(test_id).children[1].get("expected"), "FAIL") @pytest.mark.xfail def test_update_multiple_0(self): test_id = "/path/to/test.htm" prev_data = [("path/to/test.htm.ini", [test_id], """[test.htm] type: testharness [test1] expected: FAIL""")] new_data_0 = self.create_log(("test_start", 
{"test": test_id}), ("test_status", {"test": test_id, "subtest": "test1", "status": "FAIL", "expected": "FAIL"}), ("test_end", {"test": test_id, "status": "OK"}), run_info={"debug": False, "os": "osx"}) new_data_1 = self.create_log(("test_start", {"test": test_id}), ("test_status", {"test": test_id, "subtest": "test1", "status": "TIMEOUT", "expected": "FAIL"}), ("test_end", {"test": test_id, "status": "OK"}), run_info={"debug": False, "os": "linux"}) updater = self.create_updater(prev_data) updater.update_from_log(new_data_0) updater.update_from_log(new_data_1) new_manifest = updater.expected_tree["path/to/test.htm.ini"] self.coalesce_results([new_manifest]) self.assertFalse(new_manifest.is_empty) self.assertEquals(new_manifest.get_test(test_id).children[0].get( "expected", {"debug": False, "os": "osx"}), "FAIL") self.assertEquals(new_manifest.get_test(test_id).children[0].get( "expected", {"debug": False, "os": "linux"}), "TIMEOUT") @pytest.mark.xfail def test_update_multiple_1(self): test_id = "/path/to/test.htm" prev_data = [("path/to/test.htm.ini", [test_id], """[test.htm] type: testharness [test1] expected: FAIL""")] new_data_0 = self.create_log(("test_start", {"test": test_id}), ("test_status", {"test": test_id, "subtest": "test1", "status": "FAIL", "expected": "FAIL"}), ("test_end", {"test": test_id, "status": "OK"}), run_info={"debug": False, "os": "osx"}) new_data_1 = self.create_log(("test_start", {"test": test_id}), ("test_status", {"test": test_id, "subtest": "test1", "status": "TIMEOUT", "expected": "FAIL"}), ("test_end", {"test": test_id, "status": "OK"}), run_info={"debug": False, "os": "linux"}) updater = self.create_updater(prev_data) updater.update_from_log(new_data_0) updater.update_from_log(new_data_1) new_manifest = updater.expected_tree["path/to/test.htm.ini"] self.coalesce_results([new_manifest]) self.assertFalse(new_manifest.is_empty) self.assertEquals(new_manifest.get_test(test_id).children[0].get( "expected", {"debug": False, "os": 
"osx"}), "FAIL") self.assertEquals(new_manifest.get_test(test_id).children[0].get( "expected", {"debug": False, "os": "linux"}), "TIMEOUT") self.assertEquals(new_manifest.get_test(test_id).children[0].get( "expected", {"debug": False, "os": "windows"}), "FAIL") @pytest.mark.xfail def test_update_multiple_2(self): test_id = "/path/to/test.htm" prev_data = [("path/to/test.htm.ini", [test_id], """[test.htm] type: testharness [test1] expected: FAIL""")] new_data_0 = self.create_log(("test_start", {"test": test_id}), ("test_status", {"test": test_id, "subtest": "test1", "status": "FAIL", "expected": "FAIL"}), ("test_end", {"test": test_id, "status": "OK"}), run_info={"debug": False, "os": "osx"}) new_data_1 = self.create_log(("test_start", {"test": test_id}), ("test_status", {"test": test_id, "subtest": "test1", "status": "TIMEOUT", "expected": "FAIL"}), ("test_end", {"test": test_id, "status": "OK"}), run_info={"debug": True, "os": "osx"}) updater = self.create_updater(prev_data) updater.update_from_log(new_data_0) updater.update_from_log(new_data_1) new_manifest = updater.expected_tree["path/to/test.htm.ini"] self.coalesce_results([new_manifest]) self.assertFalse(new_manifest.is_empty) self.assertEquals(new_manifest.get_test(test_id).children[0].get( "expected", {"debug": False, "os": "osx"}), "FAIL") self.assertEquals(new_manifest.get_test(test_id).children[0].get( "expected", {"debug": True, "os": "osx"}), "TIMEOUT") @pytest.mark.xfail def test_update_multiple_3(self): test_id = "/path/to/test.htm" prev_data = [("path/to/test.htm.ini", [test_id], """[test.htm] type: testharness [test1] expected: if debug: FAIL if not debug and os == "osx": TIMEOUT""")] new_data_0 = self.create_log(("test_start", {"test": test_id}), ("test_status", {"test": test_id, "subtest": "test1", "status": "FAIL", "expected": "FAIL"}), ("test_end", {"test": test_id, "status": "OK"}), run_info={"debug": False, "os": "osx"}) new_data_1 = self.create_log(("test_start", {"test": test_id}), 
("test_status", {"test": test_id, "subtest": "test1", "status": "TIMEOUT", "expected": "FAIL"}), ("test_end", {"test": test_id, "status": "OK"}), run_info={"debug": True, "os": "osx"}) updater = self.create_updater(prev_data) updater.update_from_log(new_data_0) updater.update_from_log(new_data_1) new_manifest = updater.expected_tree["path/to/test.htm.ini"] self.coalesce_results([new_manifest]) self.assertFalse(new_manifest.is_empty) self.assertEquals(new_manifest.get_test(test_id).children[0].get( "expected", {"debug": False, "os": "osx"}), "FAIL") self.assertEquals(new_manifest.get_test(test_id).children[0].get( "expected", {"debug": True, "os": "osx"}), "TIMEOUT") @pytest.mark.xfail def test_update_ignore_existing(self): test_id = "/path/to/test.htm" prev_data = [("path/to/test.htm.ini", [test_id], """[test.htm] type: testharness [test1] expected: if debug: TIMEOUT if not debug and os == "osx": NOTRUN""")] new_data_0 = self.create_log(("test_start", {"test": test_id}), ("test_status", {"test": test_id, "subtest": "test1", "status": "FAIL", "expected": "PASS"}), ("test_end", {"test": test_id, "status": "OK"}), run_info={"debug": False, "os": "linux"}) new_data_1 = self.create_log(("test_start", {"test": test_id}), ("test_status", {"test": test_id, "subtest": "test1", "status": "FAIL", "expected": "PASS"}), ("test_end", {"test": test_id, "status": "OK"}), run_info={"debug": True, "os": "windows"}) updater = self.create_updater(prev_data, ignore_existing=True) updater.update_from_log(new_data_0) updater.update_from_log(new_data_1) new_manifest = updater.expected_tree["path/to/test.htm.ini"] self.coalesce_results([new_manifest]) self.assertFalse(new_manifest.is_empty) self.assertEquals(new_manifest.get_test(test_id).children[0].get( "expected", {"debug": True, "os": "osx"}), "FAIL") self.assertEquals(new_manifest.get_test(test_id).children[0].get( "expected", {"debug": False, "os": "osx"}), "FAIL")
ryfeus/lambda-packs
refs/heads/master
Selenium_PhantomJS/source/pip/vcs/subversion.py
170
from __future__ import absolute_import import logging import os import re from pip._vendor.six.moves.urllib import parse as urllib_parse from pip.index import Link from pip.utils import rmtree, display_path from pip.utils.logging import indent_log from pip.vcs import vcs, VersionControl _svn_xml_url_re = re.compile('url="([^"]+)"') _svn_rev_re = re.compile('committed-rev="(\d+)"') _svn_url_re = re.compile(r'URL: (.+)') _svn_revision_re = re.compile(r'Revision: (.+)') _svn_info_xml_rev_re = re.compile(r'\s*revision="(\d+)"') _svn_info_xml_url_re = re.compile(r'<url>(.*)</url>') logger = logging.getLogger(__name__) class Subversion(VersionControl): name = 'svn' dirname = '.svn' repo_name = 'checkout' schemes = ('svn', 'svn+ssh', 'svn+http', 'svn+https', 'svn+svn') def get_info(self, location): """Returns (url, revision), where both are strings""" assert not location.rstrip('/').endswith(self.dirname), \ 'Bad directory: %s' % location output = self.run_command( ['info', location], show_stdout=False, extra_environ={'LANG': 'C'}, ) match = _svn_url_re.search(output) if not match: logger.warning( 'Cannot determine URL of svn checkout %s', display_path(location), ) logger.debug('Output that cannot be parsed: \n%s', output) return None, None url = match.group(1).strip() match = _svn_revision_re.search(output) if not match: logger.warning( 'Cannot determine revision of svn checkout %s', display_path(location), ) logger.debug('Output that cannot be parsed: \n%s', output) return url, None return url, match.group(1) def export(self, location): """Export the svn repository at the url to the destination location""" url, rev = self.get_url_rev() rev_options = get_rev_options(url, rev) logger.info('Exporting svn repository %s to %s', url, location) with indent_log(): if os.path.exists(location): # Subversion doesn't like to check out over an existing # directory --force fixes this, but was only added in svn 1.5 rmtree(location) self.run_command( ['export'] + rev_options + [url, 
location], show_stdout=False) def switch(self, dest, url, rev_options): self.run_command(['switch'] + rev_options + [url, dest]) def update(self, dest, rev_options): self.run_command(['update'] + rev_options + [dest]) def obtain(self, dest): url, rev = self.get_url_rev() rev_options = get_rev_options(url, rev) if rev: rev_display = ' (to revision %s)' % rev else: rev_display = '' if self.check_destination(dest, url, rev_options, rev_display): logger.info( 'Checking out %s%s to %s', url, rev_display, display_path(dest), ) self.run_command(['checkout', '-q'] + rev_options + [url, dest]) def get_location(self, dist, dependency_links): for url in dependency_links: egg_fragment = Link(url).egg_fragment if not egg_fragment: continue if '-' in egg_fragment: # FIXME: will this work when a package has - in the name? key = '-'.join(egg_fragment.split('-')[:-1]).lower() else: key = egg_fragment if key == dist.key: return url.split('#', 1)[0] return None def get_revision(self, location): """ Return the maximum revision for all files under a given location """ # Note: taken from setuptools.command.egg_info revision = 0 for base, dirs, files in os.walk(location): if self.dirname not in dirs: dirs[:] = [] continue # no sense walking uncontrolled subdirs dirs.remove(self.dirname) entries_fn = os.path.join(base, self.dirname, 'entries') if not os.path.exists(entries_fn): # FIXME: should we warn? 
continue dirurl, localrev = self._get_svn_url_rev(base) if base == location: base_url = dirurl + '/' # save the root url elif not dirurl or not dirurl.startswith(base_url): dirs[:] = [] continue # not part of the same svn tree, skip it revision = max(revision, localrev) return revision def get_url_rev(self): # hotfix the URL scheme after removing svn+ from svn+ssh:// readd it url, rev = super(Subversion, self).get_url_rev() if url.startswith('ssh://'): url = 'svn+' + url return url, rev def get_url(self, location): # In cases where the source is in a subdirectory, not alongside # setup.py we have to look up in the location until we find a real # setup.py orig_location = location while not os.path.exists(os.path.join(location, 'setup.py')): last_location = location location = os.path.dirname(location) if location == last_location: # We've traversed up to the root of the filesystem without # finding setup.py logger.warning( "Could not find setup.py for directory %s (tried all " "parent directories)", orig_location, ) return None return self._get_svn_url_rev(location)[0] def _get_svn_url_rev(self, location): from pip.exceptions import InstallationError entries_path = os.path.join(location, self.dirname, 'entries') if os.path.exists(entries_path): with open(entries_path) as f: data = f.read() else: # subversion >= 1.7 does not have the 'entries' file data = '' if (data.startswith('8') or data.startswith('9') or data.startswith('10')): data = list(map(str.splitlines, data.split('\n\x0c\n'))) del data[0][0] # get rid of the '8' url = data[0][3] revs = [int(d[9]) for d in data if len(d) > 9 and d[9]] + [0] elif data.startswith('<?xml'): match = _svn_xml_url_re.search(data) if not match: raise ValueError('Badly formatted data: %r' % data) url = match.group(1) # get repository URL revs = [int(m.group(1)) for m in _svn_rev_re.finditer(data)] + [0] else: try: # subversion >= 1.7 xml = self.run_command( ['info', '--xml', location], show_stdout=False, ) url = 
_svn_info_xml_url_re.search(xml).group(1) revs = [ int(m.group(1)) for m in _svn_info_xml_rev_re.finditer(xml) ] except InstallationError: url, revs = None, [] if revs: rev = max(revs) else: rev = 0 return url, rev def get_src_requirement(self, dist, location): repo = self.get_url(location) if repo is None: return None # FIXME: why not project name? egg_project_name = dist.egg_name().split('-', 1)[0] rev = self.get_revision(location) return 'svn+%s@%s#egg=%s' % (repo, rev, egg_project_name) def check_version(self, dest, rev_options): """Always assume the versions don't match""" return False def get_rev_options(url, rev): if rev: rev_options = ['-r', rev] else: rev_options = [] r = urllib_parse.urlsplit(url) if hasattr(r, 'username'): # >= Python-2.5 username, password = r.username, r.password else: netloc = r[1] if '@' in netloc: auth = netloc.split('@')[0] if ':' in auth: username, password = auth.split(':', 1) else: username, password = auth, None else: username, password = None, None if username: rev_options += ['--username', username] if password: rev_options += ['--password', password] return rev_options vcs.register(Subversion)
kalahbrown/HueBigSQL
refs/heads/master
desktop/core/ext-py/Django-1.6.10/django/utils/baseconv.py
238
# Copyright (c) 2010 Guilherme Gondim. All rights reserved. # Copyright (c) 2009 Simon Willison. All rights reserved. # Copyright (c) 2002 Drew Perttula. All rights reserved. # # License: # Python Software Foundation License version 2 # # See the file "LICENSE" for terms & conditions for usage, and a DISCLAIMER OF # ALL WARRANTIES. # # This Baseconv distribution contains no GNU General Public Licensed (GPLed) # code so it may be used in proprietary projects just like prior ``baseconv`` # distributions. # # All trademarks referenced herein are property of their respective holders. # """ Convert numbers from base 10 integers to base X strings and back again. Sample usage:: >>> base20 = BaseConverter('0123456789abcdefghij') >>> base20.encode(1234) '31e' >>> base20.decode('31e') 1234 >>> base20.encode(-1234) '-31e' >>> base20.decode('-31e') -1234 >>> base11 = BaseConverter('0123456789-', sign='$') >>> base11.encode('$1234') '$-22' >>> base11.decode('$-22') '$1234' """ BASE2_ALPHABET = '01' BASE16_ALPHABET = '0123456789ABCDEF' BASE56_ALPHABET = '23456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnpqrstuvwxyz' BASE36_ALPHABET = '0123456789abcdefghijklmnopqrstuvwxyz' BASE62_ALPHABET = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz' BASE64_ALPHABET = BASE62_ALPHABET + '-_' class BaseConverter(object): decimal_digits = '0123456789' def __init__(self, digits, sign='-'): self.sign = sign self.digits = digits if sign in self.digits: raise ValueError('Sign character found in converter base digits.') def __repr__(self): return "<BaseConverter: base%s (%s)>" % (len(self.digits), self.digits) def encode(self, i): neg, value = self.convert(i, self.decimal_digits, self.digits, '-') if neg: return self.sign + value return value def decode(self, s): neg, value = self.convert(s, self.digits, self.decimal_digits, self.sign) if neg: value = '-' + value return int(value) def convert(self, number, from_digits, to_digits, sign): if str(number)[0] == sign: number = str(number)[1:] 
neg = 1 else: neg = 0 # make an integer out of the number x = 0 for digit in str(number): x = x * len(from_digits) + from_digits.index(digit) # create the result in base 'len(to_digits)' if x == 0: res = to_digits[0] else: res = '' while x > 0: digit = x % len(to_digits) res = to_digits[digit] + res x = int(x // len(to_digits)) return neg, res base2 = BaseConverter(BASE2_ALPHABET) base16 = BaseConverter(BASE16_ALPHABET) base36 = BaseConverter(BASE36_ALPHABET) base56 = BaseConverter(BASE56_ALPHABET) base62 = BaseConverter(BASE62_ALPHABET) base64 = BaseConverter(BASE64_ALPHABET, sign='$')
Niektory/fifengine
refs/heads/master
tests/fife_test/tests/PychanWidgetEventsTest.py
5
#!/usr/bin/env python # -*- coding: utf-8 -*- # #################################################################### # Copyright (C) 2005-2013 by the FIFE team # http://www.fifengine.net # This file is part of FIFE. # # FIFE is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the # Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # #################################################################### from fife import fife from fife.extensions import pychan from fife.extensions.pychan.tools import callbackWithArguments as cbwa import scripts.test as test class PychanWidgetEventsTest(test.Test): def create(self, engine, application): self._application = application self._engine = engine self._running = False self._controlsPanel = pychan.loadXML('data/gui/widget_events_controls.xml') self._controlsPanel.position = (0, 0) self._controlsPanel.mapEvents({ "controlBtn" : self._toggleWindow, }) self._outputBox = self._controlsPanel.findChild(name="outputBox") self._window = pychan.loadXML('data/gui/widget_events_window.xml') self._window.capture(event_name="widgetHidden", callback=cbwa(self._widgetHiddenCallback, self._window.name)) self._window.capture(event_name="widgetShown", callback=cbwa(self._widgetShownCallback, self._window.name)) self._window.capture(event_name="widgetMoved", callback=cbwa(self._widgetMovedCallback, self._window.name)) self._window.mapEvents({ 
"testButtonHide/ancestorHidden" : cbwa(self._widgetHiddenCallback, self._window.findChild(name="testButtonHide").name), "testButtonShow/ancestorShown" : cbwa(self._widgetShownCallback, self._window.findChild(name="testButtonShow").name), "testButtonMove/ancestorMoved" : cbwa(self._widgetMovedCallback, self._window.findChild(name="testButtonMove").name) }) def destroy(self): #del self._controlsPanel del self._window def run(self): self._running = True self._controlsPanel.show() def stop(self): self._running = False #any callbacks referencing _controlsPanel or _window and listening for a widgetHidden event #should be removed, because after hiding these widget hierarchies will be freed from memory self._window.mapEvents({ "testButtonHide/ancestorHidden" : None, }) self._window.capture(event_name="widgetHidden", callback=None) self._controlsPanel.hide() self._window.hide() def isRunning(self): return self._running def getName(self): return "PychanWidgetEventsTest" def getAuthor(self): return "vdaras" def getDescription(self): return "Use this to test that widget events are working as expected." def _toggleWindow(self): if not self._window.isVisible(): self._window.show() self._controlsPanel.findChild(name="controlBtn").text = unicode("Hide Window", "UTF-8") else: self._window.hide() self._controlsPanel.findChild(name="controlBtn").text = unicode("Show Window", "UTF-8") def _widgetHiddenCallback(self, widget_name): txt = unicode("Widget with name %s was hidden" % widget_name, "UTF-8") self._printToOutput(txt) def _widgetShownCallback(self, widget_name): txt = unicode("Widget with name %s was shown" % widget_name, "UTF-8") self._printToOutput(txt) def _widgetMovedCallback(self, widget_name): txt = unicode("%s was moved" % widget_name, "UTF-8") self._printToOutput(txt) def _printToOutput(self, txt): label = pychan.widgets.Label(max_size=(200,1000),wrap_text=True) label.text = txt self._outputBox.addChild(label) self._controlsPanel.adaptLayout()
BtbN/xbmc
refs/heads/master
tools/EventClients/Clients/PS3 Sixaxis Controller/ps3d.py
168
#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright (C) 2008-2013 Team XBMC # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. import sys import traceback import time import struct import threading import os if os.path.exists("../../lib/python"): sys.path.append("../PS3 BD Remote") sys.path.append("../../lib/python") from bt.hid import HID from bt.bt import bt_lookup_name from xbmcclient import XBMCClient from ps3 import sixaxis from ps3_remote import process_keys as process_remote try: from ps3 import sixwatch except Exception, e: print "Failed to import sixwatch now disabled: " + str(e) sixwatch = None try: import zeroconf except: zeroconf = None ICON_PATH = "../../icons/" else: # fallback to system wide modules from kodi.bt.hid import HID from kodi.bt.bt import bt_lookup_name from kodi.xbmcclient import XBMCClient from kodi.ps3 import sixaxis from kodi.ps3_remote import process_keys as process_remote from kodi.defs import * try: from kodi.ps3 import sixwatch except Exception, e: print "Failed to import sixwatch now disabled: " + str(e) sixwatch = None try: import kodi.zeroconf as zeroconf except: zeroconf = None event_threads = [] def printerr(): trace = "" exception = "" exc_list = traceback.format_exception_only (sys.exc_type, sys.exc_value) for entry in exc_list: exception += entry tb_list = 
traceback.format_tb(sys.exc_info()[2]) for entry in tb_list: trace += entry print("%s\n%s" % (exception, trace), "Script Error") class StoppableThread ( threading.Thread ): def __init__(self): threading.Thread.__init__(self) self._stop = False self.set_timeout(0) def stop_thread(self): self._stop = True def stop(self): return self._stop def close_sockets(self): if self.isock: try: self.isock.close() except: pass self.isock = None if self.csock: try: self.csock.close() except: pass self.csock = None self.last_action = 0 def set_timeout(self, seconds): self.timeout = seconds def reset_timeout(self): self.last_action = time.time() def idle_time(self): return time.time() - self.last_action def timed_out(self): if (time.time() - self.last_action) > self.timeout: return True else: return False class PS3SixaxisThread ( StoppableThread ): def __init__(self, csock, isock, ipaddr="127.0.0.1"): StoppableThread.__init__(self) self.csock = csock self.isock = isock self.xbmc = XBMCClient(name="PS3 Sixaxis", icon_file=ICON_PATH + "/bluetooth.png", ip=ipaddr) self.set_timeout(600) def run(self): six = sixaxis.sixaxis(self.xbmc, self.csock, self.isock) self.xbmc.connect() self.reset_timeout() try: while not self.stop(): if self.timed_out(): raise Exception("PS3 Sixaxis powering off, timed out") if self.idle_time() > 50: self.xbmc.connect() try: if six.process_socket(self.isock): self.reset_timeout() except Exception, e: print e break except Exception, e: printerr() six.close() self.close_sockets() class PS3RemoteThread ( StoppableThread ): def __init__(self, csock, isock, ipaddr="127.0.0.1"): StoppableThread.__init__(self) self.csock = csock self.isock = isock self.xbmc = XBMCClient(name="PS3 Blu-Ray Remote", icon_file=ICON_PATH + "/bluetooth.png", ip=ipaddr) self.set_timeout(600) self.services = [] self.current_xbmc = 0 def run(self): self.xbmc.connect() try: # start the zeroconf thread if possible try: self.zeroconf_thread = ZeroconfThread() 
self.zeroconf_thread.add_service('_xbmc-events._udp', self.zeroconf_service_handler) self.zeroconf_thread.start() except Exception, e: print str(e) # main thread loop while not self.stop(): status = process_remote(self.isock, self.xbmc) if status == 2: # 2 = socket read timeout if self.timed_out(): raise Exception("PS3 Blu-Ray Remote powering off, "\ "timed out") elif status == 3: # 3 = ps and skip + self.next_xbmc() elif status == 4: # 4 = ps and skip - self.previous_xbmc() elif not status: # 0 = keys are normally processed self.reset_timeout() # process_remote() will raise an exception on read errors except Exception, e: print str(e) self.zeroconf_thread.stop() self.close_sockets() def next_xbmc(self): """ Connect to the next XBMC instance """ self.current_xbmc = (self.current_xbmc + 1) % len( self.services ) self.reconnect() return def previous_xbmc(self): """ Connect to the previous XBMC instance """ self.current_xbmc -= 1 if self.current_xbmc < 0 : self.current_xbmc = len( self.services ) - 1 self.reconnect() return def reconnect(self): """ Reconnect to an XBMC instance based on self.current_xbmc """ try: service = self.services[ self.current_xbmc ] print "Connecting to %s" % service['name'] self.xbmc.connect( service['address'], service['port'] ) self.xbmc.send_notification("PS3 Blu-Ray Remote", "New Connection", None) except Exception, e: print str(e) def zeroconf_service_handler(self, event, service): """ Zeroconf event handler """ if event == zeroconf.SERVICE_FOUND: # new xbmc service detected self.services.append( service ) elif event == zeroconf.SERVICE_LOST: # xbmc service lost try: # search for the service by name, since IP+port isn't available for s in self.services: # nuke it, if found if service['name'] == s['name']: self.services.remove(s) break except: pass return class SixWatch(threading.Thread): def __init__(self, mac): threading.Thread.__init__(self) self.mac = mac self.daemon = True self.start() def run(self): while True: try: 
sixwatch.main(self.mac) except Exception, e: print "Exception caught in sixwatch, restarting: " + str(e) class ZeroconfThread ( threading.Thread ): """ """ def __init__(self): threading.Thread.__init__(self) self._zbrowser = None self._services = [] def run(self): if zeroconf: # create zeroconf service browser self._zbrowser = zeroconf.Browser() # add the requested services for service in self._services: self._zbrowser.add_service( service[0], service[1] ) # run the event loop self._zbrowser.run() return def stop(self): """ Stop the zeroconf browser """ try: self._zbrowser.stop() except: pass return def add_service(self, type, handler): """ Add a new service to search for. NOTE: Services must be added before thread starts. """ self._services.append( [ type, handler ] ) def usage(): print """ PS3 Sixaxis / Blu-Ray Remote HID Server v0.1 Usage: ps3.py [bdaddress] [XBMC host] bdaddress => address of local bluetooth device to use (default: auto) (e.g. aa:bb:cc:dd:ee:ff) ip address => IP address or hostname of the XBMC instance (default: localhost) (e.g. 
192.168.1.110) """ def start_hidd(bdaddr=None, ipaddr="127.0.0.1"): devices = [ 'PLAYSTATION(R)3 Controller', 'BD Remote Control' ] hid = HID(bdaddr) watch = None if sixwatch: try: print "Starting USB sixwatch" watch = SixWatch(hid.get_local_address()) except Exception, e: print "Failed to initialize sixwatch" + str(e) pass while True: if hid.listen(): (csock, addr) = hid.get_control_socket() device_name = bt_lookup_name(addr[0]) if device_name == devices[0]: # handle PS3 controller handle_ps3_controller(hid, ipaddr) elif device_name == devices[1]: # handle the PS3 remote handle_ps3_remote(hid, ipaddr) else: print "Unknown Device: %s" % (device_name) def handle_ps3_controller(hid, ipaddr): print "Received connection from a Sixaxis PS3 Controller" csock = hid.get_control_socket()[0] isock = hid.get_interrupt_socket()[0] sixaxis = PS3SixaxisThread(csock, isock, ipaddr) add_thread(sixaxis) sixaxis.start() return def handle_ps3_remote(hid, ipaddr): print "Received connection from a PS3 Blu-Ray Remote" csock = hid.get_control_socket()[0] isock = hid.get_interrupt_socket()[0] isock.settimeout(1) remote = PS3RemoteThread(csock, isock, ipaddr) add_thread(remote) remote.start() return def add_thread(thread): global event_threads event_threads.append(thread) def main(): if len(sys.argv)>3: return usage() bdaddr = "" ipaddr = "127.0.0.1" try: for addr in sys.argv[1:]: try: # ensure that the addr is of the format 'aa:bb:cc:dd:ee:ff' if "".join([ str(len(a)) for a in addr.split(":") ]) != "222222": raise Exception("Invalid format") bdaddr = addr print "Connecting to Bluetooth device: %s" % bdaddr except Exception, e: try: ipaddr = addr print "Connecting to : %s" % ipaddr except: print str(e) return usage() except Exception, e: pass print "Starting HID daemon" start_hidd(bdaddr, ipaddr) if __name__=="__main__": try: main() finally: for t in event_threads: try: print "Waiting for thread "+str(t)+" to terminate" t.stop_thread() if t.isAlive(): t.join() print "Thread "+str(t)+" 
terminated" except Exception, e: print str(e) pass
jayceyxc/hue
refs/heads/master
desktop/core/ext-py/cryptography-1.3.1/src/_cffi_src/commoncrypto/seccertificate.py
8
# This file is dual licensed under the terms of the Apache License, Version # 2.0, and the BSD License. See the LICENSE file in the root of this repository # for complete details. from __future__ import absolute_import, division, print_function INCLUDES = """ #include <Security/SecCertificate.h> """ TYPES = """ typedef ... *SecCertificateRef; """ FUNCTIONS = """ SecCertificateRef SecCertificateCreateWithData(CFAllocatorRef, CFDataRef); """ MACROS = """ """ CUSTOMIZATIONS = """ """
numenta/nupic
refs/heads/master
src/nupic/swarming/utils.py
10
# ---------------------------------------------------------------------- # Numenta Platform for Intelligent Computing (NuPIC) # Copyright (C) 2013, Numenta, Inc. Unless you have an agreement # with Numenta, Inc., for a separate license for this software code, the # following terms and conditions apply: # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero Public License version 3 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # See the GNU Affero Public License for more details. # # You should have received a copy of the GNU Affero Public License # along with this program. If not, see http://www.gnu.org/licenses. # # http://numenta.org/licenses/ # ---------------------------------------------------------------------- import copy import json import os import sys import tempfile import logging import re import traceback import StringIO from collections import namedtuple import pprint import shutil import types import signal import uuid import validictory from nupic.database.client_jobs_dao import ( ClientJobsDAO, InvalidConnectionException) # TODO: Note the function 'rUpdate' is also duplicated in the # nupic.data.dictutils module -- we will eventually want to change this # TODO: 'ValidationError', 'validate', 'loadJSONValueFromFile' duplicated in # nupic.data.jsonhelpers -- will want to remove later class JobFailException(Exception): """ If a model raises this exception, then the runModelXXX code will mark the job as canceled so that all other workers exit immediately, and mark the job as failed. """ pass def getCopyrightHead(): return """# ---------------------------------------------------------------------- # Numenta Platform for Intelligent Computing (NuPIC) # Copyright (C) 2013, Numenta, Inc. 
Unless you have an agreement # with Numenta, Inc., for a separate license for this software code, the # following terms and conditions apply: # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero Public License version 3 as # published by the Free Software Foundation. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. # See the GNU Affero Public License for more details. # # You should have received a copy of the GNU Affero Public License # along with this program. If not, see http://www.gnu.org/licenses. # # http://numenta.org/licenses/ # ---------------------------------------------------------------------- """ def _paramsFileHead(): """ This is the first portion of every sub-experiment params file we generate. Between the head and the tail are the experiment specific options. """ str = getCopyrightHead() + \ """ ## This file defines parameters for a prediction experiment. ############################################################################### # IMPORTANT!!! # This params file is dynamically generated by the RunExperimentPermutations # script. Any changes made manually will be over-written the next time # RunExperimentPermutations is run!!! ############################################################################### from nupic.frameworks.opf.exp_description_helpers import importBaseDescription # the sub-experiment configuration config ={ """ return str def _paramsFileTail(): """ This is the tail of every params file we generate. Between the head and the tail are the experiment specific options. """ str = \ """ } mod = importBaseDescription('base.py', config) locals().update(mod.__dict__) """ return str def _appendReportKeys(keys, prefix, results): """ Generate a set of possible report keys for an experiment's results. 
A report key is a string of key names separated by colons, each key being one level deeper into the experiment results dict. For example, 'key1:key2'. This routine is called recursively to build keys that are multiple levels deep from the results dict. Parameters: ----------------------------------------------------------- keys: Set of report keys accumulated so far prefix: prefix formed so far, this is the colon separated list of key names that led up to the dict passed in results results: dictionary of results at this level. """ allKeys = results.keys() allKeys.sort() for key in allKeys: if hasattr(results[key], 'keys'): _appendReportKeys(keys, "%s%s:" % (prefix, key), results[key]) else: keys.add("%s%s" % (prefix, key)) class _BadKeyError(Exception): """ If a model raises this exception, then the runModelXXX code will mark the job as canceled so that all other workers exit immediately, and mark the job as failed. """ pass def _matchReportKeys(reportKeyREs=[], allReportKeys=[]): """ Extract all items from the 'allKeys' list whose key matches one of the regular expressions passed in 'reportKeys'. Parameters: ---------------------------------------------------------------------------- reportKeyREs: List of regular expressions allReportKeys: List of all keys retval: list of keys from allReportKeys that match the regular expressions in 'reportKeyREs' If an invalid regular expression was included in 'reportKeys', then BadKeyError() is raised """ matchingReportKeys = [] # Extract the report items of interest for keyRE in reportKeyREs: # Find all keys that match this regular expression matchObj = re.compile(keyRE) found = False for keyName in allReportKeys: match = matchObj.match(keyName) if match and match.end() == len(keyName): matchingReportKeys.append(keyName) found = True if not found: raise _BadKeyError(keyRE) return matchingReportKeys def _getReportItem(itemName, results): """ Get a specific item by name out of the results dict. 
The format of itemName is a string of dictionary keys separated by colons, each key being one level deeper into the results dict. For example, 'key1:key2' would fetch results['key1']['key2']. If itemName is not found in results, then None is returned """ subKeys = itemName.split(':') subResults = results for subKey in subKeys: subResults = subResults[subKey] return subResults def filterResults(allResults, reportKeys, optimizeKey=None): """ Given the complete set of results generated by an experiment (passed in 'results'), filter out and return only the ones the caller wants, as specified through 'reportKeys' and 'optimizeKey'. A report key is a string of key names separated by colons, each key being one level deeper into the experiment results dict. For example, 'key1:key2'. Parameters: ------------------------------------------------------------------------- results: dict of all results generated by an experiment reportKeys: list of items from the results dict to include in the report. These can be regular expressions. optimizeKey: Which report item, if any, we will be optimizing for. This can also be a regular expression, but is an error if it matches more than one key from the experiment's results. 
retval: (reportDict, optimizeDict) reportDict: a dictionary of the metrics named by desiredReportKeys optimizeDict: A dictionary containing 1 item: the full name and value of the metric identified by the optimizeKey """ # Init return values optimizeDict = dict() # Get all available report key names for this experiment allReportKeys = set() _appendReportKeys(keys=allReportKeys, prefix='', results=allResults) #---------------------------------------------------------------------------- # Extract the report items that match the regular expressions passed in reportKeys matchingKeys = _matchReportKeys(reportKeys, allReportKeys) # Extract the values of the desired items reportDict = dict() for keyName in matchingKeys: value = _getReportItem(keyName, allResults) reportDict[keyName] = value # ------------------------------------------------------------------------- # Extract the report item that matches the regular expression passed in # optimizeKey if optimizeKey is not None: matchingKeys = _matchReportKeys([optimizeKey], allReportKeys) if len(matchingKeys) == 0: raise _BadKeyError(optimizeKey) elif len(matchingKeys) > 1: raise _BadOptimizeKeyError(optimizeKey, matchingKeys) optimizeKeyFullName = matchingKeys[0] # Get the value of the optimize metric value = _getReportItem(optimizeKeyFullName, allResults) optimizeDict[optimizeKeyFullName] = value reportDict[optimizeKeyFullName] = value # Return info return(reportDict, optimizeDict) def _quoteAndEscape(string): """ string: input string (ascii or unicode) Returns: a quoted string with characters that are represented in python via escape sequences converted to those escape sequences """ assert type(string) in types.StringTypes return pprint.pformat(string) def _handleModelRunnerException(jobID, modelID, jobsDAO, experimentDir, logger, e): """ Perform standard handling of an exception that occurs while running a model. 
Parameters: ------------------------------------------------------------------------- jobID: ID for this hypersearch job in the jobs table modelID: model ID jobsDAO: ClientJobsDAO instance experimentDir: directory containing the experiment logger: the logger to use e: the exception that occurred retval: (completionReason, completionMsg) """ msg = StringIO.StringIO() print >>msg, "Exception occurred while running model %s: %r (%s)" % ( modelID, e, type(e)) traceback.print_exc(None, msg) completionReason = jobsDAO.CMPL_REASON_ERROR completionMsg = msg.getvalue() logger.error(completionMsg) # Write results to the model database for the error case. Ignore # InvalidConnectionException, as this is usually caused by orphaned models # # TODO: do we really want to set numRecords to 0? Last updated value might # be useful for debugging if type(e) is not InvalidConnectionException: jobsDAO.modelUpdateResults(modelID, results=None, numRecords=0) # TODO: Make sure this wasn't the best model in job. If so, set the best # appropriately # If this was an exception that should mark the job as failed, do that # now. if type(e) == JobFailException: workerCmpReason = jobsDAO.jobGetFields(jobID, ['workerCompletionReason'])[0] if workerCmpReason == ClientJobsDAO.CMPL_REASON_SUCCESS: jobsDAO.jobSetFields(jobID, fields=dict( cancel=True, workerCompletionReason = ClientJobsDAO.CMPL_REASON_ERROR, workerCompletionMsg = ": ".join(str(i) for i in e.args)), useConnectionID=False, ignoreUnchanged=True) return (completionReason, completionMsg) def runModelGivenBaseAndParams(modelID, jobID, baseDescription, params, predictedField, reportKeys, optimizeKey, jobsDAO, modelCheckpointGUID, logLevel=None, predictionCacheMaxRecords=None): """ This creates an experiment directory with a base.py description file created from 'baseDescription' and a description.py generated from the given params dict and then runs the experiment. 
Parameters: ------------------------------------------------------------------------- modelID: ID for this model in the models table jobID: ID for this hypersearch job in the jobs table baseDescription: Contents of a description.py with the base experiment description params: Dictionary of specific parameters to override within the baseDescriptionFile. predictedField: Name of the input field for which this model is being optimized reportKeys: Which metrics of the experiment to store into the results dict of the model's database entry optimizeKey: Which metric we are optimizing for jobsDAO Jobs data access object - the interface to the jobs database which has the model's table. modelCheckpointGUID: A persistent, globally-unique identifier for constructing the model checkpoint key logLevel: override logging level to this value, if not None retval: (completionReason, completionMsg) """ from nupic.swarming.ModelRunner import OPFModelRunner # The logger for this method logger = logging.getLogger('com.numenta.nupic.hypersearch.utils') # -------------------------------------------------------------------------- # Create a temp directory for the experiment and the description files experimentDir = tempfile.mkdtemp() try: logger.info("Using experiment directory: %s" % (experimentDir)) # Create the decription.py from the overrides in params paramsFilePath = os.path.join(experimentDir, 'description.py') paramsFile = open(paramsFilePath, 'wb') paramsFile.write(_paramsFileHead()) items = params.items() items.sort() for (key,value) in items: quotedKey = _quoteAndEscape(key) if isinstance(value, basestring): paramsFile.write(" %s : '%s',\n" % (quotedKey , value)) else: paramsFile.write(" %s : %s,\n" % (quotedKey , value)) paramsFile.write(_paramsFileTail()) paramsFile.close() # Write out the base description baseParamsFile = open(os.path.join(experimentDir, 'base.py'), 'wb') baseParamsFile.write(baseDescription) baseParamsFile.close() # Store the experiment's sub-description file 
into the model table # for reference fd = open(paramsFilePath) expDescription = fd.read() fd.close() jobsDAO.modelSetFields(modelID, {'genDescription': expDescription}) # Run the experiment now try: runner = OPFModelRunner( modelID=modelID, jobID=jobID, predictedField=predictedField, experimentDir=experimentDir, reportKeyPatterns=reportKeys, optimizeKeyPattern=optimizeKey, jobsDAO=jobsDAO, modelCheckpointGUID=modelCheckpointGUID, logLevel=logLevel, predictionCacheMaxRecords=predictionCacheMaxRecords) signal.signal(signal.SIGINT, runner.handleWarningSignal) (completionReason, completionMsg) = runner.run() except InvalidConnectionException: raise except Exception, e: (completionReason, completionMsg) = _handleModelRunnerException(jobID, modelID, jobsDAO, experimentDir, logger, e) finally: # delete our temporary directory tree shutil.rmtree(experimentDir) signal.signal(signal.SIGINT, signal.default_int_handler) # Return completion reason and msg return (completionReason, completionMsg) def runDummyModel(modelID, jobID, params, predictedField, reportKeys, optimizeKey, jobsDAO, modelCheckpointGUID, logLevel=None, predictionCacheMaxRecords=None): from nupic.swarming.dummy_model_runner import OPFDummyModelRunner # The logger for this method logger = logging.getLogger('com.numenta.nupic.hypersearch.utils') # Run the experiment now try: if type(params) is bool: params = {} runner = OPFDummyModelRunner(modelID=modelID, jobID=jobID, params=params, predictedField=predictedField, reportKeyPatterns=reportKeys, optimizeKeyPattern=optimizeKey, jobsDAO=jobsDAO, modelCheckpointGUID=modelCheckpointGUID, logLevel=logLevel, predictionCacheMaxRecords=predictionCacheMaxRecords) (completionReason, completionMsg) = runner.run() # The dummy model runner will call sys.exit(1) if # NTA_TEST_sysExitFirstNModels is set and the number of models in the # models table is <= NTA_TEST_sysExitFirstNModels except SystemExit: sys.exit(1) except InvalidConnectionException: raise except Exception, e: 
(completionReason, completionMsg) = _handleModelRunnerException(jobID, modelID, jobsDAO, "NA", logger, e) # Return completion reason and msg return (completionReason, completionMsg) # Passed as parameter to ActivityMgr # # repeating: True if the activity is a repeating activite, False if one-shot # period: period of activity's execution (number of "ticks") # cb: a callable to call upon expiration of period; will be called # as cb() PeriodicActivityRequest = namedtuple("PeriodicActivityRequest", ("repeating", "period", "cb")) class PeriodicActivityMgr(object): """ TODO: move to shared script so that we can share it with run_opf_experiment """ # iteratorHolder: a list holding one iterator; we use a list so that we can # replace the iterator for repeating activities (a tuple would not # allow it if the field was an imutable value) Activity = namedtuple("Activity", ("repeating", "period", "cb", "iteratorHolder")) def __init__(self, requestedActivities): """ requestedActivities: a sequence of PeriodicActivityRequest elements """ self.__activities = [] for req in requestedActivities: act = self.Activity(repeating=req.repeating, period=req.period, cb=req.cb, iteratorHolder=[iter(xrange(req.period))]) self.__activities.append(act) return def tick(self): """ Activity tick handler; services all activities Returns: True if controlling iterator says it's okay to keep going; False to stop """ # Run activities whose time has come for act in self.__activities: if not act.iteratorHolder[0]: continue try: next(act.iteratorHolder[0]) except StopIteration: act.cb() if act.repeating: act.iteratorHolder[0] = iter(xrange(act.period)) else: act.iteratorHolder[0] = None return True def generatePersistentJobGUID(): """Generates a "persistentJobGUID" value. 
Parameters: ---------------------------------------------------------------------- retval: A persistentJobGUID value """ return "JOB_UUID1-" + str(uuid.uuid1()) def identityConversion(value, _keys): return value def rCopy(d, f=identityConversion, discardNoneKeys=True, deepCopy=True): """Recursively copies a dict and returns the result. Args: d: The dict to copy. f: A function to apply to values when copying that takes the value and the list of keys from the root of the dict to the value and returns a value for the new dict. discardNoneKeys: If True, discard key-value pairs when f returns None for the value. deepCopy: If True, all values in returned dict are true copies (not the same object). Returns: A new dict with keys and values from d replaced with the result of f. """ # Optionally deep copy the dict. if deepCopy: d = copy.deepcopy(d) newDict = {} toCopy = [(k, v, newDict, ()) for k, v in d.iteritems()] while len(toCopy) > 0: k, v, d, prevKeys = toCopy.pop() prevKeys = prevKeys + (k,) if isinstance(v, dict): d[k] = dict() toCopy[0:0] = [(innerK, innerV, d[k], prevKeys) for innerK, innerV in v.iteritems()] else: #print k, v, prevKeys newV = f(v, prevKeys) if not discardNoneKeys or newV is not None: d[k] = newV return newDict def rApply(d, f): """Recursively applies f to the values in dict d. Args: d: The dict to recurse over. f: A function to apply to values in d that takes the value and a list of keys from the root of the dict to the value. """ remainingDicts = [(d, ())] while len(remainingDicts) > 0: current, prevKeys = remainingDicts.pop() for k, v in current.iteritems(): keys = prevKeys + (k,) if isinstance(v, dict): remainingDicts.insert(0, (v, keys)) else: f(v, keys) def clippedObj(obj, maxElementSize=64): """ Return a clipped version of obj suitable for printing, This is useful when generating log messages by printing data structures, but don't want the message to be too long. 
If passed in a dict, list, or namedtuple, each element of the structure's string representation will be limited to 'maxElementSize' characters. This will return a new object where the string representation of each element has been truncated to fit within maxElementSize. """ # Is it a named tuple? if hasattr(obj, '_asdict'): obj = obj._asdict() # Printing a dict? if isinstance(obj, dict): objOut = dict() for key,val in obj.iteritems(): objOut[key] = clippedObj(val) # Printing a list? elif hasattr(obj, '__iter__'): objOut = [] for val in obj: objOut.append(clippedObj(val)) # Some other object else: objOut = str(obj) if len(objOut) > maxElementSize: objOut = objOut[0:maxElementSize] + '...' return objOut class ValidationError(validictory.ValidationError): pass def validate(value, **kwds): """ Validate a python value against json schema: validate(value, schemaPath) validate(value, schemaDict) value: python object to validate against the schema The json schema may be specified either as a path of the file containing the json schema or as a python dictionary using one of the following keywords as arguments: schemaPath: Path of file containing the json schema object. schemaDict: Python dictionary containing the json schema object Returns: nothing Raises: ValidationError when value fails json validation """ assert len(kwds.keys()) >= 1 assert 'schemaPath' in kwds or 'schemaDict' in kwds schemaDict = None if 'schemaPath' in kwds: schemaPath = kwds.pop('schemaPath') schemaDict = loadJsonValueFromFile(schemaPath) elif 'schemaDict' in kwds: schemaDict = kwds.pop('schemaDict') try: validictory.validate(value, schemaDict, **kwds) except validictory.ValidationError as e: raise ValidationError(e) def loadJsonValueFromFile(inputFilePath): """ Loads a json value from a file and converts it to the corresponding python object. 
inputFilePath: Path of the json file; Returns: python value that represents the loaded json value """ with open(inputFilePath) as fileObj: value = json.load(fileObj) return value def sortedJSONDumpS(obj): """ Return a JSON representation of obj with sorted keys on any embedded dicts. This insures that the same object will always be represented by the same string even if it contains dicts (where the sort order of the keys is normally undefined). """ itemStrs = [] if isinstance(obj, dict): items = obj.items() items.sort() for key, value in items: itemStrs.append('%s: %s' % (json.dumps(key), sortedJSONDumpS(value))) return '{%s}' % (', '.join(itemStrs)) elif hasattr(obj, '__iter__'): for val in obj: itemStrs.append(sortedJSONDumpS(val)) return '[%s]' % (', '.join(itemStrs)) else: return json.dumps(obj)
vightel/FloodMapsWorkshop
refs/heads/master
python/ef5_inundation.py
3
# # Processes Flood Inundation Maps from EF5 http://flash.ou.edu/pakistan/ # import os, sys from datetime import date from dateutil.parser import parse import glob, fnmatch, urllib, math, shutil from osgeo import gdal import numpy import argparse import config import json from browseimage import MakeBrowseImage from s3 import CopyToS3 from level import CreateLevel force = 0 verbose = 0 BASE_DIR = config.EF5_DIR def execute( cmd ): if verbose: print cmd os.system(cmd) def process(mydir, scene, s3_bucket, s3_folder): fullName = os.path.join(mydir, scene+".tif") if not os.path.exists(fullName): print "File does not exist", fullName sys.exit(-1) # Flood inundation map for Namibia has to large of an extent [[10,-30,30,-10]] # we can trim it [15, -20, 20, -10] subsetFileName = os.path.join(mydir, "%s_subset.tif" % scene) if force or not os.path.exists(subsetFileName): bbox = [15, -20, 20, -12] warpOptions = "-q -overwrite -co COMPRESS=DEFLATE -t_srs EPSG:4326 -te %s %s %s %s " % (bbox[0], bbox[1], bbox[2], bbox[3]) warpCmd = 'gdalwarp ' + warpOptions + fullName + ' ' + subsetFileName execute( warpCmd ) #sys.exit(-1) geojsonDir = os.path.join(mydir,"geojson") if not os.path.exists(geojsonDir): os.makedirs(geojsonDir) levelsDir = os.path.join(mydir,"levels") if not os.path.exists(levelsDir): os.makedirs(levelsDir) merge_filename = os.path.join(geojsonDir, "ef5.%s.geojson" % scene) topojson_filename = os.path.join(geojsonDir, "..", "ef5.%s.topojson" % scene) browse_filename = os.path.join(geojsonDir, "..", "ef5.%s_browse.tif" % scene) small_filename = os.path.join(geojsonDir, "..", "ef5.%s_small_browse.tif" % scene) osm_bg_image = os.path.join(geojsonDir, "..", "osm_bg.png") sw_osm_image = os.path.join(geojsonDir, "..", "ef5.%s_thn.jpg" % scene) ds = gdal.Open( subsetFileName ) band = ds.GetRasterBand(1) data = band.ReadAsArray(0, 0, ds.RasterXSize, ds.RasterYSize ) levels = [ 21, 13, 8, 5, 3, 2, 1] hexColors = [ "#fee5d9", "#fcbba1", "#fc9272", "#fb6a4a", "#ef3b2c", 
"#cb181d", "#99000d"] if force or not os.path.exists(topojson_filename+".gz"): if verbose: print "Processing", subsetFileName for l in levels: fileName = os.path.join(levelsDir, scene+"_level_%d.tif"%l) CreateLevel(l, geojsonDir, fileName, ds, data, "height", force, verbose) jsonDict = dict(type='FeatureCollection', features=[]) for l in reversed(levels): fileName = os.path.join(geojsonDir, "height_level_%d.geojson"%l) if os.path.exists(fileName): print "merge", fileName with open(fileName) as data_file: data = json.load(data_file) if 'features' in data: for f in data['features']: jsonDict['features'].append(f) with open(merge_filename, 'w') as outfile: json.dump(jsonDict, outfile) # Convert to topojson cmd = "topojson -p -o "+ topojson_filename + " " + merge_filename execute(cmd) cmd = "gzip --keep "+ topojson_filename execute(cmd) if force or not os.path.exists(sw_osm_image): MakeBrowseImage(ds, browse_filename, subsetFileName, osm_bg_image, sw_osm_image, levels, hexColors, force, verbose, 6) # we could remove geojsonDir and levelsDir #cmd = "rm -rf %s %s" %(geojsonDir, levelsDir) ds = None file_list = [ sw_osm_image, topojson_filename, topojson_filename+".gz", fullName ] CopyToS3( s3_bucket, s3_folder, file_list, force, verbose ) # =============================== # Main # # python ef5_inundation.py --date 2015-02-03 -v -f if __name__ == '__main__': aws_access_key = os.environ.get('AWS_ACCESSKEYID') aws_secret_access_key = os.environ.get('AWS_SECRETACCESSKEY') parser = argparse.ArgumentParser(description='Generate EF5 flood map') apg_input = parser.add_argument_group('Input') apg_input.add_argument("-f", "--force", action='store_true', help="HydroSHEDS forces new water image to be generated") apg_input.add_argument("-v", "--verbose", action='store_true', help="Verbose on/off") apg_input.add_argument("-d", "--date", help="Date 2015-03-20 or today if not defined") todaystr = date.today().strftime("%Y-%m-%d") options = parser.parse_args() dt = options.date or 
todaystr force = options.force verbose = options.verbose today = parse(dt) year = today.year month = today.month day = today.day doy = today.strftime('%j') ef5_dir = os.path.join(BASE_DIR,str(year),doy) old_fileName = "%d%02d%02d.120000" % (year,month,day) old_fullName = os.path.join(ef5_dir, old_fileName) fileName = "%d%02d%02d" % (year,month,day) fullName = os.path.join(ef5_dir, fileName) shutil.copy2(old_fullName+".tif", fullName+".tif") s3_folder = os.path.join("ef5", str(year), doy) s3_bucket = 'ojo-d4' # Namibia process(ef5_dir, fileName, s3_bucket, s3_folder)
FireballDWF/cloud-custodian
refs/heads/master
c7n/resources/sqs.py
3
# Copyright 2016-2017 Capital One Services, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from __future__ import absolute_import, division, print_function, unicode_literals from botocore.exceptions import ClientError import json from c7n.actions import RemovePolicyBase from c7n.filters import CrossAccountAccessFilter, MetricsFilter from c7n.filters.kms import KmsRelatedFilter from c7n.manager import resources from c7n.utils import local_session from c7n.query import QueryResourceManager, TypeInfo from c7n.actions import BaseAction from c7n.utils import type_schema from c7n.tags import universal_augment, register_universal_tags @resources.register('sqs') class SQS(QueryResourceManager): class resource_type(TypeInfo): service = 'sqs' arn_type = "" enum_spec = ('list_queues', 'QueueUrls', None) detail_spec = ("get_queue_attributes", "QueueUrl", None, "Attributes") id = 'QueueUrl' arn = "QueueArn" filter_name = 'QueueNamePrefix' filter_type = 'scalar' name = 'QueueUrl' date = 'CreatedTimestamp' dimension = 'QueueName' default_report_fields = ( 'QueueArn', 'CreatedTimestamp', 'ApproximateNumberOfMessages', ) def get_permissions(self): perms = super(SQS, self).get_permissions() perms.append('sqs:GetQueueAttributes') return perms def get_resources(self, ids, cache=True): ids_normalized = [] for i in ids: if not i.startswith('https://'): ids_normalized.append(i) continue ids_normalized.append(i.rsplit('/', 1)[-1]) return super(SQS, self).get_resources(ids_normalized, cache) def 
augment(self, resources): client = local_session(self.session_factory).client('sqs') def _augment(r): try: queue = self.retry( client.get_queue_attributes, QueueUrl=r, AttributeNames=['All'])['Attributes'] queue['QueueUrl'] = r except ClientError as e: if e.response['Error']['Code'] == 'AWS.SimpleQueueService.NonExistentQueue': return if e.response['Error']['Code'] == 'AccessDenied': self.log.warning("Denied access to sqs %s" % r) return raise return queue with self.executor_factory(max_workers=2) as w: return universal_augment( self, list(filter(None, w.map(_augment, resources)))) register_universal_tags( SQS.filter_registry, SQS.action_registry, compatibility=False) @SQS.filter_registry.register('metrics') class MetricsFilter(MetricsFilter): def get_dimensions(self, resource): return [ {'Name': 'QueueName', 'Value': resource['QueueUrl'].rsplit('/', 1)[-1]}] @SQS.filter_registry.register('cross-account') class SQSCrossAccount(CrossAccountAccessFilter): """Filter SQS queues which have cross account permissions :example: .. code-block:: yaml policies: - name: sqs-cross-account resource: sqs filters: - type: cross-account """ permissions = ('sqs:GetQueueAttributes',) @SQS.filter_registry.register('kms-key') class KmsFilter(KmsRelatedFilter): """ Filter a resource by its associcated kms key and optionally the aliasname of the kms key by using 'c7n:AliasName' The KmsMasterId returned for SQS sometimes has the alias name directly in the value. :example: .. code-block:: yaml policies: - name: sqs-kms-key-filters resource: aws.sqs filters: - or: - type: value key: KmsMasterKeyId value: "^(alias/aws/)" op: regex - type: kms-key key: c7n:AliasName value: "^(alias/aws/)" op: regex """ RelatedIdsExpression = 'KmsMasterKeyId' @SQS.action_registry.register('remove-statements') class RemovePolicyStatement(RemovePolicyBase): """Action to remove policy statements from SQS :example: .. 
code-block:: yaml policies: - name: remove-sqs-cross-account resource: sqs filters: - type: cross-account actions: - type: remove-statements statement_ids: matched """ permissions = ('sqs:GetQueueAttributes', 'sqs:RemovePermission') def process(self, resources): results = [] client = local_session(self.manager.session_factory).client('sqs') for r in resources: try: results += filter(None, [self.process_resource(client, r)]) except Exception: self.log.exception( "Error processing sqs:%s", r['QueueUrl']) return results def process_resource(self, client, resource): p = resource.get('Policy') if p is None: return p = json.loads(resource['Policy']) statements, found = self.process_policy( p, resource, CrossAccountAccessFilter.annotation_key) if not found: return for f in found: client.remove_permission( QueueUrl=resource['QueueUrl'], Label=f['Sid']) return {'Name': resource['QueueUrl'], 'State': 'PolicyRemoved', 'Statements': found} @SQS.action_registry.register('delete') class DeleteSqsQueue(BaseAction): """Action to delete a SQS queue To prevent unwanted deletion of SQS queues, it is recommended to include a filter :example: .. code-block:: yaml policies: - name: sqs-delete resource: sqs filters: - KmsMasterKeyId: absent actions: - type: delete """ schema = type_schema('delete') permissions = ('sqs:DeleteQueue',) def process(self, queues): client = local_session(self.manager.session_factory).client('sqs') for q in queues: self.process_queue(client, q) def process_queue(self, client, queue): try: client.delete_queue(QueueUrl=queue['QueueUrl']) except (client.exceptions.QueueDoesNotExist, client.exceptions.QueueDeletedRecently): pass @SQS.action_registry.register('set-encryption') class SetEncryption(BaseAction): """Action to set encryption key on SQS queue :example: .. 
code-block:: yaml policies: - name: sqs-set-encrypt resource: sqs filters: - KmsMasterKeyId: absent actions: - type: set-encryption key: "<alias of kms key>" """ schema = type_schema( 'set-encryption', key={'type': 'string'}, required=('key',)) permissions = ('sqs:SetQueueAttributes',) def process(self, queues): # get KeyId key = "alias/" + self.data.get('key') session = local_session(self.manager.session_factory) key_id = session.client( 'kms').describe_key(KeyId=key)['KeyMetadata']['KeyId'] client = session.client('sqs') for q in queues: self.process_queue(client, q, key_id) def process_queue(self, client, queue, key_id): try: client.set_queue_attributes( QueueUrl=queue['QueueUrl'], Attributes={'KmsMasterKeyId': key_id} ) except (client.exceptions.QueueDoesNotExist,) as e: self.log.exception( "Exception modifying queue:\n %s" % e) @SQS.action_registry.register('set-retention-period') class SetRetentionPeriod(BaseAction): """Action to set the retention period on an SQS queue (in seconds) :example: .. code-block:: yaml policies: - name: sqs-reduce-long-retention-period resource: sqs filters: - type: value key: MessageRetentionPeriod value_type: integer value: 345600 op: ge actions: - type: set-retention-period period: 86400 """ schema = type_schema( 'set-retention-period', period={'type': 'integer', 'minimum': 60, 'maximum': 1209600}) permissions = ('sqs:SetQueueAttributes',) def process(self, queues): client = local_session(self.manager.session_factory).client('sqs') period = str(self.data.get('period', 345600)) for q in queues: client.set_queue_attributes( QueueUrl=q['QueueUrl'], Attributes={ 'MessageRetentionPeriod': period})
heiher/libreoffice-core
refs/heads/mips64-dev
pyuno/qa/pytests/testcollections_XCellRange.py
4
#!/usr/bin/env python # # This file is part of the LibreOffice project. # # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. # import unittest import uno from testcollections_base import CollectionsTestBase from com.sun.star.beans import PropertyValue from com.sun.star.table import CellAddress # Tests behaviour of objects implementing XCellRange using the new-style # collection accessors class TestXCellRange(CollectionsTestBase): # TODO negative indices # Tests syntax: # cell = cellrange[0,0] # Access cell by indices # For: # Spreadsheet # Cell at Row 0, Col 0 def test_XCellRange_Spreadsheet_Cell_00(self): # Given spr = self.createBlankSpreadsheet() sht = spr.Sheets.getByIndex(0) # When cell = sht[0,0] # Then self.assertEqual(0, cell.CellAddress.Sheet) self.assertEqual(0, cell.CellAddress.Row) self.assertEqual(0, cell.CellAddress.Column) # Tests syntax: # cell = cellrange[0,0] # Access cell by indices # For: # Text table # Cell at Row 0, Col 0 def test_XCellRange_Table_Cell_00(self): # Given doc = self.createBlankTextDocument() textTable = doc.createInstance('com.sun.star.text.TextTable') textTable.initialize(10,10) cursor = doc.Text.createTextCursor() doc.Text.insertTextContent(cursor, textTable, False) tbl = doc.TextTables.getByIndex(0) # When cell = tbl[0,0] # Then self.assertEqual('A1', cell.CellName) # Tests syntax: # cell = cellrange[0,0] # Access cell by indices # For: # Spreadsheet # Cell at Row 3, Col 7 def test_XCellRange_Spreadsheet_Cell_37(self): # Given spr = self.createBlankSpreadsheet() sht = spr.Sheets.getByIndex(0) # When rng = sht[3,7] # Then self.assertEqual(0, rng.CellAddress.Sheet) self.assertEqual(3, rng.CellAddress.Row) self.assertEqual(7, rng.CellAddress.Column) # Tests syntax: # cell = cellrange[0,0] # Access cell by indices # For: # Text table # Cell at Row 3, Col 7 def 
test_XCellRange_Table_Cell_37(self): # Given doc = self.createBlankTextDocument() textTable = doc.createInstance('com.sun.star.text.TextTable') textTable.initialize(10,10) cursor = doc.Text.createTextCursor() doc.Text.insertTextContent(cursor, textTable, False) tbl = doc.TextTables.getByIndex(0) # When cell = tbl[3,7] # Then self.assertEqual('H4', cell.CellName) # Tests syntax: # rng = cellrange[0,1:2] # Access cell range by index,slice # For: # Spreadsheet def test_XCellRange_Spreadsheet_Range_Index_Slice(self): # Given spr = self.createBlankSpreadsheet() sht = spr.Sheets.getByIndex(0) # When rng = sht[0,1:3] # Then self.assertEqual(0, rng.RangeAddress.Sheet) self.assertEqual(0, rng.RangeAddress.StartRow) self.assertEqual(1, rng.RangeAddress.StartColumn) self.assertEqual(0, rng.RangeAddress.EndRow) self.assertEqual(2, rng.RangeAddress.EndColumn) # Tests syntax: # rng = cellrange[0,1:2] # Access cell range by index,slice # For: # Text table def test_XCellRange_Table_Range_Index_Slice(self): # Given doc = self.createBlankTextDocument() textTable = doc.createInstance('com.sun.star.text.TextTable') textTable.initialize(10,10) cursor = doc.Text.createTextCursor() doc.Text.insertTextContent(cursor, textTable, False) tbl = doc.TextTables.getByIndex(0) doc.lockControllers() tbl.DataArray = tuple(tuple(str(100 + y) for y in range(10*x,10*x + 10)) for x in range(10)) doc.unlockControllers() # When rng = tbl[0,1:3] # Then self.assertEqual((('101', '102'),), rng.DataArray) # Tests syntax: # rng = cellrange[1:2,0] # Access cell range by slice,index # For: # Spreadsheet def test_XCellRange_Spreadsheet_Range_Slice_Index(self): # Given spr = self.createBlankSpreadsheet() sht = spr.Sheets.getByIndex(0) # When rng = sht[1:3,0] # Then self.assertEqual(0, rng.RangeAddress.Sheet) self.assertEqual(1, rng.RangeAddress.StartRow) self.assertEqual(0, rng.RangeAddress.StartColumn) self.assertEqual(2, rng.RangeAddress.EndRow) self.assertEqual(0, rng.RangeAddress.EndColumn) # Tests syntax: # 
rng = cellrange[1:2,0] # Access cell range by index,slice # For: # Text table def test_XCellRange_Table_Range_Slice_Index(self): # Given doc = self.createBlankTextDocument() textTable = doc.createInstance('com.sun.star.text.TextTable') textTable.initialize(10,10) cursor = doc.Text.createTextCursor() doc.Text.insertTextContent(cursor, textTable, False) tbl = doc.TextTables.getByIndex(0) doc.lockControllers() tbl.DataArray = tuple(tuple(str(100 + y) for y in range(10*x,10*x + 10)) for x in range(10)) doc.unlockControllers() # When rng = tbl[1:3,0] # Then self.assertEqual((('110',), ('120',)), rng.DataArray) # Tests syntax: # rng = cellrange[0:1,2:3] # Access cell range by slices # For: # Spreadsheet def test_XCellRange_Spreadsheet_Range_Slices(self): # Given spr = self.createBlankSpreadsheet() sht = spr.Sheets.getByIndex(0) # When rng = sht[1:3,3:5] # Then self.assertEqual(0, rng.RangeAddress.Sheet) self.assertEqual(1, rng.RangeAddress.StartRow) self.assertEqual(3, rng.RangeAddress.StartColumn) self.assertEqual(2, rng.RangeAddress.EndRow) self.assertEqual(4, rng.RangeAddress.EndColumn) # Tests syntax: # rng = cellrange[0:1,2:3] # Access cell range by slices # For: # Spreadsheet # Zero rows/columns def test_XCellRange_Spreadsheet_Range_Slices_Invalid(self): # Given spr = self.createBlankSpreadsheet() sht = spr.Sheets.getByIndex(0) # When / Then with self.assertRaises(KeyError): rng = sht[1:1,3:5] with self.assertRaises(KeyError): rng = sht[1:3,3:3] # Tests syntax: # rng = cellrange[0:1,2:3] # Access cell range by slices # For: # Text table def test_XCellRange_Table_Range_Slices(self): # Given doc = self.createBlankTextDocument() textTable = doc.createInstance('com.sun.star.text.TextTable') textTable.initialize(10,10) cursor = doc.Text.createTextCursor() doc.Text.insertTextContent(cursor, textTable, False) tbl = doc.TextTables.getByIndex(0) doc.lockControllers() tbl.DataArray = tuple(tuple(str(100 + y) for y in range(10*x,10*x + 10)) for x in range(10)) 
doc.unlockControllers() # When rng = tbl[1:3,3:5] # Then self.assertEqual((('113', '114'), ('123', '124')), rng.DataArray) # Tests syntax: # rng = cellrange['A1:B2'] # Access cell range by descriptor # For: # Spreadsheet def test_XCellRange_Spreadsheet_Range_Descriptor(self): # Given spr = self.createBlankSpreadsheet() sht = spr.Sheets.getByIndex(0) # When rng = sht['A3:B4'] # Then self.assertEqual(0, rng.RangeAddress.Sheet) self.assertEqual(2, rng.RangeAddress.StartRow) self.assertEqual(0, rng.RangeAddress.StartColumn) self.assertEqual(3, rng.RangeAddress.EndRow) self.assertEqual(1, rng.RangeAddress.EndColumn) # Tests syntax: # rng = cellrange['A1:B2'] # Access cell range by descriptor # For: # Table def test_XCellRange_Table_Range_Descriptor(self): # Given doc = self.createBlankTextDocument() textTable = doc.createInstance('com.sun.star.text.TextTable') textTable.initialize(10,10) cursor = doc.Text.createTextCursor() doc.Text.insertTextContent(cursor, textTable, False) tbl = doc.TextTables.getByIndex(0) doc.lockControllers() tbl.DataArray = tuple(tuple(str(100 + y) for y in range(10*x,10*x + 10)) for x in range(10)) doc.unlockControllers() # When rng = tbl['A3:B4'] # Then self.assertEqual((('120', '121'), ('130', '131')), rng.DataArray) # Tests syntax: # rng = cellrange['Name'] # Access cell range by name # For: # Spreadsheet def test_XCellRange_Spreadsheet_Range_Name(self): # Given spr = self.createBlankSpreadsheet() sht = spr.Sheets.getByIndex(0) expr = '$' + sht.Name + '.$C2:F10' addr = CellAddress(Sheet=0,Row=1,Column=2) sht.NamedRanges.addNewByName('foo', expr, addr, 0) # When rng = sht['foo'] # Then self.assertEqual(0, rng.RangeAddress.Sheet) self.assertEqual(1, rng.RangeAddress.StartRow) self.assertEqual(2, rng.RangeAddress.StartColumn) self.assertEqual(9, rng.RangeAddress.EndRow) self.assertEqual(5, rng.RangeAddress.EndColumn) # Tests syntax: # rng = cellrange[0] # Access cell range by row index # For: # Spreadsheet def 
test_XCellRange_Spreadsheet_Range_RowIndex(self): # Given spr = self.createBlankSpreadsheet() sht = spr.Sheets.getByIndex(0) # When rng = sht[0] # Then self.assertEqual(0, rng.RangeAddress.Sheet) self.assertEqual(0, rng.RangeAddress.StartRow) self.assertEqual(0, rng.RangeAddress.StartColumn) self.assertEqual(0, rng.RangeAddress.EndRow) self.assertEqual(1023, rng.RangeAddress.EndColumn) # Tests syntax: # rng = cellrange[0,:] # Access cell range by row index # For: # Spreadsheet def test_XCellRange_Spreadsheet_Range_RowIndex_FullSlice(self): # Given spr = self.createBlankSpreadsheet() sht = spr.Sheets.getByIndex(0) # When rng = sht[0,:] # Then self.assertEqual(0, rng.RangeAddress.Sheet) self.assertEqual(0, rng.RangeAddress.StartRow) self.assertEqual(0, rng.RangeAddress.StartColumn) self.assertEqual(0, rng.RangeAddress.EndRow) self.assertEqual(1023, rng.RangeAddress.EndColumn) # Tests syntax: # rng = cellrange[:,0] # Access cell range by column index # For: # Spreadsheet def test_XCellRange_Spreadsheet_Range_FullSlice_ColumnIndex(self): # Given spr = self.createBlankSpreadsheet() sht = spr.Sheets.getByIndex(0) # When rng = sht[:,0] # Then self.assertEqual(0, rng.RangeAddress.Sheet) self.assertEqual(0, rng.RangeAddress.StartRow) self.assertEqual(0, rng.RangeAddress.StartColumn) self.assertEqual(1048575, rng.RangeAddress.EndRow) self.assertEqual(0, rng.RangeAddress.EndColumn) if __name__ == '__main__': unittest.main() # vim:set shiftwidth=4 softtabstop=4 expandtab:
shaistaansari/django
refs/heads/master
tests/template_tests/syntax_tests/test_if_equal.py
368
"""Tests for the ``{% ifequal %}`` / ``{% ifnotequal %}`` template tags."""
from django.test import SimpleTestCase

from ..utils import setup


class IfEqualTagTests(SimpleTestCase):
    """Render ``{% ifequal %}`` templates and assert which branch is taken."""

    @setup({'ifequal01': '{% ifequal a b %}yes{% endifequal %}'})
    def test_ifequal01(self):
        output = self.engine.render_to_string('ifequal01', {'a': 1, 'b': 2})
        self.assertEqual(output, '')

    @setup({'ifequal02': '{% ifequal a b %}yes{% endifequal %}'})
    def test_ifequal02(self):
        output = self.engine.render_to_string('ifequal02', {'a': 1, 'b': 1})
        self.assertEqual(output, 'yes')

    @setup({'ifequal03': '{% ifequal a b %}yes{% else %}no{% endifequal %}'})
    def test_ifequal03(self):
        output = self.engine.render_to_string('ifequal03', {'a': 1, 'b': 2})
        self.assertEqual(output, 'no')

    @setup({'ifequal04': '{% ifequal a b %}yes{% else %}no{% endifequal %}'})
    def test_ifequal04(self):
        output = self.engine.render_to_string('ifequal04', {'a': 1, 'b': 1})
        self.assertEqual(output, 'yes')

    @setup({'ifequal05': '{% ifequal a \'test\' %}yes{% else %}no{% endifequal %}'})
    def test_ifequal05(self):
        output = self.engine.render_to_string('ifequal05', {'a': 'test'})
        self.assertEqual(output, 'yes')

    @setup({'ifequal06': '{% ifequal a \'test\' %}yes{% else %}no{% endifequal %}'})
    def test_ifequal06(self):
        output = self.engine.render_to_string('ifequal06', {'a': 'no'})
        self.assertEqual(output, 'no')

    @setup({'ifequal07': '{% ifequal a "test" %}yes{% else %}no{% endifequal %}'})
    def test_ifequal07(self):
        output = self.engine.render_to_string('ifequal07', {'a': 'test'})
        self.assertEqual(output, 'yes')

    @setup({'ifequal08': '{% ifequal a "test" %}yes{% else %}no{% endifequal %}'})
    def test_ifequal08(self):
        output = self.engine.render_to_string('ifequal08', {'a': 'no'})
        self.assertEqual(output, 'no')

    @setup({'ifequal09': '{% ifequal a "test" %}yes{% else %}no{% endifequal %}'})
    def test_ifequal09(self):
        # An undefined variable is not equal to a non-empty string literal.
        output = self.engine.render_to_string('ifequal09')
        self.assertEqual(output, 'no')

    @setup({'ifequal10': '{% ifequal a b %}yes{% else %}no{% endifequal %}'})
    def test_ifequal10(self):
        # Two undefined variables compare equal to each other.
        output = self.engine.render_to_string('ifequal10')
        self.assertEqual(output, 'yes')

    # SMART SPLITTING
    @setup({'ifequal-split01': '{% ifequal a "test man" %}yes{% else %}no{% endifequal %}'})
    def test_ifequal_split01(self):
        output = self.engine.render_to_string('ifequal-split01')
        self.assertEqual(output, 'no')

    @setup({'ifequal-split02': '{% ifequal a "test man" %}yes{% else %}no{% endifequal %}'})
    def test_ifequal_split02(self):
        output = self.engine.render_to_string('ifequal-split02', {'a': 'foo'})
        self.assertEqual(output, 'no')

    @setup({'ifequal-split03': '{% ifequal a "test man" %}yes{% else %}no{% endifequal %}'})
    def test_ifequal_split03(self):
        output = self.engine.render_to_string('ifequal-split03', {'a': 'test man'})
        self.assertEqual(output, 'yes')

    @setup({'ifequal-split04': '{% ifequal a \'test man\' %}yes{% else %}no{% endifequal %}'})
    def test_ifequal_split04(self):
        output = self.engine.render_to_string('ifequal-split04', {'a': 'test man'})
        self.assertEqual(output, 'yes')

    @setup({'ifequal-split05': '{% ifequal a \'i "love" you\' %}yes{% else %}no{% endifequal %}'})
    def test_ifequal_split05(self):
        output = self.engine.render_to_string('ifequal-split05', {'a': ''})
        self.assertEqual(output, 'no')

    @setup({'ifequal-split06': '{% ifequal a \'i "love" you\' %}yes{% else %}no{% endifequal %}'})
    def test_ifequal_split06(self):
        output = self.engine.render_to_string('ifequal-split06', {'a': 'i "love" you'})
        self.assertEqual(output, 'yes')

    @setup({'ifequal-split07': '{% ifequal a \'i "love" you\' %}yes{% else %}no{% endifequal %}'})
    def test_ifequal_split07(self):
        output = self.engine.render_to_string('ifequal-split07', {'a': 'i love you'})
        self.assertEqual(output, 'no')

    @setup({'ifequal-split08': r"{% ifequal a 'I\'m happy' %}yes{% else %}no{% endifequal %}"})
    def test_ifequal_split08(self):
        # Escaped quote inside a single-quoted template literal.
        output = self.engine.render_to_string('ifequal-split08', {'a': "I'm happy"})
        self.assertEqual(output, 'yes')

    @setup({'ifequal-split09': r"{% ifequal a 'slash\man' %}yes{% else %}no{% endifequal %}"})
    def test_ifequal_split09(self):
        # Backslash in the literal is kept verbatim.
        output = self.engine.render_to_string('ifequal-split09', {'a': 'slash\man'})
        self.assertEqual(output, 'yes')

    @setup({'ifequal-split10': r"{% ifequal a 'slash\man' %}yes{% else %}no{% endifequal %}"})
    def test_ifequal_split10(self):
        output = self.engine.render_to_string('ifequal-split10', {'a': 'slashman'})
        self.assertEqual(output, 'no')

    # NUMERIC RESOLUTION
    @setup({'ifequal-numeric01': '{% ifequal x 5 %}yes{% endifequal %}'})
    def test_ifequal_numeric01(self):
        # String '5' does not equal integer literal 5.
        output = self.engine.render_to_string('ifequal-numeric01', {'x': '5'})
        self.assertEqual(output, '')

    @setup({'ifequal-numeric02': '{% ifequal x 5 %}yes{% endifequal %}'})
    def test_ifequal_numeric02(self):
        output = self.engine.render_to_string('ifequal-numeric02', {'x': 5})
        self.assertEqual(output, 'yes')

    @setup({'ifequal-numeric03': '{% ifequal x 5.2 %}yes{% endifequal %}'})
    def test_ifequal_numeric03(self):
        output = self.engine.render_to_string('ifequal-numeric03', {'x': 5})
        self.assertEqual(output, '')

    @setup({'ifequal-numeric04': '{% ifequal x 5.2 %}yes{% endifequal %}'})
    def test_ifequal_numeric04(self):
        output = self.engine.render_to_string('ifequal-numeric04', {'x': 5.2})
        self.assertEqual(output, 'yes')

    @setup({'ifequal-numeric05': '{% ifequal x 0.2 %}yes{% endifequal %}'})
    def test_ifequal_numeric05(self):
        output = self.engine.render_to_string('ifequal-numeric05', {'x': 0.2})
        self.assertEqual(output, 'yes')

    @setup({'ifequal-numeric06': '{% ifequal x .2 %}yes{% endifequal %}'})
    def test_ifequal_numeric06(self):
        # Literal '.2' resolves as the float 0.2.
        output = self.engine.render_to_string('ifequal-numeric06', {'x': 0.2})
        self.assertEqual(output, 'yes')

    @setup({'ifequal-numeric07': '{% ifequal x 2. %}yes{% endifequal %}'})
    def test_ifequal_numeric07(self):
        # Trailing-dot literal '2.' is NOT treated as the number 2.
        output = self.engine.render_to_string('ifequal-numeric07', {'x': 2})
        self.assertEqual(output, '')

    @setup({'ifequal-numeric08': '{% ifequal x "5" %}yes{% endifequal %}'})
    def test_ifequal_numeric08(self):
        output = self.engine.render_to_string('ifequal-numeric08', {'x': 5})
        self.assertEqual(output, '')

    @setup({'ifequal-numeric09': '{% ifequal x "5" %}yes{% endifequal %}'})
    def test_ifequal_numeric09(self):
        output = self.engine.render_to_string('ifequal-numeric09', {'x': '5'})
        self.assertEqual(output, 'yes')

    @setup({'ifequal-numeric10': '{% ifequal x -5 %}yes{% endifequal %}'})
    def test_ifequal_numeric10(self):
        output = self.engine.render_to_string('ifequal-numeric10', {'x': -5})
        self.assertEqual(output, 'yes')

    @setup({'ifequal-numeric11': '{% ifequal x -5.2 %}yes{% endifequal %}'})
    def test_ifequal_numeric11(self):
        output = self.engine.render_to_string('ifequal-numeric11', {'x': -5.2})
        self.assertEqual(output, 'yes')

    @setup({'ifequal-numeric12': '{% ifequal x +5 %}yes{% endifequal %}'})
    def test_ifequal_numeric12(self):
        output = self.engine.render_to_string('ifequal-numeric12', {'x': 5})
        self.assertEqual(output, 'yes')

    # FILTER EXPRESSIONS AS ARGUMENTS
    @setup({'ifequal-filter01': '{% ifequal a|upper "A" %}x{% endifequal %}'})
    def test_ifequal_filter01(self):
        output = self.engine.render_to_string('ifequal-filter01', {'a': 'a'})
        self.assertEqual(output, 'x')

    @setup({'ifequal-filter02': '{% ifequal "A" a|upper %}x{% endifequal %}'})
    def test_ifequal_filter02(self):
        output = self.engine.render_to_string('ifequal-filter02', {'a': 'a'})
        self.assertEqual(output, 'x')

    @setup({'ifequal-filter03': '{% ifequal a|upper b|upper %}x{% endifequal %}'})
    def test_ifequal_filter03(self):
        output = self.engine.render_to_string('ifequal-filter03', {'a': 'x', 'b': 'X'})
        self.assertEqual(output, 'x')

    @setup({'ifequal-filter04': '{% ifequal x|slice:"1" "a" %}x{% endifequal %}'})
    def test_ifequal_filter04(self):
        output = self.engine.render_to_string('ifequal-filter04', {'x': 'aaa'})
        self.assertEqual(output, 'x')

    @setup({'ifequal-filter05': '{% ifequal x|slice:"1"|upper "A" %}x{% endifequal %}'})
    def test_ifequal_filter05(self):
        output = self.engine.render_to_string('ifequal-filter05', {'x': 'aaa'})
        self.assertEqual(output, 'x')


class IfNotEqualTagTests(SimpleTestCase):
    """Render ``{% ifnotequal %}`` templates and assert the chosen branch."""

    @setup({'ifnotequal01': '{% ifnotequal a b %}yes{% endifnotequal %}'})
    def test_ifnotequal01(self):
        output = self.engine.render_to_string('ifnotequal01', {'a': 1, 'b': 2})
        self.assertEqual(output, 'yes')

    @setup({'ifnotequal02': '{% ifnotequal a b %}yes{% endifnotequal %}'})
    def test_ifnotequal02(self):
        output = self.engine.render_to_string('ifnotequal02', {'a': 1, 'b': 1})
        self.assertEqual(output, '')

    @setup({'ifnotequal03': '{% ifnotequal a b %}yes{% else %}no{% endifnotequal %}'})
    def test_ifnotequal03(self):
        output = self.engine.render_to_string('ifnotequal03', {'a': 1, 'b': 2})
        self.assertEqual(output, 'yes')

    @setup({'ifnotequal04': '{% ifnotequal a b %}yes{% else %}no{% endifnotequal %}'})
    def test_ifnotequal04(self):
        output = self.engine.render_to_string('ifnotequal04', {'a': 1, 'b': 1})
        self.assertEqual(output, 'no')
ksh/gpitrainingv2
refs/heads/master
modules/dashboard/course_settings.py
13
# Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Classes supporting updates to basic course settings."""

__author__ = 'Abhinav Khandelwal (abhinavk@google.com)'

from controllers.utils import ApplicationHandler
from controllers.utils import BaseRESTHandler
from controllers.utils import XsrfTokenManager
from models import courses
from models import roles
from models import transforms
from models import vfs
from modules.oeditor import oeditor
import yaml

import messages
from google.appengine.api import users


def is_editable_fs(app_context):
    """Return True iff the context's files live in the editable datastore VFS."""
    return app_context.fs.impl.__class__ == vfs.DatastoreBackedFileSystem


class CourseSettingsRights(object):
    """Manages view/edit rights for files."""

    @classmethod
    def can_view(cls, handler):
        """Only course admins may view settings."""
        return roles.Roles.is_course_admin(handler.app_context)

    @classmethod
    def can_edit(cls, handler):
        """Only course admins may edit settings."""
        return roles.Roles.is_course_admin(handler.app_context)

    @classmethod
    def can_delete(cls, handler):
        # Deleting requires the same right as editing.
        return cls.can_edit(handler)

    @classmethod
    def can_add(cls, handler):
        # Adding requires the same right as editing.
        return cls.can_edit(handler)


class CourseSettingsHandler(ApplicationHandler):
    """Course settings handler."""

    def post_edit_basic_course_settings(self):
        """Handles editing of course.yaml."""
        assert is_editable_fs(self.app_context)

        # Check if course.yaml exists; create with a minimal template if not,
        # recording the current user's email as the initial admin.
        fs = self.app_context.fs.impl
        course_yaml = fs.physical_to_logical('/course.yaml')
        if not fs.isfile(course_yaml):
            fs.put(course_yaml, vfs.string_to_stream(
                courses.EMPTY_COURSE_YAML % users.get_current_user().email()))

        self.redirect(self.get_action_url(
            'edit_basic_settings', key='/course.yaml'))

    def get_edit_basic_settings(self):
        """Shows editor for course.yaml."""
        key = self.request.get('key')

        exit_url = self.canonicalize_url('/dashboard?action=settings')
        rest_url = self.canonicalize_url('/rest/course/settings')
        # Build the object editor form from the course settings schema.
        form_html = oeditor.ObjectEditor.get_html_for(
            self, CourseSettingsRESTHandler.REGISTORY.get_json_schema(),
            CourseSettingsRESTHandler.REGISTORY.get_schema_dict(),
            key, rest_url, exit_url,
            required_modules=CourseSettingsRESTHandler.REQUIRED_MODULES)

        template_values = {}
        template_values['page_title'] = self.format_title('Edit Settings')
        template_values['page_description'] = messages.EDIT_SETTINGS_DESCRIPTION
        template_values['main_content'] = form_html
        self.render_page(template_values)


class CourseSettingsRESTHandler(BaseRESTHandler):
    """Provides REST API for a file."""

    # NOTE: 'REGISTORY' spelling is the project's established name; callers
    # elsewhere (e.g. the dashboard editor) reference it as-is.
    REGISTORY = courses.create_course_registry()

    # InputEx editor modules required to render the settings form.
    REQUIRED_MODULES = [
        'inputex-date', 'inputex-string', 'inputex-textarea', 'inputex-url',
        'inputex-checkbox', 'inputex-select', 'inputex-uneditable',
        'gcb-rte']

    URI = '/rest/course/settings'

    @classmethod
    def validate_content(cls, content):
        """Raise (via yaml) if content is not parseable YAML."""
        yaml.safe_load(content)

    def get_course_dict(self):
        """Return the full course environment as a dict."""
        return self.get_course().get_environ(self.app_context)

    def get_group_id(self, email):
        """Extract the Google Groups id from a @googlegroups.com address."""
        if not email or '@googlegroups.com' not in email:
            return None
        return email.split('@')[0]

    def get_groups_web_url(self, email):
        """Return the public web URL of a Google Group, or None."""
        group_id = self.get_group_id(email)
        if not group_id:
            return None
        return 'https://groups.google.com/group/' + group_id

    def get_groups_embed_url(self, email):
        """Return the embeddable forum URL of a Google Group, or None."""
        group_id = self.get_group_id(email)
        if not group_id:
            return None
        return 'https://groups.google.com/forum/embed/?place=forum/' + group_id

    def get(self):
        """Handles REST GET verb and returns an object as JSON payload."""
        assert is_editable_fs(self.app_context)

        key = self.request.get('key')

        if not CourseSettingsRights.can_view(self):
            transforms.send_json_response(
                self, 401, 'Access denied.', {'key': key})
            return

        # Load data if possible.
        fs = self.app_context.fs.impl
        filename = fs.physical_to_logical(key)
        try:
            stream = fs.get(filename)
        except:  # pylint: disable=bare-except
            # Deliberately best-effort: any read failure is reported as 404
            # below rather than surfacing a 500 to the editor UI.
            stream = None
        if not stream:
            transforms.send_json_response(
                self, 404, 'Object not found.', {'key': key})
            return

        # Prepare data.
        entity = {}
        CourseSettingsRESTHandler.REGISTORY.convert_entity_to_json_entity(
            self.get_course_dict(), entity)

        # Render JSON response.
        json_payload = transforms.dict_to_json(
            entity,
            CourseSettingsRESTHandler.REGISTORY.get_json_schema_dict())
        transforms.send_json_response(
            self, 200, 'Success.',
            payload_dict=json_payload,
            xsrf_token=XsrfTokenManager.create_xsrf_token(
                'basic-course-settings-put'))

    def put(self):
        """Handles REST PUT verb with JSON payload."""
        assert is_editable_fs(self.app_context)

        request = transforms.loads(self.request.get('request'))
        key = request.get('key')

        if not self.assert_xsrf_token_or_fail(
                request, 'basic-course-settings-put', {'key': key}):
            return

        if not CourseSettingsRights.can_edit(self):
            transforms.send_json_response(
                self, 401, 'Access denied.', {'key': key})
            return

        payload = request.get('payload')
        request_data = {}
        CourseSettingsRESTHandler.REGISTORY.convert_json_to_entity(
            transforms.loads(payload), request_data)

        course_data = request_data['course']
        # Derive the forum URLs from the group email when one is given.
        if 'forum_email' in course_data.keys():
            forum_email = course_data['forum_email']
            forum_web_url = self.get_groups_web_url(forum_email)
            if forum_web_url:
                course_data['forum_url'] = forum_web_url
            forum_web_url = self.get_groups_embed_url(forum_email)
            if forum_web_url:
                course_data['forum_embed_url'] = forum_web_url

        # Same derivation for the announcements mailing list.
        if 'announcement_list_email' in course_data.keys():
            announcement_email = course_data['announcement_list_email']
            announcement_web_url = self.get_groups_web_url(announcement_email)
            if announcement_web_url:
                course_data['announcement_list_url'] = announcement_web_url

        # Merge the submitted subset over the existing settings, then
        # round-trip through YAML to validate before persisting.
        entity = courses.deep_dict_merge(request_data, self.get_course_dict())
        content = yaml.safe_dump(entity)

        try:
            self.validate_content(content)
            content_stream = vfs.string_to_stream(unicode(content))
        except Exception as e:  # pylint: disable=W0703
            transforms.send_json_response(self, 412, 'Validation error: %s' % e)
            return

        # Store new file content.
        fs = self.app_context.fs.impl
        filename = fs.physical_to_logical(key)
        fs.put(filename, content_stream)

        # Send reply.
        transforms.send_json_response(self, 200, 'Saved.')

    def delete(self):
        """Handles REST DELETE verb; deletion is always denied."""
        request = transforms.loads(self.request.get('request'))
        key = request.get('key')
        transforms.send_json_response(
            self, 401, 'Access denied.', {'key': key})
nuagenetworks/vspk-python
refs/heads/master
vspk/v5_0/fetchers/nuvnfcatalogs_fetcher.py
2
# -*- coding: utf-8 -*- # # Copyright (c) 2015, Alcatel-Lucent Inc, 2017 Nokia # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of the copyright holder nor the names of its contributors # may be used to endorse or promote products derived from this software without # specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY # DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from bambou import NURESTFetcher class NUVNFCatalogsFetcher(NURESTFetcher): """ Represents a NUVNFCatalogs fetcher Notes: This fetcher enables to fetch NUVNFCatalog objects. See: bambou.NURESTFetcher """ @classmethod def managed_class(cls): """ Return NUVNFCatalog class that is managed. Returns: .NUVNFCatalog: the managed class """ from .. import NUVNFCatalog return NUVNFCatalog
crosswalk-project/chromium-crosswalk-efl
refs/heads/efl/crosswalk-10/39.0.2171.19
components/policy/tools/make_policy_zip.py
159
#!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Creates a zip archive with policy template files.

The list of input files is extracted from a grd file with grit. This is to
keep the length of input arguments below the limit on Windows.
"""

import optparse
import os
import sys
import zipfile


def add_files_to_zip(zip_file, base_dir, file_list):
  """Pack a list of files into a zip archive that is already open for writing.

  Args:
    zip_file: A zipfile.ZipFile object, open for writing.
    base_dir: Base path of all the files in the real file system; expected to
        end with a path separator.
    file_list: List of file paths to add, all relative to base_dir. The zip
        entries will only contain this component of the path.

  Returns:
    0 on success, following the exit-code convention used by main().
  """
  for file_path in file_list:
    # Archive entries carry only the path relative to base_dir.
    zip_file.write(base_dir + file_path, file_path)
  return 0


def get_grd_outputs(grit_cmd, grit_defines, grd_file, grd_strip_path_prefix):
  """Return the files that grit would generate for a grd file.

  Args:
    grit_cmd: Path to the grit tool; its directory is appended to sys.path so
        that grit_info can be imported from there.
    grit_defines: Dict mapping each grit define name to 1.
    grd_file: Path of the input .grd file.
    grd_strip_path_prefix: Prefix every output path must start with; it is
        stripped from the returned paths.

  Returns:
    List of output file paths, relative to grd_strip_path_prefix.
  """
  grit_path = os.path.join(os.getcwd(), os.path.dirname(grit_cmd))
  sys.path.append(grit_path)
  import grit_info
  outputs = grit_info.Outputs(grd_file, grit_defines,
                              'GRIT_DIR/../gritsettings/resource_ids')
  result = []
  for item in outputs:
    assert item.startswith(grd_strip_path_prefix)
    result.append(item[len(grd_strip_path_prefix):])
  return result


def main(argv):
  """Pack policy template files into a zip archive.

  Command-line options (see the parser below):
    --output: File name of the zip archive to create.
    --basedir: Base path of the input files.
    --grit_info / --grd_input / --grd_strip_path_prefix: Used to compute the
        list of grit-generated input files.
    --extra_input: Extra files to include, relative to basedir (repeatable).
    -D / -E: Grit defines and build-environment variables.

  Returns:
    Process exit code: 0 on success.
  """
  parser = optparse.OptionParser()
  parser.add_option("--output", dest="output")
  parser.add_option("--basedir", dest="basedir")
  parser.add_option("--grit_info", dest="grit_info")
  parser.add_option("--grd_input", dest="grd_input")
  parser.add_option("--grd_strip_path_prefix", dest="grd_strip_path_prefix")
  parser.add_option("--extra_input", action="append", dest="extra_input",
                    default=[])
  parser.add_option("-D", action="append", dest="grit_defines", default=[])
  parser.add_option("-E", action="append", dest="grit_build_env", default=[])
  options, args = parser.parse_args(argv[1:])

  # BUG FIX: the original used options.basedir[-1], which raises IndexError
  # when --basedir is passed an empty string; endswith handles that safely.
  if not options.basedir.endswith('/'):
    options.basedir += '/'

  grit_defines = {}
  for define in options.grit_defines:
    grit_defines[define] = 1

  # BUG FIX: copy the list instead of aliasing the optparse default object,
  # so the += below cannot mutate the parser's shared default list.
  file_list = list(options.extra_input)
  file_list += get_grd_outputs(options.grit_info, grit_defines,
                               options.grd_input,
                               options.grd_strip_path_prefix)
  zip_file = zipfile.ZipFile(options.output, 'w', zipfile.ZIP_DEFLATED)
  try:
    return add_files_to_zip(zip_file, options.basedir, file_list)
  finally:
    zip_file.close()


if '__main__' == __name__:
  sys.exit(main(sys.argv))
switowski/invenio
refs/heads/master
invenio/testsuite/test_ext_crossref.py
7
# -*- coding: utf-8 -*-
#
# This file is part of Invenio
# Copyright (C) 2014, 2015 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.

"""Test *CrossRef* integration."""

from __future__ import absolute_import

import httpretty
import pkg_resources

from invenio.ext.crossref import CrossRef
from invenio.testsuite import make_test_suite, run_test_suite, InvenioTestCase


class CrossRefMixin(InvenioTestCase):
    """Custom CrossRef configuration."""

    @property
    def config(self):
        """Remove CrossRef from extensions to get full control of the test."""
        from invenio.base.config import EXTENSIONS
        cfg = super(CrossRefMixin, self).config
        # BUG FIX: use a list comprehension instead of filter() so the value
        # stored in the config is a list on both Python 2 and Python 3
        # (filter() returns a lazy iterator on Python 3).
        cfg["EXTENSIONS"] = [
            k for k in EXTENSIONS if not k.startswith("invenio.ext.crossref")]
        cfg["CROSSREF_API_URL"] = "http://api.example.org/works/"
        cfg["CACHE_TYPE"] = "simple"
        return cfg


class TestCrossRef(CrossRefMixin):
    """Test of extension creation."""

    def test_creation(self):
        """Registering the extension populates app.extensions."""
        assert "crossref" not in self.app.extensions
        CrossRef(app=self.app)
        assert isinstance(self.app.extensions["crossref"], CrossRef)

    def test_creation_old_flask(self):
        """Extension registration also works without a pre-existing dict."""
        # Simulate old Flask (pre 0.9)
        del self.app.extensions
        CrossRef(app=self.app)
        assert isinstance(self.app.extensions["crossref"], CrossRef)

    def test_creation_init(self):
        """Deferred init via init_app() registers the extension too."""
        assert "crossref" not in self.app.extensions
        r = CrossRef()
        r.init_app(app=self.app)
        assert isinstance(self.app.extensions["crossref"], CrossRef)

    def test_double_creation(self):
        """Registering the extension twice raises RuntimeError."""
        CrossRef(app=self.app)
        self.assertRaises(RuntimeError, CrossRef, app=self.app)


class TestCrossRefQuery(CrossRefMixin):
    """Test CrossRef query response parsing."""

    def setUp(self):
        self.crossref = CrossRef(app=self.app)

    def tearDown(self):
        del self.crossref

    @httpretty.activate
    def test_found_result(self):
        """A known DOI returns HTTP 200 with the recorded fixture body."""
        httpretty.register_uri(
            httpretty.GET,
            self.app.config["CROSSREF_API_URL"] +
            "10.1103/PhysRevLett.19.1264",
            body=pkg_resources.resource_string(
                "invenio.testsuite",
                "data/response_export_crossref.json"),
            status=200
        )
        response = self.app.extensions["crossref"].search(
            "10.1103/PhysRevLett.19.1264")
        self.assertEqual(response.status_code, 200)

    @httpretty.activate
    def test_zero_results_found(self):
        """An unknown DOI still yields a well-formed (empty) 200 response."""
        httpretty.register_uri(
            httpretty.GET,
            self.app.config["CROSSREF_API_URL"] +
            "10.1088/0067-0049/192/2/18a",
            body=pkg_resources.resource_string(
                "invenio.testsuite",
                "data/response_export_crossref_zero.json"),
            status=200
        )
        # BUG FIX: the original test registered the mock URI but never issued
        # a request, so it asserted nothing. Exercise the extension the same
        # way test_found_result does.
        response = self.app.extensions["crossref"].search(
            "10.1088/0067-0049/192/2/18a")
        self.assertEqual(response.status_code, 200)


TEST_SUITE = make_test_suite(TestCrossRef, TestCrossRefQuery)

if __name__ == "__main__":
    run_test_suite(TEST_SUITE)
jkyeung/XlsxWriter
refs/heads/master
xlsxwriter/test/comparison/test_chart_column04.py
1
############################################################################### # # Tests for XlsxWriter. # # Copyright (c), 2013-2016, John McNamara, jmcnamara@cpan.org # from ..excel_comparsion_test import ExcelComparisonTest from ...workbook import Workbook class TestCompareXLSXFiles(ExcelComparisonTest): """ Test file created by XlsxWriter against a file created by Excel. """ def setUp(self): self.maxDiff = None filename = 'chart_column04.xlsx' test_dir = 'xlsxwriter/test/comparison/' self.got_filename = test_dir + '_test_' + filename self.exp_filename = test_dir + 'xlsx_files/' + filename self.ignore_files = [] self.ignore_elements = {'xl/workbook.xml': ['<fileVersion', '<calcPr']} def test_create_file(self): """Test the creation of a simple XlsxWriter file.""" workbook = Workbook(self.got_filename) worksheet = workbook.add_worksheet() chart = workbook.add_chart({'type': 'column'}) chart.axis_ids = [63591936, 63593856] chart.axis2_ids = [63613568, 63612032] data = [[1, 2, 3, 4, 5], [6, 8, 6, 4, 2]] worksheet.write_column('A1', data[0]) worksheet.write_column('B1', data[1]) chart.add_series({'values': '=Sheet1!$A$1:$A$5'}) chart.add_series({'values': '=Sheet1!$B$1:$B$5', 'y2_axis': 1}) worksheet.insert_chart('E9', chart) workbook.close() self.assertExcelEqual()
slevenhagen/odoo-npg
refs/heads/8.0
addons/crm/res_partner.py
71
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

from openerp.osv import fields,osv


class res_partner(osv.osv):
    """ Inherits partner and adds CRM information in the partner form """
    _inherit = 'res.partner'

    def _opportunity_meeting_phonecall_count(self, cr, uid, ids, field_name, arg, context=None):
        """Functional-field getter computing opportunity, meeting and
        phonecall counts for each partner id in ``ids``."""
        # Pre-seed every id so the result dict is complete even if the
        # browse/search below fails part-way through.
        res = dict(map(lambda x: (x,{'opportunity_count': 0, 'meeting_count': 0}), ids))
        # the user may not have access rights for opportunities or meetings
        try:
            for partner in self.browse(cr, uid, ids, context):
                # For companies, also count opportunities of child partners.
                if partner.is_company:
                    operator = 'child_of'
                else:
                    operator = '='
                opp_ids = self.pool['crm.lead'].search(cr, uid, [('partner_id', operator, partner.id), ('type', '=', 'opportunity'), ('probability', '<', '100')], context=context)
                res[partner.id] = {
                    'opportunity_count': len(opp_ids),
                    'meeting_count': len(partner.meeting_ids),
                }
        except:
            # Deliberate best-effort: access-rights errors leave the
            # pre-seeded zero counts in place.
            pass
        # Phonecall counts are computed unconditionally, outside the guard.
        for partner in self.browse(cr, uid, ids, context):
            res[partner.id]['phonecall_count'] = len(partner.phonecall_ids)
        return res

    _columns = {
        'section_id': fields.many2one('crm.case.section', 'Sales Team'),
        'opportunity_ids': fields.one2many('crm.lead', 'partner_id',\
            'Leads and Opportunities', domain=[('probability', 'not in', ['0', '100'])]),
        'meeting_ids': fields.many2many('calendar.event', 'calendar_event_res_partner_rel','res_partner_id', 'calendar_event_id',
            'Meetings'),
        'phonecall_ids': fields.one2many('crm.phonecall', 'partner_id',\
            'Phonecalls'),
        'opportunity_count': fields.function(_opportunity_meeting_phonecall_count, string="Opportunity", type='integer', multi='opp_meet'),
        'meeting_count': fields.function(_opportunity_meeting_phonecall_count, string="# Meetings", type='integer', multi='opp_meet'),
        'phonecall_count': fields.function(_opportunity_meeting_phonecall_count, string="Phonecalls", type="integer", multi='opp_meet'),
    }

    def redirect_partner_form(self, cr, uid, partner_id, context=None):
        """Return an act_window dict opening the partner's form view."""
        search_view = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'base', 'view_res_partner_filter')
        value = {
            'domain': "[]",
            'view_type': 'form',
            'view_mode': 'form,tree',
            'res_model': 'res.partner',
            'res_id': int(partner_id),
            'view_id': False,
            'context': context,
            'type': 'ir.actions.act_window',
            'search_view_id': search_view and search_view[1] or False
        }
        return value

    def make_opportunity(self, cr, uid, ids, opportunity_summary, planned_revenue=0.0, probability=0.0, partner_id=None, context=None):
        """Create one crm.lead opportunity per partner in ``ids``.

        Returns a dict mapping partner id -> created opportunity id. Note
        that once ``partner_id`` is set (explicitly or from the first
        partner), it is reused for all remaining iterations.
        """
        categ_obj = self.pool.get('crm.case.categ')
        categ_ids = categ_obj.search(cr, uid, [('object_id.model','=','crm.lead')])
        lead_obj = self.pool.get('crm.lead')
        opportunity_ids = {}
        for partner in self.browse(cr, uid, ids, context=context):
            if not partner_id:
                partner_id = partner.id
            opportunity_id = lead_obj.create(cr, uid, {
                'name' : opportunity_summary,
                'planned_revenue' : planned_revenue,
                'probability' : probability,
                'partner_id' : partner_id,
                'categ_ids' : categ_ids and categ_ids[0:1] or [],
                'type': 'opportunity'
            }, context=context)
            opportunity_ids[partner_id] = opportunity_id
        return opportunity_ids

    def schedule_meeting(self, cr, uid, ids, context=None):
        """Open the calendar action pre-filtered on the given partners,
        adding the current user's own partner to the attendee defaults."""
        partner_ids = list(ids)
        partner_ids.append(self.pool.get('res.users').browse(cr, uid, uid).partner_id.id)
        res = self.pool.get('ir.actions.act_window').for_xml_id(cr, uid, 'calendar', 'action_calendar_event', context)
        res['context'] = {
            'search_default_partner_ids': list(ids),
            'default_partner_ids': partner_ids,
        }
        return res
M32Media/redash
refs/heads/master
tests/handlers/test_query_snippets.py
14
from tests import BaseTestCase
from redash.models import QuerySnippet


class TestQuerySnippetResource(BaseTestCase):
    """API tests for a single query snippet resource (/api/query_snippets/<id>)."""

    def test_get_snippet(self):
        # GET should return all user-editable fields of the snippet verbatim.
        snippet = self.factory.create_query_snippet()
        rv = self.make_request('get', '/api/query_snippets/{}'.format(snippet.id))
        for field in ('snippet', 'description', 'trigger'):
            self.assertEqual(rv.json[field], getattr(snippet, field))

    def test_update_snippet(self):
        # POST with new field values should echo the updated values back.
        snippet = self.factory.create_query_snippet()
        data = {
            'snippet': 'updated',
            'trigger': 'updated trigger',
            'description': 'updated description'
        }
        rv = self.make_request('post', '/api/query_snippets/{}'.format(snippet.id), data=data)
        for field in ('snippet', 'description', 'trigger'):
            self.assertEqual(rv.json[field], data[field])

    def test_delete_snippet(self):
        # DELETE should remove the row from the database entirely.
        # NOTE(review): `rv` is unused — the response status code is not
        # asserted here; consider also checking rv.status_code.
        snippet = self.factory.create_query_snippet()
        rv = self.make_request('delete', '/api/query_snippets/{}'.format(snippet.id))
        self.assertIsNone(QuerySnippet.query.get(snippet.id))


class TestQuerySnippetListResource(BaseTestCase):
    """API tests for the snippet collection resource (/api/query_snippets)."""

    def test_create_snippet(self):
        # Creating a snippet with valid data should succeed (HTTP 200).
        data = {
            'snippet': 'updated',
            'trigger': 'updated trigger',
            'description': 'updated description'
        }
        rv = self.make_request('post', '/api/query_snippets', data=data)
        self.assertEqual(rv.status_code, 200)

    def test_list_all_snippets(self):
        # Listing must be scoped to the caller's organization: snippets from
        # another org must not appear in the result.
        snippet1 = self.factory.create_query_snippet()
        snippet2 = self.factory.create_query_snippet()
        snippet_diff_org = self.factory.create_query_snippet(org=self.factory.create_org())
        rv = self.make_request('get', '/api/query_snippets')
        ids = [s['id'] for s in rv.json]
        self.assertIn(snippet1.id, ids)
        self.assertIn(snippet2.id, ids)
        self.assertNotIn(snippet_diff_org.id, ids)
tumbl3w33d/ansible
refs/heads/devel
test/units/modules/network/f5/test_bigip_profile_http2.py
22
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2018, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import (absolute_import, division, print_function)
__metaclass__ = type

import os
import json
import pytest
import sys

if sys.version_info < (2, 7):
    pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7")

from ansible.module_utils.basic import AnsibleModule

try:
    from library.modules.bigip_profile_http2 import ApiParameters
    from library.modules.bigip_profile_http2 import ModuleParameters
    from library.modules.bigip_profile_http2 import ModuleManager
    from library.modules.bigip_profile_http2 import ArgumentSpec

    # In Ansible 2.8, Ansible changed import paths.
    from test.units.compat import unittest
    from test.units.compat.mock import Mock

    from test.units.modules.utils import set_module_args
except ImportError:
    from ansible.modules.network.f5.bigip_profile_http2 import ApiParameters
    from ansible.modules.network.f5.bigip_profile_http2 import ModuleParameters
    from ansible.modules.network.f5.bigip_profile_http2 import ModuleManager
    from ansible.modules.network.f5.bigip_profile_http2 import ArgumentSpec

    # Ansible 2.8 imports
    from units.compat import unittest
    from units.compat.mock import Mock

    from units.modules.utils import set_module_args

# Directory holding canned JSON responses used by the tests below.
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
# Cache of already-loaded fixtures, keyed by absolute path.
fixture_data = {}


def load_fixture(name):
    """Load (and memoize) a fixture file; parse as JSON when possible.

    Returns the parsed object for JSON fixtures, or the raw text when the
    content is not valid JSON.
    """
    path = os.path.join(fixture_path, name)

    if path in fixture_data:
        return fixture_data[path]

    with open(path) as f:
        data = f.read()

    try:
        data = json.loads(data)
    except Exception:
        # Not JSON — keep the raw string contents.
        pass

    fixture_data[path] = data
    return data


class TestParameters(unittest.TestCase):
    """Unit tests for the parameter adapter classes."""

    def test_module_parameters(self):
        # Booleans supplied by the user are mapped to the BIG-IP
        # 'enabled'/'disabled' vocabulary, and parent is made absolute.
        args = dict(
            name='foo',
            parent='bar',
            description='This is a Test',
            streams=20,
            enforce_tls_requirements=True,
            frame_size=1024,
            activation_modes=['always'],
            insert_header=True,
            insert_header_name='FOO'
        )

        p = ModuleParameters(params=args)
        assert p.name == 'foo'
        assert p.parent == '/Common/bar'
        assert p.description == 'This is a Test'
        assert p.streams == 20
        assert p.enforce_tls_requirements == 'enabled'
        assert p.frame_size == 1024
        assert p.activation_modes == ['always']
        assert p.insert_header == 'enabled'
        assert p.insert_header_name == 'FOO'

    def test_api_parameters(self):
        # Values read back from the device API fixture are surfaced as-is.
        args = load_fixture('load_ltm_http2_profile.json')

        p = ApiParameters(params=args)
        assert p.name == 'test'
        assert p.streams == 10
        assert p.enforce_tls_requirements == 'enabled'


class TestManager(unittest.TestCase):
    """Unit tests for the module's create path, with device I/O mocked out."""

    def setUp(self):
        self.spec = ArgumentSpec()

    def test_create(self, *args):
        # Configure the arguments that would be sent to the Ansible module
        set_module_args(dict(
            name='foo',
            enforce_tls_requirements='yes',
            parent='bar',
            provider=dict(
                server='localhost',
                password='password',
                user='admin'
            )
        ))

        module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode
        )
        mm = ModuleManager(module=module)

        # Override methods to force specific logic in the module to happen
        mm.exists = Mock(return_value=False)
        mm.create_on_device = Mock(return_value=True)

        results = mm.exec_module()

        assert results['changed'] is True
        assert results['enforce_tls_requirements'] == 'yes'
courtarro/gnuradio-wg-grc
refs/heads/master
gr-blocks/python/blocks/qa_vco.py
54
#!/usr/bin/env python
#
# Copyright 2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#

from gnuradio import gr, gr_unittest, blocks
import math


def sig_source_f(samp_rate, freq, amp, N):
    """Return N samples of a real cosine as a list of floats.

    amp * cos(2*pi*freq*t), sampled at t = i/samp_rate for i in 0..N-1.

    Fix: the original used `xrange` and lazy `map`, which is Python-2-only
    (`xrange` is gone on Python 3, and a lazy map object cannot be
    concatenated with the list literals in the tests below). A list
    comprehension returns a list on both Python 2 and 3 with identical
    values.
    """
    return [amp * math.cos(2. * math.pi * freq * (float(i) / samp_rate))
            for i in range(N)]


def sig_source_c(samp_rate, freq, amp, N):
    """Return N samples of a complex exponential as a list of complex values.

    cos(2*pi*freq*t) + 1j*sin(2*pi*freq*t), t = i/samp_rate.

    NOTE(review): `amp` is accepted but not applied, matching the original
    implementation (the tests call it with amp=1); kept for interface parity.
    """
    return [math.cos(2. * math.pi * freq * (float(i) / samp_rate)) +
            1j * math.sin(2. * math.pi * freq * (float(i) / samp_rate))
            for i in range(N)]


class test_vco(gr_unittest.TestCase):
    """QA tests for the float and complex VCO blocks."""

    def setUp(self):
        self.tb = gr.top_block()

    def tearDown(self):
        self.tb = None

    def test_001(self):
        # A three-level step input should produce DC, then two cosines whose
        # frequencies are proportional to the input level (sensitivity pi/2).
        src_data = 200*[0,] + 200*[0.5,] + 200*[1,]
        expected_result = 200*[1,] + \
            sig_source_f(1, 0.125, 1, 200) + \
            sig_source_f(1, 0.25, 1, 200)

        src = blocks.vector_source_f(src_data)
        op = blocks.vco_f(1, math.pi/2.0, 1)
        dst = blocks.vector_sink_f()

        self.tb.connect(src, op, dst)
        self.tb.run()

        result_data = dst.data()
        self.assertFloatTuplesAlmostEqual(expected_result, result_data, 5)

    def test_002(self):
        # Same step input, complex VCO: expect complex exponentials.
        src_data = 200*[0,] + 200*[0.5,] + 200*[1,]
        expected_result = 200*[1,] + \
            sig_source_c(1, 0.125, 1, 200) + \
            sig_source_c(1, 0.25, 1, 200)

        src = blocks.vector_source_f(src_data)
        op = blocks.vco_c(1, math.pi/2.0, 1)
        dst = blocks.vector_sink_c()

        self.tb.connect(src, op, dst)
        self.tb.run()

        result_data = dst.data()
        self.assertComplexTuplesAlmostEqual(expected_result, result_data, 5)


if __name__ == '__main__':
    gr_unittest.run(test_vco, "test_vco.xml")
cloudera/hue
refs/heads/master
desktop/core/ext-py/Django-1.11.29/django/utils/translation/template.py
58
from __future__ import unicode_literals

import re
import warnings

from django.template.base import (
    TOKEN_BLOCK, TOKEN_COMMENT, TOKEN_TEXT, TOKEN_VAR, TRANSLATOR_COMMENT_MARK,
    Lexer,
)
from django.utils import six
from django.utils.encoding import force_text
from django.utils.six import StringIO

from . import TranslatorCommentWarning, trim_whitespace

# Matches any non-whitespace character; used by blankout() below.
dot_re = re.compile(r'\S')


def blankout(src, char):
    """
    Change every non-whitespace character to the given char.
    Used in the templatize function.

    Whitespace (including newlines) is preserved so that line numbers in the
    xgettext output still correspond to the original template.
    """
    return dot_re.sub(char, src)


# Regexes that recognise the {% trans %} / {% blocktrans %} tag grammar.
# `context_re` extracts the quoted context argument from a tag tail.
context_re = re.compile(r"""^\s+.*context\s+((?:"[^"]*?")|(?:'[^']*?'))\s*""")
inline_re = re.compile(
    # Match the trans 'some text' part
    r"""^\s*trans\s+((?:"[^"]*?")|(?:'[^']*?'))"""
    # Match and ignore optional filters
    r"""(?:\s*\|\s*[^\s:]+(?::(?:[^\s'":]+|(?:"[^"]*?")|(?:'[^']*?')))?)*"""
    # Match the optional context part
    r"""(\s+.*context\s+((?:"[^"]*?")|(?:'[^']*?')))?\s*"""
)
block_re = re.compile(r"""^\s*blocktrans(\s+.*context\s+((?:"[^"]*?")|(?:'[^']*?')))?(?:\s+|$)""")
endblock_re = re.compile(r"""^\s*endblocktrans$""")
plural_re = re.compile(r"""^\s*plural$""")
# Matches _("...") / _('...') constant-translation calls inside expressions.
constant_re = re.compile(r"""_\(((?:".*?")|(?:'.*?'))\)""")


def templatize(src, origin=None, charset='utf-8'):
    """
    Turn a Django template into something that is understood by xgettext. It
    does so by translating the Django translation tags into standard gettext
    function invocations.

    The returned string is pseudo-Python: translatable strings become
    gettext()/ngettext()/pgettext()/npgettext() calls, and everything else is
    blanked out character-for-character (preserving whitespace) so xgettext
    reports correct line numbers. Raises SyntaxError if a translation block
    contains another block tag.
    """
    src = force_text(src, charset)
    out = StringIO('')
    # State for the tokenizer loop below:
    #   intrans/inplural  - inside {% blocktrans %} / past its {% plural %}
    #   incomment         - inside {% comment %} ... {% endcomment %}
    #   singular/plural   - accumulated message parts for the current block
    message_context = None
    intrans = False
    inplural = False
    trimmed = False
    singular = []
    plural = []
    incomment = False
    comment = []
    lineno_comment_map = {}
    comment_lineno_cache = None
    # Adding the u prefix allows gettext to recognize the Unicode string
    # (#26093).
    raw_prefix = 'u' if six.PY3 else ''

    def join_tokens(tokens, trim=False):
        # Concatenate collected message parts, optionally applying the
        # {% blocktrans trimmed %} whitespace normalization.
        message = ''.join(tokens)
        if trim:
            message = trim_whitespace(message)
        return message

    for t in Lexer(src).tokenize():
        if incomment:
            if t.token_type == TOKEN_BLOCK and t.contents == 'endcomment':
                content = ''.join(comment)
                translators_comment_start = None
                # Find the LAST translator-marked line; everything from it on
                # is emitted as a translator comment, earlier lines as blanks.
                for lineno, line in enumerate(content.splitlines(True)):
                    if line.lstrip().startswith(TRANSLATOR_COMMENT_MARK):
                        translators_comment_start = lineno
                for lineno, line in enumerate(content.splitlines(True)):
                    if translators_comment_start is not None and lineno >= translators_comment_start:
                        out.write(' # %s' % line)
                    else:
                        out.write(' #\n')
                incomment = False
                comment = []
            else:
                comment.append(t.contents)
        elif intrans:
            if t.token_type == TOKEN_BLOCK:
                endbmatch = endblock_re.match(t.contents)
                pluralmatch = plural_re.match(t.contents)
                if endbmatch:
                    if inplural:
                        if message_context:
                            out.write(' npgettext({p}{!r}, {p}{!r}, {p}{!r},count) '.format(
                                message_context,
                                join_tokens(singular, trimmed),
                                join_tokens(plural, trimmed),
                                p=raw_prefix,
                            ))
                        else:
                            out.write(' ngettext({p}{!r}, {p}{!r}, count) '.format(
                                join_tokens(singular, trimmed),
                                join_tokens(plural, trimmed),
                                p=raw_prefix,
                            ))
                        # Blank out the raw parts so line numbering stays intact.
                        for part in singular:
                            out.write(blankout(part, 'S'))
                        for part in plural:
                            out.write(blankout(part, 'P'))
                    else:
                        if message_context:
                            out.write(' pgettext({p}{!r}, {p}{!r}) '.format(
                                message_context,
                                join_tokens(singular, trimmed),
                                p=raw_prefix,
                            ))
                        else:
                            out.write(' gettext({p}{!r}) '.format(
                                join_tokens(singular, trimmed),
                                p=raw_prefix,
                            ))
                        for part in singular:
                            out.write(blankout(part, 'S'))
                    message_context = None
                    intrans = False
                    inplural = False
                    singular = []
                    plural = []
                elif pluralmatch:
                    inplural = True
                else:
                    filemsg = ''
                    if origin:
                        filemsg = 'file %s, ' % origin
                    raise SyntaxError(
                        "Translation blocks must not include other block tags: "
                        "%s (%sline %d)" % (t.contents, filemsg, t.lineno)
                    )
            elif t.token_type == TOKEN_VAR:
                # Template variables inside a blocktrans become %(name)s
                # placeholders in the message.
                if inplural:
                    plural.append('%%(%s)s' % t.contents)
                else:
                    singular.append('%%(%s)s' % t.contents)
            elif t.token_type == TOKEN_TEXT:
                contents = t.contents.replace('%', '%%')
                if inplural:
                    plural.append(contents)
                else:
                    singular.append(contents)
        else:
            # Handle comment tokens (`{# ... #}`) plus other constructs on
            # the same line:
            if comment_lineno_cache is not None:
                cur_lineno = t.lineno + t.contents.count('\n')
                if comment_lineno_cache == cur_lineno:
                    if t.token_type != TOKEN_COMMENT:
                        # A translator comment that is not the last thing on
                        # its line is discarded with a warning.
                        for c in lineno_comment_map[comment_lineno_cache]:
                            filemsg = ''
                            if origin:
                                filemsg = 'file %s, ' % origin
                            warn_msg = (
                                "The translator-targeted comment '%s' "
                                "(%sline %d) was ignored, because it wasn't "
                                "the last item on the line."
                            ) % (c, filemsg, comment_lineno_cache)
                            warnings.warn(warn_msg, TranslatorCommentWarning)
                        lineno_comment_map[comment_lineno_cache] = []
                else:
                    out.write('# %s' % ' | '.join(lineno_comment_map[comment_lineno_cache]))
                comment_lineno_cache = None

            if t.token_type == TOKEN_BLOCK:
                imatch = inline_re.match(t.contents)
                bmatch = block_re.match(t.contents)
                cmatches = constant_re.findall(t.contents)
                if imatch:
                    # {% trans "..." %}: strip the quotes, escape %, emit.
                    g = imatch.group(1)
                    if g[0] == '"':
                        g = g.strip('"')
                    elif g[0] == "'":
                        g = g.strip("'")
                    g = g.replace('%', '%%')
                    if imatch.group(2):
                        # A context is provided
                        context_match = context_re.match(imatch.group(2))
                        message_context = context_match.group(1)
                        if message_context[0] == '"':
                            message_context = message_context.strip('"')
                        elif message_context[0] == "'":
                            message_context = message_context.strip("'")
                        out.write(' pgettext({p}{!r}, {p}{!r}) '.format(
                            message_context, g, p=raw_prefix
                        ))
                        message_context = None
                    else:
                        out.write(' gettext({p}{!r}) '.format(g, p=raw_prefix))
                elif bmatch:
                    # {% blocktrans %}: enter the intrans state; constants in
                    # the tag itself are emitted immediately.
                    for fmatch in constant_re.findall(t.contents):
                        out.write(' _(%s) ' % fmatch)
                    if bmatch.group(1):
                        # A context is provided
                        context_match = context_re.match(bmatch.group(1))
                        message_context = context_match.group(1)
                        if message_context[0] == '"':
                            message_context = message_context.strip('"')
                        elif message_context[0] == "'":
                            message_context = message_context.strip("'")
                    intrans = True
                    inplural = False
                    trimmed = 'trimmed' in t.split_contents()
                    singular = []
                    plural = []
                elif cmatches:
                    for cmatch in cmatches:
                        out.write(' _(%s) ' % cmatch)
                elif t.contents == 'comment':
                    incomment = True
                else:
                    out.write(blankout(t.contents, 'B'))
            elif t.token_type == TOKEN_VAR:
                # {{ var|filter }}: emit _() constants, blank out filters.
                parts = t.contents.split('|')
                cmatch = constant_re.match(parts[0])
                if cmatch:
                    out.write(' _(%s) ' % cmatch.group(1))
                for p in parts[1:]:
                    if p.find(':_(') >= 0:
                        out.write(' %s ' % p.split(':', 1)[1])
                    else:
                        out.write(blankout(p, 'F'))
            elif t.token_type == TOKEN_COMMENT:
                if t.contents.lstrip().startswith(TRANSLATOR_COMMENT_MARK):
                    # Remember translator comments per line; they are flushed
                    # when the next token reveals what follows on that line.
                    lineno_comment_map.setdefault(t.lineno, []).append(t.contents)
                    comment_lineno_cache = t.lineno
            else:
                out.write(blankout(t.contents, 'X'))
    return out.getvalue()
hammerlab/mhcflurry
refs/heads/master
mhcflurry/local_parallelism.py
1
""" Infrastructure for "local" parallelism, i.e. multiprocess parallelism on one compute node. """ import traceback import sys import os import time from multiprocessing import Pool, Queue, cpu_count from six.moves import queue from multiprocessing.util import Finalize from pprint import pprint import random import numpy from .common import configure_tensorflow def add_local_parallelism_args(parser): """ Add local parallelism arguments to the given argparse.ArgumentParser. Parameters ---------- parser : argparse.ArgumentParser """ group = parser.add_argument_group("Local parallelism") group.add_argument( "--num-jobs", default=0, type=int, metavar="N", help="Number of local processes to parallelize training over. " "Set to 0 for serial run. Default: %(default)s.") group.add_argument( "--backend", choices=("tensorflow-gpu", "tensorflow-cpu", "tensorflow-default"), help="Keras backend. If not specified will use system default.") group.add_argument( "--gpus", type=int, metavar="N", help="Number of GPUs to attempt to parallelize across. Requires running " "in parallel.") group.add_argument( "--max-workers-per-gpu", type=int, metavar="N", default=1000, help="Maximum number of workers to assign to a GPU. Additional tasks will " "run on CPU.") group.add_argument( "--max-tasks-per-worker", type=int, metavar="N", default=None, help="Restart workers after N tasks. Workaround for tensorflow memory " "leaks. Requires Python >=3.2.") group.add_argument( "--worker-log-dir", default=None, help="Write worker stdout and stderr logs to given directory.") def worker_pool_with_gpu_assignments_from_args(args): """ Create a multiprocessing.Pool where each worker uses its own GPU. Uses commandline arguments. See `worker_pool_with_gpu_assignments`. 
Parameters ---------- args : argparse.ArgumentParser Returns ------- multiprocessing.Pool """ return worker_pool_with_gpu_assignments( num_jobs=args.num_jobs, num_gpus=args.gpus, backend=args.backend, max_workers_per_gpu=args.max_workers_per_gpu, max_tasks_per_worker=args.max_tasks_per_worker, worker_log_dir=args.worker_log_dir, ) def worker_pool_with_gpu_assignments( num_jobs, num_gpus=0, backend=None, max_workers_per_gpu=1, max_tasks_per_worker=None, worker_log_dir=None): """ Create a multiprocessing.Pool where each worker uses its own GPU. Parameters ---------- num_jobs : int Number of worker processes. num_gpus : int backend : string max_workers_per_gpu : int max_tasks_per_worker : int worker_log_dir : string Returns ------- multiprocessing.Pool """ if num_jobs == 0: if backend: configure_tensorflow(backend) return None worker_init_kwargs = [{} for _ in range(num_jobs)] if num_gpus: print("Attempting to round-robin assign each worker a GPU.") if backend != "tensorflow-default": print("Forcing keras backend to be tensorflow-default") backend = "tensorflow-default" gpu_assignments_remaining = dict(( (gpu, max_workers_per_gpu) for gpu in range(num_gpus) )) for (worker_num, kwargs) in enumerate(worker_init_kwargs): if gpu_assignments_remaining: # Use a GPU gpu_num = sorted( gpu_assignments_remaining, key=lambda key: gpu_assignments_remaining[key])[0] gpu_assignments_remaining[gpu_num] -= 1 if not gpu_assignments_remaining[gpu_num]: del gpu_assignments_remaining[gpu_num] gpu_assignment = [gpu_num] else: # Use CPU gpu_assignment = [] kwargs.update({ 'gpu_device_nums': gpu_assignment, 'keras_backend': backend }) print("Worker %d assigned GPUs: %s" % ( worker_num, gpu_assignment)) if worker_log_dir: for kwargs in worker_init_kwargs: kwargs["worker_log_dir"] = worker_log_dir worker_pool = make_worker_pool( processes=num_jobs, initializer=worker_init, initializer_kwargs_per_process=worker_init_kwargs, max_tasks_per_worker=max_tasks_per_worker) return worker_pool def 
make_worker_pool( processes=None, initializer=None, initializer_kwargs_per_process=None, max_tasks_per_worker=None): """ Convenience wrapper to create a multiprocessing.Pool. This function adds support for per-worker initializer arguments, which are not natively supported by the multiprocessing module. The motivation for this feature is to support allocating each worker to a (different) GPU. IMPLEMENTATION NOTE: The per-worker initializer arguments are implemented using a Queue. Each worker reads its arguments from this queue when it starts. When it terminates, it adds its initializer arguments back to the queue, so a future process can initialize itself using these arguments. There is one issue with this approach, however. If a worker crashes, it never repopulates the queue of initializer arguments. This will prevent any future worker from re-using those arguments. To deal with this issue we add a second 'backup queue'. This queue always contains the full set of initializer arguments: whenever a worker reads from it, it always pushes the pop'd args back to the end of the queue immediately. If the primary arg queue is ever empty, then workers will read from this backup queue. Parameters ---------- processes : int Number of workers. Default: num CPUs. initializer : function, optional Init function to call in each worker initializer_kwargs_per_process : list of dict, optional Arguments to pass to initializer function for each worker. Length of list must equal the number of workers. max_tasks_per_worker : int, optional Restart workers after this many tasks. Requires Python >=3.2. 
Returns ------- multiprocessing.Pool """ if not processes: processes = cpu_count() pool_kwargs = { 'processes': processes, } if max_tasks_per_worker: pool_kwargs["maxtasksperchild"] = max_tasks_per_worker if initializer: if initializer_kwargs_per_process: assert len(initializer_kwargs_per_process) == processes kwargs_queue = Queue() kwargs_queue_backup = Queue() for kwargs in initializer_kwargs_per_process: kwargs_queue.put(kwargs) kwargs_queue_backup.put(kwargs) pool_kwargs["initializer"] = worker_init_entry_point pool_kwargs["initargs"] = ( initializer, kwargs_queue, kwargs_queue_backup) else: pool_kwargs["initializer"] = initializer worker_pool = Pool(**pool_kwargs) print("Started pool: %s" % str(worker_pool)) pprint(pool_kwargs) return worker_pool def worker_init_entry_point( init_function, arg_queue=None, backup_arg_queue=None): kwargs = {} if arg_queue: try: kwargs = arg_queue.get(block=False) except queue.Empty: print("Argument queue empty. Using round robin arg queue.") kwargs = backup_arg_queue.get(block=True) backup_arg_queue.put(kwargs) # On exit we add the init args back to the queue so restarted workers # (e.g. when when running with maxtasksperchild) will pickup init # arguments from a previously exited worker. 
Finalize(None, arg_queue.put, (kwargs,), exitpriority=1) print("Initializing worker: %s" % str(kwargs)) init_function(**kwargs) def worker_init(keras_backend=None, gpu_device_nums=None, worker_log_dir=None): if worker_log_dir: sys.stderr = sys.stdout = open(os.path.join( worker_log_dir, "LOG-worker.%d.%d.txt" % (os.getpid(), int(time.time()))), "w") # Each worker needs distinct random numbers numpy.random.seed() random.seed() if keras_backend or gpu_device_nums: print("WORKER pid=%d assigned GPU devices: %s" % ( os.getpid(), gpu_device_nums)) configure_tensorflow( keras_backend, gpu_device_nums=gpu_device_nums) # Solution suggested in https://bugs.python.org/issue13831 class WrapException(Exception): """ Add traceback info to exception so exceptions raised in worker processes can still show traceback info when re-raised in the parent. """ def __init__(self): exc_type, exc_value, exc_tb = sys.exc_info() self.exception = exc_value self.formatted = ''.join(traceback.format_exception(exc_type, exc_value, exc_tb)) def __str__(self): return '%s\nOriginal traceback:\n%s' % (Exception.__str__(self), self.formatted) def call_wrapped(function, *args, **kwargs): """ Run function on args and kwargs and return result, wrapping any exception raised in a WrapException. Parameters ---------- function : arbitrary function Any other arguments provided are passed to the function. Returns ------- object """ try: return function(*args, **kwargs) except: raise WrapException() def call_wrapped_kwargs(function, kwargs): """ Invoke function on given kwargs and return result, wrapping any exception raised in a WrapException. Parameters ---------- function : arbitrary function kwargs : dict Returns ------- object result of calling function(**kwargs) """ return call_wrapped(function, **kwargs)
jmcarbo/openerp7
refs/heads/master
openerp/addons/knowledge/__openerp__.py
122
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## { 'name' : 'Knowledge Management System', 'version' : '1.0', 'depends' : ['base','base_setup'], 'author' : 'OpenERP SA', 'category': 'Hidden/Dependency', 'description': """ Installer for knowledge-based Hidden. ===================================== Makes the Knowledge Application Configuration available from where you can install document and Wiki based Hidden. """, 'website': 'http://www.openerp.com', 'data': [ 'security/knowledge_security.xml', 'security/ir.model.access.csv', 'knowledge_view.xml', 'res_config_view.xml', ], 'demo': ['knowledge_demo.xml'], 'installable': True, 'auto_install': False, 'images': ['images/1_config_knowledge.jpeg'], } # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
LeahDresd2/Leah
refs/heads/master
py/openage/convert/__main__.py
46
#!/usr/bin/env python3

from . import datafile
from . import mediafile
from .util import set_verbosity

import argparse


def main():
    """Entry point for the openage conversion script.

    Builds the argument parser (currently a single 'media' subsystem),
    parses sys.argv, sets the log verbosity, and dispatches to the handler
    registered by the chosen subcommand. Prints help when no subcommand is
    given.

    Fix applied: `args.module == None` replaced with the idiomatic
    `args.module is None` (PEP 8: comparisons to singletons use `is`).
    """
    # the convert script has 1 mode:
    # mode 0: generate media files
    #  this requires the aoe installation
    #  database files as csv will be generated, as well as usable
    #  media files like .png and .opus.
    #  see `mediafile.py` for the implementation.

    # construct argument parser
    p = argparse.ArgumentParser(description='openage conversion script. allows usage of original media files.')

    # common options
    p.add_argument("-v", "--verbose", help="Turn on verbose log messages", action='count', default=0)

    #p.set_defaults(handler=lambda x: p.print_help())

    # convert script has multiple subsystems
    sp = p.add_subparsers(dest='module', help="available convert subsystems")

    # media conversion:
    media_cmd = sp.add_parser("media", help="convert media files to free formats")
    media_cmd.add_argument("-e", "--extrafiles", help="Extract extra files that are not needed, but useful (mainly visualizations).", action='store_true')
    media_cmd.add_argument("--no-opus", help="Don't use opus conversion for audio files", action='store_true')
    media_cmd.add_argument("--use-dat-cache", help="Potentially use a pickle cache file for the read empires.dat file", action='store_true')
    # output directory and list-files are mutually exclusive; one is required
    mcmd_g0 = media_cmd.add_mutually_exclusive_group(required=True)
    mcmd_g0.add_argument("-o", "--output", metavar="output_directory", help="The data output directory")
    mcmd_g0.add_argument("-l", "--list-files", help="List files in the game archives", action='store_true')
    media_cmd.add_argument("srcdir", help="The Age of Empires II root directory")
    media_cmd.add_argument("extract", metavar="resource", nargs="*", help="A specific extraction rule, such as graphics:*.slp, terrain:15008.slp or *:*.wav. If no rules are specified, *:*.* is assumed")

    # set handler for media conversion
    media_cmd.set_defaults(handler=mediafile.media_convert)

    # actually parse argv and run main
    args = p.parse_args()

    set_verbosity(args.verbose)

    if args.module is None:
        # no subcommand given: show usage instead of failing
        p.print_help()
    else:
        args.handler(args)


if __name__ == "__main__":
    main()
tempbottle/kbengine
refs/heads/master
kbe/res/scripts/common/Lib/site-packages/pip/_vendor/requests/packages/chardet/charsetprober.py
3126
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
#   Mark Pilgrim - port to Python
#   Shy Shalom - original C code
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301  USA
######################### END LICENSE BLOCK #########################

from . import constants
import re


class CharSetProber:
    """Base class for all charset probers.

    Subclasses override feed()/get_confidence()/get_charset_name() to
    implement detection for a particular encoding family.
    """

    def __init__(self):
        # Fix: previously __init__ was `pass`, so calling get_state() before
        # reset() raised AttributeError (_mState unset). Initialize the state
        # attribute directly here. We deliberately do NOT call self.reset():
        # subclasses override reset() to use attributes they only assign
        # after invoking this constructor.
        self._mState = constants.eDetecting

    def reset(self):
        # Return the prober to its initial "still detecting" state.
        self._mState = constants.eDetecting

    def get_charset_name(self):
        # Overridden by subclasses to report the detected encoding name.
        return None

    def feed(self, aBuf):
        # Overridden by subclasses to consume a chunk of input bytes.
        pass

    def get_state(self):
        # One of constants.eDetecting / eFoundIt / eNotMe.
        return self._mState

    def get_confidence(self):
        # Overridden by subclasses; base class reports no confidence.
        return 0.0

    def filter_high_bit_only(self, aBuf):
        # Collapse every run of 7-bit (ASCII) bytes to a single space,
        # leaving only bytes >= 0x80 for statistical analysis.
        aBuf = re.sub(b'([\x00-\x7F])+', b' ', aBuf)
        return aBuf

    def filter_without_english_letters(self, aBuf):
        # Collapse every run of ASCII letters to a single space.
        aBuf = re.sub(b'([A-Za-z])+', b' ', aBuf)
        return aBuf

    def filter_with_english_letters(self, aBuf):
        # TODO
        return aBuf
jhawkesworth/ansible
refs/heads/devel
lib/ansible/modules/cloud/amazon/iam.py
31
#!/usr/bin/python # Copyright: Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = {'metadata_version': '1.1', 'status': ['stableinterface'], 'supported_by': 'community'} DOCUMENTATION = ''' --- module: iam short_description: Manage IAM users, groups, roles and keys description: - Allows for the management of IAM users, user API keys, groups, roles. version_added: "2.0" options: iam_type: description: - Type of IAM resource choices: ["user", "group", "role"] name: description: - Name of IAM resource to create or identify required: true new_name: description: - When state is update, will replace name with new_name on IAM resource new_path: description: - When state is update, will replace the path with new_path on the IAM resource state: description: - Whether to create, delete or update the IAM resource. Note, roles cannot be updated. required: true choices: [ "present", "absent", "update" ] path: description: - When creating or updating, specify the desired path of the resource. If state is present, it will replace the current path to match what is passed in when they do not match. default: "/" trust_policy: description: - The inline (JSON or YAML) trust policy document that grants an entity permission to assume the role. Mutually exclusive with C(trust_policy_filepath). version_added: "2.2" trust_policy_filepath: description: - The path to the trust policy document that grants an entity permission to assume the role. Mutually exclusive with C(trust_policy). version_added: "2.2" access_key_state: description: - When type is user, it creates, removes, deactivates or activates a user's access key(s). Note that actions apply only to keys specified. choices: [ "create", "remove", "active", "inactive"] key_count: description: - When access_key_state is create it will ensure this quantity of keys are present. 
Defaults to 1. default: '1' access_key_ids: description: - A list of the keys that you want impacted by the access_key_state parameter. groups: description: - A list of groups the user should belong to. When update, will gracefully remove groups not listed. password: description: - When type is user and state is present, define the users login password. Also works with update. Note that always returns changed. update_password: default: always choices: ['always', 'on_create'] description: - C(always) will update passwords if they differ. C(on_create) will only set the password for newly created users. notes: - 'Currently boto does not support the removal of Managed Policies, the module will error out if your user/group/role has managed policies when you try to do state=absent. They will need to be removed manually.' author: - "Jonathan I. Davila (@defionscode)" - "Paul Seiffert (@seiffert)" extends_documentation_fragment: - aws - ec2 ''' EXAMPLES = ''' # Basic user creation example tasks: - name: Create two new IAM users with API keys iam: iam_type: user name: "{{ item }}" state: present password: "{{ temp_pass }}" access_key_state: create loop: - jcleese - mpython # Advanced example, create two new groups and add the pre-existing user # jdavila to both groups. 
task: - name: Create Two Groups, Mario and Luigi iam: iam_type: group name: "{{ item }}" state: present loop: - Mario - Luigi register: new_groups - name: iam: iam_type: user name: jdavila state: update groups: "{{ item.created_group.group_name }}" loop: "{{ new_groups.results }}" # Example of role with custom trust policy for Lambda service - name: Create IAM role with custom trust relationship iam: iam_type: role name: AAALambdaTestRole state: present trust_policy: Version: '2012-10-17' Statement: - Action: sts:AssumeRole Effect: Allow Principal: Service: lambda.amazonaws.com ''' RETURN = ''' role_result: description: the IAM.role dict returned by Boto type: str returned: if iam_type=role and state=present sample: { "arn": "arn:aws:iam::A1B2C3D4E5F6:role/my-new-role", "assume_role_policy_document": "...truncated...", "create_date": "2017-09-02T14:32:23Z", "path": "/", "role_id": "AROAA1B2C3D4E5F6G7H8I", "role_name": "my-new-role" } roles: description: a list containing the name of the currently defined roles type: list returned: if iam_type=role and state=present sample: [ "my-new-role", "my-existing-role-1", "my-existing-role-2", "my-existing-role-3", "my-existing-role-...", ] ''' import json import traceback try: import boto.exception import boto.iam import boto.iam.connection except ImportError: pass # Taken care of by ec2.HAS_BOTO from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.ec2 import (HAS_BOTO, boto_exception, connect_to_aws, ec2_argument_spec, get_aws_connection_info) def _paginate(func, attr): ''' paginates the results from func by continuously passing in the returned marker if the results were truncated. this returns an iterator over the items in the returned response. `attr` is the name of the attribute to iterate over in the response. 
''' finished, marker = False, None while not finished: res = func(marker=marker) for item in getattr(res, attr): yield item finished = res.is_truncated == 'false' if not finished: marker = res.marker def list_all_groups(iam): return [item['group_name'] for item in _paginate(iam.get_all_groups, 'groups')] def list_all_users(iam): return [item['user_name'] for item in _paginate(iam.get_all_users, 'users')] def list_all_roles(iam): return [item['role_name'] for item in _paginate(iam.list_roles, 'roles')] def list_all_instance_profiles(iam): return [item['instance_profile_name'] for item in _paginate(iam.list_instance_profiles, 'instance_profiles')] def create_user(module, iam, name, pwd, path, key_state, key_count): key_qty = 0 keys = [] try: user_meta = iam.create_user( name, path).create_user_response.create_user_result.user changed = True if pwd is not None: pwd = iam.create_login_profile(name, pwd) if key_state in ['create']: if key_count: while key_count > key_qty: keys.append(iam.create_access_key( user_name=name).create_access_key_response. create_access_key_result. 
access_key) key_qty += 1 else: keys = None except boto.exception.BotoServerError as err: module.fail_json(changed=False, msg=str(err)) else: user_info = dict(created_user=user_meta, password=pwd, access_keys=keys) return (user_info, changed) def delete_dependencies_first(module, iam, name): changed = False # try to delete any keys try: current_keys = [ck['access_key_id'] for ck in iam.get_all_access_keys(name).list_access_keys_result.access_key_metadata] for key in current_keys: iam.delete_access_key(key, name) changed = True except boto.exception.BotoServerError as err: module.fail_json(changed=changed, msg="Failed to delete keys: %s" % err, exception=traceback.format_exc()) # try to delete login profiles try: login_profile = iam.get_login_profiles(name).get_login_profile_response iam.delete_login_profile(name) changed = True except boto.exception.BotoServerError as err: error_msg = boto_exception(err) if 'Login Profile for User ' + name + ' cannot be found.' not in error_msg: module.fail_json(changed=changed, msg="Failed to delete login profile: %s" % err, exception=traceback.format_exc()) # try to detach policies try: for policy in iam.get_all_user_policies(name).list_user_policies_result.policy_names: iam.delete_user_policy(name, policy) changed = True except boto.exception.BotoServerError as err: error_msg = boto_exception(err) if 'must detach all policies first' in error_msg: module.fail_json(changed=changed, msg="All inline polices have been removed. Though it appears" "that %s has Managed Polices. This is not " "currently supported by boto. Please detach the polices " "through the console and try again." 
% name) module.fail_json(changed=changed, msg="Failed to delete policies: %s" % err, exception=traceback.format_exc()) # try to deactivate associated MFA devices try: mfa_devices = iam.get_all_mfa_devices(name).get('list_mfa_devices_response', {}).get('list_mfa_devices_result', {}).get('mfa_devices', []) for device in mfa_devices: iam.deactivate_mfa_device(name, device['serial_number']) changed = True except boto.exception.BotoServerError as err: module.fail_json(changed=changed, msg="Failed to deactivate associated MFA devices: %s" % err, exception=traceback.format_exc()) return changed def delete_user(module, iam, name): changed = delete_dependencies_first(module, iam, name) try: iam.delete_user(name) except boto.exception.BotoServerError as ex: module.fail_json(changed=changed, msg="Failed to delete user %s: %s" % (name, ex), exception=traceback.format_exc()) else: changed = True return name, changed def update_user(module, iam, name, new_name, new_path, key_state, key_count, keys, pwd, updated): changed = False name_change = False if updated and new_name: name = new_name try: current_keys = [ck['access_key_id'] for ck in iam.get_all_access_keys(name).list_access_keys_result.access_key_metadata] status = [ck['status'] for ck in iam.get_all_access_keys(name).list_access_keys_result.access_key_metadata] key_qty = len(current_keys) except boto.exception.BotoServerError as err: error_msg = boto_exception(err) if 'cannot be found' in error_msg and updated: current_keys = [ck['access_key_id'] for ck in iam.get_all_access_keys(new_name).list_access_keys_result.access_key_metadata] status = [ck['status'] for ck in iam.get_all_access_keys(new_name).list_access_keys_result.access_key_metadata] name = new_name else: module.fail_json(changed=False, msg=str(err)) updated_key_list = {} if new_name or new_path: c_path = iam.get_user(name).get_user_result.user['path'] if (name != new_name) or (c_path != new_path): changed = True try: if not updated: user = iam.update_user( 
name, new_user_name=new_name, new_path=new_path).update_user_response.response_metadata else: user = iam.update_user( name, new_path=new_path).update_user_response.response_metadata user['updates'] = dict( old_username=name, new_username=new_name, old_path=c_path, new_path=new_path) except boto.exception.BotoServerError as err: error_msg = boto_exception(err) module.fail_json(changed=False, msg=str(err)) else: if not updated: name_change = True if pwd: try: iam.update_login_profile(name, pwd) changed = True except boto.exception.BotoServerError: try: iam.create_login_profile(name, pwd) changed = True except boto.exception.BotoServerError as err: error_msg = boto_exception(str(err)) if 'Password does not conform to the account password policy' in error_msg: module.fail_json(changed=False, msg="Password doesn't conform to policy") else: module.fail_json(msg=error_msg) try: current_keys = [ck['access_key_id'] for ck in iam.get_all_access_keys(name).list_access_keys_result.access_key_metadata] status = [ck['status'] for ck in iam.get_all_access_keys(name).list_access_keys_result.access_key_metadata] key_qty = len(current_keys) except boto.exception.BotoServerError as err: error_msg = boto_exception(err) if 'cannot be found' in error_msg and updated: current_keys = [ck['access_key_id'] for ck in iam.get_all_access_keys(new_name).list_access_keys_result.access_key_metadata] status = [ck['status'] for ck in iam.get_all_access_keys(new_name).list_access_keys_result.access_key_metadata] name = new_name else: module.fail_json(changed=False, msg=str(err)) new_keys = [] if key_state == 'create': try: while key_count > key_qty: new_keys.append(iam.create_access_key( user_name=name).create_access_key_response.create_access_key_result.access_key) key_qty += 1 changed = True except boto.exception.BotoServerError as err: module.fail_json(changed=False, msg=str(err)) if keys and key_state: for access_key in keys: if key_state in ('active', 'inactive'): if access_key in current_keys: 
for current_key, current_key_state in zip(current_keys, status): if key_state != current_key_state.lower(): try: iam.update_access_key(access_key, key_state.capitalize(), user_name=name) changed = True except boto.exception.BotoServerError as err: module.fail_json(changed=False, msg=str(err)) else: module.fail_json(msg="Supplied keys not found for %s. " "Current keys: %s. " "Supplied key(s): %s" % (name, current_keys, keys) ) if key_state == 'remove': if access_key in current_keys: try: iam.delete_access_key(access_key, user_name=name) except boto.exception.BotoServerError as err: module.fail_json(changed=False, msg=str(err)) else: changed = True try: final_keys, final_key_status = \ [ck['access_key_id'] for ck in iam.get_all_access_keys(name). list_access_keys_result. access_key_metadata],\ [ck['status'] for ck in iam.get_all_access_keys(name). list_access_keys_result. access_key_metadata] except boto.exception.BotoServerError as err: module.fail_json(changed=changed, msg=str(err)) for fk, fks in zip(final_keys, final_key_status): updated_key_list.update({fk: fks}) return name_change, updated_key_list, changed, new_keys def set_users_groups(module, iam, name, groups, updated=None, new_name=None): """ Sets groups for a user, will purge groups not explicitly passed, while retaining pre-existing groups that also are in the new list. 
""" changed = False if updated: name = new_name try: orig_users_groups = [og['group_name'] for og in iam.get_groups_for_user( name).list_groups_for_user_result.groups] remove_groups = [ rg for rg in frozenset(orig_users_groups).difference(groups)] new_groups = [ ng for ng in frozenset(groups).difference(orig_users_groups)] except boto.exception.BotoServerError as err: module.fail_json(changed=changed, msg=str(err)) else: if len(orig_users_groups) > 0: for new in new_groups: iam.add_user_to_group(new, name) for rm in remove_groups: iam.remove_user_from_group(rm, name) else: for group in groups: try: iam.add_user_to_group(group, name) except boto.exception.BotoServerError as err: error_msg = boto_exception(err) if ('The group with name %s cannot be found.' % group) in error_msg: module.fail_json(changed=False, msg="Group %s doesn't exist" % group) if len(remove_groups) > 0 or len(new_groups) > 0: changed = True return (groups, changed) def create_group(module=None, iam=None, name=None, path=None): changed = False try: iam.create_group( name, path).create_group_response.create_group_result.group except boto.exception.BotoServerError as err: module.fail_json(changed=changed, msg=str(err)) else: changed = True return name, changed def delete_group(module=None, iam=None, name=None): changed = False try: iam.delete_group(name) except boto.exception.BotoServerError as err: error_msg = boto_exception(err) if ('must delete policies first') in error_msg: for policy in iam.get_all_group_policies(name).list_group_policies_result.policy_names: iam.delete_group_policy(name, policy) try: iam.delete_group(name) except boto.exception.BotoServerError as err: error_msg = boto_exception(err) if ('must delete policies first') in error_msg: module.fail_json(changed=changed, msg="All inline polices have been removed. Though it appears" "that %s has Managed Polices. This is not " "currently supported by boto. Please detach the polices " "through the console and try again." 
% name) else: module.fail_json(changed=changed, msg=str(error_msg)) else: changed = True else: module.fail_json(changed=changed, msg=str(error_msg)) else: changed = True return changed, name def update_group(module=None, iam=None, name=None, new_name=None, new_path=None): changed = False try: current_group_path = iam.get_group( name).get_group_response.get_group_result.group['path'] if new_path: if current_group_path != new_path: iam.update_group(name, new_path=new_path) changed = True if new_name: if name != new_name: iam.update_group(name, new_group_name=new_name, new_path=new_path) changed = True name = new_name except boto.exception.BotoServerError as err: module.fail_json(changed=changed, msg=str(err)) return changed, name, new_path, current_group_path def create_role(module, iam, name, path, role_list, prof_list, trust_policy_doc): changed = False iam_role_result = None instance_profile_result = None try: if name not in role_list: changed = True iam_role_result = iam.create_role(name, assume_role_policy_document=trust_policy_doc, path=path).create_role_response.create_role_result.role if name not in prof_list: instance_profile_result = iam.create_instance_profile(name, path=path) \ .create_instance_profile_response.create_instance_profile_result.instance_profile iam.add_role_to_instance_profile(name, name) else: instance_profile_result = iam.get_instance_profile(name).get_instance_profile_response.get_instance_profile_result.instance_profile except boto.exception.BotoServerError as err: module.fail_json(changed=changed, msg=str(err)) else: updated_role_list = list_all_roles(iam) iam_role_result = iam.get_role(name).get_role_response.get_role_result.role return changed, updated_role_list, iam_role_result, instance_profile_result def delete_role(module, iam, name, role_list, prof_list): changed = False iam_role_result = None instance_profile_result = None try: if name in role_list: cur_ins_prof = [rp['instance_profile_name'] for rp in 
iam.list_instance_profiles_for_role(name). list_instance_profiles_for_role_result. instance_profiles] for profile in cur_ins_prof: iam.remove_role_from_instance_profile(profile, name) try: iam.delete_role(name) except boto.exception.BotoServerError as err: error_msg = boto_exception(err) if ('must detach all policies first') in error_msg: for policy in iam.list_role_policies(name).list_role_policies_result.policy_names: iam.delete_role_policy(name, policy) try: iam_role_result = iam.delete_role(name) except boto.exception.BotoServerError as err: error_msg = boto_exception(err) if ('must detach all policies first') in error_msg: module.fail_json(changed=changed, msg="All inline polices have been removed. Though it appears" "that %s has Managed Polices. This is not " "currently supported by boto. Please detach the polices " "through the console and try again." % name) else: module.fail_json(changed=changed, msg=str(err)) else: changed = True else: changed = True for prof in prof_list: if name == prof: instance_profile_result = iam.delete_instance_profile(name) except boto.exception.BotoServerError as err: module.fail_json(changed=changed, msg=str(err)) else: updated_role_list = list_all_roles(iam) return changed, updated_role_list, iam_role_result, instance_profile_result def main(): argument_spec = ec2_argument_spec() argument_spec.update(dict( iam_type=dict( default=None, required=True, choices=['user', 'group', 'role']), groups=dict(type='list', default=None, required=False), state=dict( default=None, required=True, choices=['present', 'absent', 'update']), password=dict(default=None, required=False, no_log=True), update_password=dict(default='always', required=False, choices=['always', 'on_create']), access_key_state=dict(default=None, required=False, choices=[ 'active', 'inactive', 'create', 'remove', 'Active', 'Inactive', 'Create', 'Remove']), access_key_ids=dict(type='list', default=None, required=False), key_count=dict(type='int', default=1, required=False), 
name=dict(default=None, required=False), trust_policy_filepath=dict(default=None, required=False), trust_policy=dict(type='dict', default=None, required=False), new_name=dict(default=None, required=False), path=dict(default='/', required=False), new_path=dict(default=None, required=False) ) ) module = AnsibleModule( argument_spec=argument_spec, mutually_exclusive=[['trust_policy', 'trust_policy_filepath']], ) if not HAS_BOTO: module.fail_json(msg='This module requires boto, please install it') state = module.params.get('state').lower() iam_type = module.params.get('iam_type').lower() groups = module.params.get('groups') name = module.params.get('name') new_name = module.params.get('new_name') password = module.params.get('password') update_pw = module.params.get('update_password') path = module.params.get('path') new_path = module.params.get('new_path') key_count = module.params.get('key_count') key_state = module.params.get('access_key_state') trust_policy = module.params.get('trust_policy') trust_policy_filepath = module.params.get('trust_policy_filepath') key_ids = module.params.get('access_key_ids') if key_state: key_state = key_state.lower() if any([n in key_state for n in ['active', 'inactive']]) and not key_ids: module.fail_json(changed=False, msg="At least one access key has to be defined in order" " to use 'active' or 'inactive'") if iam_type == 'user' and module.params.get('password') is not None: pwd = module.params.get('password') elif iam_type != 'user' and module.params.get('password') is not None: module.fail_json(msg="a password is being specified when the iam_type " "is not user. Check parameters") else: pwd = None if iam_type != 'user' and (module.params.get('access_key_state') is not None or module.params.get('access_key_id') is not None): module.fail_json(msg="the IAM type must be user, when IAM access keys " "are being modified. 
Check parameters") if iam_type == 'role' and state == 'update': module.fail_json(changed=False, msg="iam_type: role, cannot currently be updated, " "please specify present or absent") # check if trust_policy is present -- it can be inline JSON or a file path to a JSON file if trust_policy_filepath: try: with open(trust_policy_filepath, 'r') as json_data: trust_policy_doc = json.dumps(json.load(json_data)) except Exception as e: module.fail_json(msg=str(e) + ': ' + trust_policy_filepath) elif trust_policy: try: trust_policy_doc = json.dumps(trust_policy) except Exception as e: module.fail_json(msg=str(e) + ': ' + trust_policy) else: trust_policy_doc = None region, ec2_url, aws_connect_kwargs = get_aws_connection_info(module) try: if region: iam = connect_to_aws(boto.iam, region, **aws_connect_kwargs) else: iam = boto.iam.connection.IAMConnection(**aws_connect_kwargs) except boto.exception.NoAuthHandlerFound as e: module.fail_json(msg=str(e)) result = {} changed = False try: orig_group_list = list_all_groups(iam) orig_user_list = list_all_users(iam) orig_role_list = list_all_roles(iam) orig_prof_list = list_all_instance_profiles(iam) except boto.exception.BotoServerError as err: module.fail_json(msg=err.message) if iam_type == 'user': been_updated = False user_groups = None user_exists = any([n in [name, new_name] for n in orig_user_list]) if user_exists: current_path = iam.get_user(name).get_user_result.user['path'] if not new_path and current_path != path: new_path = path path = current_path if state == 'present' and not user_exists and not new_name: (meta, changed) = create_user( module, iam, name, password, path, key_state, key_count) keys = iam.get_all_access_keys(name).list_access_keys_result.\ access_key_metadata if groups: (user_groups, changed) = set_users_groups( module, iam, name, groups, been_updated, new_name) module.exit_json( user_meta=meta, groups=user_groups, keys=keys, changed=changed) elif state in ['present', 'update'] and user_exists: if 
update_pw == 'on_create': password = None if name not in orig_user_list and new_name in orig_user_list: been_updated = True name_change, key_list, user_changed, new_key = update_user( module, iam, name, new_name, new_path, key_state, key_count, key_ids, password, been_updated) if new_key: user_meta = {'access_keys': list(new_key)} user_meta['access_keys'].extend( [{'access_key_id': key, 'status': value} for key, value in key_list.items() if key not in [it['access_key_id'] for it in new_key]]) else: user_meta = { 'access_keys': [{'access_key_id': key, 'status': value} for key, value in key_list.items()]} if name_change and new_name: orig_name = name name = new_name if isinstance(groups, list): user_groups, groups_changed = set_users_groups( module, iam, name, groups, been_updated, new_name) if groups_changed == user_changed: changed = groups_changed else: changed = True else: changed = user_changed if new_name and new_path: module.exit_json(changed=changed, groups=user_groups, old_user_name=orig_name, new_user_name=new_name, old_path=path, new_path=new_path, keys=key_list, created_keys=new_key, user_meta=user_meta) elif new_name and not new_path and not been_updated: module.exit_json( changed=changed, groups=user_groups, old_user_name=orig_name, new_user_name=new_name, keys=key_list, created_keys=new_key, user_meta=user_meta) elif new_name and not new_path and been_updated: module.exit_json( changed=changed, groups=user_groups, user_name=new_name, keys=key_list, key_state=key_state, created_keys=new_key, user_meta=user_meta) elif not new_name and new_path: module.exit_json( changed=changed, groups=user_groups, user_name=name, old_path=path, new_path=new_path, keys=key_list, created_keys=new_key, user_meta=user_meta) else: module.exit_json( changed=changed, groups=user_groups, user_name=name, keys=key_list, created_keys=new_key, user_meta=user_meta) elif state == 'update' and not user_exists: module.fail_json( msg="The user %s does not exist. No update made." 
% name) elif state == 'absent': if user_exists: try: set_users_groups(module, iam, name, '') name, changed = delete_user(module, iam, name) module.exit_json(deleted_user=name, changed=changed) except Exception as ex: module.fail_json(changed=changed, msg=str(ex)) else: module.exit_json( changed=False, msg="User %s is already absent from your AWS IAM users" % name) elif iam_type == 'group': group_exists = name in orig_group_list if state == 'present' and not group_exists: new_group, changed = create_group(module=module, iam=iam, name=name, path=path) module.exit_json(changed=changed, group_name=new_group) elif state in ['present', 'update'] and group_exists: changed, updated_name, updated_path, cur_path = update_group( module=module, iam=iam, name=name, new_name=new_name, new_path=new_path) if new_path and new_name: module.exit_json(changed=changed, old_group_name=name, new_group_name=updated_name, old_path=cur_path, new_group_path=updated_path) if new_path and not new_name: module.exit_json(changed=changed, group_name=name, old_path=cur_path, new_group_path=updated_path) if not new_path and new_name: module.exit_json(changed=changed, old_group_name=name, new_group_name=updated_name, group_path=cur_path) if not new_path and not new_name: module.exit_json( changed=changed, group_name=name, group_path=cur_path) elif state == 'update' and not group_exists: module.fail_json( changed=changed, msg="Update Failed. Group %s doesn't seem to exist!" 
% name) elif state == 'absent': if name in orig_group_list: removed_group, changed = delete_group(module=module, iam=iam, name=name) module.exit_json(changed=changed, delete_group=removed_group) else: module.exit_json(changed=changed, msg="Group already absent") elif iam_type == 'role': role_list = [] if state == 'present': changed, role_list, role_result, instance_profile_result = create_role( module, iam, name, path, orig_role_list, orig_prof_list, trust_policy_doc) elif state == 'absent': changed, role_list, role_result, instance_profile_result = delete_role( module, iam, name, orig_role_list, orig_prof_list) elif state == 'update': module.fail_json( changed=False, msg='Role update not currently supported by boto.') module.exit_json(changed=changed, roles=role_list, role_result=role_result, instance_profile_result=instance_profile_result) if __name__ == '__main__': main()
anurag03/integration_tests
refs/heads/master
cfme/tests/infrastructure/test_child_tenant.py
1
import fauxfactory import pytest from riggerlib import recursive_update from widgetastic.utils import partial_match from cfme import test_requirements from cfme.infrastructure.provider.rhevm import RHEVMProvider from cfme.infrastructure.provider.virtualcenter import VMwareProvider from cfme.markers.env_markers.provider import ONE_PER_TYPE from cfme.provisioning import do_vm_provisioning from cfme.utils.blockers import GH from cfme.utils.generators import random_vm_name pytestmark = [ test_requirements.quota, pytest.mark.meta(server_roles="+automate"), pytest.mark.usefixtures('uses_infra_providers'), pytest.mark.long_running, pytest.mark.provider([VMwareProvider, RHEVMProvider], scope="module", selector=ONE_PER_TYPE) ] @pytest.fixture def vm_name(): return random_vm_name(context='quota') @pytest.fixture def template_name(provisioning): return provisioning["template"] @pytest.fixture def prov_data(vm_name, provisioning): return { "catalog": {'vm_name': vm_name}, "environment": {'automatic_placement': True}, "network": {'vlan': partial_match(provisioning['vlan'])} } @pytest.fixture def set_child_tenant_quota(request, appliance, new_child): field, value = request.param new_child.set_quota(**{'{}_cb'.format(field): True, field: value}) yield # will refresh page as navigation to configuration is blocked if alerts are on the page appliance.server.login_admin() appliance.server.browser.refresh() new_child.set_quota(**{'{}_cb'.format(field): False}) @pytest.fixture(scope='module') def new_tenant(appliance): collection = appliance.collections.tenants tenant = collection.create(name='tenant{}'.format(fauxfactory.gen_alphanumeric()), description='tenant_des{}'.format(fauxfactory.gen_alphanumeric()), parent=collection.get_root_tenant()) yield tenant if tenant.exists: tenant.delete() @pytest.fixture(scope='module') def new_child(appliance, new_tenant): collection = appliance.collections.tenants child_tenant = 
collection.create(name='tenant{}'.format(fauxfactory.gen_alphanumeric()), description='tenant_des{}'.format( fauxfactory.gen_alphanumeric()), parent=new_tenant) yield child_tenant if child_tenant.exists: child_tenant.delete() @pytest.fixture(scope='module') def new_group(appliance, new_child, new_tenant): collection = appliance.collections.groups group = collection.create(description='group_{}'.format(fauxfactory.gen_alphanumeric()), role='EvmRole-super_administrator', tenant='My Company/{}/{}'.format(new_tenant.name, new_child.name)) yield group if group.exists: group.delete() @pytest.fixture(scope='module') def new_user(appliance, new_group, new_credential): collection = appliance.collections.users user = collection.create( name='user_{}'.format(fauxfactory.gen_alphanumeric()), credential=new_credential, email='xyz@redhat.com', groups=new_group, cost_center='Workload', value_assign='Database') yield user if user.exists: user.delete() @pytest.mark.rhel_testing @pytest.mark.rhv3 @pytest.mark.meta(blockers=[GH('ManageIQ/integration_tests:7385', unblock=lambda provider, appliance_version: not provider.one_of(RHEVMProvider) or appliance_version < '5.9')]) # first arg of parametrize is the list of fixtures or parameters, # second arg is a list of lists, with each one a test is to be generated # sequence is important here # indirect is the list where we define which fixtures are to be passed values indirectly. 
@pytest.mark.parametrize( ['set_child_tenant_quota', 'custom_prov_data', 'extra_msg', 'approve'], [ [('cpu', '2'), {'hardware': {'num_sockets': '8'}}, '', False], [('storage', '0.01'), {}, '', False], [('memory', '2'), {'hardware': {'memory': '4096'}}, '', False], [('vm', '1'), {'catalog': {'num_vms': '4'}}, '###', True] ], indirect=['set_child_tenant_quota'], ids=['max_cpu', 'max_storage', 'max_memory', 'max_vms'] ) def test_child_tenant_quota_enforce_via_lifecycle_infra(appliance, provider, setup_provider, new_user, set_child_tenant_quota, extra_msg, custom_prov_data, approve, prov_data, vm_name, template_name): """Test child tenant feature via lifecycle method. Metadata: test_flag: quota """ with new_user: recursive_update(prov_data, custom_prov_data) do_vm_provisioning(appliance, template_name=template_name, provider=provider, vm_name=vm_name, provisioning_data=prov_data, smtp_test=False, wait=False, request=None) # nav to requests page to check quota validation request_description = 'Provision from [{}] to [{}{}]'.format(template_name, vm_name, extra_msg) provision_request = appliance.collections.requests.instantiate(request_description) if approve: provision_request.approve_request(method='ui', reason="Approved") provision_request.wait_for_request(method='ui') assert provision_request.row.reason.text == "Quota Exceeded"
rhdedgar/openshift-tools
refs/heads/stg
openshift/installer/vendored/openshift-ansible-3.6.173.0.27/roles/lib_openshift/src/lib/base.py
8
# pylint: skip-file # flake8: noqa # pylint: disable=too-many-lines # noqa: E301,E302,E303,T001 class OpenShiftCLIError(Exception): '''Exception class for openshiftcli''' pass ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')] def locate_oc_binary(): ''' Find and return oc binary file ''' # https://github.com/openshift/openshift-ansible/issues/3410 # oc can be in /usr/local/bin in some cases, but that may not # be in $PATH due to ansible/sudo paths = os.environ.get("PATH", os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS oc_binary = 'oc' # Use shutil.which if it is available, otherwise fallback to a naive path search try: which_result = shutil.which(oc_binary, path=os.pathsep.join(paths)) if which_result is not None: oc_binary = which_result except AttributeError: for path in paths: if os.path.exists(os.path.join(path, oc_binary)): oc_binary = os.path.join(path, oc_binary) break return oc_binary # pylint: disable=too-few-public-methods class OpenShiftCLI(object): ''' Class to wrap the command line tools ''' def __init__(self, namespace, kubeconfig='/etc/origin/master/admin.kubeconfig', verbose=False, all_namespaces=False): ''' Constructor for OpenshiftCLI ''' self.namespace = namespace self.verbose = verbose self.kubeconfig = Utils.create_tmpfile_copy(kubeconfig) self.all_namespaces = all_namespaces self.oc_binary = locate_oc_binary() # Pylint allows only 5 arguments to be passed. 
# pylint: disable=too-many-arguments def _replace_content(self, resource, rname, content, force=False, sep='.'): ''' replace the current object with the content ''' res = self._get(resource, rname) if not res['results']: return res fname = Utils.create_tmpfile(rname + '-') yed = Yedit(fname, res['results'][0], separator=sep) changes = [] for key, value in content.items(): changes.append(yed.put(key, value)) if any([change[0] for change in changes]): yed.write() atexit.register(Utils.cleanup, [fname]) return self._replace(fname, force) return {'returncode': 0, 'updated': False} def _replace(self, fname, force=False): '''replace the current object with oc replace''' # We are removing the 'resourceVersion' to handle # a race condition when modifying oc objects yed = Yedit(fname) results = yed.delete('metadata.resourceVersion') if results[0]: yed.write() cmd = ['replace', '-f', fname] if force: cmd.append('--force') return self.openshift_cmd(cmd) def _create_from_content(self, rname, content): '''create a temporary file and then call oc create on it''' fname = Utils.create_tmpfile(rname + '-') yed = Yedit(fname, content=content) yed.write() atexit.register(Utils.cleanup, [fname]) return self._create(fname) def _create(self, fname): '''call oc create on a filename''' return self.openshift_cmd(['create', '-f', fname]) def _delete(self, resource, name=None, selector=None): '''call oc delete on a resource''' cmd = ['delete', resource] if selector is not None: cmd.append('--selector={}'.format(selector)) elif name is not None: cmd.append(name) else: raise OpenShiftCLIError('Either name or selector is required when calling delete.') return self.openshift_cmd(cmd) def _process(self, template_name, create=False, params=None, template_data=None): # noqa: E501 '''process a template template_name: the name of the template to process create: whether to send to oc create after processing params: the parameters for the template template_data: the incoming template's data; instead of 
a file ''' cmd = ['process'] if template_data: cmd.extend(['-f', '-']) else: cmd.append(template_name) if params: param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()] cmd.append('-v') cmd.extend(param_str) results = self.openshift_cmd(cmd, output=True, input_data=template_data) if results['returncode'] != 0 or not create: return results fname = Utils.create_tmpfile(template_name + '-') yed = Yedit(fname, results['results']) yed.write() atexit.register(Utils.cleanup, [fname]) return self.openshift_cmd(['create', '-f', fname]) def _get(self, resource, name=None, selector=None): '''return a resource by name ''' cmd = ['get', resource] if selector is not None: cmd.append('--selector={}'.format(selector)) elif name is not None: cmd.append(name) cmd.extend(['-o', 'json']) rval = self.openshift_cmd(cmd, output=True) # Ensure results are retuned in an array if 'items' in rval: rval['results'] = rval['items'] elif not isinstance(rval['results'], list): rval['results'] = [rval['results']] return rval def _schedulable(self, node=None, selector=None, schedulable=True): ''' perform oadm manage-node scheduable ''' cmd = ['manage-node'] if node: cmd.extend(node) else: cmd.append('--selector={}'.format(selector)) cmd.append('--schedulable={}'.format(schedulable)) return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # noqa: E501 def _list_pods(self, node=None, selector=None, pod_selector=None): ''' perform oadm list pods node: the node in which to list pods selector: the label selector filter if provided pod_selector: the pod selector filter if provided ''' cmd = ['manage-node'] if node: cmd.extend(node) else: cmd.append('--selector={}'.format(selector)) if pod_selector: cmd.append('--pod-selector={}'.format(pod_selector)) cmd.extend(['--list-pods', '-o', 'json']) return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') # pylint: disable=too-many-arguments def _evacuate(self, node=None, 
selector=None, pod_selector=None, dry_run=False, grace_period=None, force=False): ''' perform oadm manage-node evacuate ''' cmd = ['manage-node'] if node: cmd.extend(node) else: cmd.append('--selector={}'.format(selector)) if dry_run: cmd.append('--dry-run') if pod_selector: cmd.append('--pod-selector={}'.format(pod_selector)) if grace_period: cmd.append('--grace-period={}'.format(int(grace_period))) if force: cmd.append('--force') cmd.append('--evacuate') return self.openshift_cmd(cmd, oadm=True, output=True, output_type='raw') def _version(self): ''' return the openshift version''' return self.openshift_cmd(['version'], output=True, output_type='raw') def _import_image(self, url=None, name=None, tag=None): ''' perform image import ''' cmd = ['import-image'] image = '{0}'.format(name) if tag: image += ':{0}'.format(tag) cmd.append(image) if url: cmd.append('--from={0}/{1}'.format(url, image)) cmd.append('-n{0}'.format(self.namespace)) cmd.append('--confirm') return self.openshift_cmd(cmd) def _run(self, cmds, input_data): ''' Actually executes the command. This makes mocking easier. 
''' curr_env = os.environ.copy() curr_env.update({'KUBECONFIG': self.kubeconfig}) proc = subprocess.Popen(cmds, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=curr_env) stdout, stderr = proc.communicate(input_data) return proc.returncode, stdout.decode('utf-8'), stderr.decode('utf-8') # pylint: disable=too-many-arguments,too-many-branches def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json', input_data=None): '''Base command for oc ''' cmds = [self.oc_binary] if oadm: cmds.append('adm') cmds.extend(cmd) if self.all_namespaces: cmds.extend(['--all-namespaces']) elif self.namespace is not None and self.namespace.lower() not in ['none', 'emtpy']: # E501 cmds.extend(['-n', self.namespace]) if self.verbose: print(' '.join(cmds)) try: returncode, stdout, stderr = self._run(cmds, input_data) except OSError as ex: returncode, stdout, stderr = 1, '', 'Failed to execute {}: {}'.format(subprocess.list2cmdline(cmds), ex) rval = {"returncode": returncode, "cmd": ' '.join(cmds)} if output_type == 'json': rval['results'] = {} if output and stdout: try: rval['results'] = json.loads(stdout) except ValueError as verr: if "No JSON object could be decoded" in verr.args: rval['err'] = verr.args elif output_type == 'raw': rval['results'] = stdout if output else '' if self.verbose: print("STDOUT: {0}".format(stdout)) print("STDERR: {0}".format(stderr)) if 'err' in rval or returncode != 0: rval.update({"stderr": stderr, "stdout": stdout}) return rval class Utils(object): ''' utilities for openshiftcli modules ''' @staticmethod def _write(filename, contents): ''' Actually write the file contents to disk. This helps with mocking. 
''' with open(filename, 'w') as sfd: sfd.write(contents) @staticmethod def create_tmp_file_from_contents(rname, data, ftype='yaml'): ''' create a file in tmp with name and contents''' tmp = Utils.create_tmpfile(prefix=rname) if ftype == 'yaml': # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage # pylint: disable=no-member if hasattr(yaml, 'RoundTripDumper'): Utils._write(tmp, yaml.dump(data, Dumper=yaml.RoundTripDumper)) else: Utils._write(tmp, yaml.safe_dump(data, default_flow_style=False)) elif ftype == 'json': Utils._write(tmp, json.dumps(data)) else: Utils._write(tmp, data) # Register cleanup when module is done atexit.register(Utils.cleanup, [tmp]) return tmp @staticmethod def create_tmpfile_copy(inc_file): '''create a temporary copy of a file''' tmpfile = Utils.create_tmpfile('lib_openshift-') Utils._write(tmpfile, open(inc_file).read()) # Cleanup the tmpfile atexit.register(Utils.cleanup, [tmpfile]) return tmpfile @staticmethod def create_tmpfile(prefix='tmp'): ''' Generates and returns a temporary file name ''' with tempfile.NamedTemporaryFile(prefix=prefix, delete=False) as tmp: return tmp.name @staticmethod def create_tmp_files_from_contents(content, content_type=None): '''Turn an array of dict: filename, content into a files array''' if not isinstance(content, list): content = [content] files = [] for item in content: path = Utils.create_tmp_file_from_contents(item['path'] + '-', item['data'], ftype=content_type) files.append({'name': os.path.basename(item['path']), 'path': path}) return files @staticmethod def cleanup(files): '''Clean up on exit ''' for sfile in files: if os.path.exists(sfile): if os.path.isdir(sfile): shutil.rmtree(sfile) elif os.path.isfile(sfile): os.remove(sfile) @staticmethod def exists(results, _name): ''' Check to see if the results include the name ''' if not results: return False if Utils.find_result(results, _name): return True return False @staticmethod def find_result(results, _name): ''' Find the specified 
result by name''' rval = None for result in results: if 'metadata' in result and result['metadata']['name'] == _name: rval = result break return rval @staticmethod def get_resource_file(sfile, sfile_type='yaml'): ''' return the service file ''' contents = None with open(sfile) as sfd: contents = sfd.read() if sfile_type == 'yaml': # AUDIT:no-member makes sense here due to ruamel.YAML/PyYAML usage # pylint: disable=no-member if hasattr(yaml, 'RoundTripLoader'): contents = yaml.load(contents, yaml.RoundTripLoader) else: contents = yaml.safe_load(contents) elif sfile_type == 'json': contents = json.loads(contents) return contents @staticmethod def filter_versions(stdout): ''' filter the oc version output ''' version_dict = {} version_search = ['oc', 'openshift', 'kubernetes'] for line in stdout.strip().split('\n'): for term in version_search: if not line: continue if line.startswith(term): version_dict[term] = line.split()[-1] # horrible hack to get openshift version in Openshift 3.2 # By default "oc version in 3.2 does not return an "openshift" version if "openshift" not in version_dict: version_dict["openshift"] = version_dict["oc"] return version_dict @staticmethod def add_custom_versions(versions): ''' create custom versions strings ''' versions_dict = {} for tech, version in versions.items(): # clean up "-" from version if "-" in version: version = version.split("-")[0] if version.startswith('v'): versions_dict[tech + '_numeric'] = version[1:].split('+')[0] # "v3.3.0.33" is what we have, we want "3.3" versions_dict[tech + '_short'] = version[1:4] return versions_dict @staticmethod def openshift_installed(): ''' check if openshift is installed ''' import rpm transaction_set = rpm.TransactionSet() rpmquery = transaction_set.dbMatch("name", "atomic-openshift") return rpmquery.count() > 0 # Disabling too-many-branches. 
This is a yaml dictionary comparison function # pylint: disable=too-many-branches,too-many-return-statements,too-many-statements @staticmethod def check_def_equal(user_def, result_def, skip_keys=None, debug=False): ''' Given a user defined definition, compare it with the results given back by our query. ''' # Currently these values are autogenerated and we do not need to check them skip = ['metadata', 'status'] if skip_keys: skip.extend(skip_keys) for key, value in result_def.items(): if key in skip: continue # Both are lists if isinstance(value, list): if key not in user_def: if debug: print('User data does not have key [%s]' % key) print('User data: %s' % user_def) return False if not isinstance(user_def[key], list): if debug: print('user_def[key] is not a list key=[%s] user_def[key]=%s' % (key, user_def[key])) return False if len(user_def[key]) != len(value): if debug: print("List lengths are not equal.") print("key=[%s]: user_def[%s] != value[%s]" % (key, len(user_def[key]), len(value))) print("user_def: %s" % user_def[key]) print("value: %s" % value) return False for values in zip(user_def[key], value): if isinstance(values[0], dict) and isinstance(values[1], dict): if debug: print('sending list - list') print(type(values[0])) print(type(values[1])) result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug) if not result: print('list compare returned false') return False elif value != user_def[key]: if debug: print('value should be identical') print(user_def[key]) print(value) return False # recurse on a dictionary elif isinstance(value, dict): if key not in user_def: if debug: print("user_def does not have key [%s]" % key) return False if not isinstance(user_def[key], dict): if debug: print("dict returned false: not instance of dict") return False # before passing ensure keys match api_values = set(value.keys()) - set(skip) user_values = set(user_def[key].keys()) - set(skip) if api_values != user_values: if debug: print("keys are 
not equal in dict") print(user_values) print(api_values) return False result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug) if not result: if debug: print("dict returned false") print(result) return False # Verify each key, value pair is the same else: if key not in user_def or value != user_def[key]: if debug: print("value not equal; user_def does not have key") print(key) print(value) if key in user_def: print(user_def[key]) return False if debug: print('returning true') return True class OpenShiftCLIConfig(object): '''Generic Config''' def __init__(self, rname, namespace, kubeconfig, options): self.kubeconfig = kubeconfig self.name = rname self.namespace = namespace self._options = options @property def config_options(self): ''' return config options ''' return self._options def to_option_list(self, ascommalist=''): '''return all options as a string if ascommalist is set to the name of a key, and the value of that key is a dict, format the dict as a list of comma delimited key=value pairs''' return self.stringify(ascommalist) def stringify(self, ascommalist=''): ''' return the options hash as cli params in a string if ascommalist is set to the name of a key, and the value of that key is a dict, format the dict as a list of comma delimited key=value pairs ''' rval = [] for key in sorted(self.config_options.keys()): data = self.config_options[key] if data['include'] \ and (data['value'] or isinstance(data['value'], int)): if key == ascommalist: val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())]) else: val = data['value'] rval.append('--{}={}'.format(key.replace('_', '-'), val)) return rval
karan/warehouse
refs/heads/master
tests/unit/accounts/test_core.py
5
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for warehouse.accounts (login, authentication, includeme)."""

import pretend

from warehouse import accounts
from warehouse.accounts.interfaces import IUserService
from warehouse.accounts.services import database_login_factory


class TestLogin:
    """Tests for the accounts._login credential-check helper."""

    def test_with_no_user(self):
        # Unknown username: the user service returns no userid.
        service = pretend.stub(
            find_userid=pretend.call_recorder(lambda username: None),
        )
        request = pretend.stub(
            find_service=pretend.call_recorder(lambda iface, context: service),
        )
        assert accounts._login("myuser", "mypass", request) is None
        assert request.find_service.calls == [
            pretend.call(IUserService, context=None),
        ]
        assert service.find_userid.calls == [pretend.call("myuser")]

    def test_with_invalid_password(self):
        # Known user, wrong password: check_password returns False.
        userid = pretend.stub()
        service = pretend.stub(
            find_userid=pretend.call_recorder(lambda username: userid),
            check_password=pretend.call_recorder(
                lambda userid, password: False
            ),
        )
        request = pretend.stub(
            find_service=pretend.call_recorder(lambda iface, context: service),
        )
        assert accounts._login("myuser", "mypass", request) is None
        assert request.find_service.calls == [
            pretend.call(IUserService, context=None),
        ]
        assert service.find_userid.calls == [pretend.call("myuser")]
        assert service.check_password.calls == [pretend.call(userid, "mypass")]

    def test_with_valid_password(self, monkeypatch):
        # Successful login delegates to accounts._authenticate and returns
        # whatever principals it produces.
        principals = pretend.stub()
        authenticate = pretend.call_recorder(
            lambda userid, request: principals
        )
        monkeypatch.setattr(accounts, "_authenticate", authenticate)

        userid = pretend.stub()
        service = pretend.stub(
            find_userid=pretend.call_recorder(lambda username: userid),
            check_password=pretend.call_recorder(
                lambda userid, password: True
            ),
        )
        request = pretend.stub(
            find_service=pretend.call_recorder(lambda iface, context: service),
        )
        assert accounts._login("myuser", "mypass", request) is principals
        assert request.find_service.calls == [
            pretend.call(IUserService, context=None),
        ]
        assert service.find_userid.calls == [pretend.call("myuser")]
        assert service.check_password.calls == [pretend.call(userid, "mypass")]
        assert authenticate.calls == [pretend.call(userid, request)]


class TestAuthenticate:
    """Tests for the accounts._authenticate principal callback."""

    def test_with_user(self):
        # An existing user yields an (empty) principal list.
        user = pretend.stub()
        service = pretend.stub(
            get_user=pretend.call_recorder(lambda userid: user)
        )
        request = pretend.stub(find_service=lambda iface, context: service)
        assert accounts._authenticate(1, request) == []
        assert service.get_user.calls == [pretend.call(1)]

    def test_without_user(self):
        # A stale/unknown userid yields None (unauthenticated).
        service = pretend.stub(
            get_user=pretend.call_recorder(lambda userid: None)
        )
        request = pretend.stub(find_service=lambda iface, context: service)
        assert accounts._authenticate(1, request) is None
        assert service.get_user.calls == [pretend.call(1)]


class TestUser:
    """Tests for the accounts._user request property."""

    def test_with_user(self):
        user = pretend.stub()
        service = pretend.stub(
            get_user=pretend.call_recorder(lambda userid: user)
        )
        request = pretend.stub(
            find_service=lambda iface, context: service,
            authenticated_userid=100,
        )
        assert accounts._user(request) is user
        assert service.get_user.calls == [pretend.call(100)]

    def test_without_users(self):
        service = pretend.stub(
            get_user=pretend.call_recorder(lambda userid: None)
        )
        request = pretend.stub(
            find_service=lambda iface, context: service,
            authenticated_userid=100,
        )
        assert accounts._user(request) is None
        assert service.get_user.calls == [pretend.call(100)]

    def test_without_userid(self):
        # No authenticated userid: short-circuit without touching the service.
        request = pretend.stub(authenticated_userid=None)
        assert accounts._user(request) is None


def test_includeme(monkeypatch):
    """includeme() wires the service factory, request property and policies."""
    authn_obj = pretend.stub()
    authn_cls = pretend.call_recorder(lambda callback: authn_obj)
    authz_obj = pretend.stub()
    authz_cls = pretend.call_recorder(lambda: authz_obj)
    monkeypatch.setattr(accounts, "SessionAuthenticationPolicy", authn_cls)
    monkeypatch.setattr(accounts, "ACLAuthorizationPolicy", authz_cls)

    config = pretend.stub(
        register_service_factory=pretend.call_recorder(
            lambda factory, iface: None
        ),
        add_request_method=pretend.call_recorder(lambda f, name, reify: None),
        set_authentication_policy=pretend.call_recorder(lambda p: None),
        set_authorization_policy=pretend.call_recorder(lambda p: None),
    )

    accounts.includeme(config)

    # BUG FIX: these six checks were bare `==` expressions with no `assert`,
    # so the test could never fail on any of them.
    assert config.register_service_factory.calls == [
        pretend.call(database_login_factory, IUserService),
    ]
    assert config.add_request_method.calls == [
        pretend.call(accounts._user, name="user", reify=True),
    ]
    assert config.set_authentication_policy.calls == [pretend.call(authn_obj)]
    assert config.set_authorization_policy.calls == [pretend.call(authz_obj)]
    assert authn_cls.calls == [pretend.call(callback=accounts._authenticate)]
    assert authz_cls.calls == [pretend.call()]
drousis/pywebdata
refs/heads/master
pywebdata/parameter.py
1
import math

try:
    # Python 2: lazy filter lives in itertools.
    from itertools import ifilter
except ImportError:  # Python 3: the builtin filter is already lazy.
    ifilter = filter

# Supported value-coercion types, keyed by their config-file spelling.
iotypes = {'float': float, 'int': int}


def frange(a, b=None, incr=1.):
    """Return a generator of floats from ``a`` (inclusive) to ``b`` (exclusive).

    Mirrors the builtin ``range``: ``frange(stop)`` counts from ``0.0``;
    ``frange(start, stop, incr)`` counts from ``start`` in steps of ``incr``.
    """
    if b is None:
        b, a = a, 0.
    else:
        a = float(a)
    # BUG FIX: the span must be divided by incr *before* rounding up.  The
    # original `int(math.ceil(b - a)/incr)` truncated the count whenever
    # incr > 1 did not divide (b - a), silently dropping trailing values.
    count = int(math.ceil((b - a) / incr))
    return (a + n * incr for n in range(count))


class Output(object):
    """Descriptor for a single parsed output value."""

    def __init__(self, iotype, f_parse=None):
        self.iotype = iotypes[iotype]  # coercion callable (float or int)
        self.value = None              # last parsed value, set by the caller
        self.f_parse = f_parse         # optional custom parse function


class Input(object):
    """A typed input parameter with optional bounds, default and increment.

    ``update()`` validates a candidate value against the bounds; ``valid``
    records the outcome of the most recent update.
    """

    def __init__(self, iotype, required=True, min=None, max=None,
                 default=None, incr=None):
        self.iotype = iotypes[iotype]
        self.is_required = required
        self._min = min
        self._max = max
        self._default = default
        self._incr = incr
        self.value = None
        self.valid = True

    def update(self, value):
        """Accept ``value`` if it lies within [min, max]; else mark invalid.

        On rejection the previous ``value`` is deliberately kept.
        """
        # BUG FIX: compare the bounds against None explicitly.  The old
        # truthiness test (`not self._min`) treated a bound of 0 as "no
        # bound", so e.g. min=0 accepted negative values.
        satisfy_min = self._min is None or value >= self._min
        satisfy_max = self._max is None or value <= self._max
        if satisfy_min and satisfy_max:
            self.value = value
            self.valid = True
        else:
            self.valid = False

    def set_incr(self, incr):
        self._incr = incr

    def get_min(self):
        return self._min

    def get_max(self):
        return self._max

    def get_incr(self):
        return self._incr

    def get_range(self, condition_list):
        """Iterate min..max (inclusive) in incr steps, filtered by conditions.

        Each condition is a dict with an 'operator' (binary predicate) and a
        'value' coerced through this input's iotype before comparison.
        """
        _min = self.get_min()
        _max = self.get_max()
        _incr = self.get_incr()
        # max + incr makes the upper bound inclusive in the half-open frange.
        whole_range = frange(_min, _max + _incr, _incr)

        def satisfy_all(val):
            def satisfy_one(cond):
                return cond['operator'](val, self.iotype(cond['value']))
            return all(map(satisfy_one, condition_list))

        return ifilter(satisfy_all, whole_range)
ToAruShiroiNeko/revscoring
refs/heads/master
revscoring/utilities/__init__.py
4
""" This module implements a set of utilities for extracting features and train/testing :class:`revscoring.scorer_models.MLScorerModel` from the command-line. When the revscoring python package is installed, a `revscoring` utility should be available from the commandline. Run `revscoring -h` for more information: extract_features ++++++++++++++++ .. automodule:: revscoring.utilities.extract_features score +++++ .. automodule:: revscoring.utilities.score train_test ++++++++++ .. automodule:: revscoring.utilities.train_test """
plin1112/pysimm
refs/heads/stable
pysimm/models/monomers/dreiding/pe.py
3
from pysimm import system, lmps, forcefield
from pysimm.apps.random_walk import random_walk


def monomer():
    """Build and minimize a Dreiding-typed polyethylene (CC) repeat unit.

    Fetches ethane from PubChem, falling back to a bundled mol file when
    offline; strips one H from each carbon and tags them head/tail linkers.
    """
    try:
        s = system.read_pubchem_smiles('CC')
    except Exception:
        # BUG FIX: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit.  Fall back to the local mol file
        # (typically when PubChem is unreachable).
        import os
        s = system.read_mol(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir, 'CC.mol'))
    f = forcefield.Dreiding()
    s.apply_forcefield(f)

    c1 = s.particles[1]
    c2 = s.particles[2]

    # Mark the polymerization attachment points.
    c1.linker = 'head'
    c2.linker = 'tail'

    # Remove one hydrogen from each linker carbon to open the bonding site.
    for b in c1.bonds:
        if b.a.elem == 'H' or b.b.elem == 'H':
            pb = b.a if b.b is c1 else b.b
            s.particles.remove(pb.tag, update=False)
            break

    for b in c2.bonds:
        if b.a.elem == 'H' or b.b.elem == 'H':
            pb = b.a if b.b is c2 else b.b
            s.particles.remove(pb.tag, update=False)
            break

    s.remove_spare_bonding()

    s.pair_style = 'lj/cut'

    lmps.quick_min(s, min_style='fire')

    s.add_particle_bonding()

    return s


def polymer_chain(length):
    """Grow a single PE chain of ``length`` repeat units via random walk."""
    mon = monomer()
    polym = random_walk(mon, length, forcefield=forcefield.Dreiding())
    return polym


def polymer_system(chains=10, mn=1000, pdi=1, density=0.3):
    """Build ``chains`` PE chains of number-average mass ``mn`` at ``density``.

    Only monodisperse systems (pdi == 1) are supported; otherwise prints a
    notice and returns None (original best-effort behavior preserved).
    """
    if pdi != 1:
        print('disperse molecular weight distributions not supported yet')
        return

    mon = monomer()
    # Chain length in repeat units from the target number-average mass.
    chain_length = int(mn / mon.mass)
    # First chain establishes the box at the per-chain density.
    polym = random_walk(mon, chain_length, density=density / chains,
                        forcefield=forcefield.Dreiding())
    # Remaining chains are grown into the existing system.
    for chain in range(chains - 1):
        polym = random_walk(mon, chain_length, s_=polym, density=None,
                            forcefield=forcefield.Dreiding())

    return polym
olasitarska/django
refs/heads/master
django/db/backends/sqlite3/schema.py
7
import codecs import copy from decimal import Decimal from django.utils import six from django.apps.registry import Apps from django.db.backends.schema import BaseDatabaseSchemaEditor from django.db.models.fields.related import ManyToManyField class DatabaseSchemaEditor(BaseDatabaseSchemaEditor): sql_delete_table = "DROP TABLE %(table)s" sql_create_inline_fk = "REFERENCES %(to_table)s (%(to_column)s)" def quote_value(self, value): # Inner import to allow nice failure for backend if not present import _sqlite3 try: value = _sqlite3.adapt(value) except _sqlite3.ProgrammingError: pass # Manual emulation of SQLite parameter quoting if isinstance(value, type(True)): return str(int(value)) elif isinstance(value, (Decimal, float)): return str(value) elif isinstance(value, six.integer_types): return str(value) elif isinstance(value, six.string_types): return "'%s'" % six.text_type(value).replace("\'", "\'\'") elif value is None: return "NULL" elif isinstance(value, (bytes, bytearray, six.memoryview)): # Bytes are only allowed for BLOB fields, encoded as string # literals containing hexadecimal data and preceded by a single "X" # character: # value = b'\x01\x02' => value_hex = b'0102' => return X'0102' value = bytes(value) hex_encoder = codecs.getencoder('hex_codec') value_hex, _length = hex_encoder(value) # Use 'ascii' encoding for b'01' => '01', no need to use force_text here. return "X'%s'" % value_hex.decode('ascii') else: raise ValueError("Cannot quote parameter value %r of type %s" % (value, type(value))) def _remake_table(self, model, create_fields=[], delete_fields=[], alter_fields=[], override_uniques=None): """ Shortcut to transform a model from old_model into new_model """ # Work out the new fields dict / mapping body = dict((f.name, f) for f in model._meta.local_fields) # Since mapping might mix column names and default values, # its values must be already quoted. 
mapping = dict((f.column, self.quote_name(f.column)) for f in model._meta.local_fields) # This maps field names (not columns) for things like unique_together rename_mapping = {} # If any of the new or altered fields is introducing a new PK, # remove the old one restore_pk_field = None if any(f.primary_key for f in create_fields) or any(n.primary_key for o, n in alter_fields): for name, field in list(body.items()): if field.primary_key: field.primary_key = False restore_pk_field = field if field.auto_created: del body[name] del mapping[field.column] # Add in any created fields for field in create_fields: body[field.name] = field # If there's a default, insert it into the copy map if field.has_default(): mapping[field.column] = self.quote_value( self.effective_default(field) ) # Add in any altered fields for (old_field, new_field) in alter_fields: del body[old_field.name] del mapping[old_field.column] body[new_field.name] = new_field mapping[new_field.column] = self.quote_name(old_field.column) rename_mapping[old_field.name] = new_field.name # Remove any deleted fields for field in delete_fields: del body[field.name] del mapping[field.column] # Remove any implicit M2M tables if isinstance(field, ManyToManyField) and field.rel.through._meta.auto_created: return self.delete_model(field.rel.through) # Work inside a new app registry apps = Apps() # Provide isolated instances of the fields to the new model body # Instantiating the new model with an alternate db_table will alter # the internal references of some of the provided fields. 
body = copy.deepcopy(body) # Work out the new value of unique_together, taking renames into # account if override_uniques is None: override_uniques = [ [rename_mapping.get(n, n) for n in unique] for unique in model._meta.unique_together ] # Construct a new model for the new state meta_contents = { 'app_label': model._meta.app_label, 'db_table': model._meta.db_table + "__new", 'unique_together': override_uniques, 'apps': apps, } meta = type("Meta", tuple(), meta_contents) body['Meta'] = meta body['__module__'] = model.__module__ temp_model = type(model._meta.object_name, model.__bases__, body) # Create a new table with that format. We remove things from the # deferred SQL that match our table name, too self.deferred_sql = [x for x in self.deferred_sql if model._meta.db_table not in x] self.create_model(temp_model) # Copy data from the old table field_maps = list(mapping.items()) self.execute("INSERT INTO %s (%s) SELECT %s FROM %s" % ( self.quote_name(temp_model._meta.db_table), ', '.join(self.quote_name(x) for x, y in field_maps), ', '.join(y for x, y in field_maps), self.quote_name(model._meta.db_table), )) # Delete the old table self.delete_model(model, handle_autom2m=False) # Rename the new to the old self.alter_db_table(temp_model, temp_model._meta.db_table, model._meta.db_table) # Run deferred SQL on correct table for sql in self.deferred_sql: self.execute(sql.replace(temp_model._meta.db_table, model._meta.db_table)) self.deferred_sql = [] # Fix any PK-removed field if restore_pk_field: restore_pk_field.primary_key = True def delete_model(self, model, handle_autom2m=True): if handle_autom2m: super(DatabaseSchemaEditor, self).delete_model(model) else: # Delete the table (and only that) self.execute(self.sql_delete_table % { "table": self.quote_name(model._meta.db_table), }) def add_field(self, model, field): """ Creates a field on a model. 
Usually involves adding a column, but may involve adding a table instead (for M2M fields) """ # Special-case implicit M2M tables if isinstance(field, ManyToManyField) and field.rel.through._meta.auto_created: return self.create_model(field.rel.through) self._remake_table(model, create_fields=[field]) def remove_field(self, model, field): """ Removes a field from a model. Usually involves deleting a column, but for M2Ms may involve deleting a table. """ # M2M fields are a special case if isinstance(field, ManyToManyField): # For implicit M2M tables, delete the auto-created table if field.rel.through._meta.auto_created: self.delete_model(field.rel.through) # For explicit "through" M2M fields, do nothing # For everything else, remake. else: # It might not actually have a column behind it if field.db_parameters(connection=self.connection)['type'] is None: return self._remake_table(model, delete_fields=[field]) def _alter_field(self, model, old_field, new_field, old_type, new_type, old_db_params, new_db_params, strict=False): """Actually perform a "physical" (non-ManyToMany) field update.""" # Alter by remaking table self._remake_table(model, alter_fields=[(old_field, new_field)]) def alter_unique_together(self, model, old_unique_together, new_unique_together): """ Deals with a model changing its unique_together. Note: The input unique_togethers must be doubly-nested, not the single- nested ["foo", "bar"] format. """ self._remake_table(model, override_uniques=new_unique_together) def _alter_many_to_many(self, model, old_field, new_field, strict): """ Alters M2Ms to repoint their to= endpoints. """ if old_field.rel.through._meta.db_table == new_field.rel.through._meta.db_table: # The field name didn't change, but some options did; we have to propagate this altering. 
self._remake_table( old_field.rel.through, alter_fields=[( # We need the field that points to the target model, so we can tell alter_field to change it - # this is m2m_reverse_field_name() (as opposed to m2m_field_name, which points to our model) old_field.rel.through._meta.get_field_by_name(old_field.m2m_reverse_field_name())[0], new_field.rel.through._meta.get_field_by_name(new_field.m2m_reverse_field_name())[0], )], override_uniques=(new_field.m2m_field_name(), new_field.m2m_reverse_field_name()), ) return # Make a new through table self.create_model(new_field.rel.through) # Copy the data across self.execute("INSERT INTO %s (%s) SELECT %s FROM %s" % ( self.quote_name(new_field.rel.through._meta.db_table), ', '.join([ "id", new_field.m2m_column_name(), new_field.m2m_reverse_name(), ]), ', '.join([ "id", old_field.m2m_column_name(), old_field.m2m_reverse_name(), ]), self.quote_name(old_field.rel.through._meta.db_table), )) # Delete the old through table self.delete_model(old_field.rel.through)
suninsky/ReceiptOCR
refs/heads/master
Python/server/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/langgreekmodel.py
2762
######################## BEGIN LICENSE BLOCK ######################## # The Original Code is Mozilla Communicator client code. # # The Initial Developer of the Original Code is # Netscape Communications Corporation. # Portions created by the Initial Developer are Copyright (C) 1998 # the Initial Developer. All Rights Reserved. # # Contributor(s): # Mark Pilgrim - port to Python # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### # 255: Control characters that usually does not exist in any text # 254: Carriage/Return # 253: symbol (punctuation) that does not belong to word # 252: 0 - 9 # Character Mapping Table: Latin7_CharToOrderMap = ( 255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10 253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20 252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30 253, 82,100,104, 94, 98,101,116,102,111,187,117, 92, 88,113, 85, # 40 79,118,105, 83, 67,114,119, 95, 99,109,188,253,253,253,253,253, # 50 253, 72, 70, 80, 81, 60, 96, 93, 89, 68,120, 97, 77, 86, 69, 55, # 60 78,115, 65, 66, 58, 76,106,103, 87,107,112,253,253,253,253,253, # 70 
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 80 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 90 253,233, 90,253,253,253,253,253,253,253,253,253,253, 74,253,253, # a0 253,253,253,253,247,248, 61, 36, 46, 71, 73,253, 54,253,108,123, # b0 110, 31, 51, 43, 41, 34, 91, 40, 52, 47, 44, 53, 38, 49, 59, 39, # c0 35, 48,250, 37, 33, 45, 56, 50, 84, 57,120,121, 17, 18, 22, 15, # d0 124, 1, 29, 20, 21, 3, 32, 13, 25, 5, 11, 16, 10, 6, 30, 4, # e0 9, 8, 14, 7, 2, 12, 28, 23, 42, 24, 64, 75, 19, 26, 27,253, # f0 ) win1253_CharToOrderMap = ( 255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10 253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20 252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30 253, 82,100,104, 94, 98,101,116,102,111,187,117, 92, 88,113, 85, # 40 79,118,105, 83, 67,114,119, 95, 99,109,188,253,253,253,253,253, # 50 253, 72, 70, 80, 81, 60, 96, 93, 89, 68,120, 97, 77, 86, 69, 55, # 60 78,115, 65, 66, 58, 76,106,103, 87,107,112,253,253,253,253,253, # 70 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 80 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 90 253,233, 61,253,253,253,253,253,253,253,253,253,253, 74,253,253, # a0 253,253,253,253,247,253,253, 36, 46, 71, 73,253, 54,253,108,123, # b0 110, 31, 51, 43, 41, 34, 91, 40, 52, 47, 44, 53, 38, 49, 59, 39, # c0 35, 48,250, 37, 33, 45, 56, 50, 84, 57,120,121, 17, 18, 22, 15, # d0 124, 1, 29, 20, 21, 3, 32, 13, 25, 5, 11, 16, 10, 6, 30, 4, # e0 9, 8, 14, 7, 2, 12, 28, 23, 42, 24, 64, 75, 19, 26, 27,253, # f0 ) # Model Table: # total sequences: 100% # first 512 sequences: 98.2851% # first 1024 sequences:1.7001% # rest sequences: 0.0359% # negative sequences: 0.0148% GreekLangModel = ( 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 
0,0,3,2,2,3,3,3,3,3,3,3,3,1,3,3,3,0,2,2,3,3,0,3,0,3,2,0,3,3,3,0, 3,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,3,3,3,3,3,0,3,3,0,3,2,3,3,0,3,2,3,3,3,0,0,3,0,3,0,3,3,2,0,0,0, 2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0, 0,2,3,2,2,3,3,3,3,3,3,3,3,0,3,3,3,3,0,2,3,3,0,3,3,3,3,2,3,3,3,0, 2,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,2,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,0,2,1,3,3,3,3,2,3,3,2,3,3,2,0, 0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,3,3,3,3,0,3,3,3,3,3,3,0,3,3,0,3,3,3,3,3,3,3,3,3,3,0,3,2,3,3,0, 2,0,1,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0, 0,3,3,3,3,3,2,3,0,0,0,0,3,3,0,3,1,3,3,3,0,3,3,0,3,3,3,3,0,0,0,0, 2,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,3,3,3,3,3,0,3,0,3,3,3,3,3,0,3,2,2,2,3,0,2,3,3,3,3,3,2,3,3,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,3,3,3,3,3,3,2,2,2,3,3,3,3,0,3,1,3,3,3,3,2,3,3,3,3,3,3,3,2,2,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,3,3,3,3,3,2,0,3,0,0,0,3,3,2,3,3,3,3,3,0,0,3,2,3,0,2,3,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,3,0,3,3,3,3,0,0,3,3,0,2,3,0,3,0,3,3,3,0,0,3,0,3,0,2,2,3,3,0,0, 0,0,1,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,3,3,3,3,3,2,0,3,2,3,3,3,3,0,3,3,3,3,3,0,3,3,2,3,2,3,3,2,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,3,3,2,3,2,3,3,3,3,3,3,0,2,3,2,3,2,2,2,3,2,3,3,2,3,0,2,2,2,3,0, 2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,3,0,0,0,3,3,3,2,3,3,0,0,3,0,3,0,0,0,3,2,0,3,0,3,0,0,2,0,2,0, 0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,3,3,3,3,0,3,3,3,3,3,3,0,3,3,0,3,0,0,0,3,3,0,3,3,3,0,0,1,2,3,0, 3,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 
0,3,3,3,3,3,2,0,0,3,2,2,3,3,0,3,3,3,3,3,2,1,3,0,3,2,3,3,2,1,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,3,3,0,2,3,3,3,3,3,3,0,0,3,0,3,0,0,0,3,3,0,3,2,3,0,0,3,3,3,0, 3,0,0,0,2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,3,3,3,3,0,3,3,3,3,3,3,0,0,3,0,3,0,0,0,3,2,0,3,2,3,0,0,3,2,3,0, 2,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,3,1,2,2,3,3,3,3,3,3,0,2,3,0,3,0,0,0,3,3,0,3,0,2,0,0,2,3,1,0, 2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,3,0,3,3,3,3,0,3,0,3,3,2,3,0,3,3,3,3,3,3,0,3,3,3,0,2,3,0,0,3,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,3,0,3,3,3,0,0,3,0,0,0,3,3,0,3,0,2,3,3,0,0,3,0,3,0,3,3,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,3,0,0,0,3,3,3,3,3,3,0,0,3,0,2,0,0,0,3,3,0,3,0,3,0,0,2,0,2,0, 0,0,0,0,1,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,3,3,3,3,3,3,0,3,0,2,0,3,2,0,3,2,3,2,3,0,0,3,2,3,2,3,3,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,3,0,0,2,3,3,3,3,3,0,0,0,3,0,2,1,0,0,3,2,2,2,0,3,0,0,2,2,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,3,0,3,3,3,2,0,3,0,3,0,3,3,0,2,1,2,3,3,0,0,3,0,3,0,3,3,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,2,3,3,3,0,3,3,3,3,3,3,0,2,3,0,3,0,0,0,2,1,0,2,2,3,0,0,2,2,2,0, 0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,3,0,0,2,3,3,3,2,3,0,0,1,3,0,2,0,0,0,0,3,0,1,0,2,0,0,1,1,1,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,3,3,3,3,3,1,0,3,0,0,0,3,2,0,3,2,3,3,3,0,0,3,0,3,2,2,2,1,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,3,0,3,3,3,0,0,3,0,0,0,0,2,0,2,3,3,2,2,2,2,3,0,2,0,2,2,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,3,3,3,3,2,0,0,0,0,0,0,2,3,0,2,0,2,3,2,0,0,3,0,3,0,3,1,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 
0,0,0,0,0,0,3,2,3,3,2,2,3,0,2,0,3,0,0,0,2,0,0,0,0,1,2,0,2,0,2,0, 0,2,0,2,0,2,2,0,0,1,0,2,2,2,0,2,2,2,0,2,2,2,0,0,2,0,0,1,0,0,0,0, 0,2,0,3,3,2,0,0,0,0,0,0,1,3,0,2,0,2,2,2,0,0,2,0,3,0,0,2,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,3,0,2,3,2,0,2,2,0,2,0,2,2,0,2,0,2,2,2,0,0,0,0,0,0,2,3,0,0,0,2, 0,1,2,0,0,0,0,2,2,0,0,0,2,1,0,2,2,0,0,0,0,0,0,1,0,2,0,0,0,0,0,0, 0,0,2,1,0,2,3,2,2,3,2,3,2,0,0,3,3,3,0,0,3,2,0,0,0,1,1,0,2,0,2,2, 0,2,0,2,0,2,2,0,0,2,0,2,2,2,0,2,2,2,2,0,0,2,0,0,0,2,0,1,0,0,0,0, 0,3,0,3,3,2,2,0,3,0,0,0,2,2,0,2,2,2,1,2,0,0,1,2,2,0,0,3,0,0,0,2, 0,1,2,0,0,0,1,2,0,0,0,0,0,0,0,2,2,0,1,0,0,2,0,0,0,2,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,2,3,3,2,2,0,0,0,2,0,2,3,3,0,2,0,0,0,0,0,0,2,2,2,0,2,2,0,2,0,2, 0,2,2,0,0,2,2,2,2,1,0,0,2,2,0,2,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0, 0,2,0,3,2,3,0,0,0,3,0,0,2,2,0,2,0,2,2,2,0,0,2,0,0,0,0,0,0,0,0,2, 0,0,2,2,0,0,2,2,2,0,0,0,0,0,0,2,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,2,0,0,3,2,0,2,2,2,2,2,0,0,0,2,0,0,0,0,2,0,1,0,0,2,0,1,0,0,0, 0,2,2,2,0,2,2,0,1,2,0,2,2,2,0,2,2,2,2,1,2,2,0,0,2,0,0,0,0,0,0,0, 0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0, 0,2,0,2,0,2,2,0,0,0,0,1,2,1,0,0,2,2,0,0,2,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,3,2,3,0,0,2,0,0,0,2,2,0,2,0,0,0,1,0,0,2,0,2,0,2,2,0,0,0,0, 0,0,2,0,0,0,0,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0, 0,2,2,3,2,2,0,0,0,0,0,0,1,3,0,2,0,2,2,0,0,0,1,0,2,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,2,0,2,0,3,2,0,2,0,0,0,0,0,0,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1, 0,0,2,0,0,0,0,1,1,0,0,2,1,2,0,2,2,0,1,0,0,1,0,0,0,2,0,0,0,0,0,0, 0,3,0,2,2,2,0,0,2,0,0,0,2,0,0,0,2,3,0,2,0,0,0,0,0,0,2,2,0,0,0,2, 0,1,2,0,0,0,1,2,2,1,0,0,0,2,0,0,2,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,2,1,2,0,2,2,0,2,0,0,2,0,0,0,0,1,2,1,0,2,1,0,0,0,0,0,0,0,0,0,0, 
0,0,2,0,0,0,3,1,2,2,0,2,0,0,0,0,2,0,0,0,2,0,0,3,0,0,0,0,2,2,2,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,2,1,0,2,0,1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,0,0,0,0,2, 0,2,2,0,0,2,2,2,2,2,0,1,2,0,0,0,2,2,0,1,0,2,0,0,2,2,0,0,0,0,0,0, 0,0,0,0,1,0,0,0,0,0,0,0,3,0,0,2,0,0,0,0,0,0,0,0,2,0,2,0,0,0,0,2, 0,1,2,0,0,0,0,2,2,1,0,1,0,1,0,2,2,2,1,0,0,0,0,0,0,1,0,0,0,0,0,0, 0,2,0,1,2,0,0,0,0,0,0,0,0,0,0,2,0,0,2,2,0,0,0,0,1,0,0,0,0,0,0,2, 0,2,2,0,0,0,0,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,2,0,0,0, 0,2,2,2,2,0,0,0,3,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,1, 0,0,2,0,0,0,0,1,2,0,0,0,0,0,0,2,2,1,1,0,0,0,0,0,0,1,0,0,0,0,0,0, 0,2,0,2,2,2,0,0,2,0,0,0,0,0,0,0,2,2,2,0,0,0,2,0,0,0,0,0,0,0,0,2, 0,0,1,0,0,0,0,2,1,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0, 0,3,0,2,0,0,0,0,0,0,0,0,2,0,0,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,2, 0,0,2,0,0,0,0,2,2,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,2,0,2,2,1,0,0,0,0,0,0,2,0,0,2,0,2,2,2,0,0,0,0,0,0,2,0,0,0,0,2, 0,0,2,0,0,2,0,2,2,0,0,0,0,2,0,2,0,0,0,0,0,2,0,0,0,2,0,0,0,0,0,0, 0,0,3,0,0,0,2,2,0,2,2,0,0,0,0,0,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,2,0,0,0,0,0, 0,2,2,2,2,2,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,0,0,0,1, 0,0,0,0,0,0,0,2,1,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,2,2,0,0,0,0,0,2,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0, 0,2,0,0,0,2,0,0,0,0,0,1,0,0,0,0,2,2,0,0,0,1,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,2,0,0,0, 0,2,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,1,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,1,0,0,2,0,2,0,0,0, 
0,0,0,0,0,0,0,0,2,1,0,0,0,0,0,0,2,0,0,0,1,2,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, ) Latin7GreekModel = { 'charToOrderMap': Latin7_CharToOrderMap, 'precedenceMatrix': GreekLangModel, 'mTypicalPositiveRatio': 0.982851, 'keepEnglishLetter': False, 'charsetName': "ISO-8859-7" } Win1253GreekModel = { 'charToOrderMap': win1253_CharToOrderMap, 'precedenceMatrix': GreekLangModel, 'mTypicalPositiveRatio': 0.982851, 'keepEnglishLetter': False, 'charsetName': "windows-1253" } # flake8: noqa
jonathonwalz/ansible
refs/heads/devel
test/integration/targets/module_utils/module_utils/spam8/ham/bacon.py
298
# Marker constant for the spam8/ham/bacon module_utils fixture; integration
# tests presumably assert on this value to verify that the correct nested
# module_utils file was resolved and imported — TODO confirm against caller.
data = 'spam8:bacon'
CivicTechTO/open-cabinet
refs/heads/master
venv/lib/python2.7/site-packages/static.py
7
#!/usr/bin/env python
"""
Copyright (C) 2012 Roman Mohr <roman@fenkhuber.at>
"""
"""static - A stupidly simple WSGI way to serve static (or mixed) content.

(See the docstrings of the various functions and classes.)

Copyright (C) 2006-2009 Luke Arno - http://lukearno.com/

This library is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation; either version 2.1 of the
License, or (at your option) any later version.

This library is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to:

The Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301, USA.

Luke Arno can be found at http://lukearno.com/
"""

import mimetypes
import email.utils as rfc822
import time
import string
import sys

from os import path, stat
from wsgiref import util
from wsgiref.headers import Headers
from wsgiref.simple_server import make_server
from optparse import OptionParser

# Optional dependencies: pkg_resources enables cling_wrap(); kid and genshi
# enable their respective template magics.  Narrowed from bare ``except:``
# so SystemExit/KeyboardInterrupt are no longer swallowed at import time.
try:
    from pkg_resources import resource_filename, Requirement
except ImportError:
    pass
try:
    import kid
except ImportError:
    pass
try:
    from genshi.template import MarkupTemplate
except ImportError:
    pass

# Python 2/3 shim: u() turns a str literal into unicode on 2, identity on 3.
# Was ``sys.version < '3'`` — a fragile lexicographic string comparison;
# version_info is the form already used everywhere else in this module.
if sys.version_info[0] < 3:
    import codecs

    def u(x):
        return codecs.unicode_escape_decode(x)[0]
else:
    def u(x):
        return x


class MagicError(Exception):
    """Raised by magics when asked to reverse a path they do not handle."""
    pass


def _encode(string, encoding):
    """Return *string* encoded to bytes in *encoding*.

    On Python 2 a byte string is passed through untouched; only unicode
    objects are encoded.  NOTE: the parameter shadows the ``string`` module
    within this function; kept for backward compatibility with callers.
    """
    if sys.version_info[0] > 2:
        return string.encode(encoding=encoding, errors='strict')
    else:
        if type(u('')) == type(string):
            string = string.encode(encoding)
        return string


def _decode(string, encoding):
    """Return *string* decoded from *encoding* (no-op on Python 2)."""
    if sys.version_info[0] > 2:
        return string.decode(encoding=encoding, errors='strict')
    else:
        return string


def _open(filename, encoding):
    """Open *filename* as text with *encoding* (py3) or raw bytes (py2)."""
    if sys.version_info[0] > 2:
        return open(filename, 'r', encoding=encoding, errors='strict')
    else:
        return open(filename, 'rb')


class StatusApp:
    """Used by WSGI apps to return some HTTP status."""

    def __init__(self, status, message=None,
                 encoding=sys.getdefaultencoding()):
        """Store the status line; *message* defaults to the status itself."""
        self.status = status
        self.encoding = encoding
        if message is None:
            self.message = status
        else:
            self.message = message

    def __call__(self, environ, start_response, headers=None):
        """Respond with self.status and a plain-text body.

        BUG FIX: the original signature used the mutable default
        ``headers=[]``; ``Headers(...).add_header`` mutated that shared
        list, so every call without an explicit *headers* accumulated one
        more duplicate ``Content-type`` header.
        """
        headers = [] if headers is None else headers
        if self.message:
            Headers(headers).add_header('Content-type', 'text/plain')
        start_response(self.status, headers)
        if environ['REQUEST_METHOD'] == 'HEAD':
            return [_encode("", self.encoding)]
        else:
            return [_encode(self.message, self.encoding)]


class Cling(object):
    """A stupidly simple way to serve static content via WSGI.

    Serve the file of the same path as PATH_INFO in self.datadir.
    Look up the Content-type in self.content_types by extension
    or use 'text/plain' if the extension is not found.
    Serve up the contents of the file or delegate to self.not_found.
    """

    block_size = 16 * 4096
    index_file = 'index.html'
    # Shared per-status responders; each call builds fresh headers.
    not_found = StatusApp('404 Not Found')
    not_modified = StatusApp('304 Not Modified', "")
    moved_permanently = StatusApp('301 Moved Permanently')
    method_not_allowed = StatusApp('405 Method Not Allowed')

    def __init__(self, root, **kw):
        """Just set the root and any other attribs passed via **kw."""
        self.root = root
        self.encoding = sys.getdefaultencoding()
        for k, v in kw.items():
            setattr(self, k, v)

    def __call__(self, environ, start_response):
        """Respond to a request when called in the usual WSGI way."""
        if environ['REQUEST_METHOD'] not in ('GET', 'HEAD'):
            headers = [('Allow', 'GET, HEAD')]
            return self.method_not_allowed(environ, start_response, headers)
        path_info = environ.get('PATH_INFO', '')
        full_path = self._full_path(path_info)
        if not self._is_under_root(full_path):
            return self.not_found(environ, start_response)
        if path.isdir(full_path):
            # Redirect directory requests without a trailing slash, else
            # fall through to the directory's index file.
            if full_path[-1] != '/' or full_path == self.root:
                location = util.request_uri(environ,
                                            include_query=False) + '/'
                if environ.get('QUERY_STRING'):
                    location += '?' + environ.get('QUERY_STRING')
                headers = [('Location', location)]
                return self.moved_permanently(environ, start_response,
                                              headers)
            else:
                full_path = self._full_path(path_info + self.index_file)
        # 'gzip' substring test works on the raw header string; the []
        # default just makes the membership test false when absent.
        prezipped = ('gzip' in environ.get('HTTP_ACCEPT_ENCODING', [])
                     and path.exists(full_path + '.gz'))
        if prezipped:
            full_path += '.gz'
        content_type = self._guess_type(full_path)
        try:
            etag, last_modified = self._conditions(full_path, environ)
            headers = [('Date', rfc822.formatdate(time.time())),
                       ('Last-Modified', last_modified),
                       ('ETag', etag)]
            if_modified = environ.get('HTTP_IF_MODIFIED_SINCE')
            if if_modified and (rfc822.parsedate(if_modified)
                                >= rfc822.parsedate(last_modified)):
                return self.not_modified(environ, start_response, headers)
            if_none = environ.get('HTTP_IF_NONE_MATCH')
            if if_none and (if_none == '*' or etag in if_none):
                return self.not_modified(environ, start_response, headers)
            file_like = self._file_like(full_path)
            headers.append(('Content-Type', content_type))
            if prezipped:
                headers.extend([('Content-Encoding', 'gzip'),
                                ('Vary', 'Accept-Encoding')])
            start_response("200 OK", headers)
            if environ['REQUEST_METHOD'] == 'GET':
                return self._body(full_path, environ, file_like)
            else:
                # HEAD: no body is sent, so close the file we opened (the
                # original leaked it) and return an empty *byte* string —
                # the original's [''] is a native str, which violates
                # PEP 3333 on Python 3.
                file_like.close()
                return [_encode('', self.encoding)]
        except (IOError, OSError) as e:
            print(e)
            return self.not_found(environ, start_response)

    def _full_path(self, path_info):
        """Return the full path from which to read."""
        return self.root + path_info

    def _is_under_root(self, full_path):
        """Guard against arbitrary file retrieval (path traversal)."""
        if (path.abspath(full_path) + path.sep)\
                .startswith(path.abspath(self.root) + path.sep):
            return True
        else:
            return False

    def _guess_type(self, full_path):
        """Guess the mime type using the mimetypes module."""
        return mimetypes.guess_type(full_path)[0] or 'text/plain'

    def _conditions(self, full_path, environ):
        """Return a tuple of etag, last_modified by mtime from stat."""
        mtime = stat(full_path).st_mtime
        return str(mtime), rfc822.formatdate(mtime)

    def _file_like(self, full_path):
        """Return the appropriate file object."""
        return open(full_path, 'rb')

    def _body(self, full_path, environ, file_like):
        """Return an iterator over the body of the response."""
        way_to_send = environ.get('wsgi.file_wrapper', iter_and_close)
        return way_to_send(file_like, self.block_size)


def iter_and_close(file_like, block_size):
    """Yield file contents by block then close the file.

    Rewritten from the original raise/except-StopIteration control flow:
    the ``finally`` now guarantees the file is closed even when the
    consumer abandons the generator early (GeneratorExit) or ``read``
    raises mid-stream — the original leaked the file in both cases.
    """
    try:
        while 1:
            block = file_like.read(block_size)
            if not block:
                break
            yield block
    finally:
        file_like.close()


def cling_wrap(package_name, dir_name, **kw):
    """Return a Cling that serves from the given package and dir_name.

    This uses pkg_resources.resource_filename which is not the
    recommended way, since it extracts the files.

    I think this works fine unless you have some _very_ serious
    requirements for static content, in which case you probably
    shouldn't be serving it through a WSGI app, IMHO. YMMV.
    """
    resource = Requirement.parse(package_name)
    return Cling(resource_filename(resource, dir_name), **kw)


class Shock(Cling):
    """A stupidly simple way to serve up mixed content.

    Serves static content just like Cling (its superclass) except
    that it processes content with the first matching magic from
    self.magics if any apply.

    See Cling and classes with "Magic" in their names in this module.

    If you are using Shock with the StringMagic class for instance:

        shock = Shock('/data', magics=[StringMagic(food='cheese')])

    Let's say you have a file called /data/foo.txt.stp containing
    one line: "I love to eat $food!"
    When you do a GET on /foo.txt you will see this in your browser:
    "I love to eat cheese!"

    This is really nice if you have a color variable in your css files
    or something trivial like that.  It seems silly to create or
    change a handful of objects for a couple of dynamic bits of text.
    """

    magics = ()

    def _match_magic(self, full_path):
        """Return the first magic that matches this path or None."""
        for magic in self.magics:
            if magic.matches(full_path):
                return magic

    def _full_path(self, path_info):
        """Return the full path from which to read.

        Prefer the literal path; otherwise the first magic whose
        transformed path exists; otherwise fall back to the literal path
        (which will 404 later).
        """
        full_path = self.root + path_info
        if path.exists(full_path):
            return full_path
        else:
            for magic in self.magics:
                if path.exists(magic.new_path(full_path)):
                    return magic.new_path(full_path)
            else:
                return full_path

    def _guess_type(self, full_path):
        """Guess the mime type magically or using the mimetypes module."""
        magic = self._match_magic(full_path)
        if magic is not None:
            return (mimetypes.guess_type(magic.old_path(full_path))[0]
                    or 'text/plain')
        else:
            return mimetypes.guess_type(full_path)[0] or 'text/plain'

    def _conditions(self, full_path, environ):
        """Return Etag and Last-Modified values, defaults to now for both."""
        magic = self._match_magic(full_path)
        if magic is not None:
            return magic.conditions(full_path, environ)
        else:
            mtime = stat(full_path).st_mtime
            return str(mtime), rfc822.formatdate(mtime)

    def _file_like(self, full_path):
        """Return the appropriate file object."""
        magic = self._match_magic(full_path)
        if magic is not None:
            return magic.file_like(full_path, self.encoding)
        else:
            return open(full_path, 'rb')

    def _body(self, full_path, environ, file_like):
        """Return an iterator over the body of the response."""
        magic = self._match_magic(full_path)
        if magic is not None:
            return [_encode(s, self.encoding)
                    for s in magic.body(environ, file_like)]
        else:
            way_to_send = environ.get('wsgi.file_wrapper', iter_and_close)
            return way_to_send(file_like, self.block_size)


class BaseMagic(object):
    """Base class for magic file handling.

    Really a do nothing if you were to use this directly.
    In a straightforward case you would just override .extension
    and body(). (See StringMagic in this module for a simple
    example of subclassing.) In a more complex case you may need
    to override many or all methods.
    """

    extension = ''

    def exists(self, full_path):
        """Check that self.new_path(full_path) exists."""
        if path.exists(self.new_path(full_path)):
            return self.new_path(full_path)

    def new_path(self, full_path):
        """Add the self.extension to the path."""
        return full_path + self.extension

    def old_path(self, full_path):
        """Remove self.extension from path or raise MagicError."""
        if self.matches(full_path):
            return full_path[:-len(self.extension)]
        else:
            raise MagicError("Path does not match this magic.")

    def matches(self, full_path):
        """Check that path ends with self.extension."""
        if full_path.endswith(self.extension):
            return full_path

    def conditions(self, full_path, environ):
        """Return Etag and Last-Modified values (based on current time).

        Dynamic content: "last modified" is always now, so clients
        revalidate on every request.
        """
        mtime = int(time.time())
        return str(mtime), rfc822.formatdate(mtime)

    def file_like(self, full_path, encoding):
        """Return a file object for path."""
        return _open(full_path, encoding)

    def body(self, environ, file_like):
        """Return an iterator over the body of the response."""
        return [file_like.read()]


class StringMagic(BaseMagic):
    """Magic to replace variables in file contents using string.Template.

    Using this requires Python2.4.
    """

    extension = '.stp'
    safe = False

    def __init__(self, **variables):
        """Keyword arguments populate self.variables."""
        self.variables = variables

    def body(self, environ, file_like):
        """Pass environ and self.variables in to template.

        self.variables overrides environ so that surprises in environ
        don't cause unexpected output if you are passing a value in
        explicitly.
        """
        variables = environ.copy()
        variables.update(self.variables)
        template = string.Template(file_like.read())
        if self.safe is True:
            return [template.safe_substitute(variables)]
        else:
            return [template.substitute(variables)]


class KidMagic(StringMagic):
    """Like StringMagic only using the Kid templating language.

    Using this requires Kid: http://kid.lesscode.org/
    """

    extension = '.kid'

    def body(self, environ, full_path):
        """Pass environ and **self.variables into the template.

        NOTE(review): Shock._body actually passes a *file object* here,
        not a path, yet it is forwarded as ``file=`` to kid.Template —
        verify against the kid API before relying on this.
        """
        template = kid.Template(file=full_path,
                                environ=environ,
                                **self.variables)
        return [template.serialize()]


class GenshiMagic(StringMagic):
    """Like StringMagic only using the Genshi templating language.

    Using this requires Genshi.
    """

    extension = '.genshi'

    def body(self, environ, full_path):
        """Pass environ and **self.variables into the template.

        NOTE(review): despite its name, *full_path* is the file-like
        object supplied by Shock._body; .read() relies on that.
        """
        template = MarkupTemplate(full_path.read())
        variables = self.variables.copy()
        variables["environ"] = environ
        return [template.generate(**variables)
                .render('html', doctype='html')]


def command():
    """Command-line entry point: serve DIR on [HOST][:][PORT]."""
    parser = OptionParser(usage="%prog DIR [HOST][:][PORT]",
                          version="static 0.3.6")
    options, args = parser.parse_args()
    if len(args) in (1, 2):
        if len(args) == 2:
            parts = args[1].split(":")
            if len(parts) == 1:
                host = parts[0]
                port = None
            elif len(parts) == 2:
                host, port = parts
            else:
                sys.exit("Invalid host:port specification.")
        elif len(args) == 1:
            host, port = None, None
        if not host:
            host = '0.0.0.0'
        if not port:
            port = 9999
        try:
            port = int(port)
        except ValueError:  # was bare except: — only int() can fail here
            sys.exit("Invalid host:port specification.")
        app = Cling(args[0])
        try:
            make_server(host, port, app).serve_forever()
        except KeyboardInterrupt:
            print("Ciao, baby!")  # typo "Cio" fixed to match test()
        except Exception:  # was bare except: — let SystemExit propagate
            sys.exit("Problem initializing server.")
    else:
        parser.print_help(sys.stderr)
        sys.exit(1)


def test():
    """Serve the testdata/pub tree through a validating Shock app."""
    from wsgiref.validate import validator
    magics = (StringMagic(title="String Test"),
              KidMagic(title="Kid Test"),
              GenshiMagic(title="Genshi Test"))
    app = Shock('testdata/pub', magics=magics)
    try:
        make_server('localhost', 9999, validator(app)).serve_forever()
    except KeyboardInterrupt:
        print("Ciao, baby!")


if __name__ == '__main__':
    test()
hristo-vrigazov/Word-sense-disambiguator
refs/heads/master
submit.py
2
### The only things you'll have to edit (unless you're porting this script over to a different language) ###
### are at the bottom of this file.

# Python 2 only: uses print statements, raw_input, urllib/urllib2 and
# ``except Exception, e`` syntax throughout.
# NOTE(review): this module calls submit() at the very bottom, so merely
# importing it prompts for credentials and performs network I/O.
import urllib
import urllib2
import hashlib
import random       # NOTE(review): unused in the visible module
import email
import email.message
import email.encoders
import StringIO     # NOTE(review): unused in the visible module
import sys
import os

""""""""""""""""""""
""""""""""""""""""""


class NullDevice:
    # File-like sink that silently discards everything written to it;
    # presumably intended to replace sys.stdout to mute output — unused here.
    def write(self, s):
        pass


def submit():
    # Top-level driver: prompt for credentials and part, fetch a challenge
    # from Coursera, compute the response hash, and upload the output.
    print '==\n== [sandbox] Submitting Solutions \n=='

    (login, password) = loginPrompt()
    if not login:
        print '!! Submission Cancelled'
        return

    print '\n== Connecting to Coursera ... '

    # Part Identifier
    (partIdx, sid) = partPrompt()

    # Get Challenge
    (login, ch, state, ch_aux) = getChallenge(login, sid)  # sid is the "part identifier"
    # NOTE(review): getChallenge returns a bare None on a malformed server
    # response, which the 4-way unpacking above would turn into a TypeError
    # before this guard runs — verify against the server protocol.
    if((not login) or (not ch) or (not state)):
        # Some error occured, error string in first return element.
        print '\n!! Error: %s\n' % login
        return

    # Attempt Submission with Challenge
    ch_resp = challengeResponse(login, password, ch)

    #try:
    (result, string) = submitSolution(login, ch_resp, sid, output(partIdx), \
                                      source(partIdx), state, ch_aux)

    print '== %s' % string.strip()
    #except:
        #print
        #print 'Submission Failure from error',str(sys.exc_info()[0])
        #print 'The error can be caused by a too large output file, or some unexpected output preventing the submission.'


# =========================== LOGIN HELPERS - NO NEED TO CONFIGURE THIS =======================================

def loginPrompt():
    """Prompt the user for login credentials. Returns a tuple (login, password)."""
    (login, password) = basicPrompt()
    return login, password


def basicPrompt():
    """Prompt the user for login credentials. Returns a tuple (login, password)."""
    login = raw_input('Login (Email address): ')
    password = raw_input('One-time Password (from the assignment page. This is NOT your own account\'s password): ')
    return login, password


def partPrompt():
    # List the submittable parts and return (zero-based index, part id).
    print 'Hello! These are the assignment parts that you can submit:'
    counter = 0
    for part in partFriendlyNames:
        counter += 1
        print str(counter) + ') ' + partFriendlyNames[counter - 1]

    partIdx = int(raw_input('Please enter which part you want to submit (1-' + str(counter) + '): ')) - 1
    return (partIdx, partIds[partIdx])


def getChallenge(email, sid):
    """Gets the challenge salt from the server. Returns (email,ch,state,ch_aux)."""
    url = challenge_url()
    values = {'email_address' : email, 'assignment_part_sid' : sid, 'response_encoding' : 'delim'}
    data = urllib.urlencode(values)
    req = urllib2.Request(url, data)
    response = urllib2.urlopen(req)
    text = response.read().strip()

    # text is of the form email|ch|signature
    splits = text.split('|')
    if(len(splits) != 9):
        print 'Badly formatted challenge response: %s' % text
        return None
    # Odd slots hold field labels; the even slots 2/4/6/8 hold the values.
    return (splits[2], splits[4], splits[6], splits[8])


def challengeResponse(email, passwd, challenge):
    # SHA-1 over challenge+password; the server verifies the same digest.
    sha1 = hashlib.sha1()
    sha1.update("".join([challenge, passwd])) # hash the first elements
    digest = sha1.hexdigest()
    # NOTE(review): the loop below just copies ``digest`` character by
    # character — it is equivalent to ``strAnswer = digest``.
    strAnswer = ''
    for i in range(0, len(digest)):
        strAnswer = strAnswer + digest[i]
    return strAnswer


def challenge_url():
    """Returns the challenge url."""
    return "https://class.coursera.org/" + URL + "/assignment/challenge"


def submit_url():
    """Returns the submission url."""
    return "https://class.coursera.org/" + URL + "/assignment/submit"


def submitSolution(email_address, ch_resp, sid, output, source, state, ch_aux):
    #print output
    #print source
    """Submits a solution to the server. Returns (result, string)."""
    # Base64-encode both payloads via the email MIME machinery.
    source_64_msg = email.message.Message()
    source_64_msg.set_payload(source)
    email.encoders.encode_base64(source_64_msg)

    output_64_msg = email.message.Message()
    output_64_msg.set_payload(output)
    email.encoders.encode_base64(output_64_msg)

    values = { 'assignment_part_sid' : sid, \
               'email_address' : email_address, \
               'submission' : output_64_msg.get_payload(), \
               'submission_aux' : source_64_msg.get_payload(), \
               'challenge_response' : ch_resp, \
               'state' : state \
             }
    url = submit_url()
    data = urllib.urlencode(values)
    req = urllib2.Request(url, data)
    response = urllib2.urlopen(req)
    string = response.read().strip()
    # result is always 0 here; the server's reply text carries the verdict.
    result = 0
    return result, string


## This collects the source code (just for logging purposes)
def source(partIdx):
    # open the file, get all lines
    #f = open(sourceFiles[partIdx])
    #src = f.read()
    #f.close()
    #return src
    return ''


############ BEGIN ASSIGNMENT SPECIFIC CODE - YOU'LL HAVE TO EDIT THIS ##############

import subprocess

# Make sure you change this string to the last segment of your class URL.
# For example, if your URL is https://class.coursera.org/pgm-2012-001-staging, set it to "pgm-2012-001-staging".
URL = 'nlpintro-001'

# the "Identifier" you used when creating the part
#partIds = ['degree-part-1', 'degree-part-2', 'degree-part-4']
partIds = ['hw3parta', 'hw3partb']
# used to generate readable run-time information for students
partFriendlyNames = ['WSD Part A', 'WSD Part B']
# source files to collect (just for our records)
sourceFiles = []


def evaluate(files,test_files,baselines,references,scores):
    # Score each answer file with the external ``scorer2`` tool and sum the
    # per-file scores.  Accuracy below baseline scores 0, at/above reference
    # scores full marks, and in between is interpolated linearly.
    score_total = 0
    for i in range(len(files)):
        f = files[i]
        baseline = baselines[i]
        reference = references[i]
        test_file = test_files[i]
        score = scores[i]
        if not os.path.exists(f):
            print 'Please save your output file', f, 'under Assignment3 directory.'
            continue
        # NOTE(review): shell=True with filenames interpolated into the
        # command string — shell metacharacters in a filename would be
        # interpreted by the shell.
        command = "scorer2 " + f + " " + test_file
        print command
        #res = subprocess.check_output(command,shell = True)
        try:
            res = subprocess.check_output(command,shell = True)
        except Exception, e:
            res = None
            print 'scorer2 failed for',f
            sys.exit()
        #print res
        acc = 0
        if res:
            try:
                # Accuracy is the third token on the third line of scorer2's
                # stdout — TODO confirm against scorer2's output format.
                acc = float(res.split('\n')[2].split(' ')[2])
            except Exception, e:
                print 'scorer2 failed for',f
                sys.exit()
        print 'accuracy',acc,
        if acc < baseline:
            score_i = 0
        elif acc >= reference:
            score_i = score
        else:
            score_i = (score - score*(reference - acc)/(reference - baseline))
        score_total += score_i
        print 'score',score_i
    return score_total


def evaluate_wsd(partIdx):
    # Part-specific fixture lists: answer files, gold keys, and the
    # baseline/reference accuracies used by evaluate()'s interpolation.
    if partIdx == 0:
        files = ['KNN-English.answer','KNN-Spanish.answer','KNN-Catalan.answer','SVM-English.answer','SVM-Spanish.answer','SVM-Catalan.answer']
        #test_files = ['data/English-dev.key data/English.sensemap','data/Spanish-dev.key','data/Catalan-dev.key'] * 2
        test_files = ['data/English-dev.key','data/Spanish-dev.key','data/Catalan-dev.key'] * 2
        baselines = [0.535,0.684,0.678] * 2
        references = [0.550,0.690,0.705,0.605,0.785,0.805]
        scores = [10] * 6
        return evaluate(files,test_files,baselines,references,scores)
    elif partIdx == 1:
        files = ['Best-English.answer','Best-Spanish.answer','Best-Catalan.answer']
        #test_files = ['data/English-dev.key data/English.sensemap','data/Spanish-dev.key','data/Catalan-dev.key']
        test_files = ['data/English-dev.key','data/Spanish-dev.key','data/Catalan-dev.key']
        baselines = [0.605,0.785,0.805]
        references = [0.650,0.810,0.820]
        scores = [20,10,10]
        return evaluate(files,test_files,baselines,references,scores)


def output(partIdx):
    """Uses the student code to compute the output for test cases."""
    # Fixed marker prefix followed by the rounded total score.
    outputString = 'nlpfromumich'*100
    outputString += str(int(round(evaluate_wsd(partIdx))))
    return outputString


# Runs on import — see module-level note above.
submit()
JohnTroony/nikola
refs/heads/master
nikola/plugins/command/import_wordpress.py
1
# -*- coding: utf-8 -*- # Copyright © 2012-2015 Roberto Alsina and others. # Permission is hereby granted, free of charge, to any # person obtaining a copy of this software and associated # documentation files (the "Software"), to deal in the # Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the # Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice # shall be included in all copies or substantial portions of # the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR # PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS # OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR # OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR # OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
from __future__ import unicode_literals, print_function import os import re import sys import datetime import requests from lxml import etree try: from urlparse import urlparse from urllib import unquote except ImportError: from urllib.parse import urlparse, unquote # NOQA try: import phpserialize except ImportError: phpserialize = None # NOQA from nikola.plugin_categories import Command from nikola import utils from nikola.utils import req_missing from nikola.plugins.basic_import import ImportMixin, links from nikola.nikola import DEFAULT_TRANSLATIONS_PATTERN from nikola.plugins.command.init import SAMPLE_CONF, prepare_config, format_default_translations_config LOGGER = utils.get_logger('import_wordpress', utils.STDERR_HANDLER) class CommandImportWordpress(Command, ImportMixin): """Import a WordPress dump.""" name = "import_wordpress" needs_config = False doc_usage = "[options] wordpress_export_file" doc_purpose = "import a WordPress dump" cmd_options = ImportMixin.cmd_options + [ { 'name': 'exclude_drafts', 'long': 'no-drafts', 'short': 'd', 'default': False, 'type': bool, 'help': "Don't import drafts", }, { 'name': 'squash_newlines', 'long': 'squash-newlines', 'default': False, 'type': bool, 'help': "Shorten multiple newlines in a row to only two newlines", }, { 'name': 'no_downloads', 'long': 'no-downloads', 'default': False, 'type': bool, 'help': "Do not try to download files for the import", }, { 'name': 'separate_qtranslate_content', 'long': 'qtranslate', 'default': False, 'type': bool, 'help': "Look for translations generated by qtranslate plugin", # WARNING: won't recover translated titles that actually # don't seem to be part of the wordpress XML export at the # time of writing :( }, { 'name': 'translations_pattern', 'long': 'translations_pattern', 'default': None, 'type': str, 'help': "The pattern for translation files names", }, ] def _execute(self, options={}, args=[]): """Import a WordPress blog from an export file into a Nikola site.""" if not args: 
print(self.help()) return options['filename'] = args.pop(0) if args and ('output_folder' not in args or options['output_folder'] == 'new_site'): options['output_folder'] = args.pop(0) if args: LOGGER.warn('You specified additional arguments ({0}). Please consider ' 'putting these arguments before the filename if you ' 'are running into problems.'.format(args)) self.import_into_existing_site = False self.url_map = {} self.timezone = None self.wordpress_export_file = options['filename'] self.squash_newlines = options.get('squash_newlines', False) self.output_folder = options.get('output_folder', 'new_site') self.exclude_drafts = options.get('exclude_drafts', False) self.no_downloads = options.get('no_downloads', False) self.separate_qtranslate_content = options.get('separate_qtranslate_content') self.translations_pattern = options.get('translations_pattern') # A place holder where extra language (if detected) will be stored self.extra_languages = set() if not self.no_downloads: def show_info_about_mising_module(modulename): LOGGER.error( 'To use the "{commandname}" command, you have to install ' 'the "{package}" package or supply the "--no-downloads" ' 'option.'.format( commandname=self.name, package=modulename) ) if phpserialize is None: req_missing(['phpserialize'], 'import WordPress dumps without --no-downloads') channel = self.get_channel_from_file(self.wordpress_export_file) self.context = self.populate_context(channel) self.base_dir = urlparse(self.context['BASE_URL']).path conf_template = self.generate_base_site() # If user has specified a custom pattern for translation files we # need to fix the config if self.translations_pattern: self.context['TRANSLATIONS_PATTERN'] = self.translations_pattern self.import_posts(channel) self.context['TRANSLATIONS'] = format_default_translations_config( self.extra_languages) self.context['REDIRECTIONS'] = self.configure_redirections( self.url_map) self.write_urlmap_csv( os.path.join(self.output_folder, 'url_map.csv'), 
self.url_map) rendered_template = conf_template.render(**prepare_config(self.context)) rendered_template = re.sub('# REDIRECTIONS = ', 'REDIRECTIONS = ', rendered_template) if self.timezone: rendered_template = re.sub('# TIMEZONE = \'UTC\'', 'TIMEZONE = \'' + self.timezone + '\'', rendered_template) self.write_configuration(self.get_configuration_output_path(), rendered_template) @classmethod def read_xml_file(cls, filename): xml = [] with open(filename, 'rb') as fd: for line in fd: # These explode etree and are useless if b'<atom:link rel=' in line: continue xml.append(line) return b'\n'.join(xml) @classmethod def get_channel_from_file(cls, filename): tree = etree.fromstring(cls.read_xml_file(filename)) channel = tree.find('channel') return channel @staticmethod def populate_context(channel): wordpress_namespace = channel.nsmap['wp'] context = SAMPLE_CONF.copy() context['DEFAULT_LANG'] = get_text_tag(channel, 'language', 'en')[:2] context['TRANSLATIONS_PATTERN'] = DEFAULT_TRANSLATIONS_PATTERN context['BLOG_TITLE'] = get_text_tag(channel, 'title', 'PUT TITLE HERE') context['BLOG_DESCRIPTION'] = get_text_tag( channel, 'description', 'PUT DESCRIPTION HERE') context['BASE_URL'] = get_text_tag(channel, 'link', '#') if not context['BASE_URL']: base_site_url = channel.find('{{{0}}}author'.format(wordpress_namespace)) context['BASE_URL'] = get_text_tag(base_site_url, None, "http://foo.com/") if not context['BASE_URL'].endswith('/'): context['BASE_URL'] += '/' context['SITE_URL'] = context['BASE_URL'] author = channel.find('{{{0}}}author'.format(wordpress_namespace)) context['BLOG_EMAIL'] = get_text_tag( author, '{{{0}}}author_email'.format(wordpress_namespace), "joe@example.com") context['BLOG_AUTHOR'] = get_text_tag( author, '{{{0}}}author_display_name'.format(wordpress_namespace), "Joe Example") context['POSTS'] = '''( ("posts/*.rst", "posts", "post.tmpl"), ("posts/*.txt", "posts", "post.tmpl"), ("posts/*.md", "posts", "post.tmpl"), ("posts/*.wp", "posts", "post.tmpl"), 
)''' context['PAGES'] = '''( ("stories/*.rst", "stories", "story.tmpl"), ("stories/*.txt", "stories", "story.tmpl"), ("stories/*.md", "stories", "story.tmpl"), ("stories/*.wp", "stories", "story.tmpl"), )''' context['COMPILERS'] = '''{ "rest": ('.txt', '.rst'), "markdown": ('.md', '.mdown', '.markdown', '.wp'), "html": ('.html', '.htm') } ''' return context def download_url_content_to_file(self, url, dst_path): if self.no_downloads: return try: with open(dst_path, 'wb+') as fd: fd.write(requests.get(url).content) except requests.exceptions.ConnectionError as err: LOGGER.warn("Downloading {0} to {1} failed: {2}".format(url, dst_path, err)) def import_attachment(self, item, wordpress_namespace): url = get_text_tag( item, '{{{0}}}attachment_url'.format(wordpress_namespace), 'foo') link = get_text_tag(item, '{{{0}}}link'.format(wordpress_namespace), 'foo') path = urlparse(url).path dst_path = os.path.join(*([self.output_folder, 'files'] + list(path.split('/')))) dst_dir = os.path.dirname(dst_path) utils.makedirs(dst_dir) LOGGER.info("Downloading {0} => {1}".format(url, dst_path)) self.download_url_content_to_file(url, dst_path) dst_url = '/'.join(dst_path.split(os.sep)[2:]) links[link] = '/' + dst_url links[url] = '/' + dst_url self.download_additional_image_sizes( item, wordpress_namespace, os.path.dirname(url) ) def download_additional_image_sizes(self, item, wordpress_namespace, source_path): if phpserialize is None: return additional_metadata = item.findall('{{{0}}}postmeta'.format(wordpress_namespace)) if additional_metadata is None: return for element in additional_metadata: meta_key = element.find('{{{0}}}meta_key'.format(wordpress_namespace)) if meta_key is not None and meta_key.text == '_wp_attachment_metadata': meta_value = element.find('{{{0}}}meta_value'.format(wordpress_namespace)) if meta_value is None: continue # Someone from Wordpress thought it was a good idea # serialize PHP objects into that metadata field. 
Given # that the export should give you the power to insert # your blogging into another site or system its not. # Why don't they just use JSON? if sys.version_info[0] == 2: try: metadata = phpserialize.loads(utils.sys_encode(meta_value.text)) except ValueError: # local encoding might be wrong sometimes metadata = phpserialize.loads(meta_value.text.encode('utf-8')) else: metadata = phpserialize.loads(meta_value.text.encode('utf-8')) size_key = b'sizes' file_key = b'file' if size_key not in metadata: continue for filename in [metadata[size_key][size][file_key] for size in metadata[size_key]]: url = '/'.join([source_path, filename.decode('utf-8')]) path = urlparse(url).path dst_path = os.path.join(*([self.output_folder, 'files'] + list(path.split('/')))) dst_dir = os.path.dirname(dst_path) utils.makedirs(dst_dir) LOGGER.info("Downloading {0} => {1}".format(url, dst_path)) self.download_url_content_to_file(url, dst_path) dst_url = '/'.join(dst_path.split(os.sep)[2:]) links[url] = '/' + dst_url links[url] = '/' + dst_url code_re1 = re.compile(r'\[code.* lang.*?="(.*?)?".*\](.*?)\[/code\]', re.DOTALL | re.MULTILINE) code_re2 = re.compile(r'\[sourcecode.* lang.*?="(.*?)?".*\](.*?)\[/sourcecode\]', re.DOTALL | re.MULTILINE) code_re3 = re.compile(r'\[code.*?\](.*?)\[/code\]', re.DOTALL | re.MULTILINE) code_re4 = re.compile(r'\[sourcecode.*?\](.*?)\[/sourcecode\]', re.DOTALL | re.MULTILINE) def transform_code(self, content): # http://en.support.wordpress.com/code/posting-source-code/. There are # a ton of things not supported here. We only do a basic [code # lang="x"] -> ```x translation, and remove quoted html entities (<, # >, &, and "). 
def replacement(m): language = m.group(1) or '' code = m.group(2) code = code.replace('&amp;', '&') code = code.replace('&gt;', '>') code = code.replace('&lt;', '<') code = code.replace('&quot;', '"') return '```{language}\n{code}\n```'.format(language=language, code=code) content = self.code_re1.sub(replacement, content) content = self.code_re2.sub(replacement, content) content = self.code_re3.sub(replacement, content) content = self.code_re4.sub(replacement, content) return content @staticmethod def transform_caption(content): new_caption = re.sub(r'\[/caption\]', '', content) new_caption = re.sub(r'\[caption.*\]', '', new_caption) return new_caption def transform_multiple_newlines(self, content): """Replaces multiple newlines with only two.""" if self.squash_newlines: return re.sub(r'\n{3,}', r'\n\n', content) else: return content def transform_content(self, content): content = self.transform_code(content) content = self.transform_caption(content) content = self.transform_multiple_newlines(content) return content def import_item(self, item, wordpress_namespace, out_folder=None): """Takes an item from the feed and creates a post file.""" if out_folder is None: out_folder = 'posts' title = get_text_tag(item, 'title', 'NO TITLE') # link is something like http://foo.com/2012/09/01/hello-world/ # So, take the path, utils.slugify it, and that's our slug link = get_text_tag(item, 'link', None) parsed = urlparse(link) path = unquote(parsed.path.strip('/')) # In python 2, path is a str. slug requires a unicode # object. According to wikipedia, unquoted strings will # usually be UTF8 if isinstance(path, utils.bytes_str): path = path.decode('utf8') # Cut out the base directory. 
if path.startswith(self.base_dir.strip('/')): path = path.replace(self.base_dir.strip('/'), '', 1) pathlist = path.split('/') if parsed.query: # if there are no nice URLs and query strings are used out_folder = os.path.join(*([out_folder] + pathlist)) slug = get_text_tag( item, '{{{0}}}post_name'.format(wordpress_namespace), None) if not slug: # it *may* happen slug = get_text_tag( item, '{{{0}}}post_id'.format(wordpress_namespace), None) if not slug: # should never happen LOGGER.error("Error converting post:", title) return else: if len(pathlist) > 1: out_folder = os.path.join(*([out_folder] + pathlist[:-1])) slug = utils.slugify(pathlist[-1]) description = get_text_tag(item, 'description', '') post_date = get_text_tag( item, '{{{0}}}post_date'.format(wordpress_namespace), None) try: dt = utils.to_datetime(post_date) except ValueError: dt = datetime.datetime(1970, 1, 1, 0, 0, 0) LOGGER.error('Malformed date "{0}" in "{1}" [{2}], assuming 1970-01-01 00:00:00 instead.'.format(post_date, title, slug)) post_date = dt.strftime('%Y-%m-%d %H:%M:%S') if dt.tzinfo and self.timezone is None: self.timezone = utils.get_tzname(dt) status = get_text_tag( item, '{{{0}}}status'.format(wordpress_namespace), 'publish') content = get_text_tag( item, '{http://purl.org/rss/1.0/modules/content/}encoded', '') tags = [] if status == 'trash': LOGGER.warn('Trashed post "{0}" will not be imported.'.format(title)) return elif status != 'publish': tags.append('draft') is_draft = True else: is_draft = False for tag in item.findall('category'): text = tag.text if text == 'Uncategorized': continue tags.append(text) if '$latex' in content: tags.append('mathjax') if is_draft and self.exclude_drafts: LOGGER.notice('Draft "{0}" will not be imported.'.format(title)) elif content.strip(): # If no content is found, no files are written. 
self.url_map[link] = (self.context['SITE_URL'] + out_folder.rstrip('/') + '/' + slug + '.html').replace(os.sep, '/') if hasattr(self, "separate_qtranslate_content") \ and self.separate_qtranslate_content: content_translations = separate_qtranslate_content(content) else: content_translations = {"": content} default_language = self.context["DEFAULT_LANG"] for lang, content in content_translations.items(): if lang: out_meta_filename = slug + '.meta' if lang == default_language: out_content_filename = slug + '.wp' else: out_content_filename \ = utils.get_translation_candidate(self.context, slug + ".wp", lang) self.extra_languages.add(lang) meta_slug = slug else: out_meta_filename = slug + '.meta' out_content_filename = slug + '.wp' meta_slug = slug content = self.transform_content(content) self.write_metadata(os.path.join(self.output_folder, out_folder, out_meta_filename), title, meta_slug, post_date, description, tags) self.write_content( os.path.join(self.output_folder, out_folder, out_content_filename), content) else: LOGGER.warn('Not going to import "{0}" because it seems to contain' ' no content.'.format(title)) def process_item(self, item): # The namespace usually is something like: # http://wordpress.org/export/1.2/ wordpress_namespace = item.nsmap['wp'] post_type = get_text_tag( item, '{{{0}}}post_type'.format(wordpress_namespace), 'post') if post_type == 'attachment': self.import_attachment(item, wordpress_namespace) elif post_type == 'post': self.import_item(item, wordpress_namespace, 'posts') else: self.import_item(item, wordpress_namespace, 'stories') def import_posts(self, channel): for item in channel.findall('item'): self.process_item(item) def get_text_tag(tag, name, default): if tag is None: return default t = tag.find(name) if t is not None: return t.text else: return default def separate_qtranslate_content(text): """Parse the content of a wordpress post or page and separate the various language specific contents when they are delimited with 
qtranslate tags: <!--:LL-->blabla<!--:-->""" # TODO: uniformize qtranslate tags <!--/en--> => <!--:--> qt_start = "<!--:" qt_end = "-->" qt_end_with_lang_len = 5 qt_chunks = text.split(qt_start) content_by_lang = {} common_txt_list = [] for c in qt_chunks: if not c.strip(): continue if c.startswith(qt_end): # just after the end of a language specific section, there may # be some piece of common text or tags, or just nothing lang = "" # default language c = c.lstrip(qt_end) if not c: continue elif c[2:].startswith(qt_end): # a language specific section (with language code at the begining) lang = c[:2] c = c[qt_end_with_lang_len:] else: # nowhere specific (maybe there is no language section in the # currently parsed content) lang = "" # default language if not lang: common_txt_list.append(c) for l in content_by_lang.keys(): content_by_lang[l].append(c) else: content_by_lang[lang] = content_by_lang.get(lang, common_txt_list) + [c] # in case there was no language specific section, just add the text if common_txt_list and not content_by_lang: content_by_lang[""] = common_txt_list # Format back the list to simple text for l in content_by_lang.keys(): content_by_lang[l] = " ".join(content_by_lang[l]) return content_by_lang
michaelgallacher/intellij-community
refs/heads/master
python/testData/refactoring/introduceVariable/tripleQuotedSubstring.py
83
print(""""One two * <selection>Three</selection> * Four * Five""" + suffix)
richardnpaul/FWL-Website
refs/heads/master
lib/python2.7/site-packages/django/core/servers/fastcgi.py
241
""" FastCGI (or SCGI, or AJP1.3 ...) server that implements the WSGI protocol. Uses the flup python package: http://www.saddi.com/software/flup/ This is a adaptation of the flup package to add FastCGI server support to run Django apps from Web servers that support the FastCGI protocol. This module can be run standalone or from the django-admin / manage.py scripts using the "runfcgi" directive. Run with the extra option "help" for a list of additional options you can pass to this server. """ import os import sys from django.utils import importlib __version__ = "0.1" __all__ = ["runfastcgi"] FASTCGI_OPTIONS = { 'protocol': 'fcgi', 'host': None, 'port': None, 'socket': None, 'method': 'fork', 'daemonize': None, 'workdir': '/', 'pidfile': None, 'maxspare': 5, 'minspare': 2, 'maxchildren': 50, 'maxrequests': 0, 'debug': None, 'outlog': None, 'errlog': None, 'umask': None, } FASTCGI_HELP = r""" Run this project as a fastcgi (or some other protocol supported by flup) application. To do this, the flup package from http://www.saddi.com/software/flup/ is required. runfcgi [options] [fcgi settings] Optional Fcgi settings: (setting=value) protocol=PROTOCOL fcgi, scgi, ajp, ... (default %(protocol)s) host=HOSTNAME hostname to listen on. port=PORTNUM port to listen on. socket=FILE UNIX socket to listen on. method=IMPL prefork or threaded (default %(method)s). maxrequests=NUMBER number of requests a child handles before it is killed and a new child is forked (0 = no limit). maxspare=NUMBER max number of spare processes / threads (default %(maxspare)s). minspare=NUMBER min number of spare processes / threads (default %(minspare)s). maxchildren=NUMBER hard limit number of processes / threads (default %(maxchildren)s). daemonize=BOOL whether to detach from terminal. pidfile=FILE write the spawned process-id to this file. workdir=DIRECTORY change to this directory when daemonizing (default %(workdir)s). debug=BOOL set to true to enable flup tracebacks. 
outlog=FILE write stdout to this file. errlog=FILE write stderr to this file. umask=UMASK umask to use when daemonizing, in octal notation (default 022). Examples: Run a "standard" fastcgi process on a file-descriptor (for Web servers which spawn your processes for you) $ manage.py runfcgi method=threaded Run a scgi server on a TCP host/port $ manage.py runfcgi protocol=scgi method=prefork host=127.0.0.1 port=8025 Run a fastcgi server on a UNIX domain socket (posix platforms only) $ manage.py runfcgi method=prefork socket=/tmp/fcgi.sock Run a fastCGI as a daemon and write the spawned PID in a file $ manage.py runfcgi socket=/tmp/fcgi.sock method=prefork \ daemonize=true pidfile=/var/run/django-fcgi.pid """ % FASTCGI_OPTIONS def fastcgi_help(message=None): print(FASTCGI_HELP) if message: print(message) return False def runfastcgi(argset=[], **kwargs): options = FASTCGI_OPTIONS.copy() options.update(kwargs) for x in argset: if "=" in x: k, v = x.split('=', 1) else: k, v = x, True options[k.lower()] = v if "help" in options: return fastcgi_help() try: import flup except ImportError as e: sys.stderr.write("ERROR: %s\n" % e) sys.stderr.write(" Unable to load the flup package. In order to run django\n") sys.stderr.write(" as a FastCGI application, you will need to get flup from\n") sys.stderr.write(" http://www.saddi.com/software/flup/ If you've already\n") sys.stderr.write(" installed flup, then make sure you have it in your PYTHONPATH.\n") return False flup_module = 'server.' 
+ options['protocol'] if options['method'] in ('prefork', 'fork'): wsgi_opts = { 'maxSpare': int(options["maxspare"]), 'minSpare': int(options["minspare"]), 'maxChildren': int(options["maxchildren"]), 'maxRequests': int(options["maxrequests"]), } flup_module += '_fork' elif options['method'] in ('thread', 'threaded'): wsgi_opts = { 'maxSpare': int(options["maxspare"]), 'minSpare': int(options["minspare"]), 'maxThreads': int(options["maxchildren"]), } else: return fastcgi_help("ERROR: Implementation must be one of prefork or " "thread.") wsgi_opts['debug'] = options['debug'] is not None try: module = importlib.import_module('.%s' % flup_module, 'flup') WSGIServer = module.WSGIServer except Exception: print("Can't import flup." + flup_module) return False # Prep up and go from django.core.servers.basehttp import get_internal_wsgi_application if options["host"] and options["port"] and not options["socket"]: wsgi_opts['bindAddress'] = (options["host"], int(options["port"])) elif options["socket"] and not options["host"] and not options["port"]: wsgi_opts['bindAddress'] = options["socket"] elif not options["socket"] and not options["host"] and not options["port"]: wsgi_opts['bindAddress'] = None else: return fastcgi_help("Invalid combination of host, port, socket.") if options["daemonize"] is None: # Default to daemonizing if we're running on a socket/named pipe. 
daemonize = (wsgi_opts['bindAddress'] is not None) else: if options["daemonize"].lower() in ('true', 'yes', 't'): daemonize = True elif options["daemonize"].lower() in ('false', 'no', 'f'): daemonize = False else: return fastcgi_help("ERROR: Invalid option for daemonize " "parameter.") daemon_kwargs = {} if options['outlog']: daemon_kwargs['out_log'] = options['outlog'] if options['errlog']: daemon_kwargs['err_log'] = options['errlog'] if options['umask']: daemon_kwargs['umask'] = int(options['umask'], 8) if daemonize: from django.utils.daemonize import become_daemon become_daemon(our_home_dir=options["workdir"], **daemon_kwargs) if options["pidfile"]: with open(options["pidfile"], "w") as fp: fp.write("%d\n" % os.getpid()) WSGIServer(get_internal_wsgi_application(), **wsgi_opts).run() if __name__ == '__main__': runfastcgi(sys.argv[1:])
AndrewGrossman/django
refs/heads/master
tests/flatpages_tests/test_models.py
342
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.contrib.flatpages.models import FlatPage
from django.test import SimpleTestCase
from django.test.utils import override_script_prefix


class FlatpageModelTests(SimpleTestCase):
    """Tests for URL generation on the ``FlatPage`` model."""

    def test_get_absolute_url_urlencodes(self):
        """Non-ASCII characters in the page URL are percent-encoded."""
        page = FlatPage(title="Café!", url='/café/')
        self.assertEqual('/caf%C3%A9/', page.get_absolute_url())

    @override_script_prefix('/beverages/')
    def test_get_absolute_url_honors_script_prefix(self):
        """The active script prefix is prepended to the page URL."""
        page = FlatPage(title="Tea!", url='/tea/')
        self.assertEqual('/beverages/tea/', page.get_absolute_url())
mpetyx/palmdrop
refs/heads/master
venv/lib/python2.7/site-packages/django/contrib/admin/forms.py
97
from __future__ import unicode_literals

from django import forms
from django.contrib.auth import authenticate
from django.contrib.auth.forms import AuthenticationForm
from django.utils.translation import ugettext_lazy

# Shown for any failed staff login. Deliberately generic so the response does
# not reveal whether the username exists or the account merely lacks access.
ERROR_MESSAGE = ugettext_lazy("Please enter the correct %(username)s and password "
        "for a staff account. Note that both fields may be case-sensitive.")


class AdminAuthenticationForm(AuthenticationForm):
    """
    A custom authentication form used in the admin app.

    """
    this_is_the_login_form = forms.BooleanField(widget=forms.HiddenInput, initial=1,
        error_messages={'required': ugettext_lazy("Please log in again, because your session has expired.")})

    def clean(self):
        """Authenticate the submitted credentials and require an active staff user."""
        username = self.cleaned_data.get('username')
        password = self.cleaned_data.get('password')

        if username and password:
            self.user_cache = authenticate(username=username,
                                           password=password)
            user = self.user_cache
            # One shared message covers both "bad credentials" and
            # "valid user but not active staff" — same error either way.
            if user is None or not user.is_active or not user.is_staff:
                raise forms.ValidationError(ERROR_MESSAGE % {
                    'username': self.username_field.verbose_name
                })
        self.check_for_test_cookie()
        return self.cleaned_data
idlead/scikit-learn
refs/heads/master
examples/cluster/plot_kmeans_stability_low_dim_dense.py
338
""" ============================================================ Empirical evaluation of the impact of k-means initialization ============================================================ Evaluate the ability of k-means initializations strategies to make the algorithm convergence robust as measured by the relative standard deviation of the inertia of the clustering (i.e. the sum of distances to the nearest cluster center). The first plot shows the best inertia reached for each combination of the model (``KMeans`` or ``MiniBatchKMeans``) and the init method (``init="random"`` or ``init="kmeans++"``) for increasing values of the ``n_init`` parameter that controls the number of initializations. The second plot demonstrate one single run of the ``MiniBatchKMeans`` estimator using a ``init="random"`` and ``n_init=1``. This run leads to a bad convergence (local optimum) with estimated centers stuck between ground truth clusters. The dataset used for evaluation is a 2D grid of isotropic Gaussian clusters widely spaced. 
""" print(__doc__) # Author: Olivier Grisel <olivier.grisel@ensta.org> # License: BSD 3 clause import numpy as np import matplotlib.pyplot as plt import matplotlib.cm as cm from sklearn.utils import shuffle from sklearn.utils import check_random_state from sklearn.cluster import MiniBatchKMeans from sklearn.cluster import KMeans random_state = np.random.RandomState(0) # Number of run (with randomly generated dataset) for each strategy so as # to be able to compute an estimate of the standard deviation n_runs = 5 # k-means models can do several random inits so as to be able to trade # CPU time for convergence robustness n_init_range = np.array([1, 5, 10, 15, 20]) # Datasets generation parameters n_samples_per_center = 100 grid_size = 3 scale = 0.1 n_clusters = grid_size ** 2 def make_data(random_state, n_samples_per_center, grid_size, scale): random_state = check_random_state(random_state) centers = np.array([[i, j] for i in range(grid_size) for j in range(grid_size)]) n_clusters_true, n_features = centers.shape noise = random_state.normal( scale=scale, size=(n_samples_per_center, centers.shape[1])) X = np.concatenate([c + noise for c in centers]) y = np.concatenate([[i] * n_samples_per_center for i in range(n_clusters_true)]) return shuffle(X, y, random_state=random_state) # Part 1: Quantitative evaluation of various init methods fig = plt.figure() plots = [] legends = [] cases = [ (KMeans, 'k-means++', {}), (KMeans, 'random', {}), (MiniBatchKMeans, 'k-means++', {'max_no_improvement': 3}), (MiniBatchKMeans, 'random', {'max_no_improvement': 3, 'init_size': 500}), ] for factory, init, params in cases: print("Evaluation of %s with %s init" % (factory.__name__, init)) inertia = np.empty((len(n_init_range), n_runs)) for run_id in range(n_runs): X, y = make_data(run_id, n_samples_per_center, grid_size, scale) for i, n_init in enumerate(n_init_range): km = factory(n_clusters=n_clusters, init=init, random_state=run_id, n_init=n_init, **params).fit(X) inertia[i, run_id] = 
km.inertia_ p = plt.errorbar(n_init_range, inertia.mean(axis=1), inertia.std(axis=1)) plots.append(p[0]) legends.append("%s with %s init" % (factory.__name__, init)) plt.xlabel('n_init') plt.ylabel('inertia') plt.legend(plots, legends) plt.title("Mean inertia for various k-means init across %d runs" % n_runs) # Part 2: Qualitative visual inspection of the convergence X, y = make_data(random_state, n_samples_per_center, grid_size, scale) km = MiniBatchKMeans(n_clusters=n_clusters, init='random', n_init=1, random_state=random_state).fit(X) fig = plt.figure() for k in range(n_clusters): my_members = km.labels_ == k color = cm.spectral(float(k) / n_clusters, 1) plt.plot(X[my_members, 0], X[my_members, 1], 'o', marker='.', c=color) cluster_center = km.cluster_centers_[k] plt.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=color, markeredgecolor='k', markersize=6) plt.title("Example cluster allocation with a single random init\n" "with MiniBatchKMeans") plt.show()
mbareta/edx-platform-ft
refs/heads/open-release/eucalyptus.master
lms/djangoapps/courseware/tests/test_video_mongo.py
8
# -*- coding: utf-8 -*- """Video xmodule tests in mongo.""" import ddt import json from collections import OrderedDict from path import Path as path from lxml import etree from mock import patch, MagicMock, Mock from nose.plugins.attrib import attr from django.conf import settings from django.test import TestCase from django.test.utils import override_settings from xmodule.video_module import VideoDescriptor, bumper_utils, video_utils, rewrite_video_url from xmodule.x_module import STUDENT_VIEW from xmodule.tests.test_video import VideoDescriptorTestBase, instantiate_descriptor from xmodule.tests.test_import import DummySystem from xmodule.video_module.transcripts_utils import save_to_store, Transcript from xmodule.modulestore.inheritance import own_metadata from xmodule.contentstore.content import StaticContent from xmodule.exceptions import NotFoundError from xmodule.modulestore.tests.django_utils import ( TEST_DATA_MONGO_MODULESTORE, TEST_DATA_SPLIT_MODULESTORE ) from edxval.api import ( create_profile, create_video, get_video_info, ValCannotCreateError, ValVideoNotFoundError ) from . 
import BaseTestXmodule from .test_video_xml import SOURCE_XML from .test_video_handlers import TestVideo @attr('shard_1') class TestVideoYouTube(TestVideo): METADATA = {} def test_video_constructor(self): """Make sure that all parameters extracted correctly from xml""" context = self.item_descriptor.render(STUDENT_VIEW).content sources = [u'example.mp4', u'example.webm'] expected_context = { 'branding_info': None, 'license': None, 'bumper_metadata': 'null', 'cdn_eval': False, 'cdn_exp_group': None, 'display_name': u'A Name', 'download_video_link': u'example.mp4', 'handout': None, 'id': self.item_descriptor.location.html_id(), 'metadata': json.dumps(OrderedDict({ "saveStateUrl": self.item_descriptor.xmodule_runtime.ajax_url + "/save_user_state", "autoplay": False, "streams": "0.75:jNCf2gIqpeE,1.00:ZwkTiUPN0mg,1.25:rsq9auxASqI,1.50:kMyNdzVHHgg", "sub": "a_sub_file.srt.sjson", "sources": sources, "captionDataDir": None, "showCaptions": "true", "generalSpeed": 1.0, "speed": None, "savedVideoPosition": 0.0, "start": 3603.0, "end": 3610.0, "transcriptLanguage": "en", "transcriptLanguages": OrderedDict({"en": "English", "uk": u"Українська"}), "ytTestTimeout": 1500, "ytApiUrl": "https://www.youtube.com/iframe_api", "ytMetadataUrl": "https://www.googleapis.com/youtube/v3/videos/", "ytKey": None, "transcriptTranslationUrl": self.item_descriptor.xmodule_runtime.handler_url( self.item_descriptor, 'transcript', 'translation/__lang__' ).rstrip('/?'), "transcriptAvailableTranslationsUrl": self.item_descriptor.xmodule_runtime.handler_url( self.item_descriptor, 'transcript', 'available_translations' ).rstrip('/?'), "autohideHtml5": False, "recordedYoutubeIsAvailable": True, })), 'track': None, 'transcript_download_format': 'srt', 'transcript_download_formats_list': [ {'display_name': 'SubRip (.srt) file', 'value': 'srt'}, {'display_name': 'Text (.txt) file', 'value': 'txt'} ], 'poster': 'null', } self.assertEqual( context, 
self.item_descriptor.xmodule_runtime.render_template('video.html', expected_context), ) @attr('shard_1') class TestVideoNonYouTube(TestVideo): """Integration tests: web client + mongo.""" DATA = """ <video show_captions="true" display_name="A Name" sub="a_sub_file.srt.sjson" download_video="true" start_time="01:00:03" end_time="01:00:10" > <source src="example.mp4"/> <source src="example.webm"/> </video> """ MODEL_DATA = { 'data': DATA, } METADATA = {} def test_video_constructor(self): """Make sure that if the 'youtube' attribute is omitted in XML, then the template generates an empty string for the YouTube streams. """ context = self.item_descriptor.render(STUDENT_VIEW).content sources = [u'example.mp4', u'example.webm'] expected_context = { 'branding_info': None, 'license': None, 'bumper_metadata': 'null', 'cdn_eval': False, 'cdn_exp_group': None, 'display_name': u'A Name', 'download_video_link': u'example.mp4', 'handout': None, 'id': self.item_descriptor.location.html_id(), 'metadata': json.dumps(OrderedDict({ "saveStateUrl": self.item_descriptor.xmodule_runtime.ajax_url + "/save_user_state", "autoplay": False, "streams": "1.00:3_yD_cEKoCk", "sub": "a_sub_file.srt.sjson", "sources": sources, "captionDataDir": None, "showCaptions": "true", "generalSpeed": 1.0, "speed": None, "savedVideoPosition": 0.0, "start": 3603.0, "end": 3610.0, "transcriptLanguage": "en", "transcriptLanguages": OrderedDict({"en": "English"}), "ytTestTimeout": 1500, "ytApiUrl": "https://www.youtube.com/iframe_api", "ytMetadataUrl": "https://www.googleapis.com/youtube/v3/videos/", "ytKey": None, "transcriptTranslationUrl": self.item_descriptor.xmodule_runtime.handler_url( self.item_descriptor, 'transcript', 'translation/__lang__' ).rstrip('/?'), "transcriptAvailableTranslationsUrl": self.item_descriptor.xmodule_runtime.handler_url( self.item_descriptor, 'transcript', 'available_translations' ).rstrip('/?'), "autohideHtml5": False, "recordedYoutubeIsAvailable": True, })), 'track': None, 
'transcript_download_format': 'srt', 'transcript_download_formats_list': [ {'display_name': 'SubRip (.srt) file', 'value': 'srt'}, {'display_name': 'Text (.txt) file', 'value': 'txt'} ], 'poster': 'null', } self.assertEqual( context, self.item_descriptor.xmodule_runtime.render_template('video.html', expected_context), ) @attr('shard_1') class TestGetHtmlMethod(BaseTestXmodule): ''' Make sure that `get_html` works correctly. ''' CATEGORY = "video" DATA = SOURCE_XML METADATA = {} def setUp(self): super(TestGetHtmlMethod, self).setUp() self.setup_course() self.default_metadata_dict = OrderedDict({ "saveStateUrl": "", "autoplay": settings.FEATURES.get('AUTOPLAY_VIDEOS', True), "streams": "1.00:3_yD_cEKoCk", "sub": "a_sub_file.srt.sjson", "sources": '[]', "captionDataDir": None, "showCaptions": "true", "generalSpeed": 1.0, "speed": None, "savedVideoPosition": 0.0, "start": 3603.0, "end": 3610.0, "transcriptLanguage": "en", "transcriptLanguages": OrderedDict({"en": "English"}), "ytTestTimeout": 1500, "ytApiUrl": "https://www.youtube.com/iframe_api", "ytMetadataUrl": "https://www.googleapis.com/youtube/v3/videos/", "ytKey": None, "transcriptTranslationUrl": self.item_descriptor.xmodule_runtime.handler_url( self.item_descriptor, 'transcript', 'translation/__lang__' ).rstrip('/?'), "transcriptAvailableTranslationsUrl": self.item_descriptor.xmodule_runtime.handler_url( self.item_descriptor, 'transcript', 'available_translations' ).rstrip('/?'), "autohideHtml5": False, "recordedYoutubeIsAvailable": True, }) def test_get_html_track(self): SOURCE_XML = """ <video show_captions="true" display_name="A Name" sub="{sub}" download_track="{download_track}" start_time="01:00:03" end_time="01:00:10" download_video="true" > <source src="example.mp4"/> <source src="example.webm"/> {track} {transcripts} </video> """ cases = [ { 'download_track': u'true', 'track': u'<track src="http://www.example.com/track"/>', 'sub': u'a_sub_file.srt.sjson', 'expected_track_url': 
u'http://www.example.com/track', 'transcripts': '', }, { 'download_track': u'true', 'track': u'', 'sub': u'a_sub_file.srt.sjson', 'expected_track_url': u'a_sub_file.srt.sjson', 'transcripts': '', }, { 'download_track': u'true', 'track': u'', 'sub': u'', 'expected_track_url': None, 'transcripts': '', }, { 'download_track': u'false', 'track': u'<track src="http://www.example.com/track"/>', 'sub': u'a_sub_file.srt.sjson', 'expected_track_url': None, 'transcripts': '', }, { 'download_track': u'true', 'track': u'', 'sub': u'', 'expected_track_url': u'a_sub_file.srt.sjson', 'transcripts': '<transcript language="uk" src="ukrainian.srt" />', }, ] sources = [u'example.mp4', u'example.webm'] expected_context = { 'branding_info': None, 'license': None, 'bumper_metadata': 'null', 'cdn_eval': False, 'cdn_exp_group': None, 'display_name': u'A Name', 'download_video_link': u'example.mp4', 'handout': None, 'id': self.item_descriptor.location.html_id(), 'metadata': '', 'track': None, 'transcript_download_format': 'srt', 'transcript_download_formats_list': [ {'display_name': 'SubRip (.srt) file', 'value': 'srt'}, {'display_name': 'Text (.txt) file', 'value': 'txt'} ], 'poster': 'null', } for data in cases: metadata = self.default_metadata_dict metadata['sources'] = sources DATA = SOURCE_XML.format( download_track=data['download_track'], track=data['track'], sub=data['sub'], transcripts=data['transcripts'], ) self.initialize_module(data=DATA) track_url = self.item_descriptor.xmodule_runtime.handler_url( self.item_descriptor, 'transcript', 'download' ).rstrip('/?') context = self.item_descriptor.render(STUDENT_VIEW).content metadata.update({ 'transcriptLanguages': {"en": "English"} if not data['transcripts'] else {"uk": u'Українська'}, 'transcriptLanguage': u'en' if not data['transcripts'] or data.get('sub') else u'uk', 'transcriptTranslationUrl': self.item_descriptor.xmodule_runtime.handler_url( self.item_descriptor, 'transcript', 'translation/__lang__' ).rstrip('/?'), 
'transcriptAvailableTranslationsUrl': self.item_descriptor.xmodule_runtime.handler_url( self.item_descriptor, 'transcript', 'available_translations' ).rstrip('/?'), 'saveStateUrl': self.item_descriptor.xmodule_runtime.ajax_url + '/save_user_state', 'sub': data['sub'], }) expected_context.update({ 'transcript_download_format': ( None if self.item_descriptor.track and self.item_descriptor.download_track else 'srt' ), 'track': ( track_url if data['expected_track_url'] == u'a_sub_file.srt.sjson' else data['expected_track_url'] ), 'id': self.item_descriptor.location.html_id(), 'metadata': json.dumps(metadata) }) self.assertEqual( context, self.item_descriptor.xmodule_runtime.render_template('video.html', expected_context), ) def test_get_html_source(self): SOURCE_XML = """ <video show_captions="true" display_name="A Name" sub="a_sub_file.srt.sjson" source="{source}" download_video="{download_video}" start_time="01:00:03" end_time="01:00:10" > {sources} </video> """ cases = [ # self.download_video == True { 'download_video': 'true', 'source': 'example_source.mp4', 'sources': """ <source src="example.mp4"/> <source src="example.webm"/> """, 'result': { 'download_video_link': u'example_source.mp4', 'sources': [u'example.mp4', u'example.webm'], }, }, { 'download_video': 'true', 'source': '', 'sources': """ <source src="example.mp4"/> <source src="example.webm"/> """, 'result': { 'download_video_link': u'example.mp4', 'sources': [u'example.mp4', u'example.webm'], }, }, { 'download_video': 'true', 'source': '', 'sources': [], 'result': {}, }, # self.download_video == False { 'download_video': 'false', 'source': 'example_source.mp4', 'sources': """ <source src="example.mp4"/> <source src="example.webm"/> """, 'result': { 'sources': [u'example.mp4', u'example.webm'], }, }, ] initial_context = { 'branding_info': None, 'license': None, 'bumper_metadata': 'null', 'cdn_eval': False, 'cdn_exp_group': None, 'display_name': u'A Name', 'download_video_link': u'example.mp4', 'handout': 
None, 'id': self.item_descriptor.location.html_id(), 'metadata': self.default_metadata_dict, 'track': None, 'transcript_download_format': 'srt', 'transcript_download_formats_list': [ {'display_name': 'SubRip (.srt) file', 'value': 'srt'}, {'display_name': 'Text (.txt) file', 'value': 'txt'} ], 'poster': 'null', } for data in cases: DATA = SOURCE_XML.format( download_video=data['download_video'], source=data['source'], sources=data['sources'] ) self.initialize_module(data=DATA) context = self.item_descriptor.render(STUDENT_VIEW).content expected_context = dict(initial_context) expected_context['metadata'].update({ 'transcriptTranslationUrl': self.item_descriptor.xmodule_runtime.handler_url( self.item_descriptor, 'transcript', 'translation/__lang__' ).rstrip('/?'), 'transcriptAvailableTranslationsUrl': self.item_descriptor.xmodule_runtime.handler_url( self.item_descriptor, 'transcript', 'available_translations' ).rstrip('/?'), 'saveStateUrl': self.item_descriptor.xmodule_runtime.ajax_url + '/save_user_state', 'sources': data['result'].get('sources', []), }) expected_context.update({ 'id': self.item_descriptor.location.html_id(), 'download_video_link': data['result'].get('download_video_link'), 'metadata': json.dumps(expected_context['metadata']) }) self.assertEqual( context, self.item_descriptor.xmodule_runtime.render_template('video.html', expected_context) ) def test_get_html_with_non_existent_edx_video_id(self): """ Tests the VideoModule get_html where a edx_video_id is given but a video is not found """ SOURCE_XML = """ <video show_captions="true" display_name="A Name" sub="a_sub_file.srt.sjson" source="{source}" download_video="{download_video}" start_time="01:00:03" end_time="01:00:10" edx_video_id="{edx_video_id}" > {sources} </video> """ no_video_data = { 'download_video': 'true', 'source': 'example_source.mp4', 'sources': """ <source src="example.mp4"/> <source src="example.webm"/> """, 'edx_video_id': "meow", 'result': { 'download_video_link': 
u'example_source.mp4', 'sources': [u'example.mp4', u'example.webm'], } } DATA = SOURCE_XML.format( download_video=no_video_data['download_video'], source=no_video_data['source'], sources=no_video_data['sources'], edx_video_id=no_video_data['edx_video_id'] ) self.initialize_module(data=DATA) # Referencing a non-existent VAL ID in courseware won't cause an error -- # it'll just fall back to the values in the VideoDescriptor. self.assertIn("example_source.mp4", self.item_descriptor.render(STUDENT_VIEW).content) def test_get_html_with_mocked_edx_video_id(self): SOURCE_XML = """ <video show_captions="true" display_name="A Name" sub="a_sub_file.srt.sjson" source="{source}" download_video="{download_video}" start_time="01:00:03" end_time="01:00:10" edx_video_id="{edx_video_id}" > {sources} </video> """ data = { # test with download_video set to false and make sure download_video_link is not set (is None) 'download_video': 'false', 'source': 'example_source.mp4', 'sources': """ <source src="example.mp4"/> <source src="example.webm"/> """, 'edx_video_id': "mock item", 'result': { 'download_video_link': None, # make sure the desktop_mp4 url is included as part of the alternative sources. 
'sources': [u'example.mp4', u'example.webm', u'http://www.meowmix.com'], } } # Video found for edx_video_id metadata = self.default_metadata_dict metadata['autoplay'] = False metadata['sources'] = "" initial_context = { 'branding_info': None, 'license': None, 'bumper_metadata': 'null', 'cdn_eval': False, 'cdn_exp_group': None, 'display_name': u'A Name', 'download_video_link': u'example.mp4', 'handout': None, 'id': self.item_descriptor.location.html_id(), 'track': None, 'transcript_download_format': 'srt', 'transcript_download_formats_list': [ {'display_name': 'SubRip (.srt) file', 'value': 'srt'}, {'display_name': 'Text (.txt) file', 'value': 'txt'} ], 'poster': 'null', 'metadata': metadata } DATA = SOURCE_XML.format( download_video=data['download_video'], source=data['source'], sources=data['sources'], edx_video_id=data['edx_video_id'] ) self.initialize_module(data=DATA) with patch('edxval.api.get_video_info') as mock_get_video_info: mock_get_video_info.return_value = { 'url': '/edxval/video/example', 'edx_video_id': u'example', 'duration': 111.0, 'client_video_id': u'The example video', 'encoded_videos': [ { 'url': u'http://www.meowmix.com', 'file_size': 25556, 'bitrate': 9600, 'profile': u'desktop_mp4' } ] } context = self.item_descriptor.render(STUDENT_VIEW).content expected_context = dict(initial_context) expected_context['metadata'].update({ 'transcriptTranslationUrl': self.item_descriptor.xmodule_runtime.handler_url( self.item_descriptor, 'transcript', 'translation/__lang__' ).rstrip('/?'), 'transcriptAvailableTranslationsUrl': self.item_descriptor.xmodule_runtime.handler_url( self.item_descriptor, 'transcript', 'available_translations' ).rstrip('/?'), 'saveStateUrl': self.item_descriptor.xmodule_runtime.ajax_url + '/save_user_state', 'sources': data['result']['sources'], }) expected_context.update({ 'id': self.item_descriptor.location.html_id(), 'download_video_link': data['result']['download_video_link'], 'metadata': 
json.dumps(expected_context['metadata']) }) self.assertEqual( context, self.item_descriptor.xmodule_runtime.render_template('video.html', expected_context) ) def test_get_html_with_existing_edx_video_id(self): # create test profiles and their encodings encoded_videos = [] for profile, extension in [("desktop_webm", "webm"), ("desktop_mp4", "mp4")]: create_profile(profile) encoded_videos.append( dict( url=u"http://fake-video.edx.org/thundercats.{}".format(extension), file_size=9000, bitrate=42, profile=profile, ) ) result = create_video( dict( client_video_id="Thunder Cats", duration=111, edx_video_id="thundercats", status='test', encoded_videos=encoded_videos ) ) self.assertEqual(result, "thundercats") SOURCE_XML = """ <video show_captions="true" display_name="A Name" sub="a_sub_file.srt.sjson" source="{source}" download_video="{download_video}" start_time="01:00:03" end_time="01:00:10" edx_video_id="{edx_video_id}" > {sources} </video> """ data = { 'download_video': 'true', 'source': 'example_source.mp4', 'sources': """ <source src="example.mp4"/> <source src="example.webm"/> """, 'edx_video_id': "thundercats", 'result': { 'download_video_link': u'http://fake-video.edx.org/thundercats.mp4', # make sure the urls for the various encodings are included as part of the alternative sources. 
'sources': [u'example.mp4', u'example.webm'] + [video['url'] for video in encoded_videos], } } # Video found for edx_video_id metadata = self.default_metadata_dict metadata['sources'] = "" initial_context = { 'branding_info': None, 'license': None, 'bumper_metadata': 'null', 'cdn_eval': False, 'cdn_exp_group': None, 'display_name': u'A Name', 'download_video_link': u'example.mp4', 'handout': None, 'id': self.item_descriptor.location.html_id(), 'track': None, 'transcript_download_format': 'srt', 'transcript_download_formats_list': [ {'display_name': 'SubRip (.srt) file', 'value': 'srt'}, {'display_name': 'Text (.txt) file', 'value': 'txt'} ], 'poster': 'null', 'metadata': metadata, } DATA = SOURCE_XML.format( download_video=data['download_video'], source=data['source'], sources=data['sources'], edx_video_id=data['edx_video_id'] ) self.initialize_module(data=DATA) context = self.item_descriptor.render(STUDENT_VIEW).content expected_context = dict(initial_context) expected_context['metadata'].update({ 'transcriptTranslationUrl': self.item_descriptor.xmodule_runtime.handler_url( self.item_descriptor, 'transcript', 'translation/__lang__' ).rstrip('/?'), 'transcriptAvailableTranslationsUrl': self.item_descriptor.xmodule_runtime.handler_url( self.item_descriptor, 'transcript', 'available_translations' ).rstrip('/?'), 'saveStateUrl': self.item_descriptor.xmodule_runtime.ajax_url + '/save_user_state', 'sources': data['result']['sources'], }) expected_context.update({ 'id': self.item_descriptor.location.html_id(), 'download_video_link': data['result']['download_video_link'], 'metadata': json.dumps(expected_context['metadata']) }) self.assertEqual( context, self.item_descriptor.xmodule_runtime.render_template('video.html', expected_context) ) # pylint: disable=invalid-name @patch('xmodule.video_module.video_module.BrandingInfoConfig') @patch('xmodule.video_module.video_module.rewrite_video_url') def test_get_html_cdn_source(self, mocked_get_video, mock_BrandingInfoConfig): 
""" Test if sources got from CDN """ mock_BrandingInfoConfig.get_config.return_value = { "CN": { 'url': 'http://www.xuetangx.com', 'logo_src': 'http://www.xuetangx.com/static/images/logo.png', 'logo_tag': 'Video hosted by XuetangX.com' } } def side_effect(*args, **kwargs): cdn = { 'http://example.com/example.mp4': 'http://cdn-example.com/example.mp4', 'http://example.com/example.webm': 'http://cdn-example.com/example.webm', } return cdn.get(args[1]) mocked_get_video.side_effect = side_effect SOURCE_XML = """ <video show_captions="true" display_name="A Name" sub="a_sub_file.srt.sjson" source="{source}" download_video="{download_video}" edx_video_id="{edx_video_id}" start_time="01:00:03" end_time="01:00:10" > {sources} </video> """ case_data = { 'download_video': 'true', 'source': 'example_source.mp4', 'sources': """ <source src="http://example.com/example.mp4"/> <source src="http://example.com/example.webm"/> """, 'result': { 'download_video_link': u'example_source.mp4', 'sources': [ u'http://cdn-example.com/example.mp4', u'http://cdn-example.com/example.webm' ], }, } # test with and without edx_video_id specified. 
cases = [ dict(case_data, edx_video_id=""), dict(case_data, edx_video_id="vid-v1:12345"), ] initial_context = { 'branding_info': { 'logo_src': 'http://www.xuetangx.com/static/images/logo.png', 'logo_tag': 'Video hosted by XuetangX.com', 'url': 'http://www.xuetangx.com' }, 'license': None, 'bumper_metadata': 'null', 'cdn_eval': False, 'cdn_exp_group': None, 'display_name': u'A Name', 'download_video_link': None, 'handout': None, 'id': None, 'metadata': self.default_metadata_dict, 'track': None, 'transcript_download_format': 'srt', 'transcript_download_formats_list': [ {'display_name': 'SubRip (.srt) file', 'value': 'srt'}, {'display_name': 'Text (.txt) file', 'value': 'txt'} ], 'poster': 'null', } for data in cases: DATA = SOURCE_XML.format( download_video=data['download_video'], source=data['source'], sources=data['sources'], edx_video_id=data['edx_video_id'], ) self.initialize_module(data=DATA) self.item_descriptor.xmodule_runtime.user_location = 'CN' context = self.item_descriptor.render('student_view').content expected_context = dict(initial_context) expected_context['metadata'].update({ 'transcriptTranslationUrl': self.item_descriptor.xmodule_runtime.handler_url( self.item_descriptor, 'transcript', 'translation/__lang__' ).rstrip('/?'), 'transcriptAvailableTranslationsUrl': self.item_descriptor.xmodule_runtime.handler_url( self.item_descriptor, 'transcript', 'available_translations' ).rstrip('/?'), 'saveStateUrl': self.item_descriptor.xmodule_runtime.ajax_url + '/save_user_state', 'sources': data['result'].get('sources', []), }) expected_context.update({ 'id': self.item_descriptor.location.html_id(), 'download_video_link': data['result'].get('download_video_link'), 'metadata': json.dumps(expected_context['metadata']) }) self.assertEqual( context, self.item_descriptor.xmodule_runtime.render_template('video.html', expected_context) ) @attr('shard_1') class TestVideoCDNRewriting(BaseTestXmodule): """ Tests for Video CDN. 
""" def setUp(self, *args, **kwargs): super(TestVideoCDNRewriting, self).setUp(*args, **kwargs) self.original_video_file = "original_video.mp4" self.original_video_url = "http://www.originalvideo.com/" + self.original_video_file @patch.dict("django.conf.settings.CDN_VIDEO_URLS", {"CN": "https://chinacdn.cn/"}) def test_rewrite_video_url_success(self): """ Test successful CDN request. """ cdn_response_video_url = settings.CDN_VIDEO_URLS["CN"] + self.original_video_file self.assertEqual( rewrite_video_url(settings.CDN_VIDEO_URLS["CN"], self.original_video_url), cdn_response_video_url ) @patch.dict("django.conf.settings.CDN_VIDEO_URLS", {"CN": "https://chinacdn.cn/"}) def test_rewrite_url_concat(self): """ Test that written URLs are returned clean despite input """ cdn_response_video_url = settings.CDN_VIDEO_URLS["CN"] + "original_video.mp4" self.assertEqual( rewrite_video_url(settings.CDN_VIDEO_URLS["CN"] + "///", self.original_video_url), cdn_response_video_url ) def test_rewrite_video_url_invalid_url(self): """ Test if no alternative video in CDN exists. """ invalid_cdn_url = 'http://http://fakecdn.com/' self.assertIsNone(rewrite_video_url(invalid_cdn_url, self.original_video_url)) def test_none_args(self): """ Ensure None args return None """ self.assertIsNone(rewrite_video_url(None, None)) def test_emptystring_args(self): """ Ensure emptyrstring args return None """ self.assertIsNone(rewrite_video_url("", "")) @attr('shard_1') class TestVideoDescriptorInitialization(BaseTestXmodule): """ Make sure that module initialization works correctly. 
""" CATEGORY = "video" DATA = SOURCE_XML METADATA = {} def setUp(self): super(TestVideoDescriptorInitialization, self).setUp() self.setup_course() def test_source_not_in_html5sources(self): metadata = { 'source': 'http://example.org/video.mp4', 'html5_sources': ['http://youtu.be/3_yD_cEKoCk.mp4'], } self.initialize_module(metadata=metadata) fields = self.item_descriptor.editable_metadata_fields self.assertIn('source', fields) self.assertEqual(self.item_descriptor.source, 'http://example.org/video.mp4') self.assertTrue(self.item_descriptor.download_video) self.assertTrue(self.item_descriptor.source_visible) def test_source_in_html5sources(self): metadata = { 'source': 'http://example.org/video.mp4', 'html5_sources': ['http://example.org/video.mp4'], } self.initialize_module(metadata=metadata) fields = self.item_descriptor.editable_metadata_fields self.assertNotIn('source', fields) self.assertTrue(self.item_descriptor.download_video) self.assertFalse(self.item_descriptor.source_visible) def test_download_video_is_explicitly_set(self): metadata = { 'track': u'http://some_track.srt', 'source': 'http://example.org/video.mp4', 'html5_sources': ['http://youtu.be/3_yD_cEKoCk.mp4'], 'download_video': False, } self.initialize_module(metadata=metadata) fields = self.item_descriptor.editable_metadata_fields self.assertIn('source', fields) self.assertIn('download_video', fields) self.assertFalse(self.item_descriptor.download_video) self.assertTrue(self.item_descriptor.source_visible) self.assertTrue(self.item_descriptor.download_track) def test_source_is_empty(self): metadata = { 'source': '', 'html5_sources': ['http://youtu.be/3_yD_cEKoCk.mp4'], } self.initialize_module(metadata=metadata) fields = self.item_descriptor.editable_metadata_fields self.assertNotIn('source', fields) self.assertFalse(self.item_descriptor.download_video) @attr('shard_1') @ddt.ddt class TestEditorSavedMethod(BaseTestXmodule): """ Make sure that `editor_saved` method works correctly. 
""" CATEGORY = "video" DATA = SOURCE_XML METADATA = {} def setUp(self): super(TestEditorSavedMethod, self).setUp() self.setup_course() self.metadata = { 'source': 'http://youtu.be/3_yD_cEKoCk', 'html5_sources': ['http://example.org/video.mp4'], } # path to subs_3_yD_cEKoCk.srt.sjson file self.file_name = 'subs_3_yD_cEKoCk.srt.sjson' # pylint: disable=no-value-for-parameter self.test_dir = path(__file__).abspath().dirname().dirname().dirname().dirname().dirname() self.file_path = self.test_dir + '/common/test/data/uploads/' + self.file_name @ddt.data(TEST_DATA_MONGO_MODULESTORE, TEST_DATA_SPLIT_MODULESTORE) def test_editor_saved_when_html5_sub_not_exist(self, default_store): """ When there is youtube_sub exist but no html5_sub present for html5_sources, editor_saved function will generate new html5_sub for video. """ self.MODULESTORE = default_store # pylint: disable=invalid-name self.initialize_module(metadata=self.metadata) item = self.store.get_item(self.item_descriptor.location) with open(self.file_path, "r") as myfile: save_to_store(myfile.read(), self.file_name, 'text/sjson', item.location) item.sub = "3_yD_cEKoCk" # subs_video.srt.sjson does not exist before calling editor_saved function with self.assertRaises(NotFoundError): Transcript.get_asset(item.location, 'subs_video.srt.sjson') old_metadata = own_metadata(item) # calling editor_saved will generate new file subs_video.srt.sjson for html5_sources item.editor_saved(self.user, old_metadata, None) self.assertIsInstance(Transcript.get_asset(item.location, 'subs_3_yD_cEKoCk.srt.sjson'), StaticContent) self.assertIsInstance(Transcript.get_asset(item.location, 'subs_video.srt.sjson'), StaticContent) @ddt.data(TEST_DATA_MONGO_MODULESTORE, TEST_DATA_SPLIT_MODULESTORE) def test_editor_saved_when_youtube_and_html5_subs_exist(self, default_store): """ When both youtube_sub and html5_sub already exist then no new sub will be generated by editor_saved function. 
""" self.MODULESTORE = default_store self.initialize_module(metadata=self.metadata) item = self.store.get_item(self.item_descriptor.location) with open(self.file_path, "r") as myfile: save_to_store(myfile.read(), self.file_name, 'text/sjson', item.location) save_to_store(myfile.read(), 'subs_video.srt.sjson', 'text/sjson', item.location) item.sub = "3_yD_cEKoCk" # subs_3_yD_cEKoCk.srt.sjson and subs_video.srt.sjson already exist self.assertIsInstance(Transcript.get_asset(item.location, self.file_name), StaticContent) self.assertIsInstance(Transcript.get_asset(item.location, 'subs_video.srt.sjson'), StaticContent) old_metadata = own_metadata(item) with patch('xmodule.video_module.video_module.manage_video_subtitles_save') as manage_video_subtitles_save: item.editor_saved(self.user, old_metadata, None) self.assertFalse(manage_video_subtitles_save.called) @ddt.ddt class TestVideoDescriptorStudentViewJson(TestCase): """ Tests for the student_view_data method on VideoDescriptor. """ TEST_DURATION = 111.0 TEST_PROFILE = "mobile" TEST_SOURCE_URL = "http://www.example.com/source.mp4" TEST_LANGUAGE = "ge" TEST_ENCODED_VIDEO = { 'profile': TEST_PROFILE, 'bitrate': 333, 'url': 'http://example.com/video', 'file_size': 222, } TEST_EDX_VIDEO_ID = 'test_edx_video_id' TEST_YOUTUBE_ID = 'test_youtube_id' TEST_YOUTUBE_EXPECTED_URL = 'https://www.youtube.com/watch?v=test_youtube_id' def setUp(self): super(TestVideoDescriptorStudentViewJson, self).setUp() video_declaration = "<video display_name='Test Video' youtube_id_1_0=\'" + self.TEST_YOUTUBE_ID + "\'>" sample_xml = ''.join([ video_declaration, "<source src='", self.TEST_SOURCE_URL, "'/> ", "<transcript language='", self.TEST_LANGUAGE, "' src='german_translation.srt' /> ", "</video>"] ) self.transcript_url = "transcript_url" self.video = instantiate_descriptor(data=sample_xml) self.video.runtime.handler_url = Mock(return_value=self.transcript_url) def setup_val_video(self, associate_course_in_val=False): """ Creates a video entry 
in VAL. Arguments: associate_course - If True, associates the test course with the video in VAL. """ create_profile('mobile') create_video({ 'edx_video_id': self.TEST_EDX_VIDEO_ID, 'client_video_id': 'test_client_video_id', 'duration': self.TEST_DURATION, 'status': 'dummy', 'encoded_videos': [self.TEST_ENCODED_VIDEO], 'courses': [self.video.location.course_key] if associate_course_in_val else [], }) self.val_video = get_video_info(self.TEST_EDX_VIDEO_ID) # pylint: disable=attribute-defined-outside-init def get_result(self, allow_cache_miss=True): """ Returns the result from calling the video's student_view_data method. Arguments: allow_cache_miss is passed in the context to the student_view_data method. """ context = { "profiles": [self.TEST_PROFILE], "allow_cache_miss": "True" if allow_cache_miss else "False" } return self.video.student_view_data(context) def verify_result_with_fallback_and_youtube(self, result): """ Verifies the result is as expected when returning "fallback" video data (not from VAL). """ self.assertDictEqual( result, { "only_on_web": False, "duration": None, "transcripts": {self.TEST_LANGUAGE: self.transcript_url}, "encoded_videos": { "fallback": {"url": self.TEST_SOURCE_URL, "file_size": 0}, "youtube": {"url": self.TEST_YOUTUBE_EXPECTED_URL, "file_size": 0} }, } ) def verify_result_with_youtube_url(self, result): """ Verifies the result is as expected when returning "fallback" video data (not from VAL). """ self.assertDictEqual( result, { "only_on_web": False, "duration": None, "transcripts": {self.TEST_LANGUAGE: self.transcript_url}, "encoded_videos": {"youtube": {"url": self.TEST_YOUTUBE_EXPECTED_URL, "file_size": 0}}, } ) def verify_result_with_val_profile(self, result): """ Verifies the result is as expected when returning video data from VAL. 
""" self.assertDictContainsSubset( result.pop("encoded_videos")[self.TEST_PROFILE], self.TEST_ENCODED_VIDEO, ) self.assertDictEqual( result, { "only_on_web": False, "duration": self.TEST_DURATION, "transcripts": {self.TEST_LANGUAGE: self.transcript_url}, } ) def test_only_on_web(self): self.video.only_on_web = True result = self.get_result() self.assertDictEqual(result, {"only_on_web": True}) def test_no_edx_video_id(self): result = self.get_result() self.verify_result_with_fallback_and_youtube(result) def test_no_edx_video_id_and_no_fallback(self): video_declaration = "<video display_name='Test Video' youtube_id_1_0=\'{}\'>".format(self.TEST_YOUTUBE_ID) # the video has no source listed, only a youtube link, so no fallback url will be provided sample_xml = ''.join([ video_declaration, "<transcript language='", self.TEST_LANGUAGE, "' src='german_translation.srt' /> ", "</video>" ]) self.transcript_url = "transcript_url" self.video = instantiate_descriptor(data=sample_xml) self.video.runtime.handler_url = Mock(return_value=self.transcript_url) result = self.get_result() self.verify_result_with_youtube_url(result) @ddt.data(True, False) def test_with_edx_video_id_video_associated_in_val(self, allow_cache_miss): """ Tests retrieving a video that is stored in VAL and associated with a course in VAL. """ self.video.edx_video_id = self.TEST_EDX_VIDEO_ID self.setup_val_video(associate_course_in_val=True) # the video is associated in VAL so no cache miss should ever happen but test retrieval in both contexts result = self.get_result(allow_cache_miss) self.verify_result_with_val_profile(result) @ddt.data(True, False) def test_with_edx_video_id_video_unassociated_in_val(self, allow_cache_miss): """ Tests retrieving a video that is stored in VAL but not associated with a course in VAL. 
""" self.video.edx_video_id = self.TEST_EDX_VIDEO_ID self.setup_val_video(associate_course_in_val=False) result = self.get_result(allow_cache_miss) if allow_cache_miss: self.verify_result_with_val_profile(result) else: self.verify_result_with_fallback_and_youtube(result) @ddt.data(True, False) def test_with_edx_video_id_video_not_in_val(self, allow_cache_miss): """ Tests retrieving a video that is not stored in VAL. """ self.video.edx_video_id = self.TEST_EDX_VIDEO_ID # The video is not in VAL so in contexts that do and don't allow cache misses we should always get a fallback result = self.get_result(allow_cache_miss) self.verify_result_with_fallback_and_youtube(result) @attr('shard_1') class VideoDescriptorTest(TestCase, VideoDescriptorTestBase): """ Tests for video descriptor that requires access to django settings. """ def setUp(self): super(VideoDescriptorTest, self).setUp() self.descriptor.runtime.handler_url = MagicMock() def test_get_context(self): """" Test get_context. This test is located here and not in xmodule.tests because get_context calls editable_metadata_fields. Which, in turn, uses settings.LANGUAGES from django setttings. 
""" correct_tabs = [ { 'name': "Basic", 'template': "video/transcripts.html", 'current': True }, { 'name': 'Advanced', 'template': 'tabs/metadata-edit-tab.html' } ] rendered_context = self.descriptor.get_context() self.assertListEqual(rendered_context['tabs'], correct_tabs) def test_export_val_data(self): self.descriptor.edx_video_id = 'test_edx_video_id' create_profile('mobile') create_video({ 'edx_video_id': self.descriptor.edx_video_id, 'client_video_id': 'test_client_video_id', 'duration': 111, 'status': 'dummy', 'encoded_videos': [{ 'profile': 'mobile', 'url': 'http://example.com/video', 'file_size': 222, 'bitrate': 333, }], }) actual = self.descriptor.definition_to_xml(resource_fs=None) expected_str = """ <video download_video="false" url_name="SampleProblem"> <video_asset client_video_id="test_client_video_id" duration="111.0"> <encoded_video profile="mobile" url="http://example.com/video" file_size="222" bitrate="333"/> </video_asset> </video> """ parser = etree.XMLParser(remove_blank_text=True) expected = etree.XML(expected_str, parser=parser) self.assertXmlEqual(expected, actual) def test_export_val_data_not_found(self): self.descriptor.edx_video_id = 'nonexistent' actual = self.descriptor.definition_to_xml(resource_fs=None) expected_str = """<video download_video="false" url_name="SampleProblem"/>""" parser = etree.XMLParser(remove_blank_text=True) expected = etree.XML(expected_str, parser=parser) self.assertXmlEqual(expected, actual) def test_import_val_data(self): create_profile('mobile') module_system = DummySystem(load_error_modules=True) xml_data = """ <video edx_video_id="test_edx_video_id"> <video_asset client_video_id="test_client_video_id" duration="111.0"> <encoded_video profile="mobile" url="http://example.com/video" file_size="222" bitrate="333"/> </video_asset> </video> """ id_generator = Mock() id_generator.target_course_id = "test_course_id" video = VideoDescriptor.from_xml(xml_data, module_system, id_generator) 
self.assertEqual(video.edx_video_id, 'test_edx_video_id') video_data = get_video_info(video.edx_video_id) self.assertEqual(video_data['client_video_id'], 'test_client_video_id') self.assertEqual(video_data['duration'], 111) self.assertEqual(video_data['status'], 'imported') self.assertEqual(video_data['courses'], [id_generator.target_course_id]) self.assertEqual(video_data['encoded_videos'][0]['profile'], 'mobile') self.assertEqual(video_data['encoded_videos'][0]['url'], 'http://example.com/video') self.assertEqual(video_data['encoded_videos'][0]['file_size'], 222) self.assertEqual(video_data['encoded_videos'][0]['bitrate'], 333) def test_import_val_data_invalid(self): create_profile('mobile') module_system = DummySystem(load_error_modules=True) # Negative file_size is invalid xml_data = """ <video edx_video_id="test_edx_video_id"> <video_asset client_video_id="test_client_video_id" duration="111.0"> <encoded_video profile="mobile" url="http://example.com/video" file_size="-222" bitrate="333"/> </video_asset> </video> """ with self.assertRaises(ValCannotCreateError): VideoDescriptor.from_xml(xml_data, module_system, id_generator=Mock()) with self.assertRaises(ValVideoNotFoundError): get_video_info("test_edx_video_id") class TestVideoWithBumper(TestVideo): """ Tests rendered content in presence of video bumper. """ CATEGORY = "video" METADATA = {} FEATURES = settings.FEATURES @patch('xmodule.video_module.bumper_utils.get_bumper_settings') def test_is_bumper_enabled(self, get_bumper_settings): """ Check that bumper is (not)shown if ENABLE_VIDEO_BUMPER is (False)True Assume that bumper settings are correct. 
""" self.FEATURES.update({ "SHOW_BUMPER_PERIODICITY": 1, "ENABLE_VIDEO_BUMPER": True, }) get_bumper_settings.return_value = { "video_id": "edx_video_id", "transcripts": {}, } with override_settings(FEATURES=self.FEATURES): self.assertTrue(bumper_utils.is_bumper_enabled(self.item_descriptor)) self.FEATURES.update({"ENABLE_VIDEO_BUMPER": False}) with override_settings(FEATURES=self.FEATURES): self.assertFalse(bumper_utils.is_bumper_enabled(self.item_descriptor)) @patch('xmodule.video_module.bumper_utils.is_bumper_enabled') @patch('xmodule.video_module.bumper_utils.get_bumper_settings') @patch('edxval.api.get_urls_for_profiles') def test_bumper_metadata(self, get_url_for_profiles, get_bumper_settings, is_bumper_enabled): """ Test content with rendered bumper metadata. """ get_url_for_profiles.return_value = { "desktop_mp4": "http://test_bumper.mp4", "desktop_webm": "", } get_bumper_settings.return_value = { "video_id": "edx_video_id", "transcripts": {}, } is_bumper_enabled.return_value = True content = self.item_descriptor.render(STUDENT_VIEW).content sources = [u'example.mp4', u'example.webm'] expected_context = { 'branding_info': None, 'license': None, 'bumper_metadata': json.dumps(OrderedDict({ 'saveStateUrl': self.item_descriptor.xmodule_runtime.ajax_url + '/save_user_state', "showCaptions": "true", "sources": ["http://test_bumper.mp4"], 'streams': '', "transcriptLanguage": "en", "transcriptLanguages": {"en": "English"}, "transcriptTranslationUrl": video_utils.set_query_parameter( self.item_descriptor.xmodule_runtime.handler_url( self.item_descriptor, 'transcript', 'translation/__lang__' ).rstrip('/?'), 'is_bumper', 1 ), "transcriptAvailableTranslationsUrl": video_utils.set_query_parameter( self.item_descriptor.xmodule_runtime.handler_url( self.item_descriptor, 'transcript', 'available_translations' ).rstrip('/?'), 'is_bumper', 1 ), })), 'cdn_eval': False, 'cdn_exp_group': None, 'display_name': u'A Name', 'download_video_link': u'example.mp4', 'handout': None, 
'id': self.item_descriptor.location.html_id(), 'metadata': json.dumps(OrderedDict({ "saveStateUrl": self.item_descriptor.xmodule_runtime.ajax_url + "/save_user_state", "autoplay": False, "streams": "0.75:jNCf2gIqpeE,1.00:ZwkTiUPN0mg,1.25:rsq9auxASqI,1.50:kMyNdzVHHgg", "sub": "a_sub_file.srt.sjson", "sources": sources, "captionDataDir": None, "showCaptions": "true", "generalSpeed": 1.0, "speed": None, "savedVideoPosition": 0.0, "start": 3603.0, "end": 3610.0, "transcriptLanguage": "en", "transcriptLanguages": OrderedDict({"en": "English", "uk": u"Українська"}), "ytTestTimeout": 1500, "ytApiUrl": "https://www.youtube.com/iframe_api", "ytMetadataUrl": "https://www.googleapis.com/youtube/v3/videos/", "ytKey": None, "transcriptTranslationUrl": self.item_descriptor.xmodule_runtime.handler_url( self.item_descriptor, 'transcript', 'translation/__lang__' ).rstrip('/?'), "transcriptAvailableTranslationsUrl": self.item_descriptor.xmodule_runtime.handler_url( self.item_descriptor, 'transcript', 'available_translations' ).rstrip('/?'), "autohideHtml5": False, "recordedYoutubeIsAvailable": True, })), 'track': None, 'transcript_download_format': 'srt', 'transcript_download_formats_list': [ {'display_name': 'SubRip (.srt) file', 'value': 'srt'}, {'display_name': 'Text (.txt) file', 'value': 'txt'} ], 'poster': json.dumps(OrderedDict({ "url": "http://img.youtube.com/vi/ZwkTiUPN0mg/0.jpg", "type": "youtube" })) } expected_content = self.item_descriptor.xmodule_runtime.render_template('video.html', expected_context) self.assertEqual(content, expected_content)
ayumilong/rethinkdb
refs/heads/next
external/v8_3.30.33.16/build/gyp/test/win/gyptest-link-base-address.py
137
#!/usr/bin/env python

# Copyright 2013 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""
Make sure the base address setting is extracted properly.
"""

import TestGyp

import re
import sys

if sys.platform == 'win32':
  test = TestGyp.TestGyp(formats=['msvs', 'ninja'])

  CHDIR = 'linker-flags'
  test.run_gyp('base-address.gyp', chdir=CHDIR)
  test.build('base-address.gyp', test.ALL, chdir=CHDIR)

  def GetHeaders(exe):
    """Return the `dumpbin /headers` output for a built binary."""
    full_path = test.built_file_path(exe, chdir=CHDIR)
    return test.run_dumpbin('/headers', full_path)

  # Matches the "image base" line of the dumpbin headers output.  The base
  # is printed in hex, so accept hex digits -- the previous [0-9]+ pattern
  # would fail to match a base such as 'a00000'.  Raw string avoids the
  # invalid \s escape warning on Python 3.
  image_base_reg_ex = re.compile(r'.*\s+([0-9a-fA-F]+) image base.*',
                                 re.DOTALL)

  def CheckImageBase(binary, expected):
    """Fail the test unless |binary| was linked at image base |expected|.

    |expected| is the hex image base as printed by dumpbin (no 0x prefix).
    """
    match = image_base_reg_ex.match(GetHeaders(binary))
    if not match or not match.group(1):
      test.fail_test()
    if match.group(1) != expected:
      test.fail_test()

  # Bases explicitly specified via the gyp 'BaseAddress' setting.
  CheckImageBase('test_base_specified_exe.exe', '420000')
  CheckImageBase('test_base_specified_dll.dll', '10420000')

  # Linker defaults: 0x400000 for executables, 0x10000000 for DLLs.
  CheckImageBase('test_base_default_exe.exe', '400000')
  CheckImageBase('test_base_default_dll.dll', '10000000')

  test.pass_test()
sfaleron/ConfigNG
refs/heads/master
parser.py
1
"""Direct access is not intended; instead use the toplevel package.""" from __future__ import absolute_import from __future__ import print_function from .errors import * from .configcont import ConfigCont from minisup import do_string import os.path as osp import re # detects a declaration or assignment item_r = re.compile('([^:+=\\s][^:+=]*)(:|[+]?=)(.*)') # detects a command cmd_r = re.compile('%([\\S]*)\\s+(.*)') try: import localsettings as ls global_cfg = ls.get_global_cfg() except ImportError: global_cfg = '' class ConfigParserError(Exception): def __init__(self, *args): e, n, fn = args self.args = ('%s:%d:%s' % (fn, n, e),) class ConfigIOError(ConfigBaseError): pass def add_sign(f, sign): bits = osp.splitext(f) return '%s_%s%s' % ( bits[0], sign, bits[1] ) def findfile(f, dirs): dirs = list(dirs) if global_cfg: dirs.append(global_cfg) for i in dirs: path = osp.join(i, f) if osp.exists( path ): print(osp.normpath(path)) return path print(dirs) raise ConfigIOError('File "%s" not found in include directories!' % (f,)) # python can be a little weird about nonscalar default function parameters class ClearContext(dict): def __init__(self, fname): dict.__init__( self, { \ 'config' : ConfigCont(None), 'incdirs' : ['.'], 'fname' : fname, 'n' : 0 } ) def get_config(file_): """Takes a filename or stream; returns a configuraton container. 
Raises ConfigParserError if a descendent of ConfigBaseError is raised while processing.""" return config_recursable(file_, **ClearContext(file_)) def config_recursable(file_, config, incdirs, fname='stream', n=0): if type(file_) == type(''): fd = open( findfile(file_, incdirs), 'r' ) fname = file_ else: fd = file_ conts = [] while True: ln = fd.readline() # end of file if not ln: break ln = ln.strip() # end of block if ln == '}': break n += 1 if not ln: continue if ln.startswith('#'): continue try: m = cmd_r.match(ln) if m: # commands cmd, args = m.groups() if cmd == 'include': config_recursable(args, config, incdirs) elif cmd == 'dict' or cmd == 'odict': if args.endswith('{'): key = args[:-1].strip() d = config_recursable( fd, ConfigCont(config), incdirs, fname, n ) else: idx = args.find('=') if idx == -1: raise ConfigParserError('Syntax Error', n, fname) key = args[:idx].strip() d = config_recursable( args[idx+1:].strip(), ConfigCont(config), incdirs ) config.add_child(key, d) elif cmd == 'includedir': incdirs.append(args) else: raise ConfigParserError('Unrecognized command', n, fname) else: # declarations and definitions m = item_r.match(ln) if not m: raise ConfigParserError('Syntax Error', n, fname) a, op, b = [i.strip() for i in m.groups()] if op == ':': key, whatis = a, b if not has_type(whatis): raise ConfigParserError('Unsupported type "%s"' % (whatis,), n, fname) try: if is_container(whatis): config.add_container(key, whatis) conts.append(key) else: config.add_item(key, whatis) except ConfigTypeError as e: raise ConfigParserError('Unrecognized type "%s"' % (whatis,), n, fname) else: key, val = a, b if not config.has_key(key): raise ConfigParserError('Unrecognized key "%s"' % (key,), n, fname) val, inc = do_string(fd, val) n += inc if op == '=': config[key] = val else: config.add_to_container(key, val) except ConfigBaseError as e: raise ConfigParserError(e[0], n, fname) if file_ is not fd: fd.close() for cont in conts: config.finalize_container(cont) return 
config __all__ = ('get_config', 'ConfigParserError') # cyclical imports, so it goes at the end from .api import *
gabrielleLQX/arm-none-eabi_install
refs/heads/master
tools/perf/scripts/python/netdev-times.py
11271
# Display a process of packets and processed time.
# It helps us to investigate networking or network device.
#
# options
# tx: show only tx chart
# rx: show only rx chart
# dev=: show only thing related to specified device
# debug: work with debug mode. It shows buffer status.

import os
import sys

sys.path.append(os.environ['PERF_EXEC_PATH'] + \
    '/scripts/python/Perf-Trace-Util/lib/Perf/Trace')

from perf_trace_context import *
from Core import *
from Util import *

all_event_list = []; # insert all tracepoint event related with this script
irq_dic = {}; # key is cpu and value is a list which stacks irqs
              # which raise NET_RX softirq
net_rx_dic = {}; # key is cpu and value include time of NET_RX softirq-entry
                 # and a list which stacks receive
receive_hunk_list = []; # a list which include a sequence of receive events
rx_skb_list = []; # received packet list for matching
                  # skb_copy_datagram_iovec

buffer_budget = 65536; # the budget of rx_skb_list, tx_queue_list and
                       # tx_xmit_list
of_count_rx_skb_list = 0; # overflow count

tx_queue_list = []; # list of packets which pass through dev_queue_xmit
of_count_tx_queue_list = 0; # overflow count

tx_xmit_list = []; # list of packets which pass through dev_hard_start_xmit
of_count_tx_xmit_list = 0; # overflow count

tx_free_list = []; # list of packets which is freed

# options
show_tx = 0;
show_rx = 0;
dev = 0; # store a name of device specified by option "dev="
debug = 0;

# indices of event_info tuple
EINFO_IDX_NAME= 0
EINFO_IDX_CONTEXT=1
EINFO_IDX_CPU= 2
EINFO_IDX_TIME= 3
EINFO_IDX_PID= 4
EINFO_IDX_COMM= 5

# Calculate a time interval(msec) from src(nsec) to dst(nsec)
def diff_msec(src, dst):
    return (dst - src) / 1000000.0

# Display a process of transmitting a packet: queueing latency (Qdisc)
# followed by driver/free latency, one line per freed packet.
def print_transmit(hunk):
    # honor the "dev=" filter if one was given
    if dev != 0 and hunk['dev'].find(dev) < 0:
        return
    print "%7s %5d %6d.%06dsec %12.3fmsec %12.3fmsec" % \
        (hunk['dev'], hunk['len'],
        nsecs_secs(hunk['queue_t']),
        nsecs_nsecs(hunk['queue_t'])/1000,
        diff_msec(hunk['queue_t'], hunk['xmit_t']),
        diff_msec(hunk['xmit_t'], hunk['free_t']))

# Format for displaying rx packet processing
PF_IRQ_ENTRY= " irq_entry(+%.3fmsec irq=%d:%s)"
PF_SOFT_ENTRY=" softirq_entry(+%.3fmsec)"
PF_NAPI_POLL= " napi_poll_exit(+%.3fmsec %s)"
PF_JOINT= " |"
PF_WJOINT= " | |"
PF_NET_RECV= " |---netif_receive_skb(+%.3fmsec skb=%x len=%d)"
PF_NET_RX= " |---netif_rx(+%.3fmsec skb=%x)"
PF_CPY_DGRAM= " | skb_copy_datagram_iovec(+%.3fmsec %d:%s)"
PF_KFREE_SKB= " | kfree_skb(+%.3fmsec location=%x)"
PF_CONS_SKB= " | consume_skb(+%.3fmsec)"

# Display a process of received packets and interrupts associated with
# a NET_RX softirq
def print_receive(hunk):
    show_hunk = 0
    irq_list = hunk['irq_list']
    cpu = irq_list[0]['cpu']
    # all offsets below are printed relative to the first irq entry time
    base_t = irq_list[0]['irq_ent_t']
    # check if this hunk should be showed
    if dev != 0:
        for i in range(len(irq_list)):
            if irq_list[i]['name'].find(dev) >= 0:
                show_hunk = 1
                break
    else:
        show_hunk = 1
    if show_hunk == 0:
        return
    print "%d.%06dsec cpu=%d" % \
        (nsecs_secs(base_t), nsecs_nsecs(base_t)/1000, cpu)
    for i in range(len(irq_list)):
        print PF_IRQ_ENTRY % \
            (diff_msec(base_t, irq_list[i]['irq_ent_t']),
            irq_list[i]['irq'], irq_list[i]['name'])
        print PF_JOINT
        irq_event_list = irq_list[i]['event_list']
        for j in range(len(irq_event_list)):
            irq_event = irq_event_list[j]
            if irq_event['event'] == 'netif_rx':
                print PF_NET_RX % \
                    (diff_msec(base_t, irq_event['time']),
                    irq_event['skbaddr'])
                print PF_JOINT
    print PF_SOFT_ENTRY % \
        diff_msec(base_t, hunk['sirq_ent_t'])
    print PF_JOINT
    event_list = hunk['event_list']
    for i in range(len(event_list)):
        event = event_list[i]
        if event['event_name'] == 'napi_poll':
            print PF_NAPI_POLL % \
                (diff_msec(base_t, event['event_t']), event['dev'])
            # the last napi_poll line closes the hunk with a blank line
            if i == len(event_list) - 1:
                print ""
            else:
                print PF_JOINT
        else:
            print PF_NET_RECV % \
                (diff_msec(base_t, event['event_t']), event['skbaddr'],
                event['len'])
            # 'comm' is set when the skb was copied to user space;
            # 'handle' when it was freed/consumed (see handle_kfree_skb)
            if 'comm' in event.keys():
                print PF_WJOINT
                print PF_CPY_DGRAM % \
                    (diff_msec(base_t, event['comm_t']),
                    event['pid'], event['comm'])
            elif 'handle' in event.keys():
                print PF_WJOINT
                if event['handle'] == "kfree_skb":
                    print PF_KFREE_SKB % \
                        (diff_msec(base_t,
                        event['comm_t']), event['location'])
                elif event['handle'] == "consume_skb":
                    print PF_CONS_SKB % \
                        diff_msec(base_t, event['comm_t'])
            print PF_JOINT

# Parse command line options (called once by perf before any events).
def trace_begin():
    global show_tx
    global show_rx
    global dev
    global debug
    for i in range(len(sys.argv)):
        if i == 0:
            continue
        arg = sys.argv[i]
        if arg == 'tx':
            show_tx = 1
        elif arg =='rx':
            show_rx = 1
        elif arg.find('dev=',0, 4) >= 0:
            dev = arg[4:]
        elif arg == 'debug':
            debug = 1
    # default: show both charts when neither was requested
    if show_tx == 0 and show_rx == 0:
        show_tx = 1
        show_rx = 1

# Replay the buffered events in time order, dispatch them to the handlers
# below, then print the collected rx/tx hunks (called once at exit).
def trace_end():
    # order all events in time
    all_event_list.sort(lambda a,b :cmp(a[EINFO_IDX_TIME],
                                        b[EINFO_IDX_TIME]))
    # process all events
    for i in range(len(all_event_list)):
        event_info = all_event_list[i]
        name = event_info[EINFO_IDX_NAME]
        if name == 'irq__softirq_exit':
            handle_irq_softirq_exit(event_info)
        elif name == 'irq__softirq_entry':
            handle_irq_softirq_entry(event_info)
        elif name == 'irq__softirq_raise':
            handle_irq_softirq_raise(event_info)
        elif name == 'irq__irq_handler_entry':
            handle_irq_handler_entry(event_info)
        elif name == 'irq__irq_handler_exit':
            handle_irq_handler_exit(event_info)
        elif name == 'napi__napi_poll':
            handle_napi_poll(event_info)
        elif name == 'net__netif_receive_skb':
            handle_netif_receive_skb(event_info)
        elif name == 'net__netif_rx':
            handle_netif_rx(event_info)
        elif name == 'skb__skb_copy_datagram_iovec':
            handle_skb_copy_datagram_iovec(event_info)
        elif name == 'net__net_dev_queue':
            handle_net_dev_queue(event_info)
        elif name == 'net__net_dev_xmit':
            handle_net_dev_xmit(event_info)
        elif name == 'skb__kfree_skb':
            handle_kfree_skb(event_info)
        elif name == 'skb__consume_skb':
            handle_consume_skb(event_info)
    # display receive hunks
    if show_rx:
        for i in range(len(receive_hunk_list)):
            print_receive(receive_hunk_list[i])
    # display transmit hunks
    if show_tx:
        print "   dev    len      Qdisc        " \
            "       netdevice             free"
        for i in range(len(tx_free_list)):
            print_transmit(tx_free_list[i])
    if debug:
        print "debug buffer status"
        print "----------------------------"
        print "xmit Qdisc:remain:%d overflow:%d" % \
            (len(tx_queue_list), of_count_tx_queue_list)
        print "xmit netdevice:remain:%d overflow:%d" % \
            (len(tx_xmit_list), of_count_tx_xmit_list)
        print "receive:remain:%d overflow:%d" % \
            (len(rx_skb_list), of_count_rx_skb_list)

# called from perf, when it finds a corresponding event
# These callbacks only buffer the raw event tuples; the real correlation
# work happens in trace_end() after a global time sort.
def irq__softirq_entry(name, context, cpu, sec, nsec, pid, comm, vec):
    # only NET_RX softirqs are of interest to this script
    if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
        return
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
    all_event_list.append(event_info)

def irq__softirq_exit(name, context, cpu, sec, nsec, pid, comm, vec):
    if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
        return
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
    all_event_list.append(event_info)

def irq__softirq_raise(name, context, cpu, sec, nsec, pid, comm, vec):
    if symbol_str("irq__softirq_entry", "vec", vec) != "NET_RX":
        return
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, vec)
    all_event_list.append(event_info)

def irq__irq_handler_entry(name, context, cpu, sec, nsec, pid, comm,
                           irq, irq_name):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  irq, irq_name)
    all_event_list.append(event_info)

def irq__irq_handler_exit(name, context, cpu, sec, nsec, pid, comm, irq, ret):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm, irq, ret)
    all_event_list.append(event_info)

def napi__napi_poll(name, context, cpu, sec, nsec, pid, comm, napi, dev_name):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  napi, dev_name)
    all_event_list.append(event_info)

def net__netif_receive_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr,
                           skblen, dev_name):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  skbaddr, skblen, dev_name)
    all_event_list.append(event_info)

def net__netif_rx(name, context, cpu, sec, nsec, pid, comm, skbaddr,
                  skblen, dev_name):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  skbaddr, skblen, dev_name)
    all_event_list.append(event_info)

def net__net_dev_queue(name, context, cpu, sec, nsec, pid, comm,
                       skbaddr, skblen, dev_name):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  skbaddr, skblen, dev_name)
    all_event_list.append(event_info)

def net__net_dev_xmit(name, context, cpu, sec, nsec, pid, comm,
                      skbaddr, skblen, rc, dev_name):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  skbaddr, skblen, rc ,dev_name)
    all_event_list.append(event_info)

def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
                   skbaddr, protocol, location):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  skbaddr, protocol, location)
    all_event_list.append(event_info)

def skb__consume_skb(name, context, cpu, sec, nsec, pid, comm, skbaddr):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  skbaddr)
    all_event_list.append(event_info)

def skb__skb_copy_datagram_iovec(name, context, cpu, sec, nsec, pid, comm,
    skbaddr, skblen):
    event_info = (name, context, cpu, nsecs(sec, nsec), pid, comm,
                  skbaddr, skblen)
    all_event_list.append(event_info)

# Push a new irq record onto the per-cpu stack (irqs can nest).
def handle_irq_handler_entry(event_info):
    (name, context, cpu, time, pid, comm, irq, irq_name) = event_info
    if cpu not in irq_dic.keys():
        irq_dic[cpu] = []
    irq_record = {'irq':irq, 'name':irq_name, 'cpu':cpu, 'irq_ent_t':time}
    irq_dic[cpu].append(irq_record)

# Pop the matching irq record; keep it only if a NET_RX-related event was
# recorded during this irq (see the 'event_list' key).
def handle_irq_handler_exit(event_info):
    (name, context, cpu, time, pid, comm, irq, ret) = event_info
    if cpu not in irq_dic.keys():
        return
    irq_record = irq_dic[cpu].pop()
    if irq != irq_record['irq']:
        return
    irq_record.update({'irq_ext_t':time})
    # if an irq doesn't include NET_RX softirq, drop.
    if 'event_list' in irq_record.keys():
        irq_dic[cpu].append(irq_record)

# Record a NET_RX softirq raise inside the current irq record.
def handle_irq_softirq_raise(event_info):
    (name, context, cpu, time, pid, comm, vec) = event_info
    if cpu not in irq_dic.keys() \
    or len(irq_dic[cpu]) == 0:
        return
    irq_record = irq_dic[cpu].pop()
    if 'event_list' in irq_record.keys():
        irq_event_list = irq_record['event_list']
    else:
        irq_event_list = []
    irq_event_list.append({'time':time, 'event':'sirq_raise'})
    irq_record.update({'event_list':irq_event_list})
    irq_dic[cpu].append(irq_record)

# Open a fresh per-cpu NET_RX softirq context.
def handle_irq_softirq_entry(event_info):
    (name, context, cpu, time, pid, comm, vec) = event_info
    net_rx_dic[cpu] = {'sirq_ent_t':time, 'event_list':[]}

# Close the per-cpu softirq context: merge the stacked irqs and the
# softirq's event list into one "receive hunk" for later display.
def handle_irq_softirq_exit(event_info):
    (name, context, cpu, time, pid, comm, vec) = event_info
    irq_list = []
    event_list = 0
    if cpu in irq_dic.keys():
        irq_list = irq_dic[cpu]
        del irq_dic[cpu]
    if cpu in net_rx_dic.keys():
        sirq_ent_t = net_rx_dic[cpu]['sirq_ent_t']
        event_list = net_rx_dic[cpu]['event_list']
        del net_rx_dic[cpu]
    if irq_list == [] or event_list == 0:
        return
    rec_data = {'sirq_ent_t':sirq_ent_t, 'sirq_ext_t':time,
                'irq_list':irq_list, 'event_list':event_list}
    # merge information related to a NET_RX softirq
    receive_hunk_list.append(rec_data)

# Attach a napi_poll event to the current softirq context, if any.
def handle_napi_poll(event_info):
    (name, context, cpu, time, pid, comm, napi, dev_name) = event_info
    if cpu in net_rx_dic.keys():
        event_list = net_rx_dic[cpu]['event_list']
        rec_data = {'event_name':'napi_poll',
                    'dev':dev_name, 'event_t':time}
        event_list.append(rec_data)

# Attach a netif_rx event to the irq record currently on the stack.
def handle_netif_rx(event_info):
    (name, context, cpu, time, pid, comm,
     skbaddr, skblen, dev_name) = event_info
    if cpu not in irq_dic.keys() \
    or len(irq_dic[cpu]) == 0:
        return
    irq_record = irq_dic[cpu].pop()
    if 'event_list' in irq_record.keys():
        irq_event_list = irq_record['event_list']
    else:
        irq_event_list = []
    irq_event_list.append({'time':time, 'event':'netif_rx',
        'skbaddr':skbaddr, 'skblen':skblen, 'dev_name':dev_name})
    irq_record.update({'event_list':irq_event_list})
    irq_dic[cpu].append(irq_record)

# Record a received skb for the softirq context and remember it in
# rx_skb_list so a later copy/free event can be matched by skbaddr.
def handle_netif_receive_skb(event_info):
    global of_count_rx_skb_list

    (name, context, cpu, time, pid, comm,
     skbaddr, skblen, dev_name) = event_info
    if cpu in net_rx_dic.keys():
        rec_data = {'event_name':'netif_receive_skb',
                    'event_t':time, 'skbaddr':skbaddr, 'len':skblen}
        event_list = net_rx_dic[cpu]['event_list']
        event_list.append(rec_data)
        rx_skb_list.insert(0, rec_data)
        # keep the matching list bounded; count what falls off the end
        if len(rx_skb_list) > buffer_budget:
            rx_skb_list.pop()
            of_count_rx_skb_list += 1

# A packet entered the Qdisc: start tracking it by skbaddr.
def handle_net_dev_queue(event_info):
    global of_count_tx_queue_list

    (name, context, cpu, time, pid, comm,
     skbaddr, skblen, dev_name) = event_info
    skb = {'dev':dev_name, 'skbaddr':skbaddr, 'len':skblen, 'queue_t':time}
    tx_queue_list.insert(0, skb)
    if len(tx_queue_list) > buffer_budget:
        tx_queue_list.pop()
        of_count_tx_queue_list += 1

# The driver accepted the packet: move it from the queue list to the
# xmit list, stamping the xmit time.
def handle_net_dev_xmit(event_info):
    global of_count_tx_xmit_list

    (name, context, cpu, time, pid, comm,
     skbaddr, skblen, rc, dev_name) = event_info
    if rc == 0: # NETDEV_TX_OK
        for i in range(len(tx_queue_list)):
            skb = tx_queue_list[i]
            if skb['skbaddr'] == skbaddr:
                skb['xmit_t'] = time
                tx_xmit_list.insert(0, skb)
                del tx_queue_list[i]
                if len(tx_xmit_list) > buffer_budget:
                    tx_xmit_list.pop()
                    of_count_tx_xmit_list += 1
                return

# An skb was dropped/freed: close out whichever tracking list holds it
# (tx queue, tx xmit, or rx matching list).
def handle_kfree_skb(event_info):
    (name, context, cpu, time, pid, comm,
     skbaddr, protocol, location) = event_info
    for i in range(len(tx_queue_list)):
        skb = tx_queue_list[i]
        if skb['skbaddr'] == skbaddr:
            del tx_queue_list[i]
            return
    for i in range(len(tx_xmit_list)):
        skb = tx_xmit_list[i]
        if skb['skbaddr'] == skbaddr:
            skb['free_t'] = time
            tx_free_list.append(skb)
            del tx_xmit_list[i]
            return
    for i in range(len(rx_skb_list)):
        rec_data = rx_skb_list[i]
        if rec_data['skbaddr'] == skbaddr:
            rec_data.update({'handle':"kfree_skb",
                             'comm':comm, 'pid':pid, 'comm_t':time})
            del rx_skb_list[i]
            return

# A transmitted skb was consumed successfully: record its free time.
def handle_consume_skb(event_info):
    (name, context, cpu, time, pid, comm, skbaddr) = event_info
    for i in range(len(tx_xmit_list)):
        skb = tx_xmit_list[i]
        if skb['skbaddr'] == skbaddr:
            skb['free_t'] = time
            tx_free_list.append(skb)
            del tx_xmit_list[i]
            return

# The skb's payload was copied to user space: note which process
# received it, then stop tracking the skb.
def handle_skb_copy_datagram_iovec(event_info):
    (name, context, cpu, time, pid, comm, skbaddr, skblen) = event_info
    for i in range(len(rx_skb_list)):
        rec_data = rx_skb_list[i]
        if skbaddr == rec_data['skbaddr']:
            rec_data.update({'handle':"skb_copy_datagram_iovec",
                             'comm':comm, 'pid':pid, 'comm_t':time})
            del rx_skb_list[i]
            return
pandeyop/rally
refs/heads/master
rally/cmd/commands/verify.py
2
# Copyright 2014: Mirantis Inc. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Rally command: verify """ import csv import json import os import six from rally import api from rally.cmd import cliutils from rally.cmd import envutils from rally.common import fileutils from rally.common.i18n import _ from rally import consts from rally import db from rally import exceptions from rally import objects from rally.verification.tempest import diff from rally.verification.tempest import json2html class VerifyCommands(object): """Test cloud with Tempest Set of commands that allow you to perform Tempest tests of OpenStack live cloud. """ @cliutils.deprecated_args( "--deploy-id", dest="deployment", type=str, required=False, help="UUID of the deployment.") @cliutils.args("--deployment", dest="deployment", type=str, required=False, help="UUID or name of a deployment.") @cliutils.args("--set", dest="set_name", type=str, required=False, help="Name of tempest test set. Available sets: %s" % ", ". 
join(list(consts.TempestTestsSets) + list(consts.TempestTestsAPI))) @cliutils.args("--regex", dest="regex", type=str, required=False, help="Regular expression of test.") @cliutils.args("--tempest-config", dest="tempest_config", type=str, required=False, help="User specified Tempest config file location") @cliutils.args("--no-use", action="store_false", dest="do_use", help="Don't set new task as default for future operations") @envutils.with_default_deployment(cli_arg_name="deployment") def start(self, set_name="", deployment=None, regex=None, tempest_config=None, do_use=False): """Start set of tests. :param set_name: Name of tempest test set :param deployment: UUID or name of a deployment :param regex: Regular expression of test :param tempest_config: User specified Tempest config file location """ if regex and set_name: raise exceptions.InvalidArgumentsException("set_name and regex " "are not compatible") if not (regex or set_name): set_name = "full" if set_name and set_name not in (list(consts.TempestTestsSets) + list(consts.TempestTestsAPI)): print("Sorry, but there are no desired tempest test set. Please " "choose from: %s" % ", ".join(list(consts.TempestTestsSets) + list(consts.TempestTestsAPI))) return (1) verification = api.Verification.verify(deployment, set_name, regex, tempest_config) if do_use: self.use(verification["uuid"]) def list(self): """Display all verifications table, started and finished.""" fields = ["UUID", "Deployment UUID", "Set name", "Tests", "Failures", "Created at", "Duration", "Status"] verifications = db.verification_list() for el in verifications: el["duration"] = el["updated_at"] - el["created_at"] if verifications: cliutils.print_list(verifications, fields, sortby_index=fields.index("Created at")) else: print(_("There are no results from verifier. 
To run a verifier, " "use:\nrally verify start")) @cliutils.args("--uuid", type=str, dest="verification_uuid", help="UUID of the verification") @cliutils.args("--html", action="store_true", dest="output_html", help=("Results will be in html format")) @cliutils.args("--json", action="store_true", dest="output_json", help=("Results will be in json format")) @cliutils.args("--output-file", type=str, required=False, dest="output_file", help="If specified, output will be saved to given file") @envutils.with_default_verification_id @cliutils.suppress_warnings def results(self, verification_uuid=None, output_file=None, output_html=None, output_json=None): """Get raw results of the verification. :param verification_uuid: Verification UUID :param output_file: If specified, output will be saved to given file :param output_html: The output will be in HTML format :param output_json: The output will be in JSON format (Default) """ try: results = db.verification_result_get(verification_uuid)["data"] except exceptions.NotFoundException as e: print(six.text_type(e)) return 1 result = "" if output_json + output_html > 1: print("Please specify only one output format.") elif output_html: result = json2html.HtmlOutput(results).create_report() else: result = json.dumps(results, sort_keys=True, indent=4) if output_file: output_file = os.path.expanduser(output_file) with open(output_file, "wb") as f: f.write(result) else: print(result) @cliutils.args("--uuid", dest="verification_uuid", type=str, required=False, help="UUID of a verification") @cliutils.args("--sort-by", dest="sort_by", type=str, required=False, help="Tests can be sorted by 'name' or 'duration'") @cliutils.args("--detailed", dest="detailed", action="store_true", required=False, help="Prints traceback of failed tests") @envutils.with_default_verification_id def show(self, verification_uuid=None, sort_by="name", detailed=False): """Display results table of the verification.""" try: sortby_index = ("name", 
"duration").index(sort_by) except ValueError: print("Sorry, but verification results can't be sorted " "by '%s'." % sort_by) return 1 try: verification = db.verification_get(verification_uuid) tests = db.verification_result_get(verification_uuid) except exceptions.NotFoundException as e: print(six.text_type(e)) return 1 print ("Total results of verification:\n") total_fields = ["UUID", "Deployment UUID", "Set name", "Tests", "Failures", "Created at", "Status"] cliutils.print_list([verification], fields=total_fields) print ("\nTests:\n") fields = ["name", "time", "status"] values = [objects.Verification(test) for test in six.itervalues(tests.data["test_cases"])] cliutils.print_list(values, fields, sortby_index=sortby_index) if detailed: for test in six.itervalues(tests.data["test_cases"]): if test["status"] == "FAIL": header = cliutils.make_header( "FAIL: %(name)s\n" "Time: %(time)s\n" "Type: %(type)s" % {"name": test["name"], "time": test["time"], "type": test["failure"]["type"]}) formatted_test = "%(header)s%(log)s\n" % { "header": header, "log": test["failure"]["log"]} print (formatted_test) @cliutils.args("--uuid", dest="verification_uuid", type=str, required=False, help="UUID of a verification") @cliutils.args("--sort-by", dest="sort_by", type=str, required=False, help="Tests can be sorted by 'name' or 'duration'") @envutils.with_default_verification_id def detailed(self, verification_uuid=None, sort_by="name"): """Display results table of verification with detailed errors.""" self.show(verification_uuid, sort_by, True) @cliutils.args("--uuid-1", type=str, dest="uuid1", help="UUID of the first verification") @cliutils.args("--uuid-2", type=str, dest="uuid2", help="UUID of the second verification") @cliutils.args("--csv", action="store_true", dest="output_csv", help=("Save results in csv format to specified file")) @cliutils.args("--html", action="store_true", dest="output_html", help=("Save results in html format to specified file")) @cliutils.args("--json", 
action="store_true", dest="output_json", help=("Save results in json format to specified file")) @cliutils.args("--output-file", type=str, required=False, dest="output_file", help="If specified, output will be saved to given file") @cliutils.args("--threshold", type=int, required=False, dest="threshold", default=0, help="If specified, timing differences must exceed this " "percentage threshold to be included in output") def compare(self, uuid1=None, uuid2=None, output_file=None, output_csv=None, output_html=None, output_json=None, threshold=0): """Compare two verification results. :param uuid1: First Verification UUID :param uuid2: Second Verification UUID :param output_file: If specified, output will be saved to given file :param output_csv: Save results in csv format to the specified file :param output_html: Save results in html format to the specified file :param output_json: Save results in json format to the specified file (Default) :param threshold: Timing difference threshold percentage """ try: results1 = db.verification_result_get(uuid1)["data"]["test_cases"] results2 = db.verification_result_get(uuid2)["data"]["test_cases"] _diff = diff.Diff(results1, results2, threshold) except exceptions.NotFoundException as e: print(six.text_type(e)) return 1 result = "" if output_json + output_html + output_csv > 1: print("Please specify only one output format, either --json, " "--html or --csv.") return 1 elif output_html: result = _diff.to_html() elif output_csv: result = _diff.to_csv() else: result = _diff.to_json() if output_file: with open(output_file, "wb") as f: if output_csv: writer = csv.writer(f, dialect="excel") writer.writerows(result) else: f.write(result) else: print(result) @cliutils.args("--verification", type=str, dest="verification", required=False, help="UUID of the verification") def use(self, verification): """Set active verification. 
Alias for "rally use verification" :param verification: a UUID of verification """ print("Verification UUID: %s" % verification) db.verification_get(verification) fileutils.update_globals_file("RALLY_VERIFICATION", verification)
milinbhakta/flaskjinja
refs/heads/master
flask1/Lib/collections/abc.py
274
from _collections_abc import * from _collections_abc import __all__
sanyaade-iot/Arduino-1
refs/heads/esp8266
arduino-core/src/processing/app/i18n/python/requests/packages/urllib3/connectionpool.py
184
# urllib3/connectionpool.py # Copyright 2008-2012 Andrey Petrov and contributors (see CONTRIBUTORS.txt) # # This module is part of urllib3 and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php import logging import socket import errno from socket import error as SocketError, timeout as SocketTimeout from .util import resolve_cert_reqs, resolve_ssl_version try: # Python 3 from http.client import HTTPConnection, HTTPException from http.client import HTTP_PORT, HTTPS_PORT except ImportError: from httplib import HTTPConnection, HTTPException from httplib import HTTP_PORT, HTTPS_PORT try: # Python 3 from queue import LifoQueue, Empty, Full except ImportError: from Queue import LifoQueue, Empty, Full try: # Compiled with SSL? HTTPSConnection = object BaseSSLError = None ssl = None try: # Python 3 from http.client import HTTPSConnection except ImportError: from httplib import HTTPSConnection import ssl BaseSSLError = ssl.SSLError except (ImportError, AttributeError): # Platform-specific: No SSL. pass from .request import RequestMethods from .response import HTTPResponse from .util import get_host, is_connection_dropped, ssl_wrap_socket from .exceptions import ( ClosedPoolError, EmptyPoolError, HostChangedError, MaxRetryError, SSLError, TimeoutError, ) from .packages.ssl_match_hostname import match_hostname, CertificateError from .packages import six xrange = six.moves.xrange log = logging.getLogger(__name__) _Default = object() port_by_scheme = { 'http': HTTP_PORT, 'https': HTTPS_PORT, } ## Connection objects (extension of httplib) class VerifiedHTTPSConnection(HTTPSConnection): """ Based on httplib.HTTPSConnection but wraps the socket with SSL certification. 
""" cert_reqs = None ca_certs = None ssl_version = None def set_cert(self, key_file=None, cert_file=None, cert_reqs=None, ca_certs=None): self.key_file = key_file self.cert_file = cert_file self.cert_reqs = cert_reqs self.ca_certs = ca_certs def connect(self): # Add certificate verification sock = socket.create_connection((self.host, self.port), self.timeout) resolved_cert_reqs = resolve_cert_reqs(self.cert_reqs) resolved_ssl_version = resolve_ssl_version(self.ssl_version) # Wrap socket using verification with the root certs in # trusted_root_certs self.sock = ssl_wrap_socket(sock, self.key_file, self.cert_file, cert_reqs=resolved_cert_reqs, ca_certs=self.ca_certs, server_hostname=self.host, ssl_version=resolved_ssl_version) if resolved_cert_reqs != ssl.CERT_NONE: match_hostname(self.sock.getpeercert(), self.host) ## Pool objects class ConnectionPool(object): """ Base class for all connection pools, such as :class:`.HTTPConnectionPool` and :class:`.HTTPSConnectionPool`. """ scheme = None QueueCls = LifoQueue def __init__(self, host, port=None): self.host = host self.port = port def __str__(self): return '%s(host=%r, port=%r)' % (type(self).__name__, self.host, self.port) class HTTPConnectionPool(ConnectionPool, RequestMethods): """ Thread-safe connection pool for one host. :param host: Host used for this HTTP Connection (e.g. "localhost"), passed into :class:`httplib.HTTPConnection`. :param port: Port used for this HTTP Connection (None is equivalent to 80), passed into :class:`httplib.HTTPConnection`. :param strict: Causes BadStatusLine to be raised if the status line can't be parsed as a valid HTTP/1.0 or 1.1 status line, passed into :class:`httplib.HTTPConnection`. :param timeout: Socket timeout for each individual connection, can be a float. None disables timeout. :param maxsize: Number of connections to save that can be reused. More than 1 is useful in multithreaded situations. 
If ``block`` is set to false, more connections will be created but they will not be saved once they've been used. :param block: If set to True, no more than ``maxsize`` connections will be used at a time. When no free connections are available, the call will block until a connection has been released. This is a useful side effect for particular multithreaded situations where one does not want to use more than maxsize connections per host to prevent flooding. :param headers: Headers to include with all requests, unless other headers are given explicitly. """ scheme = 'http' def __init__(self, host, port=None, strict=False, timeout=None, maxsize=1, block=False, headers=None): ConnectionPool.__init__(self, host, port) RequestMethods.__init__(self, headers) self.strict = strict self.timeout = timeout self.pool = self.QueueCls(maxsize) self.block = block # Fill the queue up so that doing get() on it will block properly for _ in xrange(maxsize): self.pool.put(None) # These are mostly for testing and debugging purposes. self.num_connections = 0 self.num_requests = 0 def _new_conn(self): """ Return a fresh :class:`httplib.HTTPConnection`. """ self.num_connections += 1 log.info("Starting new HTTP connection (%d): %s" % (self.num_connections, self.host)) return HTTPConnection(host=self.host, port=self.port, strict=self.strict) def _get_conn(self, timeout=None): """ Get a connection. Will return a pooled connection if one is available. If no connections are available and :prop:`.block` is ``False``, then a fresh connection is returned. :param timeout: Seconds to wait before giving up and raising :class:`urllib3.exceptions.EmptyPoolError` if the pool is empty and :prop:`.block` is ``True``. 
""" conn = None try: conn = self.pool.get(block=self.block, timeout=timeout) except AttributeError: # self.pool is None raise ClosedPoolError(self, "Pool is closed.") except Empty: if self.block: raise EmptyPoolError(self, "Pool reached maximum size and no more " "connections are allowed.") pass # Oh well, we'll create a new connection then # If this is a persistent connection, check if it got disconnected if conn and is_connection_dropped(conn): log.info("Resetting dropped connection: %s" % self.host) conn.close() return conn or self._new_conn() def _put_conn(self, conn): """ Put a connection back into the pool. :param conn: Connection object for the current host and port as returned by :meth:`._new_conn` or :meth:`._get_conn`. If the pool is already full, the connection is closed and discarded because we exceeded maxsize. If connections are discarded frequently, then maxsize should be increased. If the pool is closed, then the connection will be closed and discarded. """ try: self.pool.put(conn, block=False) return # Everything is dandy, done. except AttributeError: # self.pool is None. pass except Full: # This should never happen if self.block == True log.warning("HttpConnectionPool is full, discarding connection: %s" % self.host) # Connection never got put back into the pool, close it. conn.close() def _make_request(self, conn, method, url, timeout=_Default, **httplib_request_kw): """ Perform a request on a given httplib connection object taken from our pool. """ self.num_requests += 1 if timeout is _Default: timeout = self.timeout conn.timeout = timeout # This only does anything in Py26+ conn.request(method, url, **httplib_request_kw) # Set timeout sock = getattr(conn, 'sock', False) # AppEngine doesn't have sock attr. 
if sock: sock.settimeout(timeout) try: # Python 2.7+, use buffering of HTTP responses httplib_response = conn.getresponse(buffering=True) except TypeError: # Python 2.6 and older httplib_response = conn.getresponse() # AppEngine doesn't have a version attr. http_version = getattr(conn, '_http_vsn_str', 'HTTP/?') log.debug("\"%s %s %s\" %s %s" % (method, url, http_version, httplib_response.status, httplib_response.length)) return httplib_response def close(self): """ Close all pooled connections and disable the pool. """ # Disable access to the pool old_pool, self.pool = self.pool, None try: while True: conn = old_pool.get(block=False) if conn: conn.close() except Empty: pass # Done. def is_same_host(self, url): """ Check if the given ``url`` is a member of the same host as this connection pool. """ if url.startswith('/'): return True # TODO: Add optional support for socket.gethostbyname checking. scheme, host, port = get_host(url) if self.port and not port: # Use explicit default port for comparison when none is given. port = port_by_scheme.get(scheme) return (scheme, host, port) == (self.scheme, self.host, self.port) def urlopen(self, method, url, body=None, headers=None, retries=3, redirect=True, assert_same_host=True, timeout=_Default, pool_timeout=None, release_conn=None, **response_kw): """ Get a connection from the pool and perform an HTTP request. This is the lowest level call for making a request, so you'll need to specify all the raw details. .. note:: More commonly, it's appropriate to use a convenience method provided by :class:`.RequestMethods`, such as :meth:`request`. .. note:: `release_conn` will only behave as expected if `preload_content=False` because we want to make `preload_content=False` the default behaviour someday soon without breaking backwards compatibility. :param method: HTTP request method (such as GET, POST, PUT, etc.) 
:param body: Data to send in the request body (useful for creating POST requests, see HTTPConnectionPool.post_url for more convenience). :param headers: Dictionary of custom headers to send, such as User-Agent, If-None-Match, etc. If None, pool headers are used. If provided, these headers completely replace any pool-specific headers. :param retries: Number of retries to allow before raising a MaxRetryError exception. :param redirect: If True, automatically handle redirects (status codes 301, 302, 303, 307). Each redirect counts as a retry. :param assert_same_host: If ``True``, will make sure that the host of the pool requests is consistent else will raise HostChangedError. When False, you can use the pool on an HTTP proxy and request foreign hosts. :param timeout: If specified, overrides the default timeout for this one request. :param pool_timeout: If set and the pool is set to block=True, then this method will block for ``pool_timeout`` seconds and raise EmptyPoolError if no connection is available within the time period. :param release_conn: If False, then the urlopen call will not release the connection back into the pool once a response is received (but will release if you read the entire contents of the response such as when `preload_content=True`). This is useful if you're not preloading the response's content immediately. You will need to call ``r.release_conn()`` on the response ``r`` to return the connection back into the pool. If None, it takes the value of ``response_kw.get('preload_content', True)``. 
:param \**response_kw: Additional parameters are passed to :meth:`urllib3.response.HTTPResponse.from_httplib` """ if headers is None: headers = self.headers if retries < 0: raise MaxRetryError(self, url) if timeout is _Default: timeout = self.timeout if release_conn is None: release_conn = response_kw.get('preload_content', True) # Check host if assert_same_host and not self.is_same_host(url): host = "%s://%s" % (self.scheme, self.host) if self.port: host = "%s:%d" % (host, self.port) raise HostChangedError(self, url, retries - 1) conn = None try: # Request a connection from the queue conn = self._get_conn(timeout=pool_timeout) # Make the request on the httplib connection object httplib_response = self._make_request(conn, method, url, timeout=timeout, body=body, headers=headers) # If we're going to release the connection in ``finally:``, then # the request doesn't need to know about the connection. Otherwise # it will also try to release it and we'll have a double-release # mess. response_conn = not release_conn and conn # Import httplib's response into our own wrapper object response = HTTPResponse.from_httplib(httplib_response, pool=self, connection=response_conn, **response_kw) # else: # The connection will be put back into the pool when # ``response.release_conn()`` is called (implicitly by # ``response.read()``) except Empty as e: # Timed out by queue raise TimeoutError(self, "Request timed out. (pool_timeout=%s)" % pool_timeout) except SocketTimeout as e: # Timed out by socket raise TimeoutError(self, "Request timed out. (timeout=%s)" % timeout) except BaseSSLError as e: # SSL certificate error raise SSLError(e) except CertificateError as e: # Name mismatch raise SSLError(e) except (HTTPException, SocketError) as e: # Connection broken, discard. It will be replaced next _get_conn(). 
conn = None # This is necessary so we can access e below err = e if retries == 0: raise MaxRetryError(self, url, e) finally: if release_conn: # Put the connection back to be reused. If the connection is # expired then it will be None, which will get replaced with a # fresh connection during _get_conn. self._put_conn(conn) if not conn: # Try again log.warn("Retrying (%d attempts remain) after connection " "broken by '%r': %s" % (retries, err, url)) return self.urlopen(method, url, body, headers, retries - 1, redirect, assert_same_host, timeout=timeout, pool_timeout=pool_timeout, release_conn=release_conn, **response_kw) # Handle redirect? redirect_location = redirect and response.get_redirect_location() if redirect_location: if response.status == 303: method = 'GET' log.info("Redirecting %s -> %s" % (url, redirect_location)) return self.urlopen(method, redirect_location, body, headers, retries - 1, redirect, assert_same_host, timeout=timeout, pool_timeout=pool_timeout, release_conn=release_conn, **response_kw) return response class HTTPSConnectionPool(HTTPConnectionPool): """ Same as :class:`.HTTPConnectionPool`, but HTTPS. When Python is compiled with the :mod:`ssl` module, then :class:`.VerifiedHTTPSConnection` is used, which *can* verify certificates, instead of :class:`httplib.HTTPSConnection`. The ``key_file``, ``cert_file``, ``cert_reqs``, ``ca_certs``, and ``ssl_version`` are only used if :mod:`ssl` is available and are fed into :meth:`urllib3.util.ssl_wrap_socket` to upgrade the connection socket into an SSL socket. 
""" scheme = 'https' def __init__(self, host, port=None, strict=False, timeout=None, maxsize=1, block=False, headers=None, key_file=None, cert_file=None, cert_reqs=None, ca_certs=None, ssl_version=None): HTTPConnectionPool.__init__(self, host, port, strict, timeout, maxsize, block, headers) self.key_file = key_file self.cert_file = cert_file self.cert_reqs = cert_reqs self.ca_certs = ca_certs self.ssl_version = ssl_version def _new_conn(self): """ Return a fresh :class:`httplib.HTTPSConnection`. """ self.num_connections += 1 log.info("Starting new HTTPS connection (%d): %s" % (self.num_connections, self.host)) if not ssl: # Platform-specific: Python compiled without +ssl if not HTTPSConnection or HTTPSConnection is object: raise SSLError("Can't connect to HTTPS URL because the SSL " "module is not available.") return HTTPSConnection(host=self.host, port=self.port, strict=self.strict) connection = VerifiedHTTPSConnection(host=self.host, port=self.port, strict=self.strict) connection.set_cert(key_file=self.key_file, cert_file=self.cert_file, cert_reqs=self.cert_reqs, ca_certs=self.ca_certs) connection.ssl_version = self.ssl_version return connection def connection_from_url(url, **kw): """ Given a url, return an :class:`.ConnectionPool` instance of its host. This is a shortcut for not having to parse out the scheme, host, and port of the url before creating an :class:`.ConnectionPool` instance. :param url: Absolute URL string that must include the scheme. Port is optional. :param \**kw: Passes additional parameters to the constructor of the appropriate :class:`.ConnectionPool`. Useful for specifying things like timeout, maxsize, headers, etc. Example: :: >>> conn = connection_from_url('http://google.com/') >>> r = conn.request('GET', '/') """ scheme, host, port = get_host(url) if scheme == 'https': return HTTPSConnectionPool(host, port=port, **kw) else: return HTTPConnectionPool(host, port=port, **kw)
girving/tensorflow
refs/heads/master
tensorflow/contrib/linear_optimizer/python/ops/sparse_feature_column.py
119
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Sparse feature column.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.python.framework import dtypes from tensorflow.python.framework.ops import internal_convert_to_tensor from tensorflow.python.framework.ops import name_scope class SparseFeatureColumn(object): """Represents a sparse feature column. Contains three tensors representing a sparse feature column, they are example indices (`int64`), feature indices (`int64`), and feature values (`float`). Feature weights are optional, and are treated as `1.0f` if missing. 
For example, consider a batch of 4 examples, which contains the following features in a particular `SparseFeatureColumn`: * Example 0: feature 5, value 1 * Example 1: feature 6, value 1 and feature 10, value 0.5 * Example 2: no features * Example 3: two copies of feature 2, value 1 This SparseFeatureColumn will be represented as follows: ``` <0, 5, 1> <1, 6, 1> <1, 10, 0.5> <3, 2, 1> <3, 2, 1> ``` For a batch of 2 examples below: * Example 0: feature 5 * Example 1: feature 6 is represented by `SparseFeatureColumn` as: ``` <0, 5, 1> <1, 6, 1> ``` @@__init__ @@example_indices @@feature_indices @@feature_values """ def __init__(self, example_indices, feature_indices, feature_values): """Creates a `SparseFeatureColumn` representation. Args: example_indices: A 1-D int64 tensor of shape `[N]`. Also, accepts python lists, or numpy arrays. feature_indices: A 1-D int64 tensor of shape `[N]`. Also, accepts python lists, or numpy arrays. feature_values: An optional 1-D tensor float tensor of shape `[N]`. Also, accepts python lists, or numpy arrays. Returns: A `SparseFeatureColumn` """ with name_scope(None, 'SparseFeatureColumn', [example_indices, feature_indices]): self._example_indices = internal_convert_to_tensor( example_indices, name='example_indices', dtype=dtypes.int64) self._feature_indices = internal_convert_to_tensor( feature_indices, name='feature_indices', dtype=dtypes.int64) self._feature_values = None if feature_values is not None: with name_scope(None, 'SparseFeatureColumn', [feature_values]): self._feature_values = internal_convert_to_tensor( feature_values, name='feature_values', dtype=dtypes.float32) @property def example_indices(self): """The example indices represented as a dense tensor. Returns: A 1-D Tensor of int64 with shape `[N]`. """ return self._example_indices @property def feature_indices(self): """The feature indices represented as a dense tensor. Returns: A 1-D Tensor of int64 with shape `[N]`. 
""" return self._feature_indices @property def feature_values(self): """The feature values represented as a dense tensor. Returns: May return None, or a 1-D Tensor of float32 with shape `[N]`. """ return self._feature_values
crowning-/dash
refs/heads/master
qa/rpc-tests/mempool_resurrect_test.py
2
#!/usr/bin/env python3 # Copyright (c) 2014-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. # # Test resurrection of mined transactions when # the blockchain is re-organized. # from test_framework.test_framework import BitcoinTestFramework from test_framework.util import * # Create one-input, one-output, no-fee transaction: class MempoolCoinbaseTest(BitcoinTestFramework): def __init__(self): super().__init__() self.num_nodes = 1 self.setup_clean_chain = False def setup_network(self): # Just need one node for this test args = ["-checkmempool", "-debug=mempool"] self.nodes = [] self.nodes.append(start_node(0, self.options.tmpdir, args)) self.is_network_split = False def run_test(self): node0_address = self.nodes[0].getnewaddress() # Spend block 1/2/3's coinbase transactions # Mine a block. # Create three more transactions, spending the spends # Mine another block. # ... make sure all the transactions are confirmed # Invalidate both blocks # ... make sure all the transactions are put back in the mempool # Mine a new block # ... make sure all the transactions are confirmed again. 
b = [ self.nodes[0].getblockhash(n) for n in range(1, 4) ] coinbase_txids = [ self.nodes[0].getblock(h)['tx'][0] for h in b ] spends1_raw = [ create_tx(self.nodes[0], txid, node0_address, 500) for txid in coinbase_txids ] spends1_id = [ self.nodes[0].sendrawtransaction(tx, False, False, True) for tx in spends1_raw ] blocks = [] blocks.extend(self.nodes[0].generate(1)) spends2_raw = [ create_tx(self.nodes[0], txid, node0_address, 499.99) for txid in spends1_id ] spends2_id = [ self.nodes[0].sendrawtransaction(tx, False, False, True) for tx in spends2_raw ] blocks.extend(self.nodes[0].generate(1)) # mempool should be empty, all txns confirmed assert_equal(set(self.nodes[0].getrawmempool()), set()) for txid in spends1_id+spends2_id: tx = self.nodes[0].gettransaction(txid) assert(tx["confirmations"] > 0) # Use invalidateblock to re-org back; all transactions should # end up unconfirmed and back in the mempool for node in self.nodes: node.invalidateblock(blocks[0]) # mempool should be empty, all txns confirmed assert_equal(set(self.nodes[0].getrawmempool()), set(spends1_id+spends2_id)) for txid in spends1_id+spends2_id: tx = self.nodes[0].gettransaction(txid) assert(tx["confirmations"] == 0) # Generate another block, they should all get mined self.nodes[0].generate(1) # mempool should be empty, all txns confirmed assert_equal(set(self.nodes[0].getrawmempool()), set()) for txid in spends1_id+spends2_id: tx = self.nodes[0].gettransaction(txid) assert(tx["confirmations"] > 0) if __name__ == '__main__': MempoolCoinbaseTest().main()
dmsurti/mayavi
refs/heads/master
examples/mayavi/interactive/mayavi_traits_ui.py
6
#!/usr/bin/env python """ An example of how to create a UI similar to the complete Mayavi application inside a Traits UI view. This does not use Envisage and provides a similar UI as seen in the full Mayavi application. This example uses `traitsUI <http://code.enthought.com/projects/traits/>`_ to create a dialog mimicking the mayavi2 application: a scene on the right, and on the left a pipeline tree view, and below it a panel to edit the currently-selected object. """ # Authors: Prabhu Ramachandran <prabhu [at] aero.iitb.ac.in> # Copyright (c) 2007, Enthought, Inc. # License: BSD Style. # Standard imports. from numpy import sqrt, sin, mgrid # Enthought imports. from traits.api import HasTraits, Instance, Property, Enum from traitsui.api import View, Item, HSplit, VSplit, InstanceEditor from tvtk.pyface.scene_editor import SceneEditor from mayavi.core.ui.engine_view import EngineView from mayavi.tools.mlab_scene_model import MlabSceneModel ###################################################################### class Mayavi(HasTraits): # The scene model. scene = Instance(MlabSceneModel, ()) # The mayavi engine view. engine_view = Instance(EngineView) # The current selection in the engine tree view. current_selection = Property ###################### view = View(HSplit(VSplit(Item(name='engine_view', style='custom', resizable=True, show_label=False ), Item(name='current_selection', editor=InstanceEditor(), enabled_when='current_selection is not None', style='custom', springy=True, show_label=False), ), Item(name='scene', editor=SceneEditor(), show_label=False, resizable=True, height=500, width=500), ), resizable=True, scrollable=True ) def __init__(self, **traits): HasTraits.__init__(self, **traits) self.engine_view = EngineView(engine=self.scene.engine) # Hook up the current_selection to change when the one in the engine # changes. This is probably unnecessary in Traits3 since you can show # the UI of a sub-object in T3. 
self.scene.engine.on_trait_change(self._selection_change, 'current_selection') self.generate_data_mayavi() def generate_data_mayavi(self): """Shows how you can generate data using mayavi instead of mlab.""" from mayavi.sources.api import ParametricSurface from mayavi.modules.api import Outline, Surface e = self.scene.engine s = ParametricSurface() e.add_source(s) e.add_module(Outline()) e.add_module(Surface()) def _selection_change(self, old, new): self.trait_property_changed('current_selection', old, new) def _get_current_selection(self): return self.scene.engine.current_selection if __name__ == '__main__': m = Mayavi() m.configure_traits()
cloudControl/libcloud
refs/heads/trunk
docs/examples/loadbalancer/elb/ex_create_balancer_listeners.py
51
from libcloud.compute.types import Provider from libcloud.compute.providers import get_driver ACCESS_ID = 'your access id' SECRET_KEY = 'your secret key' cls = get_driver(Provider.ELB) driver = cls(key=ACCESS_ID, secret=SECRET_KEY) driver.ex_create_balancer_listeners( name='MyLB', listeners=[[1024, 65533, 'HTTPS', 'arn:aws:iam::123456789012:server-certificate/servercert']])
bblacey/FreeCAD-MacOS-CI
refs/heads/master
src/Tools/generateTemplates/templateModule.py
31
#! python # -*- coding: utf-8 -*- # (c) 2006 Juergen Riegel import template, templateModuleApp,templateModuleGui import generateBase.generateModel_Module class TemplateModule (template.ModelTemplate): def Generate(self): print "generateBase.generateModel_Module.Generate()\n" App= templateModuleApp.TemplateModuleApp() App.path = self.path App.module = self.module App.Generate()
chowooick/kaug
refs/heads/master
setup.py
191
# Packaging script for jasmine-core: ships the upstream Jasmine JS/CSS
# assets as an installable Python package.
# NOTE(review): `find_packages` and `os` are imported from setuptools but
# never used below -- likely leftovers; confirm before removing.
from setuptools import setup, find_packages, os
import json

# Keep the Python package version in lockstep with the npm package.json.
with open('package.json') as packageFile:
    version = json.load(packageFile)['version']

setup(
    name="jasmine-core",
    version=version,
    url="http://pivotal.github.io/jasmine/",
    author="Pivotal Labs",
    author_email="jasmine-js@googlegroups.com",
    description=('Jasmine is a Behavior Driven Development testing framework for JavaScript. It does not rely on '+
                 'browsers, DOM, or any JavaScript framework. Thus it\'s suited for websites, '+
                 'Node.js (http://nodejs.org) projects, or anywhere that JavaScript can run.'),
    license='MIT',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Console',
        'Environment :: Web Environment',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: Implementation :: PyPy',
        'Topic :: Internet :: WWW/HTTP',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: Software Development :: Build Tools',
        'Topic :: Software Development :: Quality Assurance',
        'Topic :: Software Development :: Testing',
    ],

    # Map the repository layout onto importable package names.
    packages=['jasmine_core', 'jasmine_core.images'],
    package_dir={'jasmine_core': 'lib/jasmine-core', 'jasmine_core.images': 'images'},
    # Ship the static assets alongside the (asset-only) packages.
    package_data={'jasmine_core': ['*.js', '*.css'], 'jasmine_core.images': ['*.png']},

    include_package_data=True,
    install_requires=['glob2>=0.4.1', 'ordereddict==1.1']
)
Centre-Alt-Rendiment-Esportiu/att
refs/heads/master
src/python/test/classes/ball_detector/Extrapolator.py
1
import warnings

import numpy as np

from test.classes.utils.Ball import Ball

# Number of real (non-extrapolated) detections required for a parabola fit.
LEN_EXTRAPOL = 4


class Extrapolator:
    """Predicts the next ball position by fitting a parabola to the most
    recent genuine detections in a history of Ball objects."""

    def extrapolate(self, history):
        """Return an extrapolated Ball for the next frame, or an empty
        Ball when extrapolation is not possible or not allowed."""
        observed = [b for b in history if not b.is_extrapolate]

        # Not enough genuine detections to fit a degree-2 polynomial.
        if len(observed) < LEN_EXTRAPOL:
            return Ball()

        # Never produce two extrapolated balls back to back.
        if history[-1].is_extrapolate:
            return Ball()

        # Fit y = f(x) over the most recent real detections only.
        recent = observed[-LEN_EXTRAPOL:]
        xs = np.array([b.center[0] for b in recent])
        ys = np.array([b.center[1] for b in recent])
        with warnings.catch_warnings():
            warnings.filterwarnings('error')
            try:
                coeffs = np.polyfit(xs, ys, 2, full=False)
            except np.RankWarning:
                # Degenerate geometry (e.g. duplicate x values) -> give up.
                return Ball()

        # Step forward by the last observed x-displacement: x = 2*x1 - x2,
        # where x1, x2 come from the last two history entries (extrapolated
        # or not), then evaluate the fitted parabola there.
        last, prev = history[-1], history[-2]
        next_x = 2 * last.center[0] - prev.center[0]
        next_y = np.polyval(coeffs, next_x)

        prediction = Ball((next_x, next_y))
        prediction.is_extrapolate = True
        return prediction
JakeAi/Dashboard
refs/heads/master
user_guide_src/cilexer/setup.py
397
""" Install and setup CodeIgniter highlighting for Pygments. """ from setuptools import setup entry_points = """ [pygments.lexers] cilexer = cilexer.cilexer:CodeIgniterLexer """ setup( name='pycilexer', version='0.1', description=__doc__, author="EllisLab, Inc.", packages=['cilexer'], install_requires=( 'sphinx >= 1.0.7', 'sphinxcontrib-phpdomain >= 0.1.3-1' ), entry_points=entry_points )
dushu1203/chromium.src
refs/heads/nw12
tools/telemetry/third_party/pyserial/serial/urlhandler/protocol_socket.py
141
#! python # # Python Serial Port Extension for Win32, Linux, BSD, Jython # see __init__.py # # This module implements a simple socket based client. # It does not support changing any port parameters and will silently ignore any # requests to do so. # # The purpose of this module is that applications using pySerial can connect to # TCP/IP to serial port converters that do not support RFC 2217. # # (C) 2001-2011 Chris Liechti <cliechti@gmx.net> # this is distributed under a free software license, see license.txt # # URL format: socket://<host>:<port>[/option[/option...]] # options: # - "debug" print diagnostic messages from serial.serialutil import * import time import socket import logging # map log level names to constants. used in fromURL() LOGGER_LEVELS = { 'debug': logging.DEBUG, 'info': logging.INFO, 'warning': logging.WARNING, 'error': logging.ERROR, } POLL_TIMEOUT = 2 class SocketSerial(SerialBase): """Serial port implementation for plain sockets.""" BAUDRATES = (50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400, 4800, 9600, 19200, 38400, 57600, 115200) def open(self): """Open port with current settings. This may throw a SerialException if the port cannot be opened.""" self.logger = None if self._port is None: raise SerialException("Port must be configured before it can be used.") if self._isOpen: raise SerialException("Port is already open.") try: # XXX in future replace with create_connection (py >=2.6) self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self._socket.connect(self.fromURL(self.portstr)) except Exception, msg: self._socket = None raise SerialException("Could not open port %s: %s" % (self.portstr, msg)) self._socket.settimeout(POLL_TIMEOUT) # used for write timeout support :/ # not that there anything to configure... 
self._reconfigurePort() # all things set up get, now a clean start self._isOpen = True if not self._rtscts: self.setRTS(True) self.setDTR(True) self.flushInput() self.flushOutput() def _reconfigurePort(self): """Set communication parameters on opened port. for the socket:// protocol all settings are ignored!""" if self._socket is None: raise SerialException("Can only operate on open ports") if self.logger: self.logger.info('ignored port configuration change') def close(self): """Close port""" if self._isOpen: if self._socket: try: self._socket.shutdown(socket.SHUT_RDWR) self._socket.close() except: # ignore errors. pass self._socket = None self._isOpen = False # in case of quick reconnects, give the server some time time.sleep(0.3) def makeDeviceName(self, port): raise SerialException("there is no sensible way to turn numbers into URLs") def fromURL(self, url): """extract host and port from an URL string""" if url.lower().startswith("socket://"): url = url[9:] try: # is there a "path" (our options)? if '/' in url: # cut away options url, options = url.split('/', 1) # process options now, directly altering self for option in options.split('/'): if '=' in option: option, value = option.split('=', 1) else: value = None if option == 'logging': logging.basicConfig() # XXX is that good to call it here? 
self.logger = logging.getLogger('pySerial.socket') self.logger.setLevel(LOGGER_LEVELS[value]) self.logger.debug('enabled logging') else: raise ValueError('unknown option: %r' % (option,)) # get host and port host, port = url.split(':', 1) # may raise ValueError because of unpacking port = int(port) # and this if it's not a number if not 0 <= port < 65536: raise ValueError("port not in range 0...65535") except ValueError, e: raise SerialException('expected a string in the form "[rfc2217://]<host>:<port>[/option[/option...]]": %s' % e) return (host, port) # - - - - - - - - - - - - - - - - - - - - - - - - def inWaiting(self): """Return the number of characters currently in the input buffer.""" if not self._isOpen: raise portNotOpenError if self.logger: # set this one to debug as the function could be called often... self.logger.debug('WARNING: inWaiting returns dummy value') return 0 # hmmm, see comment in read() def read(self, size=1): """Read size bytes from the serial port. If a timeout is set it may return less characters as requested. With no timeout it will block until the requested number of bytes is read.""" if not self._isOpen: raise portNotOpenError data = bytearray() if self._timeout is not None: timeout = time.time() + self._timeout else: timeout = None while len(data) < size and (timeout is None or time.time() < timeout): try: # an implementation with internal buffer would be better # performing... t = time.time() block = self._socket.recv(size - len(data)) duration = time.time() - t if block: data.extend(block) else: # no data -> EOF (connection probably closed) break except socket.timeout: # just need to get out of recv from time to time to check if # still alive continue except socket.error, e: # connection fails -> terminate loop raise SerialException('connection failed (%s)' % e) return bytes(data) def write(self, data): """Output the given string over the serial port. Can block if the connection is blocked. 
May raise SerialException if the connection is closed.""" if not self._isOpen: raise portNotOpenError try: self._socket.sendall(to_bytes(data)) except socket.error, e: # XXX what exception if socket connection fails raise SerialException("socket connection failed: %s" % e) return len(data) def flushInput(self): """Clear input buffer, discarding all that is in the buffer.""" if not self._isOpen: raise portNotOpenError if self.logger: self.logger.info('ignored flushInput') def flushOutput(self): """Clear output buffer, aborting the current output and discarding all that is in the buffer.""" if not self._isOpen: raise portNotOpenError if self.logger: self.logger.info('ignored flushOutput') def sendBreak(self, duration=0.25): """Send break condition. Timed, returns to idle state after given duration.""" if not self._isOpen: raise portNotOpenError if self.logger: self.logger.info('ignored sendBreak(%r)' % (duration,)) def setBreak(self, level=True): """Set break: Controls TXD. When active, to transmitting is possible.""" if not self._isOpen: raise portNotOpenError if self.logger: self.logger.info('ignored setBreak(%r)' % (level,)) def setRTS(self, level=True): """Set terminal status line: Request To Send""" if not self._isOpen: raise portNotOpenError if self.logger: self.logger.info('ignored setRTS(%r)' % (level,)) def setDTR(self, level=True): """Set terminal status line: Data Terminal Ready""" if not self._isOpen: raise portNotOpenError if self.logger: self.logger.info('ignored setDTR(%r)' % (level,)) def getCTS(self): """Read terminal status line: Clear To Send""" if not self._isOpen: raise portNotOpenError if self.logger: self.logger.info('returning dummy for getCTS()') return True def getDSR(self): """Read terminal status line: Data Set Ready""" if not self._isOpen: raise portNotOpenError if self.logger: self.logger.info('returning dummy for getDSR()') return True def getRI(self): """Read terminal status line: Ring Indicator""" if not self._isOpen: raise 
portNotOpenError if self.logger: self.logger.info('returning dummy for getRI()') return False def getCD(self): """Read terminal status line: Carrier Detect""" if not self._isOpen: raise portNotOpenError if self.logger: self.logger.info('returning dummy for getCD()') return True # - - - platform specific - - - # None so far # assemble Serial class with the platform specific implementation and the base # for file-like behavior. for Python 2.6 and newer, that provide the new I/O # library, derive from io.RawIOBase try: import io except ImportError: # classic version with our own file-like emulation class Serial(SocketSerial, FileLike): pass else: # io library present class Serial(SocketSerial, io.RawIOBase): pass # simple client test if __name__ == '__main__': import sys s = Serial('socket://localhost:7000') sys.stdout.write('%s\n' % s) sys.stdout.write("write...\n") s.write("hello\n") s.flush() sys.stdout.write("read: %s\n" % s.read(5)) s.close()
GdZ/scriptfile
refs/heads/master
software/googleAppEngine/lib/django_1_2/django/contrib/localflavor/pl/pl_voivodeships.py
544
""" Polish voivodeship as in http://en.wikipedia.org/wiki/Poland#Administrative_division """ from django.utils.translation import ugettext_lazy as _ VOIVODESHIP_CHOICES = ( ('lower_silesia', _('Lower Silesia')), ('kuyavia-pomerania', _('Kuyavia-Pomerania')), ('lublin', _('Lublin')), ('lubusz', _('Lubusz')), ('lodz', _('Lodz')), ('lesser_poland', _('Lesser Poland')), ('masovia', _('Masovia')), ('opole', _('Opole')), ('subcarpatia', _('Subcarpatia')), ('podlasie', _('Podlasie')), ('pomerania', _('Pomerania')), ('silesia', _('Silesia')), ('swietokrzyskie', _('Swietokrzyskie')), ('warmia-masuria', _('Warmia-Masuria')), ('greater_poland', _('Greater Poland')), ('west_pomerania', _('West Pomerania')), )
torchingloom/edx-platform
refs/heads/select/release
lms/djangoapps/instructor/tests/test_enrollment.py
1
""" Unit tests for instructor.enrollment methods. """ import json import mock from abc import ABCMeta from courseware.models import StudentModule from django.conf import settings from django.test import TestCase from django.test.utils import override_settings from student.tests.factories import UserFactory from xmodule.modulestore.tests.factories import CourseFactory from courseware.tests.modulestore_config import TEST_DATA_MIXED_MODULESTORE from student.models import CourseEnrollment, CourseEnrollmentAllowed from instructor.enrollment import ( EmailEnrollmentState, enroll_email, get_email_params, reset_student_attempts, send_beta_role_email, unenroll_email ) class TestSettableEnrollmentState(TestCase): """ Test the basis class for enrollment tests. """ def setUp(self): self.course_id = 'robot:/a/fake/c::rse/id' def test_mes_create(self): """ Test SettableEnrollmentState creation of user. """ mes = SettableEnrollmentState( user=True, enrollment=True, allowed=False, auto_enroll=False ) # enrollment objects eobjs = mes.create_user(self.course_id) ees = EmailEnrollmentState(self.course_id, eobjs.email) self.assertEqual(mes, ees) class TestEnrollmentChangeBase(TestCase): """ Test instructor enrollment administration against database effects. Test methods in derived classes follow a strict format. `action` is a function which is run the test will pass if `action` mutates state from `before_ideal` to `after_ideal` """ __metaclass__ = ABCMeta def setUp(self): self.course_id = 'robot:/a/fake/c::rse/id' def _run_state_change_test(self, before_ideal, after_ideal, action): """ Runs a state change test. `before_ideal` and `after_ideal` are SettableEnrollmentState's `action` is a function which will be run in the middle. `action` should transition the world from before_ideal to after_ideal `action` will be supplied the following arguments (None-able arguments) `email` is an email string """ # initialize & check before print "checking initialization..." 
eobjs = before_ideal.create_user(self.course_id) before = EmailEnrollmentState(self.course_id, eobjs.email) self.assertEqual(before, before_ideal) # do action print "running action..." action(eobjs.email) # check after print "checking effects..." after = EmailEnrollmentState(self.course_id, eobjs.email) self.assertEqual(after, after_ideal) class TestInstructorEnrollDB(TestEnrollmentChangeBase): """ Test instructor.enrollment.enroll_email """ def test_enroll(self): before_ideal = SettableEnrollmentState( user=True, enrollment=False, allowed=False, auto_enroll=False ) after_ideal = SettableEnrollmentState( user=True, enrollment=True, allowed=False, auto_enroll=False ) action = lambda email: enroll_email(self.course_id, email) return self._run_state_change_test(before_ideal, after_ideal, action) def test_enroll_again(self): before_ideal = SettableEnrollmentState( user=True, enrollment=True, allowed=False, auto_enroll=False, ) after_ideal = SettableEnrollmentState( user=True, enrollment=True, allowed=False, auto_enroll=False, ) action = lambda email: enroll_email(self.course_id, email) return self._run_state_change_test(before_ideal, after_ideal, action) def test_enroll_nouser(self): before_ideal = SettableEnrollmentState( user=False, enrollment=False, allowed=False, auto_enroll=False, ) after_ideal = SettableEnrollmentState( user=False, enrollment=False, allowed=True, auto_enroll=False, ) action = lambda email: enroll_email(self.course_id, email) return self._run_state_change_test(before_ideal, after_ideal, action) def test_enroll_nouser_again(self): before_ideal = SettableEnrollmentState( user=False, enrollment=False, allowed=True, auto_enroll=False ) after_ideal = SettableEnrollmentState( user=False, enrollment=False, allowed=True, auto_enroll=False, ) action = lambda email: enroll_email(self.course_id, email) return self._run_state_change_test(before_ideal, after_ideal, action) def test_enroll_nouser_autoenroll(self): before_ideal = SettableEnrollmentState( 
user=False, enrollment=False, allowed=False, auto_enroll=False, ) after_ideal = SettableEnrollmentState( user=False, enrollment=False, allowed=True, auto_enroll=True, ) action = lambda email: enroll_email(self.course_id, email, auto_enroll=True) return self._run_state_change_test(before_ideal, after_ideal, action) def test_enroll_nouser_change_autoenroll(self): before_ideal = SettableEnrollmentState( user=False, enrollment=False, allowed=True, auto_enroll=True, ) after_ideal = SettableEnrollmentState( user=False, enrollment=False, allowed=True, auto_enroll=False, ) action = lambda email: enroll_email(self.course_id, email, auto_enroll=False) return self._run_state_change_test(before_ideal, after_ideal, action) class TestInstructorUnenrollDB(TestEnrollmentChangeBase): """ Test instructor.enrollment.unenroll_email """ def test_unenroll(self): before_ideal = SettableEnrollmentState( user=True, enrollment=True, allowed=False, auto_enroll=False ) after_ideal = SettableEnrollmentState( user=True, enrollment=False, allowed=False, auto_enroll=False ) action = lambda email: unenroll_email(self.course_id, email) return self._run_state_change_test(before_ideal, after_ideal, action) def test_unenroll_notenrolled(self): before_ideal = SettableEnrollmentState( user=True, enrollment=False, allowed=False, auto_enroll=False ) after_ideal = SettableEnrollmentState( user=True, enrollment=False, allowed=False, auto_enroll=False ) action = lambda email: unenroll_email(self.course_id, email) return self._run_state_change_test(before_ideal, after_ideal, action) def test_unenroll_disallow(self): before_ideal = SettableEnrollmentState( user=False, enrollment=False, allowed=True, auto_enroll=True ) after_ideal = SettableEnrollmentState( user=False, enrollment=False, allowed=False, auto_enroll=False ) action = lambda email: unenroll_email(self.course_id, email) return self._run_state_change_test(before_ideal, after_ideal, action) def test_unenroll_norecord(self): before_ideal = 
SettableEnrollmentState( user=False, enrollment=False, allowed=False, auto_enroll=False ) after_ideal = SettableEnrollmentState( user=False, enrollment=False, allowed=False, auto_enroll=False ) action = lambda email: unenroll_email(self.course_id, email) return self._run_state_change_test(before_ideal, after_ideal, action) class TestInstructorEnrollmentStudentModule(TestCase): """ Test student module manipulations. """ def setUp(self): self.course_id = 'robot:/a/fake/c::rse/id' def test_reset_student_attempts(self): user = UserFactory() msk = 'robot/module/state/key' original_state = json.dumps({'attempts': 32, 'otherstuff': 'alsorobots'}) module = StudentModule.objects.create(student=user, course_id=self.course_id, module_state_key=msk, state=original_state) # lambda to reload the module state from the database module = lambda: StudentModule.objects.get(student=user, course_id=self.course_id, module_state_key=msk) self.assertEqual(json.loads(module().state)['attempts'], 32) reset_student_attempts(self.course_id, user, msk) self.assertEqual(json.loads(module().state)['attempts'], 0) def test_delete_student_attempts(self): user = UserFactory() msk = 'robot/module/state/key' original_state = json.dumps({'attempts': 32, 'otherstuff': 'alsorobots'}) StudentModule.objects.create(student=user, course_id=self.course_id, module_state_key=msk, state=original_state) self.assertEqual(StudentModule.objects.filter(student=user, course_id=self.course_id, module_state_key=msk).count(), 1) reset_student_attempts(self.course_id, user, msk, delete_module=True) self.assertEqual(StudentModule.objects.filter(student=user, course_id=self.course_id, module_state_key=msk).count(), 0) class EnrollmentObjects(object): """ Container for enrollment objects. `email` - student email `user` - student User object `cenr` - CourseEnrollment object `cea` - CourseEnrollmentAllowed object Any of the objects except email can be None. 
""" def __init__(self, email, user, cenr, cea): self.email = email self.user = user self.cenr = cenr self.cea = cea class SettableEnrollmentState(EmailEnrollmentState): """ Settable enrollment state. Used for testing state changes. SettableEnrollmentState can be constructed and then a call to create_user will make objects which correspond to the state represented in the SettableEnrollmentState. """ def __init__(self, user=False, enrollment=False, allowed=False, auto_enroll=False): # pylint: disable=W0231 self.user = user self.enrollment = enrollment self.allowed = allowed self.auto_enroll = auto_enroll def __eq__(self, other): return self.to_dict() == other.to_dict() def __neq__(self, other): return not self == other def create_user(self, course_id=None): """ Utility method to possibly create and possibly enroll a user. Creates a state matching the SettableEnrollmentState properties. Returns a tuple of ( email, User, (optionally None) CourseEnrollment, (optionally None) CourseEnrollmentAllowed, (optionally None) ) """ # if self.user=False, then this will just be used to generate an email. 
email = "robot_no_user_exists_with_this_email@edx.org" if self.user: user = UserFactory() email = user.email if self.enrollment: cenr = CourseEnrollment.enroll(user, course_id) return EnrollmentObjects(email, user, cenr, None) else: return EnrollmentObjects(email, user, None, None) elif self.allowed: cea = CourseEnrollmentAllowed.objects.create( email=email, course_id=course_id, auto_enroll=self.auto_enroll, ) return EnrollmentObjects(email, None, None, cea) else: return EnrollmentObjects(email, None, None, None) class TestSendBetaRoleEmail(TestCase): """ Test edge cases for `send_beta_role_email` """ def setUp(self): self.user = UserFactory.create() self.email_params = {'course': 'Robot Super Course'} def test_bad_action(self): bad_action = 'beta_tester' error_msg = "Unexpected action received '{}' - expected 'add' or 'remove'".format(bad_action) with self.assertRaisesRegexp(ValueError, error_msg): send_beta_role_email(bad_action, self.user, self.email_params) @override_settings(MODULESTORE=TEST_DATA_MIXED_MODULESTORE) class TestGetEmailParams(TestCase): """ Test what URLs the function get_email_params returns under different production-like conditions. """ def setUp(self): self.course = CourseFactory.create() # Explicitly construct what we expect the course URLs to be site = settings.SITE_NAME self.course_url = u'https://{}/courses/{}/'.format( site, self.course.id ) self.course_about_url = self.course_url + 'about' self.registration_url = u'https://{}/register'.format( site, ) def test_normal_params(self): # For a normal site, what do we expect to get for the URLs? # Also make sure `auto_enroll` is properly passed through. 
result = get_email_params(self.course, False) self.assertEqual(result['auto_enroll'], False) self.assertEqual(result['course_about_url'], self.course_about_url) self.assertEqual(result['registration_url'], self.registration_url) self.assertEqual(result['course_url'], self.course_url) def test_marketing_params(self): # For a site with a marketing front end, what do we expect to get for the URLs? # Also make sure `auto_enroll` is properly passed through. with mock.patch.dict('django.conf.settings.FEATURES', {'ENABLE_MKTG_SITE': True}): result = get_email_params(self.course, True) self.assertEqual(result['auto_enroll'], True) # We should *not* get a course about url (LMS doesn't know what the marketing site URLs are) self.assertEqual(result['course_about_url'], None) self.assertEqual(result['registration_url'], self.registration_url) self.assertEqual(result['course_url'], self.course_url)
nelmiux/CarnotKE
refs/heads/master
jyhton/lib-python/2.7/test/test_memoryview.py
75
"""Unit tests for the memoryview XXX We need more tests! Some tests are in test_bytes """ import unittest import sys import gc import weakref import array from test import test_support import io class AbstractMemoryTests: source_bytes = b"abcdef" @property def _source(self): return self.source_bytes @property def _types(self): return filter(None, [self.ro_type, self.rw_type]) def check_getitem_with_type(self, tp): item = self.getitem_type b = tp(self._source) oldrefcount = sys.getrefcount(b) m = self._view(b) self.assertEqual(m[0], item(b"a")) self.assertIsInstance(m[0], bytes) self.assertEqual(m[5], item(b"f")) self.assertEqual(m[-1], item(b"f")) self.assertEqual(m[-6], item(b"a")) # Bounds checking self.assertRaises(IndexError, lambda: m[6]) self.assertRaises(IndexError, lambda: m[-7]) self.assertRaises(IndexError, lambda: m[sys.maxsize]) self.assertRaises(IndexError, lambda: m[-sys.maxsize]) # Type checking self.assertRaises(TypeError, lambda: m[None]) self.assertRaises(TypeError, lambda: m[0.0]) self.assertRaises(TypeError, lambda: m["a"]) m = None self.assertEqual(sys.getrefcount(b), oldrefcount) def test_getitem(self): for tp in self._types: self.check_getitem_with_type(tp) def test_iter(self): for tp in self._types: b = tp(self._source) m = self._view(b) self.assertEqual(list(m), [m[i] for i in range(len(m))]) def test_repr(self): for tp in self._types: b = tp(self._source) m = self._view(b) self.assertIsInstance(m.__repr__(), str) def test_setitem_readonly(self): if not self.ro_type: return b = self.ro_type(self._source) oldrefcount = sys.getrefcount(b) m = self._view(b) def setitem(value): m[0] = value self.assertRaises(TypeError, setitem, b"a") self.assertRaises(TypeError, setitem, 65) self.assertRaises(TypeError, setitem, memoryview(b"a")) m = None self.assertEqual(sys.getrefcount(b), oldrefcount) def test_setitem_writable(self): if not self.rw_type: return tp = self.rw_type b = self.rw_type(self._source) oldrefcount = sys.getrefcount(b) m = 
self._view(b) m[0] = tp(b"0") self._check_contents(tp, b, b"0bcdef") m[1:3] = tp(b"12") self._check_contents(tp, b, b"012def") m[1:1] = tp(b"") self._check_contents(tp, b, b"012def") m[:] = tp(b"abcdef") self._check_contents(tp, b, b"abcdef") # Overlapping copies of a view into itself m[0:3] = m[2:5] self._check_contents(tp, b, b"cdedef") m[:] = tp(b"abcdef") m[2:5] = m[0:3] self._check_contents(tp, b, b"ababcf") def setitem(key, value): m[key] = tp(value) # Bounds checking self.assertRaises(IndexError, setitem, 6, b"a") self.assertRaises(IndexError, setitem, -7, b"a") self.assertRaises(IndexError, setitem, sys.maxsize, b"a") self.assertRaises(IndexError, setitem, -sys.maxsize, b"a") # Wrong index/slice types self.assertRaises(TypeError, setitem, 0.0, b"a") self.assertRaises(TypeError, setitem, (0,), b"a") self.assertRaises(TypeError, setitem, "a", b"a") # Trying to resize the memory object self.assertRaises(ValueError, setitem, 0, b"") self.assertRaises(ValueError, setitem, 0, b"ab") self.assertRaises(ValueError, setitem, slice(1,1), b"a") self.assertRaises(ValueError, setitem, slice(0,2), b"a") m = None self.assertEqual(sys.getrefcount(b), oldrefcount) def test_delitem(self): for tp in self._types: b = tp(self._source) m = self._view(b) with self.assertRaises(TypeError): del m[1] with self.assertRaises(TypeError): del m[1:4] def test_tobytes(self): for tp in self._types: m = self._view(tp(self._source)) b = m.tobytes() # This calls self.getitem_type() on each separate byte of b"abcdef" expected = b"".join( self.getitem_type(c) for c in b"abcdef") self.assertEqual(b, expected) self.assertIsInstance(b, bytes) def test_tolist(self): for tp in self._types: m = self._view(tp(self._source)) l = m.tolist() self.assertEqual(l, map(ord, b"abcdef")) def test_compare(self): # memoryviews can compare for equality with other objects # having the buffer interface. 
for tp in self._types: m = self._view(tp(self._source)) for tp_comp in self._types: self.assertTrue(m == tp_comp(b"abcdef")) self.assertFalse(m != tp_comp(b"abcdef")) self.assertFalse(m == tp_comp(b"abcde")) self.assertTrue(m != tp_comp(b"abcde")) self.assertFalse(m == tp_comp(b"abcde1")) self.assertTrue(m != tp_comp(b"abcde1")) self.assertTrue(m == m) self.assertTrue(m == m[:]) self.assertTrue(m[0:6] == m[:]) self.assertFalse(m[0:5] == m) # Comparison with objects which don't support the buffer API self.assertFalse(m == u"abcdef") self.assertTrue(m != u"abcdef") self.assertFalse(u"abcdef" == m) self.assertTrue(u"abcdef" != m) # Unordered comparisons are unimplemented, and therefore give # arbitrary results (they raise a TypeError in py3k) def check_attributes_with_type(self, tp): m = self._view(tp(self._source)) self.assertEqual(m.format, self.format) self.assertIsInstance(m.format, str) self.assertEqual(m.itemsize, self.itemsize) self.assertEqual(m.ndim, 1) self.assertEqual(m.shape, (6,)) self.assertEqual(len(m), 6) self.assertEqual(m.strides, (self.itemsize,)) self.assertEqual(m.suboffsets, None) return m def test_attributes_readonly(self): if not self.ro_type: return m = self.check_attributes_with_type(self.ro_type) self.assertEqual(m.readonly, True) def test_attributes_writable(self): if not self.rw_type: return m = self.check_attributes_with_type(self.rw_type) self.assertEqual(m.readonly, False) # Disabled: unicode uses the old buffer API in 2.x #def test_getbuffer(self): ## Test PyObject_GetBuffer() on a memoryview object. 
#for tp in self._types: #b = tp(self._source) #oldrefcount = sys.getrefcount(b) #m = self._view(b) #oldviewrefcount = sys.getrefcount(m) #s = unicode(m, "utf-8") #self._check_contents(tp, b, s.encode("utf-8")) #self.assertEqual(sys.getrefcount(m), oldviewrefcount) #m = None #self.assertEqual(sys.getrefcount(b), oldrefcount) def test_gc(self): for tp in self._types: if not isinstance(tp, type): # If tp is a factory rather than a plain type, skip continue class MySource(tp): pass class MyObject: pass # Create a reference cycle through a memoryview object b = MySource(tp(b'abc')) m = self._view(b) o = MyObject() b.m = m b.o = o wr = weakref.ref(o) b = m = o = None # The cycle must be broken gc.collect() self.assertTrue(wr() is None, wr()) def test_writable_readonly(self): # Issue #10451: memoryview incorrectly exposes a readonly # buffer as writable causing a segfault if using mmap tp = self.ro_type if tp is None: return b = tp(self._source) m = self._view(b) i = io.BytesIO(b'ZZZZ') self.assertRaises(TypeError, i.readinto, m) # Variations on source objects for the buffer: bytes-like objects, then arrays # with itemsize > 1. # NOTE: support for multi-dimensional objects is unimplemented. class BaseBytesMemoryTests(AbstractMemoryTests): ro_type = bytes rw_type = bytearray getitem_type = bytes itemsize = 1 format = 'B' # Disabled: array.array() does not support the new buffer API in 2.x #class BaseArrayMemoryTests(AbstractMemoryTests): #ro_type = None #rw_type = lambda self, b: array.array('i', map(ord, b)) #getitem_type = lambda self, b: array.array('i', map(ord, b)).tostring() #itemsize = array.array('i').itemsize #format = 'i' #def test_getbuffer(self): ## XXX Test should be adapted for non-byte buffers #pass #def test_tolist(self): ## XXX NotImplementedError: tolist() only supports byte views #pass # Variations on indirection levels: memoryview, slice of memoryview, # slice of slice of memoryview. # This is important to test allocation subtleties. 
class BaseMemoryviewTests: def _view(self, obj): return memoryview(obj) def _check_contents(self, tp, obj, contents): self.assertEqual(obj, tp(contents)) class BaseMemorySliceTests: source_bytes = b"XabcdefY" def _view(self, obj): m = memoryview(obj) return m[1:7] def _check_contents(self, tp, obj, contents): self.assertEqual(obj[1:7], tp(contents)) def test_refs(self): for tp in self._types: m = memoryview(tp(self._source)) oldrefcount = sys.getrefcount(m) m[1:2] self.assertEqual(sys.getrefcount(m), oldrefcount) class BaseMemorySliceSliceTests: source_bytes = b"XabcdefY" def _view(self, obj): m = memoryview(obj) return m[:7][1:] def _check_contents(self, tp, obj, contents): self.assertEqual(obj[1:7], tp(contents)) # Concrete test classes class BytesMemoryviewTest(unittest.TestCase, BaseMemoryviewTests, BaseBytesMemoryTests): def test_constructor(self): for tp in self._types: ob = tp(self._source) self.assertTrue(memoryview(ob)) self.assertTrue(memoryview(object=ob)) self.assertRaises(TypeError, memoryview) self.assertRaises(TypeError, memoryview, ob, ob) self.assertRaises(TypeError, memoryview, argument=ob) self.assertRaises(TypeError, memoryview, ob, argument=True) #class ArrayMemoryviewTest(unittest.TestCase, #BaseMemoryviewTests, BaseArrayMemoryTests): #def test_array_assign(self): ## Issue #4569: segfault when mutating a memoryview with itemsize != 1 #a = array.array('i', range(10)) #m = memoryview(a) #new_a = array.array('i', range(9, -1, -1)) #m[:] = new_a #self.assertEqual(a, new_a) class BytesMemorySliceTest(unittest.TestCase, BaseMemorySliceTests, BaseBytesMemoryTests): pass #class ArrayMemorySliceTest(unittest.TestCase, #BaseMemorySliceTests, BaseArrayMemoryTests): #pass class BytesMemorySliceSliceTest(unittest.TestCase, BaseMemorySliceSliceTests, BaseBytesMemoryTests): pass #class ArrayMemorySliceSliceTest(unittest.TestCase, #BaseMemorySliceSliceTests, BaseArrayMemoryTests): #pass def test_main(): test_support.run_unittest(__name__) if __name__ == 
"__main__": test_main()
RPI-OPENEDX/edx-platform
refs/heads/RPI-DEV
lms/djangoapps/courseware/access.py
3
""" This file contains (or should), all access control logic for the courseware. Ideally, it will be the only place that needs to know about any special settings like DISABLE_START_DATES. Note: The access control logic in this file does NOT check for enrollment in a course. It is expected that higher layers check for enrollment so we don't have to hit the enrollments table on every module load. If enrollment is to be checked, use get_course_with_access in courseware.courses. It is a wrapper around has_access that additionally checks for enrollment. """ from datetime import datetime import logging import pytz from django.conf import settings from django.contrib.auth.models import AnonymousUser from django.utils.timezone import UTC from opaque_keys.edx.keys import CourseKey, UsageKey from xblock.core import XBlock from xmodule.course_module import ( CourseDescriptor, CATALOG_VISIBILITY_CATALOG_AND_ABOUT, CATALOG_VISIBILITY_ABOUT, ) from xmodule.error_module import ErrorDescriptor from xmodule.x_module import XModule, DEPRECATION_VSCOMPAT_EVENT from xmodule.split_test_module import get_split_user_partitions from xmodule.partitions.partitions import NoSuchUserPartitionError, NoSuchUserPartitionGroupError from external_auth.models import ExternalAuthMap from courseware.masquerade import get_masquerade_role, is_masquerading_as_student from openedx.core.djangoapps.content.course_overviews.models import CourseOverview from student import auth from student.models import CourseEnrollmentAllowed from student.roles import ( CourseBetaTesterRole, CourseInstructorRole, CourseStaffRole, GlobalStaff, SupportStaffRole, OrgInstructorRole, OrgStaffRole, ) from util.milestones_helpers import ( get_pre_requisite_courses_not_completed, any_unfulfilled_milestones, is_prerequisite_courses_enabled, ) from ccx_keys.locator import CCXLocator import dogstats_wrapper as dog_stats_api from courseware.access_response import ( MilestoneError, MobileAvailabilityError, VisibilityError, ) from 
courseware.access_utils import adjust_start_date, check_start_date, debug, ACCESS_GRANTED, ACCESS_DENIED log = logging.getLogger(__name__) def has_access(user, action, obj, course_key=None): """ Check whether a user has the access to do action on obj. Handles any magic switching based on various settings. Things this module understands: - start dates for modules - visible_to_staff_only for modules - DISABLE_START_DATES - different access for instructor, staff, course staff, and students. - mobile_available flag for course modules user: a Django user object. May be anonymous. If none is passed, anonymous is assumed obj: The object to check access for. A module, descriptor, location, or certain special strings (e.g. 'global') action: A string specifying the action that the client is trying to perform. actions depend on the obj type, but include e.g. 'enroll' for courses. See the type-specific functions below for the known actions for that type. course_key: A course_key specifying which course run this access is for. Required when accessing anything other than a CourseDescriptor, 'global', or a location with category 'course' Returns an AccessResponse object. It is up to the caller to actually deny access in a way that makes sense in context. """ # Just in case user is passed in as None, make them anonymous if not user: user = AnonymousUser() if isinstance(course_key, CCXLocator): course_key = course_key.to_course_locator() # delegate the work to type-specific functions. 
# (start with more specific types, then get more general) if isinstance(obj, CourseDescriptor): return _has_access_course(user, action, obj) if isinstance(obj, CourseOverview): return _has_access_course(user, action, obj) if isinstance(obj, ErrorDescriptor): return _has_access_error_desc(user, action, obj, course_key) if isinstance(obj, XModule): return _has_access_xmodule(user, action, obj, course_key) # NOTE: any descriptor access checkers need to go above this if isinstance(obj, XBlock): return _has_access_descriptor(user, action, obj, course_key) if isinstance(obj, CCXLocator): return _has_access_ccx_key(user, action, obj) if isinstance(obj, CourseKey): return _has_access_course_key(user, action, obj) if isinstance(obj, UsageKey): return _has_access_location(user, action, obj, course_key) if isinstance(obj, basestring): return _has_access_string(user, action, obj) # Passing an unknown object here is a coding error, so rather than # returning a default, complain. raise TypeError("Unknown object type in has_access(): '{0}'" .format(type(obj))) # ================ Implementation helpers ================================ def _can_access_descriptor_with_start_date(user, descriptor, course_key): # pylint: disable=invalid-name """ Checks if a user has access to a descriptor based on its start date. If there is no start date specified, grant access. Else, check if we're past the start date. Note: We do NOT check whether the user is staff or if the descriptor is detached... it is assumed both of these are checked by the caller. Arguments: user (User): the user whose descriptor access we are checking. descriptor (AType): the descriptor for which we are checking access, where AType is CourseDescriptor, CourseOverview, or any other class that represents a descriptor and has the attributes .location, .id, .start, and .days_early_for_beta. Returns: AccessResponse: The result of this access check. Possible results are ACCESS_GRANTED or a StartDateError. 
""" return check_start_date(user, descriptor.days_early_for_beta, descriptor.start, course_key) def _can_view_courseware_with_prerequisites(user, course): # pylint: disable=invalid-name """ Checks if a user has access to a course based on its prerequisites. If the user is staff or anonymous, immediately grant access. Else, return whether or not the prerequisite courses have been passed. Arguments: user (User): the user whose course access we are checking. course (AType): the course for which we are checking access. where AType is CourseDescriptor, CourseOverview, or any other class that represents a course and has the attributes .location and .id. """ def _is_prerequisites_disabled(): """ Checks if prerequisites are disabled in the settings. """ return ACCESS_DENIED if is_prerequisite_courses_enabled() else ACCESS_GRANTED return ( _is_prerequisites_disabled() or _has_staff_access_to_descriptor(user, course, course.id) or user.is_anonymous() or _has_fulfilled_prerequisites(user, [course.id]) ) def _can_load_course_on_mobile(user, course): """ Checks if a user can view the given course on a mobile device. This function only checks mobile-specific access restrictions. Other access restrictions such as start date and the .visible_to_staff_only flag must be checked by callers in *addition* to the return value of this function. Arguments: user (User): the user whose course access we are checking. course (CourseDescriptor|CourseOverview): the course for which we are checking access. Returns: bool: whether the course can be accessed on mobile. """ return ( is_mobile_available_for_user(user, course) and ( _has_staff_access_to_descriptor(user, course, course.id) or _has_fulfilled_all_milestones(user, course.id) ) ) def _can_enroll_courselike(user, courselike): """ Ascertain if the user can enroll in the given courselike object. Arguments: user (User): The user attempting to enroll. 
courselike (CourseDescriptor or CourseOverview): The object representing the course in which the user is trying to enroll. Returns: AccessResponse, indicating whether the user can enroll. """ enrollment_domain = courselike.enrollment_domain # Courselike objects (e.g., course descriptors and CourseOverviews) have an attribute named `id` # which actually points to a CourseKey. Sigh. course_key = courselike.id # If using a registration method to restrict enrollment (e.g., Shibboleth) if settings.FEATURES.get('RESTRICT_ENROLL_BY_REG_METHOD') and enrollment_domain: if user is not None and user.is_authenticated() and \ ExternalAuthMap.objects.filter(user=user, external_domain=enrollment_domain): debug("Allow: external_auth of " + enrollment_domain) reg_method_ok = True else: reg_method_ok = False else: reg_method_ok = True # If the user appears in CourseEnrollmentAllowed paired with the given course key, # they may enroll. Note that as dictated by the legacy database schema, the filter # call includes a `course_id` kwarg which requires a CourseKey. if user is not None and user.is_authenticated(): if CourseEnrollmentAllowed.objects.filter(email=user.email, course_id=course_key): return ACCESS_GRANTED if _has_staff_access_to_descriptor(user, courselike, course_key): return ACCESS_GRANTED if courselike.invitation_only: debug("Deny: invitation only") return ACCESS_DENIED now = datetime.now(UTC()) enrollment_start = courselike.enrollment_start or datetime.min.replace(tzinfo=pytz.UTC) enrollment_end = courselike.enrollment_end or datetime.max.replace(tzinfo=pytz.UTC) if reg_method_ok and enrollment_start < now < enrollment_end: debug("Allow: in enrollment period") return ACCESS_GRANTED return ACCESS_DENIED def _has_access_course(user, action, courselike): """ Check if user has access to a course. Arguments: user (User): the user whose course access we are checking. action (string): The action that is being checked. 
courselike (CourseDescriptor or CourseOverview): The object representing the course that the user wants to access. Valid actions: 'load' -- load the courseware, see inside the course 'load_forum' -- can load and contribute to the forums (one access level for now) 'load_mobile' -- can load from a mobile context 'enroll' -- enroll. Checks for enrollment window. 'see_exists' -- can see that the course exists. 'staff' -- staff access to course. 'see_in_catalog' -- user is able to see the course listed in the course catalog. 'see_about_page' -- user is able to see the course about page. """ def can_load(): """ Can this user load this course? NOTE: this is not checking whether user is actually enrolled in the course. """ response = ( _visible_to_nonstaff_users(courselike) and _can_access_descriptor_with_start_date(user, courselike, courselike.id) ) return ( ACCESS_GRANTED if (response or _has_staff_access_to_descriptor(user, courselike, courselike.id)) else response ) def can_enroll(): """ Returns whether the user can enroll in the course. """ return _can_enroll_courselike(user, courselike) def see_exists(): """ Can see if can enroll, but also if can load it: if user enrolled in a course and now it's past the enrollment period, they should still see it. """ return ACCESS_GRANTED if (can_load() or can_enroll()) else ACCESS_DENIED def can_see_in_catalog(): """ Implements the "can see course in catalog" logic if a course should be visible in the main course catalog In this case we use the catalog_visibility property on the course descriptor but also allow course staff to see this. """ return ( _has_catalog_visibility(courselike, CATALOG_VISIBILITY_CATALOG_AND_ABOUT) or _has_staff_access_to_descriptor(user, courselike, courselike.id) ) def can_see_about_page(): """ Implements the "can see course about page" logic if a course about page should be visible In this case we use the catalog_visibility property on the course descriptor but also allow course staff to see this. 
""" return ( _has_catalog_visibility(courselike, CATALOG_VISIBILITY_CATALOG_AND_ABOUT) or _has_catalog_visibility(courselike, CATALOG_VISIBILITY_ABOUT) or _has_staff_access_to_descriptor(user, courselike, courselike.id) ) checkers = { 'load': can_load, 'view_courseware_with_prerequisites': lambda: _can_view_courseware_with_prerequisites(user, courselike), 'load_mobile': lambda: can_load() and _can_load_course_on_mobile(user, courselike), 'enroll': can_enroll, 'see_exists': see_exists, 'staff': lambda: _has_staff_access_to_descriptor(user, courselike, courselike.id), 'instructor': lambda: _has_instructor_access_to_descriptor(user, courselike, courselike.id), 'see_in_catalog': can_see_in_catalog, 'see_about_page': can_see_about_page, } return _dispatch(checkers, action, user, courselike) def _has_access_error_desc(user, action, descriptor, course_key): """ Only staff should see error descriptors. Valid actions: 'load' -- load this descriptor, showing it to the user. 'staff' -- staff access to descriptor. """ def check_for_staff(): return _has_staff_access_to_descriptor(user, descriptor, course_key) checkers = { 'load': check_for_staff, 'staff': check_for_staff, 'instructor': lambda: _has_instructor_access_to_descriptor(user, descriptor, course_key) } return _dispatch(checkers, action, user, descriptor) def _has_group_access(descriptor, user, course_key): """ This function returns a boolean indicating whether or not `user` has sufficient group memberships to "load" a block (the `descriptor`) """ if len(descriptor.user_partitions) == len(get_split_user_partitions(descriptor.user_partitions)): # Short-circuit the process, since there are no defined user partitions that are not # user_partitions used by the split_test module. The split_test module handles its own access # via updating the children of the split_test module. 
return ACCESS_GRANTED # use merged_group_access which takes group access on the block's # parents / ancestors into account merged_access = descriptor.merged_group_access # check for False in merged_access, which indicates that at least one # partition's group list excludes all students. if False in merged_access.values(): log.warning("Group access check excludes all students, access will be denied.", exc_info=True) return ACCESS_DENIED # resolve the partition IDs in group_access to actual # partition objects, skipping those which contain empty group directives. # If a referenced partition could not be found, it will be denied # If the partition is found but is no longer active (meaning it's been disabled) # then skip the access check for that partition. partitions = [] for partition_id, group_ids in merged_access.items(): try: partition = descriptor._get_user_partition(partition_id) # pylint: disable=protected-access if partition.active: if group_ids is not None: partitions.append(partition) else: log.debug( "Skipping partition with ID %s in course %s because it is no longer active", partition.id, course_key ) except NoSuchUserPartitionError: log.warning("Error looking up user partition, access will be denied.", exc_info=True) return ACCESS_DENIED # next resolve the group IDs specified within each partition partition_groups = [] try: for partition in partitions: groups = [ partition.get_group(group_id) for group_id in merged_access[partition.id] ] if groups: partition_groups.append((partition, groups)) except NoSuchUserPartitionGroupError: log.warning("Error looking up referenced user partition group, access will be denied.", exc_info=True) return ACCESS_DENIED # look up the user's group for each partition user_groups = {} for partition, groups in partition_groups: user_groups[partition.id] = partition.scheme.get_group_for_user( course_key, user, partition, ) # finally: check that the user has a satisfactory group assignment # for each partition. 
if not all(user_groups.get(partition.id) in groups for partition, groups in partition_groups): return ACCESS_DENIED # all checks passed. return ACCESS_GRANTED def _has_access_descriptor(user, action, descriptor, course_key=None): """ Check if user has access to this descriptor. Valid actions: 'load' -- load this descriptor, showing it to the user. 'staff' -- staff access to descriptor. NOTE: This is the fallback logic for descriptors that don't have custom policy (e.g. courses). If you call this method directly instead of going through has_access(), it will not do the right thing. """ def can_load(): """ NOTE: This does not check that the student is enrolled in the course that contains this module. We may or may not want to allow non-enrolled students to see modules. If not, views should check the course, so we don't have to hit the enrollments table on every module load. """ response = ( _visible_to_nonstaff_users(descriptor) and _has_group_access(descriptor, user, course_key) and ( _has_detached_class_tag(descriptor) or _can_access_descriptor_with_start_date(user, descriptor, course_key) ) ) return ( ACCESS_GRANTED if (response or _has_staff_access_to_descriptor(user, descriptor, course_key)) else response ) checkers = { 'load': can_load, 'staff': lambda: _has_staff_access_to_descriptor(user, descriptor, course_key), 'instructor': lambda: _has_instructor_access_to_descriptor(user, descriptor, course_key) } return _dispatch(checkers, action, user, descriptor) def _has_access_xmodule(user, action, xmodule, course_key): """ Check if user has access to this xmodule. Valid actions: - same as the valid actions for xmodule.descriptor """ # Delegate to the descriptor return has_access(user, action, xmodule.descriptor, course_key) def _has_access_location(user, action, location, course_key): """ Check if user has access to this location. 
Valid actions: 'staff' : True if the user has staff access to this location NOTE: if you add other actions, make sure that has_access(user, location, action) == has_access(user, get_item(location), action) """ checkers = { 'staff': lambda: _has_staff_access_to_location(user, location, course_key) } return _dispatch(checkers, action, user, location) def _has_access_course_key(user, action, course_key): """ Check if user has access to the course with this course_key Valid actions: 'staff' : True if the user has staff access to this location 'instructor' : True if the user has staff access to this location """ checkers = { 'staff': lambda: _has_staff_access_to_location(user, None, course_key), 'instructor': lambda: _has_instructor_access_to_location(user, None, course_key), } return _dispatch(checkers, action, user, course_key) def _has_access_ccx_key(user, action, ccx_key): """Check if user has access to the course for this ccx_key Delegates checking to _has_access_course_key Valid actions: same as for that function """ course_key = ccx_key.to_course_locator() return _has_access_course_key(user, action, course_key) def _has_access_string(user, action, perm): """ Check if user has certain special access, specified as string. Valid strings: 'global' Valid actions: 'staff' -- global staff access. 'support' -- access to student support functionality 'certificates' --- access to view and regenerate certificates for other users. """ def check_staff(): """ Checks for staff access """ if perm != 'global': debug("Deny: invalid permission '%s'", perm) return ACCESS_DENIED return ACCESS_GRANTED if GlobalStaff().has_user(user) else ACCESS_DENIED def check_support(): """Check that the user has access to the support UI. 
""" if perm != 'global': return ACCESS_DENIED return ( ACCESS_GRANTED if GlobalStaff().has_user(user) or SupportStaffRole().has_user(user) else ACCESS_DENIED ) checkers = { 'staff': check_staff, 'support': check_support, 'certificates': check_support, } return _dispatch(checkers, action, user, perm) ##### Internal helper methods below def _dispatch(table, action, user, obj): """ Helper: call table[action], raising a nice pretty error if there is no such key. user and object passed in only for error messages and debugging """ if action in table: result = table[action]() debug("%s user %s, object %s, action %s", 'ALLOWED' if result else 'DENIED', user, obj.location.to_deprecated_string() if isinstance(obj, XBlock) else str(obj), action) return result raise ValueError(u"Unknown action for object type '{0}': '{1}'".format( type(obj), action)) def _adjust_start_date_for_beta_testers(user, descriptor, course_key): # pylint: disable=invalid-name """ If user is in a beta test group, adjust the start date by the appropriate number of days. Arguments: user: A django user. May be anonymous. descriptor: the XModuleDescriptor the user is trying to get access to, with a non-None start date. Returns: A datetime. Either the same as start, or earlier for beta testers. NOTE: number of days to adjust should be cached to avoid looking it up thousands of times per query. NOTE: For now, this function assumes that the descriptor's location is in the course the user is looking at. Once we have proper usages and definitions per the XBlock design, this should use the course the usage is in. 
""" return adjust_start_date(user, descriptor.days_early_for_beta, descriptor.start, course_key) def _has_instructor_access_to_location(user, location, course_key=None): if course_key is None: course_key = location.course_key return _has_access_to_course(user, 'instructor', course_key) def _has_staff_access_to_location(user, location, course_key=None): if course_key is None: course_key = location.course_key return _has_access_to_course(user, 'staff', course_key) def _has_access_to_course(user, access_level, course_key): """ Returns True if the given user has access_level (= staff or instructor) access to the course with the given course_key. This ensures the user is authenticated and checks if global staff or has staff / instructor access. access_level = string, either "staff" or "instructor" """ if user is None or (not user.is_authenticated()): debug("Deny: no user or anon user") return ACCESS_DENIED if is_masquerading_as_student(user, course_key): return ACCESS_DENIED if GlobalStaff().has_user(user): debug("Allow: user.is_staff") return ACCESS_GRANTED if access_level not in ('staff', 'instructor'): log.debug("Error in access._has_access_to_course access_level=%s unknown", access_level) debug("Deny: unknown access level") return ACCESS_DENIED staff_access = ( CourseStaffRole(course_key).has_user(user) or OrgStaffRole(course_key.org).has_user(user) ) if staff_access and access_level == 'staff': debug("Allow: user has course staff access") return ACCESS_GRANTED instructor_access = ( CourseInstructorRole(course_key).has_user(user) or OrgInstructorRole(course_key.org).has_user(user) ) if instructor_access and access_level in ('staff', 'instructor'): debug("Allow: user has course instructor access") return ACCESS_GRANTED debug("Deny: user did not have correct access") return ACCESS_DENIED def _has_instructor_access_to_descriptor(user, descriptor, course_key): # pylint: disable=invalid-name """Helper method that checks whether the user has staff access to the course of 
the location. descriptor: something that has a location attribute """ return _has_instructor_access_to_location(user, descriptor.location, course_key) def _has_staff_access_to_descriptor(user, descriptor, course_key): """Helper method that checks whether the user has staff access to the course of the location. descriptor: something that has a location attribute """ return _has_staff_access_to_location(user, descriptor.location, course_key) def _visible_to_nonstaff_users(descriptor): """ Returns if the object is visible to nonstaff users. Arguments: descriptor: object to check """ return VisibilityError() if descriptor.visible_to_staff_only else ACCESS_GRANTED def _has_detached_class_tag(descriptor): """ Returns if the given descriptor's type is marked as detached. Arguments: descriptor: object to check """ return ACCESS_GRANTED if 'detached' in descriptor._class_tags else ACCESS_DENIED # pylint: disable=protected-access def _has_fulfilled_all_milestones(user, course_id): """ Returns whether the given user has fulfilled all milestones for the given course. Arguments: course_id: ID of the course to check user_id: ID of the user to check """ return MilestoneError() if any_unfulfilled_milestones(course_id, user.id) else ACCESS_GRANTED def _has_fulfilled_prerequisites(user, course_id): """ Returns whether the given user has fulfilled all prerequisites for the given course. Arguments: user: user to check course_id: ID of the course to check """ return MilestoneError() if get_pre_requisite_courses_not_completed(user, course_id) else ACCESS_GRANTED def _has_catalog_visibility(course, visibility_type): """ Returns whether the given course has the given visibility type """ return ACCESS_GRANTED if course.catalog_visibility == visibility_type else ACCESS_DENIED def _is_descriptor_mobile_available(descriptor): """ Returns if descriptor is available on mobile. 
""" return ACCESS_GRANTED if descriptor.mobile_available else MobileAvailabilityError() def is_mobile_available_for_user(user, descriptor): """ Returns whether the given course is mobile_available for the given user. Checks: mobile_available flag on the course Beta User and staff access overrides the mobile_available flag Arguments: descriptor (CourseDescriptor|CourseOverview): course or overview of course in question """ return ( auth.user_has_role(user, CourseBetaTesterRole(descriptor.id)) or _has_staff_access_to_descriptor(user, descriptor, descriptor.id) or _is_descriptor_mobile_available(descriptor) ) def get_user_role(user, course_key): """ Return corresponding string if user has staff, instructor or student course role in LMS. """ role = get_masquerade_role(user, course_key) if role: return role elif has_access(user, 'instructor', course_key): return 'instructor' elif has_access(user, 'staff', course_key): return 'staff' else: return 'student'
fnouama/intellij-community
refs/heads/master
python/helpers/pydev/pydevd_attach_to_process/winappdbg/win32/__init__.py
102
#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (c) 2009-2014, Mario Vilas # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice,this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # * Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. """ Debugging API wrappers in ctypes. 
""" __revision__ = "$Id$" from winappdbg.win32 import defines from winappdbg.win32 import kernel32 from winappdbg.win32 import user32 from winappdbg.win32 import advapi32 from winappdbg.win32 import wtsapi32 from winappdbg.win32 import shell32 from winappdbg.win32 import shlwapi from winappdbg.win32 import psapi from winappdbg.win32 import dbghelp from winappdbg.win32 import ntdll from winappdbg.win32.defines import * from winappdbg.win32.kernel32 import * from winappdbg.win32.user32 import * from winappdbg.win32.advapi32 import * from winappdbg.win32.wtsapi32 import * from winappdbg.win32.shell32 import * from winappdbg.win32.shlwapi import * from winappdbg.win32.psapi import * from winappdbg.win32.dbghelp import * from winappdbg.win32.ntdll import * # This calculates the list of exported symbols. _all = set() _all.update(defines._all) _all.update(kernel32._all) _all.update(user32._all) _all.update(advapi32._all) _all.update(wtsapi32._all) _all.update(shell32._all) _all.update(shlwapi._all) _all.update(psapi._all) _all.update(dbghelp._all) _all.update(ntdll._all) __all__ = [_x for _x in _all if not _x.startswith('_')] __all__.sort()
CDE-UNIBE/qcat
refs/heads/develop
apps/questionnaire/migrations/0018_auto_20170810_1651.py
1
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('questionnaire', '0017_auto_20170313_1724'), ] operations = [ migrations.AlterField( model_name='questionnairetranslation', name='language', field=models.CharField(max_length=63, choices=[('en', 'English'), ('fr', 'French'), ('es', 'Spanish'), ('ru', 'Russian'), ('km', 'Khmer'), ('lo', 'Lao'), ('ar', 'Arabic'), ('pt', 'Portuguese'), ('af', 'Afrikaans')]), ), migrations.AlterUniqueTogether( name='questionnaire', unique_together=set([('code', 'version')]), ), ]
michaelni/audacity
refs/heads/master
lib-src/libsndfile/programs/test-sndfile-metadata-set.py
44
#!/usr/bin/python # Copyright (C) 2008-2011 Erik de Castro Lopo <erikd@mega-nerd.com> # # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in # the documentation and/or other materials provided with the # distribution. # * Neither the author nor the names of any contributors may be used # to endorse or promote products derived from this software without # specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED # TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; # OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR # OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF # ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # Simple test script for the sndfile-metadata-set program. import commands, os, sys import time, datetime def print_test_name (name): print " %-30s :" % name, def assert_info (filename, arg, value): cmd = "./sndfile-metadata-get %s %s" % (arg, filename) status, output = commands.getstatusoutput (cmd) if status: print "\n\nError : command '%s' should not have failed." 
% cmd sys.exit (1) if output.find (value) < 0: print "\n\nError : not able to find '%s'." % value print output sys.exit (1) return def check_executable (name): if not (os.path.isfile (name)): print "\n\nError : Can't find executable '%s'. Have you run make?" % name sys.exit (1) def test_empty_fail (): print_test_name ("Empty fail test") cmd = "./sndfile-metadata-set --bext-description Alpha sine.wav" status, output = commands.getstatusoutput (cmd) if not status: print "\n\nError : command '%s' should have failed." % cmd sys.exit (1) print "ok" def test_copy (): print_test_name ("Copy test") cmd = "./sndfile-metadata-set --bext-description \"First Try\" sine.wav output.wav" status, output = commands.getstatusoutput (cmd) if status: print "\n\nError : command '%s' should not have failed." % cmd sys.exit (1) assert_info ("output.wav", "--bext-description", "First Try") print "ok" def test_update (tests): print_test_name ("Update test") for arg, value in tests: cmd = "./sndfile-metadata-set %s \"%s\" output.wav" % (arg, value) status, output = commands.getstatusoutput (cmd) if status: print "\n\nError : command '%s' should not have failed." % cmd sys.exit (1) assert_info ("output.wav", arg, value) print "ok" def test_post_mod (tests): print_test_name ("Post mod test") for arg, value in tests: assert_info ("output.wav", arg, value) print "ok" def test_auto_date (): print_test_name ("Auto date test") cmd = "./sndfile-metadata-set --bext-auto-time-date sine.wav date-time.wav" status, output = commands.getstatusoutput (cmd) if status: print "\n\nError : command '%s' should not have failed." 
% cmd sys.exit (1) target = datetime.date.today ().__str__ () assert_info ("date-time.wav", "--bext-orig-date", target) print "ok" #------------------------------------------------------------------------------- def test_coding_history (): print_test_name ("Coding history test") cmd = "./sndfile-metadata-set --bext-coding-hist \"alpha beta\" output.wav" status, output = commands.getstatusoutput (cmd) if status: print "\n\nError : command '%s' should not have failed." % cmd sys.exit (1) cmd = "./sndfile-metadata-get --bext-coding-hist output.wav" status, output = commands.getstatusoutput (cmd) if status: print "\n\nError : command '%s' should not have failed." % cmd sys.exit (1) print "ok" #------------------------------------------------------------------------------- def test_rewrite (): print_test_name ("Rewrite test") cmd = "./sndfile-metadata-set --bext-originator \"Really, really long string\" output.wav" status, output = commands.getstatusoutput (cmd) if status: print "\n\nError : command '%s' should not have failed." % cmd sys.exit (1) cmd = "./sndfile-metadata-set --bext-originator \"Short\" output.wav" status, output = commands.getstatusoutput (cmd) if status: print "\n\nError : command '%s' should not have failed." % cmd sys.exit (1) cmd = "./sndfile-metadata-get --bext-originator output.wav" status, output = commands.getstatusoutput (cmd) if status: print "\n\nError : command '%s' should not have failed." % cmd sys.exit (1) if output.find ("really long") > 0: print "\n\nError : output '%s' should not contain 'really long'." % output sys.exit (1) print "ok" #=============================================================================== test_dir = "programs" if os.path.isdir (test_dir): os.chdir (test_dir) for f in [ "sndfile-metadata-set", "sndfile-metadata-get", "../examples/make_sine" ]: check_executable (f) os.system ("../examples/make_sine") if not os.path.isfile ("sine.wav"): print "\n\nError : Can't file file 'sine.wav'." 
sys.exit (1) print "" test_empty_fail () test_copy () tests = [ ("--bext-description", "Alpha"), ("--bext-originator", "Beta"), ("--bext-orig-ref", "Charlie"), ("--bext-umid", "Delta"), ("--bext-orig-date", "2001-10-01"), ("--bext-orig-time", "01:02:03"), ("--str-title", "Echo"), ("--str-artist", "Fox trot") ] test_auto_date () test_update (tests) test_post_mod (tests) test_update ([ ("--str-artist", "Fox") ]) # This never worked. # test_coding_history () test_rewrite () print "" sys.exit (0)