repo_name
stringlengths
5
100
path
stringlengths
4
294
copies
stringclasses
990 values
size
stringlengths
4
7
content
stringlengths
666
1M
license
stringclasses
15 values
rTreutlein/atomspace
tests/cython/atomspace/test_linkvalue_containing_atoms.py
2
3093
import unittest import re from opencog.atomspace import AtomSpace from opencog.type_constructors import * from opencog.utilities import initialize_opencog, finalize_opencog class LinkValueContainingAtomsTest(unittest.TestCase): def setUp(self): self.space = AtomSpace() initialize_opencog(self.space) def tearDown(self): finalize_opencog() del self.space def test_create_single_value(self): value = LinkValue(StringValue('foo')) self.assertTrue(value is not None) def test_create_list_value(self): value = LinkValue([FloatValue(1), StringValue('foo'), ConceptNode('bar')]) self.assertTrue(value is not None) def test_value_equals(self): self.assertEqual(LinkValue(StringValue('foo')), LinkValue([StringValue('foo')])) self.assertEqual(LinkValue([FloatValue(1), StringValue('foo'), ConceptNode('bar')]), LinkValue([FloatValue(1), StringValue('foo'), ConceptNode('bar')])) self.assertNotEqual(LinkValue(FloatValue(1)), LinkValue(FloatValue(2))) self.assertNotEqual(LinkValue([ConceptNode('bar'), FloatValue(1), StringValue('foo')]), LinkValue([StringValue('foo'), FloatValue(1), ConceptNode('bar')])) def test_add_value_to_atom(self): atom = ConceptNode('foo') key = PredicateNode('bar') value = LinkValue([StringValue('a'), FloatValue(1), ConceptNode('bar')]) atom.set_value(key, value) self.assertEqual(LinkValue([StringValue('a'), FloatValue(1), ConceptNode('bar')]), atom.get_value(key)) def test_get_list_of_items_from_value(self): value = LinkValue([FloatValue(1), StringValue('foo'), ConceptNode('bar')]) self.assertEqual([FloatValue(1), StringValue('foo'), ConceptNode('bar')], value.to_list()) def test_str(self): value = LinkValue([FloatValue(1), StringValue('foo'), ConceptNode('bar')]) print(str(value)) self.assertTrue(re.fullmatch( '\(LinkValue\n' ' \(FloatValue 1\)\n' ' \(StringValue "foo"\)\n' ' \(ConceptNode "bar"\) ; \[\d+\]\[\d+\]\n' '\)\n', str(value))) def test_is_a(self): value = LinkValue([FloatValue(1), StringValue('foo'), ConceptNode('bar')]) 
self.assertEqual(types.LinkValue, value.type) self.assertEqual('LinkValue', value.type_name) self.assertFalse(value.is_node()) self.assertFalse(value.is_atom()) self.assertFalse(value.is_link()) self.assertTrue(value.is_a(types.Value)) if __name__ == '__main__': unittest.main()
agpl-3.0
ArcherSys/ArcherSys
Lib/test/test_selectors.py
1
43481
<<<<<<< HEAD <<<<<<< HEAD import errno import os import random import selectors import signal import socket import sys from test import support from time import sleep import unittest import unittest.mock try: from time import monotonic as time except ImportError: from time import time as time try: import resource except ImportError: resource = None if hasattr(socket, 'socketpair'): socketpair = socket.socketpair else: def socketpair(family=socket.AF_INET, type=socket.SOCK_STREAM, proto=0): with socket.socket(family, type, proto) as l: l.bind((support.HOST, 0)) l.listen(3) c = socket.socket(family, type, proto) try: c.connect(l.getsockname()) caddr = c.getsockname() while True: a, addr = l.accept() # check that we've got the correct client if addr == caddr: return c, a a.close() except OSError: c.close() raise def find_ready_matching(ready, flag): match = [] for key, events in ready: if events & flag: match.append(key.fileobj) return match class BaseSelectorTestCase(unittest.TestCase): def make_socketpair(self): rd, wr = socketpair() self.addCleanup(rd.close) self.addCleanup(wr.close) return rd, wr def test_register(self): s = self.SELECTOR() self.addCleanup(s.close) rd, wr = self.make_socketpair() key = s.register(rd, selectors.EVENT_READ, "data") self.assertIsInstance(key, selectors.SelectorKey) self.assertEqual(key.fileobj, rd) self.assertEqual(key.fd, rd.fileno()) self.assertEqual(key.events, selectors.EVENT_READ) self.assertEqual(key.data, "data") # register an unknown event self.assertRaises(ValueError, s.register, 0, 999999) # register an invalid FD self.assertRaises(ValueError, s.register, -10, selectors.EVENT_READ) # register twice self.assertRaises(KeyError, s.register, rd, selectors.EVENT_READ) # register the same FD, but with a different object self.assertRaises(KeyError, s.register, rd.fileno(), selectors.EVENT_READ) def test_unregister(self): s = self.SELECTOR() self.addCleanup(s.close) rd, wr = self.make_socketpair() s.register(rd, 
selectors.EVENT_READ) s.unregister(rd) # unregister an unknown file obj self.assertRaises(KeyError, s.unregister, 999999) # unregister twice self.assertRaises(KeyError, s.unregister, rd) def test_unregister_after_fd_close(self): s = self.SELECTOR() self.addCleanup(s.close) rd, wr = self.make_socketpair() r, w = rd.fileno(), wr.fileno() s.register(r, selectors.EVENT_READ) s.register(w, selectors.EVENT_WRITE) rd.close() wr.close() s.unregister(r) s.unregister(w) @unittest.skipUnless(os.name == 'posix', "requires posix") def test_unregister_after_fd_close_and_reuse(self): s = self.SELECTOR() self.addCleanup(s.close) rd, wr = self.make_socketpair() r, w = rd.fileno(), wr.fileno() s.register(r, selectors.EVENT_READ) s.register(w, selectors.EVENT_WRITE) rd2, wr2 = self.make_socketpair() rd.close() wr.close() os.dup2(rd2.fileno(), r) os.dup2(wr2.fileno(), w) self.addCleanup(os.close, r) self.addCleanup(os.close, w) s.unregister(r) s.unregister(w) def test_unregister_after_socket_close(self): s = self.SELECTOR() self.addCleanup(s.close) rd, wr = self.make_socketpair() s.register(rd, selectors.EVENT_READ) s.register(wr, selectors.EVENT_WRITE) rd.close() wr.close() s.unregister(rd) s.unregister(wr) def test_modify(self): s = self.SELECTOR() self.addCleanup(s.close) rd, wr = self.make_socketpair() key = s.register(rd, selectors.EVENT_READ) # modify events key2 = s.modify(rd, selectors.EVENT_WRITE) self.assertNotEqual(key.events, key2.events) self.assertEqual(key2, s.get_key(rd)) s.unregister(rd) # modify data d1 = object() d2 = object() key = s.register(rd, selectors.EVENT_READ, d1) key2 = s.modify(rd, selectors.EVENT_READ, d2) self.assertEqual(key.events, key2.events) self.assertNotEqual(key.data, key2.data) self.assertEqual(key2, s.get_key(rd)) self.assertEqual(key2.data, d2) # modify unknown file obj self.assertRaises(KeyError, s.modify, 999999, selectors.EVENT_READ) # modify use a shortcut d3 = object() s.register = unittest.mock.Mock() s.unregister = unittest.mock.Mock() 
s.modify(rd, selectors.EVENT_READ, d3) self.assertFalse(s.register.called) self.assertFalse(s.unregister.called) def test_close(self): s = self.SELECTOR() self.addCleanup(s.close) mapping = s.get_map() rd, wr = self.make_socketpair() s.register(rd, selectors.EVENT_READ) s.register(wr, selectors.EVENT_WRITE) s.close() self.assertRaises(KeyError, s.get_key, rd) self.assertRaises(KeyError, s.get_key, wr) self.assertRaises(KeyError, mapping.__getitem__, rd) self.assertRaises(KeyError, mapping.__getitem__, wr) def test_get_key(self): s = self.SELECTOR() self.addCleanup(s.close) rd, wr = self.make_socketpair() key = s.register(rd, selectors.EVENT_READ, "data") self.assertEqual(key, s.get_key(rd)) # unknown file obj self.assertRaises(KeyError, s.get_key, 999999) def test_get_map(self): s = self.SELECTOR() self.addCleanup(s.close) rd, wr = self.make_socketpair() keys = s.get_map() self.assertFalse(keys) self.assertEqual(len(keys), 0) self.assertEqual(list(keys), []) key = s.register(rd, selectors.EVENT_READ, "data") self.assertIn(rd, keys) self.assertEqual(key, keys[rd]) self.assertEqual(len(keys), 1) self.assertEqual(list(keys), [rd.fileno()]) self.assertEqual(list(keys.values()), [key]) # unknown file obj with self.assertRaises(KeyError): keys[999999] # Read-only mapping with self.assertRaises(TypeError): del keys[rd] def test_select(self): s = self.SELECTOR() self.addCleanup(s.close) rd, wr = self.make_socketpair() s.register(rd, selectors.EVENT_READ) wr_key = s.register(wr, selectors.EVENT_WRITE) result = s.select() for key, events in result: self.assertTrue(isinstance(key, selectors.SelectorKey)) self.assertTrue(events) self.assertFalse(events & ~(selectors.EVENT_READ | selectors.EVENT_WRITE)) self.assertEqual([(wr_key, selectors.EVENT_WRITE)], result) def test_context_manager(self): s = self.SELECTOR() self.addCleanup(s.close) rd, wr = self.make_socketpair() with s as sel: sel.register(rd, selectors.EVENT_READ) sel.register(wr, selectors.EVENT_WRITE) 
self.assertRaises(KeyError, s.get_key, rd) self.assertRaises(KeyError, s.get_key, wr) def test_fileno(self): s = self.SELECTOR() self.addCleanup(s.close) if hasattr(s, 'fileno'): fd = s.fileno() self.assertTrue(isinstance(fd, int)) self.assertGreaterEqual(fd, 0) def test_selector(self): s = self.SELECTOR() self.addCleanup(s.close) NUM_SOCKETS = 12 MSG = b" This is a test." MSG_LEN = len(MSG) readers = [] writers = [] r2w = {} w2r = {} for i in range(NUM_SOCKETS): rd, wr = self.make_socketpair() s.register(rd, selectors.EVENT_READ) s.register(wr, selectors.EVENT_WRITE) readers.append(rd) writers.append(wr) r2w[rd] = wr w2r[wr] = rd bufs = [] while writers: ready = s.select() ready_writers = find_ready_matching(ready, selectors.EVENT_WRITE) if not ready_writers: self.fail("no sockets ready for writing") wr = random.choice(ready_writers) wr.send(MSG) for i in range(10): ready = s.select() ready_readers = find_ready_matching(ready, selectors.EVENT_READ) if ready_readers: break # there might be a delay between the write to the write end and # the read end is reported ready sleep(0.1) else: self.fail("no sockets ready for reading") self.assertEqual([w2r[wr]], ready_readers) rd = ready_readers[0] buf = rd.recv(MSG_LEN) self.assertEqual(len(buf), MSG_LEN) bufs.append(buf) s.unregister(r2w[rd]) s.unregister(rd) writers.remove(r2w[rd]) self.assertEqual(bufs, [MSG] * NUM_SOCKETS) @unittest.skipIf(sys.platform == 'win32', 'select.select() cannot be used with empty fd sets') def test_empty_select(self): # Issue #23009: Make sure EpollSelector.select() works when no FD is # registered. 
s = self.SELECTOR() self.addCleanup(s.close) self.assertEqual(s.select(timeout=0), []) def test_timeout(self): s = self.SELECTOR() self.addCleanup(s.close) rd, wr = self.make_socketpair() s.register(wr, selectors.EVENT_WRITE) t = time() self.assertEqual(1, len(s.select(0))) self.assertEqual(1, len(s.select(-1))) self.assertLess(time() - t, 0.5) s.unregister(wr) s.register(rd, selectors.EVENT_READ) t = time() self.assertFalse(s.select(0)) self.assertFalse(s.select(-1)) self.assertLess(time() - t, 0.5) t0 = time() self.assertFalse(s.select(1)) t1 = time() dt = t1 - t0 # Tolerate 2.0 seconds for very slow buildbots self.assertTrue(0.8 <= dt <= 2.0, dt) @unittest.skipUnless(hasattr(signal, "alarm"), "signal.alarm() required for this test") def test_select_interrupt(self): s = self.SELECTOR() self.addCleanup(s.close) rd, wr = self.make_socketpair() orig_alrm_handler = signal.signal(signal.SIGALRM, lambda *args: None) self.addCleanup(signal.signal, signal.SIGALRM, orig_alrm_handler) self.addCleanup(signal.alarm, 0) signal.alarm(1) s.register(rd, selectors.EVENT_READ) t = time() self.assertFalse(s.select(2)) self.assertLess(time() - t, 2.5) class ScalableSelectorMixIn: # see issue #18963 for why it's skipped on older OS X versions @support.requires_mac_ver(10, 5) @unittest.skipUnless(resource, "Test needs resource module") def test_above_fd_setsize(self): # A scalable implementation should have no problem with more than # FD_SETSIZE file descriptors. Since we don't know the value, we just # try to set the soft RLIMIT_NOFILE to the hard RLIMIT_NOFILE ceiling. soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE) try: resource.setrlimit(resource.RLIMIT_NOFILE, (hard, hard)) self.addCleanup(resource.setrlimit, resource.RLIMIT_NOFILE, (soft, hard)) NUM_FDS = min(hard, 2**16) except (OSError, ValueError): NUM_FDS = soft # guard for already allocated FDs (stdin, stdout...) 
NUM_FDS -= 32 s = self.SELECTOR() self.addCleanup(s.close) for i in range(NUM_FDS // 2): try: rd, wr = self.make_socketpair() except OSError: # too many FDs, skip - note that we should only catch EMFILE # here, but apparently *BSD and Solaris can fail upon connect() # or bind() with EADDRNOTAVAIL, so let's be safe self.skipTest("FD limit reached") try: s.register(rd, selectors.EVENT_READ) s.register(wr, selectors.EVENT_WRITE) except OSError as e: if e.errno == errno.ENOSPC: # this can be raised by epoll if we go over # fs.epoll.max_user_watches sysctl self.skipTest("FD limit reached") raise self.assertEqual(NUM_FDS // 2, len(s.select())) class DefaultSelectorTestCase(BaseSelectorTestCase): SELECTOR = selectors.DefaultSelector class SelectSelectorTestCase(BaseSelectorTestCase): SELECTOR = selectors.SelectSelector @unittest.skipUnless(hasattr(selectors, 'PollSelector'), "Test needs selectors.PollSelector") class PollSelectorTestCase(BaseSelectorTestCase, ScalableSelectorMixIn): SELECTOR = getattr(selectors, 'PollSelector', None) @unittest.skipUnless(hasattr(selectors, 'EpollSelector'), "Test needs selectors.EpollSelector") class EpollSelectorTestCase(BaseSelectorTestCase, ScalableSelectorMixIn): SELECTOR = getattr(selectors, 'EpollSelector', None) @unittest.skipUnless(hasattr(selectors, 'KqueueSelector'), "Test needs selectors.KqueueSelector)") class KqueueSelectorTestCase(BaseSelectorTestCase, ScalableSelectorMixIn): SELECTOR = getattr(selectors, 'KqueueSelector', None) def test_main(): tests = [DefaultSelectorTestCase, SelectSelectorTestCase, PollSelectorTestCase, EpollSelectorTestCase, KqueueSelectorTestCase] support.run_unittest(*tests) support.reap_children() if __name__ == "__main__": test_main() ======= import errno import os import random import selectors import signal import socket import sys from test import support from time import sleep import unittest import unittest.mock try: from time import monotonic as time except ImportError: from time import time 
as time try: import resource except ImportError: resource = None if hasattr(socket, 'socketpair'): socketpair = socket.socketpair else: def socketpair(family=socket.AF_INET, type=socket.SOCK_STREAM, proto=0): with socket.socket(family, type, proto) as l: l.bind((support.HOST, 0)) l.listen(3) c = socket.socket(family, type, proto) try: c.connect(l.getsockname()) caddr = c.getsockname() while True: a, addr = l.accept() # check that we've got the correct client if addr == caddr: return c, a a.close() except OSError: c.close() raise def find_ready_matching(ready, flag): match = [] for key, events in ready: if events & flag: match.append(key.fileobj) return match class BaseSelectorTestCase(unittest.TestCase): def make_socketpair(self): rd, wr = socketpair() self.addCleanup(rd.close) self.addCleanup(wr.close) return rd, wr def test_register(self): s = self.SELECTOR() self.addCleanup(s.close) rd, wr = self.make_socketpair() key = s.register(rd, selectors.EVENT_READ, "data") self.assertIsInstance(key, selectors.SelectorKey) self.assertEqual(key.fileobj, rd) self.assertEqual(key.fd, rd.fileno()) self.assertEqual(key.events, selectors.EVENT_READ) self.assertEqual(key.data, "data") # register an unknown event self.assertRaises(ValueError, s.register, 0, 999999) # register an invalid FD self.assertRaises(ValueError, s.register, -10, selectors.EVENT_READ) # register twice self.assertRaises(KeyError, s.register, rd, selectors.EVENT_READ) # register the same FD, but with a different object self.assertRaises(KeyError, s.register, rd.fileno(), selectors.EVENT_READ) def test_unregister(self): s = self.SELECTOR() self.addCleanup(s.close) rd, wr = self.make_socketpair() s.register(rd, selectors.EVENT_READ) s.unregister(rd) # unregister an unknown file obj self.assertRaises(KeyError, s.unregister, 999999) # unregister twice self.assertRaises(KeyError, s.unregister, rd) def test_unregister_after_fd_close(self): s = self.SELECTOR() self.addCleanup(s.close) rd, wr = self.make_socketpair() 
r, w = rd.fileno(), wr.fileno() s.register(r, selectors.EVENT_READ) s.register(w, selectors.EVENT_WRITE) rd.close() wr.close() s.unregister(r) s.unregister(w) @unittest.skipUnless(os.name == 'posix', "requires posix") def test_unregister_after_fd_close_and_reuse(self): s = self.SELECTOR() self.addCleanup(s.close) rd, wr = self.make_socketpair() r, w = rd.fileno(), wr.fileno() s.register(r, selectors.EVENT_READ) s.register(w, selectors.EVENT_WRITE) rd2, wr2 = self.make_socketpair() rd.close() wr.close() os.dup2(rd2.fileno(), r) os.dup2(wr2.fileno(), w) self.addCleanup(os.close, r) self.addCleanup(os.close, w) s.unregister(r) s.unregister(w) def test_unregister_after_socket_close(self): s = self.SELECTOR() self.addCleanup(s.close) rd, wr = self.make_socketpair() s.register(rd, selectors.EVENT_READ) s.register(wr, selectors.EVENT_WRITE) rd.close() wr.close() s.unregister(rd) s.unregister(wr) def test_modify(self): s = self.SELECTOR() self.addCleanup(s.close) rd, wr = self.make_socketpair() key = s.register(rd, selectors.EVENT_READ) # modify events key2 = s.modify(rd, selectors.EVENT_WRITE) self.assertNotEqual(key.events, key2.events) self.assertEqual(key2, s.get_key(rd)) s.unregister(rd) # modify data d1 = object() d2 = object() key = s.register(rd, selectors.EVENT_READ, d1) key2 = s.modify(rd, selectors.EVENT_READ, d2) self.assertEqual(key.events, key2.events) self.assertNotEqual(key.data, key2.data) self.assertEqual(key2, s.get_key(rd)) self.assertEqual(key2.data, d2) # modify unknown file obj self.assertRaises(KeyError, s.modify, 999999, selectors.EVENT_READ) # modify use a shortcut d3 = object() s.register = unittest.mock.Mock() s.unregister = unittest.mock.Mock() s.modify(rd, selectors.EVENT_READ, d3) self.assertFalse(s.register.called) self.assertFalse(s.unregister.called) def test_close(self): s = self.SELECTOR() self.addCleanup(s.close) mapping = s.get_map() rd, wr = self.make_socketpair() s.register(rd, selectors.EVENT_READ) s.register(wr, 
selectors.EVENT_WRITE) s.close() self.assertRaises(KeyError, s.get_key, rd) self.assertRaises(KeyError, s.get_key, wr) self.assertRaises(KeyError, mapping.__getitem__, rd) self.assertRaises(KeyError, mapping.__getitem__, wr) def test_get_key(self): s = self.SELECTOR() self.addCleanup(s.close) rd, wr = self.make_socketpair() key = s.register(rd, selectors.EVENT_READ, "data") self.assertEqual(key, s.get_key(rd)) # unknown file obj self.assertRaises(KeyError, s.get_key, 999999) def test_get_map(self): s = self.SELECTOR() self.addCleanup(s.close) rd, wr = self.make_socketpair() keys = s.get_map() self.assertFalse(keys) self.assertEqual(len(keys), 0) self.assertEqual(list(keys), []) key = s.register(rd, selectors.EVENT_READ, "data") self.assertIn(rd, keys) self.assertEqual(key, keys[rd]) self.assertEqual(len(keys), 1) self.assertEqual(list(keys), [rd.fileno()]) self.assertEqual(list(keys.values()), [key]) # unknown file obj with self.assertRaises(KeyError): keys[999999] # Read-only mapping with self.assertRaises(TypeError): del keys[rd] def test_select(self): s = self.SELECTOR() self.addCleanup(s.close) rd, wr = self.make_socketpair() s.register(rd, selectors.EVENT_READ) wr_key = s.register(wr, selectors.EVENT_WRITE) result = s.select() for key, events in result: self.assertTrue(isinstance(key, selectors.SelectorKey)) self.assertTrue(events) self.assertFalse(events & ~(selectors.EVENT_READ | selectors.EVENT_WRITE)) self.assertEqual([(wr_key, selectors.EVENT_WRITE)], result) def test_context_manager(self): s = self.SELECTOR() self.addCleanup(s.close) rd, wr = self.make_socketpair() with s as sel: sel.register(rd, selectors.EVENT_READ) sel.register(wr, selectors.EVENT_WRITE) self.assertRaises(KeyError, s.get_key, rd) self.assertRaises(KeyError, s.get_key, wr) def test_fileno(self): s = self.SELECTOR() self.addCleanup(s.close) if hasattr(s, 'fileno'): fd = s.fileno() self.assertTrue(isinstance(fd, int)) self.assertGreaterEqual(fd, 0) def test_selector(self): s = 
self.SELECTOR() self.addCleanup(s.close) NUM_SOCKETS = 12 MSG = b" This is a test." MSG_LEN = len(MSG) readers = [] writers = [] r2w = {} w2r = {} for i in range(NUM_SOCKETS): rd, wr = self.make_socketpair() s.register(rd, selectors.EVENT_READ) s.register(wr, selectors.EVENT_WRITE) readers.append(rd) writers.append(wr) r2w[rd] = wr w2r[wr] = rd bufs = [] while writers: ready = s.select() ready_writers = find_ready_matching(ready, selectors.EVENT_WRITE) if not ready_writers: self.fail("no sockets ready for writing") wr = random.choice(ready_writers) wr.send(MSG) for i in range(10): ready = s.select() ready_readers = find_ready_matching(ready, selectors.EVENT_READ) if ready_readers: break # there might be a delay between the write to the write end and # the read end is reported ready sleep(0.1) else: self.fail("no sockets ready for reading") self.assertEqual([w2r[wr]], ready_readers) rd = ready_readers[0] buf = rd.recv(MSG_LEN) self.assertEqual(len(buf), MSG_LEN) bufs.append(buf) s.unregister(r2w[rd]) s.unregister(rd) writers.remove(r2w[rd]) self.assertEqual(bufs, [MSG] * NUM_SOCKETS) @unittest.skipIf(sys.platform == 'win32', 'select.select() cannot be used with empty fd sets') def test_empty_select(self): # Issue #23009: Make sure EpollSelector.select() works when no FD is # registered. 
s = self.SELECTOR() self.addCleanup(s.close) self.assertEqual(s.select(timeout=0), []) def test_timeout(self): s = self.SELECTOR() self.addCleanup(s.close) rd, wr = self.make_socketpair() s.register(wr, selectors.EVENT_WRITE) t = time() self.assertEqual(1, len(s.select(0))) self.assertEqual(1, len(s.select(-1))) self.assertLess(time() - t, 0.5) s.unregister(wr) s.register(rd, selectors.EVENT_READ) t = time() self.assertFalse(s.select(0)) self.assertFalse(s.select(-1)) self.assertLess(time() - t, 0.5) t0 = time() self.assertFalse(s.select(1)) t1 = time() dt = t1 - t0 # Tolerate 2.0 seconds for very slow buildbots self.assertTrue(0.8 <= dt <= 2.0, dt) @unittest.skipUnless(hasattr(signal, "alarm"), "signal.alarm() required for this test") def test_select_interrupt(self): s = self.SELECTOR() self.addCleanup(s.close) rd, wr = self.make_socketpair() orig_alrm_handler = signal.signal(signal.SIGALRM, lambda *args: None) self.addCleanup(signal.signal, signal.SIGALRM, orig_alrm_handler) self.addCleanup(signal.alarm, 0) signal.alarm(1) s.register(rd, selectors.EVENT_READ) t = time() self.assertFalse(s.select(2)) self.assertLess(time() - t, 2.5) class ScalableSelectorMixIn: # see issue #18963 for why it's skipped on older OS X versions @support.requires_mac_ver(10, 5) @unittest.skipUnless(resource, "Test needs resource module") def test_above_fd_setsize(self): # A scalable implementation should have no problem with more than # FD_SETSIZE file descriptors. Since we don't know the value, we just # try to set the soft RLIMIT_NOFILE to the hard RLIMIT_NOFILE ceiling. soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE) try: resource.setrlimit(resource.RLIMIT_NOFILE, (hard, hard)) self.addCleanup(resource.setrlimit, resource.RLIMIT_NOFILE, (soft, hard)) NUM_FDS = min(hard, 2**16) except (OSError, ValueError): NUM_FDS = soft # guard for already allocated FDs (stdin, stdout...) 
NUM_FDS -= 32 s = self.SELECTOR() self.addCleanup(s.close) for i in range(NUM_FDS // 2): try: rd, wr = self.make_socketpair() except OSError: # too many FDs, skip - note that we should only catch EMFILE # here, but apparently *BSD and Solaris can fail upon connect() # or bind() with EADDRNOTAVAIL, so let's be safe self.skipTest("FD limit reached") try: s.register(rd, selectors.EVENT_READ) s.register(wr, selectors.EVENT_WRITE) except OSError as e: if e.errno == errno.ENOSPC: # this can be raised by epoll if we go over # fs.epoll.max_user_watches sysctl self.skipTest("FD limit reached") raise self.assertEqual(NUM_FDS // 2, len(s.select())) class DefaultSelectorTestCase(BaseSelectorTestCase): SELECTOR = selectors.DefaultSelector class SelectSelectorTestCase(BaseSelectorTestCase): SELECTOR = selectors.SelectSelector @unittest.skipUnless(hasattr(selectors, 'PollSelector'), "Test needs selectors.PollSelector") class PollSelectorTestCase(BaseSelectorTestCase, ScalableSelectorMixIn): SELECTOR = getattr(selectors, 'PollSelector', None) @unittest.skipUnless(hasattr(selectors, 'EpollSelector'), "Test needs selectors.EpollSelector") class EpollSelectorTestCase(BaseSelectorTestCase, ScalableSelectorMixIn): SELECTOR = getattr(selectors, 'EpollSelector', None) @unittest.skipUnless(hasattr(selectors, 'KqueueSelector'), "Test needs selectors.KqueueSelector)") class KqueueSelectorTestCase(BaseSelectorTestCase, ScalableSelectorMixIn): SELECTOR = getattr(selectors, 'KqueueSelector', None) def test_main(): tests = [DefaultSelectorTestCase, SelectSelectorTestCase, PollSelectorTestCase, EpollSelectorTestCase, KqueueSelectorTestCase] support.run_unittest(*tests) support.reap_children() if __name__ == "__main__": test_main() >>>>>>> b875702c9c06ab5012e52ff4337439b03918f453 ======= import errno import os import random import selectors import signal import socket import sys from test import support from time import sleep import unittest import unittest.mock try: from time import monotonic as 
time except ImportError: from time import time as time try: import resource except ImportError: resource = None if hasattr(socket, 'socketpair'): socketpair = socket.socketpair else: def socketpair(family=socket.AF_INET, type=socket.SOCK_STREAM, proto=0): with socket.socket(family, type, proto) as l: l.bind((support.HOST, 0)) l.listen(3) c = socket.socket(family, type, proto) try: c.connect(l.getsockname()) caddr = c.getsockname() while True: a, addr = l.accept() # check that we've got the correct client if addr == caddr: return c, a a.close() except OSError: c.close() raise def find_ready_matching(ready, flag): match = [] for key, events in ready: if events & flag: match.append(key.fileobj) return match class BaseSelectorTestCase(unittest.TestCase): def make_socketpair(self): rd, wr = socketpair() self.addCleanup(rd.close) self.addCleanup(wr.close) return rd, wr def test_register(self): s = self.SELECTOR() self.addCleanup(s.close) rd, wr = self.make_socketpair() key = s.register(rd, selectors.EVENT_READ, "data") self.assertIsInstance(key, selectors.SelectorKey) self.assertEqual(key.fileobj, rd) self.assertEqual(key.fd, rd.fileno()) self.assertEqual(key.events, selectors.EVENT_READ) self.assertEqual(key.data, "data") # register an unknown event self.assertRaises(ValueError, s.register, 0, 999999) # register an invalid FD self.assertRaises(ValueError, s.register, -10, selectors.EVENT_READ) # register twice self.assertRaises(KeyError, s.register, rd, selectors.EVENT_READ) # register the same FD, but with a different object self.assertRaises(KeyError, s.register, rd.fileno(), selectors.EVENT_READ) def test_unregister(self): s = self.SELECTOR() self.addCleanup(s.close) rd, wr = self.make_socketpair() s.register(rd, selectors.EVENT_READ) s.unregister(rd) # unregister an unknown file obj self.assertRaises(KeyError, s.unregister, 999999) # unregister twice self.assertRaises(KeyError, s.unregister, rd) def test_unregister_after_fd_close(self): s = self.SELECTOR() 
self.addCleanup(s.close) rd, wr = self.make_socketpair() r, w = rd.fileno(), wr.fileno() s.register(r, selectors.EVENT_READ) s.register(w, selectors.EVENT_WRITE) rd.close() wr.close() s.unregister(r) s.unregister(w) @unittest.skipUnless(os.name == 'posix', "requires posix") def test_unregister_after_fd_close_and_reuse(self): s = self.SELECTOR() self.addCleanup(s.close) rd, wr = self.make_socketpair() r, w = rd.fileno(), wr.fileno() s.register(r, selectors.EVENT_READ) s.register(w, selectors.EVENT_WRITE) rd2, wr2 = self.make_socketpair() rd.close() wr.close() os.dup2(rd2.fileno(), r) os.dup2(wr2.fileno(), w) self.addCleanup(os.close, r) self.addCleanup(os.close, w) s.unregister(r) s.unregister(w) def test_unregister_after_socket_close(self): s = self.SELECTOR() self.addCleanup(s.close) rd, wr = self.make_socketpair() s.register(rd, selectors.EVENT_READ) s.register(wr, selectors.EVENT_WRITE) rd.close() wr.close() s.unregister(rd) s.unregister(wr) def test_modify(self): s = self.SELECTOR() self.addCleanup(s.close) rd, wr = self.make_socketpair() key = s.register(rd, selectors.EVENT_READ) # modify events key2 = s.modify(rd, selectors.EVENT_WRITE) self.assertNotEqual(key.events, key2.events) self.assertEqual(key2, s.get_key(rd)) s.unregister(rd) # modify data d1 = object() d2 = object() key = s.register(rd, selectors.EVENT_READ, d1) key2 = s.modify(rd, selectors.EVENT_READ, d2) self.assertEqual(key.events, key2.events) self.assertNotEqual(key.data, key2.data) self.assertEqual(key2, s.get_key(rd)) self.assertEqual(key2.data, d2) # modify unknown file obj self.assertRaises(KeyError, s.modify, 999999, selectors.EVENT_READ) # modify use a shortcut d3 = object() s.register = unittest.mock.Mock() s.unregister = unittest.mock.Mock() s.modify(rd, selectors.EVENT_READ, d3) self.assertFalse(s.register.called) self.assertFalse(s.unregister.called) def test_close(self): s = self.SELECTOR() self.addCleanup(s.close) mapping = s.get_map() rd, wr = self.make_socketpair() s.register(rd, 
selectors.EVENT_READ) s.register(wr, selectors.EVENT_WRITE) s.close() self.assertRaises(KeyError, s.get_key, rd) self.assertRaises(KeyError, s.get_key, wr) self.assertRaises(KeyError, mapping.__getitem__, rd) self.assertRaises(KeyError, mapping.__getitem__, wr) def test_get_key(self): s = self.SELECTOR() self.addCleanup(s.close) rd, wr = self.make_socketpair() key = s.register(rd, selectors.EVENT_READ, "data") self.assertEqual(key, s.get_key(rd)) # unknown file obj self.assertRaises(KeyError, s.get_key, 999999) def test_get_map(self): s = self.SELECTOR() self.addCleanup(s.close) rd, wr = self.make_socketpair() keys = s.get_map() self.assertFalse(keys) self.assertEqual(len(keys), 0) self.assertEqual(list(keys), []) key = s.register(rd, selectors.EVENT_READ, "data") self.assertIn(rd, keys) self.assertEqual(key, keys[rd]) self.assertEqual(len(keys), 1) self.assertEqual(list(keys), [rd.fileno()]) self.assertEqual(list(keys.values()), [key]) # unknown file obj with self.assertRaises(KeyError): keys[999999] # Read-only mapping with self.assertRaises(TypeError): del keys[rd] def test_select(self): s = self.SELECTOR() self.addCleanup(s.close) rd, wr = self.make_socketpair() s.register(rd, selectors.EVENT_READ) wr_key = s.register(wr, selectors.EVENT_WRITE) result = s.select() for key, events in result: self.assertTrue(isinstance(key, selectors.SelectorKey)) self.assertTrue(events) self.assertFalse(events & ~(selectors.EVENT_READ | selectors.EVENT_WRITE)) self.assertEqual([(wr_key, selectors.EVENT_WRITE)], result) def test_context_manager(self): s = self.SELECTOR() self.addCleanup(s.close) rd, wr = self.make_socketpair() with s as sel: sel.register(rd, selectors.EVENT_READ) sel.register(wr, selectors.EVENT_WRITE) self.assertRaises(KeyError, s.get_key, rd) self.assertRaises(KeyError, s.get_key, wr) def test_fileno(self): s = self.SELECTOR() self.addCleanup(s.close) if hasattr(s, 'fileno'): fd = s.fileno() self.assertTrue(isinstance(fd, int)) self.assertGreaterEqual(fd, 0) 
def test_selector(self): s = self.SELECTOR() self.addCleanup(s.close) NUM_SOCKETS = 12 MSG = b" This is a test." MSG_LEN = len(MSG) readers = [] writers = [] r2w = {} w2r = {} for i in range(NUM_SOCKETS): rd, wr = self.make_socketpair() s.register(rd, selectors.EVENT_READ) s.register(wr, selectors.EVENT_WRITE) readers.append(rd) writers.append(wr) r2w[rd] = wr w2r[wr] = rd bufs = [] while writers: ready = s.select() ready_writers = find_ready_matching(ready, selectors.EVENT_WRITE) if not ready_writers: self.fail("no sockets ready for writing") wr = random.choice(ready_writers) wr.send(MSG) for i in range(10): ready = s.select() ready_readers = find_ready_matching(ready, selectors.EVENT_READ) if ready_readers: break # there might be a delay between the write to the write end and # the read end is reported ready sleep(0.1) else: self.fail("no sockets ready for reading") self.assertEqual([w2r[wr]], ready_readers) rd = ready_readers[0] buf = rd.recv(MSG_LEN) self.assertEqual(len(buf), MSG_LEN) bufs.append(buf) s.unregister(r2w[rd]) s.unregister(rd) writers.remove(r2w[rd]) self.assertEqual(bufs, [MSG] * NUM_SOCKETS) @unittest.skipIf(sys.platform == 'win32', 'select.select() cannot be used with empty fd sets') def test_empty_select(self): # Issue #23009: Make sure EpollSelector.select() works when no FD is # registered. 
s = self.SELECTOR() self.addCleanup(s.close) self.assertEqual(s.select(timeout=0), []) def test_timeout(self): s = self.SELECTOR() self.addCleanup(s.close) rd, wr = self.make_socketpair() s.register(wr, selectors.EVENT_WRITE) t = time() self.assertEqual(1, len(s.select(0))) self.assertEqual(1, len(s.select(-1))) self.assertLess(time() - t, 0.5) s.unregister(wr) s.register(rd, selectors.EVENT_READ) t = time() self.assertFalse(s.select(0)) self.assertFalse(s.select(-1)) self.assertLess(time() - t, 0.5) t0 = time() self.assertFalse(s.select(1)) t1 = time() dt = t1 - t0 # Tolerate 2.0 seconds for very slow buildbots self.assertTrue(0.8 <= dt <= 2.0, dt) @unittest.skipUnless(hasattr(signal, "alarm"), "signal.alarm() required for this test") def test_select_interrupt(self): s = self.SELECTOR() self.addCleanup(s.close) rd, wr = self.make_socketpair() orig_alrm_handler = signal.signal(signal.SIGALRM, lambda *args: None) self.addCleanup(signal.signal, signal.SIGALRM, orig_alrm_handler) self.addCleanup(signal.alarm, 0) signal.alarm(1) s.register(rd, selectors.EVENT_READ) t = time() self.assertFalse(s.select(2)) self.assertLess(time() - t, 2.5) class ScalableSelectorMixIn: # see issue #18963 for why it's skipped on older OS X versions @support.requires_mac_ver(10, 5) @unittest.skipUnless(resource, "Test needs resource module") def test_above_fd_setsize(self): # A scalable implementation should have no problem with more than # FD_SETSIZE file descriptors. Since we don't know the value, we just # try to set the soft RLIMIT_NOFILE to the hard RLIMIT_NOFILE ceiling. soft, hard = resource.getrlimit(resource.RLIMIT_NOFILE) try: resource.setrlimit(resource.RLIMIT_NOFILE, (hard, hard)) self.addCleanup(resource.setrlimit, resource.RLIMIT_NOFILE, (soft, hard)) NUM_FDS = min(hard, 2**16) except (OSError, ValueError): NUM_FDS = soft # guard for already allocated FDs (stdin, stdout...) 
NUM_FDS -= 32 s = self.SELECTOR() self.addCleanup(s.close) for i in range(NUM_FDS // 2): try: rd, wr = self.make_socketpair() except OSError: # too many FDs, skip - note that we should only catch EMFILE # here, but apparently *BSD and Solaris can fail upon connect() # or bind() with EADDRNOTAVAIL, so let's be safe self.skipTest("FD limit reached") try: s.register(rd, selectors.EVENT_READ) s.register(wr, selectors.EVENT_WRITE) except OSError as e: if e.errno == errno.ENOSPC: # this can be raised by epoll if we go over # fs.epoll.max_user_watches sysctl self.skipTest("FD limit reached") raise self.assertEqual(NUM_FDS // 2, len(s.select())) class DefaultSelectorTestCase(BaseSelectorTestCase): SELECTOR = selectors.DefaultSelector class SelectSelectorTestCase(BaseSelectorTestCase): SELECTOR = selectors.SelectSelector @unittest.skipUnless(hasattr(selectors, 'PollSelector'), "Test needs selectors.PollSelector") class PollSelectorTestCase(BaseSelectorTestCase, ScalableSelectorMixIn): SELECTOR = getattr(selectors, 'PollSelector', None) @unittest.skipUnless(hasattr(selectors, 'EpollSelector'), "Test needs selectors.EpollSelector") class EpollSelectorTestCase(BaseSelectorTestCase, ScalableSelectorMixIn): SELECTOR = getattr(selectors, 'EpollSelector', None) @unittest.skipUnless(hasattr(selectors, 'KqueueSelector'), "Test needs selectors.KqueueSelector)") class KqueueSelectorTestCase(BaseSelectorTestCase, ScalableSelectorMixIn): SELECTOR = getattr(selectors, 'KqueueSelector', None) def test_main(): tests = [DefaultSelectorTestCase, SelectSelectorTestCase, PollSelectorTestCase, EpollSelectorTestCase, KqueueSelectorTestCase] support.run_unittest(*tests) support.reap_children() if __name__ == "__main__": test_main() >>>>>>> b875702c9c06ab5012e52ff4337439b03918f453
mit
famz/patchew
mods/tags.py
1
4962
#!/usr/bin/env python3
#
# Copyright 2016 Red Hat, Inc.
#
# Authors:
#     Fam Zheng <famz@redhat.com>
#
# This work is licensed under the MIT License.  Please see the LICENSE file or
# http://opensource.org/licenses/MIT.

from mod import PatchewModule
from mbox import parse_address
from event import register_handler, emit_event, declare_event
from api.models import Message
from api.rest import PluginMethodField

REV_BY_PREFIX = "Reviewed-by:"
BASED_ON_PREFIX = "Based-on:"

_default_config = """
[default]
tags = Tested-by, Reported-by, Acked-by, Suggested-by

"""

# Tag prefixes that are always recognized, regardless of configuration.
BUILT_IN_TAGS = [REV_BY_PREFIX, BASED_ON_PREFIX]


class SeriesTagsModule(PatchewModule):
    """

Documentation
-------------

This module is configured in "INI" style.

It has only one section named `[default]`. The only supported option is tags:

    [default]
    tags = Reviewed-by, Tested-by, Reported-by, Acked-by, Suggested-by

The `tags` option contains the tag line prefixes (must be followed by colon)
that should be treated as meaningful patch status tags, and picked up from
series cover letter, patch mail body and their replies.

"""
    name = "tags"
    default_config = _default_config

    def __init__(self):
        register_handler("MessageAdded", self.on_message_added)
        declare_event("TagsUpdate", series="message object that is updated")
        # XXX: get this list through module config?

    def get_tag_prefixes(self):
        """Return the set of recognized tag prefixes: configured + built-in."""
        tagsconfig = self.get_config("default", "tags", default="")
        return set([x.strip() for x in tagsconfig.split(",") if x.strip()] +
                   BUILT_IN_TAGS)

    def update_tags(self, s):
        """Recompute the "tags" property of message *s*.

        Returns True if the stored tag set changed, False otherwise.
        """
        old = s.get_property("tags", [])
        new = self.look_for_tags(s)
        if set(old) != set(new):
            s.set_property("tags", list(set(new)))
            return True
        # Explicit False instead of the original implicit None; callers only
        # test truthiness, so this is behavior-compatible.
        return False

    def on_message_added(self, event, message):
        """React to a new message: maintain obsolescence links, refresh tags
        on the series and every patch, and derive the review status."""
        series = message.get_series_head()
        if not series:
            return

        def newer_than(m1, m2):
            # A revision is "newer" only if both the version bumps and the
            # date does not go backwards.
            return m1.version > m2.version and m1.date >= m2.date

        # Cross-link alternative revisions of the same series so the older
        # one is marked obsoleted by the newer one.
        for m in series.get_alternative_revisions():
            if newer_than(m, series):
                series.set_property("obsoleted-by", m.message_id)
            elif newer_than(series, m):
                m.set_property("obsoleted-by", series.message_id)

        updated = self.update_tags(series)
        for p in series.get_patches():
            # BUG FIX: the original read "updated = updated or
            # self.update_tags(p)", which short-circuits once `updated` is
            # True and therefore stops refreshing tags on the remaining
            # patches.  Evaluate update_tags() first so it runs for every
            # patch.
            updated = self.update_tags(p) or updated

        reviewers = set()
        num_reviewed = 0

        def _find_reviewers(what):
            # Collect parsed (name, email) pairs from every Reviewed-by tag
            # stored on *what*.
            ret = set()
            for rev_tag in [x for x in what.get_property("tags", [])
                            if x.lower().startswith(REV_BY_PREFIX.lower())]:
                ret.add(parse_address(rev_tag[len(REV_BY_PREFIX):]))
            return ret

        for p in series.get_patches():
            this_reviewers = _find_reviewers(p)
            if this_reviewers:
                # Count each patch carrying at least one Reviewed-by tag.
                # (The original kept a `first` flag that was re-initialized
                # every iteration, so it was always True here — removed as
                # dead weight; behavior is identical.)
                num_reviewed += 1
                reviewers = reviewers.union(this_reviewers)
        series_reviewers = _find_reviewers(series)
        reviewers = reviewers.union(series_reviewers)
        # The series counts as reviewed when every patch is reviewed, or the
        # cover letter itself collected a Reviewed-by tag.
        if num_reviewed == series.get_num()[1] or series_reviewers:
            series.set_property("reviewed", True)
            series.set_property("reviewers", list(reviewers))
        if updated:
            emit_event("TagsUpdate", series=series)

    def parse_message_tags(self, m):
        """Extract every line of *m*'s body that starts with a known tag
        prefix (case-insensitive)."""
        r = []
        for l in m.get_body().splitlines():
            for p in self.get_tag_prefixes():
                if l.lower().startswith(p.lower()):
                    r.append(l)
        return r

    def look_for_tags(self, m):
        """Collect tags from *m* and, recursively, from its non-patch
        replies (patch replies carry their own tags)."""
        # Incorporate tags from non-patch replies
        r = self.parse_message_tags(m)
        for x in m.get_replies():
            if x.is_patch:
                continue
            r += self.look_for_tags(x)
        return r

    def get_tags(self, m, request, format):
        """REST accessor backing the "tags" field (see PluginMethodField)."""
        return m.get_property("tags", [])

    def rest_message_fields_hook(self, fields):
        fields['tags'] = PluginMethodField(obj=self)

    def prepare_message_hook(self, request, message, detailed):
        """Attach UI status tags ("R" reviewed / "O" obsolete) to a series
        head message before rendering."""
        if not message.is_series_head:
            return
        if message.get_property("reviewed"):
            reviewers = message.get_property("reviewers")
            message.status_tags.append({
                "title": "Reviewed by " + ", ".join([x for x, y in reviewers]),
                "type": "success",
                "char": "R",
            })
        ob = message.get_property("obsoleted-by")
        if ob:
            new = Message.objects.find_series(ob, message.project.name)
            if new is not None:
                message.status_tags.append({
                    "title": "Has a newer version: " + new.subject,
                    "type": "default",
                    "char": "O",
                    "row_class": "obsolete"
                })
mit
tojon/treeherder
treeherder/autoclassify/matchers.py
3
10482
import logging import time from abc import (ABCMeta, abstractmethod) from collections import namedtuple from difflib import SequenceMatcher from django.conf import settings from django.db.models import Q from elasticsearch_dsl.query import Match as ESMatch from treeherder.autoclassify.autoclassify import AUTOCLASSIFY_GOOD_ENOUGH_RATIO from treeherder.model.models import (MatcherManager, TextLogError, TextLogErrorMatch) from treeherder.model.search import (TestFailureLine, es_connected) logger = logging.getLogger(__name__) Match = namedtuple('Match', ['text_log_error', 'classified_failure_id', 'score']) class Matcher(object): __metaclass__ = ABCMeta """Class that is called with a list of unmatched failure lines from a specific job, and returns a list of Match tuples containing the failure_line that matched, the failure it matched with, and the score, which is a number in the range 0-1 with 1 being a perfect match and 0 being the worst possible match.""" def __init__(self, db_object): self.db_object = db_object def __call__(self, text_log_errors): rv = [] for text_log_error in text_log_errors: match = self.match(text_log_error) if match: rv.append(match) return rv def match(self, text_log_error): best_match = self.query_best(text_log_error) if best_match: classified_failure_id, score = best_match logger.debug("Matched using %s" % self.__class__.__name__) return Match(text_log_error, classified_failure_id, score) @abstractmethod def query_best(self, text_log_error): pass ignored_line = (Q(text_log_error___metadata__best_classification=None) & Q(text_log_error___metadata__best_is_verified=True)) class id_window(object): def __init__(self, size, time_budget): self.size = size self.time_budget_ms = time_budget def __call__(self, f): outer = self def inner(self, text_log_error): queries = f(self, text_log_error) if not queries: return for item in queries: if isinstance(item, tuple): query, score_multiplier = item else: query = item score_multiplier = (1, 1) result = 
outer.run(query, score_multiplier) if result: return result inner.__name__ = f.__name__ inner.__doc__ = f.__doc__ return inner def run(self, query, score_multiplier): matches = [] time_budget = self.time_budget_ms / 1000. if self.time_budget_ms is not None else None t0 = time.time() upper_cutoff = (TextLogError.objects .order_by('-id') .values_list('id', flat=True)[0]) count = 0 while upper_cutoff > 0: count += 1 lower_cutoff = max(upper_cutoff - self.size, 0) window_queryset = query.filter( text_log_error__id__range=(lower_cutoff, upper_cutoff)) logger.debug("[time_window] Queryset: %s" % window_queryset.query) match = window_queryset.first() if match is not None: score = match.score * score_multiplier[0] / score_multiplier[1] matches.append((match, score)) if score >= AUTOCLASSIFY_GOOD_ENOUGH_RATIO: break upper_cutoff -= self.size if time_budget is not None and time.time() - t0 > time_budget: # Putting the condition at the end of the loop ensures that we always # run it once, which is useful for testing break logger.debug("[time_window] Used %i queries" % count) if matches: matches.sort(key=lambda x: (-x[1], -x[0].classified_failure_id)) best = matches[0] return best[0].classified_failure_id, best[1] return None def with_failure_lines(f): def inner(self, text_log_errors): with_failure_lines = [item for item in text_log_errors if item.metadata and item.metadata.failure_line] return f(self, with_failure_lines) inner.__name__ = f.__name__ inner.__doc__ = f.__doc__ return inner class PreciseTestMatcher(Matcher): """Matcher that looks for existing failures with identical tests and identical error message.""" @with_failure_lines def __call__(self, text_log_errors): return super(PreciseTestMatcher, self).__call__(text_log_errors) @id_window(size=20000, time_budget=500) def query_best(self, text_log_error): failure_line = text_log_error.metadata.failure_line logger.debug("Looking for test match in failure %d" % failure_line.id) if failure_line.action != "test_result" or 
failure_line.message is None: return return [(TextLogErrorMatch.objects .filter(text_log_error___metadata__failure_line__action="test_result", text_log_error___metadata__failure_line__test=failure_line.test, text_log_error___metadata__failure_line__subtest=failure_line.subtest, text_log_error___metadata__failure_line__status=failure_line.status, text_log_error___metadata__failure_line__expected=failure_line.expected, text_log_error___metadata__failure_line__message=failure_line.message) .exclude(ignored_line | Q(text_log_error__step__job=text_log_error.step.job)) .order_by("-score", "-classified_failure"))] class ElasticSearchTestMatcher(Matcher): """Matcher that looks for existing failures with identical tests, and error message that is a good match when non-alphabetic tokens have been removed.""" def __init__(self, *args, **kwargs): Matcher.__init__(self, *args, **kwargs) self.lines = 0 self.calls = 0 @es_connected(default=[]) @with_failure_lines def __call__(self, text_log_errors): return super(ElasticSearchTestMatcher, self).__call__(text_log_errors) def query_best(self, text_log_error): failure_line = text_log_error.metadata.failure_line if failure_line.action != "test_result" or not failure_line.message: logger.debug("Skipped elasticsearch matching") return match = ESMatch(message={"query": failure_line.message[:1024], "type": "phrase"}) search = (TestFailureLine.search() .filter("term", test=failure_line.test) .filter("term", status=failure_line.status) .filter("term", expected=failure_line.expected) .filter("exists", field="best_classification") .query(match)) if failure_line.subtest: search = search.filter("term", subtest=failure_line.subtest) try: self.calls += 1 resp = search.execute() except: logger.error("Elastic search lookup failed: %s %s %s %s %s", failure_line.test, failure_line.subtest, failure_line.status, failure_line.expected, failure_line.message) raise scorer = MatchScorer(failure_line.message) matches = [(item, item.message) for item in 
resp] best_match = scorer.best_match(matches) if best_match: return (best_match[1].best_classification, best_match[0]) class CrashSignatureMatcher(Matcher): """Matcher that looks for crashes with identical signature""" @with_failure_lines def __call__(self, text_log_errors): return super(CrashSignatureMatcher, self).__call__(text_log_errors) @id_window(size=20000, time_budget=250) def query_best(self, text_log_error): failure_line = text_log_error.metadata.failure_line if (failure_line.action != "crash" or failure_line.signature is None or failure_line.signature == "None"): return matching_failures = (TextLogErrorMatch.objects .filter(text_log_error___metadata__failure_line__action="crash", text_log_error___metadata__failure_line__signature=failure_line.signature) .exclude(ignored_line | Q(text_log_error__step__job=text_log_error.step.job)) .select_related('text_log_error', 'text_log_error___metadata') .order_by("-score", "-classified_failure")) return [matching_failures.filter(text_log_error___metadata__failure_line__test=failure_line.test), (matching_failures, (8, 10))] class MatchScorer(object): """Simple scorer for similarity of strings based on python's difflib SequenceMatcher""" def __init__(self, target): """:param target: The string to which candidate strings will be compared""" self.matcher = SequenceMatcher(lambda x: x == " ") self.matcher.set_seq2(target) def best_match(self, matches): """Return the most similar string to the target string from a list of candidates, along with a score indicating the goodness of the match. 
:param matches: A list of candidate matches :returns: A tuple of (score, best_match)""" best_match = None for match, message in matches: self.matcher.set_seq1(message) ratio = self.matcher.quick_ratio() if best_match is None or ratio >= best_match[0]: new_ratio = self.matcher.ratio() if best_match is None or new_ratio > best_match[0]: best_match = (new_ratio, match) return best_match def register(): for obj_name in settings.AUTOCLASSIFY_MATCHERS: obj = globals()[obj_name] MatcherManager.register_matcher(obj)
mpl-2.0
johnkit/vtk-dev
Filters/Core/Testing/Python/TestSynchronizedTemplates3D.py
12
2199
#!/usr/bin/env python import vtk from vtk.test import Testing from vtk.util.misc import vtkGetDataRoot VTK_DATA_ROOT = vtkGetDataRoot() class TestSynchronizedTemplates3D(Testing.vtkTest): def testAll(self): reader = vtk.vtkImageReader() reader.SetDataByteOrderToLittleEndian() reader.SetDataExtent(0,63,0,63,1,93) reader.SetDataSpacing(3.2,3.2,1.5) reader.SetFilePrefix("" + str(VTK_DATA_ROOT) + "/Data/headsq/quarter") reader.SetDataMask(0x7fff) # write isosurface to file #vtkSynchronizedTemplates3D stemp stemp = vtk.vtkContourFilter() stemp.SetInputConnection(reader.GetOutputPort()) stemp.SetValue(0,1150) stemp.GenerateTrianglesOff() stemp.Update() self.failUnlessEqual(stemp.GetOutputDataObject(0).GetNumberOfPoints(),39315) self.failUnlessEqual(stemp.GetOutputDataObject(0).GetNumberOfCells(),38380) stemp.GenerateTrianglesOn() stemp.Update() self.failUnlessEqual(stemp.GetOutputDataObject(0).GetNumberOfPoints(),39315) self.failUnlessEqual(stemp.GetOutputDataObject(0).GetNumberOfCells(),78268) mapper = vtk.vtkPolyDataMapper() mapper.SetInputConnection(stemp.GetOutputPort()) mapper.ScalarVisibilityOff() head = vtk.vtkActor() head.SetMapper(mapper) head.GetProperty().SetColor(1,0.7,0.6) # Create the RenderWindow, Renderer and Interactor # ren1 = vtk.vtkRenderer() renWin = vtk.vtkRenderWindow() renWin.AddRenderer(ren1) iren = vtk.vtkRenderWindowInteractor() iren.SetRenderWindow(renWin) # Add the actors to the renderer, set the background and size # ren1.AddActor(head) ren1.SetBackground(1,1,1) renWin.SetSize(400,400) ren1.SetBackground(0.5,0.5,0.6) ren1.GetActiveCamera().SetPosition(99.8847,537.926,15) ren1.GetActiveCamera().SetFocalPoint(99.8847,109.81,15) ren1.GetActiveCamera().SetViewAngle(20) ren1.GetActiveCamera().SetViewUp(0,0,-1) ren1.ResetCameraClippingRange() # render the image # renWin.Render() # prevent the tk window from showing up then start the event loop # --- end of script -- if __name__ == "__main__": Testing.main([(TestSynchronizedTemplates3D, 'test')])
bsd-3-clause
IronLanguages/ironpython3
Src/StdLib/Lib/tkinter/test/test_tkinter/test_loadtk.py
162
1503
import os
import sys
import unittest
import test.support as test_support
from tkinter import Tcl, TclError

test_support.requires('gui')

class TkLoadTest(unittest.TestCase):
    """Tests for Tcl().loadtk(): successful Tk initialization and the
    expected failure when no display is available."""

    @unittest.skipIf('DISPLAY' not in os.environ, 'No $DISPLAY set.')
    def testLoadTk(self):
        tcl = Tcl()
        # Before loadtk(), Tk commands such as winfo must fail.
        self.assertRaises(TclError,tcl.winfo_geometry)
        tcl.loadtk()
        self.assertEqual('1x1+0+0', tcl.winfo_geometry())
        tcl.destroy()

    def testLoadTkFailure(self):
        """loadtk() must raise TclError when $DISPLAY is unset (POSIX only)."""
        old_display = None
        if sys.platform.startswith(('win', 'darwin', 'cygwin')):
            # no failure possible on windows?
            # XXX Maybe on tk older than 8.4.13 it would be possible,
            # see tkinter.h.
            return
        with test_support.EnvironmentVarGuard() as env:
            if 'DISPLAY' in os.environ:
                del env['DISPLAY']
                # on some platforms, deleting environment variables
                # doesn't actually carry through to the process level
                # because they don't support unsetenv
                # If that's the case, abort.
                with os.popen('echo $DISPLAY') as pipe:
                    display = pipe.read().strip()
                if display:
                    return

            tcl = Tcl()
            self.assertRaises(TclError, tcl.winfo_geometry)
            self.assertRaises(TclError, tcl.loadtk)

# Collected GUI tests for run_unittest / the regrtest GUI resource.
tests_gui = (TkLoadTest, )

if __name__ == "__main__":
    test_support.run_unittest(*tests_gui)
apache-2.0
LeeMendelowitz/basketball-reference
run_games.py
1
1036
""" Download all 2013 boxscores. """ import os import sys from file_utils import make_dirs import pandas import time import requests from basketball_reference import games_table from basketball_reference.globals import URL_BASE def games_to_csv(): src = os.path.join('pages', 'NBA_2013_games.html') games_table.to_csv(src, 'games.csv') def download_boxscores(): """ Download all boxscores. """ df = pandas.read_csv('games.csv') urls = df['box_score_url'] #import pdb; pdb.set_trace() n = len(urls) for i, url in enumerate(urls): url = url.lstrip('/') dirpath = os.path.dirname(url) make_dirs(dirpath) full_url = '%s/%s'%(URL_BASE, url) sys.stderr.write("Downloading file %i of %i: %s..."%(i, n, full_url)) response = requests.get(full_url) with open(url, 'w') as f: f.write(response.text.encode('utf-8', errors='ignore')) sys.stderr.write('done!\n') # Be kind to Basketball-Reference.com time.sleep(1) if __name__ == '__main__': download_boxscores()
gpl-3.0
mandeepdhami/horizon
openstack_dashboard/dashboards/admin/metadata_defs/forms.py
54
5852
# # (c) Copyright 2014 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Forms for managing metadata. """ import json import logging from django.forms import ValidationError # noqa from django.utils.translation import ugettext_lazy as _ from horizon import exceptions from horizon import forms from horizon import messages from openstack_dashboard.api import glance from openstack_dashboard.dashboards.admin.metadata_defs \ import constants LOG = logging.getLogger(__name__) class CreateNamespaceForm(forms.SelfHandlingForm): source_type = forms.ChoiceField( label=_('Namespace Definition Source'), required=False, choices=[('file', _('Metadata Definition File')), ('raw', _('Direct Input'))], widget=forms.Select( attrs={'class': 'switchable', 'data-slug': 'source'})) metadef_file = forms.FileField( label=_("Metadata Definition File"), help_text=_("A local metadata definition file to upload."), widget=forms.FileInput( attrs={'class': 'switched', 'data-switch-on': 'source', 'data-source-file': _('Metadata Definition File')}), required=False) direct_input = forms.CharField( label=_('Namespace JSON'), help_text=_('The JSON formatted contents of a namespace.'), widget=forms.widgets.Textarea( attrs={'class': 'switched', 'data-switch-on': 'source', 'data-source-raw': _('Namespace JSON')}), required=False) public = forms.BooleanField(label=_("Public"), required=False) protected = forms.BooleanField(label=_("Protected"), required=False) def 
__init__(self, request, *args, **kwargs): super(CreateNamespaceForm, self).__init__(request, *args, **kwargs) def clean(self): data = super(CreateNamespaceForm, self).clean() # The key can be missing based on particular upload # conditions. Code defensively for it here... metadef_file = data.get('metadef_file', None) metadata_raw = data.get('direct_input', None) if metadata_raw and metadef_file: raise ValidationError( _("Cannot specify both file and direct input.")) if not metadata_raw and not metadef_file: raise ValidationError( _("No input was provided for the namespace content.")) try: if metadef_file: ns_str = self.files['metadef_file'].read() else: ns_str = data['direct_input'] namespace = json.loads(ns_str) if data['public']: namespace['visibility'] = 'public' else: namespace['visibility'] = 'private' namespace['protected'] = data['protected'] for protected_prop in constants.METADEFS_PROTECTED_PROPS: namespace.pop(protected_prop, None) data['namespace'] = namespace except Exception as e: msg = _('There was a problem loading the namespace: %s.') % e raise forms.ValidationError(msg) return data def handle(self, request, data): try: namespace = glance.metadefs_namespace_create(request, data['namespace']) messages.success(request, _('Namespace %s has been created.') % namespace['namespace']) return namespace except Exception as e: msg = _('Unable to create new namespace. 
%s') msg %= e.message.split('Failed validating', 1)[0] exceptions.handle(request, message=msg) return False class ManageResourceTypesForm(forms.SelfHandlingForm): def __init__(self, request, *args, **kwargs): super(ManageResourceTypesForm, self).__init__(request, *args, **kwargs) def handle(self, request, context): namespace_name = self.initial['id'] current_names = self.get_names(self.initial['resource_types']) try: updated_types = json.loads(self.data['resource_types']) selected_types = [updated_type for updated_type in updated_types if updated_type.pop('selected', False)] for current_name in current_names: glance.metadefs_namespace_remove_resource_type( self.request, namespace_name, current_name) for selected_type in selected_types: selected_type.pop('$$hashKey', None) selected_type.pop('created_at', None) selected_type.pop('updated_at', None) glance.metadefs_namespace_add_resource_type( self.request, namespace_name, selected_type) msg = _('Resource types updated for namespace %s.') msg %= namespace_name messages.success(request, msg) except Exception: msg = _('Error updating resource types for namespace %s.') msg %= namespace_name exceptions.handle(request, msg) return False return True def get_names(self, items): return [item['name'] for item in items]
apache-2.0
nkcr/WebIndex
app/venv/lib/python3.5/site-packages/nltk/tokenize/punkt.py
7
61452
# Natural Language Toolkit: Punkt sentence tokenizer # # Copyright (C) 2001-2016 NLTK Project # Algorithm: Kiss & Strunk (2006) # Author: Willy <willy@csse.unimelb.edu.au> (original Python port) # Steven Bird <stevenbird1@gmail.com> (additions) # Edward Loper <edloper@gmail.com> (rewrite) # Joel Nothman <jnothman@student.usyd.edu.au> (almost rewrite) # Arthur Darcet <arthur@darcet.fr> (fixes) # URL: <http://nltk.org/> # For license information, see LICENSE.TXT r""" Punkt Sentence Tokenizer This tokenizer divides a text into a list of sentences, by using an unsupervised algorithm to build a model for abbreviation words, collocations, and words that start sentences. It must be trained on a large collection of plaintext in the target language before it can be used. The NLTK data package includes a pre-trained Punkt tokenizer for English. >>> import nltk.data >>> text = ''' ... Punkt knows that the periods in Mr. Smith and Johann S. Bach ... do not mark sentence boundaries. And sometimes sentences ... can start with non-capitalized words. i is a good variable ... name. ... ''' >>> sent_detector = nltk.data.load('tokenizers/punkt/english.pickle') >>> print('\n-----\n'.join(sent_detector.tokenize(text.strip()))) Punkt knows that the periods in Mr. Smith and Johann S. Bach do not mark sentence boundaries. ----- And sometimes sentences can start with non-capitalized words. ----- i is a good variable name. (Note that whitespace from the original text, including newlines, is retained in the output.) Punctuation following sentences is also included by default (from NLTK 3.0 onwards). It can be excluded with the realign_boundaries flag. >>> text = ''' ... (How does it deal with this parenthesis?) "It should be part of the ... previous sentence." "(And the same with this one.)" ('And this one!') ... "('(And (this)) '?)" [(and this. )] ... ''' >>> print('\n-----\n'.join( ... sent_detector.tokenize(text.strip()))) (How does it deal with this parenthesis?) 
----- "It should be part of the previous sentence." ----- "(And the same with this one.)" ----- ('And this one!') ----- "('(And (this)) '?)" ----- [(and this. )] >>> print('\n-----\n'.join( ... sent_detector.tokenize(text.strip(), realign_boundaries=False))) (How does it deal with this parenthesis? ----- ) "It should be part of the previous sentence. ----- " "(And the same with this one. ----- )" ('And this one! ----- ') "('(And (this)) '? ----- )" [(and this. ----- )] However, Punkt is designed to learn parameters (a list of abbreviations, etc.) unsupervised from a corpus similar to the target domain. The pre-packaged models may therefore be unsuitable: use ``PunktSentenceTokenizer(text)`` to learn parameters from the given text. :class:`.PunktTrainer` learns parameters such as a list of abbreviations (without supervision) from portions of text. Using a ``PunktTrainer`` directly allows for incremental training and modification of the hyper-parameters used to decide what is considered an abbreviation, etc. The algorithm for this tokenizer is described in:: Kiss, Tibor and Strunk, Jan (2006): Unsupervised Multilingual Sentence Boundary Detection. Computational Linguistics 32: 485-525. """ from __future__ import print_function, unicode_literals, division # TODO: Make orthographic heuristic less susceptible to overtraining # TODO: Frequent sentence starters optionally exclude always-capitalised words # FIXME: Problem with ending string with e.g. '!!!' -> '!! !' import re import math from collections import defaultdict from nltk.compat import unicode_repr, python_2_unicode_compatible, string_types from nltk.probability import FreqDist from nltk.tokenize.api import TokenizerI ###################################################################### #{ Orthographic Context Constants ###################################################################### # The following constants are used to describe the orthographic # contexts in which a word can occur. 
BEG=beginning, MID=middle, # UNK=unknown, UC=uppercase, LC=lowercase, NC=no case. _ORTHO_BEG_UC = 1 << 1 """Orthographic context: beginning of a sentence with upper case.""" _ORTHO_MID_UC = 1 << 2 """Orthographic context: middle of a sentence with upper case.""" _ORTHO_UNK_UC = 1 << 3 """Orthographic context: unknown position in a sentence with upper case.""" _ORTHO_BEG_LC = 1 << 4 """Orthographic context: beginning of a sentence with lower case.""" _ORTHO_MID_LC = 1 << 5 """Orthographic context: middle of a sentence with lower case.""" _ORTHO_UNK_LC = 1 << 6 """Orthographic context: unknown position in a sentence with lower case.""" _ORTHO_UC = _ORTHO_BEG_UC + _ORTHO_MID_UC + _ORTHO_UNK_UC """Orthographic context: occurs with upper case.""" _ORTHO_LC = _ORTHO_BEG_LC + _ORTHO_MID_LC + _ORTHO_UNK_LC """Orthographic context: occurs with lower case.""" _ORTHO_MAP = { ('initial', 'upper'): _ORTHO_BEG_UC, ('internal', 'upper'): _ORTHO_MID_UC, ('unknown', 'upper'): _ORTHO_UNK_UC, ('initial', 'lower'): _ORTHO_BEG_LC, ('internal', 'lower'): _ORTHO_MID_LC, ('unknown', 'lower'): _ORTHO_UNK_LC, } """A map from context position and first-letter case to the appropriate orthographic context flag.""" #} (end orthographic context constants) ###################################################################### ###################################################################### #{ Decision reasons for debugging ###################################################################### REASON_DEFAULT_DECISION = 'default decision' REASON_KNOWN_COLLOCATION = 'known collocation (both words)' REASON_ABBR_WITH_ORTHOGRAPHIC_HEURISTIC = 'abbreviation + orthographic heuristic' REASON_ABBR_WITH_SENTENCE_STARTER = 'abbreviation + frequent sentence starter' REASON_INITIAL_WITH_ORTHOGRAPHIC_HEURISTIC = 'initial + orthographic heuristic' REASON_NUMBER_WITH_ORTHOGRAPHIC_HEURISTIC = 'initial + orthographic heuristic' REASON_INITIAL_WITH_SPECIAL_ORTHOGRAPHIC_HEURISTIC = 'initial + special 
orthographic heuristic' #} (end decision reasons for debugging) ###################################################################### ###################################################################### #{ Language-dependent variables ###################################################################### class PunktLanguageVars(object): """ Stores variables, mostly regular expressions, which may be language-dependent for correct application of the algorithm. An extension of this class may modify its properties to suit a language other than English; an instance can then be passed as an argument to PunktSentenceTokenizer and PunktTrainer constructors. """ __slots__ = ('_re_period_context', '_re_word_tokenizer') def __getstate__(self): # All modifications to the class are performed by inheritance. # Non-default parameters to be pickled must be defined in the inherited # class. return 1 def __setstate__(self, state): return 1 sent_end_chars = ('.', '?', '!') """Characters which are candidates for sentence boundaries""" @property def _re_sent_end_chars(self): return '[%s]' % re.escape(''.join(self.sent_end_chars)) internal_punctuation = ',:;' # might want to extend this.. """sentence internal punctuation, which indicates an abbreviation if preceded by a period-final token.""" re_boundary_realignment = re.compile(r'["\')\]}]+?(?:\s+|(?=--)|$)', re.MULTILINE) """Used to realign punctuation that should be included in a sentence although it follows the period (or ?, !).""" _re_word_start = r"[^\(\"\`{\[:;&\#\*@\)}\]\-,]" """Excludes some characters from starting word tokens""" _re_non_word_chars = r"(?:[?!)\";}\]\*:@\'\({\[])" """Characters that cannot appear within words""" _re_multi_char_punct = r"(?:\-{2,}|\.{2,}|(?:\.\s){2,}\.)" """Hyphen and ellipsis are multi-character punctuation""" _word_tokenize_fmt = r'''( %(MultiChar)s | (?=%(WordStart)s)\S+? 
def period_context_re(self):
    """Compiles and returns a regular expression to find contexts
    including possible sentence boundaries.

    The compiled pattern is cached on the instance; the format string
    ``_period_context_fmt`` is filled with the language-dependent
    non-word-character and sentence-end-character classes.
    """
    try:
        return self._re_period_context
    except AttributeError:
        # Catch only the missing-cache case (mirrors _word_tokenizer_re);
        # the previous bare ``except:`` silently swallowed every error,
        # including genuine bugs such as a malformed format string.
        self._re_period_context = re.compile(
            self._period_context_fmt % {
                'NonWord': self._re_non_word_chars,
                'SentEndChars': self._re_sent_end_chars,
            },
            re.UNICODE | re.VERBOSE)
        return self._re_period_context
(Types for numeric tokens are changed to ##number## and hence contain alpha.)""" #} ###################################################################### #//////////////////////////////////////////////////////////// #{ Helper Functions #//////////////////////////////////////////////////////////// def _pair_iter(it): """ Yields pairs of tokens from the given iterator such that each input token will appear as the first element in a yielded tuple. The last pair will have None as its second element. """ it = iter(it) prev = next(it) for el in it: yield (prev, el) prev = el yield (prev, None) ###################################################################### #{ Punkt Parameters ###################################################################### class PunktParameters(object): """Stores data used to perform sentence boundary detection with Punkt.""" def __init__(self): self.abbrev_types = set() """A set of word types for known abbreviations.""" self.collocations = set() """A set of word type tuples for known common collocations where the first word ends in a period. E.g., ('S.', 'Bach') is a common collocation in a text that discusses 'Johann S. Bach'. These count as negative evidence for sentence boundaries.""" self.sent_starters = set() """A set of word types for words that often appear at the beginning of sentences.""" self.ortho_context = defaultdict(int) """A dictionary mapping word types to the set of orthographic contexts that word type appears in. 
Contexts are represented by adding orthographic context flags: ...""" def clear_abbrevs(self): self.abbrev_types = set() def clear_collocations(self): self.collocations = set() def clear_sent_starters(self): self.sent_starters = set() def clear_ortho_context(self): self.ortho_context = defaultdict(int) def add_ortho_context(self, typ, flag): self.ortho_context[typ] |= flag def _debug_ortho_context(self, typ): c = self.ortho_context[typ] if c & _ORTHO_BEG_UC: yield 'BEG-UC' if c & _ORTHO_MID_UC: yield 'MID-UC' if c & _ORTHO_UNK_UC: yield 'UNK-UC' if c & _ORTHO_BEG_LC: yield 'BEG-LC' if c & _ORTHO_MID_LC: yield 'MID-LC' if c & _ORTHO_UNK_LC: yield 'UNK-LC' ###################################################################### #{ PunktToken ###################################################################### @python_2_unicode_compatible class PunktToken(object): """Stores a token of text with annotations produced during sentence boundary detection.""" _properties = [ 'parastart', 'linestart', 'sentbreak', 'abbr', 'ellipsis' ] __slots__ = ['tok', 'type', 'period_final'] + _properties def __init__(self, tok, **params): self.tok = tok self.type = self._get_type(tok) self.period_final = tok.endswith('.') for p in self._properties: setattr(self, p, None) for k in params: setattr(self, k, params[k]) #//////////////////////////////////////////////////////////// #{ Regular expressions for properties #//////////////////////////////////////////////////////////// # Note: [A-Za-z] is approximated by [^\W\d] in the general case. 
_RE_ELLIPSIS = re.compile(r'\.\.+$') _RE_NUMERIC = re.compile(r'^-?[\.,]?\d[\d,\.-]*\.?$') _RE_INITIAL = re.compile(r'[^\W\d]\.$', re.UNICODE) _RE_ALPHA = re.compile(r'[^\W\d]+$', re.UNICODE) #//////////////////////////////////////////////////////////// #{ Derived properties #//////////////////////////////////////////////////////////// def _get_type(self, tok): """Returns a case-normalized representation of the token.""" return self._RE_NUMERIC.sub('##number##', tok.lower()) @property def type_no_period(self): """ The type with its final period removed if it has one. """ if len(self.type) > 1 and self.type[-1] == '.': return self.type[:-1] return self.type @property def type_no_sentperiod(self): """ The type with its final period removed if it is marked as a sentence break. """ if self.sentbreak: return self.type_no_period return self.type @property def first_upper(self): """True if the token's first character is uppercase.""" return self.tok[0].isupper() @property def first_lower(self): """True if the token's first character is lowercase.""" return self.tok[0].islower() @property def first_case(self): if self.first_lower: return 'lower' elif self.first_upper: return 'upper' return 'none' @property def is_ellipsis(self): """True if the token text is that of an ellipsis.""" return self._RE_ELLIPSIS.match(self.tok) @property def is_number(self): """True if the token text is that of a number.""" return self.type.startswith('##number##') @property def is_initial(self): """True if the token text is that of an initial.""" return self._RE_INITIAL.match(self.tok) @property def is_alpha(self): """True if the token text is all alphabetic.""" return self._RE_ALPHA.match(self.tok) @property def is_non_punct(self): """True if the token is either a number or is alphabetic.""" return _re_non_punct.search(self.type) #//////////////////////////////////////////////////////////// #{ String representation #//////////////////////////////////////////////////////////// def 
def __init__(self, lang_vars=None, token_cls=PunktToken, params=None):
    """
    Set up the state shared by PunktTrainer and PunktSentenceTokenizer.

    :param lang_vars: language-dependent regexes and constants;
        defaults to a fresh ``PunktLanguageVars`` instance.
    :param token_cls: the class used to wrap each word token.
    :param params: the collection of parameters that determines the
        behavior of the punkt tokenizer; defaults to a fresh
        ``PunktParameters`` instance.
    """
    # BUGFIX: the previous signature used mutable default arguments
    # (``PunktLanguageVars()`` / ``PunktParameters()`` evaluated once at
    # class-definition time), so every instance constructed with the
    # defaults shared ONE parameter object — training one tokenizer
    # could mutate another's parameters. Create fresh defaults per
    # instance instead.
    if lang_vars is None:
        lang_vars = PunktLanguageVars()
    if params is None:
        params = PunktParameters()
    self._params = params
    self._lang_vars = lang_vars
    self._Token = token_cls
""" parastart = False for line in plaintext.split('\n'): if line.strip(): line_toks = iter(self._lang_vars.word_tokenize(line)) yield self._Token(next(line_toks), parastart=parastart, linestart=True) parastart = False for t in line_toks: yield self._Token(t) else: parastart = True #//////////////////////////////////////////////////////////// #{ Annotation Procedures #//////////////////////////////////////////////////////////// def _annotate_first_pass(self, tokens): """ Perform the first pass of annotation, which makes decisions based purely based on the word type of each word: - '?', '!', and '.' are marked as sentence breaks. - sequences of two or more periods are marked as ellipsis. - any word ending in '.' that's a known abbreviation is marked as an abbreviation. - any other word ending in '.' is marked as a sentence break. Return these annotations as a tuple of three sets: - sentbreak_toks: The indices of all sentence breaks. - abbrev_toks: The indices of all abbreviations. - ellipsis_toks: The indices of all ellipsis marks. """ for aug_tok in tokens: self._first_pass_annotation(aug_tok) yield aug_tok def _first_pass_annotation(self, aug_tok): """ Performs type-based annotation on a single token. 
""" tok = aug_tok.tok if tok in self._lang_vars.sent_end_chars: aug_tok.sentbreak = True elif aug_tok.is_ellipsis: aug_tok.ellipsis = True elif aug_tok.period_final and not tok.endswith('..'): if (tok[:-1].lower() in self._params.abbrev_types or tok[:-1].lower().split('-')[-1] in self._params.abbrev_types): aug_tok.abbr = True else: aug_tok.sentbreak = True return ###################################################################### #{ Punkt Trainer ###################################################################### class PunktTrainer(PunktBaseClass): """Learns parameters used in Punkt sentence boundary detection.""" def __init__(self, train_text=None, verbose=False, lang_vars=PunktLanguageVars(), token_cls=PunktToken): PunktBaseClass.__init__(self, lang_vars=lang_vars, token_cls=token_cls) self._type_fdist = FreqDist() """A frequency distribution giving the frequency of each case-normalized token type in the training data.""" self._num_period_toks = 0 """The number of words ending in period in the training data.""" self._collocation_fdist = FreqDist() """A frequency distribution giving the frequency of all bigrams in the training data where the first word ends in a period. Bigrams are encoded as tuples of word types. Especially common collocations are extracted from this frequency distribution, and stored in ``_params``.``collocations <PunktParameters.collocations>``.""" self._sent_starter_fdist = FreqDist() """A frequency distribution giving the frequency of all words that occur at the training data at the beginning of a sentence (after the first pass of annotation). Especially common sentence starters are extracted from this frequency distribution, and stored in ``_params.sent_starters``. 
""" self._sentbreak_count = 0 """The total number of sentence breaks identified in training, used for calculating the frequent sentence starter heuristic.""" self._finalized = True """A flag as to whether the training has been finalized by finding collocations and sentence starters, or whether finalize_training() still needs to be called.""" if train_text: self.train(train_text, verbose, finalize=True) def get_params(self): """ Calculates and returns parameters for sentence boundary detection as derived from training.""" if not self._finalized: self.finalize_training() return self._params #//////////////////////////////////////////////////////////// #{ Customization Variables #//////////////////////////////////////////////////////////// ABBREV = 0.3 """cut-off value whether a 'token' is an abbreviation""" IGNORE_ABBREV_PENALTY = False """allows the disabling of the abbreviation penalty heuristic, which exponentially disadvantages words that are found at times without a final period.""" ABBREV_BACKOFF = 5 """upper cut-off for Mikheev's(2002) abbreviation detection algorithm""" COLLOCATION = 7.88 """minimal log-likelihood value that two tokens need to be considered as a collocation""" SENT_STARTER = 30 """minimal log-likelihood value that a token requires to be considered as a frequent sentence starter""" INCLUDE_ALL_COLLOCS = False """this includes as potential collocations all word pairs where the first word ends in a period. It may be useful in corpora where there is a lot of variation that makes abbreviations like Mr difficult to identify.""" INCLUDE_ABBREV_COLLOCS = False """this includes as potential collocations all word pairs where the first word is an abbreviation. Such collocations override the orthographic heuristic, but not the sentence starter heuristic. 
def train_tokens(self, tokens, verbose=False, finalize=True):
    """
    Collects training data from a given list of tokens.

    If ``finalize`` is False, several batches of tokens can be
    accumulated before parameters are derived.
    """
    # Wrap each raw token string in the token class before feeding it
    # to the incremental trainer.
    wrapped = (self._Token(tok) for tok in tokens)
    self._train_tokens(wrapped, verbose)
    if finalize:
        self.finalize_training(verbose)
for aug_tok in tokens: self._type_fdist[aug_tok.type] += 1 if aug_tok.period_final: self._num_period_toks += 1 # Look for new abbreviations, and for types that no longer are unique_types = self._unique_types(tokens) for abbr, score, is_add in self._reclassify_abbrev_types(unique_types): if score >= self.ABBREV: if is_add: self._params.abbrev_types.add(abbr) if verbose: print((' Abbreviation: [%6.4f] %s' % (score, abbr))) else: if not is_add: self._params.abbrev_types.remove(abbr) if verbose: print((' Removed abbreviation: [%6.4f] %s' % (score, abbr))) # Make a preliminary pass through the document, marking likely # sentence breaks, abbreviations, and ellipsis tokens. tokens = list(self._annotate_first_pass(tokens)) # Check what contexts each word type can appear in, given the # case of its first letter. self._get_orthography_data(tokens) # We need total number of sentence breaks to find sentence starters self._sentbreak_count += self._get_sentbreak_count(tokens) # The remaining heuristics relate to pairs of tokens where the first # ends in a period. for aug_tok1, aug_tok2 in _pair_iter(tokens): if not aug_tok1.period_final or not aug_tok2: continue # Is the first token a rare abbreviation? if self._is_rare_abbrev_type(aug_tok1, aug_tok2): self._params.abbrev_types.add(aug_tok1.type_no_period) if verbose: print((' Rare Abbrev: %s' % aug_tok1.type)) # Does second token have a high likelihood of starting a sentence? if self._is_potential_sent_starter(aug_tok2, aug_tok1): self._sent_starter_fdist[aug_tok2.type] += 1 # Is this bigram a potential collocation? if self._is_potential_collocation(aug_tok1, aug_tok2): self._collocation_fdist[ (aug_tok1.type_no_period, aug_tok2.type_no_sentperiod)] += 1 def _unique_types(self, tokens): return set(aug_tok.type for aug_tok in tokens) def finalize_training(self, verbose=False): """ Uses data that has been gathered in training to determine likely collocations and sentence starters. 
""" self._params.clear_sent_starters() for typ, ll in self._find_sent_starters(): self._params.sent_starters.add(typ) if verbose: print((' Sent Starter: [%6.4f] %r' % (ll, typ))) self._params.clear_collocations() for (typ1, typ2), ll in self._find_collocations(): self._params.collocations.add( (typ1,typ2) ) if verbose: print((' Collocation: [%6.4f] %r+%r' % (ll, typ1, typ2))) self._finalized = True #//////////////////////////////////////////////////////////// #{ Overhead reduction #//////////////////////////////////////////////////////////// def freq_threshold(self, ortho_thresh=2, type_thresh=2, colloc_thres=2, sentstart_thresh=2): """ Allows memory use to be reduced after much training by removing data about rare tokens that are unlikely to have a statistical effect with further training. Entries occurring above the given thresholds will be retained. """ if ortho_thresh > 1: old_oc = self._params.ortho_context self._params.clear_ortho_context() for tok in self._type_fdist: count = self._type_fdist[tok] if count >= ortho_thresh: self._params.ortho_context[tok] = old_oc[tok] self._type_fdist = self._freq_threshold(self._type_fdist, type_thresh) self._collocation_fdist = self._freq_threshold( self._collocation_fdist, colloc_thres) self._sent_starter_fdist = self._freq_threshold( self._sent_starter_fdist, sentstart_thresh) def _freq_threshold(self, fdist, threshold): """ Returns a FreqDist containing only data with counts below a given threshold, as well as a mapping (None -> count_removed). """ # We assume that there is more data below the threshold than above it # and so create a new FreqDist rather than working in place. 
res = FreqDist() num_removed = 0 for tok in fdist: count = fdist[tok] if count < threshold: num_removed += 1 else: res[tok] += count res[None] += num_removed return res #//////////////////////////////////////////////////////////// #{ Orthographic data #//////////////////////////////////////////////////////////// def _get_orthography_data(self, tokens): """ Collect information about whether each token type occurs with different case patterns (i) overall, (ii) at sentence-initial positions, and (iii) at sentence-internal positions. """ # 'initial' or 'internal' or 'unknown' context = 'internal' tokens = list(tokens) for aug_tok in tokens: # If we encounter a paragraph break, then it's a good sign # that it's a sentence break. But err on the side of # caution (by not positing a sentence break) if we just # saw an abbreviation. if aug_tok.parastart and context != 'unknown': context = 'initial' # If we're at the beginning of a line, then we can't decide # between 'internal' and 'initial'. if aug_tok.linestart and context == 'internal': context = 'unknown' # Find the case-normalized type of the token. If it's a # sentence-final token, strip off the period. typ = aug_tok.type_no_sentperiod # Update the orthographic context table. flag = _ORTHO_MAP.get((context, aug_tok.first_case), 0) if flag: self._params.add_ortho_context(typ, flag) # Decide whether the next word is at a sentence boundary. 
if aug_tok.sentbreak: if not (aug_tok.is_number or aug_tok.is_initial): context = 'initial' else: context = 'unknown' elif aug_tok.ellipsis or aug_tok.abbr: context = 'unknown' else: context = 'internal' #//////////////////////////////////////////////////////////// #{ Abbreviations #//////////////////////////////////////////////////////////// def _reclassify_abbrev_types(self, types): """ (Re)classifies each given token if - it is period-final and not a known abbreviation; or - it is not period-final and is otherwise a known abbreviation by checking whether its previous classification still holds according to the heuristics of section 3. Yields triples (abbr, score, is_add) where abbr is the type in question, score is its log-likelihood with penalties applied, and is_add specifies whether the present type is a candidate for inclusion or exclusion as an abbreviation, such that: - (is_add and score >= 0.3) suggests a new abbreviation; and - (not is_add and score < 0.3) suggests excluding an abbreviation. """ # (While one could recalculate abbreviations from all .-final tokens at # every iteration, in cases requiring efficiency, the number of tokens # in the present training document will be much less.) for typ in types: # Check some basic conditions, to rule out words that are # clearly not abbrev_types. if not _re_non_punct.search(typ) or typ == '##number##': continue if typ.endswith('.'): if typ in self._params.abbrev_types: continue typ = typ[:-1] is_add = True else: if typ not in self._params.abbrev_types: continue is_add = False # Count how many periods & nonperiods are in the # candidate. num_periods = typ.count('.') + 1 num_nonperiods = len(typ) - num_periods + 1 # Let <a> be the candidate without the period, and <b> # be the period. Find a log likelihood ratio that # indicates whether <ab> occurs as a single unit (high # value of ll), or as two independent units <a> and # <b> (low value of ll). 
count_with_period = self._type_fdist[typ + '.'] count_without_period = self._type_fdist[typ] ll = self._dunning_log_likelihood( count_with_period + count_without_period, self._num_period_toks, count_with_period, self._type_fdist.N()) # Apply three scaling factors to 'tweak' the basic log # likelihood ratio: # F_length: long word -> less likely to be an abbrev # F_periods: more periods -> more likely to be an abbrev # F_penalty: penalize occurrences w/o a period f_length = math.exp(-num_nonperiods) f_periods = num_periods f_penalty = (int(self.IGNORE_ABBREV_PENALTY) or math.pow(num_nonperiods, -count_without_period)) score = ll * f_length * f_periods * f_penalty yield typ, score, is_add def find_abbrev_types(self): """ Recalculates abbreviations given type frequencies, despite no prior determination of abbreviations. This fails to include abbreviations otherwise found as "rare". """ self._params.clear_abbrevs() tokens = (typ for typ in self._type_fdist if typ and typ.endswith('.')) for abbr, score, is_add in self._reclassify_abbrev_types(tokens): if score >= self.ABBREV: self._params.abbrev_types.add(abbr) # This function combines the work done by the original code's # functions `count_orthography_context`, `get_orthography_count`, # and `get_rare_abbreviations`. def _is_rare_abbrev_type(self, cur_tok, next_tok): """ A word type is counted as a rare abbreviation if... - it's not already marked as an abbreviation - it occurs fewer than ABBREV_BACKOFF times - either it is followed by a sentence-internal punctuation mark, *or* it is followed by a lower-case word that sometimes appears with upper case, but never occurs with lower case at the beginning of sentences. """ if cur_tok.abbr or not cur_tok.sentbreak: return False # Find the case-normalized type of the token. If it's # a sentence-final token, strip off the period. typ = cur_tok.type_no_sentperiod # Proceed only if the type hasn't been categorized as an # abbreviation already, and is sufficiently rare... 
count = self._type_fdist[typ] + self._type_fdist[typ[:-1]] if (typ in self._params.abbrev_types or count >= self.ABBREV_BACKOFF): return False # Record this token as an abbreviation if the next # token is a sentence-internal punctuation mark. # [XX] :1 or check the whole thing?? if next_tok.tok[:1] in self._lang_vars.internal_punctuation: return True # Record this type as an abbreviation if the next # token... (i) starts with a lower case letter, # (ii) sometimes occurs with an uppercase letter, # and (iii) never occus with an uppercase letter # sentence-internally. # [xx] should the check for (ii) be modified?? elif next_tok.first_lower: typ2 = next_tok.type_no_sentperiod typ2ortho_context = self._params.ortho_context[typ2] if ( (typ2ortho_context & _ORTHO_BEG_UC) and not (typ2ortho_context & _ORTHO_MID_UC) ): return True #//////////////////////////////////////////////////////////// #{ Log Likelihoods #//////////////////////////////////////////////////////////// # helper for _reclassify_abbrev_types: @staticmethod def _dunning_log_likelihood(count_a, count_b, count_ab, N): """ A function that calculates the modified Dunning log-likelihood ratio scores for abbreviation candidates. The details of how this works is available in the paper. """ p1 = count_b / N p2 = 0.99 null_hypo = (count_ab * math.log(p1) + (count_a - count_ab) * math.log(1.0 - p1)) alt_hypo = (count_ab * math.log(p2) + (count_a - count_ab) * math.log(1.0 - p2)) likelihood = null_hypo - alt_hypo return (-2.0 * likelihood) @staticmethod def _col_log_likelihood(count_a, count_b, count_ab, N): """ A function that will just compute log-likelihood estimate, in the original paper it's described in algorithm 6 and 7. 
This *should* be the original Dunning log-likelihood values, unlike the previous log_l function where it used modified Dunning log-likelihood values """ import math p = count_b / N p1 = count_ab / count_a p2 = (count_b - count_ab) / (N - count_a) summand1 = (count_ab * math.log(p) + (count_a - count_ab) * math.log(1.0 - p)) summand2 = ((count_b - count_ab) * math.log(p) + (N - count_a - count_b + count_ab) * math.log(1.0 - p)) if count_a == count_ab: summand3 = 0 else: summand3 = (count_ab * math.log(p1) + (count_a - count_ab) * math.log(1.0 - p1)) if count_b == count_ab: summand4 = 0 else: summand4 = ((count_b - count_ab) * math.log(p2) + (N - count_a - count_b + count_ab) * math.log(1.0 - p2)) likelihood = summand1 + summand2 - summand3 - summand4 return (-2.0 * likelihood) #//////////////////////////////////////////////////////////// #{ Collocation Finder #//////////////////////////////////////////////////////////// def _is_potential_collocation(self, aug_tok1, aug_tok2): """ Returns True if the pair of tokens may form a collocation given log-likelihood statistics. """ return ((self.INCLUDE_ALL_COLLOCS or (self.INCLUDE_ABBREV_COLLOCS and aug_tok1.abbr) or (aug_tok1.sentbreak and (aug_tok1.is_number or aug_tok1.is_initial))) and aug_tok1.is_non_punct and aug_tok2.is_non_punct) def _find_collocations(self): """ Generates likely collocations and their log-likelihood. 
""" for types in self._collocation_fdist: try: typ1, typ2 = types except TypeError: # types may be None after calling freq_threshold() continue if typ2 in self._params.sent_starters: continue col_count = self._collocation_fdist[types] typ1_count = self._type_fdist[typ1]+self._type_fdist[typ1+'.'] typ2_count = self._type_fdist[typ2]+self._type_fdist[typ2+'.'] if (typ1_count > 1 and typ2_count > 1 and self.MIN_COLLOC_FREQ < col_count <= min(typ1_count, typ2_count)): ll = self._col_log_likelihood(typ1_count, typ2_count, col_count, self._type_fdist.N()) # Filter out the not-so-collocative if (ll >= self.COLLOCATION and (self._type_fdist.N()/typ1_count > typ2_count/col_count)): yield (typ1, typ2), ll #//////////////////////////////////////////////////////////// #{ Sentence-Starter Finder #//////////////////////////////////////////////////////////// def _is_potential_sent_starter(self, cur_tok, prev_tok): """ Returns True given a token and the token that preceds it if it seems clear that the token is beginning a sentence. """ # If a token (i) is preceded by a sentece break that is # not a potential ordinal number or initial, and (ii) is # alphabetic, then it is a a sentence-starter. return ( prev_tok.sentbreak and not (prev_tok.is_number or prev_tok.is_initial) and cur_tok.is_alpha ) def _find_sent_starters(self): """ Uses collocation heuristics for each candidate token to determine if it frequently starts sentences. 
""" for typ in self._sent_starter_fdist: if not typ: continue typ_at_break_count = self._sent_starter_fdist[typ] typ_count = self._type_fdist[typ]+self._type_fdist[typ+'.'] if typ_count < typ_at_break_count: # needed after freq_threshold continue ll = self._col_log_likelihood(self._sentbreak_count, typ_count, typ_at_break_count, self._type_fdist.N()) if (ll >= self.SENT_STARTER and self._type_fdist.N()/self._sentbreak_count > typ_count/typ_at_break_count): yield typ, ll def _get_sentbreak_count(self, tokens): """ Returns the number of sentence breaks marked in a given set of augmented tokens. """ return sum(1 for aug_tok in tokens if aug_tok.sentbreak) ###################################################################### #{ Punkt Sentence Tokenizer ###################################################################### class PunktSentenceTokenizer(PunktBaseClass,TokenizerI): """ A sentence tokenizer which uses an unsupervised algorithm to build a model for abbreviation words, collocations, and words that start sentences; and then uses that model to find sentence boundaries. This approach has been shown to work well for many European languages. """ def __init__(self, train_text=None, verbose=False, lang_vars=PunktLanguageVars(), token_cls=PunktToken): """ train_text can either be the sole training text for this sentence boundary detector, or can be a PunktParameters object. """ PunktBaseClass.__init__(self, lang_vars=lang_vars, token_cls=token_cls) if train_text: self._params = self.train(train_text, verbose) def train(self, train_text, verbose=False): """ Derives parameters from a given training text, or uses the parameters given. Repeated calls to this method destroy previous parameters. For incremental training, instantiate a separate PunktTrainer instance. 
""" if not isinstance(train_text, string_types): return train_text return PunktTrainer(train_text, lang_vars=self._lang_vars, token_cls=self._Token).get_params() #//////////////////////////////////////////////////////////// #{ Tokenization #//////////////////////////////////////////////////////////// def tokenize(self, text, realign_boundaries=True): """ Given a text, returns a list of the sentences in that text. """ return list(self.sentences_from_text(text, realign_boundaries)) def debug_decisions(self, text): """ Classifies candidate periods as sentence breaks, yielding a dict for each that may be used to understand why the decision was made. See format_debug_decision() to help make this output readable. """ for match in self._lang_vars.period_context_re().finditer(text): decision_text = match.group() + match.group('after_tok') tokens = self._tokenize_words(decision_text) tokens = list(self._annotate_first_pass(tokens)) while not tokens[0].period_final: tokens.pop(0) yield dict(period_index=match.end() - 1, text=decision_text, type1=tokens[0].type, type2=tokens[1].type, type1_in_abbrs=bool(tokens[0].abbr), type1_is_initial=bool(tokens[0].is_initial), type2_is_sent_starter=tokens[1].type_no_sentperiod in self._params.sent_starters, type2_ortho_heuristic=self._ortho_heuristic(tokens[1]), type2_ortho_contexts=set(self._params._debug_ortho_context(tokens[1].type_no_sentperiod)), collocation=(tokens[0].type_no_sentperiod, tokens[1].type_no_sentperiod) in self._params.collocations, reason=self._second_pass_annotation(tokens[0], tokens[1]) or REASON_DEFAULT_DECISION, break_decision=tokens[0].sentbreak, ) def span_tokenize(self, text, realign_boundaries=True): """ Given a text, returns a list of the (start, end) spans of sentences in the text. 
""" slices = self._slices_from_text(text) if realign_boundaries: slices = self._realign_boundaries(text, slices) return [(sl.start, sl.stop) for sl in slices] def sentences_from_text(self, text, realign_boundaries=True): """ Given a text, generates the sentences in that text by only testing candidate sentence breaks. If realign_boundaries is True, includes in the sentence closing punctuation that follows the period. """ return [text[s:e] for s, e in self.span_tokenize(text, realign_boundaries)] def _slices_from_text(self, text): last_break = 0 for match in self._lang_vars.period_context_re().finditer(text): context = match.group() + match.group('after_tok') if self.text_contains_sentbreak(context): yield slice(last_break, match.end()) if match.group('next_tok'): # next sentence starts after whitespace last_break = match.start('next_tok') else: # next sentence starts at following punctuation last_break = match.end() yield slice(last_break, len(text)) def _realign_boundaries(self, text, slices): """ Attempts to realign punctuation that falls after the period but should otherwise be included in the same sentence. For example: "(Sent1.) Sent2." will otherwise be split as:: ["(Sent1.", ") Sent1."]. This method will produce:: ["(Sent1.)", "Sent2."]. """ realign = 0 for sl1, sl2 in _pair_iter(slices): sl1 = slice(sl1.start + realign, sl1.stop) if not sl2: if text[sl1]: yield sl1 continue m = self._lang_vars.re_boundary_realignment.match(text[sl2]) if m: yield slice(sl1.start, sl2.start + len(m.group(0).rstrip())) realign = m.end() else: realign = 0 if text[sl1]: yield sl1 def text_contains_sentbreak(self, text): """ Returns True if the given text includes a sentence break. """ found = False # used to ignore last token for t in self._annotate_tokens(self._tokenize_words(text)): if found: return True if t.sentbreak: found = True return False def sentences_from_text_legacy(self, text): """ Given a text, generates the sentences in that text. 
Annotates all tokens, rather than just those with possible sentence breaks. Should produce the same results as ``sentences_from_text``. """ tokens = self._annotate_tokens(self._tokenize_words(text)) return self._build_sentence_list(text, tokens) def sentences_from_tokens(self, tokens): """ Given a sequence of tokens, generates lists of tokens, each list corresponding to a sentence. """ tokens = iter(self._annotate_tokens(self._Token(t) for t in tokens)) sentence = [] for aug_tok in tokens: sentence.append(aug_tok.tok) if aug_tok.sentbreak: yield sentence sentence = [] if sentence: yield sentence def _annotate_tokens(self, tokens): """ Given a set of tokens augmented with markers for line-start and paragraph-start, returns an iterator through those tokens with full annotation including predicted sentence breaks. """ # Make a preliminary pass through the document, marking likely # sentence breaks, abbreviations, and ellipsis tokens. tokens = self._annotate_first_pass(tokens) # Make a second pass through the document, using token context # information to change our preliminary decisions about where # sentence breaks, abbreviations, and ellipsis occurs. tokens = self._annotate_second_pass(tokens) ## [XX] TESTING #tokens = list(tokens) #self.dump(tokens) return tokens def _build_sentence_list(self, text, tokens): """ Given the original text and the list of augmented word tokens, construct and return a tokenized list of sentence strings. """ # Most of the work here is making sure that we put the right # pieces of whitespace back in all the right places. # Our position in the source text, used to keep track of which # whitespace to add: pos = 0 # A regular expression that finds pieces of whitespace: WS_REGEXP = re.compile(r'\s*') sentence = '' for aug_tok in tokens: tok = aug_tok.tok # Find the whitespace before this token, and update pos. 
ws = WS_REGEXP.match(text, pos).group() pos += len(ws) # Some of the rules used by the punkt word tokenizer # strip whitespace out of the text, resulting in tokens # that contain whitespace in the source text. If our # token doesn't match, see if adding whitespace helps. # If so, then use the version with whitespace. if text[pos:pos+len(tok)] != tok: pat = '\s*'.join(re.escape(c) for c in tok) m = re.compile(pat).match(text,pos) if m: tok = m.group() # Move our position pointer to the end of the token. assert text[pos:pos+len(tok)] == tok pos += len(tok) # Add this token. If it's not at the beginning of the # sentence, then include any whitespace that separated it # from the previous token. if sentence: sentence += ws sentence += tok # If we're at a sentence break, then start a new sentence. if aug_tok.sentbreak: yield sentence sentence = '' # If the last sentence is emtpy, discard it. if sentence: yield sentence # [XX] TESTING def dump(self, tokens): print('writing to /tmp/punkt.new...') with open('/tmp/punkt.new', 'w') as outfile: for aug_tok in tokens: if aug_tok.parastart: outfile.write('\n\n') elif aug_tok.linestart: outfile.write('\n') else: outfile.write(' ') outfile.write(str(aug_tok)) #//////////////////////////////////////////////////////////// #{ Customization Variables #//////////////////////////////////////////////////////////// PUNCTUATION = tuple(';:,.!?') #//////////////////////////////////////////////////////////// #{ Annotation Procedures #//////////////////////////////////////////////////////////// def _annotate_second_pass(self, tokens): """ Performs a token-based classification (section 4) over the given tokens, making use of the orthographic heuristic (4.1.1), collocation heuristic (4.1.2) and frequent sentence starter heuristic (4.1.3). 
""" for t1, t2 in _pair_iter(tokens): self._second_pass_annotation(t1, t2) yield t1 def _second_pass_annotation(self, aug_tok1, aug_tok2): """ Performs token-based classification over a pair of contiguous tokens updating the first. """ # Is it the last token? We can't do anything then. if not aug_tok2: return tok = aug_tok1.tok if not aug_tok1.period_final: # We only care about words ending in periods. return typ = aug_tok1.type_no_period next_tok = aug_tok2.tok next_typ = aug_tok2.type_no_sentperiod tok_is_initial = aug_tok1.is_initial # [4.1.2. Collocation Heuristic] If there's a # collocation between the word before and after the # period, then label tok as an abbreviation and NOT # a sentence break. Note that collocations with # frequent sentence starters as their second word are # excluded in training. if (typ, next_typ) in self._params.collocations: aug_tok1.sentbreak = False aug_tok1.abbr = True return REASON_KNOWN_COLLOCATION # [4.2. Token-Based Reclassification of Abbreviations] If # the token is an abbreviation or an ellipsis, then decide # whether we should *also* classify it as a sentbreak. if ( (aug_tok1.abbr or aug_tok1.ellipsis) and (not tok_is_initial) ): # [4.1.1. Orthographic Heuristic] Check if there's # orthogrpahic evidence about whether the next word # starts a sentence or not. is_sent_starter = self._ortho_heuristic(aug_tok2) if is_sent_starter == True: aug_tok1.sentbreak = True return REASON_ABBR_WITH_ORTHOGRAPHIC_HEURISTIC # [4.1.3. Frequent Sentence Starter Heruistic] If the # next word is capitalized, and is a member of the # frequent-sentence-starters list, then label tok as a # sentence break. if ( aug_tok2.first_upper and next_typ in self._params.sent_starters): aug_tok1.sentbreak = True return REASON_ABBR_WITH_SENTENCE_STARTER # [4.3. Token-Based Detection of Initials and Ordinals] # Check if any initials or ordinals tokens that are marked # as sentbreaks should be reclassified as abbreviations. 
if tok_is_initial or typ == '##number##': # [4.1.1. Orthographic Heuristic] Check if there's # orthogrpahic evidence about whether the next word # starts a sentence or not. is_sent_starter = self._ortho_heuristic(aug_tok2) if is_sent_starter == False: aug_tok1.sentbreak = False aug_tok1.abbr = True if tok_is_initial: return REASON_INITIAL_WITH_ORTHOGRAPHIC_HEURISTIC else: return REASON_NUMBER_WITH_ORTHOGRAPHIC_HEURISTIC # Special heuristic for initials: if orthogrpahic # heuristc is unknown, and next word is always # capitalized, then mark as abbrev (eg: J. Bach). if ( is_sent_starter == 'unknown' and tok_is_initial and aug_tok2.first_upper and not (self._params.ortho_context[next_typ] & _ORTHO_LC) ): aug_tok1.sentbreak = False aug_tok1.abbr = True return REASON_INITIAL_WITH_SPECIAL_ORTHOGRAPHIC_HEURISTIC return def _ortho_heuristic(self, aug_tok): """ Decide whether the given token is the first token in a sentence. """ # Sentences don't start with punctuation marks: if aug_tok.tok in self.PUNCTUATION: return False ortho_context = self._params.ortho_context[aug_tok.type_no_sentperiod] # If the word is capitalized, occurs at least once with a # lower case first letter, and never occurs with an upper case # first letter sentence-internally, then it's a sentence starter. if ( aug_tok.first_upper and (ortho_context & _ORTHO_LC) and not (ortho_context & _ORTHO_MID_UC) ): return True # If the word is lower case, and either (a) we've seen it used # with upper case, or (b) we've never seen it used # sentence-initially with lower case, then it's not a sentence # starter. if ( aug_tok.first_lower and ((ortho_context & _ORTHO_UC) or not (ortho_context & _ORTHO_BEG_LC)) ): return False # Otherwise, we're not sure. return 'unknown' DEBUG_DECISION_FMT = '''Text: %(text)r (at offset %(period_index)d) Sentence break? %(break_decision)s (%(reason)s) Collocation? 
%(collocation)s %(type1)r: known abbreviation: %(type1_in_abbrs)s is initial: %(type1_is_initial)s %(type2)r: known sentence starter: %(type2_is_sent_starter)s orthographic heuristic suggests is a sentence starter? %(type2_ortho_heuristic)s orthographic contexts in training: %(type2_ortho_contexts)s ''' def format_debug_decision(d): return DEBUG_DECISION_FMT % d def demo(text, tok_cls=PunktSentenceTokenizer, train_cls=PunktTrainer): """Builds a punkt model and applies it to the same text""" cleanup = lambda s: re.compile(r'(?:\r|^\s+)', re.MULTILINE).sub('', s).replace('\n', ' ') trainer = train_cls() trainer.INCLUDE_ALL_COLLOCS = True trainer.train(text) sbd = tok_cls(trainer.get_params()) for l in sbd.sentences_from_text(text): print(cleanup(l))
mit
joakim-hove/django
django/contrib/gis/db/models/query.py
224
36645
import warnings from django.contrib.gis.db.models import aggregates from django.contrib.gis.db.models.fields import ( GeometryField, LineStringField, PointField, get_srid_info, ) from django.contrib.gis.db.models.lookups import GISLookup from django.contrib.gis.db.models.sql import ( AreaField, DistanceField, GeomField, GMLField, ) from django.contrib.gis.geometry.backend import Geometry from django.contrib.gis.measure import Area, Distance from django.db import connections from django.db.models.expressions import RawSQL from django.db.models.fields import Field from django.db.models.query import QuerySet from django.utils import six from django.utils.deprecation import ( RemovedInDjango20Warning, RemovedInDjango110Warning, ) class GeoQuerySet(QuerySet): "The Geographic QuerySet." # ### GeoQuerySet Methods ### def area(self, tolerance=0.05, **kwargs): """ Returns the area of the geographic field in an `area` attribute on each element of this GeoQuerySet. """ # Performing setup here rather than in `_spatial_attribute` so that # we can get the units for `AreaField`. procedure_args, geo_field = self._spatial_setup( 'area', field_name=kwargs.get('field_name')) s = {'procedure_args': procedure_args, 'geo_field': geo_field, 'setup': False, } connection = connections[self.db] backend = connection.ops if backend.oracle: s['procedure_fmt'] = '%(geo_col)s,%(tolerance)s' s['procedure_args']['tolerance'] = tolerance s['select_field'] = AreaField('sq_m') # Oracle returns area in units of meters. elif backend.postgis or backend.spatialite: if backend.geography: # Geography fields support area calculation, returns square meters. s['select_field'] = AreaField('sq_m') elif not geo_field.geodetic(connection): # Getting the area units of the geographic field. s['select_field'] = AreaField(Area.unit_attname(geo_field.units_name(connection))) else: # TODO: Do we want to support raw number areas for geodetic fields? 
raise Exception('Area on geodetic coordinate systems not supported.') return self._spatial_attribute('area', s, **kwargs) def centroid(self, **kwargs): """ Returns the centroid of the geographic field in a `centroid` attribute on each element of this GeoQuerySet. """ return self._geom_attribute('centroid', **kwargs) def collect(self, **kwargs): """ Performs an aggregate collect operation on the given geometry field. This is analogous to a union operation, but much faster because boundaries are not dissolved. """ warnings.warn( "The collect GeoQuerySet method is deprecated. Use the Collect() " "aggregate in an aggregate() or annotate() method.", RemovedInDjango110Warning, stacklevel=2 ) return self._spatial_aggregate(aggregates.Collect, **kwargs) def difference(self, geom, **kwargs): """ Returns the spatial difference of the geographic field in a `difference` attribute on each element of this GeoQuerySet. """ return self._geomset_attribute('difference', geom, **kwargs) def distance(self, geom, **kwargs): """ Returns the distance from the given geographic field name to the given geometry in a `distance` attribute on each element of the GeoQuerySet. Keyword Arguments: `spheroid` => If the geometry field is geodetic and PostGIS is the spatial database, then the more accurate spheroid calculation will be used instead of the quicker sphere calculation. `tolerance` => Used only for Oracle. The tolerance is in meters -- a default of 5 centimeters (0.05) is used. """ return self._distance_attribute('distance', geom, **kwargs) def envelope(self, **kwargs): """ Returns a Geometry representing the bounding box of the Geometry field in an `envelope` attribute on each element of the GeoQuerySet. """ return self._geom_attribute('envelope', **kwargs) def extent(self, **kwargs): """ Returns the extent (aggregate) of the features in the GeoQuerySet. The extent will be returned as a 4-tuple, consisting of (xmin, ymin, xmax, ymax). 
""" warnings.warn( "The extent GeoQuerySet method is deprecated. Use the Extent() " "aggregate in an aggregate() or annotate() method.", RemovedInDjango110Warning, stacklevel=2 ) return self._spatial_aggregate(aggregates.Extent, **kwargs) def extent3d(self, **kwargs): """ Returns the aggregate extent, in 3D, of the features in the GeoQuerySet. It is returned as a 6-tuple, comprising: (xmin, ymin, zmin, xmax, ymax, zmax). """ warnings.warn( "The extent3d GeoQuerySet method is deprecated. Use the Extent3D() " "aggregate in an aggregate() or annotate() method.", RemovedInDjango110Warning, stacklevel=2 ) return self._spatial_aggregate(aggregates.Extent3D, **kwargs) def force_rhr(self, **kwargs): """ Returns a modified version of the Polygon/MultiPolygon in which all of the vertices follow the Right-Hand-Rule. By default, this is attached as the `force_rhr` attribute on each element of the GeoQuerySet. """ return self._geom_attribute('force_rhr', **kwargs) def geojson(self, precision=8, crs=False, bbox=False, **kwargs): """ Returns a GeoJSON representation of the geometry field in a `geojson` attribute on each element of the GeoQuerySet. The `crs` and `bbox` keywords may be set to True if the user wants the coordinate reference system and the bounding box to be included in the GeoJSON representation of the geometry. 
""" backend = connections[self.db].ops if not backend.geojson: raise NotImplementedError('Only PostGIS 1.3.4+ and SpatiaLite 3.0+ ' 'support GeoJSON serialization.') if not isinstance(precision, six.integer_types): raise TypeError('Precision keyword must be set with an integer.') options = 0 if crs and bbox: options = 3 elif bbox: options = 1 elif crs: options = 2 s = {'desc': 'GeoJSON', 'procedure_args': {'precision': precision, 'options': options}, 'procedure_fmt': '%(geo_col)s,%(precision)s,%(options)s', } return self._spatial_attribute('geojson', s, **kwargs) def geohash(self, precision=20, **kwargs): """ Returns a GeoHash representation of the given field in a `geohash` attribute on each element of the GeoQuerySet. The `precision` keyword may be used to custom the number of _characters_ used in the output GeoHash, the default is 20. """ s = {'desc': 'GeoHash', 'procedure_args': {'precision': precision}, 'procedure_fmt': '%(geo_col)s,%(precision)s', } return self._spatial_attribute('geohash', s, **kwargs) def gml(self, precision=8, version=2, **kwargs): """ Returns GML representation of the given field in a `gml` attribute on each element of the GeoQuerySet. """ backend = connections[self.db].ops s = {'desc': 'GML', 'procedure_args': {'precision': precision}} if backend.postgis: s['procedure_fmt'] = '%(version)s,%(geo_col)s,%(precision)s' s['procedure_args'] = {'precision': precision, 'version': version} if backend.oracle: s['select_field'] = GMLField() return self._spatial_attribute('gml', s, **kwargs) def intersection(self, geom, **kwargs): """ Returns the spatial intersection of the Geometry field in an `intersection` attribute on each element of this GeoQuerySet. """ return self._geomset_attribute('intersection', geom, **kwargs) def kml(self, **kwargs): """ Returns KML representation of the geometry field in a `kml` attribute on each element of this GeoQuerySet. 
""" s = {'desc': 'KML', 'procedure_fmt': '%(geo_col)s,%(precision)s', 'procedure_args': {'precision': kwargs.pop('precision', 8)}, } return self._spatial_attribute('kml', s, **kwargs) def length(self, **kwargs): """ Returns the length of the geometry field as a `Distance` object stored in a `length` attribute on each element of this GeoQuerySet. """ return self._distance_attribute('length', None, **kwargs) def make_line(self, **kwargs): """ Creates a linestring from all of the PointField geometries in the this GeoQuerySet and returns it. This is a spatial aggregate method, and thus returns a geometry rather than a GeoQuerySet. """ warnings.warn( "The make_line GeoQuerySet method is deprecated. Use the MakeLine() " "aggregate in an aggregate() or annotate() method.", RemovedInDjango110Warning, stacklevel=2 ) return self._spatial_aggregate(aggregates.MakeLine, geo_field_type=PointField, **kwargs) def mem_size(self, **kwargs): """ Returns the memory size (number of bytes) that the geometry field takes in a `mem_size` attribute on each element of this GeoQuerySet. """ return self._spatial_attribute('mem_size', {}, **kwargs) def num_geom(self, **kwargs): """ Returns the number of geometries if the field is a GeometryCollection or Multi* Field in a `num_geom` attribute on each element of this GeoQuerySet; otherwise the sets with None. """ return self._spatial_attribute('num_geom', {}, **kwargs) def num_points(self, **kwargs): """ Returns the number of points in the first linestring in the Geometry field in a `num_points` attribute on each element of this GeoQuerySet; otherwise sets with None. """ return self._spatial_attribute('num_points', {}, **kwargs) def perimeter(self, **kwargs): """ Returns the perimeter of the geometry field as a `Distance` object stored in a `perimeter` attribute on each element of this GeoQuerySet. 
""" return self._distance_attribute('perimeter', None, **kwargs) def point_on_surface(self, **kwargs): """ Returns a Point geometry guaranteed to lie on the surface of the Geometry field in a `point_on_surface` attribute on each element of this GeoQuerySet; otherwise sets with None. """ return self._geom_attribute('point_on_surface', **kwargs) def reverse_geom(self, **kwargs): """ Reverses the coordinate order of the geometry, and attaches as a `reverse` attribute on each element of this GeoQuerySet. """ s = {'select_field': GeomField()} kwargs.setdefault('model_att', 'reverse_geom') if connections[self.db].ops.oracle: s['geo_field_type'] = LineStringField return self._spatial_attribute('reverse', s, **kwargs) def scale(self, x, y, z=0.0, **kwargs): """ Scales the geometry to a new size by multiplying the ordinates with the given x,y,z scale factors. """ if connections[self.db].ops.spatialite: if z != 0.0: raise NotImplementedError('SpatiaLite does not support 3D scaling.') s = {'procedure_fmt': '%(geo_col)s,%(x)s,%(y)s', 'procedure_args': {'x': x, 'y': y}, 'select_field': GeomField(), } else: s = {'procedure_fmt': '%(geo_col)s,%(x)s,%(y)s,%(z)s', 'procedure_args': {'x': x, 'y': y, 'z': z}, 'select_field': GeomField(), } return self._spatial_attribute('scale', s, **kwargs) def snap_to_grid(self, *args, **kwargs): """ Snap all points of the input geometry to the grid. How the geometry is snapped to the grid depends on how many arguments were given: - 1 argument : A single size to snap both the X and Y grids to. - 2 arguments: X and Y sizes to snap the grid to. - 4 arguments: X, Y sizes and the X, Y origins. 
""" if False in [isinstance(arg, (float,) + six.integer_types) for arg in args]: raise TypeError('Size argument(s) for the grid must be a float or integer values.') nargs = len(args) if nargs == 1: size = args[0] procedure_fmt = '%(geo_col)s,%(size)s' procedure_args = {'size': size} elif nargs == 2: xsize, ysize = args procedure_fmt = '%(geo_col)s,%(xsize)s,%(ysize)s' procedure_args = {'xsize': xsize, 'ysize': ysize} elif nargs == 4: xsize, ysize, xorigin, yorigin = args procedure_fmt = '%(geo_col)s,%(xorigin)s,%(yorigin)s,%(xsize)s,%(ysize)s' procedure_args = {'xsize': xsize, 'ysize': ysize, 'xorigin': xorigin, 'yorigin': yorigin} else: raise ValueError('Must provide 1, 2, or 4 arguments to `snap_to_grid`.') s = {'procedure_fmt': procedure_fmt, 'procedure_args': procedure_args, 'select_field': GeomField(), } return self._spatial_attribute('snap_to_grid', s, **kwargs) def svg(self, relative=False, precision=8, **kwargs): """ Returns SVG representation of the geographic field in a `svg` attribute on each element of this GeoQuerySet. Keyword Arguments: `relative` => If set to True, this will evaluate the path in terms of relative moves (rather than absolute). `precision` => May be used to set the maximum number of decimal digits used in output (defaults to 8). """ relative = int(bool(relative)) if not isinstance(precision, six.integer_types): raise TypeError('SVG precision keyword argument must be an integer.') s = { 'desc': 'SVG', 'procedure_fmt': '%(geo_col)s,%(rel)s,%(precision)s', 'procedure_args': { 'rel': relative, 'precision': precision, } } return self._spatial_attribute('svg', s, **kwargs) def sym_difference(self, geom, **kwargs): """ Returns the symmetric difference of the geographic field in a `sym_difference` attribute on each element of this GeoQuerySet. 
""" return self._geomset_attribute('sym_difference', geom, **kwargs) def translate(self, x, y, z=0.0, **kwargs): """ Translates the geometry to a new location using the given numeric parameters as offsets. """ if connections[self.db].ops.spatialite: if z != 0.0: raise NotImplementedError('SpatiaLite does not support 3D translation.') s = {'procedure_fmt': '%(geo_col)s,%(x)s,%(y)s', 'procedure_args': {'x': x, 'y': y}, 'select_field': GeomField(), } else: s = {'procedure_fmt': '%(geo_col)s,%(x)s,%(y)s,%(z)s', 'procedure_args': {'x': x, 'y': y, 'z': z}, 'select_field': GeomField(), } return self._spatial_attribute('translate', s, **kwargs) def transform(self, srid=4326, **kwargs): """ Transforms the given geometry field to the given SRID. If no SRID is provided, the transformation will default to using 4326 (WGS84). """ if not isinstance(srid, six.integer_types): raise TypeError('An integer SRID must be provided.') field_name = kwargs.get('field_name') self._spatial_setup('transform', field_name=field_name) self.query.add_context('transformed_srid', srid) return self._clone() def union(self, geom, **kwargs): """ Returns the union of the geographic field with the given Geometry in a `union` attribute on each element of this GeoQuerySet. """ return self._geomset_attribute('union', geom, **kwargs) def unionagg(self, **kwargs): """ Performs an aggregate union on the given geometry field. Returns None if the GeoQuerySet is empty. The `tolerance` keyword is for Oracle backends only. """ warnings.warn( "The unionagg GeoQuerySet method is deprecated. Use the Union() " "aggregate in an aggregate() or annotate() method.", RemovedInDjango110Warning, stacklevel=2 ) return self._spatial_aggregate(aggregates.Union, **kwargs) # ### Private API -- Abstracted DRY routines. ### def _spatial_setup(self, att, desc=None, field_name=None, geo_field_type=None): """ Performs set up for executing the spatial function. """ # Does the spatial backend support this? 
connection = connections[self.db] func = getattr(connection.ops, att, False) if desc is None: desc = att if not func: raise NotImplementedError('%s stored procedure not available on ' 'the %s backend.' % (desc, connection.ops.name)) # Initializing the procedure arguments. procedure_args = {'function': func} # Is there a geographic field in the model to perform this # operation on? geo_field = self._geo_field(field_name) if not geo_field: raise TypeError('%s output only available on GeometryFields.' % func) # If the `geo_field_type` keyword was used, then enforce that # type limitation. if geo_field_type is not None and not isinstance(geo_field, geo_field_type): raise TypeError('"%s" stored procedures may only be called on %ss.' % (func, geo_field_type.__name__)) # Setting the procedure args. procedure_args['geo_col'] = self._geocol_select(geo_field, field_name) return procedure_args, geo_field def _spatial_aggregate(self, aggregate, field_name=None, geo_field_type=None, tolerance=0.05): """ DRY routine for calling aggregate spatial stored procedures and returning their result to the caller of the function. """ # Getting the field the geographic aggregate will be called on. geo_field = self._geo_field(field_name) if not geo_field: raise TypeError('%s aggregate only available on GeometryFields.' % aggregate.name) # Checking if there are any geo field type limitations on this # aggregate (e.g. ST_Makeline only operates on PointFields). if geo_field_type is not None and not isinstance(geo_field, geo_field_type): raise TypeError('%s aggregate may only be called on %ss.' % (aggregate.name, geo_field_type.__name__)) # Getting the string expression of the field name, as this is the # argument taken by `Aggregate` objects. agg_col = field_name or geo_field.name # Adding any keyword parameters for the Aggregate object. Oracle backends # in particular need an additional `tolerance` parameter. 
agg_kwargs = {} if connections[self.db].ops.oracle: agg_kwargs['tolerance'] = tolerance # Calling the QuerySet.aggregate, and returning only the value of the aggregate. return self.aggregate(geoagg=aggregate(agg_col, **agg_kwargs))['geoagg'] def _spatial_attribute(self, att, settings, field_name=None, model_att=None): """ DRY routine for calling a spatial stored procedure on a geometry column and attaching its output as an attribute of the model. Arguments: att: The name of the spatial attribute that holds the spatial SQL function to call. settings: Dictonary of internal settings to customize for the spatial procedure. Public Keyword Arguments: field_name: The name of the geographic field to call the spatial function on. May also be a lookup to a geometry field as part of a foreign key relation. model_att: The name of the model attribute to attach the output of the spatial function to. """ warnings.warn( "The %s GeoQuerySet method is deprecated. See GeoDjango Functions " "documentation to find the expression-based replacement." % att, RemovedInDjango20Warning, stacklevel=2 ) # Default settings. settings.setdefault('desc', None) settings.setdefault('geom_args', ()) settings.setdefault('geom_field', None) settings.setdefault('procedure_args', {}) settings.setdefault('procedure_fmt', '%(geo_col)s') settings.setdefault('select_params', []) connection = connections[self.db] # Performing setup for the spatial column, unless told not to. if settings.get('setup', True): default_args, geo_field = self._spatial_setup( att, desc=settings['desc'], field_name=field_name, geo_field_type=settings.get('geo_field_type')) for k, v in six.iteritems(default_args): settings['procedure_args'].setdefault(k, v) else: geo_field = settings['geo_field'] # The attribute to attach to the model. if not isinstance(model_att, six.string_types): model_att = att # Special handling for any argument that is a geometry. 
for name in settings['geom_args']: # Using the field's get_placeholder() routine to get any needed # transformation SQL. geom = geo_field.get_prep_value(settings['procedure_args'][name]) params = geo_field.get_db_prep_lookup('contains', geom, connection=connection) geom_placeholder = geo_field.get_placeholder(geom, None, connection) # Replacing the procedure format with that of any needed # transformation SQL. old_fmt = '%%(%s)s' % name new_fmt = geom_placeholder % '%%s' settings['procedure_fmt'] = settings['procedure_fmt'].replace(old_fmt, new_fmt) settings['select_params'].extend(params) # Getting the format for the stored procedure. fmt = '%%(function)s(%s)' % settings['procedure_fmt'] # If the result of this function needs to be converted. if settings.get('select_field'): select_field = settings['select_field'] if connection.ops.oracle: select_field.empty_strings_allowed = False else: select_field = Field() # Finally, setting the extra selection attribute with # the format string expanded with the stored procedure # arguments. self.query.add_annotation( RawSQL(fmt % settings['procedure_args'], settings['select_params'], select_field), model_att) return self def _distance_attribute(self, func, geom=None, tolerance=0.05, spheroid=False, **kwargs): """ DRY routine for GeoQuerySet distance attribute routines. """ # Setting up the distance procedure arguments. procedure_args, geo_field = self._spatial_setup(func, field_name=kwargs.get('field_name')) # If geodetic defaulting distance attribute to meters (Oracle and # PostGIS spherical distances return meters). Otherwise, use the # units of the geometry field. connection = connections[self.db] geodetic = geo_field.geodetic(connection) geography = geo_field.geography if geodetic: dist_att = 'm' else: dist_att = Distance.unit_attname(geo_field.units_name(connection)) # Shortcut booleans for what distance function we're using and # whether the geometry field is 3D. 
distance = func == 'distance' length = func == 'length' perimeter = func == 'perimeter' if not (distance or length or perimeter): raise ValueError('Unknown distance function: %s' % func) geom_3d = geo_field.dim == 3 # The field's get_db_prep_lookup() is used to get any # extra distance parameters. Here we set up the # parameters that will be passed in to field's function. lookup_params = [geom or 'POINT (0 0)', 0] # Getting the spatial backend operations. backend = connection.ops # If the spheroid calculation is desired, either by the `spheroid` # keyword or when calculating the length of geodetic field, make # sure the 'spheroid' distance setting string is passed in so we # get the correct spatial stored procedure. if spheroid or (backend.postgis and geodetic and (not geography) and length): lookup_params.append('spheroid') lookup_params = geo_field.get_prep_value(lookup_params) params = geo_field.get_db_prep_lookup('distance_lte', lookup_params, connection=connection) # The `geom_args` flag is set to true if a geometry parameter was # passed in. geom_args = bool(geom) if backend.oracle: if distance: procedure_fmt = '%(geo_col)s,%(geom)s,%(tolerance)s' elif length or perimeter: procedure_fmt = '%(geo_col)s,%(tolerance)s' procedure_args['tolerance'] = tolerance else: # Getting whether this field is in units of degrees since the field may have # been transformed via the `transform` GeoQuerySet method. srid = self.query.get_context('transformed_srid') if srid: u, unit_name, s = get_srid_info(srid, connection) geodetic = unit_name.lower() in geo_field.geodetic_units if geodetic and not connection.features.supports_distance_geodetic: raise ValueError( 'This database does not support linear distance ' 'calculations on geodetic coordinate systems.' 
) if distance: if srid: # Setting the `geom_args` flag to false because we want to handle # transformation SQL here, rather than the way done by default # (which will transform to the original SRID of the field rather # than to what was transformed to). geom_args = False procedure_fmt = '%s(%%(geo_col)s, %s)' % (backend.transform, srid) if geom.srid is None or geom.srid == srid: # If the geom parameter srid is None, it is assumed the coordinates # are in the transformed units. A placeholder is used for the # geometry parameter. `GeomFromText` constructor is also needed # to wrap geom placeholder for SpatiaLite. if backend.spatialite: procedure_fmt += ', %s(%%%%s, %s)' % (backend.from_text, srid) else: procedure_fmt += ', %%s' else: # We need to transform the geom to the srid specified in `transform()`, # so wrapping the geometry placeholder in transformation SQL. # SpatiaLite also needs geometry placeholder wrapped in `GeomFromText` # constructor. if backend.spatialite: procedure_fmt += (', %s(%s(%%%%s, %s), %s)' % ( backend.transform, backend.from_text, geom.srid, srid)) else: procedure_fmt += ', %s(%%%%s, %s)' % (backend.transform, srid) else: # `transform()` was not used on this GeoQuerySet. procedure_fmt = '%(geo_col)s,%(geom)s' if not geography and geodetic: # Spherical distance calculation is needed (because the geographic # field is geodetic). However, the PostGIS ST_distance_sphere/spheroid() # procedures may only do queries from point columns to point geometries # some error checking is required. if not backend.geography: if not isinstance(geo_field, PointField): raise ValueError('Spherical distance calculation only supported on PointFields.') if not str(Geometry(six.memoryview(params[0].ewkb)).geom_type) == 'Point': raise ValueError( 'Spherical distance calculation only supported with ' 'Point Geometry parameters' ) # The `function` procedure argument needs to be set differently for # geodetic distance calculations. 
if spheroid: # Call to distance_spheroid() requires spheroid param as well. procedure_fmt += ",'%(spheroid)s'" procedure_args.update({'function': backend.distance_spheroid, 'spheroid': params[1]}) else: procedure_args.update({'function': backend.distance_sphere}) elif length or perimeter: procedure_fmt = '%(geo_col)s' if not geography and geodetic and length: # There's no `length_sphere`, and `length_spheroid` also # works on 3D geometries. procedure_fmt += ",'%(spheroid)s'" procedure_args.update({'function': backend.length_spheroid, 'spheroid': params[1]}) elif geom_3d and connection.features.supports_3d_functions: # Use 3D variants of perimeter and length routines on supported backends. if perimeter: procedure_args.update({'function': backend.perimeter3d}) elif length: procedure_args.update({'function': backend.length3d}) # Setting up the settings for `_spatial_attribute`. s = {'select_field': DistanceField(dist_att), 'setup': False, 'geo_field': geo_field, 'procedure_args': procedure_args, 'procedure_fmt': procedure_fmt, } if geom_args: s['geom_args'] = ('geom',) s['procedure_args']['geom'] = geom elif geom: # The geometry is passed in as a parameter because we handled # transformation conditions in this routine. s['select_params'] = [backend.Adapter(geom)] return self._spatial_attribute(func, s, **kwargs) def _geom_attribute(self, func, tolerance=0.05, **kwargs): """ DRY routine for setting up a GeoQuerySet method that attaches a Geometry attribute (e.g., `centroid`, `point_on_surface`). """ s = {'select_field': GeomField()} if connections[self.db].ops.oracle: s['procedure_fmt'] = '%(geo_col)s,%(tolerance)s' s['procedure_args'] = {'tolerance': tolerance} return self._spatial_attribute(func, s, **kwargs) def _geomset_attribute(self, func, geom, tolerance=0.05, **kwargs): """ DRY routine for setting up a GeoQuerySet method that attaches a Geometry attribute and takes a Geoemtry parameter. 
This is used for geometry set-like operations (e.g., intersection, difference, union, sym_difference). """ s = { 'geom_args': ('geom',), 'select_field': GeomField(), 'procedure_fmt': '%(geo_col)s,%(geom)s', 'procedure_args': {'geom': geom}, } if connections[self.db].ops.oracle: s['procedure_fmt'] += ',%(tolerance)s' s['procedure_args']['tolerance'] = tolerance return self._spatial_attribute(func, s, **kwargs) def _geocol_select(self, geo_field, field_name): """ Helper routine for constructing the SQL to select the geographic column. Takes into account if the geographic field is in a ForeignKey relation to the current model. """ compiler = self.query.get_compiler(self.db) opts = self.model._meta if geo_field not in opts.fields: # Is this operation going to be on a related geographic field? # If so, it'll have to be added to the select related information # (e.g., if 'location__point' was given as the field name). # Note: the operation really is defined as "must add select related!" self.query.add_select_related([field_name]) # Call pre_sql_setup() so that compiler.select gets populated. compiler.pre_sql_setup() for col, _, _ in compiler.select: if col.output_field == geo_field: return col.as_sql(compiler, compiler.connection)[0] raise ValueError("%r not in compiler's related_select_cols" % geo_field) elif geo_field not in opts.local_fields: # This geographic field is inherited from another model, so we have to # use the db table for the _parent_ model instead. parent_model = geo_field.model._meta.concrete_model return self._field_column(compiler, geo_field, parent_model._meta.db_table) else: return self._field_column(compiler, geo_field) # Private API utilities, subject to change. def _geo_field(self, field_name=None): """ Returns the first Geometry field encountered or the one specified via the `field_name` keyword. 
The `field_name` may be a string specifying the geometry field on this GeoQuerySet's model, or a lookup string to a geometry field via a ForeignKey relation. """ if field_name is None: # Incrementing until the first geographic field is found. for field in self.model._meta.fields: if isinstance(field, GeometryField): return field return False else: # Otherwise, check by the given field name -- which may be # a lookup to a _related_ geographic field. return GISLookup._check_geo_field(self.model._meta, field_name) def _field_column(self, compiler, field, table_alias=None, column=None): """ Helper function that returns the database column for the given field. The table and column are returned (quoted) in the proper format, e.g., `"geoapp_city"."point"`. If `table_alias` is not specified, the database table associated with the model of this `GeoQuerySet` will be used. If `column` is specified, it will be used instead of the value in `field.column`. """ if table_alias is None: table_alias = compiler.query.get_meta().db_table return "%s.%s" % (compiler.quote_name_unless_alias(table_alias), compiler.connection.ops.quote_name(column or field.column))
bsd-3-clause
glucoseinc/naumanni-server
naumanni/web/server.py
2
11683
# -*- coding: utf-8 -*- import asyncio import collections import logging import functools import json import multiprocessing import os import signal import socket import time import psutil from tornado import gen, ioloop, iostream, routing, web from tornado.httpserver import HTTPServer from tornado.wsgi import WSGIContainer import tornado.netutil import tornado.process from tornado.platform.asyncio import AsyncIOMainLoop from .base import NaumanniRequestHandlerMixIn from .proxy import APIProxyHandler from .websocket import WebsocketProxyHandler logger = logging.getLogger(__name__) MAX_WAIT_SECONDS_BEFORE_SHUTDOWN = 5 REDIS_SERVER_STATUS_KEY = 'naumanni:server_status' ChildProc = collections.namedtuple('ChildProc', ['proc', 'pipe_reader', 'pipe_writer']) DELIMITER = b'\x00' class NaumanniWebApplication(web.Application): def add_plugin_handlers(self, plugin_id, handlers): """plugin apiを追加する""" # plugin apiのprefixをURLに追加する path_prefix = '/plugins/{}/'.format(plugin_id) replaced_handlers = [] for rule in handlers: if isinstance(rule, (tuple, list)): if isinstance(rule[0], str): if not rule[0].startswith('/'): raise ValueError('invalid plugin url path, must be startswith \'/\'') rule = (path_prefix + rule[0][1:], *rule[1:]) else: assert 0, 'not implemented' else: assert 0, 'not implemented' replaced_handlers.append(rule) # 登録 self.wildcard_router.add_rules( [(path_prefix + '.*$', web._ApplicationRouter(self, replaced_handlers))] ) class WebServerBase(object): def __init__(self, naumanni_app, listen): self.naumanni_app = naumanni_app self.listen = listen self.init() def init(self): handlers = [ (r'/proxy/(?P<request_url>.+)', APIProxyHandler), (r'/ws/(?P<request_url>.+)', WebsocketProxyHandler), (r'/status', StatusAPIHandler), (r'/ping', PingAPIHandler), ] self.application = NaumanniWebApplication( handlers, compress_response=True, debug=self.naumanni_app.debug, autoreload=False, websocket_ping_interval=3, naumanni_app=self.naumanni_app, ) 
self.naumanni_app.emit('after-initialize-webserver', webserver=self) def _run_server(self, task_id): assert AsyncIOMainLoop().initialized() # run self.naumanni_app.setup(task_id) synchronusly io_loop = ioloop.IOLoop.current() io_loop.run_sync(functools.partial(self.naumanni_app.setup, task_id)) self.http_server = HTTPServer(self.application) self.http_server.add_sockets(self.sockets) # install signal handlers for child proc install_child_signal_handlers(self) # run ioloop ioloop.IOLoop.current().start() async def save_server_status(self, status): """statusをredisに保存する""" async with self.naumanni_app.get_async_redis() as redis: status['date'] = time.time() await redis.set(REDIS_SERVER_STATUS_KEY, json.dumps(status)) async def collect_server_status(self): raise NotImplementedError() class DebugWebServer(WebServerBase): def start(self): self.sockets = tornado.netutil.bind_sockets(*self.naumanni_app.config.listen) # debugなのでautoreloadする AsyncIOMainLoop().install() from tornado import autoreload autoreload.start() self._run_server(None) class ForkedWebServer(WebServerBase): def start(self): self.sockets = tornado.netutil.bind_sockets(*self.naumanni_app.config.listen) children = self.fork(0) # こっからはMasterの世界 # use asyncio for ioloop AsyncIOMainLoop().install() self.children = [ChildProc( proc, iostream.PipeIOStream(fdr), iostream.PipeIOStream(fdw), ) for proc, fdr, fdw in children] # run self.naumanni_app.setup(None) synchronusly io_loop = ioloop.IOLoop.current() io_loop.run_sync(functools.partial(self.naumanni_app.setup, None)) # master run loop io_loop.start() for task_id, child in enumerate(self.children): child.proc.join() def is_master(self): return getattr(self.naumanni_app, 'task_id', None) is None def fork(self, num_processes): # install signal handlers for master proc install_master_signal_handlers(self) if num_processes == 0: num_processes = multiprocessing.cpu_count() children = [] for task_id in range(num_processes): # scoketpair使えば良い気がする fdr, fdw = os.pipe() 
fdr2, fdw2 = os.pipe() proc = multiprocessing.Process(target=self._run_child, args=(task_id, fdr, fdw2)) children.append((proc, fdr2, fdw)) proc.start() return children def _run_child(self, task_id, pipe_reader, pipe_writer): logger.info('Child process PID:%s', os.getpid()) # use asyncio for ioloop AsyncIOMainLoop().install() # listen pipe self.pipe_reader = iostream.PipeIOStream(pipe_reader) self.pipe_writer = iostream.PipeIOStream(pipe_writer) tornado.process._reseed_random() self._run_server(task_id) def on_master_pipe_can_read(self, child): """child -> masterのpipeに何か書き込みがあれば呼ばれる""" async def _process_child_request(f): self.unwait_child_commands() try: request = json.loads(f.result()[:-len(DELIMITER)]) logger.info('on_master_pipe_can_read %s %r', child.proc.pid, request) if request.get('request') == STATUS_REQUEST: status = await self.collect_server_status() await child.pipe_writer.write( json.dumps(status).encode('latin1') + DELIMITER ) finally: self.wait_child_commands() ioloop.IOLoop.instance().add_future( child.pipe_reader.read_until(DELIMITER), _process_child_request ) # server status async def collect_server_status(self): if not self.is_master(): return await self.get_status_from_master() for child in self.children: os.kill(child.proc.pid, signal.SIGUSR1) keys = ['io_loop.handlers', 'io_loop.selector.fds', 'process.uss', 'process.rss'] status = {'process': {}} for idx, child in enumerate(self.children): child_status = await child.pipe_reader.read_until(DELIMITER) child_status = json.loads(child_status[:-len(DELIMITER)]) status['process'][idx] = child_status for key in keys: status[key] = status.get(key, 0) + child_status[key] master_status = _collect_status() status['process']['master'] = master_status for key in keys: status[key] = status.get(key, 0) + master_status[key] return status # utility page class StatusAPIHandler(web.RequestHandler, NaumanniRequestHandlerMixIn): async def get(self): last_status = await self._get_status() last_time = 
last_status['date'] if last_status else None # sind signal os.kill(os.getppid(), signal.SIGUSR1) while True: status = await self._get_status() if status and status['date'] != last_time: break await gen.sleep(0.5) self.write(status) await self.flush() async def _get_status(self): async with self.naumanni_app.get_async_redis() as redis: data = await redis.get(REDIS_SERVER_STATUS_KEY) return json.loads(data) if data else None class PingAPIHandler(web.RequestHandler): async def get(self): self.write('pong') await self.flush() # signal handling def install_master_signal_handlers(webserver): # SIGTERMされてもちゃんと終了するように def stop_handler(webserver, sig, frame): io_loop = ioloop.IOLoop.current() try: for child in webserver.children: try: os.kill(child.proc.pid, signal.SIGTERM) except ProcessLookupError: pass io_loop.add_callback_from_signal(io_loop.stop) except Exception as exc: logger.exception(exc) handler = functools.partial(stop_handler, webserver) signal.signal(signal.SIGINT, handler) signal.signal(signal.SIGQUIT, handler) signal.signal(signal.SIGTERM, handler) # status情報収集用ハンドラ def status_handler(webserver, sig, frame): async def show_server_status(webserver): status = await webserver.collect_server_status() await webserver.save_server_status(status) logger.info('Server status: %r', status) ioloop.IOLoop.instance().add_callback_from_signal(show_server_status, webserver) signal.signal(signal.SIGUSR1, functools.partial(status_handler, webserver)) def install_child_signal_handlers(webserver): """子プロセスがgracefulに死ぬように""" def stop_handler(webserver, sig, frame): io_loop = ioloop.IOLoop.instance() def stop_loop(deadline): now = time.time() if now < deadline and has_ioloop_tasks(io_loop): logger.info('Waiting for next tick...') io_loop.add_timeout(now + 1, stop_loop, deadline) else: io_loop.stop() logger.info('Shutdown finally') def shutdown(): logger.info('Stopping http server') webserver.naumanni_app.emit('before-stop-server') webserver.http_server.stop() logger.info('Will 
shutdown in %s seconds ...', MAX_WAIT_SECONDS_BEFORE_SHUTDOWN) stop_loop(time.time() + MAX_WAIT_SECONDS_BEFORE_SHUTDOWN) io_loop.add_callback_from_signal(shutdown) handler = functools.partial(stop_handler, webserver) signal.signal(signal.SIGINT, handler) signal.signal(signal.SIGQUIT, handler) signal.signal(signal.SIGTERM, handler) def status_handler(webserver, sig, frame): io_loop = ioloop.IOLoop.instance() async def _send_status(webserver): status = _collect_status() await webserver.pipe_writer.write(json.dumps(status).encode('latin1') + DELIMITER) io_loop.add_callback_from_signal(_send_status, webserver) signal.signal(signal.SIGUSR1, functools.partial(status_handler, webserver)) def _collect_status(): io_loop = ioloop.IOLoop.instance() selector = io_loop.asyncio_loop._selector proc = psutil.Process() with proc.oneshot(): mem = proc.memory_full_info() status = { 'io_loop.handlers': len(io_loop.handlers), 'io_loop.selector.fds': len(selector._fd_to_key), 'process.uss': mem.uss / 1024.0 / 1024.0, 'process.rss': mem.rss / 1024.0 / 1024.0, } return status def has_ioloop_tasks(io_loop): if hasattr(io_loop, '_callbacks'): return io_loop._callbacks or io_loop._timeouts elif hasattr(io_loop, 'handlers'): return len(io_loop.handlers) return False
agpl-3.0
allanlewis/behave
test/test_ansi_escapes.py
12
2733
# -*- coding: utf-8 -*- # pylint: disable=C0103,R0201,W0401,W0614,W0621 # C0103 Invalid name (setUp(), ...) # R0201 Method could be a function # W0401 Wildcard import # W0614 Unused import ... from wildcard import # W0621 Redefining name ... from outer scope from __future__ import absolute_import from nose import tools from behave.formatter import ansi_escapes import unittest from six.moves import range class StripEscapesTest(unittest.TestCase): ALL_COLORS = list(ansi_escapes.colors.keys()) CURSOR_UPS = [ ansi_escapes.up(count) for count in range(10) ] TEXTS = [ u"lorem ipsum", u"Alice\nBob\nCharly\nDennis", ] @classmethod def colorize(cls, text, color): color_escape = "" if color: color_escape = ansi_escapes.colors[color] return color_escape + text + ansi_escapes.escapes["reset"] @classmethod def colorize_text(cls, text, colors=None): if not colors: colors = [] colors_size = len(colors) color_index = 0 colored_chars = [] for char in text: color = colors[color_index] colored_chars.append(cls.colorize(char, color)) color_index += 1 if color_index >= colors_size: color_index = 0 return "".join(colored_chars) def test_should_return_same_text_without_escapes(self): for text in self.TEXTS: tools.eq_(text, ansi_escapes.strip_escapes(text)) def test_should_return_empty_string_for_any_ansi_escape(self): # XXX-JE-CHECK-PY23: If list() is really needed. 
for text in list(ansi_escapes.colors.values()): tools.eq_("", ansi_escapes.strip_escapes(text)) for text in list(ansi_escapes.escapes.values()): tools.eq_("", ansi_escapes.strip_escapes(text)) def test_should_strip_color_escapes_from_text(self): for text in self.TEXTS: colored_text = self.colorize_text(text, self.ALL_COLORS) tools.eq_(text, ansi_escapes.strip_escapes(colored_text)) self.assertNotEqual(text, colored_text) for color in self.ALL_COLORS: colored_text = self.colorize(text, color) tools.eq_(text, ansi_escapes.strip_escapes(colored_text)) self.assertNotEqual(text, colored_text) def test_should_strip_cursor_up_escapes_from_text(self): for text in self.TEXTS: for cursor_up in self.CURSOR_UPS: colored_text = cursor_up + text + ansi_escapes.escapes["reset"] tools.eq_(text, ansi_escapes.strip_escapes(colored_text)) self.assertNotEqual(text, colored_text)
bsd-2-clause
googleapis/googleapis-gen
google/ads/googleads/v7/googleads-py/google/ads/googleads/v7/enums/types/product_type_level.py
1
1201
# -*- coding: utf-8 -*- # Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import proto # type: ignore __protobuf__ = proto.module( package='google.ads.googleads.v7.enums', marshal='google.ads.googleads.v7', manifest={ 'ProductTypeLevelEnum', }, ) class ProductTypeLevelEnum(proto.Message): r"""Level of the type of a product offer. """ class ProductTypeLevel(proto.Enum): r"""Enum describing the level of the type of a product offer.""" UNSPECIFIED = 0 UNKNOWN = 1 LEVEL1 = 7 LEVEL2 = 8 LEVEL3 = 9 LEVEL4 = 10 LEVEL5 = 11 __all__ = tuple(sorted(__protobuf__.manifest))
apache-2.0
tpo/ansible
lib/ansible/module_utils/facts/system/apparmor.py
232
1311
# Collect facts related to apparmor # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. from __future__ import (absolute_import, division, print_function) __metaclass__ = type import os from ansible.module_utils.facts.collector import BaseFactCollector class ApparmorFactCollector(BaseFactCollector): name = 'apparmor' _fact_ids = set() def collect(self, module=None, collected_facts=None): facts_dict = {} apparmor_facts = {} if os.path.exists('/sys/kernel/security/apparmor'): apparmor_facts['status'] = 'enabled' else: apparmor_facts['status'] = 'disabled' facts_dict['apparmor'] = apparmor_facts return facts_dict
gpl-3.0
mdodsworth/hadoop-common
src/contrib/hod/hodlib/Common/miniHTMLParser.py
182
1402
#Licensed to the Apache Software Foundation (ASF) under one
#or more contributor license agreements.  See the NOTICE file
#distributed with this work for additional information
#regarding copyright ownership.  The ASF licenses this file
#to you under the Apache License, Version 2.0 (the
#"License"); you may not use this file except in compliance
#with the License.  You may obtain a copy of the License at

#     http://www.apache.org/licenses/LICENSE-2.0

#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
import urllib, urlparse, re

from HTMLParser import HTMLParser


class miniHTMLParser(HTMLParser):
    """Minimal HTML parser that collects crawlable <a href> links.

    Links are resolved against a base URL (see setBaseUrl) and queued
    in instQueue; viewedQueue records every link ever seen so the same
    URL is never queued twice.
    """

    # Kept for backward compatibility with any code that reads these on
    # the class; real per-instance state is created in __init__.
    viewedQueue = []
    instQueue = []

    def __init__(self):
        HTMLParser.__init__(self)
        # BUGFIX: these queues used to be shared class attributes, so
        # every parser instance (and every successive crawl) accumulated
        # into the same lists. Make them per-instance.
        self.viewedQueue = []
        self.instQueue = []

    def setBaseUrl(self, url):
        # Base URL against which relative hrefs are resolved.
        self.baseUrl = url

    def getNextLink(self):
        """Return the next queued link, or None when the queue is empty."""
        if not self.instQueue:
            return None
        return self.instQueue.pop(0)

    def handle_starttag(self, tag, attrs):
        """Queue the target of an <a> tag, skipping mailto and duplicates.

        NOTE(review): assumes the first attribute of the <a> tag is the
        href (true for the pages HOD fetches) -- confirm before reusing
        on arbitrary HTML.
        """
        if tag == 'a':
            newstr = urlparse.urljoin(self.baseUrl, str(attrs[0][1]))
            # mailto: links are not crawlable pages.
            if re.search('mailto', newstr) != None:
                return

            if newstr not in self.viewedQueue:
                self.instQueue.append(newstr)
                self.viewedQueue.append(newstr)
apache-2.0
fkolacek/FIT-VUT
bp-revok/python/lib/python2.7/_threading_local.py
2
7116
"""Thread-local objects. (Note that this module provides a Python version of the threading.local class. Depending on the version of Python you're using, there may be a faster one available. You should always import the `local` class from `threading`.) Thread-local objects support the management of thread-local data. If you have data that you want to be local to a thread, simply create a thread-local object and use its attributes: >>> mydata = local() >>> mydata.number = 42 >>> mydata.number 42 You can also access the local-object's dictionary: >>> mydata.__dict__ {'number': 42} >>> mydata.__dict__.setdefault('widgets', []) [] >>> mydata.widgets [] What's important about thread-local objects is that their data are local to a thread. If we access the data in a different thread: >>> log = [] >>> def f(): ... items = mydata.__dict__.items() ... items.sort() ... log.append(items) ... mydata.number = 11 ... log.append(mydata.number) >>> import threading >>> thread = threading.Thread(target=f) >>> thread.start() >>> thread.join() >>> log [[], 11] we get different data. Furthermore, changes made in the other thread don't affect data seen in this thread: >>> mydata.number 42 Of course, values you get from a local object, including a __dict__ attribute, are for whatever thread was current at the time the attribute was read. For that reason, you generally don't want to save these values across threads, as they apply only to the thread they came from. You can create custom local objects by subclassing the local class: >>> class MyLocal(local): ... number = 2 ... initialized = False ... def __init__(self, **kw): ... if self.initialized: ... raise SystemError('__init__ called too many times') ... self.initialized = True ... self.__dict__.update(kw) ... def squared(self): ... return self.number ** 2 This can be useful to support default values, methods and initialization. 
Note that if you define an __init__ method, it will be called each time the local object is used in a separate thread. This is necessary to initialize each thread's dictionary. Now if we create a local object: >>> mydata = MyLocal(color='red') Now we have a default number: >>> mydata.number 2 an initial color: >>> mydata.color 'red' >>> del mydata.color And a method that operates on the data: >>> mydata.squared() 4 As before, we can access the data in a separate thread: >>> log = [] >>> thread = threading.Thread(target=f) >>> thread.start() >>> thread.join() >>> log [[('color', 'red'), ('initialized', True)], 11] without affecting this thread's data: >>> mydata.number 2 >>> mydata.color Traceback (most recent call last): ... AttributeError: 'MyLocal' object has no attribute 'color' Note that subclasses can define slots, but they are not thread local. They are shared across threads: >>> class MyLocal(local): ... __slots__ = 'number' >>> mydata = MyLocal() >>> mydata.number = 42 >>> mydata.color = 'red' So, the separate thread: >>> thread = threading.Thread(target=f) >>> thread.start() >>> thread.join() affects what we see: >>> mydata.number 11 >>> del mydata """ __all__ = ["local"] # We need to use objects from the threading module, but the threading # module may also want to use our `local` class, if support for locals # isn't compiled in to the `thread` module. This creates potential problems # with circular imports. For that reason, we don't import `threading` # until the bottom of this file (a hack sufficient to worm around the # potential problems). Note that almost all platforms do have support for # locals in the `thread` module, and there is no circular import problem # then, so problems introduced by fiddling the order of imports here won't # manifest on most boxes. class _localbase(object): __slots__ = '_local__key', '_local__args', '_local__lock' def __new__(cls, *args, **kw): self = object.__new__(cls) key = '_local__key', 'thread.local.' 
+ str(id(self)) object.__setattr__(self, '_local__key', key) object.__setattr__(self, '_local__args', (args, kw)) object.__setattr__(self, '_local__lock', RLock()) if (args or kw) and (cls.__init__ is object.__init__): raise TypeError("Initialization arguments are not supported") # We need to create the thread dict in anticipation of # __init__ being called, to make sure we don't call it # again ourselves. dict = object.__getattribute__(self, '__dict__') current_thread().__dict__[key] = dict return self def _patch(self): key = object.__getattribute__(self, '_local__key') d = current_thread().__dict__.get(key) if d is None: d = {} current_thread().__dict__[key] = d object.__setattr__(self, '__dict__', d) # we have a new instance dict, so call out __init__ if we have # one cls = type(self) if cls.__init__ is not object.__init__: args, kw = object.__getattribute__(self, '_local__args') cls.__init__(self, *args, **kw) else: object.__setattr__(self, '__dict__', d) class local(_localbase): def __getattribute__(self, name): lock = object.__getattribute__(self, '_local__lock') lock.acquire() try: _patch(self) return object.__getattribute__(self, name) finally: lock.release() def __setattr__(self, name, value): lock = object.__getattribute__(self, '_local__lock') lock.acquire() try: _patch(self) return object.__setattr__(self, name, value) finally: lock.release() def __delattr__(self, name): lock = object.__getattribute__(self, '_local__lock') lock.acquire() try: _patch(self) return object.__delattr__(self, name) finally: lock.release() def __del__(self): import threading key = object.__getattribute__(self, '_local__key') try: # We use the non-locking API since we might already hold the lock # (__del__ can be called at any point by the cyclic GC). threads = threading._enumerate() except: # If enumerating the current threads fails, as it seems to do # during shutdown, we'll skip cleanup under the assumption # that there is nothing to clean up. 
return for thread in threads: try: __dict__ = thread.__dict__ except AttributeError: # Thread is dying, rest in peace. continue if key in __dict__: try: del __dict__[key] except KeyError: pass # didn't have anything in this thread from threading import current_thread, RLock
apache-2.0
ACJTeam/enigma2
lib/python/Components/Renderer/Pig.py
16
1156
##
## P(icture)i(n)g(raphics) renderer
##
from Renderer import Renderer
from enigma import eVideoWidget, getDesktop
from Screens.PictureInPicture import PipPigMode


class Pig(Renderer):
    """Renders live video into a skin-positioned widget.

    The skin attribute "hidePip" (value "1") controls whether the real
    picture-in-picture is suppressed while this widget is shown.
    """

    GUI_WIDGET = eVideoWidget

    def __init__(self):
        Renderer.__init__(self)
        self.Position = None
        self.Size = None
        self.hidePip = True

    def postWidgetCreate(self, instance):
        # Decoder 0 is the main video decoder; scale the framebuffer to
        # the full desktop size.
        desk = getDesktop(0)
        instance.setDecoder(0)
        instance.setFBSize(desk.size())

    def applySkin(self, desktop, parent):
        # Consume our private "hidePip" attribute (first occurrence only)
        # before handing the remaining attributes to the base class.
        attribs = self.skinAttributes[:]
        for entry in self.skinAttributes:
            if entry[0] == "hidePip":
                self.hidePip = entry[1] == "1"
                attribs.remove(entry)
                break
        self.skinAttributes = attribs
        ret = Renderer.applySkin(self, desktop, parent)
        if ret:
            # Remember the skinned geometry so onShow() can restore it.
            self.Position = self.instance.position()
            self.Size = self.instance.size()
        return ret

    def onShow(self):
        if self.instance:
            if self.Size:
                self.instance.resize(self.Size)
            if self.Position:
                self.instance.move(self.Position)
            if self.hidePip:
                PipPigMode(True)

    def onHide(self):
        if self.instance:
            self.preWidgetRemove(self.instance)
            if self.hidePip:
                PipPigMode(False)
gpl-2.0
yoer/hue
desktop/core/ext-py/tablib-0.10.0/tablib/packages/xlrd/compdoc.py
64
14974
# -*- coding: cp1252 -*- ## # Implements the minimal functionality required # to extract a "Workbook" or "Book" stream (as one big string) # from an OLE2 Compound Document file. # <p>Copyright © 2005-2008 Stephen John Machin, Lingfo Pty Ltd</p> # <p>This module is part of the xlrd package, which is released under a BSD-style licence.</p> ## # No part of the content of this file was derived from the works of David Giffin. # 2008-11-04 SJM Avoid assertion error when -1 used instead of -2 for first_SID of empty SCSS [Frank Hoffsuemmer] # 2007-09-08 SJM Warning message if sector sizes are extremely large. # 2007-05-07 SJM Meaningful exception instead of IndexError if a SAT (sector allocation table) is corrupted. # 2007-04-22 SJM Missing "<" in a struct.unpack call => can't open files on bigendian platforms. import sys from struct import unpack from timemachine import * ## # Magic cookie that should appear in the first 8 bytes of the file. SIGNATURE = "\xD0\xCF\x11\xE0\xA1\xB1\x1A\xE1" EOCSID = -2 FREESID = -1 SATSID = -3 MSATSID = -4 class CompDocError(Exception): pass class DirNode(object): def __init__(self, DID, dent, DEBUG=0): # dent is the 128-byte directory entry self.DID = DID # (cbufsize, self.etype, self.colour, self.left_DID, self.right_DID, # self.root_DID, # self.first_SID, # self.tot_size) = \ # unpack('<HBBiii16x4x8x8xii4x', dent[64:128]) (cbufsize, self.etype, self.colour, self.left_DID, self.right_DID, self.root_DID) = \ unpack('<HBBiii', dent[64:80]) (self.first_SID, self.tot_size) = \ unpack('<ii', dent[116:124]) if cbufsize == 0: self.name = u'' else: self.name = unicode(dent[0:cbufsize-2], 'utf_16_le') # omit the trailing U+0000 self.children = [] # filled in later self.parent = -1 # indicates orphan; fixed up later self.tsinfo = unpack('<IIII', dent[100:116]) if DEBUG: self.dump(DEBUG) def dump(self, DEBUG=1): print "DID=%d name=%r etype=%d DIDs(left=%d right=%d root=%d parent=%d kids=%r) first_SID=%d tot_size=%d" \ % (self.DID, self.name, 
self.etype, self.left_DID, self.right_DID, self.root_DID, self.parent, self.children, self.first_SID, self.tot_size) if DEBUG == 2: # cre_lo, cre_hi, mod_lo, mod_hi = tsinfo print "timestamp info", self.tsinfo def _build_family_tree(dirlist, parent_DID, child_DID): if child_DID < 0: return _build_family_tree(dirlist, parent_DID, dirlist[child_DID].left_DID) dirlist[parent_DID].children.append(child_DID) dirlist[child_DID].parent = parent_DID _build_family_tree(dirlist, parent_DID, dirlist[child_DID].right_DID) if dirlist[child_DID].etype == 1: # storage _build_family_tree(dirlist, child_DID, dirlist[child_DID].root_DID) ## # Compound document handler. # @param mem The raw contents of the file, as a string, or as an mmap.mmap() object. The # only operation it needs to support is slicing. class CompDoc(object): def __init__(self, mem, logfile=sys.stdout, DEBUG=0): self.logfile = logfile if mem[0:8] != SIGNATURE: raise CompDocError('Not an OLE2 compound document') if mem[28:30] != '\xFE\xFF': raise CompDocError('Expected "little-endian" marker, found %r' % mem[28:30]) revision, version = unpack('<HH', mem[24:28]) if DEBUG: print >> logfile, "\nCompDoc format: version=0x%04x revision=0x%04x" % (version, revision) self.mem = mem ssz, sssz = unpack('<HH', mem[30:34]) if ssz > 20: # allows for 2**20 bytes i.e. 1MB print >> logfile, \ "WARNING: sector size (2**%d) is preposterous; assuming 512 and continuing ..." \ % ssz ssz = 9 if sssz > ssz: print >> logfile, \ "WARNING: short stream sector size (2**%d) is preposterous; assuming 64 and continuing ..." 
\ % sssz sssz = 6 self.sec_size = sec_size = 1 << ssz self.short_sec_size = 1 << sssz ( SAT_tot_secs, self.dir_first_sec_sid, _unused, self.min_size_std_stream, SSAT_first_sec_sid, SSAT_tot_secs, MSAT_first_sec_sid, MSAT_tot_secs, # ) = unpack('<ii4xiiiii', mem[44:76]) ) = unpack('<iiiiiiii', mem[44:76]) mem_data_len = len(mem) - 512 mem_data_secs, left_over = divmod(mem_data_len, sec_size) if left_over: #### raise CompDocError("Not a whole number of sectors") print >> logfile, \ "WARNING *** file size (%d) not 512 + multiple of sector size (%d)" \ % (len(mem), sec_size) if DEBUG: print >> logfile, 'sec sizes', ssz, sssz, sec_size, self.short_sec_size print >> logfile, "mem data: %d bytes == %d sectors" % (mem_data_len, mem_data_secs) print >> logfile, "SAT_tot_secs=%d, dir_first_sec_sid=%d, min_size_std_stream=%d" \ % (SAT_tot_secs, self.dir_first_sec_sid, self.min_size_std_stream,) print >> logfile, "SSAT_first_sec_sid=%d, SSAT_tot_secs=%d" % (SSAT_first_sec_sid, SSAT_tot_secs,) print >> logfile, "MSAT_first_sec_sid=%d, MSAT_tot_secs=%d" % (MSAT_first_sec_sid, MSAT_tot_secs,) nent = int_floor_div(sec_size, 4) # number of SID entries in a sector fmt = "<%di" % nent trunc_warned = 0 # # === build the MSAT === # MSAT = list(unpack('<109i', mem[76:512])) sid = MSAT_first_sec_sid while sid >= 0: if sid >= mem_data_secs: raise CompDocError( "MSAT extension: accessing sector %d but only %d in file" % (sid, mem_data_secs) ) offset = 512 + sec_size * sid news = list(unpack(fmt, mem[offset:offset+sec_size])) sid = news.pop() MSAT.extend(news) if DEBUG: print >> logfile, "MSAT: len =", len(MSAT) print >> logfile, MSAT # # === build the SAT === # self.SAT = [] for msid in MSAT: if msid == FREESID: continue if msid >= mem_data_secs: if not trunc_warned: print >> logfile, "WARNING *** File is truncated, or OLE2 MSAT is corrupt!!" 
print >> logfile, \ "INFO: Trying to access sector %d but only %d available" \ % (msid, mem_data_secs) trunc_warned = 1 continue offset = 512 + sec_size * msid news = list(unpack(fmt, mem[offset:offset+sec_size])) self.SAT.extend(news) if DEBUG: print >> logfile, "SAT: len =", len(self.SAT) print >> logfile, self.SAT # print >> logfile, "SAT ", # for i, s in enumerate(self.SAT): # print >> logfile, "entry: %4d offset: %6d, next entry: %4d" % (i, 512 + sec_size * i, s) # print >> logfile, "%d:%d " % (i, s), print # === build the directory === # dbytes = self._get_stream( self.mem, 512, self.SAT, self.sec_size, self.dir_first_sec_sid, name="directory") dirlist = [] did = -1 for pos in xrange(0, len(dbytes), 128): did += 1 dirlist.append(DirNode(did, dbytes[pos:pos+128], 0)) self.dirlist = dirlist _build_family_tree(dirlist, 0, dirlist[0].root_DID) # and stand well back ... if DEBUG: for d in dirlist: d.dump(DEBUG) # # === get the SSCS === # sscs_dir = self.dirlist[0] assert sscs_dir.etype == 5 # root entry if sscs_dir.first_SID < 0 and sscs_dir.tot_size == 0: # Problem reported by Frank Hoffsuemmer: some software was # writing -1 instead of -2 (EOCSID) for the first_SID # when the SCCS was empty. Not having EOCSID caused assertion # failure in _get_stream. # Solution: avoid calling _get_stream in any case when the # SCSS appears to be empty. 
self.SSCS = "" else: self.SSCS = self._get_stream( self.mem, 512, self.SAT, sec_size, sscs_dir.first_SID, sscs_dir.tot_size, name="SSCS") # if DEBUG: print >> logfile, "SSCS", repr(self.SSCS) # # === build the SSAT === # self.SSAT = [] if SSAT_tot_secs > 0 and sscs_dir.tot_size == 0: print >> logfile, \ "WARNING *** OLE2 inconsistency: SSCS size is 0 but SSAT size is non-zero" if sscs_dir.tot_size > 0: sid = SSAT_first_sec_sid nsecs = SSAT_tot_secs while sid >= 0 and nsecs > 0: nsecs -= 1 start_pos = 512 + sid * sec_size news = list(unpack(fmt, mem[start_pos:start_pos+sec_size])) self.SSAT.extend(news) sid = self.SAT[sid] # assert SSAT_tot_secs == 0 or sid == EOCSID if DEBUG: print >> logfile, "SSAT last sid %d; remaining sectors %d" % (sid, nsecs) assert nsecs == 0 and sid == EOCSID if DEBUG: print >> logfile, "SSAT", self.SSAT def _get_stream(self, mem, base, sat, sec_size, start_sid, size=None, name=''): # print >> self.logfile, "_get_stream", base, sec_size, start_sid, size sectors = [] s = start_sid if size is None: # nothing to check against while s >= 0: start_pos = base + s * sec_size sectors.append(mem[start_pos:start_pos+sec_size]) try: s = sat[s] except IndexError: raise CompDocError( "OLE2 stream %r: sector allocation table invalid entry (%d)" % (name, s) ) assert s == EOCSID else: todo = size while s >= 0: start_pos = base + s * sec_size grab = sec_size if grab > todo: grab = todo todo -= grab sectors.append(mem[start_pos:start_pos+grab]) try: s = sat[s] except IndexError: raise CompDocError( "OLE2 stream %r: sector allocation table invalid entry (%d)" % (name, s) ) assert s == EOCSID if todo != 0: print >> self.logfile, \ "WARNING *** OLE2 stream %r: expected size %d, actual size %d" \ % (name, size, size - todo) return ''.join(sectors) def _dir_search(self, path, storage_DID=0): # Return matching DirNode instance, or None head = path[0] tail = path[1:] dl = self.dirlist for child in dl[storage_DID].children: if dl[child].name.lower() == head.lower(): 
et = dl[child].etype if et == 2: return dl[child] if et == 1: if not tail: raise CompDocError("Requested component is a 'storage'") return self._dir_search(tail, child) dl[child].dump(1) raise CompDocError("Requested stream is not a 'user stream'") return None ## # Interrogate the compound document's directory; return the stream as a string if found, otherwise # return None. # @param qname Name of the desired stream e.g. u'Workbook'. Should be in Unicode or convertible thereto. def get_named_stream(self, qname): d = self._dir_search(qname.split("/")) if d is None: return None if d.tot_size >= self.min_size_std_stream: return self._get_stream( self.mem, 512, self.SAT, self.sec_size, d.first_SID, d.tot_size, name=qname) else: return self._get_stream( self.SSCS, 0, self.SSAT, self.short_sec_size, d.first_SID, d.tot_size, name=qname + " (from SSCS)") ## # Interrogate the compound document's directory. # If the named stream is not found, (None, 0, 0) will be returned. # If the named stream is found and is contiguous within the original byte sequence ("mem") # used when the document was opened, # then (mem, offset_to_start_of_stream, length_of_stream) is returned. # Otherwise a new string is built from the fragments and (new_string, 0, length_of_stream) is returned. # @param qname Name of the desired stream e.g. u'Workbook'. Should be in Unicode or convertible thereto. 
def locate_named_stream(self, qname): d = self._dir_search(qname.split("/")) if d is None: return (None, 0, 0) if d.tot_size >= self.min_size_std_stream: return self._locate_stream(self.mem, 512, self.SAT, self.sec_size, d.first_SID, d.tot_size) else: return ( self._get_stream( self.SSCS, 0, self.SSAT, self.short_sec_size, d.first_SID, d.tot_size, qname + " (from SSCS)"), 0, d.tot_size ) return (None, 0, 0) # not found def _locate_stream(self, mem, base, sat, sec_size, start_sid, size): # print >> self.logfile, "_locate_stream", base, sec_size, start_sid, size s = start_sid if s < 0: raise CompDocError("_locate_stream: start_sid (%d) is -ve" % start_sid) p = -99 # dummy previous SID start_pos = -9999 end_pos = -8888 slices = [] while s >= 0: if s == p+1: # contiguous sectors end_pos += sec_size else: # start new slice if p >= 0: # not first time slices.append((start_pos, end_pos)) start_pos = base + s * sec_size end_pos = start_pos + sec_size p = s s = sat[s] assert s == EOCSID # print >> self.logfile, len(slices) + 1, "slices" if not slices: # The stream is contiguous ... just what we like! return (mem, start_pos, size) slices.append((start_pos, end_pos)) return (''.join([mem[start_pos:end_pos] for start_pos, end_pos in slices]), 0, size) # ==========================================================================================
apache-2.0
naav97/upgradeFileUbuntu
configs/cmus/status_display_notify_send.py
1
6807
#! /usr/bin/env python # # cmus_desktop_notify.py: display song cmus is playing using notify-send. # Copyright (C) 2011 Travis Poppe <tlp@lickwid.net> # # Version 2011.06.24 # http://tlp.lickwid.net/cmus_desktop_notify.py # Usage: Run script for instructions. # This program is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by the # Free Software Foundation; either version 2 of the License, or (at your # option) any later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # General Public License for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # TODO: # # * Offer some configuration # * Clean up status_data() (first iteration problem) # * Clean up notification when some data is missing # * Attempt to use filename when title is unavailable # * Make work with python 3/test/etc import sys import time import subprocess def print_usage(): """Display usage info and exit.""" print ' ##############################################################################' print ' # cmus_desktop_notify.py: display song cmus is playing using notify-send. #' print ' # Copyright (C) 2011 Travis Poppe <tlp@lickwid.net> #' print ' # #' print ' # Version: 2011.06.24 #' print ' # #' print ' # Tested on Ubuntu 11.04 with Unity window manager. #' print ' # Requirements: libnotify-bin (notify-send); probably python 2.x. #' print ' ##############################################################################' print ' # Usage: #' print ' # 1. Copy cmus_desktop_notify.py to ~/.cmus/ and make executable. #' print ' # 2. 
Create ~/.cmus/status_display_program.sh with these contents #' print ' # and make executable (remove spaces and border): #' print ' # #' print ' # #!/bin/sh #' print ' # ~/.cmus/cmus_desktop_notify.py "$*" & #' print ' # #' print ' # 3. Set the status_display_program variable in cmus (with YOUR homedir!) #' print ' # #' print ' # :set status_display_program=/home/user/.cmus/status_display_program.sh #' print ' # #' print ' # 4. Enjoy desktop notifications from cmus! Be sure to :save. #' print ' ##############################################################################' sys.exit(2) def status_data(item): """Return the requested cmus status data.""" # We loop through cmus status data and use each of its known data # types as 'delimiters', collecting data until we reach one, # inserting it into the dictionary -- rinse and repeat. # cmus helper script provides our data as argv[1]. cmus_data = sys.argv[1] # Split the data into an easily-parsed list. cmus_data = cmus_data.split() # Our temporary collector list. collector = [] # Dictionary that will contain our parsed-out data. cmus_info = {'status':"", 'file':"", 'artist':"", 'album':"", 'discnumber':"", 'tracknumber':"", 'title':"", 'date':"", 'duration':""} # Loop through cmus data and write it to our dictionary. last_found = "status" for value in cmus_data: collector.append(value) # Check to see if cmus value matches dictionary key. for key in cmus_info: # If a match has been found, record the data. if key == value: collector.pop() cmus_info[last_found] = " ".join(collector) collector = [] last_found = key # Return whatever data main() requests. return cmus_info[item] def display_song(): """Display the song data using notify-send.""" # We only display a notification if something is playing. if status_data("status") == "playing": # Check to see if title data exists before trying to display it. # Display "Unknown" otherwise. 
if status_data("title") != "": notify_summary = status_data("title") else: notify_summary = "Unknown" # Check to see if album data exists before trying to # display it. Prevents "Artist, " if it's blank. if status_data("artist") != "": s = status_data("artist").split() re = "" for st in s: if st == "albumartist": break else: re = re + st + " " notify_body = re else: notify_body = "Unknown" # Create our temporary file if it doesn't exist yet. open("/tmp/cmus_desktop_last_track", "a").write("4") # Check to see when we got our last track from cmus. last_notice = open("/tmp/cmus_desktop_last_track", "r").read() # Write time stamp for current track from cmus. last_notice_time = str(time.time()) open("/tmp/cmus_desktop_last_track", "w").write(last_notice_time) # Calculate seconds between track changes. track_change_duration = round(time.time() - float(last_notice)) # Display current track notification only if 3 seconds have # elapsed since last track was chosen. if track_change_duration > 3: # Execute notify-send with our default song data. subprocess.call('notify-send -t 5000 "' + \ notify_summary + '" "by ' + \ notify_body + ' "', shell=True) def main(): try: # See if script is being called by cmus before proceeding. if sys.argv[1].startswith("status"): display_song() except: print_usage() if __name__ == "__main__": main()
gpl-3.0
kinverarity1/bruges
bruges/attribute/test/similarity_test.py
3
2688
import unittest import numpy from bruges.attribute import similarity class SimilarityTest( unittest.TestCase ): def test_same_data( self ): """ Simple test to check if the algorithm works for the trivial case. """ data = numpy.zeros( [100, 100] ) check_data = data + 1.0 data +=10. window_size = 20 output = similarity( data, window_size ) same = numpy.allclose( check_data[:,1:], output[:,1:], .001 ) self.assertTrue( same ) def test_stepout( self ): data = numpy.zeros( [ 100,100 ] ) check_data = data + 1.0 # Make adjacent traces dissimilar, next nearest trace # similar data += 11. data[ :,::2] = -11.0 window_size = 20 step_out = 2 # Check with a step out of 2 output = similarity( data, window_size, step_out = step_out ) same = numpy.allclose( check_data[:,step_out:], output[:,step_out:], .001 ) self.assertTrue( same ) # Check with a step out of 1 step_out = 1 output = similarity( data, window_size, step_out = step_out ) # Everything should be zero check_data -= 1 same = numpy.allclose( check_data[:,step_out:], output[:,step_out:], .001 ) self.assertTrue( same ) def test_lag( self ): data = numpy.zeros( [ 100,100 ] ) check_data = data + 1.0 # Make an off by 1 similarity that can be corrected # with lag data+=11 data[::2,::2] = -11. data[1::2,1::2]=-11 lag = 2 window_size = 20 output = similarity( data, window_size, lag=lag ) same = numpy.allclose( check_data[window_size/2 :,1:], output[window_size/2 :,1:], atol=.01 ) self.assertTrue( same ) # Should be zero with no lag lag = 0 window_size = 20 output = similarity( data, window_size, lag=lag ) check_data[:,1:] -= 1. same = numpy.allclose( check_data[:,1:], output[:,1:], .001 ) self.assertTrue( same ) if __name__ == '__main__': suite = \ unittest.TestLoader().loadTestsFromTestCase(SimilarityTest) unittest.TextTestRunner(verbosity=2).run(suite)
apache-2.0
daviddrysdale/python-phonenumbers
python/phonenumbers/data/region_NA.py
1
1959
"""Auto-generated file, do not edit by hand. NA metadata""" from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata PHONE_METADATA_NA = PhoneMetadata(id='NA', country_code=264, international_prefix='00', general_desc=PhoneNumberDesc(national_number_pattern='[68]\\d{7,8}', possible_length=(8, 9)), fixed_line=PhoneNumberDesc(national_number_pattern='64426\\d{3}|6(?:1(?:2[2-7]|3[01378]|4[0-4])|254|32[0237]|4(?:27|41|5[25])|52[236-8]|626|7(?:2[2-4]|30))\\d{4,5}|6(?:1(?:(?:0\\d|2[0189]|3[24-69]|4[5-9])\\d|17|69|7[014])|2(?:17|5[0-36-8]|69|70)|3(?:17|2[14-689]|34|6[289]|7[01]|81)|4(?:17|2[0-2]|4[06]|5[0137]|69|7[01])|5(?:17|2[0459]|69|7[01])|6(?:17|25|38|42|69|7[01])|7(?:17|2[569]|3[13]|6[89]|7[01]))\\d{4}', example_number='61221234', possible_length=(8, 9)), mobile=PhoneNumberDesc(national_number_pattern='(?:60|8[1245])\\d{7}', example_number='811234567', possible_length=(9,)), toll_free=PhoneNumberDesc(national_number_pattern='80\\d{7}', example_number='800123456', possible_length=(9,)), premium_rate=PhoneNumberDesc(national_number_pattern='8701\\d{5}', example_number='870123456', possible_length=(9,)), voip=PhoneNumberDesc(national_number_pattern='8(?:3\\d\\d|86)\\d{5}', example_number='88612345', possible_length=(8, 9)), national_prefix='0', national_prefix_for_parsing='0', number_format=[NumberFormat(pattern='(\\d{2})(\\d{3})(\\d{3})', format='\\1 \\2 \\3', leading_digits_pattern=['88'], national_prefix_formatting_rule='0\\1'), NumberFormat(pattern='(\\d{2})(\\d{3})(\\d{3,4})', format='\\1 \\2 \\3', leading_digits_pattern=['6'], national_prefix_formatting_rule='0\\1'), NumberFormat(pattern='(\\d{3})(\\d{3})(\\d{3})', format='\\1 \\2 \\3', leading_digits_pattern=['87'], national_prefix_formatting_rule='0\\1'), NumberFormat(pattern='(\\d{2})(\\d{3})(\\d{4})', format='\\1 \\2 \\3', leading_digits_pattern=['8'], national_prefix_formatting_rule='0\\1')])
apache-2.0
UPenn-RoboCup/UPennalizers
Lib/Modules/Util/Python/monitor_shm.py
3
2295
#!/usr/bin/env python import matplotlib.pyplot as mpl import numpy as np from scipy.misc import pilutil import time import shm import os vcmImage = shm.ShmWrapper('vcmImage181%s' % str(os.getenv('USER'))); def draw_data(rgb, labelA): mpl.subplot(2,2,1); mpl.imshow(rgb) # disp('Received image.') mpl.subplot(2,2,2); # labelA = sw.vcmImage.get_labelA(); # labelA = typecast( labelA, 'uint8' ); # labelA = reshape( labelA, [80,60] ); # labelA = permute( labelA, [2 1] ); mpl.imshow(labelA); # TODO: Port the Matlab Colormap # cbk=[0 0 0];cr=[1 0 0];cg=[0 1 0];cb=[0 0 1];cy=[1 1 0];cw=[1 1 1]; # cmap=[cbk;cr;cy;cy;cb;cb;cb;cb;cg;cg;cg;cg;cg;cg;cg;cg;cw]; # colormap(cmap); # hold on; # plot_ball( sw.vcmBall ); # plot_goalposts( sw.vcmGoal ); # print 'Received Label A." mpl.subplot(2,2,3); # Draw the field for localization reasons #plot_field(); # hold on; # plot robots # for t in range( len(teamNumbers) ): # for p in range(nPlayers): # if (~isempty(robots{p, t})): # plot_robot_struct(robots{p, t}); mpl.subplot(2,2,4); # What to draw here? #plot(10,10); #hold on; #plot_goalposts( sw.vcmGoal ); mpl.draw(); def on_button_press(event): global vcmImage # get the yuyv image data yuyv = vcmImage.get_yuyv(); # data is actually int32 (YUYV format) not float64 yuyv.dtype = 'uint32'; n = yuyv.shape[0]; # convert to uint8 to seperate out YUYV yuyv.dtype = 'uint8'; # reshape to Nx4 yuyv_u8 = yuyv.reshape((120, 80, 4)); # convert to ycbcr (approx.) ycbcr = yuyv_u8[0:-1:2, :, [0,1,3]]; # convert to rgb # there is probably a better way to do this... 
rgb = np.asarray(pilutil.toimage(ycbcr, mode='YCbCr').convert('RGB').getdata()); rgb = rgb.reshape((60, 80, 3))/255.0; # Get the labelA data labelA = vcmImage.get_labelA(); # data is actually uint8 (one bit per label) labelA.dtype = 'uint8'; n = yuyv.shape[0]; labelA = labelA.reshape( (60,80) ); # labelA = permute( labelA, [2 1] ); # display image draw_data(rgb, labelA) if __name__=='__main__': # create connection to image shm print('Click on the image to update...'); fig = mpl.figure(); fig.canvas.mpl_connect('button_press_event', on_button_press); mpl.show(); time.sleep(0.1);
gpl-3.0
shanot/imp
modules/atom/test/test_create_protein.py
1
1316
from __future__ import print_function import IMP import IMP.test import IMP.core import IMP.atom import IMP.display from IMP.algebra import * class Tests(IMP.test.TestCase): """Test molecular dynamics optimizer""" def broken_until_swig_hierarchies_gets_fixed_test_cp(self): """Testing create_protein""" m = IMP.Model() rp = IMP.Particle(m) r = IMP.atom.create_protein(rp, "hi", 10.0, 150) print("back") r.show() m.add_restraint(r) p = IMP.atom.Hierarchy(rp) print("printing") print(p.get_number_of_children()) print(p.get_children()) print(p.get_children().size()) for c in p.get_children(): d = IMP.core.XYZ(c.get_particle()) d.set_coordinates(get_random_vector_in(Vector3D(0, 0, 0), Vector3D(300, 300, 300))) o = IMP.core.SteepestDescent() o.set_model(m) score = o.optimize(1000) print(score) w = IMP.display.ChimeraWriter(self.get_tmp_file_name("proteinconf.py")) for c in p.get_children(): d = IMP.core.XYZR(c.get_particle()) w.add_geometry(IMP.core.XYZRGeometry(d)) self.assertLess(score, 1) if __name__ == '__main__': IMP.test.main()
gpl-3.0
eustislab/horton
horton/part/test/test_symmetry.py
1
2963
# -*- coding: utf-8 -*- # HORTON: Helpful Open-source Research TOol for N-fermion systems. # Copyright (C) 2011-2015 The HORTON Development Team # # This file is part of HORTON. # # HORTON is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 3 # of the License, or (at your option) any later version. # # HORTON is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, see <http://www.gnu.org/licenses/> # #-- #pylint: skip-file import numpy as np from horton import * from horton.test.common import get_pentagon_moments def get_fake_example(): generators = [ np.array([[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0]]), np.array([[0, 1, 0, 0], [1, 0, 0, 0], [0, 0, 1, 0]]), ] fracs = np.array([ [1, 0, 0], [0.5, 0.5, 0.5], ]) numbers = np.array([1, 8]) cell = Cell(None) symmetry = Symmetry('fake', generators, fracs, numbers, cell) coordinates, numbers, links = symmetry.generate() return coordinates, numbers, links, cell, symmetry def test_symmetry_scalar(): coordinates, numbers, links, cell, symmetry = get_fake_example() aim_results = { 'charges': np.array([0.29, 0.31, -0.6]), 'volumes': np.array([1.2, 1.4, 3.4]), } sym_results = symmetry_analysis(coordinates, cell, symmetry, aim_results) assert len(sym_results) == 2 stats = sym_results['charges'] assert abs(stats[:,0] - [0.3, -0.6]).max() < 1e-10 assert abs(stats[:,1] - [np.std([0.29, 0.31]), 0.0]).max() < 1e-10 stats = sym_results['volumes'] assert abs(stats[:,0] - [1.3, 3.4]).max() < 1e-10 assert abs(stats[:,1] - [np.std([1.2, 1.4]), 0.0]).max() < 1e-10 def test_symmetry_moments(): coordinates, numbers, links, cell, symmetry = 
get_fake_example() # setup rotated multipole moments m0 = get_pentagon_moments() m00 = m0.copy() m01 = rotate_cartesian_moments_all(symmetry.generators[1][:,:3], m0) m1 = get_pentagon_moments(get_random_rotation()) # perturb them in a controlled way m00[0] += 0.1 m01[0] -= 0.1 m00[1] += 0.1 m01[2] -= 0.1 # run analysis aim_results = { 'cartesian_multipoles': np.array([m00, m01, m1]), } sym_results = symmetry_analysis(coordinates, cell, symmetry, aim_results) # check results assert len(sym_results) == 1 stats = sym_results['cartesian_multipoles'] assert abs(stats[:,0] - [m0, m1]).max() < 1e-10 assert abs(stats[1,1]).max() < 1e-10 assert abs(stats[0,1,:2] - np.std([-0.1, 0.1])).max() < 1e-10 assert abs(stats[0,1,2:]).max() < 1e-10
gpl-3.0
lc525/gtest
scripts/upload.py
2511
51024
#!/usr/bin/env python # # Copyright 2007 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tool for uploading diffs from a version control system to the codereview app. Usage summary: upload.py [options] [-- diff_options] Diff options are passed to the diff command of the underlying system. Supported version control systems: Git Mercurial Subversion It is important for Git/Mercurial users to specify a tree/node/branch to diff against by using the '--rev' option. """ # This code is derived from appcfg.py in the App Engine SDK (open source), # and from ASPN recipe #146306. import cookielib import getpass import logging import md5 import mimetypes import optparse import os import re import socket import subprocess import sys import urllib import urllib2 import urlparse try: import readline except ImportError: pass # The logging verbosity: # 0: Errors only. # 1: Status messages. # 2: Info logs. # 3: Debug logs. verbosity = 1 # Max size of patch or base file. MAX_UPLOAD_SIZE = 900 * 1024 def GetEmail(prompt): """Prompts the user for their email address and returns it. The last used email address is saved to a file and offered up as a suggestion to the user. If the user presses enter without typing in anything the last used email address is used. If the user enters a new address, it is saved for next time we prompt. 
""" last_email_file_name = os.path.expanduser("~/.last_codereview_email_address") last_email = "" if os.path.exists(last_email_file_name): try: last_email_file = open(last_email_file_name, "r") last_email = last_email_file.readline().strip("\n") last_email_file.close() prompt += " [%s]" % last_email except IOError, e: pass email = raw_input(prompt + ": ").strip() if email: try: last_email_file = open(last_email_file_name, "w") last_email_file.write(email) last_email_file.close() except IOError, e: pass else: email = last_email return email def StatusUpdate(msg): """Print a status message to stdout. If 'verbosity' is greater than 0, print the message. Args: msg: The string to print. """ if verbosity > 0: print msg def ErrorExit(msg): """Print an error message to stderr and exit.""" print >>sys.stderr, msg sys.exit(1) class ClientLoginError(urllib2.HTTPError): """Raised to indicate there was an error authenticating with ClientLogin.""" def __init__(self, url, code, msg, headers, args): urllib2.HTTPError.__init__(self, url, code, msg, headers, None) self.args = args self.reason = args["Error"] class AbstractRpcServer(object): """Provides a common interface for a simple RPC server.""" def __init__(self, host, auth_function, host_override=None, extra_headers={}, save_cookies=False): """Creates a new HttpRpcServer. Args: host: The host to send requests to. auth_function: A function that takes no arguments and returns an (email, password) tuple when called. Will be called if authentication is required. host_override: The host header to send to the server (defaults to host). extra_headers: A dict of extra headers to append to every request. save_cookies: If True, save the authentication cookies to local disk. If False, use an in-memory cookiejar instead. Subclasses must implement this functionality. Defaults to False. 
""" self.host = host self.host_override = host_override self.auth_function = auth_function self.authenticated = False self.extra_headers = extra_headers self.save_cookies = save_cookies self.opener = self._GetOpener() if self.host_override: logging.info("Server: %s; Host: %s", self.host, self.host_override) else: logging.info("Server: %s", self.host) def _GetOpener(self): """Returns an OpenerDirector for making HTTP requests. Returns: A urllib2.OpenerDirector object. """ raise NotImplementedError() def _CreateRequest(self, url, data=None): """Creates a new urllib request.""" logging.debug("Creating request for: '%s' with payload:\n%s", url, data) req = urllib2.Request(url, data=data) if self.host_override: req.add_header("Host", self.host_override) for key, value in self.extra_headers.iteritems(): req.add_header(key, value) return req def _GetAuthToken(self, email, password): """Uses ClientLogin to authenticate the user, returning an auth token. Args: email: The user's email address password: The user's password Raises: ClientLoginError: If there was an error authenticating with ClientLogin. HTTPError: If there was some other form of HTTP error. Returns: The authentication token returned by ClientLogin. """ account_type = "GOOGLE" if self.host.endswith(".google.com"): # Needed for use inside Google. 
account_type = "HOSTED" req = self._CreateRequest( url="https://www.google.com/accounts/ClientLogin", data=urllib.urlencode({ "Email": email, "Passwd": password, "service": "ah", "source": "rietveld-codereview-upload", "accountType": account_type, }), ) try: response = self.opener.open(req) response_body = response.read() response_dict = dict(x.split("=") for x in response_body.split("\n") if x) return response_dict["Auth"] except urllib2.HTTPError, e: if e.code == 403: body = e.read() response_dict = dict(x.split("=", 1) for x in body.split("\n") if x) raise ClientLoginError(req.get_full_url(), e.code, e.msg, e.headers, response_dict) else: raise def _GetAuthCookie(self, auth_token): """Fetches authentication cookies for an authentication token. Args: auth_token: The authentication token returned by ClientLogin. Raises: HTTPError: If there was an error fetching the authentication cookies. """ # This is a dummy value to allow us to identify when we're successful. continue_location = "http://localhost/" args = {"continue": continue_location, "auth": auth_token} req = self._CreateRequest("http://%s/_ah/login?%s" % (self.host, urllib.urlencode(args))) try: response = self.opener.open(req) except urllib2.HTTPError, e: response = e if (response.code != 302 or response.info()["location"] != continue_location): raise urllib2.HTTPError(req.get_full_url(), response.code, response.msg, response.headers, response.fp) self.authenticated = True def _Authenticate(self): """Authenticates the user. The authentication process works as follows: 1) We get a username and password from the user 2) We use ClientLogin to obtain an AUTH token for the user (see http://code.google.com/apis/accounts/AuthForInstalledApps.html). 3) We pass the auth token to /_ah/login on the server to obtain an authentication cookie. If login was successful, it tries to redirect us to the URL we provided. 
If we attempt to access the upload API without first obtaining an authentication cookie, it returns a 401 response and directs us to authenticate ourselves with ClientLogin. """ for i in range(3): credentials = self.auth_function() try: auth_token = self._GetAuthToken(credentials[0], credentials[1]) except ClientLoginError, e: if e.reason == "BadAuthentication": print >>sys.stderr, "Invalid username or password." continue if e.reason == "CaptchaRequired": print >>sys.stderr, ( "Please go to\n" "https://www.google.com/accounts/DisplayUnlockCaptcha\n" "and verify you are a human. Then try again.") break if e.reason == "NotVerified": print >>sys.stderr, "Account not verified." break if e.reason == "TermsNotAgreed": print >>sys.stderr, "User has not agreed to TOS." break if e.reason == "AccountDeleted": print >>sys.stderr, "The user account has been deleted." break if e.reason == "AccountDisabled": print >>sys.stderr, "The user account has been disabled." break if e.reason == "ServiceDisabled": print >>sys.stderr, ("The user's access to the service has been " "disabled.") break if e.reason == "ServiceUnavailable": print >>sys.stderr, "The service is not available; try again later." break raise self._GetAuthCookie(auth_token) return def Send(self, request_path, payload=None, content_type="application/octet-stream", timeout=None, **kwargs): """Sends an RPC and returns the response. Args: request_path: The path to send the request to, eg /api/appversion/create. payload: The body of the request, or None to send an empty request. content_type: The Content-Type header to use. timeout: timeout in seconds; default None i.e. no timeout. (Note: for large requests on OS X, the timeout doesn't work right.) kwargs: Any keyword arguments are converted into query string parameters. Returns: The response body, as a string. """ # TODO: Don't require authentication. Let the server say # whether it is necessary. 
if not self.authenticated: self._Authenticate() old_timeout = socket.getdefaulttimeout() socket.setdefaulttimeout(timeout) try: tries = 0 while True: tries += 1 args = dict(kwargs) url = "http://%s%s" % (self.host, request_path) if args: url += "?" + urllib.urlencode(args) req = self._CreateRequest(url=url, data=payload) req.add_header("Content-Type", content_type) try: f = self.opener.open(req) response = f.read() f.close() return response except urllib2.HTTPError, e: if tries > 3: raise elif e.code == 401: self._Authenticate() ## elif e.code >= 500 and e.code < 600: ## # Server Error - try again. ## continue else: raise finally: socket.setdefaulttimeout(old_timeout) class HttpRpcServer(AbstractRpcServer): """Provides a simplified RPC-style interface for HTTP requests.""" def _Authenticate(self): """Save the cookie jar after authentication.""" super(HttpRpcServer, self)._Authenticate() if self.save_cookies: StatusUpdate("Saving authentication cookies to %s" % self.cookie_file) self.cookie_jar.save() def _GetOpener(self): """Returns an OpenerDirector that supports cookies and ignores redirects. Returns: A urllib2.OpenerDirector object. """ opener = urllib2.OpenerDirector() opener.add_handler(urllib2.ProxyHandler()) opener.add_handler(urllib2.UnknownHandler()) opener.add_handler(urllib2.HTTPHandler()) opener.add_handler(urllib2.HTTPDefaultErrorHandler()) opener.add_handler(urllib2.HTTPSHandler()) opener.add_handler(urllib2.HTTPErrorProcessor()) if self.save_cookies: self.cookie_file = os.path.expanduser("~/.codereview_upload_cookies") self.cookie_jar = cookielib.MozillaCookieJar(self.cookie_file) if os.path.exists(self.cookie_file): try: self.cookie_jar.load() self.authenticated = True StatusUpdate("Loaded authentication cookies from %s" % self.cookie_file) except (cookielib.LoadError, IOError): # Failed to load cookies - just ignore them. 
pass else: # Create an empty cookie file with mode 600 fd = os.open(self.cookie_file, os.O_CREAT, 0600) os.close(fd) # Always chmod the cookie file os.chmod(self.cookie_file, 0600) else: # Don't save cookies across runs of update.py. self.cookie_jar = cookielib.CookieJar() opener.add_handler(urllib2.HTTPCookieProcessor(self.cookie_jar)) return opener parser = optparse.OptionParser(usage="%prog [options] [-- diff_options]") parser.add_option("-y", "--assume_yes", action="store_true", dest="assume_yes", default=False, help="Assume that the answer to yes/no questions is 'yes'.") # Logging group = parser.add_option_group("Logging options") group.add_option("-q", "--quiet", action="store_const", const=0, dest="verbose", help="Print errors only.") group.add_option("-v", "--verbose", action="store_const", const=2, dest="verbose", default=1, help="Print info level logs (default).") group.add_option("--noisy", action="store_const", const=3, dest="verbose", help="Print all logs.") # Review server group = parser.add_option_group("Review server options") group.add_option("-s", "--server", action="store", dest="server", default="codereview.appspot.com", metavar="SERVER", help=("The server to upload to. The format is host[:port]. " "Defaults to 'codereview.appspot.com'.")) group.add_option("-e", "--email", action="store", dest="email", metavar="EMAIL", default=None, help="The username to use. 
Will prompt if omitted.") group.add_option("-H", "--host", action="store", dest="host", metavar="HOST", default=None, help="Overrides the Host header sent with all RPCs.") group.add_option("--no_cookies", action="store_false", dest="save_cookies", default=True, help="Do not save authentication cookies to local disk.") # Issue group = parser.add_option_group("Issue options") group.add_option("-d", "--description", action="store", dest="description", metavar="DESCRIPTION", default=None, help="Optional description when creating an issue.") group.add_option("-f", "--description_file", action="store", dest="description_file", metavar="DESCRIPTION_FILE", default=None, help="Optional path of a file that contains " "the description when creating an issue.") group.add_option("-r", "--reviewers", action="store", dest="reviewers", metavar="REVIEWERS", default=None, help="Add reviewers (comma separated email addresses).") group.add_option("--cc", action="store", dest="cc", metavar="CC", default=None, help="Add CC (comma separated email addresses).") # Upload options group = parser.add_option_group("Patch options") group.add_option("-m", "--message", action="store", dest="message", metavar="MESSAGE", default=None, help="A message to identify the patch. " "Will prompt if omitted.") group.add_option("-i", "--issue", type="int", action="store", metavar="ISSUE", default=None, help="Issue number to which to add. 
Defaults to new issue.") group.add_option("--download_base", action="store_true", dest="download_base", default=False, help="Base files will be downloaded by the server " "(side-by-side diffs may not work on files with CRs).") group.add_option("--rev", action="store", dest="revision", metavar="REV", default=None, help="Branch/tree/revision to diff against (used by DVCS).") group.add_option("--send_mail", action="store_true", dest="send_mail", default=False, help="Send notification email to reviewers.") def GetRpcServer(options): """Returns an instance of an AbstractRpcServer. Returns: A new AbstractRpcServer, on which RPC calls can be made. """ rpc_server_class = HttpRpcServer def GetUserCredentials(): """Prompts the user for a username and password.""" email = options.email if email is None: email = GetEmail("Email (login for uploading to %s)" % options.server) password = getpass.getpass("Password for %s: " % email) return (email, password) # If this is the dev_appserver, use fake authentication. host = (options.host or options.server).lower() if host == "localhost" or host.startswith("localhost:"): email = options.email if email is None: email = "test@example.com" logging.info("Using debug user %s. Override with --email" % email) server = rpc_server_class( options.server, lambda: (email, "password"), host_override=options.host, extra_headers={"Cookie": 'dev_appserver_login="%s:False"' % email}, save_cookies=options.save_cookies) # Don't try to talk to ClientLogin. server.authenticated = True return server return rpc_server_class(options.server, GetUserCredentials, host_override=options.host, save_cookies=options.save_cookies) def EncodeMultipartFormData(fields, files): """Encode form fields for multipart/form-data. Args: fields: A sequence of (name, value) elements for regular form fields. files: A sequence of (name, filename, value) elements for data to be uploaded as files. Returns: (content_type, body) ready for httplib.HTTP instance. 
  Source: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/146306
  """
  BOUNDARY = '-M-A-G-I-C---B-O-U-N-D-A-R-Y-'
  CRLF = '\r\n'
  lines = []
  # Regular form fields: each part carries only a Content-Disposition header.
  for (key, value) in fields:
    lines.append('--' + BOUNDARY)
    lines.append('Content-Disposition: form-data; name="%s"' % key)
    # The empty string produces the blank line that separates the part
    # headers from the part body (MIME, RFC 2046).
    lines.append('')
    lines.append(value)
  # File parts: additionally carry a filename and a guessed Content-Type.
  for (key, filename, value) in files:
    lines.append('--' + BOUNDARY)
    lines.append('Content-Disposition: form-data; name="%s"; filename="%s"' %
                 (key, filename))
    lines.append('Content-Type: %s' % GetContentType(filename))
    lines.append('')
    lines.append(value)
  # Closing boundary carries a trailing "--"; the final '' makes the joined
  # body end with a CRLF.
  lines.append('--' + BOUNDARY + '--')
  lines.append('')
  body = CRLF.join(lines)
  content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
  return content_type, body


def GetContentType(filename):
  """Helper to guess the content-type from the filename.

  Falls back to application/octet-stream when the extension is unknown.
  """
  return mimetypes.guess_type(filename)[0] or 'application/octet-stream'


# Use a shell for subcommands on Windows to get a PATH search.
use_shell = sys.platform.startswith("win")


def RunShellWithReturnCode(command, print_output=False,
                           universal_newlines=True):
  """Executes a command and returns the output from stdout and the return code.

  Args:
    command: Command to execute.
    print_output: If True, the output is printed to stdout.
                  If False, both stdout and stderr are ignored.
    universal_newlines: Use universal_newlines flag (default: True).
Returns: Tuple (output, return code) """ logging.info("Running %s", command) p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=use_shell, universal_newlines=universal_newlines) if print_output: output_array = [] while True: line = p.stdout.readline() if not line: break print line.strip("\n") output_array.append(line) output = "".join(output_array) else: output = p.stdout.read() p.wait() errout = p.stderr.read() if print_output and errout: print >>sys.stderr, errout p.stdout.close() p.stderr.close() return output, p.returncode def RunShell(command, silent_ok=False, universal_newlines=True, print_output=False): data, retcode = RunShellWithReturnCode(command, print_output, universal_newlines) if retcode: ErrorExit("Got error status from %s:\n%s" % (command, data)) if not silent_ok and not data: ErrorExit("No output from %s" % command) return data class VersionControlSystem(object): """Abstract base class providing an interface to the VCS.""" def __init__(self, options): """Constructor. Args: options: Command line options. """ self.options = options def GenerateDiff(self, args): """Return the current diff as a string. Args: args: Extra arguments to pass to the diff command. """ raise NotImplementedError( "abstract method -- subclass %s must override" % self.__class__) def GetUnknownFiles(self): """Return a list of files unknown to the VCS.""" raise NotImplementedError( "abstract method -- subclass %s must override" % self.__class__) def CheckForUnknownFiles(self): """Show an "are you sure?" prompt if there are unknown files.""" unknown_files = self.GetUnknownFiles() if unknown_files: print "The following files are not added to version control:" for line in unknown_files: print line prompt = "Are you sure to continue?(y/N) " answer = raw_input(prompt).strip() if answer != "y": ErrorExit("User aborted") def GetBaseFile(self, filename): """Get the content of the upstream version of a file. 
Returns: A tuple (base_content, new_content, is_binary, status) base_content: The contents of the base file. new_content: For text files, this is empty. For binary files, this is the contents of the new file, since the diff output won't contain information to reconstruct the current file. is_binary: True iff the file is binary. status: The status of the file. """ raise NotImplementedError( "abstract method -- subclass %s must override" % self.__class__) def GetBaseFiles(self, diff): """Helper that calls GetBase file for each file in the patch. Returns: A dictionary that maps from filename to GetBaseFile's tuple. Filenames are retrieved based on lines that start with "Index:" or "Property changes on:". """ files = {} for line in diff.splitlines(True): if line.startswith('Index:') or line.startswith('Property changes on:'): unused, filename = line.split(':', 1) # On Windows if a file has property changes its filename uses '\' # instead of '/'. filename = filename.strip().replace('\\', '/') files[filename] = self.GetBaseFile(filename) return files def UploadBaseFiles(self, issue, rpc_server, patch_list, patchset, options, files): """Uploads the base files (and if necessary, the current ones as well).""" def UploadFile(filename, file_id, content, is_binary, status, is_base): """Uploads a file to the server.""" file_too_large = False if is_base: type = "base" else: type = "current" if len(content) > MAX_UPLOAD_SIZE: print ("Not uploading the %s file for %s because it's too large." 
% (type, filename)) file_too_large = True content = "" checksum = md5.new(content).hexdigest() if options.verbose > 0 and not file_too_large: print "Uploading %s file for %s" % (type, filename) url = "/%d/upload_content/%d/%d" % (int(issue), int(patchset), file_id) form_fields = [("filename", filename), ("status", status), ("checksum", checksum), ("is_binary", str(is_binary)), ("is_current", str(not is_base)), ] if file_too_large: form_fields.append(("file_too_large", "1")) if options.email: form_fields.append(("user", options.email)) ctype, body = EncodeMultipartFormData(form_fields, [("data", filename, content)]) response_body = rpc_server.Send(url, body, content_type=ctype) if not response_body.startswith("OK"): StatusUpdate(" --> %s" % response_body) sys.exit(1) patches = dict() [patches.setdefault(v, k) for k, v in patch_list] for filename in patches.keys(): base_content, new_content, is_binary, status = files[filename] file_id_str = patches.get(filename) if file_id_str.find("nobase") != -1: base_content = None file_id_str = file_id_str[file_id_str.rfind("_") + 1:] file_id = int(file_id_str) if base_content != None: UploadFile(filename, file_id, base_content, is_binary, status, True) if new_content != None: UploadFile(filename, file_id, new_content, is_binary, status, False) def IsImage(self, filename): """Returns true if the filename has an image extension.""" mimetype = mimetypes.guess_type(filename)[0] if not mimetype: return False return mimetype.startswith("image/") class SubversionVCS(VersionControlSystem): """Implementation of the VersionControlSystem interface for Subversion.""" def __init__(self, options): super(SubversionVCS, self).__init__(options) if self.options.revision: match = re.match(r"(\d+)(:(\d+))?", self.options.revision) if not match: ErrorExit("Invalid Subversion revision %s." 
% self.options.revision) self.rev_start = match.group(1) self.rev_end = match.group(3) else: self.rev_start = self.rev_end = None # Cache output from "svn list -r REVNO dirname". # Keys: dirname, Values: 2-tuple (ouput for start rev and end rev). self.svnls_cache = {} # SVN base URL is required to fetch files deleted in an older revision. # Result is cached to not guess it over and over again in GetBaseFile(). required = self.options.download_base or self.options.revision is not None self.svn_base = self._GuessBase(required) def GuessBase(self, required): """Wrapper for _GuessBase.""" return self.svn_base def _GuessBase(self, required): """Returns the SVN base URL. Args: required: If true, exits if the url can't be guessed, otherwise None is returned. """ info = RunShell(["svn", "info"]) for line in info.splitlines(): words = line.split() if len(words) == 2 and words[0] == "URL:": url = words[1] scheme, netloc, path, params, query, fragment = urlparse.urlparse(url) username, netloc = urllib.splituser(netloc) if username: logging.info("Removed username from base URL") if netloc.endswith("svn.python.org"): if netloc == "svn.python.org": if path.startswith("/projects/"): path = path[9:] elif netloc != "pythondev@svn.python.org": ErrorExit("Unrecognized Python URL: %s" % url) base = "http://svn.python.org/view/*checkout*%s/" % path logging.info("Guessed Python base = %s", base) elif netloc.endswith("svn.collab.net"): if path.startswith("/repos/"): path = path[6:] base = "http://svn.collab.net/viewvc/*checkout*%s/" % path logging.info("Guessed CollabNet base = %s", base) elif netloc.endswith(".googlecode.com"): path = path + "/" base = urlparse.urlunparse(("http", netloc, path, params, query, fragment)) logging.info("Guessed Google Code base = %s", base) else: path = path + "/" base = urlparse.urlunparse((scheme, netloc, path, params, query, fragment)) logging.info("Guessed base = %s", base) return base if required: ErrorExit("Can't find URL in output from svn info") 
return None def GenerateDiff(self, args): cmd = ["svn", "diff"] if self.options.revision: cmd += ["-r", self.options.revision] cmd.extend(args) data = RunShell(cmd) count = 0 for line in data.splitlines(): if line.startswith("Index:") or line.startswith("Property changes on:"): count += 1 logging.info(line) if not count: ErrorExit("No valid patches found in output from svn diff") return data def _CollapseKeywords(self, content, keyword_str): """Collapses SVN keywords.""" # svn cat translates keywords but svn diff doesn't. As a result of this # behavior patching.PatchChunks() fails with a chunk mismatch error. # This part was originally written by the Review Board development team # who had the same problem (http://reviews.review-board.org/r/276/). # Mapping of keywords to known aliases svn_keywords = { # Standard keywords 'Date': ['Date', 'LastChangedDate'], 'Revision': ['Revision', 'LastChangedRevision', 'Rev'], 'Author': ['Author', 'LastChangedBy'], 'HeadURL': ['HeadURL', 'URL'], 'Id': ['Id'], # Aliases 'LastChangedDate': ['LastChangedDate', 'Date'], 'LastChangedRevision': ['LastChangedRevision', 'Rev', 'Revision'], 'LastChangedBy': ['LastChangedBy', 'Author'], 'URL': ['URL', 'HeadURL'], } def repl(m): if m.group(2): return "$%s::%s$" % (m.group(1), " " * len(m.group(3))) return "$%s$" % m.group(1) keywords = [keyword for name in keyword_str.split(" ") for keyword in svn_keywords.get(name, [])] return re.sub(r"\$(%s):(:?)([^\$]+)\$" % '|'.join(keywords), repl, content) def GetUnknownFiles(self): status = RunShell(["svn", "status", "--ignore-externals"], silent_ok=True) unknown_files = [] for line in status.split("\n"): if line and line[0] == "?": unknown_files.append(line) return unknown_files def ReadFile(self, filename): """Returns the contents of a file.""" file = open(filename, 'rb') result = "" try: result = file.read() finally: file.close() return result def GetStatus(self, filename): """Returns the status of a file.""" if not self.options.revision: status 
= RunShell(["svn", "status", "--ignore-externals", filename]) if not status: ErrorExit("svn status returned no output for %s" % filename) status_lines = status.splitlines() # If file is in a cl, the output will begin with # "\n--- Changelist 'cl_name':\n". See # http://svn.collab.net/repos/svn/trunk/notes/changelist-design.txt if (len(status_lines) == 3 and not status_lines[0] and status_lines[1].startswith("--- Changelist")): status = status_lines[2] else: status = status_lines[0] # If we have a revision to diff against we need to run "svn list" # for the old and the new revision and compare the results to get # the correct status for a file. else: dirname, relfilename = os.path.split(filename) if dirname not in self.svnls_cache: cmd = ["svn", "list", "-r", self.rev_start, dirname or "."] out, returncode = RunShellWithReturnCode(cmd) if returncode: ErrorExit("Failed to get status for %s." % filename) old_files = out.splitlines() args = ["svn", "list"] if self.rev_end: args += ["-r", self.rev_end] cmd = args + [dirname or "."] out, returncode = RunShellWithReturnCode(cmd) if returncode: ErrorExit("Failed to run command %s" % cmd) self.svnls_cache[dirname] = (old_files, out.splitlines()) old_files, new_files = self.svnls_cache[dirname] if relfilename in old_files and relfilename not in new_files: status = "D " elif relfilename in old_files and relfilename in new_files: status = "M " else: status = "A " return status def GetBaseFile(self, filename): status = self.GetStatus(filename) base_content = None new_content = None # If a file is copied its status will be "A +", which signifies # "addition-with-history". See "svn st" for more information. We need to # upload the original file or else diff parsing will fail if the file was # edited. if status[0] == "A" and status[3] != "+": # We'll need to upload the new content if we're adding a binary file # since diff's output won't contain it. 
mimetype = RunShell(["svn", "propget", "svn:mime-type", filename], silent_ok=True) base_content = "" is_binary = mimetype and not mimetype.startswith("text/") if is_binary and self.IsImage(filename): new_content = self.ReadFile(filename) elif (status[0] in ("M", "D", "R") or (status[0] == "A" and status[3] == "+") or # Copied file. (status[0] == " " and status[1] == "M")): # Property change. args = [] if self.options.revision: url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start) else: # Don't change filename, it's needed later. url = filename args += ["-r", "BASE"] cmd = ["svn"] + args + ["propget", "svn:mime-type", url] mimetype, returncode = RunShellWithReturnCode(cmd) if returncode: # File does not exist in the requested revision. # Reset mimetype, it contains an error message. mimetype = "" get_base = False is_binary = mimetype and not mimetype.startswith("text/") if status[0] == " ": # Empty base content just to force an upload. base_content = "" elif is_binary: if self.IsImage(filename): get_base = True if status[0] == "M": if not self.rev_end: new_content = self.ReadFile(filename) else: url = "%s/%s@%s" % (self.svn_base, filename, self.rev_end) new_content = RunShell(["svn", "cat", url], universal_newlines=True, silent_ok=True) else: base_content = "" else: get_base = True if get_base: if is_binary: universal_newlines = False else: universal_newlines = True if self.rev_start: # "svn cat -r REV delete_file.txt" doesn't work. cat requires # the full URL with "@REV" appended instead of using "-r" option. 
url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start) base_content = RunShell(["svn", "cat", url], universal_newlines=universal_newlines, silent_ok=True) else: base_content = RunShell(["svn", "cat", filename], universal_newlines=universal_newlines, silent_ok=True) if not is_binary: args = [] if self.rev_start: url = "%s/%s@%s" % (self.svn_base, filename, self.rev_start) else: url = filename args += ["-r", "BASE"] cmd = ["svn"] + args + ["propget", "svn:keywords", url] keywords, returncode = RunShellWithReturnCode(cmd) if keywords and not returncode: base_content = self._CollapseKeywords(base_content, keywords) else: StatusUpdate("svn status returned unexpected output: %s" % status) sys.exit(1) return base_content, new_content, is_binary, status[0:5] class GitVCS(VersionControlSystem): """Implementation of the VersionControlSystem interface for Git.""" def __init__(self, options): super(GitVCS, self).__init__(options) # Map of filename -> hash of base file. self.base_hashes = {} def GenerateDiff(self, extra_args): # This is more complicated than svn's GenerateDiff because we must convert # the diff output to include an svn-style "Index:" line as well as record # the hashes of the base files, so we can upload them along with our diff. if self.options.revision: extra_args = [self.options.revision] + extra_args gitdiff = RunShell(["git", "diff", "--full-index"] + extra_args) svndiff = [] filecount = 0 filename = None for line in gitdiff.splitlines(): match = re.match(r"diff --git a/(.*) b/.*$", line) if match: filecount += 1 filename = match.group(1) svndiff.append("Index: %s\n" % filename) else: # The "index" line in a git diff looks like this (long hashes elided): # index 82c0d44..b2cee3f 100755 # We want to save the left hash, as that identifies the base file. 
match = re.match(r"index (\w+)\.\.", line) if match: self.base_hashes[filename] = match.group(1) svndiff.append(line + "\n") if not filecount: ErrorExit("No valid patches found in output from git diff") return "".join(svndiff) def GetUnknownFiles(self): status = RunShell(["git", "ls-files", "--exclude-standard", "--others"], silent_ok=True) return status.splitlines() def GetBaseFile(self, filename): hash = self.base_hashes[filename] base_content = None new_content = None is_binary = False if hash == "0" * 40: # All-zero hash indicates no base file. status = "A" base_content = "" else: status = "M" base_content, returncode = RunShellWithReturnCode(["git", "show", hash]) if returncode: ErrorExit("Got error status from 'git show %s'" % hash) return (base_content, new_content, is_binary, status) class MercurialVCS(VersionControlSystem): """Implementation of the VersionControlSystem interface for Mercurial.""" def __init__(self, options, repo_dir): super(MercurialVCS, self).__init__(options) # Absolute path to repository (we can be in a subdir) self.repo_dir = os.path.normpath(repo_dir) # Compute the subdir cwd = os.path.normpath(os.getcwd()) assert cwd.startswith(self.repo_dir) self.subdir = cwd[len(self.repo_dir):].lstrip(r"\/") if self.options.revision: self.base_rev = self.options.revision else: self.base_rev = RunShell(["hg", "parent", "-q"]).split(':')[1].strip() def _GetRelPath(self, filename): """Get relative path of a file according to the current directory, given its logical path in the repo.""" assert filename.startswith(self.subdir), filename return filename[len(self.subdir):].lstrip(r"\/") def GenerateDiff(self, extra_args): # If no file specified, restrict to the current subdir extra_args = extra_args or ["."] cmd = ["hg", "diff", "--git", "-r", self.base_rev] + extra_args data = RunShell(cmd, silent_ok=True) svndiff = [] filecount = 0 for line in data.splitlines(): m = re.match("diff --git a/(\S+) b/(\S+)", line) if m: # Modify line to make it look like 
as it comes from svn diff. # With this modification no changes on the server side are required # to make upload.py work with Mercurial repos. # NOTE: for proper handling of moved/copied files, we have to use # the second filename. filename = m.group(2) svndiff.append("Index: %s" % filename) svndiff.append("=" * 67) filecount += 1 logging.info(line) else: svndiff.append(line) if not filecount: ErrorExit("No valid patches found in output from hg diff") return "\n".join(svndiff) + "\n" def GetUnknownFiles(self): """Return a list of files unknown to the VCS.""" args = [] status = RunShell(["hg", "status", "--rev", self.base_rev, "-u", "."], silent_ok=True) unknown_files = [] for line in status.splitlines(): st, fn = line.split(" ", 1) if st == "?": unknown_files.append(fn) return unknown_files def GetBaseFile(self, filename): # "hg status" and "hg cat" both take a path relative to the current subdir # rather than to the repo root, but "hg diff" has given us the full path # to the repo root. 
base_content = "" new_content = None is_binary = False oldrelpath = relpath = self._GetRelPath(filename) # "hg status -C" returns two lines for moved/copied files, one otherwise out = RunShell(["hg", "status", "-C", "--rev", self.base_rev, relpath]) out = out.splitlines() # HACK: strip error message about missing file/directory if it isn't in # the working copy if out[0].startswith('%s: ' % relpath): out = out[1:] if len(out) > 1: # Moved/copied => considered as modified, use old filename to # retrieve base contents oldrelpath = out[1].strip() status = "M" else: status, _ = out[0].split(' ', 1) if status != "A": base_content = RunShell(["hg", "cat", "-r", self.base_rev, oldrelpath], silent_ok=True) is_binary = "\0" in base_content # Mercurial's heuristic if status != "R": new_content = open(relpath, "rb").read() is_binary = is_binary or "\0" in new_content if is_binary and base_content: # Fetch again without converting newlines base_content = RunShell(["hg", "cat", "-r", self.base_rev, oldrelpath], silent_ok=True, universal_newlines=False) if not is_binary or not self.IsImage(relpath): new_content = None return base_content, new_content, is_binary, status # NOTE: The SplitPatch function is duplicated in engine.py, keep them in sync. def SplitPatch(data): """Splits a patch into separate pieces for each file. Args: data: A string containing the output of svn diff. Returns: A list of 2-tuple (filename, text) where text is the svn diff output pertaining to filename. """ patches = [] filename = None diff = [] for line in data.splitlines(True): new_filename = None if line.startswith('Index:'): unused, new_filename = line.split(':', 1) new_filename = new_filename.strip() elif line.startswith('Property changes on:'): unused, temp_filename = line.split(':', 1) # When a file is modified, paths use '/' between directories, however # when a property is modified '\' is used on Windows. Make them the same # otherwise the file shows up twice. 
temp_filename = temp_filename.strip().replace('\\', '/') if temp_filename != filename: # File has property changes but no modifications, create a new diff. new_filename = temp_filename if new_filename: if filename and diff: patches.append((filename, ''.join(diff))) filename = new_filename diff = [line] continue if diff is not None: diff.append(line) if filename and diff: patches.append((filename, ''.join(diff))) return patches def UploadSeparatePatches(issue, rpc_server, patchset, data, options): """Uploads a separate patch for each file in the diff output. Returns a list of [patch_key, filename] for each file. """ patches = SplitPatch(data) rv = [] for patch in patches: if len(patch[1]) > MAX_UPLOAD_SIZE: print ("Not uploading the patch for " + patch[0] + " because the file is too large.") continue form_fields = [("filename", patch[0])] if not options.download_base: form_fields.append(("content_upload", "1")) files = [("data", "data.diff", patch[1])] ctype, body = EncodeMultipartFormData(form_fields, files) url = "/%d/upload_patch/%d" % (int(issue), int(patchset)) print "Uploading patch for " + patch[0] response_body = rpc_server.Send(url, body, content_type=ctype) lines = response_body.splitlines() if not lines or lines[0] != "OK": StatusUpdate(" --> %s" % response_body) sys.exit(1) rv.append([lines[1], patch[0]]) return rv def GuessVCS(options): """Helper to guess the version control system. This examines the current directory, guesses which VersionControlSystem we're using, and returns an instance of the appropriate class. Exit with an error if we can't figure it out. Returns: A VersionControlSystem instance. Exits if the VCS can't be guessed. """ # Mercurial has a command to get the base directory of a repository # Try running it, but don't die if we don't have hg installed. # NOTE: we try Mercurial first as it can sit on top of an SVN working copy. 
try: out, returncode = RunShellWithReturnCode(["hg", "root"]) if returncode == 0: return MercurialVCS(options, out.strip()) except OSError, (errno, message): if errno != 2: # ENOENT -- they don't have hg installed. raise # Subversion has a .svn in all working directories. if os.path.isdir('.svn'): logging.info("Guessed VCS = Subversion") return SubversionVCS(options) # Git has a command to test if you're in a git tree. # Try running it, but don't die if we don't have git installed. try: out, returncode = RunShellWithReturnCode(["git", "rev-parse", "--is-inside-work-tree"]) if returncode == 0: return GitVCS(options) except OSError, (errno, message): if errno != 2: # ENOENT -- they don't have git installed. raise ErrorExit(("Could not guess version control system. " "Are you in a working copy directory?")) def RealMain(argv, data=None): """The real main function. Args: argv: Command line arguments. data: Diff contents. If None (default) the diff is generated by the VersionControlSystem implementation returned by GuessVCS(). Returns: A 2-tuple (issue id, patchset id). The patchset id is None if the base files are not uploaded by this script (applies only to SVN checkouts). """ logging.basicConfig(format=("%(asctime).19s %(levelname)s %(filename)s:" "%(lineno)s %(message)s ")) os.environ['LC_ALL'] = 'C' options, args = parser.parse_args(argv[1:]) global verbosity verbosity = options.verbose if verbosity >= 3: logging.getLogger().setLevel(logging.DEBUG) elif verbosity >= 2: logging.getLogger().setLevel(logging.INFO) vcs = GuessVCS(options) if isinstance(vcs, SubversionVCS): # base field is only allowed for Subversion. # Note: Fetching base files may become deprecated in future releases. 
base = vcs.GuessBase(options.download_base) else: base = None if not base and options.download_base: options.download_base = True logging.info("Enabled upload of base file") if not options.assume_yes: vcs.CheckForUnknownFiles() if data is None: data = vcs.GenerateDiff(args) files = vcs.GetBaseFiles(data) if verbosity >= 1: print "Upload server:", options.server, "(change with -s/--server)" if options.issue: prompt = "Message describing this patch set: " else: prompt = "New issue subject: " message = options.message or raw_input(prompt).strip() if not message: ErrorExit("A non-empty message is required") rpc_server = GetRpcServer(options) form_fields = [("subject", message)] if base: form_fields.append(("base", base)) if options.issue: form_fields.append(("issue", str(options.issue))) if options.email: form_fields.append(("user", options.email)) if options.reviewers: for reviewer in options.reviewers.split(','): if "@" in reviewer and not reviewer.split("@")[1].count(".") == 1: ErrorExit("Invalid email address: %s" % reviewer) form_fields.append(("reviewers", options.reviewers)) if options.cc: for cc in options.cc.split(','): if "@" in cc and not cc.split("@")[1].count(".") == 1: ErrorExit("Invalid email address: %s" % cc) form_fields.append(("cc", options.cc)) description = options.description if options.description_file: if options.description: ErrorExit("Can't specify description and description_file") file = open(options.description_file, 'r') description = file.read() file.close() if description: form_fields.append(("description", description)) # Send a hash of all the base file so the server can determine if a copy # already exists in an earlier patchset. 
base_hashes = "" for file, info in files.iteritems(): if not info[0] is None: checksum = md5.new(info[0]).hexdigest() if base_hashes: base_hashes += "|" base_hashes += checksum + ":" + file form_fields.append(("base_hashes", base_hashes)) # If we're uploading base files, don't send the email before the uploads, so # that it contains the file status. if options.send_mail and options.download_base: form_fields.append(("send_mail", "1")) if not options.download_base: form_fields.append(("content_upload", "1")) if len(data) > MAX_UPLOAD_SIZE: print "Patch is large, so uploading file patches separately." uploaded_diff_file = [] form_fields.append(("separate_patches", "1")) else: uploaded_diff_file = [("data", "data.diff", data)] ctype, body = EncodeMultipartFormData(form_fields, uploaded_diff_file) response_body = rpc_server.Send("/upload", body, content_type=ctype) patchset = None if not options.download_base or not uploaded_diff_file: lines = response_body.splitlines() if len(lines) >= 2: msg = lines[0] patchset = lines[1].strip() patches = [x.split(" ", 1) for x in lines[2:]] else: msg = response_body else: msg = response_body StatusUpdate(msg) if not response_body.startswith("Issue created.") and \ not response_body.startswith("Issue updated."): sys.exit(0) issue = msg[msg.rfind("/")+1:] if not uploaded_diff_file: result = UploadSeparatePatches(issue, rpc_server, patchset, data, options) if not options.download_base: patches = result if not options.download_base: vcs.UploadBaseFiles(issue, rpc_server, patches, patchset, options, files) if options.send_mail: rpc_server.Send("/" + issue + "/mail", payload="") return issue, patchset def main(): try: RealMain(sys.argv) except KeyboardInterrupt: print StatusUpdate("Interrupted.") sys.exit(1) if __name__ == "__main__": main()
bsd-3-clause
jesramirez/odoo
addons/note_pad/__openerp__.py
312
1691
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## { 'name': 'Memos pad', 'version': '0.1', 'category': 'Tools', 'description': """ This module update memos inside OpenERP for using an external pad ================================================================= Use for update your text memo in real time with the following user that you invite. """, 'author': 'OpenERP SA', 'website': 'https://www.odoo.com/page/notes', 'summary': 'Sticky memos, Collaborative', 'depends': [ 'mail', 'pad', 'note', ], 'data': [ 'note_pad_view.xml', ], 'installable': True, 'application': False, 'auto_install': False, } # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
jsoref/django
django/core/management/commands/testserver.py
200
2074
from django.core.management import call_command from django.core.management.base import BaseCommand from django.db import connection class Command(BaseCommand): help = 'Runs a development server with data from the given fixture(s).' requires_system_checks = False def add_arguments(self, parser): parser.add_argument('args', metavar='fixture', nargs='*', help='Path(s) to fixtures to load before running the server.') parser.add_argument('--noinput', '--no-input', action='store_false', dest='interactive', default=True, help='Tells Django to NOT prompt the user for input of any kind.') parser.add_argument('--addrport', default='', help='Port number or ipaddr:port to run the server on.') parser.add_argument('--ipv6', '-6', action='store_true', dest='use_ipv6', default=False, help='Tells Django to use an IPv6 address.') def handle(self, *fixture_labels, **options): verbosity = options.get('verbosity') interactive = options.get('interactive') # Create a test database. db_name = connection.creation.create_test_db(verbosity=verbosity, autoclobber=not interactive, serialize=False) # Import the fixture data into the test database. call_command('loaddata', *fixture_labels, **{'verbosity': verbosity}) # Run the development server. Turn off auto-reloading because it causes # a strange error -- it causes this handle() method to be called # multiple times. shutdown_message = ( '\nServer stopped.\nNote that the test database, %r, has not been ' 'deleted. You can explore it on your own.' % db_name ) use_threading = connection.features.test_db_allows_multiple_connections call_command( 'runserver', addrport=options['addrport'], shutdown_message=shutdown_message, use_reloader=False, use_ipv6=options['use_ipv6'], use_threading=use_threading )
bsd-3-clause
HaraldWeber/client
src/coop/__init__.py
1
1079
#------------------------------------------------------------------------------- # Copyright (c) 2012 Gael Honorez. # All rights reserved. This program and the accompanying materials # are made available under the terms of the GNU Public License v3.0 # which accompanies this distribution, and is available at # http://www.gnu.org/licenses/gpl.html # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. #------------------------------------------------------------------------------- import logging from fa import faction logger = logging.getLogger(__name__) # For use by other modules from _coopwidget import CoopWidget as Coop
gpl-3.0
Applied-GeoSolutions/gips
gips/scripts/process.py
1
3787
#!/usr/bin/env python ################################################################################ # GIPS: Geospatial Image Processing System # # AUTHOR: Matthew Hanson # EMAIL: matt.a.hanson@gmail.com # # Copyright (C) 2014-2018 Applied Geosolutions # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/> ################################################################################ from gips import __version__ from gips.parsers import GIPSParser from gips.core import SpatialExtent, TemporalExtent from gips.utils import Colors, VerboseOut, open_vector, import_data_class from gips import utils from gips.inventory import DataInventory from gips.inventory import orm from functools import reduce def main(): title = Colors.BOLD + 'GIPS Data Processing (v%s)' % __version__ + Colors.OFF # argument parsing parser0 = GIPSParser(description=title) parser0.add_inventory_parser() parser0.add_process_parser() args = parser0.parse_args() cls = utils.gips_script_setup(args.command, args.stop_on_error) print(title) with utils.error_handler(): extents = SpatialExtent.factory( cls, site=args.site, rastermask=args.rastermask, key=args.key, where=args.where, tiles=args.tiles, pcov=args.pcov, ptile=args.ptile ) batchargs = None if args.batchout: tdl = [] batchargs = '--chunksize ' + str(args.chunksize) batchargs += ' --format ' + str(args.format) batchargs += ' --numprocs ' + str(args.numprocs) batchargs += ' 
--verbose ' + str(args.verbose) if args.overwrite: batchargs += ' --overwrite ' if args.products: batchargs += ' -p ' + ' '.join(args.products) for extent in extents: inv = DataInventory( cls, extent, TemporalExtent(args.dates, args.days), **vars(args) ) if args.batchout: def get_commands(tiles_obj): commands = [] for tile in tiles_obj.tiles.keys(): needed = any([p not in [k for sen, k in tiles_obj.tiles[tile].filenames.keys()] for p in args.products]) if not needed: continue commands.append(args.command + ' -t ' + str(tile) + ' -d ' + str(tiles_obj.date) + ' ' + batchargs + '\n') return commands tdl = reduce( list.__add__, map( get_commands, inv.data.values() ), tdl ) else: inv.process(overwrite=args.overwrite) if args.batchout: with open(args.batchout, 'w') as ofile: ofile.writelines(tdl) utils.gips_exit() # produce a summary error report then quit with a proper exit status if __name__ == "__main__": main()
gpl-3.0
WSDC-NITWarangal/django
tests/migrate_signals/tests.py
324
3585
from django.apps import apps from django.core import management from django.db.models import signals from django.test import TestCase, override_settings from django.utils import six APP_CONFIG = apps.get_app_config('migrate_signals') PRE_MIGRATE_ARGS = ['app_config', 'verbosity', 'interactive', 'using'] MIGRATE_DATABASE = 'default' MIGRATE_VERBOSITY = 1 MIGRATE_INTERACTIVE = False class PreMigrateReceiver(object): def __init__(self): self.call_counter = 0 self.call_args = None def __call__(self, signal, sender, **kwargs): self.call_counter = self.call_counter + 1 self.call_args = kwargs class OneTimeReceiver(object): """ Special receiver for handle the fact that test runner calls migrate for several databases and several times for some of them. """ def __init__(self): self.call_counter = 0 self.call_args = None def __call__(self, signal, sender, **kwargs): # Although test runner calls migrate for several databases, # testing for only one of them is quite sufficient. if kwargs['using'] == MIGRATE_DATABASE: self.call_counter = self.call_counter + 1 self.call_args = kwargs # we need to test only one call of migrate signals.pre_migrate.disconnect(pre_migrate_receiver, sender=APP_CONFIG) # We connect receiver here and not in unit test code because we need to # connect receiver before test runner creates database. That is, sequence of # actions would be: # # 1. Test runner imports this module. # 2. We connect receiver. # 3. Test runner calls migrate for create default database. # 4. Test runner execute our unit test code. 
pre_migrate_receiver = OneTimeReceiver() signals.pre_migrate.connect(pre_migrate_receiver, sender=APP_CONFIG) class MigrateSignalTests(TestCase): available_apps = ['migrate_signals'] def test_pre_migrate_call_time(self): self.assertEqual(pre_migrate_receiver.call_counter, 1) def test_pre_migrate_args(self): r = PreMigrateReceiver() signals.pre_migrate.connect(r, sender=APP_CONFIG) management.call_command('migrate', database=MIGRATE_DATABASE, verbosity=MIGRATE_VERBOSITY, interactive=MIGRATE_INTERACTIVE, stdout=six.StringIO()) args = r.call_args self.assertEqual(r.call_counter, 1) self.assertEqual(set(args), set(PRE_MIGRATE_ARGS)) self.assertEqual(args['app_config'], APP_CONFIG) self.assertEqual(args['verbosity'], MIGRATE_VERBOSITY) self.assertEqual(args['interactive'], MIGRATE_INTERACTIVE) self.assertEqual(args['using'], 'default') @override_settings(MIGRATION_MODULES={'migrate_signals': 'migrate_signals.custom_migrations'}) def test_pre_migrate_migrations_only(self): """ If all apps have migrations, pre_migrate should be sent. """ r = PreMigrateReceiver() signals.pre_migrate.connect(r, sender=APP_CONFIG) stdout = six.StringIO() management.call_command('migrate', database=MIGRATE_DATABASE, verbosity=MIGRATE_VERBOSITY, interactive=MIGRATE_INTERACTIVE, stdout=stdout) args = r.call_args self.assertEqual(r.call_counter, 1) self.assertEqual(set(args), set(PRE_MIGRATE_ARGS)) self.assertEqual(args['app_config'], APP_CONFIG) self.assertEqual(args['verbosity'], MIGRATE_VERBOSITY) self.assertEqual(args['interactive'], MIGRATE_INTERACTIVE) self.assertEqual(args['using'], 'default')
bsd-3-clause
alena1108/cattle
tests/integration/cattletest/core/test_ssh_key.py
2
1253
from common_fixtures import * # NOQA import requests def test_create_ssh_key_default(admin_client): key = admin_client.create_ssh_key() assert key.state == 'registering' key = admin_client.wait_success(key) assert key.state == 'active' assert key.publicValue.startswith('ssh-rsa ') assert key.publicValue.endswith('cattle@cattle') assert key.secretValue.startswith('-----BEGIN RSA PRIVATE KEY-----') assert 'pem' in key.links pem = requests.get(key.links['pem']).text assert pem.startswith('-----BEGIN RSA PRIVATE KEY-----') def test_create_ssh_key_with_value(admin_client): key = admin_client.create_ssh_key(publicValue='ssh-rsa') assert key.state == 'registering' key = admin_client.wait_success(key) assert key.state == 'active' assert key.publicValue == 'ssh-rsa' assert key.secretValue is None assert 'pem' not in key.links def test_create_container(admin_client, sim_context): key = create_and_activate(admin_client, 'sshKey') c = create_sim_container(admin_client, sim_context, credentialIds=[key.id]) maps = c.credentialInstanceMaps() assert len(maps) == 1 map = maps[0] assert map.state == 'active' assert map.credentialId == key.id
apache-2.0
ednad/ooi-ui-services
tests/model/test_adaptor.py
1
1361
#!/usr/bin/env python ''' tests.model.test_adaptor The base class for the Services Test Case ''' from ooiservices.adaptor.file import FileAdaptor from tests.services_test_case import ServicesTestCase import os import shutil class TestAdaptor(ServicesTestCase): ''' Unit tests for the file adaptor ''' def setUp(self): ''' a place to put the docs ''' ServicesTestCase.setUp(self) self.docs_dir = os.path.join(self.output_dir, 'docs') if not os.path.exists(self.docs_dir): os.makedirs(self.docs_dir) def test_basic_io(self): ''' Tests the basic input output of the file adaptor ''' adaptor = FileAdaptor(self.docs_dir) doc = { "name" : "Platform Example", "owner" : "owner_id", "lat" : 40, "lon" : -70 } doc_id = adaptor.create(doc) assert os.path.exists(os.path.join(self.docs_dir, doc_id + '.json')) doc = adaptor.read(doc_id) assert 'id' in doc and doc_id == doc['id'] doc['owner'] = 'WHOI' adaptor.update(doc) doc = None doc = adaptor.read(doc_id) assert doc['owner'] == 'WHOI' assert adaptor.delete(doc_id) assert not os.path.exists(os.path.join(self.docs_dir, doc_id + '.json'))
apache-2.0
ghickman/django
django/contrib/staticfiles/management/commands/collectstatic.py
24
14280
from __future__ import unicode_literals import os from collections import OrderedDict from django.contrib.staticfiles.finders import get_finders from django.contrib.staticfiles.storage import staticfiles_storage from django.core.files.storage import FileSystemStorage from django.core.management.base import BaseCommand, CommandError from django.core.management.color import no_style from django.utils.encoding import smart_text from django.utils.functional import cached_property from django.utils.six.moves import input class Command(BaseCommand): """ Command that allows to copy or symlink static files from different locations to the settings.STATIC_ROOT. """ help = "Collect static files in a single location." requires_system_checks = False def __init__(self, *args, **kwargs): super(Command, self).__init__(*args, **kwargs) self.copied_files = [] self.symlinked_files = [] self.unmodified_files = [] self.post_processed_files = [] self.storage = staticfiles_storage self.style = no_style() @cached_property def local(self): try: self.storage.path('') except NotImplementedError: return False return True def add_arguments(self, parser): parser.add_argument('--noinput', '--no-input', action='store_false', dest='interactive', default=True, help="Do NOT prompt the user for input of any kind.") parser.add_argument('--no-post-process', action='store_false', dest='post_process', default=True, help="Do NOT post process collected files.") parser.add_argument('-i', '--ignore', action='append', default=[], dest='ignore_patterns', metavar='PATTERN', help="Ignore files or directories matching this glob-style " "pattern. 
Use multiple times to ignore more.") parser.add_argument('-n', '--dry-run', action='store_true', dest='dry_run', default=False, help="Do everything except modify the filesystem.") parser.add_argument('-c', '--clear', action='store_true', dest='clear', default=False, help="Clear the existing files using the storage " "before trying to copy or link the original file.") parser.add_argument('-l', '--link', action='store_true', dest='link', default=False, help="Create a symbolic link to each file instead of copying.") parser.add_argument('--no-default-ignore', action='store_false', dest='use_default_ignore_patterns', default=True, help="Don't ignore the common private glob-style patterns 'CVS', " "'.*' and '*~'.") def set_options(self, **options): """ Set instance variables based on an options dict """ self.interactive = options['interactive'] self.verbosity = options['verbosity'] self.symlink = options['link'] self.clear = options['clear'] self.dry_run = options['dry_run'] ignore_patterns = options['ignore_patterns'] if options['use_default_ignore_patterns']: ignore_patterns += ['CVS', '.*', '*~'] self.ignore_patterns = list(set(ignore_patterns)) self.post_process = options['post_process'] def collect(self): """ Perform the bulk of the work of collectstatic. Split off from handle() to facilitate testing. 
""" if self.symlink and not self.local: raise CommandError("Can't symlink to a remote destination.") if self.clear: self.clear_dir('') if self.symlink: handler = self.link_file else: handler = self.copy_file found_files = OrderedDict() for finder in get_finders(): for path, storage in finder.list(self.ignore_patterns): # Prefix the relative path if the source storage contains it if getattr(storage, 'prefix', None): prefixed_path = os.path.join(storage.prefix, path) else: prefixed_path = path if prefixed_path not in found_files: found_files[prefixed_path] = (storage, path) handler(path, prefixed_path, storage) else: self.log( "Found another file with the destination path '%s'. It " "will be ignored since only the first encountered file " "is collected. If this is not what you want, make sure " "every static file has a unique path." % prefixed_path, level=1, ) # Here we check if the storage backend has a post_process # method and pass it the list of modified files. if self.post_process and hasattr(self.storage, 'post_process'): processor = self.storage.post_process(found_files, dry_run=self.dry_run) for original_path, processed_path, processed in processor: if isinstance(processed, Exception): self.stderr.write("Post-processing '%s' failed!" % original_path) # Add a blank line before the traceback, otherwise it's # too easy to miss the relevant part of the error message. 
self.stderr.write("") raise processed if processed: self.log("Post-processed '%s' as '%s'" % (original_path, processed_path), level=1) self.post_processed_files.append(original_path) else: self.log("Skipped post-processing '%s'" % original_path) return { 'modified': self.copied_files + self.symlinked_files, 'unmodified': self.unmodified_files, 'post_processed': self.post_processed_files, } def handle(self, **options): self.set_options(**options) message = ['\n'] if self.dry_run: message.append( 'You have activated the --dry-run option so no files will be modified.\n\n' ) message.append( 'You have requested to collect static files at the destination\n' 'location as specified in your settings' ) if self.is_local_storage() and self.storage.location: destination_path = self.storage.location message.append(':\n\n %s\n\n' % destination_path) else: destination_path = None message.append('.\n\n') if self.clear: message.append('This will DELETE ALL FILES in this location!\n') else: message.append('This will overwrite existing files!\n') message.append( 'Are you sure you want to do this?\n\n' "Type 'yes' to continue, or 'no' to cancel: " ) if self.interactive and input(''.join(message)) != 'yes': raise CommandError("Collecting static files cancelled.") collected = self.collect() modified_count = len(collected['modified']) unmodified_count = len(collected['unmodified']) post_processed_count = len(collected['post_processed']) if self.verbosity >= 1: template = ("\n%(modified_count)s %(identifier)s %(action)s" "%(destination)s%(unmodified)s%(post_processed)s.\n") summary = template % { 'modified_count': modified_count, 'identifier': 'static file' + ('' if modified_count == 1 else 's'), 'action': 'symlinked' if self.symlink else 'copied', 'destination': (" to '%s'" % destination_path if destination_path else ''), 'unmodified': (', %s unmodified' % unmodified_count if collected['unmodified'] else ''), 'post_processed': (collected['post_processed'] and ', %s post-processed' % 
post_processed_count or ''), } self.stdout.write(summary) def log(self, msg, level=2): """ Small log helper """ if self.verbosity >= level: self.stdout.write(msg) def is_local_storage(self): return isinstance(self.storage, FileSystemStorage) def clear_dir(self, path): """ Deletes the given relative path using the destination storage backend. """ if not self.storage.exists(path): return dirs, files = self.storage.listdir(path) for f in files: fpath = os.path.join(path, f) if self.dry_run: self.log("Pretending to delete '%s'" % smart_text(fpath), level=1) else: self.log("Deleting '%s'" % smart_text(fpath), level=1) full_path = self.storage.path(fpath) if not os.path.exists(full_path) and os.path.lexists(full_path): # Delete broken symlinks os.unlink(full_path) else: self.storage.delete(fpath) for d in dirs: self.clear_dir(os.path.join(path, d)) def delete_file(self, path, prefixed_path, source_storage): """ Checks if the target file should be deleted if it already exists """ if self.storage.exists(prefixed_path): try: # When was the target file modified last time? target_last_modified = \ self.storage.modified_time(prefixed_path) except (OSError, NotImplementedError, AttributeError): # The storage doesn't support ``modified_time`` or failed pass else: try: # When was the source file modified last time? 
source_last_modified = source_storage.modified_time(path) except (OSError, NotImplementedError, AttributeError): pass else: # The full path of the target file if self.local: full_path = self.storage.path(prefixed_path) else: full_path = None # Skip the file if the source file is younger # Avoid sub-second precision (see #14665, #19540) if (target_last_modified.replace(microsecond=0) >= source_last_modified.replace(microsecond=0)): if not ((self.symlink and full_path and not os.path.islink(full_path)) or (not self.symlink and full_path and os.path.islink(full_path))): if prefixed_path not in self.unmodified_files: self.unmodified_files.append(prefixed_path) self.log("Skipping '%s' (not modified)" % path) return False # Then delete the existing file if really needed if self.dry_run: self.log("Pretending to delete '%s'" % path) else: self.log("Deleting '%s'" % path) self.storage.delete(prefixed_path) return True def link_file(self, path, prefixed_path, source_storage): """ Attempt to link ``path`` """ # Skip this file if it was already copied earlier if prefixed_path in self.symlinked_files: return self.log("Skipping '%s' (already linked earlier)" % path) # Delete the target file if needed or break if not self.delete_file(path, prefixed_path, source_storage): return # The full path of the source file source_path = source_storage.path(path) # Finally link the file if self.dry_run: self.log("Pretending to link '%s'" % source_path, level=1) else: self.log("Linking '%s'" % source_path, level=1) full_path = self.storage.path(prefixed_path) try: os.makedirs(os.path.dirname(full_path)) except OSError: pass try: if os.path.lexists(full_path): os.unlink(full_path) os.symlink(source_path, full_path) except AttributeError: import platform raise CommandError("Symlinking is not supported by Python %s." % platform.python_version()) except NotImplementedError: import platform raise CommandError("Symlinking is not supported in this " "platform (%s)." 
% platform.platform()) except OSError as e: raise CommandError(e) if prefixed_path not in self.symlinked_files: self.symlinked_files.append(prefixed_path) def copy_file(self, path, prefixed_path, source_storage): """ Attempt to copy ``path`` with storage """ # Skip this file if it was already copied earlier if prefixed_path in self.copied_files: return self.log("Skipping '%s' (already copied earlier)" % path) # Delete the target file if needed or break if not self.delete_file(path, prefixed_path, source_storage): return # The full path of the source file source_path = source_storage.path(path) # Finally start copying if self.dry_run: self.log("Pretending to copy '%s'" % source_path, level=1) else: self.log("Copying '%s'" % source_path, level=1) with source_storage.open(path) as source_file: self.storage.save(prefixed_path, source_file) self.copied_files.append(prefixed_path)
bsd-3-clause
ioanpocol/superdesk-core
tests/media/crop_test.py
2
8659
# -*- coding: utf-8; -*- # # This file is part of Superdesk. # # Copyright 2013, 2014 Sourcefabric z.u. and contributors. # # For the full copyright and license information, please see the # AUTHORS and LICENSE files distributed with this source code, or # at https://www.sourcefabric.org/superdesk/license from unittest import mock from nose.tools import assert_raises from superdesk.tests import TestCase from superdesk.media.crop import CropService from superdesk.errors import SuperdeskApiError from superdesk.media.media_operations import crop_image from superdesk.media.renditions import _resize_image, get_renditions_spec, can_generate_custom_crop_from_original from apps.prepopulate.app_populate import populate_table_json from ..media import get_picture_fixture class CropTestCase(TestCase): crop_sizes = { "_id": "crop_sizes", "display_name": "Image Crop Sizes", "type": "manageable", "items": [ {"is_active": True, "name": "4-3", "width": 800, "height": 600}, {"is_active": True, "name": "16-9", "width": 1280, "height": 720} ] } def setUp(self): self.service = CropService() populate_table_json('vocabularies', [self.crop_sizes]) def test_validate_aspect_ratio_fails(self): doc = {'CropLeft': 0, 'CropRight': 80, 'CropTop': 0, 'CropBottom': 60} crop = {'height': 700, 'width': 70} with assert_raises(SuperdeskApiError): self.service._validate_aspect_ratio(crop, doc) def test_validate_aspect_ratio_fails_with_cropsize_less(self): doc = {'CropLeft': 0, 'CropRight': 80, 'CropTop': 0, 'CropBottom': 60} crop = {'height': 600, 'width': 800} with assert_raises(SuperdeskApiError): self.service._validate_aspect_ratio(crop, doc) def test_validate_aspect_ratio_succeeds(self): doc = {'CropLeft': 0, 'CropRight': 800, 'CropTop': 0, 'CropBottom': 600} crop = {'height': 600, 'width': 800} self.assertIsNone(self.service._validate_aspect_ratio(crop, doc)) def test_validate_aspect_ratio_succeeds_2(self): doc = {'CropLeft': 0, 'CropRight': 1600, 'CropTop': 0, 'CropBottom': 1200} crop = 
{'height': 600, 'width': 800} self.assertIsNone(self.service._validate_aspect_ratio(crop, doc)) def test_get_crop_by_name(self): self.assertIsNotNone(self.service.get_crop_by_name('16-9')) self.assertIsNotNone(self.service.get_crop_by_name('4-3')) self.assertIsNone(self.service.get_crop_by_name('d')) def test_validate_crop_raises_error_if_item_is_not_picture(self): original = {"type": "text"} doc = {'renditions': {'4-3': {'CropLeft': 0, 'CropRight': 800, 'CropTop': 0, 'CropBottom': 600}}} with self.assertRaises(SuperdeskApiError) as context: self.service.validate_crop(original, doc, "4-3") ex = context.exception self.assertEqual(ex.message, 'Only images can be cropped!') self.assertEqual(ex.status_code, 400) def test_validate_crop_raises_error_if_renditions_are_missing(self): original = {"type": "picture"} doc = {'renditions': {'4-3': {'CropLeft': 0, 'CropRight': 800, 'CropTop': 0, 'CropBottom': 600}}} with self.assertRaises(SuperdeskApiError) as context: self.service.validate_crop(original, doc, "4-3") ex = context.exception self.assertEqual(ex.message, 'Missing renditions!') self.assertEqual(ex.status_code, 400) def test_validate_crop_raises_error_if_original_rendition_is_missing(self): original = {"type": "picture", "renditions": {"4-3": {'CropLeft': 0, 'CropRight': 800, 'CropTop': 0, 'CropBottom': 600}}} doc = {'renditions': {'4-3': {'CropLeft': 0, 'CropRight': 800, 'CropTop': 0, 'CropBottom': 600}}} with self.assertRaises(SuperdeskApiError) as context: self.service.validate_crop(original, doc, "4-3") ex = context.exception self.assertEqual(ex.message, 'Missing original rendition!') self.assertEqual(ex.status_code, 400) def test_validate_crop_raises_error_if_crop_name_is_unknown(self): original = {"type": "picture", "renditions": { "original": {'CropLeft': 0, 'CropRight': 800, 'CropTop': 0, 'CropBottom': 600} } } doc = {'renditions': {'d': {'CropLeft': 0, 'CropRight': 800, 'CropTop': 0, 'CropBottom': 600}}} with self.assertRaises(SuperdeskApiError) as context: 
self.service.validate_crop(original, doc, "d") ex = context.exception self.assertEqual(ex.message, 'Unknown crop name! (name=d)') self.assertEqual(ex.status_code, 400) def test_add_crop_raises_error_if_original_missing(self): original = { 'renditions': { '4-3': { } } } doc = {'CropLeft': 0, 'CropRight': 800, 'CropTop': 0, 'CropBottom': 600} with self.assertRaises(SuperdeskApiError) as context: self.service.create_crop(original, '4-3', doc) ex = context.exception self.assertEqual(ex.message, 'Original file couldn\'t be found') self.assertEqual(ex.status_code, 400) def test_validate_crop_converts_to_int(self): crop = {'width': '300', 'height': 200} self.service._validate_values(crop) self.assertEqual(300, crop['width']) self.assertEqual(200, crop['height']) with self.assertRaises(SuperdeskApiError) as context: self.service._validate_values({'width': 'foo'}) self.assertEqual(context.exception.message, 'Invalid value for width in renditions') @mock.patch('superdesk.media.crop.crop_image', return_value=(False, 'test')) def test_add_crop_raises_error(self, crop_name): original = { 'renditions': { 'original': { } } } media = mock.MagicMock() media.name = 'test.jpg' with mock.patch('superdesk.app.media.get', return_value=media): doc = {'CropLeft': 0, 'CropRight': 800, 'CropTop': 0, 'CropBottom': 600} with self.assertRaises(SuperdeskApiError) as context: self.service.create_crop(original, '4-3', doc) ex = context.exception self.assertEqual(ex.message, 'Saving crop failed.') self.assertEqual(ex.status_code, 400) def test_crop_image_exact_size(self): img = get_picture_fixture() size = {'width': '300', 'height': '200'} crop = {'CropTop': '0', 'CropRight': '300', 'CropBottom': '200', 'CropLeft': '0'} with open(img, 'rb') as imgfile: res = crop_image(imgfile, img, crop, size) self.assertTrue(res[0]) self.assertEqual(300, res[1].width) self.assertEqual(200, res[1].height) def test_resize_image(self): img = get_picture_fixture() with open(img, 'rb') as imgfile: resized, width, 
height = _resize_image(imgfile, ('200', None), 'jpeg') self.assertEqual(150, height) def test_get_rendition_spec_no_custom_crop(self): renditions = get_renditions_spec(no_custom_crops=True) for crop in self.crop_sizes.get('items'): self.assertNotIn(crop['name'], renditions) def test_get_rendition_spec_with_custom_crop(self): renditions = get_renditions_spec() for crop in self.crop_sizes.get('items'): self.assertIn(crop['name'], renditions) def test_can_generate_custom_crop_from_original(self): self.assertEquals(True, can_generate_custom_crop_from_original(800, 600, {'ratio': '16:9'})) self.assertEquals(True, can_generate_custom_crop_from_original(800, 600, {'width': 800, 'height': 600})) self.assertEquals(True, can_generate_custom_crop_from_original(810, 600, {'width': 800, 'height': 600})) self.assertEquals(True, can_generate_custom_crop_from_original(810, 610, {'width': 800, 'height': 600})) self.assertEquals(False, can_generate_custom_crop_from_original(780, 610, {'width': 800, 'height': 600})) self.assertEquals(False, can_generate_custom_crop_from_original(780, 590, {'width': 800, 'height': 600})) self.assertEquals(True, can_generate_custom_crop_from_original(780, 590, {'width': 800})) self.assertEquals(True, can_generate_custom_crop_from_original(780, 590, {'height': 800})) self.assertEquals(False, can_generate_custom_crop_from_original(780, 590, None))
agpl-3.0
ammarkhann/FinalSeniorCode
lib/python2.7/site-packages/zmq/eventloop/minitornado/ioloop.py
17
41351
#!/usr/bin/env python # # Copyright 2009 Facebook # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """An I/O event loop for non-blocking sockets. Typical applications will use a single `IOLoop` object, in the `IOLoop.instance` singleton. The `IOLoop.start` method should usually be called at the end of the ``main()`` function. Atypical applications may use more than one `IOLoop`, such as one `IOLoop` per thread, or per `unittest` case. In addition to I/O events, the `IOLoop` can also schedule time-based events. `IOLoop.add_timeout` is a non-blocking alternative to `time.sleep`. """ from __future__ import absolute_import, division, print_function, with_statement import datetime import errno import functools import heapq import itertools import logging import numbers import os import select import sys import threading import time import traceback import math from .concurrent import TracebackFuture, is_future from .log import app_log, gen_log from . import stack_context from .util import Configurable, errno_from_exception, timedelta_to_seconds try: import signal except ImportError: signal = None try: import thread # py2 except ImportError: import _thread as thread # py3 from .platform.auto import set_close_exec, Waker _POLL_TIMEOUT = 3600.0 class TimeoutError(Exception): pass class IOLoop(Configurable): """A level-triggered I/O loop. We use ``epoll`` (Linux) or ``kqueue`` (BSD and Mac OS X) if they are available, or else we fall back on select(). 
If you are implementing a system that needs to handle thousands of simultaneous connections, you should use a system that supports either ``epoll`` or ``kqueue``. Example usage for a simple TCP server: .. testcode:: import errno import functools import tornado.ioloop import socket def connection_ready(sock, fd, events): while True: try: connection, address = sock.accept() except socket.error as e: if e.args[0] not in (errno.EWOULDBLOCK, errno.EAGAIN): raise return connection.setblocking(0) handle_connection(connection, address) if __name__ == '__main__': sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0) sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) sock.setblocking(0) sock.bind(("", port)) sock.listen(128) io_loop = tornado.ioloop.IOLoop.current() callback = functools.partial(connection_ready, sock) io_loop.add_handler(sock.fileno(), callback, io_loop.READ) io_loop.start() .. testoutput:: :hide: By default, a newly-constructed `IOLoop` becomes the thread's current `IOLoop`, unless there already is a current `IOLoop`. This behavior can be controlled with the ``make_current`` argument to the `IOLoop` constructor: if ``make_current=True``, the new `IOLoop` will always try to become current and it raises an error if there is already a current instance. If ``make_current=False``, the new `IOLoop` will not try to become current. .. versionchanged:: 4.2 Added the ``make_current`` keyword argument to the `IOLoop` constructor. """ # Constants from the epoll module _EPOLLIN = 0x001 _EPOLLPRI = 0x002 _EPOLLOUT = 0x004 _EPOLLERR = 0x008 _EPOLLHUP = 0x010 _EPOLLRDHUP = 0x2000 _EPOLLONESHOT = (1 << 30) _EPOLLET = (1 << 31) # Our events map exactly to the epoll events NONE = 0 READ = _EPOLLIN WRITE = _EPOLLOUT ERROR = _EPOLLERR | _EPOLLHUP # Global lock for creating global IOLoop instance _instance_lock = threading.Lock() _current = threading.local() @staticmethod def instance(): """Returns a global `IOLoop` instance. 
Most applications have a single, global `IOLoop` running on the main thread. Use this method to get this instance from another thread. In most other cases, it is better to use `current()` to get the current thread's `IOLoop`. """ if not hasattr(IOLoop, "_instance"): with IOLoop._instance_lock: if not hasattr(IOLoop, "_instance"): # New instance after double check IOLoop._instance = IOLoop() return IOLoop._instance @staticmethod def initialized(): """Returns true if the singleton instance has been created.""" return hasattr(IOLoop, "_instance") def install(self): """Installs this `IOLoop` object as the singleton instance. This is normally not necessary as `instance()` will create an `IOLoop` on demand, but you may want to call `install` to use a custom subclass of `IOLoop`. """ assert not IOLoop.initialized() IOLoop._instance = self @staticmethod def clear_instance(): """Clear the global `IOLoop` instance. .. versionadded:: 4.0 """ if hasattr(IOLoop, "_instance"): del IOLoop._instance @staticmethod def current(instance=True): """Returns the current thread's `IOLoop`. If an `IOLoop` is currently running or has been marked as current by `make_current`, returns that instance. If there is no current `IOLoop`, returns `IOLoop.instance()` (i.e. the main thread's `IOLoop`, creating one if necessary) if ``instance`` is true. In general you should use `IOLoop.current` as the default when constructing an asynchronous object, and use `IOLoop.instance` when you mean to communicate to the main thread from a different one. .. versionchanged:: 4.1 Added ``instance`` argument to control the fallback to `IOLoop.instance()`. """ current = getattr(IOLoop._current, "instance", None) if current is None and instance: return IOLoop.instance() return current def make_current(self): """Makes this the `IOLoop` for the current thread. 
An `IOLoop` automatically becomes current for its thread when it is started, but it is sometimes useful to call `make_current` explicitly before starting the `IOLoop`, so that code run at startup time can find the right instance. .. versionchanged:: 4.1 An `IOLoop` created while there is no current `IOLoop` will automatically become current. """ IOLoop._current.instance = self @staticmethod def clear_current(): IOLoop._current.instance = None @classmethod def configurable_base(cls): return IOLoop @classmethod def configurable_default(cls): # this is the only patch to IOLoop: from zmq.eventloop.ioloop import ZMQIOLoop return ZMQIOLoop if hasattr(select, "epoll"): from tornado.platform.epoll import EPollIOLoop return EPollIOLoop if hasattr(select, "kqueue"): # Python 2.6+ on BSD or Mac from tornado.platform.kqueue import KQueueIOLoop return KQueueIOLoop from tornado.platform.select import SelectIOLoop return SelectIOLoop def initialize(self, make_current=None): if make_current is None: if IOLoop.current(instance=False) is None: self.make_current() elif make_current: if IOLoop.current(instance=False) is not None: raise RuntimeError("current IOLoop already exists") self.make_current() def close(self, all_fds=False): """Closes the `IOLoop`, freeing any resources used. If ``all_fds`` is true, all file descriptors registered on the IOLoop will be closed (not just the ones created by the `IOLoop` itself). Many applications will only use a single `IOLoop` that runs for the entire lifetime of the process. In that case closing the `IOLoop` is not necessary since everything will be cleaned up when the process exits. `IOLoop.close` is provided mainly for scenarios such as unit tests, which create and destroy a large number of ``IOLoops``. An `IOLoop` must be completely stopped before it can be closed. This means that `IOLoop.stop()` must be called *and* `IOLoop.start()` must be allowed to return before attempting to call `IOLoop.close()`. 
Therefore the call to `close` will usually appear just after the call to `start` rather than near the call to `stop`. .. versionchanged:: 3.1 If the `IOLoop` implementation supports non-integer objects for "file descriptors", those objects will have their ``close`` method when ``all_fds`` is true. """ raise NotImplementedError() def add_handler(self, fd, handler, events): """Registers the given handler to receive the given events for ``fd``. The ``fd`` argument may either be an integer file descriptor or a file-like object with a ``fileno()`` method (and optionally a ``close()`` method, which may be called when the `IOLoop` is shut down). The ``events`` argument is a bitwise or of the constants ``IOLoop.READ``, ``IOLoop.WRITE``, and ``IOLoop.ERROR``. When an event occurs, ``handler(fd, events)`` will be run. .. versionchanged:: 4.0 Added the ability to pass file-like objects in addition to raw file descriptors. """ raise NotImplementedError() def update_handler(self, fd, events): """Changes the events we listen for ``fd``. .. versionchanged:: 4.0 Added the ability to pass file-like objects in addition to raw file descriptors. """ raise NotImplementedError() def remove_handler(self, fd): """Stop listening for events on ``fd``. .. versionchanged:: 4.0 Added the ability to pass file-like objects in addition to raw file descriptors. """ raise NotImplementedError() def set_blocking_signal_threshold(self, seconds, action): """Sends a signal if the `IOLoop` is blocked for more than ``s`` seconds. Pass ``seconds=None`` to disable. Requires Python 2.6 on a unixy platform. The action parameter is a Python signal handler. Read the documentation for the `signal` module for more information. If ``action`` is None, the process will be killed if it is blocked for too long. """ raise NotImplementedError() def set_blocking_log_threshold(self, seconds): """Logs a stack trace if the `IOLoop` is blocked for more than ``s`` seconds. 
Equivalent to ``set_blocking_signal_threshold(seconds, self.log_stack)`` """ self.set_blocking_signal_threshold(seconds, self.log_stack) def log_stack(self, signal, frame): """Signal handler to log the stack trace of the current thread. For use with `set_blocking_signal_threshold`. """ gen_log.warning('IOLoop blocked for %f seconds in\n%s', self._blocking_signal_threshold, ''.join(traceback.format_stack(frame))) def start(self): """Starts the I/O loop. The loop will run until one of the callbacks calls `stop()`, which will make the loop stop after the current event iteration completes. """ raise NotImplementedError() def _setup_logging(self): """The IOLoop catches and logs exceptions, so it's important that log output be visible. However, python's default behavior for non-root loggers (prior to python 3.2) is to print an unhelpful "no handlers could be found" message rather than the actual log entry, so we must explicitly configure logging if we've made it this far without anything. This method should be called from start() in subclasses. """ if not any([logging.getLogger().handlers, logging.getLogger('tornado').handlers, logging.getLogger('tornado.application').handlers]): logging.basicConfig() def stop(self): """Stop the I/O loop. If the event loop is not currently running, the next call to `start()` will return immediately. To use asynchronous methods from otherwise-synchronous code (such as unit tests), you can start and stop the event loop like this:: ioloop = IOLoop() async_method(ioloop=ioloop, callback=ioloop.stop) ioloop.start() ``ioloop.start()`` will return after ``async_method`` has run its callback, whether that callback was invoked before or after ``ioloop.start``. Note that even after `stop` has been called, the `IOLoop` is not completely stopped until `IOLoop.start` has also returned. Some work that was scheduled before the call to `stop` may still be run before the `IOLoop` shuts down. 
""" raise NotImplementedError() def run_sync(self, func, timeout=None): """Starts the `IOLoop`, runs the given function, and stops the loop. The function must return either a yieldable object or ``None``. If the function returns a yieldable object, the `IOLoop` will run until the yieldable is resolved (and `run_sync()` will return the yieldable's result). If it raises an exception, the `IOLoop` will stop and the exception will be re-raised to the caller. The keyword-only argument ``timeout`` may be used to set a maximum duration for the function. If the timeout expires, a `TimeoutError` is raised. This method is useful in conjunction with `tornado.gen.coroutine` to allow asynchronous calls in a ``main()`` function:: @gen.coroutine def main(): # do stuff... if __name__ == '__main__': IOLoop.current().run_sync(main) .. versionchanged:: 4.3 Returning a non-``None``, non-yieldable value is now an error. """ future_cell = [None] def run(): try: result = func() if result is not None: from tornado.gen import convert_yielded result = convert_yielded(result) except Exception: future_cell[0] = TracebackFuture() future_cell[0].set_exc_info(sys.exc_info()) else: if is_future(result): future_cell[0] = result else: future_cell[0] = TracebackFuture() future_cell[0].set_result(result) self.add_future(future_cell[0], lambda future: self.stop()) self.add_callback(run) if timeout is not None: timeout_handle = self.add_timeout(self.time() + timeout, self.stop) self.start() if timeout is not None: self.remove_timeout(timeout_handle) if not future_cell[0].done(): raise TimeoutError('Operation timed out after %s seconds' % timeout) return future_cell[0].result() def time(self): """Returns the current time according to the `IOLoop`'s clock. The return value is a floating-point number relative to an unspecified time in the past. By default, the `IOLoop`'s time function is `time.time`. However, it may be configured to use e.g. `time.monotonic` instead. 
Calls to `add_timeout` that pass a number instead of a `datetime.timedelta` should use this function to compute the appropriate time, so they can work no matter what time function is chosen. """ return time.time() def add_timeout(self, deadline, callback, *args, **kwargs): """Runs the ``callback`` at the time ``deadline`` from the I/O loop. Returns an opaque handle that may be passed to `remove_timeout` to cancel. ``deadline`` may be a number denoting a time (on the same scale as `IOLoop.time`, normally `time.time`), or a `datetime.timedelta` object for a deadline relative to the current time. Since Tornado 4.0, `call_later` is a more convenient alternative for the relative case since it does not require a timedelta object. Note that it is not safe to call `add_timeout` from other threads. Instead, you must use `add_callback` to transfer control to the `IOLoop`'s thread, and then call `add_timeout` from there. Subclasses of IOLoop must implement either `add_timeout` or `call_at`; the default implementations of each will call the other. `call_at` is usually easier to implement, but subclasses that wish to maintain compatibility with Tornado versions prior to 4.0 must use `add_timeout` instead. .. versionchanged:: 4.0 Now passes through ``*args`` and ``**kwargs`` to the callback. """ if isinstance(deadline, numbers.Real): return self.call_at(deadline, callback, *args, **kwargs) elif isinstance(deadline, datetime.timedelta): return self.call_at(self.time() + timedelta_to_seconds(deadline), callback, *args, **kwargs) else: raise TypeError("Unsupported deadline %r" % deadline) def call_later(self, delay, callback, *args, **kwargs): """Runs the ``callback`` after ``delay`` seconds have passed. Returns an opaque handle that may be passed to `remove_timeout` to cancel. Note that unlike the `asyncio` method of the same name, the returned object does not have a ``cancel()`` method. See `add_timeout` for comments on thread-safety and subclassing. .. 
versionadded:: 4.0 """ return self.call_at(self.time() + delay, callback, *args, **kwargs) def call_at(self, when, callback, *args, **kwargs): """Runs the ``callback`` at the absolute time designated by ``when``. ``when`` must be a number using the same reference point as `IOLoop.time`. Returns an opaque handle that may be passed to `remove_timeout` to cancel. Note that unlike the `asyncio` method of the same name, the returned object does not have a ``cancel()`` method. See `add_timeout` for comments on thread-safety and subclassing. .. versionadded:: 4.0 """ return self.add_timeout(when, callback, *args, **kwargs) def remove_timeout(self, timeout): """Cancels a pending timeout. The argument is a handle as returned by `add_timeout`. It is safe to call `remove_timeout` even if the callback has already been run. """ raise NotImplementedError() def add_callback(self, callback, *args, **kwargs): """Calls the given callback on the next I/O loop iteration. It is safe to call this method from any thread at any time, except from a signal handler. Note that this is the **only** method in `IOLoop` that makes this thread-safety guarantee; all other interaction with the `IOLoop` must be done from that `IOLoop`'s thread. `add_callback()` may be used to transfer control from other threads to the `IOLoop`'s thread. To add a callback from a signal handler, see `add_callback_from_signal`. """ raise NotImplementedError() def add_callback_from_signal(self, callback, *args, **kwargs): """Calls the given callback on the next I/O loop iteration. Safe for use from a Python signal handler; should not be used otherwise. Callbacks added with this method will be run without any `.stack_context`, to avoid picking up the context of the function that was interrupted by the signal. """ raise NotImplementedError() def spawn_callback(self, callback, *args, **kwargs): """Calls the given callback on the next IOLoop iteration. 
Unlike all other callback-related methods on IOLoop, ``spawn_callback`` does not associate the callback with its caller's ``stack_context``, so it is suitable for fire-and-forget callbacks that should not interfere with the caller. .. versionadded:: 4.0 """ with stack_context.NullContext(): self.add_callback(callback, *args, **kwargs) def add_future(self, future, callback): """Schedules a callback on the ``IOLoop`` when the given `.Future` is finished. The callback is invoked with one argument, the `.Future`. """ assert is_future(future) callback = stack_context.wrap(callback) future.add_done_callback( lambda future: self.add_callback(callback, future)) def _run_callback(self, callback): """Runs a callback with error handling. For use in subclasses. """ try: ret = callback() if ret is not None: from tornado import gen # Functions that return Futures typically swallow all # exceptions and store them in the Future. If a Future # makes it out to the IOLoop, ensure its exception (if any) # gets logged too. try: ret = gen.convert_yielded(ret) except gen.BadYieldError: # It's not unusual for add_callback to be used with # methods returning a non-None and non-yieldable # result, which should just be ignored. pass else: self.add_future(ret, lambda f: f.result()) except Exception: self.handle_callback_exception(callback) def handle_callback_exception(self, callback): """This method is called whenever a callback run by the `IOLoop` throws an exception. By default simply logs the exception as an error. Subclasses may override this method to customize reporting of exceptions. The exception itself is not passed explicitly, but is available in `sys.exc_info`. """ app_log.error("Exception in callback %r", callback, exc_info=True) def split_fd(self, fd): """Returns an (fd, obj) pair from an ``fd`` parameter. We accept both raw file descriptors and file-like objects as input to `add_handler` and related methods. 
When a file-like object is passed, we must retain the object itself so we can close it correctly when the `IOLoop` shuts down, but the poller interfaces favor file descriptors (they will accept file-like objects and call ``fileno()`` for you, but they always return the descriptor itself). This method is provided for use by `IOLoop` subclasses and should not generally be used by application code. .. versionadded:: 4.0 """ try: return fd.fileno(), fd except AttributeError: return fd, fd def close_fd(self, fd): """Utility method to close an ``fd``. If ``fd`` is a file-like object, we close it directly; otherwise we use `os.close`. This method is provided for use by `IOLoop` subclasses (in implementations of ``IOLoop.close(all_fds=True)`` and should not generally be used by application code. .. versionadded:: 4.0 """ try: try: fd.close() except AttributeError: os.close(fd) except OSError: pass class PollIOLoop(IOLoop): """Base class for IOLoops built around a select-like function. For concrete implementations, see `tornado.platform.epoll.EPollIOLoop` (Linux), `tornado.platform.kqueue.KQueueIOLoop` (BSD and Mac), or `tornado.platform.select.SelectIOLoop` (all platforms). 
""" def initialize(self, impl, time_func=None, **kwargs): super(PollIOLoop, self).initialize(**kwargs) self._impl = impl if hasattr(self._impl, 'fileno'): set_close_exec(self._impl.fileno()) self.time_func = time_func or time.time self._handlers = {} self._events = {} self._callbacks = [] self._callback_lock = threading.Lock() self._timeouts = [] self._cancellations = 0 self._running = False self._stopped = False self._closing = False self._thread_ident = None self._blocking_signal_threshold = None self._timeout_counter = itertools.count() # Create a pipe that we send bogus data to when we want to wake # the I/O loop when it is idle self._waker = Waker() self.add_handler(self._waker.fileno(), lambda fd, events: self._waker.consume(), self.READ) def close(self, all_fds=False): with self._callback_lock: self._closing = True self.remove_handler(self._waker.fileno()) if all_fds: for fd, handler in self._handlers.values(): self.close_fd(fd) self._waker.close() self._impl.close() self._callbacks = None self._timeouts = None def add_handler(self, fd, handler, events): fd, obj = self.split_fd(fd) self._handlers[fd] = (obj, stack_context.wrap(handler)) self._impl.register(fd, events | self.ERROR) def update_handler(self, fd, events): fd, obj = self.split_fd(fd) self._impl.modify(fd, events | self.ERROR) def remove_handler(self, fd): fd, obj = self.split_fd(fd) self._handlers.pop(fd, None) self._events.pop(fd, None) try: self._impl.unregister(fd) except Exception: gen_log.debug("Error deleting fd from IOLoop", exc_info=True) def set_blocking_signal_threshold(self, seconds, action): if not hasattr(signal, "setitimer"): gen_log.error("set_blocking_signal_threshold requires a signal module " "with the setitimer method") return self._blocking_signal_threshold = seconds if seconds is not None: signal.signal(signal.SIGALRM, action if action is not None else signal.SIG_DFL) def start(self): if self._running: raise RuntimeError("IOLoop is already running") self._setup_logging() if 
self._stopped: self._stopped = False return old_current = getattr(IOLoop._current, "instance", None) IOLoop._current.instance = self self._thread_ident = thread.get_ident() self._running = True # signal.set_wakeup_fd closes a race condition in event loops: # a signal may arrive at the beginning of select/poll/etc # before it goes into its interruptible sleep, so the signal # will be consumed without waking the select. The solution is # for the (C, synchronous) signal handler to write to a pipe, # which will then be seen by select. # # In python's signal handling semantics, this only matters on the # main thread (fortunately, set_wakeup_fd only works on the main # thread and will raise a ValueError otherwise). # # If someone has already set a wakeup fd, we don't want to # disturb it. This is an issue for twisted, which does its # SIGCHLD processing in response to its own wakeup fd being # written to. As long as the wakeup fd is registered on the IOLoop, # the loop will still wake up and everything should work. old_wakeup_fd = None if hasattr(signal, 'set_wakeup_fd') and os.name == 'posix': # requires python 2.6+, unix. set_wakeup_fd exists but crashes # the python process on windows. try: old_wakeup_fd = signal.set_wakeup_fd(self._waker.write_fileno()) if old_wakeup_fd != -1: # Already set, restore previous value. This is a little racy, # but there's no clean get_wakeup_fd and in real use the # IOLoop is just started once at the beginning. signal.set_wakeup_fd(old_wakeup_fd) old_wakeup_fd = None except ValueError: # Non-main thread, or the previous value of wakeup_fd # is no longer valid. old_wakeup_fd = None try: while True: # Prevent IO event starvation by delaying new callbacks # to the next iteration of the event loop. with self._callback_lock: callbacks = self._callbacks self._callbacks = [] # Add any timeouts that have come due to the callback list. 
# Do not run anything until we have determined which ones # are ready, so timeouts that call add_timeout cannot # schedule anything in this iteration. due_timeouts = [] if self._timeouts: now = self.time() while self._timeouts: if self._timeouts[0].callback is None: # The timeout was cancelled. Note that the # cancellation check is repeated below for timeouts # that are cancelled by another timeout or callback. heapq.heappop(self._timeouts) self._cancellations -= 1 elif self._timeouts[0].deadline <= now: due_timeouts.append(heapq.heappop(self._timeouts)) else: break if (self._cancellations > 512 and self._cancellations > (len(self._timeouts) >> 1)): # Clean up the timeout queue when it gets large and it's # more than half cancellations. self._cancellations = 0 self._timeouts = [x for x in self._timeouts if x.callback is not None] heapq.heapify(self._timeouts) for callback in callbacks: self._run_callback(callback) for timeout in due_timeouts: if timeout.callback is not None: self._run_callback(timeout.callback) # Closures may be holding on to a lot of memory, so allow # them to be freed before we go into our poll wait. callbacks = callback = due_timeouts = timeout = None if self._callbacks: # If any callbacks or timeouts called add_callback, # we don't want to wait in poll() before we run them. poll_timeout = 0.0 elif self._timeouts: # If there are any timeouts, schedule the first one. # Use self.time() instead of 'now' to account for time # spent running callbacks. poll_timeout = self._timeouts[0].deadline - self.time() poll_timeout = max(0, min(poll_timeout, _POLL_TIMEOUT)) else: # No timeouts and no callbacks, so use the default. poll_timeout = _POLL_TIMEOUT if not self._running: break if self._blocking_signal_threshold is not None: # clear alarm so it doesn't fire while poll is waiting for # events. 
signal.setitimer(signal.ITIMER_REAL, 0, 0) try: event_pairs = self._impl.poll(poll_timeout) except Exception as e: # Depending on python version and IOLoop implementation, # different exception types may be thrown and there are # two ways EINTR might be signaled: # * e.errno == errno.EINTR # * e.args is like (errno.EINTR, 'Interrupted system call') if errno_from_exception(e) == errno.EINTR: continue else: raise if self._blocking_signal_threshold is not None: signal.setitimer(signal.ITIMER_REAL, self._blocking_signal_threshold, 0) # Pop one fd at a time from the set of pending fds and run # its handler. Since that handler may perform actions on # other file descriptors, there may be reentrant calls to # this IOLoop that update self._events self._events.update(event_pairs) while self._events: fd, events = self._events.popitem() try: fd_obj, handler_func = self._handlers[fd] handler_func(fd_obj, events) except (OSError, IOError) as e: if errno_from_exception(e) == errno.EPIPE: # Happens when the client closes the connection pass else: self.handle_callback_exception(self._handlers.get(fd)) except Exception: self.handle_callback_exception(self._handlers.get(fd)) fd_obj = handler_func = None finally: # reset the stopped flag so another start/stop pair can be issued self._stopped = False if self._blocking_signal_threshold is not None: signal.setitimer(signal.ITIMER_REAL, 0, 0) IOLoop._current.instance = old_current if old_wakeup_fd is not None: signal.set_wakeup_fd(old_wakeup_fd) def stop(self): self._running = False self._stopped = True self._waker.wake() def time(self): return self.time_func() def call_at(self, deadline, callback, *args, **kwargs): timeout = _Timeout( deadline, functools.partial(stack_context.wrap(callback), *args, **kwargs), self) heapq.heappush(self._timeouts, timeout) return timeout def remove_timeout(self, timeout): # Removing from a heap is complicated, so just leave the defunct # timeout object in the queue (see discussion in # 
http://docs.python.org/library/heapq.html). # If this turns out to be a problem, we could add a garbage # collection pass whenever there are too many dead timeouts. timeout.callback = None self._cancellations += 1 def add_callback(self, callback, *args, **kwargs): if thread.get_ident() != self._thread_ident: # If we're not on the IOLoop's thread, we need to synchronize # with other threads, or waking logic will induce a race. with self._callback_lock: if self._closing: return list_empty = not self._callbacks self._callbacks.append(functools.partial( stack_context.wrap(callback), *args, **kwargs)) if list_empty: # If we're not in the IOLoop's thread, and we added the # first callback to an empty list, we may need to wake it # up (it may wake up on its own, but an occasional extra # wake is harmless). Waking up a polling IOLoop is # relatively expensive, so we try to avoid it when we can. self._waker.wake() else: if self._closing: return # If we're on the IOLoop's thread, we don't need the lock, # since we don't need to wake anyone, just add the # callback. Blindly insert into self._callbacks. This is # safe even from signal handlers because the GIL makes # list.append atomic. One subtlety is that if the signal # is interrupting another thread holding the # _callback_lock block in IOLoop.start, we may modify # either the old or new version of self._callbacks, but # either way will work. 
self._callbacks.append(functools.partial( stack_context.wrap(callback), *args, **kwargs)) def add_callback_from_signal(self, callback, *args, **kwargs): with stack_context.NullContext(): self.add_callback(callback, *args, **kwargs) class _Timeout(object): """An IOLoop timeout, a UNIX timestamp and a callback""" # Reduce memory overhead when there are lots of pending callbacks __slots__ = ['deadline', 'callback', 'tiebreaker'] def __init__(self, deadline, callback, io_loop): if not isinstance(deadline, numbers.Real): raise TypeError("Unsupported deadline %r" % deadline) self.deadline = deadline self.callback = callback self.tiebreaker = next(io_loop._timeout_counter) # Comparison methods to sort by deadline, with object id as a tiebreaker # to guarantee a consistent ordering. The heapq module uses __le__ # in python2.5, and __lt__ in 2.6+ (sort() and most other comparisons # use __lt__). def __lt__(self, other): return ((self.deadline, self.tiebreaker) < (other.deadline, other.tiebreaker)) def __le__(self, other): return ((self.deadline, self.tiebreaker) <= (other.deadline, other.tiebreaker)) class PeriodicCallback(object): """Schedules the given callback to be called periodically. The callback is called every ``callback_time`` milliseconds. Note that the timeout is given in milliseconds, while most other time-related functions in Tornado use seconds. If the callback runs for longer than ``callback_time`` milliseconds, subsequent invocations will be skipped to get back on schedule. `start` must be called after the `PeriodicCallback` is created. .. versionchanged:: 4.1 The ``io_loop`` argument is deprecated. 
""" def __init__(self, callback, callback_time, io_loop=None): self.callback = callback if callback_time <= 0: raise ValueError("Periodic callback must have a positive callback_time") self.callback_time = callback_time self.io_loop = io_loop or IOLoop.current() self._running = False self._timeout = None def start(self): """Starts the timer.""" self._running = True self._next_timeout = self.io_loop.time() self._schedule_next() def stop(self): """Stops the timer.""" self._running = False if self._timeout is not None: self.io_loop.remove_timeout(self._timeout) self._timeout = None def is_running(self): """Return True if this `.PeriodicCallback` has been started. .. versionadded:: 4.1 """ return self._running def _run(self): if not self._running: return try: return self.callback() except Exception: self.io_loop.handle_callback_exception(self.callback) finally: self._schedule_next() def _schedule_next(self): if self._running: current_time = self.io_loop.time() if self._next_timeout <= current_time: callback_time_sec = self.callback_time / 1000.0 self._next_timeout += (math.floor((current_time - self._next_timeout) / callback_time_sec) + 1) * callback_time_sec self._timeout = self.io_loop.add_timeout(self._next_timeout, self._run)
mit
ampax/edx-platform-backup
lms/djangoapps/instructor/management/commands/dump_grades.py
39
3227
#!/usr/bin/python
"""
django management command: dump grades to csv files for use by batch processes
"""
import csv

from instructor.views.legacy import get_student_grade_summary_data
from courseware.courses import get_course_by_id

from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from xmodule.modulestore.django import modulestore

from django.core.management.base import BaseCommand

from instructor.utils import DummyRequest


class Command(BaseCommand):
    # Usage text shown by `manage.py help dump_grades`.
    help = "dump grades to CSV file.  Usage: dump_grades course_id_or_dir filename dump_type\n"
    help += "   course_id_or_dir: either course_id or course_dir\n"
    help += "   filename: where the output CSV is to be stored\n"
    #  help += "   start_date: end date as M/D/Y H:M (defaults to end of available data)"
    help += "   dump_type: 'all' or 'raw' (see instructor dashboard)"

    def handle(self, *args, **options):
        """Dump the grade summary for one course to a CSV file.

        Positional args (all optional, with hard-coded defaults below):
        course_id_or_dir, output filename, dump type ('raw' for raw scores).
        """
        # current grading logic and data schema doesn't handle dates
        # datetime.strptime("21/11/06 16:30", "%m/%d/%y %H:%M")
        print "args = ", args

        # Defaults used when no arguments are supplied (legacy MITx course).
        course_id = 'MITx/8.01rq_MW/Classical_Mechanics_Reading_Questions_Fall_2012_MW_Section'
        fn = "grades.csv"
        get_raw_scores = False

        if len(args) > 0:
            course_id = args[0]
        if len(args) > 1:
            fn = args[1]
        if len(args) > 2:
            get_raw_scores = args[2].lower() == 'raw'

        # The grade-summary helper expects a request object; fake one.
        request = DummyRequest()

        # parse out the course into a coursekey
        try:
            course_key = CourseKey.from_string(course_id)
        # if it's not a new-style course key, parse it from an old-style
        # course key
        except InvalidKeyError:
            course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)

        try:
            course = get_course_by_id(course_key)
        # Ok with catching general exception here because this is run as a management command
        # and the exception is exposed right away to the user.
        except Exception as err:  # pylint: disable=broad-except
            print "-----------------------------------------------------------------------------"
            print "Sorry, cannot find course with id {}".format(course_id)
            print "Got exception {}".format(err)
            print "Please provide a course ID or course data directory name, eg content-mit-801rq"
            return

        print "-----------------------------------------------------------------------------"
        print "Dumping grades from {} to file {} (get_raw_scores={})".format(course.id, fn, get_raw_scores)
        datatable = get_student_grade_summary_data(request, course, get_raw_scores=get_raw_scores)

        fp = open(fn, 'w')

        # Encode every cell as UTF-8; cells may be unicode (names, titles).
        writer = csv.writer(fp, dialect='excel', quotechar='"', quoting=csv.QUOTE_ALL)
        writer.writerow([unicode(s).encode('utf-8') for s in datatable['header']])

        for datarow in datatable['data']:
            encoded_row = [unicode(s).encode('utf-8') for s in datarow]
            writer.writerow(encoded_row)

        fp.close()

        print "Done: {} records dumped".format(len(datatable['data']))
agpl-3.0
hundeboll/core
lib/oelite/meta/dict.py
3
14552
from oelite.meta import * import oelite.path import sys import copy import warnings import cPickle import operator import types import os def unpickle(file): return DictMeta(meta=file) class DictMeta(MetaData): def pickle(self, file): cPickle.dump(self.dict, file, 2) cPickle.dump(self.expand_cache, file, 2) return INDEXED_FLAGS = ("python", "task", "autoimport", "precondition", "export") def __init__(self, meta=None): if isinstance(meta, file): self.dict = cPickle.load(meta) self.expand_cache = cPickle.load(meta) meta = None elif isinstance(meta, DictMeta): self.dict = {} #self.dict = meta.dict.copy() self.dict = copy.deepcopy(meta.dict) #for var in meta.dict: # self.dict[var] = meta.dict[var].copy() self.expand_cache = meta.expand_cache.copy() #self.expand_cache = copy.deepcopy(meta.expand_cache) meta = None else: self.dict = {} self.expand_cache = {} self.dict["__flag_index"] = {} for flag in self.INDEXED_FLAGS: self.dict["__flag_index"][flag] = set([]) super(DictMeta, self).__init__(meta=meta) return def copy(self): return DictMeta(meta=self) def __repr__(self): return '%s()'%(self.__class__.__name__) def __str__(self): return str(self.dict) def __len__(self): # required by Sized return len(self.dict) def keys(self): return self.dict.keys() def set(self, var, val): assert not " " in var try: self.dict[var][""] = val except KeyError: self.dict[var] = {"": val} self.trim_expand_cache(var) return def trim_expand_cache(self, var): for (cached_var, (cached_val, deps)) in self.expand_cache.items(): if cached_var == var or var in deps: # FIXME: is it safe to delete from the dict we are iterating ? 
del self.expand_cache[cached_var] return def set_flag(self, var, flag, val): #print "set_flag %s[%s]=%s"%(var, flag, val) assert not " " in var try: self.dict[var][flag] = val except KeyError: self.dict[var] = {flag: val} if flag in self.dict["__flag_index"]: if val: self.dict["__flag_index"][flag].add(var) else: self.dict["__flag_index"][flag].discard(var) if flag == "": self.trim_expand_cache(var) return def weak_set_flag(self, var, flag, val): if not var in self.dict.keys() or not flag in self.dict[var].keys(): self.set_flag(var, flag, val) def set_override(self, var, override, val): assert var not in ("OVERRIDES", "__overrides", "", ">", "<") assert override[0] in ("", ">", "<") try: self.dict[var]["__overrides"][override[0]][override[1]] = val except KeyError, e: if e.args[0] == var: self.dict[var] = {"__overrides": {'':{}, '>':{}, '<':{}}} else: assert e.args[0] == "__overrides" self.dict[var]["__overrides"] = {'':{}, '>':{}, '<':{}} self.dict[var]["__overrides"][override[0]][override[1]] = val self.trim_expand_cache(var) return def get(self, var, expand=FULL_EXPANSION): #print "get var=%s expand=%s"%(var, expand) assert isinstance(expand, int) val = self._get(var, expand)[0] #print "get returning %s"%(val) return val def _get(self, var, expand=FULL_EXPANSION): #print "_get expand=%s"%(expand) assert isinstance(expand, int) try: val = self.dict[var][""] except KeyError: try: val = self.dict[var]["defaultval"] except KeyError: val = None if not expand: return (val, None) if not var in self.dict: return (None, None) if not isinstance(val, (basestring, types.NoneType)): return (val, None) if expand != OVERRIDES_EXPANSION: try: return self.expand_cache[var] except KeyError: pass override_dep = None if "__overrides" in self.dict[var]: current_overrides, override_dep = self._get_overrides() override_dep.add("OVERRIDES") var_overrides = self.dict[var]["__overrides"][''] append_overrides = self.dict[var]["__overrides"]['>'] prepend_overrides = 
self.dict[var]["__overrides"]['<'] oval = None append = "" prepend = "" var_override_used = None overrides_used = set() for override in current_overrides: if oval is None: try: oval = var_overrides[override] var_override_used = override except KeyError: pass try: append += append_overrides[override] or "" overrides_used.add(override) except KeyError: pass try: prepend = (prepend_overrides[override] or "") + prepend overrides_used.add(override) except KeyError: pass if oval is not None: val = oval overrides_used.add(var_override_used) val = prepend + (val or "") + append for override in overrides_used: if override.startswith('MACHINE_'): self['EXTRA_ARCH'] = '.%s'%(self['MACHINE']) break if expand == OVERRIDES_EXPANSION: return (val, None) deps = set() #print "get expanding %s=%s"%(var, repr(val)) expand_method = self.get_flag(var, "expand") if expand_method: expand_method = int(expand_method) elif self.get_flag(var, "python"): expand_method = NO_EXPANSION else: #expand_method = FULL_EXPANSION expand_method = expand if val: #print "get not expanding anyway" self.expand_stack.push("${%s}"%var) (val, deps) = self._expand(val, expand_method, var) self.expand_stack.pop() if override_dep: deps = deps.union(override_dep) self.expand_cache[var] = (val, deps) return (val, deps) def get_overrides(self): return _get_overrides(self)[0] def _get_overrides(self): overrides = self._get("OVERRIDES", 2) filtered = [] for override in overrides[0].split(":"): if not "${" in override: filtered.append(override) return (filtered, overrides[1]) def get_flag(self, var, flag, expand=False): assert isinstance(expand, int) try: val = self.dict[var][flag] except KeyError: val = None if val and expand: (val, deps) = self._expand(val, expand) return val def get_override(self, var, override): try: return self.dict[var]["__overrides"][override[0]][override[1]] except KeyError: pass return None def del_var(self, var): #print "del_var %s"%(var) for flag in self.dict["__flag_index"]: 
self.dict["__flag_index"][flag].discard(var) del self.dict[var] try: del self.expand_cache[var] except KeyError: pass return def get_list(self, var, expand=FULL_EXPANSION): return (self.get(var, expand) or "").split() def get_flag_list(self, var, flag, expand=False): return (self.get_flag(var, flag, expand) or "").split() def get_vars(self, flag="", values=False): #print "get_vars flag=%s values=%s"%(flag, values) if flag and not flag in self.dict["__flag_index"]: print "get_vars flag=%s not indexed"%(flag) print "__flag_index=%s"%(self.dict["__flag_index"]) if values: vars = {} if flag in self.dict["__flag_index"]: for var in self.dict["__flag_index"][flag]: try: vars[var] = self.dict[var][""] except KeyError: continue else: for var in self.dict: try: if flag is not None and not self.dict[var][flag]: continue vars[var] = self.dict[var][""] except KeyError: continue else: if flag in self.dict["__flag_index"]: vars = self.dict["__flag_index"][flag].copy() else: vars = [] for var in self.dict: try: if flag is not None and not self.dict[var][flag]: continue vars.append(var) except KeyError: continue #print "get_vars: %s"%(vars) return vars def get_flags(self, var, prune_var_value=True): try: flags = self.dict[var].copy() if prune_var_value: try: del flags[""] except KeyError: pass except KeyError: return None return flags def get_var_flags(self, flag="", append=()): var_flags = [] for var in self.get_vars(flag): flags = self.get_flags(var, prune_var_value=False) for flag in flags: var_flags.append((var, flag, flags[flag]) + append) return var_flags def add_hook(self, name, function, sequence=1, after=[], before=[]): if after is None: after = [] if before is None: before = [] try: hooks = self.dict["__hooks"] except KeyError: hooks = self.dict["__hooks"] = {} try: functions = hooks[name] except KeyError: functions = hooks[name] = {} try: if sequence is None or sequence == functions[function][0]: functions[function] = (functions[function][0], 
functions[function][1].union(after)) elif functions[function][0] is None: functions[function] = (sequence, functions[function][1].union(after)) else: raise Exception("Invalid addhook statement (add more debug info here telling what sequence mismatch is and how to resolve it)") except KeyError: functions[function] = (sequence, set(after)) for other_function in before: try: functions[other_function][1].add(function) except KeyError: functions[other_function] = (None, set([function])) self.weak_set_flag(function, "emit", "") return def get_hooks(self, name): try: functions = self.dict["__hooks"][name] except KeyError: return [] functions = sorted(functions.iteritems(), key=operator.itemgetter(1)) num_functions = len(functions) i = 0 while i < num_functions: moved = [] function = functions[i][0] sequence = functions[i][1][0] after = list(functions[i][1][1]) if not after: i += 1 continue move_after = None for j in xrange(i+1, num_functions): if functions[j][0] in after: move_after = max(move_after, j) if not move_after: i += 1 continue if function in moved: raise Exception( "circular hook dependency detected: %s"%(function)) del functions[i] functions.insert(move_after, (function, (sequence, after))) moved.append(function) return [function[0] for function in functions if function[1][0] is not None] def set_preference(self, packages=[], recipe=None, layer=None, version=None): if packages: return self.set_preferred_packages(packages, recipe, layer, version) else: return self.set_preferred_recipe(recipe, layer, version) def set_preferred_recipe(self, recipe, layer, version): preferred_recipes = self.get('__preferred_recipes') or {} try: preferences = preferred_recipes[recipe] except KeyError: preferences = preferred_recipes[recipe] = [] preferences.append((layer, version)) self.set('__preferred_recipes', preferred_recipes) def set_preferred_packages(self, packages, recipe, layer, version): preferred_packages = self.get('__preferred_packages') or {} for package in packages: 
try: preferences = preferred_packages[package] except KeyError: preferences = preferred_packages[package] = [] preferences.append((recipe, layer, version)) self.set('__preferred_packages', preferred_packages) return def set_input_mtime(self, fn, path=None, mtime=None): if mtime is None: if path: f = oelite.path.which(path, fn) if f: mtime = os.path.getmtime(f) else: mtime = None elif os.path.exists(fn): mtime = os.path.getmtime(fn) else: mtime = None mtimes = self.get_input_mtimes() mtimes.append((fn, path, mtime)) self.set("__mtimes", mtimes) return def get_input_mtimes(self): return self.get("__mtimes", expand=False) or [] def finalize(self): #warnings.warn("FIXME: implement DictMeta.finalize()") return
mit
clinton-hall/nzbToMedia
libs/common/unidecode/x028.py
253
5069
data = ( ' ', # 0x00 'a', # 0x01 '1', # 0x02 'b', # 0x03 '\'', # 0x04 'k', # 0x05 '2', # 0x06 'l', # 0x07 '@', # 0x08 'c', # 0x09 'i', # 0x0a 'f', # 0x0b '/', # 0x0c 'm', # 0x0d 's', # 0x0e 'p', # 0x0f '"', # 0x10 'e', # 0x11 '3', # 0x12 'h', # 0x13 '9', # 0x14 'o', # 0x15 '6', # 0x16 'r', # 0x17 '^', # 0x18 'd', # 0x19 'j', # 0x1a 'g', # 0x1b '>', # 0x1c 'n', # 0x1d 't', # 0x1e 'q', # 0x1f ',', # 0x20 '*', # 0x21 '5', # 0x22 '<', # 0x23 '-', # 0x24 'u', # 0x25 '8', # 0x26 'v', # 0x27 '.', # 0x28 '%', # 0x29 '[', # 0x2a '$', # 0x2b '+', # 0x2c 'x', # 0x2d '!', # 0x2e '&', # 0x2f ';', # 0x30 ':', # 0x31 '4', # 0x32 '\\', # 0x33 '0', # 0x34 'z', # 0x35 '7', # 0x36 '(', # 0x37 '_', # 0x38 '?', # 0x39 'w', # 0x3a ']', # 0x3b '#', # 0x3c 'y', # 0x3d ')', # 0x3e '=', # 0x3f '[d7]', # 0x40 '[d17]', # 0x41 '[d27]', # 0x42 '[d127]', # 0x43 '[d37]', # 0x44 '[d137]', # 0x45 '[d237]', # 0x46 '[d1237]', # 0x47 '[d47]', # 0x48 '[d147]', # 0x49 '[d247]', # 0x4a '[d1247]', # 0x4b '[d347]', # 0x4c '[d1347]', # 0x4d '[d2347]', # 0x4e '[d12347]', # 0x4f '[d57]', # 0x50 '[d157]', # 0x51 '[d257]', # 0x52 '[d1257]', # 0x53 '[d357]', # 0x54 '[d1357]', # 0x55 '[d2357]', # 0x56 '[d12357]', # 0x57 '[d457]', # 0x58 '[d1457]', # 0x59 '[d2457]', # 0x5a '[d12457]', # 0x5b '[d3457]', # 0x5c '[d13457]', # 0x5d '[d23457]', # 0x5e '[d123457]', # 0x5f '[d67]', # 0x60 '[d167]', # 0x61 '[d267]', # 0x62 '[d1267]', # 0x63 '[d367]', # 0x64 '[d1367]', # 0x65 '[d2367]', # 0x66 '[d12367]', # 0x67 '[d467]', # 0x68 '[d1467]', # 0x69 '[d2467]', # 0x6a '[d12467]', # 0x6b '[d3467]', # 0x6c '[d13467]', # 0x6d '[d23467]', # 0x6e '[d123467]', # 0x6f '[d567]', # 0x70 '[d1567]', # 0x71 '[d2567]', # 0x72 '[d12567]', # 0x73 '[d3567]', # 0x74 '[d13567]', # 0x75 '[d23567]', # 0x76 '[d123567]', # 0x77 '[d4567]', # 0x78 '[d14567]', # 0x79 '[d24567]', # 0x7a '[d124567]', # 0x7b '[d34567]', # 0x7c '[d134567]', # 0x7d '[d234567]', # 0x7e '[d1234567]', # 0x7f '[d8]', # 0x80 '[d18]', # 0x81 '[d28]', # 0x82 '[d128]', # 0x83 
'[d38]', # 0x84 '[d138]', # 0x85 '[d238]', # 0x86 '[d1238]', # 0x87 '[d48]', # 0x88 '[d148]', # 0x89 '[d248]', # 0x8a '[d1248]', # 0x8b '[d348]', # 0x8c '[d1348]', # 0x8d '[d2348]', # 0x8e '[d12348]', # 0x8f '[d58]', # 0x90 '[d158]', # 0x91 '[d258]', # 0x92 '[d1258]', # 0x93 '[d358]', # 0x94 '[d1358]', # 0x95 '[d2358]', # 0x96 '[d12358]', # 0x97 '[d458]', # 0x98 '[d1458]', # 0x99 '[d2458]', # 0x9a '[d12458]', # 0x9b '[d3458]', # 0x9c '[d13458]', # 0x9d '[d23458]', # 0x9e '[d123458]', # 0x9f '[d68]', # 0xa0 '[d168]', # 0xa1 '[d268]', # 0xa2 '[d1268]', # 0xa3 '[d368]', # 0xa4 '[d1368]', # 0xa5 '[d2368]', # 0xa6 '[d12368]', # 0xa7 '[d468]', # 0xa8 '[d1468]', # 0xa9 '[d2468]', # 0xaa '[d12468]', # 0xab '[d3468]', # 0xac '[d13468]', # 0xad '[d23468]', # 0xae '[d123468]', # 0xaf '[d568]', # 0xb0 '[d1568]', # 0xb1 '[d2568]', # 0xb2 '[d12568]', # 0xb3 '[d3568]', # 0xb4 '[d13568]', # 0xb5 '[d23568]', # 0xb6 '[d123568]', # 0xb7 '[d4568]', # 0xb8 '[d14568]', # 0xb9 '[d24568]', # 0xba '[d124568]', # 0xbb '[d34568]', # 0xbc '[d134568]', # 0xbd '[d234568]', # 0xbe '[d1234568]', # 0xbf '[d78]', # 0xc0 '[d178]', # 0xc1 '[d278]', # 0xc2 '[d1278]', # 0xc3 '[d378]', # 0xc4 '[d1378]', # 0xc5 '[d2378]', # 0xc6 '[d12378]', # 0xc7 '[d478]', # 0xc8 '[d1478]', # 0xc9 '[d2478]', # 0xca '[d12478]', # 0xcb '[d3478]', # 0xcc '[d13478]', # 0xcd '[d23478]', # 0xce '[d123478]', # 0xcf '[d578]', # 0xd0 '[d1578]', # 0xd1 '[d2578]', # 0xd2 '[d12578]', # 0xd3 '[d3578]', # 0xd4 '[d13578]', # 0xd5 '[d23578]', # 0xd6 '[d123578]', # 0xd7 '[d4578]', # 0xd8 '[d14578]', # 0xd9 '[d24578]', # 0xda '[d124578]', # 0xdb '[d34578]', # 0xdc '[d134578]', # 0xdd '[d234578]', # 0xde '[d1234578]', # 0xdf '[d678]', # 0xe0 '[d1678]', # 0xe1 '[d2678]', # 0xe2 '[d12678]', # 0xe3 '[d3678]', # 0xe4 '[d13678]', # 0xe5 '[d23678]', # 0xe6 '[d123678]', # 0xe7 '[d4678]', # 0xe8 '[d14678]', # 0xe9 '[d24678]', # 0xea '[d124678]', # 0xeb '[d34678]', # 0xec '[d134678]', # 0xed '[d234678]', # 0xee '[d1234678]', # 0xef '[d5678]', # 
0xf0 '[d15678]', # 0xf1 '[d25678]', # 0xf2 '[d125678]', # 0xf3 '[d35678]', # 0xf4 '[d135678]', # 0xf5 '[d235678]', # 0xf6 '[d1235678]', # 0xf7 '[d45678]', # 0xf8 '[d145678]', # 0xf9 '[d245678]', # 0xfa '[d1245678]', # 0xfb '[d345678]', # 0xfc '[d1345678]', # 0xfd '[d2345678]', # 0xfe '[d12345678]', # 0xff )
gpl-3.0
sparkslabs/kamaelia_
Sketches/MPS/BugReports/FixTests/Kamaelia/Kamaelia/Internet/Multicast_receiver.py
3
4324
#!/usr/bin/python # -*- coding: utf-8 -*- # # Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1) # # (1) Kamaelia Contributors are listed in the AUTHORS file and at # http://www.kamaelia.org/AUTHORS - please extend this file, # not this notice. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ------------------------------------------------------------------------- """\ ========================= Simple multicast receiver ========================= A simple component for receiving packets in the specified multicast group. Remember that multicast is an unreliable connection - packets may be lost, duplicated or reordered. Example Usage ------------- Receiving multicast packets from group address 1.2.3.4 port 1000 and displaying them on the console:: Pipeline( Multicast_receiver("1.2.3.4", 1000), ConsoleEchoer() ).activate() The data emitted by Multicast_receiver (and displayed by ConsoleEchoer) is of the form (source_address, data). More detail ----------- Data received from the multicast group is emitted as a tuple: (source_addr, data) where data is a string of the received data. This component ignores anything received on its "control" inbox. It is not yet possible to ask it to shut down. It does not terminate. Multicast groups do not 'shut down', so this component never emits any signals on its "signal" outbox. 
"""

import socket
import Axon


class Multicast_receiver(Axon.Component.component):
    """\
    Multicast_receiver(address, port) -> component that receives multicast traffic.

    Creates a component that receives multicast packets in the given multicast group
    and sends it out of its "outbox" outbox.

    Keyword arguments:

    - address  -- address of multicast group (string)
    - port     -- port number
    """

    Inboxes  = { "inbox"   : "NOT USED",
                 "control" : "NOT USED",
               }
    Outboxes = { "outbox" : "Emits (src_addr, data_received)",
                 "signal" : "NOT USED",
               }

    def __init__(self, address, port):
        """x.__init__(...) initializes x; see x.__class__.__doc__ for signature"""
        super(Multicast_receiver, self).__init__()
        self.mcast_addr = address
        self.port = port

    def main(self):
        """Main loop"""
        # Plain UDP socket; group membership is added via IP_ADD_MEMBERSHIP
        # below.
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM,
                             socket.IPPROTO_UDP)
        sock.bind((self.mcast_addr, self.port))  # Specifically we want to receieve stuff
                                                 # from server on this address.

        sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 255)
        # Join the group on the default interface ("0.0.0.0").
        status = sock.setsockopt(socket.IPPROTO_IP,
                                 socket.IP_ADD_MEMBERSHIP,
                                 socket.inet_aton(self.mcast_addr) + socket.inet_aton("0.0.0.0"))
        # Non-blocking: recvfrom raises socket.error when no data is ready,
        # which is silently ignored so the generator keeps yielding.
        sock.setblocking(0)
        while 1:
            try:
                data, addr = sock.recvfrom(1024)
            except socket.error, e:  # NOTE: Python 2 except syntax
                pass
            else:
                message = (addr, data)
                self.send(message, "outbox")
            yield 1


def tests():
    """Ad-hoc manual test: echo packets from group 224.168.2.9:1600 to the console."""
    from Axon.Scheduler import scheduler
    from Kamaelia.Util.Console import ConsoleEchoer

    class testComponent(Axon.Component.component):
        def main(self):
            receiver = Multicast_receiver("224.168.2.9", 1600)
            display = ConsoleEchoer()

            self.link((receiver, "outbox"), (display, "inbox"))
            self.addChildren(receiver, display)
            yield Axon.Ipc.newComponent(*(self.children))
            while 1:
                self.pause()
                yield 1

    harness = testComponent()
    harness.activate()
    scheduler.run.runThreads(slowmo=0.1)

__kamaelia_components__ = ( Multicast_receiver, )


if __name__ == "__main__":

    tests()
apache-2.0
nomaro/SickBeard_Backup
sickbeard/naming.py
30
6135
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard.  If not, see <http://www.gnu.org/licenses/>.

import datetime
import os

import sickbeard

from sickbeard import encodingKludge as ek
from sickbeard import tv
from sickbeard import common
from sickbeard import logger
from sickbeard.name_parser.parser import NameParser, InvalidNameException

from common import Quality, DOWNLOADED

# Filename-pattern presets offered in the UI for normal shows.
name_presets = ('%SN - %Sx%0E - %EN',
                '%S.N.S%0SE%0E.%E.N',
                '%Sx%0E - %EN',
                'S%0SE%0E - %EN',
                'Saison %0S/%S.N.S%0SE%0E.%Q.N-%RG'
                )

# Presets for air-by-date shows.
name_abd_presets = ('%SN - %A-D - %EN',
                    '%S.N.%A.D.%E.N.%Q.N',
                    '%Y/%0M/%S.N.%A.D.%E.N-%RG'
                    )


class TVShow():
    """Minimal stand-in show used when rendering sample names."""
    def __init__(self):
        self.name = "Show Name"
        self.genre = "Comedy"
        self.air_by_date = 0


class TVEpisode(tv.TVEpisode):
    """Sample episode with fixed data, used to test naming patterns."""
    def __init__(self, season, episode, name):
        self.relatedEps = []
        self._name = name
        self._season = season
        self._episode = episode
        self._airdate = datetime.date(2010, 3, 9)
        self.show = TVShow()
        self._status = Quality.compositeStatus(common.DOWNLOADED, common.Quality.SDTV)
        self._release_name = 'Show.Name.S02E03.HDTV.XviD-RLSGROUP'


def check_force_season_folders(pattern=None, multi=None):
    """
    Checks if the name can still be parsed if you strip off the folders to determine if we need to force season folders
    to be enabled or not.

    Returns true if season folders need to be forced on or false otherwise.
    """
    # PEP 8: compare to the None singleton with 'is', not '=='.
    if pattern is None:
        pattern = sickbeard.NAMING_PATTERN

    valid = not validate_name(pattern, None, file_only=True)

    if multi is not None:
        valid = valid or not validate_name(pattern, multi, file_only=True)

    return valid


def check_valid_naming(pattern=None, multi=None):
    """
    Checks if the name is can be parsed back to its original form for both single and multi episodes.

    Returns true if the naming is valid, false if not.
    """
    if pattern is None:
        pattern = sickbeard.NAMING_PATTERN

    logger.log(u"Checking whether the pattern "+pattern+" is valid for a single episode", logger.DEBUG)
    valid = validate_name(pattern, None)

    if multi is not None:
        logger.log(u"Checking whether the pattern "+pattern+" is valid for a multi episode", logger.DEBUG)
        valid = valid and validate_name(pattern, multi)

    return valid


def check_valid_abd_naming(pattern=None):
    """
    Checks if the name is can be parsed back to its original form for an air-by-date format.

    Returns true if the naming is valid, false if not.
    """
    if pattern is None:
        pattern = sickbeard.NAMING_PATTERN

    logger.log(u"Checking whether the pattern "+pattern+" is valid for an air-by-date episode", logger.DEBUG)
    valid = validate_name(pattern, abd=True)

    return valid


def validate_name(pattern, multi=None, file_only=False, abd=False):
    """Render a sample episode with the pattern and parse it back.

    Returns True when the season/episode numbers (or air date, for abd)
    survive the round trip, False otherwise.
    """
    ep = _generate_sample_ep(multi, abd)

    parser = NameParser(True)

    new_name = ep.formatted_filename(pattern, multi) + '.ext'
    new_path = ep.formatted_dir(pattern, multi)
    if not file_only:
        new_name = ek.ek(os.path.join, new_path, new_name)

    if not new_name:
        logger.log(u"Unable to create a name out of "+pattern, logger.DEBUG)
        return False

    logger.log(u"Trying to parse "+new_name, logger.DEBUG)

    try:
        result = parser.parse(new_name)
    except InvalidNameException:
        logger.log(u"Unable to parse "+new_name+", not valid", logger.DEBUG)
        return False

    logger.log("The name "+new_name + " parsed into " + str(result), logger.DEBUG)

    if abd:
        if result.air_date != ep.airdate:
            logger.log(u"Air date incorrect in parsed episode, pattern isn't valid", logger.DEBUG)
            return False
    else:
        if result.season_number != ep.season:
            logger.log(u"Season incorrect in parsed episode, pattern isn't valid", logger.DEBUG)
            return False
        if result.episode_numbers != [x.episode for x in [ep] + ep.relatedEps]:
            logger.log(u"Episode incorrect in parsed episode, pattern isn't valid", logger.DEBUG)
            return False

    return True


def _generate_sample_ep(multi=None, abd=False):
    """Build a fake TVEpisode (S02E03, optionally with two related eps for
    multi-episode patterns, or an air-by-date release name for abd)."""
    # make a fake episode object
    ep = TVEpisode(2, 3, "Ep Name")
    ep._status = Quality.compositeStatus(DOWNLOADED, Quality.HDTV)
    ep._airdate = datetime.date(2011, 3, 9)
    if abd:
        ep._release_name = 'Show.Name.2011.03.09.HDTV.XviD-RLSGROUP'
    else:
        ep._release_name = 'Show.Name.S02E03.HDTV.XviD-RLSGROUP'

    if multi is not None:
        ep._name = "Ep Name (1)"
        ep._release_name = 'Show.Name.S02E03E04E05.HDTV.XviD-RLSGROUP'

        secondEp = TVEpisode(2, 4, "Ep Name (2)")
        secondEp._status = Quality.compositeStatus(DOWNLOADED, Quality.HDTV)
        secondEp._release_name = ep._release_name

        thirdEp = TVEpisode(2, 5, "Ep Name (3)")
        thirdEp._status = Quality.compositeStatus(DOWNLOADED, Quality.HDTV)
        thirdEp._release_name = ep._release_name

        ep.relatedEps.append(secondEp)
        ep.relatedEps.append(thirdEp)

    return ep


def test_name(pattern, multi=None, abd=False):
    """Return {'name': ..., 'dir': ...} rendered from the pattern for a
    sample episode, for preview in the settings UI."""
    ep = _generate_sample_ep(multi, abd)
    return {'name': ep.formatted_filename(pattern, multi),
            'dir': ep.formatted_dir(pattern, multi)}
gpl-3.0
isb-cgc/ISB-CGC-Webapp
bq_data_access/v1/mrna_data.py
1
6752
# # Copyright 2015-2019, Institute for Systems Biology # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from builtins import str from builtins import object from google_helpers.bigquery.service import get_bigquery_service import logging from bq_data_access.v1.errors import FeatureNotFoundException from bq_data_access.v1.feature_value_types import ValueType, DataTypes from django.conf import settings TABLES = [ { 'table_id': 'mRNA_BCGSC_GA_RPKM', 'platform': 'Illumina GA', 'center': 'BCGSC', 'id': 'mrna_bcgsc_illumina_ga', 'value_label': 'RPKM', 'value_field': 'RPKM' }, { 'table_id': 'mRNA_BCGSC_HiSeq_RPKM', 'platform': 'Illumina HiSeq', 'center': 'BCGSC', 'id': 'mrna_bcgsc_illumina_hiseq', 'value_label': 'RPKM', 'value_field': 'RPKM' }, { 'table_id': 'mRNA_UNC_GA_RSEM', 'platform': 'Illumina GA', 'center': 'UNC', 'id': 'mrna_unc_illumina_ga', 'value_label': 'RSEM', 'value_field': 'normalized_count' }, { 'table_id': 'mRNA_UNC_HiSeq_RSEM', 'platform': 'Illumina HiSeq', 'center': 'UNC', 'id': 'mrna_unc_illumina_hiseq', 'value_label': 'RSEM', 'value_field': 'normalized_count' } ] GEXP_FEATURE_TYPE = 'GEXP' def get_feature_type(): return GEXP_FEATURE_TYPE def build_feature_label(gene, info): # print info # Example: 'EGFR mRNA (Illumina HiSeq, UNC RSEM)' label = gene + " mRNA (" + info['platform'] + ", " + info['center'] + " " + info['value_label'] + ")" return label def build_internal_feature_id(gene, table_id): return '{feature_type}:{gene}:{table}'.format( 
feature_type=get_feature_type(), gene=gene, table=table_id ) def get_table_info(table_id): table_info = None for table_entry in TABLES: if table_id == table_entry['id']: table_info = table_entry return table_info def build_query(project_name, dataset_name, table_name, gene_symbol, value_field, cohort_dataset, cohort_table, cohort_id_array, project_id_array): cohort_project_name = settings.GCLOUD_PROJECT_ID # Generate the 'IN' statement string: (%s, %s, ..., %s) cohort_id_stmt = ', '.join([str(cohort_id) for cohort_id in cohort_id_array]) project_id_stmt = '' if project_id_array is not None: project_id_stmt = ', '.join([str(project_id) for project_id in project_id_array]) query_template = \ ("SELECT ParticipantBarcode AS case_id, SampleBarcode AS sample_id, AliquotBarcode AS aliquot_id, {value_field} AS value " "FROM [{project_name}:{dataset_name}.{table_name}] AS gexp " "WHERE original_gene_symbol='{gene_symbol}' " "AND SampleBarcode IN ( " " SELECT sample_barcode " " FROM [{cohort_project_name}:{cohort_dataset}.{cohort_table}] " " WHERE cohort_id IN ({cohort_id_list})" " AND (project_id IS NULL") query_template += (" OR project_id IN ({project_id_list})))" if project_id_array is not None else "))") query = query_template.format(dataset_name=dataset_name, project_name=project_name, table_name=table_name, gene_symbol=gene_symbol, value_field=value_field, cohort_project_name=cohort_project_name, cohort_dataset=cohort_dataset, cohort_table=cohort_table, cohort_id_list=cohort_id_stmt, project_id_list=project_id_stmt) logging.debug("BQ_QUERY_GEXP: " + query) return query def do_query(project_id, project_name, dataset_name, table_name, gene_symbol, value_field, cohort_dataset, cohort_table, cohort_id_array): bigquery_service = get_bigquery_service() query = build_query(project_name, dataset_name, table_name, gene_symbol, value_field, cohort_dataset, cohort_table, cohort_id_array) query_body = { 'query': query } table_data = bigquery_service.jobs() query_response = 
table_data.query(projectId=project_id, body=query_body).execute() result = [] num_result_rows = int(query_response['totalRows']) if num_result_rows == 0: return result for row in query_response['rows']: result.append({ 'case_id': row['f'][0]['v'], 'sample_id': row['f'][1]['v'], 'aliquot_id': row['f'][2]['v'], 'value': float(row['f'][3]['v']) }) return result class MRNAFeatureProvider(object): def __init__(self, feature_id): self.feature_type = '' self.gene_label = '' self.table_id = '' self.table_info = None self.value_field = '' self.table_name = '' self.parse_internal_feature_id(feature_id) def get_value_type(self): return ValueType.FLOAT def get_feature_type(self): return DataTypes.GEXP def process_data_point(self, data_point): return str(data_point['value']) def get_data_from_bigquery(self, cohort_id_array, cohort_dataset, cohort_table): project_id = settings.BIGQUERY_PROJECT_ID project_name = settings.BIGQUERY_DATA_PROJECT_ID dataset_name = settings.BIGQUERY_DATASET_V1 result = do_query(project_id, project_name, dataset_name, self.table_name, self.gene_label, self.value_field, cohort_dataset, cohort_table, cohort_id_array) return result def get_data(self, cohort_id_array, cohort_dataset, cohort_table): result = self.get_data_from_bigquery(cohort_id_array, cohort_dataset, cohort_table) return result def parse_internal_feature_id(self, feature_id): # TODO better feature ID input validation feature_type, gene_label, table_id = feature_id.split(':') self.feature_type = feature_type self.gene_label = gene_label self.table_id = table_id self.table_info = get_table_info(table_id) if self.table_info is None: raise FeatureNotFoundException(feature_id) self.table_name = self.table_info['table_id'] self.value_field = self.table_info['value_field']
apache-2.0
garyfeng/pybrain
pybrain/structure/networks/bidirectional.py
31
4053
__author__ = 'Tom Schaul, tom@idsia.ch' from pybrain.structure.modules import TanhLayer, SigmoidLayer from pybrain.structure.networks.feedforward import FeedForwardNetwork from pybrain.structure.connections.shared import MotherConnection, SharedFullConnection from pybrain.structure.modules.linearlayer import LinearLayer from pybrain.structure.modulemesh import ModuleMesh class BidirectionalNetwork(FeedForwardNetwork): """ A bi-directional recurrent neural network, implemented as unfolded in time. """ #: should the weights for the forward-direction be the same than for the backward-direction? symmetric = False #: class for the hidden layers componentclass = TanhLayer #: class for the output layers outcomponentclass = SigmoidLayer #: number of inputs for each component of the sequence inputsize = 1 #: number of outputs for each component of the sequence outputsize = 1 #: number of hidden neurons in each hiddne layer hiddensize = 5 #: length of the sequences seqlen = None def __init__(self, predefined = None, **kwargs): """ For the current implementation, the sequence length needs to be fixed, and given at construction time. 
""" if predefined is not None: self.predefined = predefined else: self.predefined = {} FeedForwardNetwork.__init__(self, **kwargs) assert self.seqlen is not None # the input is a 1D-mesh (as a view on a flat input layer) inmod = LinearLayer(self.inputsize * self.seqlen, name='input') inmesh = ModuleMesh.viewOnFlatLayer(inmod, (self.seqlen,), 'inmesh') # the output is also a 1D-mesh outmod = self.outcomponentclass(self.outputsize * self.seqlen, name='output') outmesh = ModuleMesh.viewOnFlatLayer(outmod, (self.seqlen,), 'outmesh') # the hidden layers are places in a 2xseqlen mesh hiddenmesh = ModuleMesh.constructWithLayers(self.componentclass, self.hiddensize, (2, self.seqlen), 'hidden') # add the modules for c in inmesh: self.addInputModule(c) for c in outmesh: self.addOutputModule(c) for c in hiddenmesh: self.addModule(c) # set the connections weights to be shared inconnf = MotherConnection(inmesh.componentOutdim * hiddenmesh.componentIndim, name='inconn') outconnf = MotherConnection(outmesh.componentIndim * hiddenmesh.componentOutdim, name='outconn') forwardconn = MotherConnection(hiddenmesh.componentIndim * hiddenmesh.componentOutdim, name='fconn') if self.symmetric: backwardconn = forwardconn inconnb = inconnf outconnb = outconnf else: backwardconn = MotherConnection(hiddenmesh.componentIndim * hiddenmesh.componentOutdim, name='bconn') inconnb = MotherConnection(inmesh.componentOutdim * hiddenmesh.componentIndim, name='inconn') outconnb = MotherConnection(outmesh.componentIndim * hiddenmesh.componentOutdim, name='outconn') # build the connections for i in range(self.seqlen): # input to hidden self.addConnection(SharedFullConnection(inconnf, inmesh[(i,)], hiddenmesh[(0, i)])) self.addConnection(SharedFullConnection(inconnb, inmesh[(i,)], hiddenmesh[(1, i)])) # hidden to output self.addConnection(SharedFullConnection(outconnf, hiddenmesh[(0, i)], outmesh[(i,)])) self.addConnection(SharedFullConnection(outconnb, hiddenmesh[(1, i)], outmesh[(i,)])) if i > 0: # 
forward in time self.addConnection(SharedFullConnection(forwardconn, hiddenmesh[(0, i - 1)], hiddenmesh[(0, i)])) if i < self.seqlen - 1: # backward in time self.addConnection(SharedFullConnection(backwardconn, hiddenmesh[(1, i + 1)], hiddenmesh[(1, i)])) self.sortModules()
bsd-3-clause
supersven/intellij-community
python/helpers/pydev/_pydev_imps/_pydev_BaseHTTPServer.py
54
22554
"""HTTP server base class. Note: the class in this module doesn't implement any HTTP request; see SimpleHTTPServer for simple implementations of GET, HEAD and POST (including CGI scripts). It does, however, optionally implement HTTP/1.1 persistent connections, as of version 0.3. Contents: - BaseHTTPRequestHandler: HTTP request handler base class - test: test function XXX To do: - log requests even later (to capture byte count) - log user-agent header and other interesting goodies - send error log to separate file """ # See also: # # HTTP Working Group T. Berners-Lee # INTERNET-DRAFT R. T. Fielding # <draft-ietf-http-v10-spec-00.txt> H. Frystyk Nielsen # Expires September 8, 1995 March 8, 1995 # # URL: http://www.ics.uci.edu/pub/ietf/http/draft-ietf-http-v10-spec-00.txt # # and # # Network Working Group R. Fielding # Request for Comments: 2616 et al # Obsoletes: 2068 June 1999 # Category: Standards Track # # URL: http://www.faqs.org/rfcs/rfc2616.html # Log files # --------- # # Here's a quote from the NCSA httpd docs about log file format. # # | The logfile format is as follows. Each line consists of: # | # | host rfc931 authuser [DD/Mon/YYYY:hh:mm:ss] "request" ddd bbbb # | # | host: Either the DNS name or the IP number of the remote client # | rfc931: Any information returned by identd for this person, # | - otherwise. # | authuser: If user sent a userid for authentication, the user name, # | - otherwise. # | DD: Day # | Mon: Month (calendar name) # | YYYY: Year # | hh: hour (24-hour format, the machine's timezone) # | mm: minutes # | ss: seconds # | request: The first line of the HTTP request as sent by the client. # | ddd: the status code returned by the server, - if not available. # | bbbb: the total number of bytes sent, # | *not including the HTTP/1.0 header*, - if not available # | # | You can determine the name of the file accessed through request. 
# # (Actually, the latter is only true if you know the server configuration # at the time the request was made!) __version__ = "0.3" __all__ = ["HTTPServer", "BaseHTTPRequestHandler"] import sys from _pydev_imps import _pydev_time as time from _pydev_imps import _pydev_socket as socket from warnings import filterwarnings, catch_warnings with catch_warnings(): if sys.py3kwarning: filterwarnings("ignore", ".*mimetools has been removed", DeprecationWarning) import mimetools from _pydev_imps import _pydev_SocketServer as SocketServer # Default error message template DEFAULT_ERROR_MESSAGE = """\ <head> <title>Error response</title> </head> <body> <h1>Error response</h1> <p>Error code %(code)d. <p>Message: %(message)s. <p>Error code explanation: %(code)s = %(explain)s. </body> """ DEFAULT_ERROR_CONTENT_TYPE = "text/html" def _quote_html(html): return html.replace("&", "&amp;").replace("<", "&lt;").replace(">", "&gt;") class HTTPServer(SocketServer.TCPServer): allow_reuse_address = 1 # Seems to make sense in testing environment def server_bind(self): """Override server_bind to store the server name.""" SocketServer.TCPServer.server_bind(self) host, port = self.socket.getsockname()[:2] self.server_name = socket.getfqdn(host) self.server_port = port class BaseHTTPRequestHandler(SocketServer.StreamRequestHandler): """HTTP request handler base class. The following explanation of HTTP serves to guide you through the code as well as to expose any misunderstandings I may have about HTTP (so you don't need to read the code to figure out I'm wrong :-). HTTP (HyperText Transfer Protocol) is an extensible protocol on top of a reliable stream transport (e.g. TCP/IP). The protocol recognizes three parts to a request: 1. One line identifying the request type and path 2. An optional set of RFC-822-style headers 3. An optional data part The headers and data are separated by a blank line. 
The first line of the request has the form <command> <path> <version> where <command> is a (case-sensitive) keyword such as GET or POST, <path> is a string containing path information for the request, and <version> should be the string "HTTP/1.0" or "HTTP/1.1". <path> is encoded using the URL encoding scheme (using %xx to signify the ASCII character with hex code xx). The specification specifies that lines are separated by CRLF but for compatibility with the widest range of clients recommends servers also handle LF. Similarly, whitespace in the request line is treated sensibly (allowing multiple spaces between components and allowing trailing whitespace). Similarly, for output, lines ought to be separated by CRLF pairs but most clients grok LF characters just fine. If the first line of the request has the form <command> <path> (i.e. <version> is left out) then this is assumed to be an HTTP 0.9 request; this form has no optional headers and data part and the reply consists of just the data. The reply form of the HTTP 1.x protocol again has three parts: 1. One line giving the response code 2. An optional set of RFC-822-style headers 3. The data Again, the headers and data are separated by a blank line. The response code line has the form <version> <responsecode> <responsestring> where <version> is the protocol version ("HTTP/1.0" or "HTTP/1.1"), <responsecode> is a 3-digit response code indicating success or failure of the request, and <responsestring> is an optional human-readable string explaining what the response code means. This server parses the request and the headers, and then calls a function specific to the request type (<command>). Specifically, a request SPAM will be handled by a method do_SPAM(). If no such method exists the server sends an error response to the client. If it exists, it is called with no arguments: do_SPAM() Note that the request name is case sensitive (i.e. SPAM and spam are different requests). 
The various request details are stored in instance variables: - client_address is the client IP address in the form (host, port); - command, path and version are the broken-down request line; - headers is an instance of mimetools.Message (or a derived class) containing the header information; - rfile is a file object open for reading positioned at the start of the optional input data part; - wfile is a file object open for writing. IT IS IMPORTANT TO ADHERE TO THE PROTOCOL FOR WRITING! The first thing to be written must be the response line. Then follow 0 or more header lines, then a blank line, and then the actual data (if any). The meaning of the header lines depends on the command executed by the server; in most cases, when data is returned, there should be at least one header line of the form Content-type: <type>/<subtype> where <type> and <subtype> should be registered MIME types, e.g. "text/html" or "text/plain". """ # The Python system version, truncated to its first component. sys_version = "Python/" + sys.version.split()[0] # The server software version. You may want to override this. # The format is multiple whitespace-separated strings, # where each string is of the form name[/version]. server_version = "BaseHTTP/" + __version__ # The default request version. This only affects responses up until # the point where the request line is parsed, so it mainly decides what # the client gets back when sending a malformed request line. # Most web servers default to HTTP 0.9, i.e. don't send a status line. default_request_version = "HTTP/0.9" def parse_request(self): """Parse a request (internal). The request should be stored in self.raw_requestline; the results are in self.command, self.path, self.request_version and self.headers. Return True for success, False for failure; on failure, an error is sent back. 
""" self.command = None # set in case of error on the first line self.request_version = version = self.default_request_version self.close_connection = 1 requestline = self.raw_requestline requestline = requestline.rstrip('\r\n') self.requestline = requestline words = requestline.split() if len(words) == 3: command, path, version = words if version[:5] != 'HTTP/': self.send_error(400, "Bad request version (%r)" % version) return False try: base_version_number = version.split('/', 1)[1] version_number = base_version_number.split(".") # RFC 2145 section 3.1 says there can be only one "." and # - major and minor numbers MUST be treated as # separate integers; # - HTTP/2.4 is a lower version than HTTP/2.13, which in # turn is lower than HTTP/12.3; # - Leading zeros MUST be ignored by recipients. if len(version_number) != 2: raise ValueError version_number = int(version_number[0]), int(version_number[1]) except (ValueError, IndexError): self.send_error(400, "Bad request version (%r)" % version) return False if version_number >= (1, 1) and self.protocol_version >= "HTTP/1.1": self.close_connection = 0 if version_number >= (2, 0): self.send_error(505, "Invalid HTTP Version (%s)" % base_version_number) return False elif len(words) == 2: command, path = words self.close_connection = 1 if command != 'GET': self.send_error(400, "Bad HTTP/0.9 request type (%r)" % command) return False elif not words: return False else: self.send_error(400, "Bad request syntax (%r)" % requestline) return False self.command, self.path, self.request_version = command, path, version # Examine the headers and look for a Connection directive self.headers = self.MessageClass(self.rfile, 0) conntype = self.headers.get('Connection', "") if conntype.lower() == 'close': self.close_connection = 1 elif (conntype.lower() == 'keep-alive' and self.protocol_version >= "HTTP/1.1"): self.close_connection = 0 return True def handle_one_request(self): """Handle a single HTTP request. 
You normally don't need to override this method; see the class __doc__ string for information on how to handle specific HTTP commands such as GET and POST. """ try: self.raw_requestline = self.rfile.readline(65537) if len(self.raw_requestline) > 65536: self.requestline = '' self.request_version = '' self.command = '' self.send_error(414) return if not self.raw_requestline: self.close_connection = 1 return if not self.parse_request(): # An error code has been sent, just exit return mname = 'do_' + self.command if not hasattr(self, mname): self.send_error(501, "Unsupported method (%r)" % self.command) return method = getattr(self, mname) method() self.wfile.flush() #actually send the response if not already done. except socket.timeout: #a read or a write timed out. Discard this connection self.log_error("Request timed out: %r", sys.exc_info()[1]) self.close_connection = 1 return def handle(self): """Handle multiple requests if necessary.""" self.close_connection = 1 self.handle_one_request() while not self.close_connection: self.handle_one_request() def send_error(self, code, message=None): """Send and log an error reply. Arguments are the error code, and a detailed message. The detailed message defaults to the short entry matching the response code. This sends an error response (so it must be called before any output has been generated), logs the error, and finally sends a piece of HTML explaining the error to the user. """ try: short, long = self.responses[code] except KeyError: short, long = '???', '???' 
if message is None: message = short explain = long self.log_error("code %d, message %s", code, message) # using _quote_html to prevent Cross Site Scripting attacks (see bug #1100201) content = (self.error_message_format % {'code': code, 'message': _quote_html(message), 'explain': explain}) self.send_response(code, message) self.send_header("Content-Type", self.error_content_type) self.send_header('Connection', 'close') self.end_headers() if self.command != 'HEAD' and code >= 200 and code not in (204, 304): self.wfile.write(content) error_message_format = DEFAULT_ERROR_MESSAGE error_content_type = DEFAULT_ERROR_CONTENT_TYPE def send_response(self, code, message=None): """Send the response header and log the response code. Also send two standard headers with the server software version and the current date. """ self.log_request(code) if message is None: if code in self.responses: message = self.responses[code][0] else: message = '' if self.request_version != 'HTTP/0.9': self.wfile.write("%s %d %s\r\n" % (self.protocol_version, code, message)) # print (self.protocol_version, code, message) self.send_header('Server', self.version_string()) self.send_header('Date', self.date_time_string()) def send_header(self, keyword, value): """Send a MIME header.""" if self.request_version != 'HTTP/0.9': self.wfile.write("%s: %s\r\n" % (keyword, value)) if keyword.lower() == 'connection': if value.lower() == 'close': self.close_connection = 1 elif value.lower() == 'keep-alive': self.close_connection = 0 def end_headers(self): """Send the blank line ending the MIME headers.""" if self.request_version != 'HTTP/0.9': self.wfile.write("\r\n") def log_request(self, code='-', size='-'): """Log an accepted request. This is called by send_response(). """ self.log_message('"%s" %s %s', self.requestline, str(code), str(size)) def log_error(self, format, *args): """Log an error. This is called when a request cannot be fulfilled. By default it passes the message on to log_message(). 
Arguments are the same as for log_message(). XXX This should go to the separate error log. """ self.log_message(format, *args) def log_message(self, format, *args): """Log an arbitrary message. This is used by all other logging functions. Override it if you have specific logging wishes. The first argument, FORMAT, is a format string for the message to be logged. If the format string contains any % escapes requiring parameters, they should be specified as subsequent arguments (it's just like printf!). The client host and current date/time are prefixed to every message. """ sys.stderr.write("%s - - [%s] %s\n" % (self.address_string(), self.log_date_time_string(), format%args)) def version_string(self): """Return the server software version string.""" return self.server_version + ' ' + self.sys_version def date_time_string(self, timestamp=None): """Return the current date and time formatted for a message header.""" if timestamp is None: timestamp = time.time() year, month, day, hh, mm, ss, wd, y, z = time.gmtime(timestamp) s = "%s, %02d %3s %4d %02d:%02d:%02d GMT" % ( self.weekdayname[wd], day, self.monthname[month], year, hh, mm, ss) return s def log_date_time_string(self): """Return the current time formatted for logging.""" now = time.time() year, month, day, hh, mm, ss, x, y, z = time.localtime(now) s = "%02d/%3s/%04d %02d:%02d:%02d" % ( day, self.monthname[month], year, hh, mm, ss) return s weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'] monthname = [None, 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'] def address_string(self): """Return the client address formatted for logging. This version looks up the full hostname using gethostbyaddr(), and tries to find a name that contains at least one dot. """ host, port = self.client_address[:2] return socket.getfqdn(host) # Essentially static class variables # The version of the HTTP protocol we support. 
# Set this to HTTP/1.1 to enable automatic keepalive protocol_version = "HTTP/1.0" # The Message-like class used to parse headers MessageClass = mimetools.Message # Table mapping response codes to messages; entries have the # form {code: (shortmessage, longmessage)}. # See RFC 2616. responses = { 100: ('Continue', 'Request received, please continue'), 101: ('Switching Protocols', 'Switching to new protocol; obey Upgrade header'), 200: ('OK', 'Request fulfilled, document follows'), 201: ('Created', 'Document created, URL follows'), 202: ('Accepted', 'Request accepted, processing continues off-line'), 203: ('Non-Authoritative Information', 'Request fulfilled from cache'), 204: ('No Content', 'Request fulfilled, nothing follows'), 205: ('Reset Content', 'Clear input form for further input.'), 206: ('Partial Content', 'Partial content follows.'), 300: ('Multiple Choices', 'Object has several resources -- see URI list'), 301: ('Moved Permanently', 'Object moved permanently -- see URI list'), 302: ('Found', 'Object moved temporarily -- see URI list'), 303: ('See Other', 'Object moved -- see Method and URL list'), 304: ('Not Modified', 'Document has not changed since given time'), 305: ('Use Proxy', 'You must use proxy specified in Location to access this ' 'resource.'), 307: ('Temporary Redirect', 'Object moved temporarily -- see URI list'), 400: ('Bad Request', 'Bad request syntax or unsupported method'), 401: ('Unauthorized', 'No permission -- see authorization schemes'), 402: ('Payment Required', 'No payment -- see charging schemes'), 403: ('Forbidden', 'Request forbidden -- authorization will not help'), 404: ('Not Found', 'Nothing matches the given URI'), 405: ('Method Not Allowed', 'Specified method is invalid for this resource.'), 406: ('Not Acceptable', 'URI not available in preferred format.'), 407: ('Proxy Authentication Required', 'You must authenticate with ' 'this proxy before proceeding.'), 408: ('Request Timeout', 'Request timed out; try again later.'), 
409: ('Conflict', 'Request conflict.'), 410: ('Gone', 'URI no longer exists and has been permanently removed.'), 411: ('Length Required', 'Client must specify Content-Length.'), 412: ('Precondition Failed', 'Precondition in headers is false.'), 413: ('Request Entity Too Large', 'Entity is too large.'), 414: ('Request-URI Too Long', 'URI is too long.'), 415: ('Unsupported Media Type', 'Entity body in unsupported format.'), 416: ('Requested Range Not Satisfiable', 'Cannot satisfy request range.'), 417: ('Expectation Failed', 'Expect condition could not be satisfied.'), 500: ('Internal Server Error', 'Server got itself in trouble'), 501: ('Not Implemented', 'Server does not support this operation'), 502: ('Bad Gateway', 'Invalid responses from another server/proxy.'), 503: ('Service Unavailable', 'The server cannot process the request due to a high load'), 504: ('Gateway Timeout', 'The gateway server did not receive a timely response'), 505: ('HTTP Version Not Supported', 'Cannot fulfill request.'), } def test(HandlerClass = BaseHTTPRequestHandler, ServerClass = HTTPServer, protocol="HTTP/1.0"): """Test the HTTP request handler class. This runs an HTTP server on port 8000 (or the first command line argument). """ if sys.argv[1:]: port = int(sys.argv[1]) else: port = 8000 server_address = ('', port) HandlerClass.protocol_version = protocol httpd = ServerClass(server_address, HandlerClass) sa = httpd.socket.getsockname() print ("Serving HTTP on", sa[0], "port", sa[1], "...") httpd.serve_forever() if __name__ == '__main__': test()
apache-2.0
flyfei/python-for-android
python3-alpha/python3-src/Doc/tools/sphinxext/pyspecific.py
45
9791
# -*- coding: utf-8 -*- """ pyspecific.py ~~~~~~~~~~~~~ Sphinx extension with Python doc-specific markup. :copyright: 2008, 2009, 2010 by Georg Brandl. :license: Python license. """ ISSUE_URI = 'http://bugs.python.org/issue%s' SOURCE_URI = 'http://hg.python.org/cpython/file/3.2/%s' from docutils import nodes, utils from sphinx.util.nodes import split_explicit_title # monkey-patch reST parser to disable alphabetic and roman enumerated lists from docutils.parsers.rst.states import Body Body.enum.converters['loweralpha'] = \ Body.enum.converters['upperalpha'] = \ Body.enum.converters['lowerroman'] = \ Body.enum.converters['upperroman'] = lambda x: None # monkey-patch HTML translator to give versionmodified paragraphs a class def new_visit_versionmodified(self, node): self.body.append(self.starttag(node, 'p', CLASS=node['type'])) text = versionlabels[node['type']] % node['version'] if len(node): text += ': ' else: text += '.' self.body.append('<span class="versionmodified">%s</span>' % text) from sphinx.writers.html import HTMLTranslator from sphinx.locale import versionlabels HTMLTranslator.visit_versionmodified = new_visit_versionmodified # Support for marking up and linking to bugs.python.org issues def issue_role(typ, rawtext, text, lineno, inliner, options={}, content=[]): issue = utils.unescape(text) text = 'issue ' + issue refnode = nodes.reference(text, text, refuri=ISSUE_URI % issue) return [refnode], [] # Support for linking to Python source files easily def source_role(typ, rawtext, text, lineno, inliner, options={}, content=[]): has_t, title, target = split_explicit_title(text) title = utils.unescape(title) target = utils.unescape(target) refnode = nodes.reference(title, title, refuri=SOURCE_URI % target) return [refnode], [] # Support for marking up implementation details from sphinx.util.compat import Directive class ImplementationDetail(Directive): has_content = True required_arguments = 0 optional_arguments = 1 final_argument_whitespace = True def 
run(self): pnode = nodes.compound(classes=['impl-detail']) content = self.content add_text = nodes.strong('CPython implementation detail:', 'CPython implementation detail:') if self.arguments: n, m = self.state.inline_text(self.arguments[0], self.lineno) pnode.append(nodes.paragraph('', '', *(n + m))) self.state.nested_parse(content, self.content_offset, pnode) if pnode.children and isinstance(pnode[0], nodes.paragraph): pnode[0].insert(0, add_text) pnode[0].insert(1, nodes.Text(' ')) else: pnode.insert(0, nodes.paragraph('', '', add_text)) return [pnode] # Support for documenting decorators from sphinx import addnodes from sphinx.domains.python import PyModulelevel, PyClassmember class PyDecoratorMixin(object): def handle_signature(self, sig, signode): ret = super(PyDecoratorMixin, self).handle_signature(sig, signode) signode.insert(0, addnodes.desc_addname('@', '@')) return ret def needs_arglist(self): return False class PyDecoratorFunction(PyDecoratorMixin, PyModulelevel): def run(self): # a decorator function is a function after all self.name = 'py:function' return PyModulelevel.run(self) class PyDecoratorMethod(PyDecoratorMixin, PyClassmember): def run(self): self.name = 'py:method' return PyClassmember.run(self) # Support for documenting version of removal in deprecations from sphinx.locale import versionlabels from sphinx.util.compat import Directive versionlabels['deprecated-removed'] = \ 'Deprecated since version %s, will be removed in version %s' class DeprecatedRemoved(Directive): has_content = True required_arguments = 2 optional_arguments = 1 final_argument_whitespace = True option_spec = {} def run(self): node = addnodes.versionmodified() node.document = self.state.document node['type'] = 'deprecated-removed' version = (self.arguments[0], self.arguments[1]) node['version'] = version if len(self.arguments) == 3: inodes, messages = self.state.inline_text(self.arguments[2], self.lineno+1) node.extend(inodes) if self.content: 
self.state.nested_parse(self.content, self.content_offset, node) ret = [node] + messages else: ret = [node] env = self.state.document.settings.env env.note_versionchange('deprecated', version[0], node, self.lineno) return ret # Support for building "topic help" for pydoc pydoc_topic_labels = [ 'assert', 'assignment', 'atom-identifiers', 'atom-literals', 'attribute-access', 'attribute-references', 'augassign', 'binary', 'bitwise', 'bltin-code-objects', 'bltin-ellipsis-object', 'bltin-null-object', 'bltin-type-objects', 'booleans', 'break', 'callable-types', 'calls', 'class', 'comparisons', 'compound', 'context-managers', 'continue', 'conversions', 'customization', 'debugger', 'del', 'dict', 'dynamic-features', 'else', 'exceptions', 'execmodel', 'exprlists', 'floating', 'for', 'formatstrings', 'function', 'global', 'id-classes', 'identifiers', 'if', 'imaginary', 'import', 'in', 'integers', 'lambda', 'lists', 'naming', 'nonlocal', 'numbers', 'numeric-types', 'objects', 'operator-summary', 'pass', 'power', 'raise', 'return', 'sequence-types', 'shifting', 'slicings', 'specialattrs', 'specialnames', 'string-methods', 'strings', 'subscriptions', 'truth', 'try', 'types', 'typesfunctions', 'typesmapping', 'typesmethods', 'typesmodules', 'typesseq', 'typesseq-mutable', 'unary', 'while', 'with', 'yield' ] from os import path from time import asctime from pprint import pformat from docutils.io import StringOutput from docutils.utils import new_document from sphinx.builders import Builder from sphinx.writers.text import TextWriter class PydocTopicsBuilder(Builder): name = 'pydoc-topics' def init(self): self.topics = {} def get_outdated_docs(self): return 'all pydoc topics' def get_target_uri(self, docname, typ=None): return '' # no URIs def write(self, *ignored): writer = TextWriter(self) for label in self.status_iterator(pydoc_topic_labels, 'building topics... 
', length=len(pydoc_topic_labels)): if label not in self.env.domaindata['std']['labels']: self.warn('label %r not in documentation' % label) continue docname, labelid, sectname = self.env.domaindata['std']['labels'][label] doctree = self.env.get_and_resolve_doctree(docname, self) document = new_document('<section node>') document.append(doctree.ids[labelid]) destination = StringOutput(encoding='utf-8') writer.write(document, destination) self.topics[label] = str(writer.output) def finish(self): f = open(path.join(self.outdir, 'topics.py'), 'w') try: f.write('# Autogenerated by Sphinx on %s\n' % asctime()) f.write('topics = ' + pformat(self.topics) + '\n') finally: f.close() # Support for checking for suspicious markup import suspicious # Support for documenting Opcodes import re opcode_sig_re = re.compile(r'(\w+(?:\+\d)?)(?:\s*\((.*)\))?') def parse_opcode_signature(env, sig, signode): """Transform an opcode signature into RST nodes.""" m = opcode_sig_re.match(sig) if m is None: raise ValueError opname, arglist = m.groups() signode += addnodes.desc_name(opname, opname) if arglist is not None: paramlist = addnodes.desc_parameterlist() signode += paramlist paramlist += addnodes.desc_parameter(arglist, arglist) return opname.strip() # Support for documenting pdb commands pdbcmd_sig_re = re.compile(r'([a-z()!]+)\s*(.*)') # later... 
#pdbargs_tokens_re = re.compile(r'''[a-zA-Z]+ | # identifiers # [.,:]+ | # punctuation # [\[\]()] | # parens # \s+ # whitespace # ''', re.X) def parse_pdb_command(env, sig, signode): """Transform a pdb command signature into RST nodes.""" m = pdbcmd_sig_re.match(sig) if m is None: raise ValueError name, args = m.groups() fullname = name.replace('(', '').replace(')', '') signode += addnodes.desc_name(name, name) if args: signode += addnodes.desc_addname(' '+args, ' '+args) return fullname def setup(app): app.add_role('issue', issue_role) app.add_role('source', source_role) app.add_directive('impl-detail', ImplementationDetail) app.add_directive('deprecated-removed', DeprecatedRemoved) app.add_builder(PydocTopicsBuilder) app.add_builder(suspicious.CheckSuspiciousMarkupBuilder) app.add_description_unit('opcode', 'opcode', '%s (opcode)', parse_opcode_signature) app.add_description_unit('pdbcommand', 'pdbcmd', '%s (pdb command)', parse_pdb_command) app.add_description_unit('2to3fixer', '2to3fixer', '%s (2to3 fixer)') app.add_directive_to_domain('py', 'decorator', PyDecoratorFunction) app.add_directive_to_domain('py', 'decoratormethod', PyDecoratorMethod)
apache-2.0
kobejean/tensorflow
tensorflow/python/data/kernel_tests/optional_ops_test.py
9
10950
# Copyright 2018 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for the Optional data type wrapper.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl.testing import parameterized import numpy as np from tensorflow.python.data.kernel_tests import test_base from tensorflow.python.data.ops import dataset_ops from tensorflow.python.data.ops import iterator_ops from tensorflow.python.data.ops import optional_ops from tensorflow.python.data.util import structure from tensorflow.python.framework import constant_op from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import ops from tensorflow.python.framework import sparse_tensor from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.platform import test class OptionalTest(test_base.DatasetTestBase, parameterized.TestCase): @test_util.run_in_graph_and_eager_modes def testFromValue(self): opt = optional_ops.Optional.from_value(constant_op.constant(37.0)) self.assertTrue(self.evaluate(opt.has_value())) self.assertEqual(37.0, self.evaluate(opt.get_value())) @test_util.run_in_graph_and_eager_modes def testFromStructuredValue(self): opt = 
optional_ops.Optional.from_value({ "a": constant_op.constant(37.0), "b": (constant_op.constant(["Foo"]), constant_op.constant("Bar")) }) self.assertTrue(self.evaluate(opt.has_value())) self.assertEqual({ "a": 37.0, "b": ([b"Foo"], b"Bar") }, self.evaluate(opt.get_value())) @test_util.run_in_graph_and_eager_modes def testFromSparseTensor(self): st_0 = sparse_tensor.SparseTensorValue( indices=np.array([[0]]), values=np.array([0], dtype=np.int64), dense_shape=np.array([1])) st_1 = sparse_tensor.SparseTensorValue( indices=np.array([[0, 0], [1, 1]]), values=np.array([-1., 1.], dtype=np.float32), dense_shape=np.array([2, 2])) opt = optional_ops.Optional.from_value((st_0, st_1)) self.assertTrue(self.evaluate(opt.has_value())) val_0, val_1 = opt.get_value() for expected, actual in [(st_0, val_0), (st_1, val_1)]: self.assertAllEqual(expected.indices, self.evaluate(actual.indices)) self.assertAllEqual(expected.values, self.evaluate(actual.values)) self.assertAllEqual(expected.dense_shape, self.evaluate(actual.dense_shape)) @test_util.run_in_graph_and_eager_modes def testFromNone(self): value_structure = structure.TensorStructure(dtypes.float32, []) opt = optional_ops.Optional.none_from_structure(value_structure) self.assertTrue(opt.value_structure.is_compatible_with(value_structure)) self.assertFalse( opt.value_structure.is_compatible_with( structure.TensorStructure(dtypes.float32, [1]))) self.assertFalse( opt.value_structure.is_compatible_with( structure.TensorStructure(dtypes.int32, []))) self.assertFalse(self.evaluate(opt.has_value())) with self.assertRaises(errors.InvalidArgumentError): self.evaluate(opt.get_value()) @test_util.run_in_graph_and_eager_modes def testCopyToGPU(self): if not test_util.is_gpu_available(): self.skipTest("No GPU available") with ops.device("/cpu:0"): optional_with_value = optional_ops.Optional.from_value( (constant_op.constant(37.0), constant_op.constant("Foo"), constant_op.constant(42))) optional_none = 
optional_ops.Optional.none_from_structure( structure.TensorStructure(dtypes.float32, [])) with ops.device("/gpu:0"): gpu_optional_with_value = optional_ops._OptionalImpl( array_ops.identity(optional_with_value._variant_tensor), optional_with_value.value_structure) gpu_optional_none = optional_ops._OptionalImpl( array_ops.identity(optional_none._variant_tensor), optional_none.value_structure) gpu_optional_with_value_has_value = gpu_optional_with_value.has_value() gpu_optional_with_value_values = gpu_optional_with_value.get_value() gpu_optional_none_has_value = gpu_optional_none.has_value() self.assertTrue(self.evaluate(gpu_optional_with_value_has_value)) self.assertEqual((37.0, b"Foo", 42), self.evaluate(gpu_optional_with_value_values)) self.assertFalse(self.evaluate(gpu_optional_none_has_value)) def _assertElementValueEqual(self, expected, actual): if isinstance(expected, dict): self.assertItemsEqual(list(expected.keys()), list(actual.keys())) for k in expected.keys(): self._assertElementValueEqual(expected[k], actual[k]) elif isinstance(expected, sparse_tensor.SparseTensorValue): self.assertAllEqual(expected.indices, actual.indices) self.assertAllEqual(expected.values, actual.values) self.assertAllEqual(expected.dense_shape, actual.dense_shape) else: self.assertAllEqual(expected, actual) # pylint: disable=g-long-lambda @parameterized.named_parameters( ("Tensor", lambda: constant_op.constant(37.0), structure.TensorStructure(dtypes.float32, [])), ("SparseTensor", lambda: sparse_tensor.SparseTensor( indices=[[0]], values=constant_op.constant([0], dtype=dtypes.int32), dense_shape=[1]), structure.SparseTensorStructure(dtypes.int32, [1])), ("Nest", lambda: { "a": constant_op.constant(37.0), "b": (constant_op.constant(["Foo"]), constant_op.constant("Bar"))}, structure.NestedStructure({ "a": structure.TensorStructure(dtypes.float32, []), "b": (structure.TensorStructure(dtypes.string, [1]), structure.TensorStructure(dtypes.string, []))})), ("Optional", lambda: 
optional_ops.Optional.from_value(37.0), optional_ops.OptionalStructure( structure.TensorStructure(dtypes.float32, []))), ) def testOptionalStructure(self, tf_value_fn, expected_value_structure): tf_value = tf_value_fn() opt = optional_ops.Optional.from_value(tf_value) self.assertTrue( expected_value_structure.is_compatible_with(opt.value_structure)) self.assertTrue( opt.value_structure.is_compatible_with(expected_value_structure)) opt_structure = structure.Structure.from_value(opt) self.assertIsInstance(opt_structure, optional_ops.OptionalStructure) self.assertTrue(opt_structure.is_compatible_with(opt_structure)) self.assertTrue(opt_structure._value_structure.is_compatible_with( expected_value_structure)) self.assertEqual([dtypes.variant], opt_structure._flat_types) self.assertEqual([tensor_shape.scalar()], opt_structure._flat_shapes) # All OptionalStructure objects are not compatible with a non-optional # value. non_optional_structure = structure.Structure.from_value( constant_op.constant(42.0)) self.assertFalse(opt_structure.is_compatible_with(non_optional_structure)) # Assert that the optional survives a round-trip via _from_tensor_list() # and _to_tensor_list(). 
round_trip_opt = opt_structure._from_tensor_list( opt_structure._to_tensor_list(opt)) if isinstance(tf_value, optional_ops.Optional): self.assertEqual( self.evaluate(tf_value.get_value()), self.evaluate(round_trip_opt.get_value().get_value())) else: self.assertEqual( self.evaluate(tf_value), self.evaluate(round_trip_opt.get_value())) @parameterized.named_parameters( ("Tensor", np.array([1, 2, 3], dtype=np.int32), lambda: constant_op.constant([4, 5, 6], dtype=dtypes.int32), True), ("SparseTensor", sparse_tensor.SparseTensorValue( indices=[[0, 0], [1, 1]], values=np.array([-1., 1.], dtype=np.float32), dense_shape=[2, 2]), lambda: sparse_tensor.SparseTensor( indices=[[0, 1], [1, 0]], values=[37.0, 42.0], dense_shape=[2, 2]), False), ("Nest", {"a": np.array([1, 2, 3], dtype=np.int32), "b": sparse_tensor.SparseTensorValue( indices=[[0, 0], [1, 1]], values=np.array([-1., 1.], dtype=np.float32), dense_shape=[2, 2])}, lambda: {"a": constant_op.constant([4, 5, 6], dtype=dtypes.int32), "b": sparse_tensor.SparseTensor( indices=[[0, 1], [1, 0]], values=[37.0, 42.0], dense_shape=[2, 2])}, False), ) def testIteratorGetNextAsOptional(self, np_value, tf_value_fn, works_on_gpu): if not works_on_gpu and test.is_gpu_available(): self.skipTest("Test case not yet supported on GPU.") ds = dataset_ops.Dataset.from_tensors(np_value).repeat(3) iterator = ds.make_initializable_iterator() next_elem = iterator_ops.get_next_as_optional(iterator) self.assertIsInstance(next_elem, optional_ops.Optional) self.assertTrue( next_elem.value_structure.is_compatible_with( structure.Structure.from_value(tf_value_fn()))) elem_has_value_t = next_elem.has_value() elem_value_t = next_elem.get_value() with self.cached_session() as sess: # Before initializing the iterator, evaluating the optional fails with # a FailedPreconditionError. 
with self.assertRaises(errors.FailedPreconditionError): sess.run(elem_has_value_t) with self.assertRaises(errors.FailedPreconditionError): sess.run(elem_value_t) # For each element of the dataset, assert that the optional evaluates to # the expected value. sess.run(iterator.initializer) for _ in range(3): elem_has_value, elem_value = sess.run([elem_has_value_t, elem_value_t]) self.assertTrue(elem_has_value) self._assertElementValueEqual(np_value, elem_value) # After exhausting the iterator, `next_elem.has_value()` will evaluate to # false, and attempting to get the value will fail. for _ in range(2): self.assertFalse(sess.run(elem_has_value_t)) with self.assertRaises(errors.InvalidArgumentError): sess.run(elem_value_t) if __name__ == "__main__": test.main()
apache-2.0
DmitryDmitrienko/kube-form-editor
editorform/editor/models.py
1
1398
from django.db import models from django.contrib.auth.models import User class FormModel(models.Model): name = models.CharField(max_length=60, verbose_name=u'name form') user = models.ForeignKey(User, verbose_name=u'user form') created = models.DateField(auto_now_add=True, verbose_name=u'date created') description = models.TextField(verbose_name=u'description of form') def get_absolute_url(self): from django.core.urlresolvers import reverse return reverse('form', args=(self.id, )) def __unicode__(self): return self.name class ElementForm(models.Model): type_element = models.CharField(max_length=20, verbose_name=u'type element form') label = models.CharField(max_length=60, verbose_name=u'label element') description = models.CharField(max_length=120, verbose_name=u'description element') width = models.IntegerField(verbose_name=u'width element') name = models.CharField(max_length=20, verbose_name=u'name element') type_input = models.CharField(max_length=20, verbose_name=u'type input', default='-') options = models.TextField(verbose_name=u'options select') number = models.IntegerField(verbose_name=u'number element', default=-1) form = models.ForeignKey(FormModel, verbose_name=u'form of element') def __unicode__(self): return u'%s type: %s' % (self.form.name, self.type_element)
gpl-2.0
yd0str/infernal-twin
build/pillow/build/lib.linux-i686-2.7/PIL/CurImagePlugin.py
52
1943
# # The Python Imaging Library. # $Id$ # # Windows Cursor support for PIL # # notes: # uses BmpImagePlugin.py to read the bitmap data. # # history: # 96-05-27 fl Created # # Copyright (c) Secret Labs AB 1997. # Copyright (c) Fredrik Lundh 1996. # # See the README file for information on usage and redistribution. # __version__ = "0.1" from PIL import Image, BmpImagePlugin, _binary # # -------------------------------------------------------------------- i8 = _binary.i8 i16 = _binary.i16le i32 = _binary.i32le def _accept(prefix): return prefix[:4] == b"\0\0\2\0" ## # Image plugin for Windows Cursor files. class CurImageFile(BmpImagePlugin.BmpImageFile): format = "CUR" format_description = "Windows Cursor" def _open(self): offset = self.fp.tell() # check magic s = self.fp.read(6) if not _accept(s): raise SyntaxError("not a CUR file") # pick the largest cursor in the file m = b"" for i in range(i16(s[4:])): s = self.fp.read(16) if not m: m = s elif i8(s[0]) > i8(m[0]) and i8(s[1]) > i8(m[1]): m = s # print "width", i8(s[0]) # print "height", i8(s[1]) # print "colors", i8(s[2]) # print "reserved", i8(s[3]) # print "hotspot x", i16(s[4:]) # print "hotspot y", i16(s[6:]) # print "bytes", i32(s[8:]) # print "offset", i32(s[12:]) # load as bitmap self._bitmap(i32(m[12:]) + offset) # patch up the bitmap height self.size = self.size[0], self.size[1]//2 d, e, o, a = self.tile[0] self.tile[0] = d, (0, 0)+self.size, o, a return # # -------------------------------------------------------------------- Image.register_open("CUR", CurImageFile, _accept) Image.register_extension("CUR", ".cur")
gpl-3.0
ax003d/openerp
openerp/addons/point_of_sale/wizard/pos_discount.py
55
2903
# -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp.osv import osv, fields class pos_discount(osv.osv_memory): _name = 'pos.discount' _description = 'Add a Global Discount' _columns = { 'discount': fields.float('Discount (%)', required=True, digits=(16,2)), } _defaults = { 'discount': 5, } # def view_init(self, cr, uid, fields_list, context=None): # """ # Creates view dynamically and adding fields at runtime. # @param self: The object pointer. # @param cr: A database cursor # @param uid: ID of the user currently logged in # @param context: A standard dictionary # @return: New arch of view with new columns. # """ # if context is None: # context = {} # super(pos_discount, self).view_init(cr, uid, fields_list, context=context) # record_id = context and context.get('active_id', False) or False # True def apply_discount(self, cr, uid, ids, context=None): """ To give the discount of product and check the. @param self: The object pointer. 
@param cr: A database cursor @param uid: ID of the user currently logged in @param context: A standard dictionary @return : nothing """ order_ref = self.pool.get('pos.order') order_line_ref = self.pool.get('pos.order.line') if context is None: context = {} this = self.browse(cr, uid, ids[0], context=context) record_id = context and context.get('active_id', False) if isinstance(record_id, (int, long)): record_id = [record_id] for order in order_ref.browse(cr, uid, record_id, context=context): order_line_ref.write(cr, uid, [x.id for x in order.lines], {'discount':this.discount}, context=context) return {} pos_discount() # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
abzaloid/kazakh-story-generator
webserver/lib/werkzeug/local.py
107
14553
# -*- coding: utf-8 -*- """ werkzeug.local ~~~~~~~~~~~~~~ This module implements context-local objects. :copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details. :license: BSD, see LICENSE for more details. """ import copy from functools import update_wrapper from werkzeug.wsgi import ClosingIterator from werkzeug._compat import PY2, implements_bool # since each thread has its own greenlet we can just use those as identifiers # for the context. If greenlets are not available we fall back to the # current thread ident depending on where it is. try: from greenlet import getcurrent as get_ident except ImportError: try: from thread import get_ident except ImportError: from _thread import get_ident def release_local(local): """Releases the contents of the local for the current context. This makes it possible to use locals without a manager. Example:: >>> loc = Local() >>> loc.foo = 42 >>> release_local(loc) >>> hasattr(loc, 'foo') False With this function one can release :class:`Local` objects as well as :class:`LocalStack` objects. However it is not possible to release data held by proxies that way, one always has to retain a reference to the underlying local object in order to be able to release it. .. 
versionadded:: 0.6.1 """ local.__release_local__() class Local(object): __slots__ = ('__storage__', '__ident_func__') def __init__(self): object.__setattr__(self, '__storage__', {}) object.__setattr__(self, '__ident_func__', get_ident) def __iter__(self): return iter(self.__storage__.items()) def __call__(self, proxy): """Create a proxy for a name.""" return LocalProxy(self, proxy) def __release_local__(self): self.__storage__.pop(self.__ident_func__(), None) def __getattr__(self, name): try: return self.__storage__[self.__ident_func__()][name] except KeyError: raise AttributeError(name) def __setattr__(self, name, value): ident = self.__ident_func__() storage = self.__storage__ try: storage[ident][name] = value except KeyError: storage[ident] = {name: value} def __delattr__(self, name): try: del self.__storage__[self.__ident_func__()][name] except KeyError: raise AttributeError(name) class LocalStack(object): """This class works similar to a :class:`Local` but keeps a stack of objects instead. This is best explained with an example:: >>> ls = LocalStack() >>> ls.push(42) >>> ls.top 42 >>> ls.push(23) >>> ls.top 23 >>> ls.pop() 23 >>> ls.top 42 They can be force released by using a :class:`LocalManager` or with the :func:`release_local` function but the correct way is to pop the item from the stack after using. When the stack is empty it will no longer be bound to the current context (and as such released). By calling the stack without arguments it returns a proxy that resolves to the topmost item on the stack. .. 
versionadded:: 0.6.1 """ def __init__(self): self._local = Local() def __release_local__(self): self._local.__release_local__() def _get__ident_func__(self): return self._local.__ident_func__ def _set__ident_func__(self, value): object.__setattr__(self._local, '__ident_func__', value) __ident_func__ = property(_get__ident_func__, _set__ident_func__) del _get__ident_func__, _set__ident_func__ def __call__(self): def _lookup(): rv = self.top if rv is None: raise RuntimeError('object unbound') return rv return LocalProxy(_lookup) def push(self, obj): """Pushes a new item to the stack""" rv = getattr(self._local, 'stack', None) if rv is None: self._local.stack = rv = [] rv.append(obj) return rv def pop(self): """Removes the topmost item from the stack, will return the old value or `None` if the stack was already empty. """ stack = getattr(self._local, 'stack', None) if stack is None: return None elif len(stack) == 1: release_local(self._local) return stack[-1] else: return stack.pop() @property def top(self): """The topmost item on the stack. If the stack is empty, `None` is returned. """ try: return self._local.stack[-1] except (AttributeError, IndexError): return None class LocalManager(object): """Local objects cannot manage themselves. For that you need a local manager. You can pass a local manager multiple locals or add them later by appending them to `manager.locals`. Every time the manager cleans up, it will clean up all the data left in the locals for this context. The `ident_func` parameter can be added to override the default ident function for the wrapped locals. .. versionchanged:: 0.6.1 Instead of a manager the :func:`release_local` function can be used as well. .. versionchanged:: 0.7 `ident_func` was added. 
""" def __init__(self, locals=None, ident_func=None): if locals is None: self.locals = [] elif isinstance(locals, Local): self.locals = [locals] else: self.locals = list(locals) if ident_func is not None: self.ident_func = ident_func for local in self.locals: object.__setattr__(local, '__ident_func__', ident_func) else: self.ident_func = get_ident def get_ident(self): """Return the context identifier the local objects use internally for this context. You cannot override this method to change the behavior but use it to link other context local objects (such as SQLAlchemy's scoped sessions) to the Werkzeug locals. .. versionchanged:: 0.7 You can pass a different ident function to the local manager that will then be propagated to all the locals passed to the constructor. """ return self.ident_func() def cleanup(self): """Manually clean up the data in the locals for this context. Call this at the end of the request or use `make_middleware()`. """ for local in self.locals: release_local(local) def make_middleware(self, app): """Wrap a WSGI application so that cleaning up happens after request end. """ def application(environ, start_response): return ClosingIterator(app(environ, start_response), self.cleanup) return application def middleware(self, func): """Like `make_middleware` but for decorating functions. Example usage:: @manager.middleware def application(environ, start_response): ... The difference to `make_middleware` is that the function passed will have all the arguments copied from the inner application (name, docstring, module). """ return update_wrapper(self.make_middleware(func), func) def __repr__(self): return '<%s storages: %d>' % ( self.__class__.__name__, len(self.locals) ) @implements_bool class LocalProxy(object): """Acts as a proxy for a werkzeug local. Forwards all operations to a proxied object. The only operations not supported for forwarding are right handed operands and any kind of assignment. 
Example usage:: from werkzeug.local import Local l = Local() # these are proxies request = l('request') user = l('user') from werkzeug.local import LocalStack _response_local = LocalStack() # this is a proxy response = _response_local() Whenever something is bound to l.user / l.request the proxy objects will forward all operations. If no object is bound a :exc:`RuntimeError` will be raised. To create proxies to :class:`Local` or :class:`LocalStack` objects, call the object as shown above. If you want to have a proxy to an object looked up by a function, you can (as of Werkzeug 0.6.1) pass a function to the :class:`LocalProxy` constructor:: session = LocalProxy(lambda: get_current_request().session) .. versionchanged:: 0.6.1 The class can be instantiated with a callable as well now. """ __slots__ = ('__local', '__dict__', '__name__', '__wrapped__') def __init__(self, local, name=None): object.__setattr__(self, '_LocalProxy__local', local) object.__setattr__(self, '__name__', name) if callable(local) and not hasattr(local, '__release_local__'): # "local" is a callable that is not an instance of Local or # LocalManager: mark it as a wrapped function. object.__setattr__(self, '__wrapped__', local) def _get_current_object(self): """Return the current object. This is useful if you want the real object behind the proxy at a time for performance reasons or because you want to pass the object into a different context. 
""" if not hasattr(self.__local, '__release_local__'): return self.__local() try: return getattr(self.__local, self.__name__) except AttributeError: raise RuntimeError('no object bound to %s' % self.__name__) @property def __dict__(self): try: return self._get_current_object().__dict__ except RuntimeError: raise AttributeError('__dict__') def __repr__(self): try: obj = self._get_current_object() except RuntimeError: return '<%s unbound>' % self.__class__.__name__ return repr(obj) def __bool__(self): try: return bool(self._get_current_object()) except RuntimeError: return False def __unicode__(self): try: return unicode(self._get_current_object()) # noqa except RuntimeError: return repr(self) def __dir__(self): try: return dir(self._get_current_object()) except RuntimeError: return [] def __getattr__(self, name): if name == '__members__': return dir(self._get_current_object()) return getattr(self._get_current_object(), name) def __setitem__(self, key, value): self._get_current_object()[key] = value def __delitem__(self, key): del self._get_current_object()[key] if PY2: __getslice__ = lambda x, i, j: x._get_current_object()[i:j] def __setslice__(self, i, j, seq): self._get_current_object()[i:j] = seq def __delslice__(self, i, j): del self._get_current_object()[i:j] __setattr__ = lambda x, n, v: setattr(x._get_current_object(), n, v) __delattr__ = lambda x, n: delattr(x._get_current_object(), n) __str__ = lambda x: str(x._get_current_object()) __lt__ = lambda x, o: x._get_current_object() < o __le__ = lambda x, o: x._get_current_object() <= o __eq__ = lambda x, o: x._get_current_object() == o __ne__ = lambda x, o: x._get_current_object() != o __gt__ = lambda x, o: x._get_current_object() > o __ge__ = lambda x, o: x._get_current_object() >= o __cmp__ = lambda x, o: cmp(x._get_current_object(), o) # noqa __hash__ = lambda x: hash(x._get_current_object()) __call__ = lambda x, *a, **kw: x._get_current_object()(*a, **kw) __len__ = lambda x: len(x._get_current_object()) 
__getitem__ = lambda x, i: x._get_current_object()[i] __iter__ = lambda x: iter(x._get_current_object()) __contains__ = lambda x, i: i in x._get_current_object() __add__ = lambda x, o: x._get_current_object() + o __sub__ = lambda x, o: x._get_current_object() - o __mul__ = lambda x, o: x._get_current_object() * o __floordiv__ = lambda x, o: x._get_current_object() // o __mod__ = lambda x, o: x._get_current_object() % o __divmod__ = lambda x, o: x._get_current_object().__divmod__(o) __pow__ = lambda x, o: x._get_current_object() ** o __lshift__ = lambda x, o: x._get_current_object() << o __rshift__ = lambda x, o: x._get_current_object() >> o __and__ = lambda x, o: x._get_current_object() & o __xor__ = lambda x, o: x._get_current_object() ^ o __or__ = lambda x, o: x._get_current_object() | o __div__ = lambda x, o: x._get_current_object().__div__(o) __truediv__ = lambda x, o: x._get_current_object().__truediv__(o) __neg__ = lambda x: -(x._get_current_object()) __pos__ = lambda x: +(x._get_current_object()) __abs__ = lambda x: abs(x._get_current_object()) __invert__ = lambda x: ~(x._get_current_object()) __complex__ = lambda x: complex(x._get_current_object()) __int__ = lambda x: int(x._get_current_object()) __long__ = lambda x: long(x._get_current_object()) # noqa __float__ = lambda x: float(x._get_current_object()) __oct__ = lambda x: oct(x._get_current_object()) __hex__ = lambda x: hex(x._get_current_object()) __index__ = lambda x: x._get_current_object().__index__() __coerce__ = lambda x, o: x._get_current_object().__coerce__(x, o) __enter__ = lambda x: x._get_current_object().__enter__() __exit__ = lambda x, *a, **kw: x._get_current_object().__exit__(*a, **kw) __radd__ = lambda x, o: o + x._get_current_object() __rsub__ = lambda x, o: o - x._get_current_object() __rmul__ = lambda x, o: o * x._get_current_object() __rdiv__ = lambda x, o: o / x._get_current_object() if PY2: __rtruediv__ = lambda x, o: x._get_current_object().__rtruediv__(o) else: __rtruediv__ = 
__rdiv__ __rfloordiv__ = lambda x, o: o // x._get_current_object() __rmod__ = lambda x, o: o % x._get_current_object() __rdivmod__ = lambda x, o: x._get_current_object().__rdivmod__(o) __copy__ = lambda x: copy.copy(x._get_current_object()) __deepcopy__ = lambda x, memo: copy.deepcopy(x._get_current_object(), memo)
mit
llhe/tensorflow
tensorflow/contrib/layers/python/layers/feature_column.py
12
104580
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """This API defines FeatureColumn abstraction. FeatureColumns provide a high level abstraction for ingesting and representing features in tf.learn Estimator models. FeatureColumns are the primary way of encoding features for pre-canned tf.learn Estimators. When using FeatureColumns with tf.learn models, the type of feature column you should choose depends on (1) the feature type and (2) the model type. (1) Feature type: * Continuous features can be represented by `real_valued_column`. * Categorical features can be represented by any `sparse_column_with_*` column (`sparse_column_with_keys`, `sparse_column_with_vocabulary_file`, `sparse_column_with_hash_bucket`, `sparse_column_with_integerized_feature`). (2) Model type: * Deep neural network models (`DNNClassifier`, `DNNRegressor`). Continuous features can be directly fed into deep neural network models. age_column = real_valued_column("age") To feed sparse features into DNN models, wrap the column with `embedding_column` or `one_hot_column`. `one_hot_column` will create a dense boolean tensor with an entry for each possible value, and thus the computation cost is linear in the number of possible values versus the number of values that occur in the sparse tensor. 
Thus using a "one_hot_column" is only recommended for features with only a few possible values. For features with many possible values or for very sparse features, `embedding_column` is recommended. embedded_dept_column = embedding_column( sparse_column_with_keys("department", ["math", "philosphy", ...]), dimension=10) * Wide (aka linear) models (`LinearClassifier`, `LinearRegressor`). Sparse features can be fed directly into linear models. When doing so an embedding_lookups are used to efficiently perform the sparse matrix multiplication. dept_column = sparse_column_with_keys("department", ["math", "philosophy", "english"]) It is recommended that continuous features be bucketized before being fed into linear models. bucketized_age_column = bucketized_column( source_column=age_column, boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65]) Sparse features can be crossed (also known as conjuncted or combined) in order to form non-linearities, and then fed into linear models. cross_dept_age_column = crossed_column( columns=[department_column, bucketized_age_column], hash_bucket_size=1000) Example of building tf.learn model using FeatureColumns: # Define features and transformations deep_feature_columns = [age_column, embedded_dept_column] wide_feature_columns = [dept_column, bucketized_age_column, cross_dept_age_column] # Build deep model estimator = DNNClassifier( feature_columns=deep_feature_columns, hidden_units=[500, 250, 50]) estimator.train(...) # Or build a wide model estimator = LinearClassifier( feature_columns=wide_feature_columns) estimator.train(...) # Or build a wide and deep model! estimator = DNNLinearCombinedClassifier( linear_feature_columns=wide_feature_columns, dnn_feature_columns=deep_feature_columns, dnn_hidden_units=[500, 250, 50]) estimator.train(...) FeatureColumns can also be transformed into a generic input layer for custom models using `input_from_feature_columns` within `feature_column_ops.py`. 
Example of building non-tf.learn model using FeatureColumns: # Building model via layers deep_feature_columns = [age_column, embedded_dept_column] columns_to_tensor = parse_feature_columns_from_examples( serialized=my_data, feature_columns=deep_feature_columns) first_layer = input_from_feature_columns( columns_to_tensors=columns_to_tensor, feature_columns=deep_feature_columns) second_layer = fully_connected(first_layer, ...) See feature_column_ops_test for more examples. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import abc import collections import math import six from tensorflow.contrib import lookup from tensorflow.contrib.framework.python.framework import checkpoint_utils from tensorflow.contrib.framework.python.framework import experimental from tensorflow.contrib.framework.python.ops import variables as contrib_variables from tensorflow.contrib.layers.python.layers import embedding_ops from tensorflow.contrib.layers.python.layers import layers from tensorflow.contrib.layers.python.ops import bucketization_op from tensorflow.contrib.layers.python.ops import sparse_feature_cross_op from tensorflow.contrib.layers.python.ops import sparse_ops as contrib_sparse_ops from tensorflow.python.feature_column import feature_column as fc_core from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import sparse_tensor as sparse_tensor_py from tensorflow.python.framework import tensor_shape from tensorflow.python.ops import array_ops from tensorflow.python.ops import init_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import parsing_ops from tensorflow.python.ops import resource_variable_ops from tensorflow.python.ops import sparse_ops from tensorflow.python.ops import string_ops from tensorflow.python.ops import variables from tensorflow.python.platform import tf_logging as logging from tensorflow.python.util 
import deprecation


class _LinearEmbeddingLookupArguments(
    collections.namedtuple("_LinearEmbeddingLookupArguments",
                           ["input_tensor",
                            "weight_tensor",
                            "vocab_size",
                            "initializer",
                            "combiner"])):
  """Represents the information needed from a column for embedding lookup.

  Used to compute DNN inputs and weighted sum.
  """
  pass


class _DeepEmbeddingLookupArguments(
    collections.namedtuple("_DeepEmbeddingLookupArguments",
                           ["input_tensor",
                            "weight_tensor",
                            "vocab_size",
                            "initializer",
                            "combiner",
                            "dimension",
                            "shared_embedding_name",
                            "hash_key",
                            "max_norm",
                            "trainable"])):
  """Represents the information needed from a column for embedding lookup.

  Used to compute DNN inputs and weighted sum.
  """
  pass


class _FeatureColumn(object):
  """Represents a feature column abstraction.

  To distinguish the concept of a feature family and a specific binary
  feature within a family, we refer to a feature family like "country" as a
  feature column. For example "country:US" is a feature which is in "country"
  feature column and has a feature value ("US").

  This class is an abstract class. User should not create one instance of
  this. Following classes (_SparseColumn, _RealValuedColumn, ...) are
  concrete instances.
  """
  # NOTE(review): Python 2 style metaclass declaration; it has no effect
  # under Python 3 — the abstract members below are not enforced there.
  __metaclass__ = abc.ABCMeta

  @abc.abstractproperty
  @deprecation.deprecated(
      "2016-09-25",
      "Should be private.")
  def name(self):
    """Returns the name of column or transformed column."""
    pass

  @abc.abstractproperty
  @deprecation.deprecated(
      "2016-09-25",
      "Should be private.")
  def config(self):
    """Returns configuration of the base feature for `tf.parse_example`."""
    pass

  @abc.abstractproperty
  @deprecation.deprecated(
      "2016-09-25",
      "Should be private.")
  def key(self):
    """Returns a string which will be used as a key when we do sorting."""
    pass

  @abc.abstractmethod
  @deprecation.deprecated(
      "2016-09-25",
      "Should be private.")
  def insert_transformed_feature(self, columns_to_tensors):
    """Apply transformation and inserts it into columns_to_tensors.

    Args:
      columns_to_tensors: A mapping from feature columns to tensors. 'string'
        key means a base feature (not-transformed). It can have _FeatureColumn
        as a key too. That means that _FeatureColumn is already transformed.
    """
    raise NotImplementedError("Transform is not implemented for {}.".format(
        self))

  # pylint: disable=unused-argument
  def _to_dnn_input_layer(self,
                          input_tensor,
                          weight_collection=None,
                          trainable=True,
                          output_rank=2):
    """Returns a Tensor as an input to the first layer of neural network."""
    raise ValueError("Calling an abstract method.")

  def _deep_embedding_lookup_arguments(self, input_tensor):
    """Returns arguments to embedding lookup to build an input layer."""
    raise NotImplementedError(
        "No deep embedding lookup arguments for column {}.".format(self))

  # It is expected that classes implement either wide_embedding_lookup_arguments
  # or to_dense_tensor to be used in linear models.
  # pylint: disable=unused-argument
  def _wide_embedding_lookup_arguments(self, input_tensor):
    """Returns arguments to look up embeddings for this column."""
    raise NotImplementedError(
        "No wide embedding lookup arguments for column {}.".format(self))

  # pylint: disable=unused-argument
  def _to_dense_tensor(self, input_tensor):
    """Returns a dense tensor representing this column's values."""
    raise NotImplementedError(
        "No dense tensor representation for column {}.".format(self))

  def _checkpoint_path(self):
    """Returns None, or a (path,tensor_name) to load a checkpoint from."""
    return None

  def _key_without_properties(self, properties):
    """Helper method for self.key() that omits particular properties."""
    fields_values = []
    # pylint: disable=protected-access
    for i, k in enumerate(self._fields):
      if k in properties:
        # Excludes a property from the key.
        # For instance, exclude `initializer` from the key of EmbeddingColumn
        # since we don't support users specifying different initializers for
        # the same embedding column. Ditto for `normalizer` and
        # RealValuedColumn.
        # Special treatment is needed since the default str form of a
        # function contains its address, which could introduce non-determinism
        # in sorting.
        continue
      fields_values.append("{}={}".format(k, self[i]))
    # pylint: enable=protected-access

    # This is effectively the same format as str(self), except with our special
    # treatment.
    return "{}({})".format(type(self).__name__, ", ".join(fields_values))


# TODO(b/30410315): Support warm starting in all feature columns.
class _SparseColumn(
    _FeatureColumn,
    fc_core._CategoricalColumn,  # pylint: disable=protected-access
    collections.namedtuple("_SparseColumn", [
        "column_name", "is_integerized", "bucket_size", "lookup_config",
        "combiner", "dtype"
    ])):
  """Represents a sparse feature column also known as categorical features.

  Instances of this class are immutable. A sparse column means features are
  sparse and dictionary returned by InputBuilder contains a
  ("column_name", SparseTensor) pair.
  One and only one of bucket_size or lookup_config should be set. If
  is_integerized is True then bucket_size should be set.

  Attributes:
    column_name: A string defining sparse column name.
    is_integerized: A bool if True means type of feature is an integer.
      Integerized means we can use the feature itself as id.
    bucket_size: An int that is > 0. The number of buckets.
    lookup_config: A _SparseIdLookupConfig defining feature-to-id lookup
      configuration
    combiner: A string specifying how to reduce if the sparse column is
      multivalent. Currently "mean", "sqrtn" and "sum" are supported, with
      "sum" the default. "sqrtn" often achieves good accuracy, in particular
      with bag-of-words columns.
        * "sum": do not normalize features in the column
        * "mean": do l1 normalization on features in the column
        * "sqrtn": do l2 normalization on features in the column
      For more information: `tf.embedding_lookup_sparse`.
    dtype: Type of features, either `tf.string` or `tf.int64`.

  Raises:
    TypeError: if lookup_config is not a _SparseIdLookupConfig.
    ValueError: if above expectations about input fails.
  """

  def __new__(cls,
              column_name,
              is_integerized=False,
              bucket_size=None,
              lookup_config=None,
              combiner="sum",
              dtype=dtypes.string):
    # Validate the mutually-exclusive bucket_size / lookup_config contract
    # before constructing the immutable namedtuple.
    if is_integerized and bucket_size is None:
      raise ValueError("bucket_size must be set if is_integerized is True. "
                       "column_name: {}".format(column_name))

    if is_integerized and not dtype.is_integer:
      raise ValueError("dtype must be an integer if is_integerized is True. "
                       "dtype: {}, column_name: {}.".format(dtype, column_name))
    if dtype != dtypes.string and not dtype.is_integer:
      raise ValueError("dtype must be string or integer. "
                       "dtype: {}, column_name: {}".format(dtype, column_name))

    if bucket_size is None and lookup_config is None:
      raise ValueError("one of bucket_size or lookup_config must be set. "
                       "column_name: {}".format(column_name))

    if bucket_size is not None and lookup_config:
      raise ValueError("one and only one of bucket_size or lookup_config "
                       "must be set. column_name: {}".format(column_name))

    if bucket_size is not None and bucket_size < 1:
      raise ValueError("bucket_size must be at least 1. "
                       "bucket_size: {}, column_name: {}".format(bucket_size,
                                                                 column_name))

    if ((lookup_config) and
        (not isinstance(lookup_config, _SparseIdLookupConfig))):
      raise TypeError(
          "lookup_config must be an instance of _SparseIdLookupConfig. "
          "Given one is in type {} for column_name {}".format(
              type(lookup_config), column_name))

    if (lookup_config and lookup_config.vocabulary_file and
        lookup_config.vocab_size is None):
      raise ValueError("vocab_size must be defined. "
                       "column_name: {}".format(column_name))

    return super(_SparseColumn, cls).__new__(
        cls,
        column_name,
        is_integerized=is_integerized,
        bucket_size=bucket_size,
        lookup_config=lookup_config,
        combiner=combiner,
        dtype=dtype)

  @property
  def name(self):
    return self.column_name

  @property
  def length(self):
    """Returns vocabulary or hash_bucket size."""
    if self.bucket_size is not None:
      return self.bucket_size

    return self.lookup_config.vocab_size + self.lookup_config.num_oov_buckets

  @property
  def config(self):
    return {self.column_name: parsing_ops.VarLenFeature(self.dtype)}

  @property
  def key(self):
    """Returns a string which will be used as a key when we do sorting."""
    return "{}".format(self)

  def id_tensor(self, input_tensor):
    """Returns the id tensor from the given transformed input_tensor."""
    return input_tensor

  # pylint: disable=unused-argument
  def weight_tensor(self, input_tensor):
    """Returns the weight tensor from the given transformed input_tensor."""
    # Plain sparse columns carry no per-id weights; see _WeightedSparseColumn.
    return None

  # pylint: disable=unused-argument
  def _to_dnn_input_layer(self,
                          input_tensor,
                          weight_collections=None,
                          trainable=True,
                          output_rank=2):
    raise ValueError(
        "SparseColumn is not supported in DNN. "
        "Please use embedding_column or one_hot_column. column: {}".format(
            self))

  def _wide_embedding_lookup_arguments(self, input_tensor):
    return _LinearEmbeddingLookupArguments(
        input_tensor=self.id_tensor(input_tensor),
        weight_tensor=self.weight_tensor(input_tensor),
        vocab_size=self.length,
        initializer=init_ops.zeros_initializer(),
        combiner=self.combiner)

  def _get_input_sparse_tensor(self, input_tensor):
    """sparsify input_tensor if dense."""
    if not isinstance(input_tensor, sparse_tensor_py.SparseTensor):
      # To avoid making any assumptions about which values are to be ignored,
      # we set ignore_value to -1 for numeric tensors to avoid excluding valid
      # indices.
      if input_tensor.dtype == dtypes.string:
        ignore_value = ""
      else:
        ignore_value = -1
      input_tensor = _reshape_real_valued_tensor(input_tensor, 2, self.name)
      input_tensor = contrib_sparse_ops.dense_to_sparse_tensor(
          input_tensor, ignore_value=ignore_value)

    return input_tensor

  def is_compatible(self, other_column):
    """Check compatibility of two sparse columns."""
    if self.lookup_config and other_column.lookup_config:
      return self.lookup_config == other_column.lookup_config
    compatible = (self.length == other_column.length and
                  (self.dtype == other_column.dtype or
                   (self.dtype.is_integer and other_column.dtype.is_integer)))
    if compatible:
      logging.warn("Column {} and {} may not have the same vocabulary.".
                   format(self.name, other_column.name))
    return compatible

  @abc.abstractmethod
  def _do_transform(self, input_tensor):
    pass

  def insert_transformed_feature(self, columns_to_tensors):
    """Handles sparse column to id conversion."""
    input_tensor = self._get_input_sparse_tensor(columns_to_tensors[self.name])
    columns_to_tensors[self] = self._do_transform(input_tensor)

  def _transform_feature(self, inputs):
    input_tensor = self._get_input_sparse_tensor(inputs.get(self.name))
    return self._do_transform(input_tensor)

  @property
  def _parse_example_spec(self):
    return self.config

  @property
  def _num_buckets(self):
    return self.length

  def _get_sparse_tensors(self, inputs, weight_collections=None,
                          trainable=None):
    del weight_collections
    del trainable
    input_tensor = inputs.get(self)
    return fc_core._CategoricalColumn.IdWeightPair(  # pylint: disable=protected-access
        self.id_tensor(input_tensor), self.weight_tensor(input_tensor))


class _SparseColumnIntegerized(_SparseColumn):
  """See `sparse_column_with_integerized_feature`."""

  def _do_transform(self, input_tensor):
    # Ids outside [0, bucket_size) wrap around via mod rather than erroring.
    sparse_id_values = math_ops.mod(input_tensor.values, self.bucket_size,
                                    name="mod")
    return sparse_tensor_py.SparseTensor(input_tensor.indices, sparse_id_values,
                                         input_tensor.dense_shape)


def sparse_column_with_integerized_feature(column_name,
                                           bucket_size,
                                           combiner="sum",
                                           dtype=dtypes.int64):
  """Creates an integerized _SparseColumn.

  Use this when your features are already pre-integerized into int64 IDs, that
  is, when the set of values to output is already coming in as what's desired
  in the output. Integerized means we can use the feature value itself as id.

  Typically this is used for reading contiguous ranges of integers indexes, but
  it doesn't have to be. The output value is simply copied from the
  input_feature, whatever it is. Just be aware, however, that if you have large
  gaps of unused integers it might affect what you feed those in (for instance,
  if you make up a one-hot tensor from these, the unused integers will appear
  as values in the tensor which are always zero.)

  Args:
    column_name: A string defining sparse column name.
    bucket_size: An int that is > 1. The number of buckets. It should be bigger
      than maximum feature. In other words features in this column should be an
      int64 in range [0, bucket_size)
    combiner: A string specifying how to reduce if the sparse column is
      multivalent. Currently "mean", "sqrtn" and "sum" are supported, with
      "sum" the default. "sqrtn" often achieves good accuracy, in particular
      with bag-of-words columns.
        * "sum": do not normalize features in the column
        * "mean": do l1 normalization on features in the column
        * "sqrtn": do l2 normalization on features in the column
      For more information: `tf.embedding_lookup_sparse`.
    dtype: Type of features. It should be an integer type. Default value is
      dtypes.int64.

  Returns:
    An integerized _SparseColumn definition.

  Raises:
    ValueError: bucket_size is not greater than 1.
    ValueError: dtype is not integer.
  """
  # Validation of bucket_size and dtype happens in _SparseColumn.__new__.
  return _SparseColumnIntegerized(
      column_name,
      is_integerized=True,
      bucket_size=bucket_size,
      combiner=combiner,
      dtype=dtype)


class _SparseColumnHashed(_SparseColumn):
  """See `sparse_column_with_hash_bucket`."""

  def _do_transform(self, input_tensor):
    if self.dtype.is_integer:
      # The fast hash op operates on strings, so stringify integers first.
      sparse_values = string_ops.as_string(input_tensor.values)
    else:
      sparse_values = input_tensor.values

    sparse_id_values = string_ops.string_to_hash_bucket_fast(
        sparse_values, self.bucket_size, name="lookup")
    return sparse_tensor_py.SparseTensor(input_tensor.indices, sparse_id_values,
                                         input_tensor.dense_shape)


def sparse_column_with_hash_bucket(column_name,
                                   hash_bucket_size,
                                   combiner="sum",
                                   dtype=dtypes.string):
  """Creates a _SparseColumn with hashed bucket configuration.

  Use this when your sparse features are in string or integer format, but you
  don't have a vocab file that maps each value to an integer ID.
  output_id = Hash(input_feature_string) % bucket_size

  Args:
    column_name: A string defining sparse column name.
    hash_bucket_size: An int that is > 1. The number of buckets.
    combiner: A string specifying how to reduce if the sparse column is
      multivalent. Currently "mean", "sqrtn" and "sum" are supported, with
      "sum" the default. "sqrtn" often achieves good accuracy, in particular
      with bag-of-words columns.
        * "sum": do not normalize features in the column
        * "mean": do l1 normalization on features in the column
        * "sqrtn": do l2 normalization on features in the column
      For more information: `tf.embedding_lookup_sparse`.
    dtype: The type of features. Only string and integer types are supported.

  Returns:
    A _SparseColumn with hashed bucket configuration

  Raises:
    ValueError: hash_bucket_size is not greater than 1.
    ValueError: dtype is neither string nor integer.
""" return _SparseColumnHashed( column_name, bucket_size=hash_bucket_size, combiner=combiner, dtype=dtype) class _SparseColumnKeys(_SparseColumn): """See `sparse_column_with_keys`.""" def _do_transform(self, input_tensor): table = lookup.index_table_from_tensor( mapping=tuple(self.lookup_config.keys), default_value=self.lookup_config.default_value, dtype=self.dtype, name="lookup") return table.lookup(input_tensor) def sparse_column_with_keys( column_name, keys, default_value=-1, combiner="sum", dtype=dtypes.string): """Creates a _SparseColumn with keys. Look up logic is as follows: lookup_id = index_of_feature_in_keys if feature in keys else default_value Args: column_name: A string defining sparse column name. keys: A list or tuple defining vocabulary. Must be castable to `dtype`. default_value: The value to use for out-of-vocabulary feature values. Default is -1. combiner: A string specifying how to reduce if the sparse column is multivalent. Currently "mean", "sqrtn" and "sum" are supported, with "sum" the default. "sqrtn" often achieves good accuracy, in particular with bag-of-words columns. * "sum": do not normalize features in the column * "mean": do l1 normalization on features in the column * "sqrtn": do l2 normalization on features in the column For more information: `tf.embedding_lookup_sparse`. dtype: Type of features. Only integer and string are supported. Returns: A _SparseColumnKeys with keys configuration. 
""" keys = tuple(keys) return _SparseColumnKeys( column_name, lookup_config=_SparseIdLookupConfig( keys=keys, vocab_size=len(keys), default_value=default_value), combiner=combiner, dtype=dtype) class _SparseColumnVocabulary(_SparseColumn): """See `sparse_column_with_vocabulary_file`.""" def _do_transform(self, st): if self.dtype.is_integer: sparse_string_values = string_ops.as_string(st.values) sparse_string_tensor = sparse_tensor_py.SparseTensor(st.indices, sparse_string_values, st.dense_shape) else: sparse_string_tensor = st table = lookup.index_table_from_file( vocabulary_file=self.lookup_config.vocabulary_file, num_oov_buckets=self.lookup_config.num_oov_buckets, vocab_size=self.lookup_config.vocab_size, default_value=self.lookup_config.default_value, name=self.name + "_lookup") return table.lookup(sparse_string_tensor) def sparse_column_with_vocabulary_file(column_name, vocabulary_file, num_oov_buckets=0, vocab_size=None, default_value=-1, combiner="sum", dtype=dtypes.string): """Creates a _SparseColumn with vocabulary file configuration. Use this when your sparse features are in string or integer format, and you have a vocab file that maps each value to an integer ID. output_id = LookupIdFromVocab(input_feature_string) Args: column_name: A string defining sparse column name. vocabulary_file: The vocabulary filename. num_oov_buckets: The number of out-of-vocabulary buckets. If zero all out of vocabulary features will be ignored. vocab_size: Number of the elements in the vocabulary. default_value: The value to use for out-of-vocabulary feature values. Defaults to -1. combiner: A string specifying how to reduce if the sparse column is multivalent. Currently "mean", "sqrtn" and "sum" are supported, with "sum" the default. "sqrtn" often achieves good accuracy, in particular with bag-of-words columns. 
* "sum": do not normalize features in the column * "mean": do l1 normalization on features in the column * "sqrtn": do l2 normalization on features in the column For more information: `tf.embedding_lookup_sparse`. dtype: The type of features. Only string and integer types are supported. Returns: A _SparseColumn with vocabulary file configuration. Raises: ValueError: vocab_size is not defined. ValueError: dtype is neither string nor integer. """ if vocab_size is None: raise ValueError("vocab_size should be defined. " "column_name: {}".format(column_name)) return _SparseColumnVocabulary( column_name, lookup_config=_SparseIdLookupConfig( vocabulary_file=vocabulary_file, num_oov_buckets=num_oov_buckets, vocab_size=vocab_size, default_value=default_value), combiner=combiner, dtype=dtype) class _WeightedSparseColumn( _FeatureColumn, fc_core._CategoricalColumn, # pylint: disable=protected-access collections.namedtuple("_WeightedSparseColumn", ["sparse_id_column", "weight_column_name", "dtype"])): """See `weighted_sparse_column`.""" def __new__(cls, sparse_id_column, weight_column_name, dtype): return super(_WeightedSparseColumn, cls).__new__(cls, sparse_id_column, weight_column_name, dtype) @property def name(self): return "{}_weighted_by_{}".format(self.sparse_id_column.name, self.weight_column_name) @property def length(self): """Returns id size.""" return self.sparse_id_column.length @property def config(self): config = _get_feature_config(self.sparse_id_column) config.update( {self.weight_column_name: parsing_ops.VarLenFeature(self.dtype)}) return config @property def key(self): """Returns a string which will be used as a key when we do sorting.""" return "{}".format(self) def id_tensor(self, input_tensor): """Returns the id tensor from the given transformed input_tensor.""" return input_tensor[0] def weight_tensor(self, input_tensor): """Returns the weight tensor from the given transformed input_tensor.""" return input_tensor[1] # pylint: disable=unused-argument def 
_to_dnn_input_layer(self, input_tensor, weight_collections=None, trainable=True, output_rank=2): raise ValueError( "WeightedSparseColumn is not supported in DNN. " "Please use embedding_column or one_hot_column. column: {}".format( self)) def _wide_embedding_lookup_arguments(self, input_tensor): return _LinearEmbeddingLookupArguments( input_tensor=self.id_tensor(input_tensor), weight_tensor=self.weight_tensor(input_tensor), vocab_size=self.length, initializer=init_ops.zeros_initializer(), combiner=self.sparse_id_column.combiner) def _do_transform(self, id_tensor, weight_tensor): if not isinstance(weight_tensor, sparse_tensor_py.SparseTensor): # The weight tensor can be a regular Tensor. In such case, sparsify it. weight_tensor = contrib_sparse_ops.dense_to_sparse_tensor(weight_tensor) if not self.dtype.is_floating: weight_tensor = math_ops.to_float(weight_tensor) return tuple([id_tensor, weight_tensor]) def insert_transformed_feature(self, columns_to_tensors): """Inserts a tuple with the id and weight tensors.""" if self.sparse_id_column not in columns_to_tensors: self.sparse_id_column.insert_transformed_feature(columns_to_tensors) weight_tensor = columns_to_tensors[self.weight_column_name] columns_to_tensors[self] = self._do_transform( columns_to_tensors[self.sparse_id_column], weight_tensor) def _transform_feature(self, inputs): return self._do_transform( inputs.get(self.sparse_id_column), inputs.get(self.weight_column_name)) @property def _parse_example_spec(self): return self.config @property def _num_buckets(self): return self.length def _get_sparse_tensors(self, inputs, weight_collections=None, trainable=None): del weight_collections del trainable input_tensor = inputs.get(self) return fc_core._CategoricalColumn.IdWeightPair( # pylint: disable=protected-access self.id_tensor(input_tensor), self.weight_tensor(input_tensor)) def weighted_sparse_column(sparse_id_column, weight_column_name, dtype=dtypes.float32): """Creates a _SparseColumn by combining 
  sparse_id_column with a weight column.

  Example:

    ```python
    sparse_feature = sparse_column_with_hash_bucket(column_name="sparse_col",
                                                    hash_bucket_size=1000)
    weighted_feature = weighted_sparse_column(sparse_id_column=sparse_feature,
                                              weight_column_name="weights_col")
    ```

    This configuration assumes that input dictionary of model contains the
    following two items:
      * (key="sparse_col", value=sparse_tensor) where sparse_tensor is
        a SparseTensor.
      * (key="weights_col", value=weights_tensor) where weights_tensor
        is a SparseTensor.
    Following are assumed to be true:
      * sparse_tensor.indices = weights_tensor.indices
      * sparse_tensor.dense_shape = weights_tensor.dense_shape

  Args:
    sparse_id_column: A `_SparseColumn` which is created by
      `sparse_column_with_*` functions.
    weight_column_name: A string defining a sparse column name which represents
      weight or value of the corresponding sparse id feature.
    dtype: Type of weights, such as `tf.float32`. Only floating and integer
      weights are supported.

  Returns:
    A _WeightedSparseColumn composed of two sparse features: one represents id,
    the other represents weight (value) of the id feature in that example.

  Raises:
    ValueError: if dtype is not convertible to float.
  """
  if not (dtype.is_integer or dtype.is_floating):
    raise ValueError("dtype is not convertible to float. Given {}".format(
        dtype))

  return _WeightedSparseColumn(sparse_id_column, weight_column_name, dtype)


class _OneHotColumn(
    _FeatureColumn,
    fc_core._DenseColumn,  # pylint: disable=protected-access
    collections.namedtuple("_OneHotColumn", ["sparse_id_column"])):
  """Represents a one-hot column for use in deep networks.

  Args:
    sparse_id_column: A _SparseColumn which is created by `sparse_column_with_*`
      function.
  """

  @property
  def name(self):
    return "{}_one_hot".format(self.sparse_id_column.name)

  @property
  def length(self):
    """Returns vocabulary or hash_bucket size."""
    return self.sparse_id_column.length

  @property
  def config(self):
    """Returns the parsing config of the origin column."""
    return _get_feature_config(self.sparse_id_column)

  @property
  def key(self):
    """Returns a string which will be used as a key when we do sorting."""
    return "{}".format(self)

  def insert_transformed_feature(self, columns_to_tensors):
    """Used by the Transformer to prevent double transformations."""
    if self.sparse_id_column not in columns_to_tensors:
      self.sparse_id_column.insert_transformed_feature(columns_to_tensors)
    columns_to_tensors[self] = columns_to_tensors[self.sparse_id_column]

  def _to_dnn_input_layer(self,
                          transformed_input_tensor,
                          unused_weight_collections=None,
                          unused_trainable=False,
                          output_rank=2):
    """Returns a Tensor as an input to the first layer of neural network.

    Args:
      transformed_input_tensor: A tensor that has undergone the transformations
        in `insert_transformed_feature`. Rank should be >= `output_rank`.
      unused_weight_collections: Unused. One hot encodings are not variable.
      unused_trainable: Unused. One hot encodings are not trainable.
      output_rank: the desired rank of the output `Tensor`.

    Returns:
      A multi-hot Tensor to be fed into the first layer of neural network.

    Raises:
      ValueError: When using one_hot_column with weighted_sparse_column.
        This is not yet supported.
    """
    # Reshape ID column to `output_rank`.
sparse_id_column = self.sparse_id_column.id_tensor(transformed_input_tensor) # pylint: disable=protected-access sparse_id_column = layers._inner_flatten(sparse_id_column, output_rank) weight_tensor = self.sparse_id_column.weight_tensor( transformed_input_tensor) if weight_tensor is not None: weighted_column = sparse_ops.sparse_merge(sp_ids=sparse_id_column, sp_values=weight_tensor, vocab_size=self.length) return sparse_ops.sparse_tensor_to_dense(weighted_column) dense_id_tensor = sparse_ops.sparse_tensor_to_dense(sparse_id_column, default_value=-1) # One hot must be float for tf.concat reasons since all other inputs to # input_layer are float32. one_hot_id_tensor = array_ops.one_hot( dense_id_tensor, depth=self.length, on_value=1.0, off_value=0.0) # Reduce to get a multi-hot per example. return math_ops.reduce_sum( one_hot_id_tensor, reduction_indices=[output_rank - 1]) @property def _variable_shape(self): return tensor_shape.TensorShape([self.length]) def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None): del weight_collections del trainable return inputs.get(self) def _transform_feature(self, inputs): return self._to_dnn_input_layer(inputs.get(self.sparse_id_column)) @property def _parse_example_spec(self): return self.config class _EmbeddingColumn( _FeatureColumn, fc_core._DenseColumn, # pylint: disable=protected-access collections.namedtuple("_EmbeddingColumn", [ "sparse_id_column", "dimension", "combiner", "initializer", "ckpt_to_load_from", "tensor_name_in_ckpt", "shared_embedding_name", "shared_vocab_size", "max_norm", "trainable" ])): """Represents an embedding column. Args: sparse_id_column: A `_SparseColumn` which is created by `sparse_column_with_*` or `weighted_sparse_column` functions. dimension: An integer specifying dimension of the embedding. combiner: A string specifying how to reduce if there are multiple entries in a single row. Currently "mean", "sqrtn" and "sum" are supported, with "mean" the default. 
"sqrtn" often achieves good accuracy, in particular with bag-of-words columns. Each of this can be thought as example level normalizations on the column: * "sum": do not normalize features in the column * "mean": do l1 normalization on features in the column * "sqrtn": do l2 normalization on features in the column For more information: `tf.embedding_lookup_sparse`. initializer: A variable initializer function to be used in embedding variable initialization. If not specified, defaults to `tf.truncated_normal_initializer` with mean 0.0 and standard deviation 1/sqrt(sparse_id_column.length). ckpt_to_load_from: (Optional). String representing checkpoint name/pattern to restore the column weights. Required if `tensor_name_in_ckpt` is not None. tensor_name_in_ckpt: (Optional). Name of the `Tensor` in the provided checkpoint from which to restore the column weights. Required if `ckpt_to_load_from` is not None. shared_embedding_name: (Optional). The common name for shared embedding. shared_vocab_size: (Optional). The common vocab_size used for shared embedding space. max_norm: (Optional). If not None, embedding values are l2-normalized to the value of max_norm. trainable: (Optional). Should the embedding be trainable. Default is True. Raises: ValueError: if `initializer` is specified and is not callable. Also, if only one of `ckpt_to_load_from` and `tensor_name_in_ckpt` is specified. """ def __new__(cls, sparse_id_column, dimension, combiner="mean", initializer=None, ckpt_to_load_from=None, tensor_name_in_ckpt=None, shared_embedding_name=None, shared_vocab_size=None, max_norm=None, trainable=True): if initializer is not None and not callable(initializer): raise ValueError("initializer must be callable if specified. 
" "Embedding of column_name: {}".format( sparse_id_column.name)) if (ckpt_to_load_from is None) != (tensor_name_in_ckpt is None): raise ValueError("Must specify both `ckpt_to_load_from` and " "`tensor_name_in_ckpt` or none of them.") if initializer is None: logging.warn("The default stddev value of initializer will change from " "\"1/sqrt(vocab_size)\" to \"1/sqrt(dimension)\" after " "2017/02/25.") stddev = 1 / math.sqrt(sparse_id_column.length) initializer = init_ops.truncated_normal_initializer( mean=0.0, stddev=stddev) return super(_EmbeddingColumn, cls).__new__(cls, sparse_id_column, dimension, combiner, initializer, ckpt_to_load_from, tensor_name_in_ckpt, shared_embedding_name, shared_vocab_size, max_norm, trainable) @property def name(self): if self.shared_embedding_name is None: return "{}_embedding".format(self.sparse_id_column.name) else: return "{}_shared_embedding".format(self.sparse_id_column.name) @property def length(self): """Returns id size.""" if self.shared_vocab_size is None: return self.sparse_id_column.length else: return self.shared_vocab_size @property def config(self): return _get_feature_config(self.sparse_id_column) @property def key(self): """Returns a string which will be used as a key when we do sorting.""" return self._key_without_properties(["initializer"]) def insert_transformed_feature(self, columns_to_tensors): if self.sparse_id_column not in columns_to_tensors: self.sparse_id_column.insert_transformed_feature(columns_to_tensors) columns_to_tensors[self] = columns_to_tensors[self.sparse_id_column] def _deep_embedding_lookup_arguments(self, input_tensor): return _DeepEmbeddingLookupArguments( input_tensor=self.sparse_id_column.id_tensor(input_tensor), weight_tensor=self.sparse_id_column.weight_tensor(input_tensor), vocab_size=self.length, dimension=self.dimension, initializer=self.initializer, combiner=self.combiner, shared_embedding_name=self.shared_embedding_name, hash_key=None, max_norm=self.max_norm, trainable=self.trainable) 
  def _checkpoint_path(self):
    """Returns (ckpt_path, tensor_name) to restore from, or None."""
    if self.ckpt_to_load_from is not None:
      return self.ckpt_to_load_from, self.tensor_name_in_ckpt
    return None

  # pylint: disable=unused-argument
  def _wide_embedding_lookup_arguments(self, input_tensor):
    # Embedding columns are dense-only; linear (wide) models must use the
    # underlying sparse column directly.
    raise ValueError("Column {} is not supported in linear models. "
                     "Please use sparse_column.".format(self))

  @property
  def _variable_shape(self):
    # Per-example output shape of the dense embedding (excluding batch dim).
    return tensor_shape.TensorShape([self.dimension])

  def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
    """Builds the embedding lookup tensor for the core FeatureColumn API."""
    return _embeddings_from_arguments(
        self, self._deep_embedding_lookup_arguments(inputs.get(self)),
        weight_collections, trainable)

  def _transform_feature(self, inputs):
    # Transformation is delegated to the wrapped sparse id column.
    return inputs.get(self.sparse_id_column)

  @property
  def _parse_example_spec(self):
    return self.config


def _is_variable(v):
  """Returns true if `v` is a variable."""
  return isinstance(v, (variables.Variable,
                        resource_variable_ops.ResourceVariable))


def _embeddings_from_arguments(column,
                               args,
                               weight_collections,
                               trainable,
                               output_rank=2):
  """Returns embeddings for a column based on the computed arguments.

  Args:
    column: the column name.
    args: the _DeepEmbeddingLookupArguments for this column.
    weight_collections: collections to store weights in.
    trainable: whether these embeddings should be trainable.
    output_rank: the desired rank of the returned `Tensor`. Inner dimensions
      will be combined to produce the desired rank.

  Returns:
    the embeddings.

  Raises:
    ValueError: if not possible to create.
  """
  # pylint: disable=protected-access
  # Collapse inner dimensions so the lookup always sees rank `output_rank`
  # inputs (and matching weights, when present).
  input_tensor = layers._inner_flatten(args.input_tensor, output_rank)
  weight_tensor = None
  if args.weight_tensor is not None:
    weight_tensor = layers._inner_flatten(args.weight_tensor, output_rank)
  # pylint: enable=protected-access

  # This option is only enabled for scattered_embedding_column.
  if args.hash_key:
    # Scattered ("hashed") embedding: a single flat parameter vector of size
    # vocab_size; each (value, dimension) pair is fingerprinted into it.
    embeddings = contrib_variables.model_variable(
        name="weights",
        shape=[args.vocab_size],
        dtype=dtypes.float32,
        initializer=args.initializer,
        trainable=(trainable and args.trainable),
        collections=weight_collections)

    return embedding_ops.scattered_embedding_lookup_sparse(
        embeddings,
        input_tensor,
        args.dimension,
        hash_key=args.hash_key,
        combiner=args.combiner,
        name="lookup")

  if args.shared_embedding_name is not None:
    # Shared embeddings: the (partitioned) variable is memoized in a graph
    # collection keyed by the shared name, so every column sharing the name
    # reuses one variable.
    shared_embedding_collection_name = (
        "SHARED_EMBEDDING_COLLECTION_" + args.shared_embedding_name.upper())
    graph = ops.get_default_graph()
    shared_embedding_collection = (
        graph.get_collection_ref(shared_embedding_collection_name))
    shape = [args.vocab_size, args.dimension]
    if shared_embedding_collection:
      if len(shared_embedding_collection) > 1:
        raise ValueError(
            "Collection %s can only contain one "
            "(partitioned) variable." % shared_embedding_collection_name)
      else:
        embeddings = shared_embedding_collection[0]
        if embeddings.get_shape() != shape:
          raise ValueError(
              "The embedding variable with name {} already "
              "exists, but its shape does not match required "
              "embedding shape here. Please make sure to use "
              "different shared_embedding_name for different "
              "shared embeddings.".format(args.shared_embedding_name))
    else:
      embeddings = contrib_variables.model_variable(
          name=args.shared_embedding_name,
          shape=shape,
          dtype=dtypes.float32,
          initializer=args.initializer,
          trainable=(trainable and args.trainable),
          collections=weight_collections)
      graph.add_to_collection(shared_embedding_collection_name, embeddings)
  else:
    # Ordinary per-column embedding table of shape [vocab_size, dimension].
    embeddings = contrib_variables.model_variable(
        name="weights",
        shape=[args.vocab_size, args.dimension],
        dtype=dtypes.float32,
        initializer=args.initializer,
        trainable=(trainable and args.trainable),
        collections=weight_collections)

  # Normalize to a list of variables (a PartitionedVariable expands into its
  # shard list) for both checkpoint restore and the safe lookup below.
  if _is_variable(embeddings):
    embeddings = [embeddings]
  else:
    embeddings = embeddings._get_variable_list()  # pylint: disable=protected-access
  # pylint: disable=protected-access
  _maybe_restore_from_checkpoint(column._checkpoint_path(), embeddings)
  return embedding_ops.safe_embedding_lookup_sparse(
      embeddings,
      input_tensor,
      sparse_weights=weight_tensor,
      combiner=args.combiner,
      name=column.name + "weights",
      max_norm=args.max_norm)


def _maybe_restore_from_checkpoint(checkpoint_path, variable):
  """Initializes `variable` from a checkpoint tensor when a path is given.

  Args:
    checkpoint_path: None, or a (path, tensor_name) pair as returned by
      `_EmbeddingColumn._checkpoint_path()`.
    variable: list of variables (possibly shards of one logical variable).
  """
  if checkpoint_path is not None:
    path, tensor_name = checkpoint_path
    weights_to_restore = variable
    # A single-element list is unwrapped so init_from_checkpoint sees a
    # plain variable rather than a shard list.
    if len(variable) == 1:
      weights_to_restore = variable[0]
    checkpoint_utils.init_from_checkpoint(path,
                                          {tensor_name: weights_to_restore})


def one_hot_column(sparse_id_column):
  """Creates an `_OneHotColumn` for a one-hot or multi-hot repr in a DNN.

  Args:
    sparse_id_column: A _SparseColumn which is created by `sparse_column_with_*`
      or crossed_column functions. Note that `combiner` defined in
      `sparse_id_column` is ignored.

  Returns:
    An _OneHotColumn.
  """
  return _OneHotColumn(sparse_id_column)


def embedding_column(sparse_id_column,
                     dimension,
                     combiner="mean",
                     initializer=None,
                     ckpt_to_load_from=None,
                     tensor_name_in_ckpt=None,
                     max_norm=None,
                     trainable=True):
  """Creates an `_EmbeddingColumn` for feeding sparse data into a DNN.

  Args:
    sparse_id_column: A `_SparseColumn` which is created by for example
      `sparse_column_with_*` or crossed_column functions. Note that `combiner`
      defined in `sparse_id_column` is ignored.
    dimension: An integer specifying dimension of the embedding.
    combiner: A string specifying how to reduce if there are multiple entries
      in a single row. Currently "mean", "sqrtn" and "sum" are supported, with
      "mean" the default. "sqrtn" often achieves good accuracy, in particular
      with bag-of-words columns. Each of these can be thought of as example
      level normalizations on the column:
        * "sum": do not normalize
        * "mean": do l1 normalization
        * "sqrtn": do l2 normalization
      For more information: `tf.embedding_lookup_sparse`.
    initializer: A variable initializer function to be used in embedding
      variable initialization. If not specified, defaults to
      `tf.truncated_normal_initializer` with mean 0.0 and standard deviation
      1/sqrt(sparse_id_column.length).
    ckpt_to_load_from: (Optional). String representing checkpoint name/pattern
      to restore the column weights. Required if `tensor_name_in_ckpt` is not
      None.
    tensor_name_in_ckpt: (Optional). Name of the `Tensor` in the provided
      checkpoint from which to restore the column weights. Required if
      `ckpt_to_load_from` is not None.
    max_norm: (Optional). If not None, embedding values are l2-normalized to
      the value of max_norm.
    trainable: (Optional). Should the embedding be trainable. Default is True.

  Returns:
    An `_EmbeddingColumn`.
  """
  return _EmbeddingColumn(sparse_id_column, dimension, combiner, initializer,
                          ckpt_to_load_from, tensor_name_in_ckpt,
                          max_norm=max_norm, trainable=trainable)


def shared_embedding_columns(sparse_id_columns,
                             dimension,
                             combiner="mean",
                             shared_embedding_name=None,
                             initializer=None,
                             ckpt_to_load_from=None,
                             tensor_name_in_ckpt=None,
                             max_norm=None,
                             trainable=True):
  """Creates a list of `_EmbeddingColumn` sharing the same embedding.

  Args:
    sparse_id_columns: An iterable of `_SparseColumn`, such as those created by
      `sparse_column_with_*` or crossed_column functions. Note that `combiner`
      defined in each sparse_id_column is ignored.
    dimension: An integer specifying dimension of the embedding.
    combiner: A string specifying how to reduce if there are multiple entries
      in a single row. Currently "mean", "sqrtn" and "sum" are supported, with
      "mean" the default. "sqrtn" often achieves good accuracy, in particular
      with bag-of-words columns. Each of these can be thought of as example
      level normalizations on the column:
        * "sum": do not normalize
        * "mean": do l1 normalization
        * "sqrtn": do l2 normalization
      For more information: `tf.embedding_lookup_sparse`.
    shared_embedding_name: (Optional). A string specifying the name of shared
      embedding weights. This will be needed if you want to reference the
      shared embedding separately from the generated `_EmbeddingColumn`.
    initializer: A variable initializer function to be used in embedding
      variable initialization. If not specified, defaults to
      `tf.truncated_normal_initializer` with mean 0.0 and standard deviation
      1/sqrt(sparse_id_columns[0].length).
    ckpt_to_load_from: (Optional). String representing checkpoint name/pattern
      to restore the column weights. Required if `tensor_name_in_ckpt` is not
      None.
    tensor_name_in_ckpt: (Optional). Name of the `Tensor` in the provided
      checkpoint from which to restore the column weights. Required if
      `ckpt_to_load_from` is not None.
    max_norm: (Optional). If not None, embedding values are l2-normalized to
      the value of max_norm.
    trainable: (Optional). Should the embedding be trainable. Default is True.

  Returns:
    A tuple of `_EmbeddingColumn` with shared embedding space.

  Raises:
    ValueError: if sparse_id_columns is empty, or its elements are not
      compatible with each other.
    TypeError: if `sparse_id_columns` is not a sequence or is a string. If at
      least one element of `sparse_id_columns` is not a `SparseTensor`.
""" if (not isinstance(sparse_id_columns, collections.Sequence) or isinstance(sparse_id_columns, six.string_types)): raise TypeError( "sparse_id_columns must be a non-string sequence (ex: list or tuple) " "instead of type {}.".format(type(sparse_id_columns))) if len(sparse_id_columns) < 1: raise ValueError("The input sparse_id_columns should have at least one " "element.") for sparse_id_column in sparse_id_columns: if not isinstance(sparse_id_column, _SparseColumn): raise TypeError("Elements of sparse_id_columns must be _SparseColumn, but" "{} is not.".format(sparse_id_column)) if len(sparse_id_columns) == 1: return [ _EmbeddingColumn(sparse_id_columns[0], dimension, combiner, initializer, ckpt_to_load_from, tensor_name_in_ckpt, shared_embedding_name, max_norm=max_norm, trainable=trainable)] else: # check compatibility of sparse_id_columns compatible = True for column in sparse_id_columns[1:]: compatible = compatible and column.is_compatible(sparse_id_columns[0]) if not compatible: raise ValueError("The input sparse id columns are not compatible.") # Construct the shared name and size for shared embedding space. if not shared_embedding_name: # Sort the columns so that shared_embedding_name will be deterministic # even if users pass in unsorted columns from a dict or something. 
      sorted_columns = sorted(sparse_id_columns)
      # Use at most the first three column names in the generated name to keep
      # it readable; summarize the rest with a count suffix.
      if len(sorted_columns) <= 3:
        shared_embedding_name = "_".join([column.name
                                          for column in sorted_columns])
      else:
        shared_embedding_name = "_".join([column.name
                                          for column in sorted_columns[0:3]])
        shared_embedding_name += (
            "_plus_{}_others".format(len(sorted_columns) - 3))
      shared_embedding_name += "_shared_embedding"
    # All columns were verified compatible above, so the first column's vocab
    # size is the shared one.
    shared_vocab_size = sparse_id_columns[0].length

    embedded_columns = []
    for column in sparse_id_columns:
      embedded_columns.append(
          _EmbeddingColumn(column, dimension, combiner, initializer,
                           ckpt_to_load_from, tensor_name_in_ckpt,
                           shared_embedding_name, shared_vocab_size,
                           max_norm=max_norm, trainable=trainable))
    return tuple(embedded_columns)


class _ScatteredEmbeddingColumn(
    _FeatureColumn,
    fc_core._DenseColumn,  # pylint: disable=protected-access
    collections.namedtuple("_ScatteredEmbeddingColumn", [
        "column_name", "size", "dimension", "hash_key", "combiner",
        "initializer"
    ])):
  """See `scattered_embedding_column`."""

  def __new__(cls,
              column_name,
              size,
              dimension,
              hash_key,
              combiner="sqrtn",
              initializer=None):
    if initializer is not None and not callable(initializer):
      raise ValueError("initializer must be callable if specified. "
                       "column_name: {}".format(column_name))

    if initializer is None:
      # Keep the historical default until the announced change date.
      logging.warn("The default stddev value of initializer will change from "
                   "\"0.1\" to \"1/sqrt(dimension)\" after 2017/02/25.")
      stddev = 0.1
      initializer = init_ops.truncated_normal_initializer(
          mean=0.0, stddev=stddev)
    return super(_ScatteredEmbeddingColumn, cls).__new__(cls, column_name, size,
                                                         dimension, hash_key,
                                                         combiner,
                                                         initializer)

  @property
  def name(self):
    return "{}_scattered_embedding".format(self.column_name)

  @property
  def config(self):
    # Raw feature is parsed as variable-length strings; hashing happens at
    # lookup time.
    return {self.column_name: parsing_ops.VarLenFeature(dtypes.string)}

  @property
  def key(self):
    """Returns a string which will be used as a key when we do sorting."""
    return self._key_without_properties(["initializer"])

  def insert_transformed_feature(self, columns_to_tensors):
    # No graph-side transformation: the raw tensor is passed through.
    columns_to_tensors[self] = columns_to_tensors[self.column_name]

  def _deep_embedding_lookup_arguments(self, input_tensor):
    return _DeepEmbeddingLookupArguments(
        input_tensor=input_tensor,
        weight_tensor=None,
        vocab_size=self.size,
        initializer=self.initializer,
        combiner=self.combiner,
        dimension=self.dimension,
        shared_embedding_name=None,
        hash_key=self.hash_key,
        max_norm=None,
        trainable=True)

  @property
  def _variable_shape(self):
    return tensor_shape.TensorShape([self.dimension])

  def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
    return _embeddings_from_arguments(
        self, self._deep_embedding_lookup_arguments(inputs.get(self)),
        weight_collections, trainable)

  def _transform_feature(self, inputs):
    return inputs.get(self.column_name)

  @property
  def _parse_example_spec(self):
    return self.config


def scattered_embedding_column(column_name,
                               size,
                               dimension,
                               hash_key,
                               combiner="mean",
                               initializer=None):
  """Creates an embedding column of a sparse feature using parameter hashing.

  This is a useful shorthand when you have a sparse feature you want to use an
  embedding for, but also want to hash the embedding's values in each dimension
  to a variable based on a different hash.

  Specifically, the i-th embedding component of a value v is found by retrieving
  an embedding weight whose index is a fingerprint of the pair (v,i).

  An embedding column with sparse_column_with_hash_bucket such as

      embedding_column(
        sparse_column_with_hash_bucket(column_name, bucket_size),
        dimension)

  could be replaced by

      scattered_embedding_column(
        column_name,
        size=bucket_size * dimension,
        dimension=dimension,
        hash_key=tf.contrib.layers.SPARSE_FEATURE_CROSS_DEFAULT_HASH_KEY)

  for the same number of embedding parameters. This should hopefully reduce the
  impact of collisions, but adds the cost of slowing down training.

  Args:
    column_name: A string defining sparse column name.
    size: An integer specifying the number of parameters in the embedding layer.
    dimension: An integer specifying dimension of the embedding.
    hash_key: Specify the hash_key that will be used by the `FingerprintCat64`
      function to combine the crosses fingerprints on SparseFeatureCrossOp.
    combiner: A string specifying how to reduce if there are multiple entries
      in a single row. Currently "mean", "sqrtn" and "sum" are supported, with
      "mean" the default. "sqrtn" often achieves good accuracy, in particular
      with bag-of-words columns. Each of these can be thought of as example
      level normalizations on the column:
        * "sum": do not normalize features in the column
        * "mean": do l1 normalization on features in the column
        * "sqrtn": do l2 normalization on features in the column
      For more information: `tf.embedding_lookup_sparse`.
    initializer: A variable initializer function to be used in embedding
      variable initialization. If not specified, defaults to
      `tf.truncated_normal_initializer` with mean 0 and standard deviation 0.1.

  Returns:
    A _ScatteredEmbeddingColumn.

  Raises:
    ValueError: if dimension or size is not a positive integer; or if combiner
      is not supported.
  """
  if (dimension < 1) or (size < 1):
    raise ValueError("Dimension and size must be greater than 0. "
                     "dimension: {}, size: {}, column_name: {}".format(
                         dimension, size, column_name))

  if combiner not in ("mean", "sqrtn", "sum"):
    raise ValueError("Combiner must be one of 'mean', 'sqrtn' or 'sum'. "
                     "combiner: {}, column_name: {}".format(combiner,
                                                            column_name))

  return _ScatteredEmbeddingColumn(column_name, size, dimension, hash_key,
                                   combiner, initializer)


def _reshape_real_valued_tensor(input_tensor, output_rank, column_name=None):
  """Reshaping logic for dense, numeric `Tensors`.

  Follows the following rules:
    1. If `output_rank > input_rank + 1` raise a `ValueError`.
    2. If `output_rank == input_rank + 1`, expand `input_tensor` by one
       dimension and return
    3. If `output_rank == input_rank`, return `input_tensor`.
    4. If `output_rank < input_rank`, flatten the inner dimensions of
       `input_tensor` and return a `Tensor` with `output_rank`

  Args:
    input_tensor: a dense `Tensor` to be reshaped.
    output_rank: the desired rank of the reshaped `Tensor`.
    column_name: (optional) the name of the associated column. Used for error
      messages.

  Returns:
    A `Tensor` with the same entries as `input_tensor` and rank `output_rank`.

  Raises:
    ValueError: if `output_rank > input_rank + 1`.
  """
  input_rank = input_tensor.get_shape().ndims

  if input_rank is not None:
    if output_rank > input_rank + 1:
      error_string = ("Rank of input Tensor ({}) should be the same as "
                      "output_rank ({}). For example, sequence data should "
                      "typically be 3 dimensional (rank 3) while non-sequence "
                      "data is typically 2 dimensional (rank 2).".format(
                          input_rank, output_rank))
      if column_name is not None:
        error_string = ("Error while processing column {}.".format(column_name)
                        + error_string)
      raise ValueError(error_string)
    if output_rank == input_rank + 1:
      logging.warning(
          "Rank of input Tensor ({}) should be the same as output_rank ({}) "
          "for column. Will attempt to expand dims. It is highly recommended "
          "that you resize your input, as this behavior may change.".format(
              input_rank, output_rank))
      return array_ops.expand_dims(input_tensor, -1, name="expand_dims")
    if output_rank == input_rank:
      return input_tensor
  # Here, either `input_rank` is unknown or it is greater than `output_rank`.
  return layers._inner_flatten(input_tensor, output_rank)  # pylint: disable=protected-access


class _RealValuedVarLenColumn(_FeatureColumn, collections.namedtuple(
    "_RealValuedVarLenColumn",
    ["column_name", "default_value", "dtype", "normalizer", "is_sparse"])):
  """Represents a real valued feature column for variable length Features.

  Instances of this class are immutable.
  If is_sparse=False, the dictionary returned by InputBuilder contains a
  ("column_name", Tensor) pair with a Tensor shape of (batch_size, dimension).
  If is_sparse=True, the dictionary contains a ("column_name", SparseTensor)
  pair instead with shape inferred after parsing.
  """

  @property
  def name(self):
    return self.column_name

  @property
  def config(self):
    # Sparse parsing keeps ragged values; dense parsing pads missing entries
    # with default_value.
    if self.is_sparse:
      return {self.column_name: parsing_ops.VarLenFeature(self.dtype)}
    else:
      return {self.column_name: parsing_ops.FixedLenSequenceFeature(
          [], self.dtype, allow_missing=True,
          default_value=self.default_value)}

  @property
  def key(self):
    """Returns a string which will be used as a key when we do sorting."""
    return self._key_without_properties(["normalizer"])

  @property
  def normalizer_fn(self):
    """Returns the function used to normalize the column."""
    return self.normalizer

  def _normalized_input_tensor(self, input_tensor):
    """Returns the input tensor after custom normalization is applied."""
    if self.normalizer is None:
      return input_tensor
    if self.is_sparse:
      # Normalize only the values; indices and shape are preserved.
      return sparse_tensor_py.SparseTensor(
          input_tensor.indices,
          self.normalizer(input_tensor.values),
          input_tensor.dense_shape)
    else:
      return self.normalizer(input_tensor)

  def insert_transformed_feature(self, columns_to_tensors):
    """Apply transformation and inserts it into columns_to_tensors.

    Args:
      columns_to_tensors: A mapping from feature columns to tensors. 'string'
        key means a base feature (not-transformed). It can have _FeatureColumn
        as a key too. That means that _FeatureColumn is already transformed.
    """
    # Transform the input tensor according to the normalizer function.
    input_tensor = self._normalized_input_tensor(columns_to_tensors[self.name])
    columns_to_tensors[self] = math_ops.to_float(input_tensor)

  # pylint: disable=unused-argument
  def _to_dnn_input_layer(self,
                          input_tensor,
                          weight_collections=None,
                          trainable=True,
                          output_rank=2):
    return _reshape_real_valued_tensor(
        self._to_dense_tensor(input_tensor), output_rank, self.name)

  def _to_dense_tensor(self, input_tensor):
    if not self.is_sparse:
      return input_tensor
    raise ValueError("Set is_sparse to False if you want a dense Tensor for "
                     "column_name: {}".format(self.name))


@experimental
def _real_valued_var_len_column(column_name,
                                default_value=None,
                                dtype=dtypes.float32,
                                normalizer=None,
                                is_sparse=False):
  """Creates a `_RealValuedVarLenColumn` for variable-length numeric data.

  Note, this is not integrated with any of the DNNEstimators, except the RNN
  ones DynamicRNNEstimator and the StateSavingRNNEstimator.

  It can either create a parsing config for a SparseTensor (with is_sparse=True)
  or a padded Tensor.
  The (dense_)shape of the result will be [batch_size, None], which can be used
  with is_sparse=False as input into an RNN (see DynamicRNNEstimator or
  StateSavingRNNEstimator) or with is_sparse=True as input into a tree (see
  gtflow).

  Use real_valued_column if the Feature has a fixed length. Use some
  SparseColumn for columns to be embedded / one-hot-encoded.

  Args:
    column_name: A string defining real valued column name.
    default_value: A scalar value compatible with dtype. Needs to be specified
      if is_sparse=False.
    dtype: Defines the type of values. Default value is tf.float32. Needs to
      be convertible to tf.float32.
    normalizer: If not None, a function that can be used to normalize the value
      of the real valued column after default_value is applied for parsing.
      Normalizer function takes the input tensor as its argument, and returns
      the output tensor. (e.g. lambda x: (x - 3.0) / 4.2). Note that for
      is_sparse=False, the normalizer will be run on the values of the
      `SparseTensor`.
    is_sparse: A boolean defining whether to create a SparseTensor or a Tensor.

  Returns:
    A _RealValuedSparseColumn.

  Raises:
    TypeError: if default_value is not a scalar value compatible with dtype.
    TypeError: if dtype is not convertible to tf.float32.
    ValueError: if default_value is None and is_sparse is False.
  """
  if not (dtype.is_integer or dtype.is_floating):
    raise TypeError("dtype must be convertible to float. "
                    "dtype: {}, column_name: {}".format(dtype, column_name))

  if default_value is None and not is_sparse:
    raise ValueError("default_value must be provided when is_sparse=False to "
                     "parse a padded Tensor. "
                     "column_name: {}".format(column_name))

  if isinstance(default_value, list):
    raise ValueError(
        "Only scalar default value. default_value: {}, column_name: {}".format(
            default_value, column_name))
  if default_value is not None:
    # Coerce the scalar default to the declared dtype family.
    if dtype.is_integer:
      default_value = int(default_value)
    elif dtype.is_floating:
      default_value = float(default_value)

  return _RealValuedVarLenColumn(column_name, default_value, dtype, normalizer,
                                 is_sparse)


class _RealValuedColumn(
    _FeatureColumn,
    fc_core._DenseColumn,  # pylint: disable=protected-access
    collections.namedtuple(
        "_RealValuedColumn",
        ["column_name", "dimension", "default_value", "dtype", "normalizer"])):
  """Represents a real valued feature column also known as continuous features.

  Instances of this class are immutable. The dictionary returned by
  InputBuilder contains a ("column_name", Tensor) pair with a Tensor shape of
  (batch_size, dimension).
""" def __new__(cls, column_name, dimension, default_value, dtype, normalizer): if default_value is not None: default_value = tuple(default_value) return super(_RealValuedColumn, cls).__new__(cls, column_name, dimension, default_value, dtype, normalizer) @property def name(self): return self.column_name @property def config(self): default_value = self.default_value if default_value is not None: default_value = list(default_value) return {self.column_name: parsing_ops.FixedLenFeature([self.dimension], self.dtype, default_value)} @property def key(self): """Returns a string which will be used as a key when we do sorting.""" return self._key_without_properties(["normalizer"]) @property def normalizer_fn(self): """Returns the function used to normalize the column.""" return self.normalizer def _normalized_input_tensor(self, input_tensor): """Returns the input tensor after custom normalization is applied.""" return (self.normalizer(input_tensor) if self.normalizer is not None else input_tensor) def insert_transformed_feature(self, columns_to_tensors): """Apply transformation and inserts it into columns_to_tensors. Args: columns_to_tensors: A mapping from feature columns to tensors. 'string' key means a base feature (not-transformed). It can have _FeatureColumn as a key too. That means that _FeatureColumn is already transformed. """ # Transform the input tensor according to the normalizer function. 
    input_tensor = self._normalized_input_tensor(columns_to_tensors[self.name])
    columns_to_tensors[self] = math_ops.to_float(input_tensor)

  # pylint: disable=unused-argument
  def _to_dnn_input_layer(self,
                          input_tensor,
                          weight_collections=None,
                          trainable=True,
                          output_rank=2):
    input_tensor = self._to_dense_tensor(input_tensor)
    # DNN layers operate on float32; cast lazily so already-float inputs are
    # passed through unchanged.
    if input_tensor.dtype != dtypes.float32:
      input_tensor = math_ops.to_float(input_tensor)
    return _reshape_real_valued_tensor(input_tensor, output_rank, self.name)

  def _to_dense_tensor(self, input_tensor):
    # Real valued columns are already dense.
    return input_tensor

  @property
  def _variable_shape(self):
    return tensor_shape.TensorShape([self.dimension])

  def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
    del weight_collections
    del trainable
    return inputs.get(self)

  def _transform_feature(self, inputs):
    return math_ops.to_float(
        self._normalized_input_tensor(inputs.get(self.name)))

  @property
  def _parse_example_spec(self):
    return self.config


def real_valued_column(column_name,
                       dimension=1,
                       default_value=None,
                       dtype=dtypes.float32,
                       normalizer=None):
  """Creates a `_RealValuedColumn` for dense numeric data.

  Args:
    column_name: A string defining real valued column name.
    dimension: An integer specifying dimension of the real valued column.
      The default is 1.
    default_value: A single value compatible with dtype or a list of values
      compatible with dtype which the column takes on during tf.Example parsing
      if data is missing. When dimension is not None, a default value of None
      will cause tf.parse_example to fail if an example does not contain this
      column. If a single value is provided, the same value will be applied as
      the default value for every dimension. If a list of values is provided,
      the length of the list should be equal to the value of `dimension`. Only
      scalar default value is supported in case dimension is not specified.
    dtype: defines the type of values. Default value is tf.float32. Must be a
      non-quantized, real integer or floating point type.
    normalizer: If not None, a function that can be used to normalize the value
      of the real valued column after default_value is applied for parsing.
      Normalizer function takes the input tensor as its argument, and returns
      the output tensor. (e.g. lambda x: (x - 3.0) / 4.2). Note that for
      variable length columns, the normalizer should expect an input_tensor of
      type `SparseTensor`.

  Returns:
    A _RealValuedColumn.

  Raises:
    TypeError: if dimension is not an int
    ValueError: if dimension is not a positive integer
    TypeError: if default_value is a list but its length is not equal to the
      value of `dimension`.
    TypeError: if default_value is not compatible with dtype.
    ValueError: if dtype is not convertible to tf.float32.
  """
  if dimension is None:
    raise TypeError("dimension must be an integer. Use the "
                    "_real_valued_var_len_column for variable length features."
                    "dimension: {}, column_name: {}".format(dimension,
                                                            column_name))
  if not isinstance(dimension, int):
    raise TypeError("dimension must be an integer. "
                    "dimension: {}, column_name: {}".format(dimension,
                                                            column_name))
  if dimension < 1:
    raise ValueError("dimension must be greater than 0. "
                     "dimension: {}, column_name: {}".format(dimension,
                                                             column_name))

  if not (dtype.is_integer or dtype.is_floating):
    raise ValueError("dtype must be convertible to float. "
                     "dtype: {}, column_name: {}".format(dtype, column_name))

  if default_value is None:
    return _RealValuedColumn(column_name, dimension, default_value, dtype,
                             normalizer)

  # Scalar defaults are broadcast to every dimension; the branches below
  # additionally coerce the value to the declared dtype family.
  if isinstance(default_value, int):
    if dtype.is_integer:
      default_value = ([default_value for _ in range(dimension)] if dimension
                       else [default_value])
      return _RealValuedColumn(column_name, dimension, default_value, dtype,
                               normalizer)
    if dtype.is_floating:
      default_value = float(default_value)
      default_value = ([default_value for _ in range(dimension)] if dimension
                       else [default_value])
      return _RealValuedColumn(column_name, dimension, default_value, dtype,
                               normalizer)

  if isinstance(default_value, float):
    if dtype.is_floating and (not dtype.is_integer):
      default_value = ([default_value for _ in range(dimension)] if dimension
                       else [default_value])
      return _RealValuedColumn(column_name, dimension, default_value, dtype,
                               normalizer)

  if isinstance(default_value, list):
    if len(default_value) != dimension:
      raise ValueError(
          "The length of default_value must be equal to dimension. "
          "default_value: {}, dimension: {}, column_name: {}".format(
              default_value, dimension, column_name))
    # Check if the values in the list are all integers or are convertible to
    # floats.
    is_list_all_int = True
    is_list_all_float = True
    for v in default_value:
      if not isinstance(v, int):
        is_list_all_int = False
      if not (isinstance(v, float) or isinstance(v, int)):
        is_list_all_float = False
    if is_list_all_int:
      if dtype.is_integer:
        return _RealValuedColumn(column_name, dimension, default_value, dtype,
                                 normalizer)
      elif dtype.is_floating:
        default_value = [float(v) for v in default_value]
        return _RealValuedColumn(column_name, dimension, default_value, dtype,
                                 normalizer)
    if is_list_all_float:
      if dtype.is_floating and (not dtype.is_integer):
        default_value = [float(v) for v in default_value]
        return _RealValuedColumn(column_name, dimension, default_value, dtype,
                                 normalizer)

  raise TypeError("default_value must be compatible with dtype. "
                  "default_value: {}, dtype: {}, column_name: {}".format(
                      default_value, dtype, column_name))


class _BucketizedColumn(
    _FeatureColumn,
    fc_core._CategoricalColumn,  # pylint: disable=protected-access
    fc_core._DenseColumn,  # pylint: disable=protected-access
    collections.namedtuple("_BucketizedColumn",
                           ["source_column", "boundaries"])):
  """Represents a bucketization transformation also known as binning.

  Instances of this class are immutable. Values in `source_column` will be
  bucketized based on `boundaries`.
  For example, if the inputs are:
      boundaries = [0, 10, 100]
      source_column = [[-5], [150], [10], [0], [4], [19]]

  then the bucketized feature will be:
      output = [[0], [3], [2], [1], [1], [2]]

  Attributes:
    source_column: A _RealValuedColumn defining dense column.
    boundaries: A list or tuple of floats specifying the boundaries. It has to
      be sorted. [a, b, c] defines following buckets: (-inf., a), [a, b),
      [b, c), [c, inf.)

  Raises:
    ValueError: if 'boundaries' is empty or not sorted.
  """

  def __new__(cls, source_column, boundaries):
    if not isinstance(source_column, _RealValuedColumn):
      raise TypeError("source_column must be an instance of _RealValuedColumn. "
                      "source_column: {}".format(source_column))

    if source_column.dimension is None:
      raise ValueError("source_column must have a defined dimension. "
                       "source_column: {}".format(source_column))

    if (not isinstance(boundaries, list) and
        not isinstance(boundaries, tuple)) or not boundaries:
      raise ValueError("boundaries must be a non-empty list or tuple. "
                       "boundaries: {}".format(boundaries))

    # We allow bucket boundaries to be monotonically increasing
    # (ie a[i+1] >= a[i]). When two bucket boundaries are the same, we
    # de-duplicate.
    sanitized_boundaries = []
    for i in range(len(boundaries) - 1):
      if boundaries[i] == boundaries[i + 1]:
        continue
      elif boundaries[i] < boundaries[i + 1]:
        sanitized_boundaries.append(boundaries[i])
      else:
        raise ValueError("boundaries must be a sorted list. "
                         "boundaries: {}".format(boundaries))
    sanitized_boundaries.append(boundaries[len(boundaries) - 1])
    return super(_BucketizedColumn, cls).__new__(cls, source_column,
                                                 tuple(sanitized_boundaries))

  @property
  def name(self):
    return "{}_bucketized".format(self.source_column.name)

  @property
  def length(self):
    """Returns total number of buckets."""
    # n boundaries partition the line into n + 1 buckets.
    return len(self.boundaries) + 1

  @property
  def config(self):
    return self.source_column.config

  @property
  def key(self):
    """Returns a string which will be used as a key when we do sorting."""
    return "{}".format(self)

  # pylint: disable=unused-argument
  def _to_dnn_input_layer(self,
                          input_tensor,
                          weight_collections=None,
                          trainable=True,
                          output_rank=2):
    if output_rank != 2:
      raise ValueError("BucketizedColumn currently only supports output_rank=2")
    # One-hot encode each bucket id, then flatten all source dimensions into a
    # single [batch, length * dimension] dense layer input.
    return array_ops.reshape(
        array_ops.one_hot(
            math_ops.to_int64(input_tensor), self.length, 1., 0.,
            name="one_hot"),
        [-1, self.length * self.source_column.dimension],
        name="reshape")

  def to_sparse_tensor(self, input_tensor):
    """Creates a SparseTensor from the bucketized Tensor."""
    dimension = self.source_column.dimension
    batch_size = array_ops.shape(input_tensor, name="shape")[0]

    if dimension > 1:
      i1 = array_ops.reshape(
          array_ops.tile(
              array_ops.expand_dims(
                  math_ops.range(0, batch_size), 1, name="expand_dims"),
              [1, dimension],
              name="tile"), [-1],
          name="reshape")
      i2 = array_ops.tile(
          math_ops.range(0, dimension), [batch_size], name="tile")
      # Flatten the bucket indices and unique them across dimensions
      # E.g. 2nd dimension indices will range from k to 2*k-1 with k buckets
      bucket_indices = array_ops.reshape(
          input_tensor, [-1], name="reshape") + self.length * i2
    else:
      # Simpler indices when dimension=1
      i1 = math_ops.range(0, batch_size)
      i2 = array_ops.zeros([batch_size], dtype=dtypes.int32, name="zeros")
      bucket_indices = array_ops.reshape(input_tensor, [-1], name="reshape")

    indices = math_ops.to_int64(array_ops.transpose(array_ops.stack((i1, i2))))
    shape = math_ops.to_int64(array_ops.stack([batch_size, dimension]))

    sparse_id_values = sparse_tensor_py.SparseTensor(
        indices, bucket_indices, shape)
    return sparse_id_values

  def _wide_embedding_lookup_arguments(self, input_tensor):
    return _LinearEmbeddingLookupArguments(
        input_tensor=self.to_sparse_tensor(input_tensor),
        weight_tensor=None,
        vocab_size=self.length * self.source_column.dimension,
        initializer=init_ops.zeros_initializer(),
        combiner="sum")

  def _transform_feature(self, inputs):
    """Handles cross transformation."""
    # Bucketize the source column.
return bucketization_op.bucketize( inputs.get(self.source_column), boundaries=list(self.boundaries), name="bucketize") def insert_transformed_feature(self, columns_to_tensors): """Handles sparse column to id conversion.""" columns_to_tensors[self] = self._transform_feature( _LazyBuilderByColumnsToTensor(columns_to_tensors)) @property def _parse_example_spec(self): return self.config @property def _num_buckets(self): return self.length * self.source_column.dimension def _get_sparse_tensors(self, inputs, weight_collections=None, trainable=None): del weight_collections del trainable return fc_core._CategoricalColumn.IdWeightPair( # pylint: disable=protected-access self.to_sparse_tensor(inputs.get(self)), None) @property def _variable_shape(self): return tensor_shape.TensorShape( [self.length * self.source_column.dimension]) def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None): return self._to_dnn_input_layer( inputs.get(self), weight_collections, trainable) def bucketized_column(source_column, boundaries): """Creates a _BucketizedColumn for discretizing dense input. Args: source_column: A _RealValuedColumn defining dense column. boundaries: A list or tuple of floats specifying the boundaries. It has to be sorted. Returns: A _BucketizedColumn. Raises: ValueError: if 'boundaries' is empty or not sorted. """ return _BucketizedColumn(source_column, boundaries) class _CrossedColumn( _FeatureColumn, fc_core._CategoricalColumn, # pylint: disable=protected-access collections.namedtuple("_CrossedColumn", [ "columns", "hash_bucket_size", "hash_key", "combiner", "ckpt_to_load_from", "tensor_name_in_ckpt" ])): """Represents a cross transformation also known as conjunction or combination. Instances of this class are immutable. It crosses given `columns`. Crossed column output will be hashed to hash_bucket_size. 
Conceptually, transformation can be thought as: Hash(cartesian product of features in columns) % `hash_bucket_size` For example, if the columns are SparseTensor referred by first column: shape = [2, 2] [0, 0]: "a" [1, 0]: "b" [1, 1]: "c" SparseTensor referred by second column: : shape = [2, 1] [0, 0]: "d" [1, 0]: "e" then crossed feature will look like: shape = [2, 2] [0, 0]: Hash64("d", Hash64("a")) % hash_bucket_size [1, 0]: Hash64("e", Hash64("b")) % hash_bucket_size [1, 1]: Hash64("e", Hash64("c")) % hash_bucket_size Attributes: columns: An iterable of _FeatureColumn. Items can be an instance of _SparseColumn, _CrossedColumn, or _BucketizedColumn. hash_bucket_size: An int that is > 1. The number of buckets. combiner: A string specifying how to reduce if there are multiple entries in a single row. Currently "mean", "sqrtn" and "sum" are supported, with "sum" the default. "sqrtn" often achieves good accuracy, in particular with bag-of-words columns. Each of this can be thought as example level normalizations on the column:: * "sum": do not normalize * "mean": do l1 normalization * "sqrtn": do l2 normalization For more information: `tf.embedding_lookup_sparse`. ckpt_to_load_from: (Optional). String representing checkpoint name/pattern to restore the column weights. Required if `tensor_name_in_ckpt` is not None. tensor_name_in_ckpt: (Optional). Name of the `Tensor` in the provided checkpoint from which to restore the column weights. Required if `ckpt_to_load_from` is not None. Raises: TypeError: if all items in columns are not an instance of _SparseColumn, _CrossedColumn, or _BucketizedColumn. ValueError: if hash_bucket_size is not > 1 or len(columns) is not > 1. Also, if only one of `ckpt_to_load_from` and `tensor_name_in_ckpt` is specified. 
""" @staticmethod def _assert_is_crossable(column): if isinstance(column, (_SparseColumn, _CrossedColumn, _BucketizedColumn)): return raise TypeError("columns must be a set of _SparseColumn, " "_CrossedColumn, or _BucketizedColumn instances. " "(column {} is a {})".format(column, column.__class__.__name__)) def __new__(cls, columns, hash_bucket_size, hash_key, combiner="sum", ckpt_to_load_from=None, tensor_name_in_ckpt=None): for column in columns: _CrossedColumn._assert_is_crossable(column) if len(columns) < 2: raise ValueError("columns must contain at least 2 elements. " "columns: {}".format(columns)) if hash_bucket_size < 2: raise ValueError("hash_bucket_size must be at least 2. " "hash_bucket_size: {}".format(hash_bucket_size)) if (ckpt_to_load_from is None) != (tensor_name_in_ckpt is None): raise ValueError("Must specify both `ckpt_to_load_from` and " "`tensor_name_in_ckpt` or none of them.") sorted_columns = sorted( [column for column in columns], key=lambda column: column.name) return super(_CrossedColumn, cls).__new__(cls, tuple(sorted_columns), hash_bucket_size, hash_key, combiner, ckpt_to_load_from, tensor_name_in_ckpt) @property def name(self): sorted_names = sorted([column.name for column in self.columns]) return "_X_".join(sorted_names) @property def config(self): config = {} for column in self.columns: config.update(_get_feature_config(column)) return config @property def length(self): """Returns total number of buckets.""" return self.hash_bucket_size @property def key(self): """Returns a string which will be used as a key when we do sorting.""" return "{}".format(self) def id_tensor(self, input_tensor): """Returns the id tensor from the given transformed input_tensor.""" return input_tensor def weight_tensor(self, input_tensor): """Returns the weight tensor from the given transformed input_tensor.""" del input_tensor return None def _to_dnn_input_layer(self, input_tensor, weight_collections=None, trainable=True, output_rank=2): del input_tensor del 
weight_collections del trainable del output_rank raise ValueError("CrossedColumn is not supported in DNN. " "Please use embedding_column. column: {}".format(self)) def _checkpoint_path(self): if self.ckpt_to_load_from is not None: return self.ckpt_to_load_from, self.tensor_name_in_ckpt return None def _wide_embedding_lookup_arguments(self, input_tensor): return _LinearEmbeddingLookupArguments( input_tensor=input_tensor, weight_tensor=None, vocab_size=self.length, initializer=init_ops.zeros_initializer(), combiner=self.combiner) def _transform_feature(self, inputs): """Handles cross transformation.""" def _collect_leaf_level_columns(cross): """Collects base columns contained in the cross.""" leaf_level_columns = [] for c in cross.columns: if isinstance(c, _CrossedColumn): leaf_level_columns.extend(_collect_leaf_level_columns(c)) else: leaf_level_columns.append(c) return leaf_level_columns feature_tensors = [] for c in _collect_leaf_level_columns(self): if isinstance(c, _SparseColumn): feature_tensors.append(inputs.get(c.name)) else: if isinstance(c, _BucketizedColumn): feature_tensors.append(c.to_sparse_tensor(inputs.get(c))) else: feature_tensors.append(inputs.get(c)) return sparse_feature_cross_op.sparse_feature_cross( feature_tensors, hashed_output=True, num_buckets=self.hash_bucket_size, hash_key=self.hash_key, name="cross") def insert_transformed_feature(self, columns_to_tensors): """Handles sparse column to id conversion.""" columns_to_tensors[self] = self._transform_feature( _LazyBuilderByColumnsToTensor(columns_to_tensors)) @property def _parse_example_spec(self): return self.config @property def _num_buckets(self): return self.length def _get_sparse_tensors(self, inputs, weight_collections=None, trainable=None): del weight_collections del trainable return fc_core._CategoricalColumn.IdWeightPair(inputs.get(self), None) # pylint: disable=protected-access class _LazyBuilderByColumnsToTensor(object): def __init__(self, columns_to_tensors): 
self._columns_to_tensors = columns_to_tensors def get(self, key): """Gets the transformed feature column.""" if key in self._columns_to_tensors: return self._columns_to_tensors[key] if isinstance(key, str): raise ValueError( "features dictionary doesn't contain key ({})".format(key)) if not isinstance(key, _FeatureColumn): raise TypeError('"key" must be either a "str" or "_FeatureColumn". ' "Provided: {}".format(key)) key.insert_transformed_feature(self._columns_to_tensors) return self._columns_to_tensors[key] def crossed_column(columns, hash_bucket_size, combiner="sum", ckpt_to_load_from=None, tensor_name_in_ckpt=None, hash_key=None): """Creates a _CrossedColumn for performing feature crosses. Args: columns: An iterable of _FeatureColumn. Items can be an instance of _SparseColumn, _CrossedColumn, or _BucketizedColumn. hash_bucket_size: An int that is > 1. The number of buckets. combiner: A string specifying how to reduce if there are multiple entries in a single row. Currently "mean", "sqrtn" and "sum" are supported, with "sum" the default. "sqrtn" often achieves good accuracy, in particular with bag-of-words columns. Each of this can be thought as example level normalizations on the column:: * "sum": do not normalize * "mean": do l1 normalization * "sqrtn": do l2 normalization For more information: `tf.embedding_lookup_sparse`. ckpt_to_load_from: (Optional). String representing checkpoint name/pattern to restore the column weights. Required if `tensor_name_in_ckpt` is not None. tensor_name_in_ckpt: (Optional). Name of the `Tensor` in the provided checkpoint from which to restore the column weights. Required if `ckpt_to_load_from` is not None. hash_key: Specify the hash_key that will be used by the `FingerprintCat64` function to combine the crosses fingerprints on SparseFeatureCrossOp (optional). Returns: A _CrossedColumn. 
Raises: TypeError: if any item in columns is not an instance of _SparseColumn, _CrossedColumn, or _BucketizedColumn, or hash_bucket_size is not an int. ValueError: if hash_bucket_size is not > 1 or len(columns) is not > 1. """ return _CrossedColumn( columns, hash_bucket_size, hash_key, combiner=combiner, ckpt_to_load_from=ckpt_to_load_from, tensor_name_in_ckpt=tensor_name_in_ckpt) class DataFrameColumn(_FeatureColumn, collections.namedtuple("DataFrameColumn", ["column_name", "series"])): """Represents a feature column produced from a `DataFrame`. Instances of this class are immutable. A `DataFrame` column may be dense or sparse, and may have any shape, with the constraint that dimension 0 is batch_size. Args: column_name: a name for this column series: a `Series` to be wrapped, which has already had its base features substituted with `PredefinedSeries`. """ def __new__(cls, column_name, series): return super(DataFrameColumn, cls).__new__(cls, column_name, series) @property def name(self): return self.column_name @property def config(self): return self.series.required_base_features() @property def key(self): """Returns a string which will be used as a key when we do sorting.""" return self.name def insert_transformed_feature(self, columns_to_tensors): # The cache must already contain mappings from the expected base feature # names to Tensors. # Passing columns_to_tensors as the cache here means that multiple outputs # of the transform will be cached, keyed by the repr of their associated # TransformedSeries. # The specific requested output ends up in columns_to_tensors twice: once # keyed by the TransformedSeries repr, and once keyed by this # DataFrameColumn instance. 
columns_to_tensors[self] = self.series.build(columns_to_tensors) # pylint: disable=unused-argument def _to_dnn_input_layer(self, input_tensor, weight_collections=None, trainable=True, output_rank=2): if input_tensor.dtype != dtypes.float32: input_tensor = math_ops.to_float(input_tensor) return _reshape_real_valued_tensor(input_tensor, output_rank, self.name) def _to_dense_tensor(self, input_tensor): return self._to_dnn_input_layer(input_tensor) def __eq__(self, other): if isinstance(other, self.__class__): return self.__dict__ == other.__dict__ else: return False def __ne__(self, other): return not self.__eq__(other) def _get_feature_config(feature_column): """Returns configuration for the base feature defined in feature_column.""" if not isinstance(feature_column, _FeatureColumn): raise TypeError( "feature_columns should only contain instances of _FeatureColumn. " "Given column is {}".format(feature_column)) if isinstance(feature_column, (_SparseColumn, _WeightedSparseColumn, _EmbeddingColumn, _RealValuedColumn, _RealValuedVarLenColumn, _BucketizedColumn, _CrossedColumn, _OneHotColumn, _ScatteredEmbeddingColumn)): return feature_column.config raise TypeError("Not supported _FeatureColumn type. " "Given column is {}".format(feature_column)) def create_feature_spec_for_parsing(feature_columns): """Helper that prepares features config from input feature_columns. The returned feature config can be used as arg 'features' in tf.parse_example. Typical usage example: ```python # Define features and transformations feature_a = sparse_column_with_vocabulary_file(...) feature_b = real_valued_column(...) feature_c_bucketized = bucketized_column(real_valued_column("feature_c"), ...) feature_a_x_feature_c = crossed_column( columns=[feature_a, feature_c_bucketized], ...) 
feature_columns = set( [feature_b, feature_c_bucketized, feature_a_x_feature_c]) batch_examples = tf.parse_example( serialized=serialized_examples, features=create_feature_spec_for_parsing(feature_columns)) ``` For the above example, create_feature_spec_for_parsing would return the dict: { "feature_a": parsing_ops.VarLenFeature(tf.string), "feature_b": parsing_ops.FixedLenFeature([1], dtype=tf.float32), "feature_c": parsing_ops.FixedLenFeature([1], dtype=tf.float32) } Args: feature_columns: An iterable containing all the feature columns. All items should be instances of classes derived from _FeatureColumn, unless feature_columns is a dict -- in which case, this should be true of all values in the dict. Returns: A dict mapping feature keys to FixedLenFeature or VarLenFeature values. """ if isinstance(feature_columns, dict): feature_columns = feature_columns.values() features_config = {} for column in feature_columns: features_config.update(_get_feature_config(column)) return features_config def _create_sequence_feature_spec_for_parsing(sequence_feature_columns, allow_missing_by_default=False): """Prepares a feature spec for parsing `tf.SequenceExample`s. Args: sequence_feature_columns: an iterable containing all the feature columns. All items should be instances of classes derived from `_FeatureColumn`. allow_missing_by_default: whether to set `allow_missing=True` by default for `FixedLenSequenceFeature`s. Returns: A dict mapping feature keys to `FixedLenSequenceFeature` or `VarLenFeature`. """ feature_spec = create_feature_spec_for_parsing(sequence_feature_columns) sequence_feature_spec = {} for key, feature in feature_spec.items(): if (isinstance(feature, parsing_ops.VarLenFeature) or isinstance(feature, parsing_ops.FixedLenSequenceFeature)): sequence_feature = feature elif isinstance(feature, parsing_ops.FixedLenFeature): default_is_set = feature.default_value is not None if default_is_set: logging.warning( 'Found default value {} for feature "{}". 
Ignoring this value and ' 'setting `allow_missing=True` instead.'. format(feature.default_value, key)) sequence_feature = parsing_ops.FixedLenSequenceFeature( shape=feature.shape, dtype=feature.dtype, allow_missing=(allow_missing_by_default or default_is_set)) else: raise TypeError( "Unsupported feature type: {}".format(type(feature).__name__)) sequence_feature_spec[key] = sequence_feature return sequence_feature_spec def make_place_holder_tensors_for_base_features(feature_columns): """Returns placeholder tensors for inference. Args: feature_columns: An iterable containing all the feature columns. All items should be instances of classes derived from _FeatureColumn. Returns: A dict mapping feature keys to SparseTensors (sparse columns) or placeholder Tensors (dense columns). """ # Get dict mapping features to FixedLenFeature or VarLenFeature values. dict_for_parse_example = create_feature_spec_for_parsing(feature_columns) placeholders = {} for column_name, column_type in dict_for_parse_example.items(): if isinstance(column_type, parsing_ops.VarLenFeature): # Sparse placeholder for sparse tensors. placeholders[column_name] = array_ops.sparse_placeholder( column_type.dtype, name="Placeholder_{}".format(column_name)) else: # Simple placeholder for dense tensors. placeholders[column_name] = array_ops.placeholder( column_type.dtype, shape=(None, column_type.shape[0]), name="Placeholder_{}".format(column_name)) return placeholders class _SparseIdLookupConfig( collections.namedtuple("_SparseIdLookupConfig", ["vocabulary_file", "keys", "num_oov_buckets", "vocab_size", "default_value"])): """Defines lookup configuration for a sparse feature. An immutable object defines lookup table configuration used by tf.feature_to_id_v2. Attributes: vocabulary_file: The vocabulary filename. vocabulary_file cannot be combined with keys. keys: A 1-D string iterable that specifies the mapping of strings to indices. It means a feature in keys will map to it's index in keys. 
num_oov_buckets: The number of out-of-vocabulary buckets. If zero all out of vocabulary features will be ignored. vocab_size: Number of the elements in the vocabulary. default_value: The value to use for out-of-vocabulary feature values. Defaults to -1. """ def __new__(cls, vocabulary_file=None, keys=None, num_oov_buckets=0, vocab_size=None, default_value=-1): return super(_SparseIdLookupConfig, cls).__new__(cls, vocabulary_file, keys, num_oov_buckets, vocab_size, default_value)
apache-2.0
LeZhang2016/openthread
tests/toranj/wpan.py
2
56145
#!/usr/bin/env python # # Copyright (c) 2018, The OpenThread Authors. # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # 3. Neither the name of the copyright holder nor the # names of its contributors may be used to endorse or promote products # derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. 
# import sys import os import time import re import random import weakref import subprocess import socket import asyncore import inspect #---------------------------------------------------------------------------------------------------------------------- # wpantund properties WPAN_STATE = 'NCP:State' WPAN_NAME = 'Network:Name' WPAN_PANID = 'Network:PANID' WPAN_XPANID = 'Network:XPANID' WPAN_KEY = 'Network:Key' WPAN_KEY_INDEX = 'Network:KeyIndex' WPAN_CHANNEL = 'NCP:Channel' WPAN_HW_ADDRESS = 'NCP:HardwareAddress' WPAN_EXT_ADDRESS = 'NCP:ExtendedAddress' WPAN_POLL_INTERVAL = 'NCP:SleepyPollInterval' WPAN_NODE_TYPE = 'Network:NodeType' WPAN_ROLE = 'Network:Role' WPAN_PARTITION_ID = 'Network:PartitionId' WPAN_NCP_VERSION = 'NCP:Version' WPAN_NCP_MCU_POWER_STATE = "NCP:MCUPowerState" WPAN_NETWORK_ALLOW_JOIN = 'com.nestlabs.internal:Network:AllowingJoin' WPAN_NETWORK_PASSTHRU_PORT = 'com.nestlabs.internal:Network:PassthruPort' WPAN_IP6_LINK_LOCAL_ADDRESS = "IPv6:LinkLocalAddress" WPAN_IP6_MESH_LOCAL_ADDRESS = "IPv6:MeshLocalAddress" WPAN_IP6_MESH_LOCAL_PREFIX = "IPv6:MeshLocalPrefix" WPAN_IP6_ALL_ADDRESSES = "IPv6:AllAddresses" WPAN_IP6_MULTICAST_ADDRESSES = "IPv6:MulticastAddresses" WPAN_THREAD_RLOC16 = "Thread:RLOC16" WPAN_THREAD_ROUTER_ID = "Thread:RouterID" WPAN_THREAD_LEADER_ADDRESS = "Thread:Leader:Address" WPAN_THREAD_LEADER_ROUTER_ID = "Thread:Leader:RouterID" WPAN_THREAD_LEADER_WEIGHT = "Thread:Leader:Weight" WPAN_THREAD_LEADER_LOCAL_WEIGHT = "Thread:Leader:LocalWeight" WPAN_THREAD_LEADER_NETWORK_DATA = "Thread:Leader:NetworkData" WPAN_THREAD_STABLE_LEADER_NETWORK_DATA = "Thread:Leader:StableNetworkData" WPAN_THREAD_NETWORK_DATA = "Thread:NetworkData" WPAN_THREAD_CHILD_TABLE = "Thread:ChildTable" WPAN_THREAD_CHILD_TABLE_ASVALMAP = "Thread:ChildTable:AsValMap" WPAN_THREAD_CHILD_TABLE_ADDRESSES = "Thread:ChildTable:Addresses" WPAN_THREAD_NEIGHBOR_TABLE = "Thread:NeighborTable" WPAN_THREAD_NEIGHBOR_TABLE_ASVALMAP = "Thread:NeighborTable:AsValMap" 
WPAN_THREAD_NEIGHBOR_TABLE_ERR_RATES = "Thread:NeighborTable:ErrorRates" WPAN_THREAD_NEIGHBOR_TABLE_ERR_RATES_AVVALMAP = "Thread:NeighborTable:ErrorRates:AsValMap" WPAN_THREAD_ROUTER_TABLE = "Thread:RouterTable" WPAN_THREAD_ROUTER_TABLE_ASVALMAP = "Thread:RouterTable:AsValMap" WPAN_THREAD_CHILD_TIMEOUT = "Thread:ChildTimeout" WPAN_THREAD_PARENT = "Thread:Parent" WPAN_THREAD_PARENT_ASVALMAP = "Thread:Parent:AsValMap" WPAN_THREAD_NETWORK_DATA_VERSION = "Thread:NetworkDataVersion" WPAN_THREAD_STABLE_NETWORK_DATA = "Thread:StableNetworkData" WPAN_THREAD_STABLE_NETWORK_DATA_VERSION = "Thread:StableNetworkDataVersion" WPAN_THREAD_PREFERRED_ROUTER_ID = "Thread:PreferredRouterID" WPAN_THREAD_COMMISSIONER_ENABLED = "Thread:Commissioner:Enabled" WPAN_THREAD_DEVICE_MODE = "Thread:DeviceMode" WPAN_THREAD_OFF_MESH_ROUTES = "Thread:OffMeshRoutes" WPAN_THREAD_ON_MESH_PREFIXES = "Thread:OnMeshPrefixes" WPAN_THREAD_ROUTER_ROLE_ENABLED = "Thread:RouterRole:Enabled" WPAN_THREAD_CONFIG_FILTER_RLOC_ADDRESSES = "Thread:Config:FilterRLOCAddresses" WPAN_THREAD_ROUTER_UPGRADE_THRESHOLD = "Thread:RouterUpgradeThreshold" WPAN_THREAD_ROUTER_DOWNGRADE_THRESHOLD = "Thread:RouterDowngradeThreshold" WPAN_THREAD_ACTIVE_DATASET = "Thread:ActiveDataset" WPAN_THREAD_ACTIVE_DATASET_ASVALMAP = "Thread:ActiveDataset:AsValMap" WPAN_THREAD_PENDING_DATASET = "Thread:PendingDataset" WPAN_THREAD_PENDING_DATASET_ASVALMAP = "Thread:PendingDataset:AsValMap" WPAN_THREAD_ADDRESS_CACHE_TABLE = "Thread:AddressCacheTable" WPAN_THREAD_ADDRESS_CACHE_TABLE_ASVALMAP = "Thread:AddressCacheTable:AsValMap" WPAN_OT_LOG_LEVEL = "OpenThread:LogLevel" WPAN_OT_STEERING_DATA_ADDRESS = "OpenThread:SteeringData:Address" WPAN_OT_STEERING_DATA_SET_WHEN_JOINABLE = "OpenThread:SteeringData:SetWhenJoinable" WPAN_OT_MSG_BUFFER_COUNTERS = "OpenThread:MsgBufferCounters" WPAN_OT_MSG_BUFFER_COUNTERS_AS_STRING = "OpenThread:MsgBufferCounters:AsString" WPAN_OT_DEBUG_TEST_ASSERT = "OpenThread:Debug:TestAssert" WPAN_OT_DEBUG_TEST_WATCHDOG = 
"OpenThread:Debug:TestWatchdog" WPAN_NCP_COUNTER_ALL_MAC = "NCP:Counter:AllMac" WPAN_NCP_COUNTER_ALL_MAC_ASVALMAP = "NCP:Counter:AllMac:AsValMap" WPAN_MAC_WHITELIST_ENABLED = "MAC:Whitelist:Enabled" WPAN_MAC_WHITELIST_ENTRIES = "MAC:Whitelist:Entries" WPAN_MAC_WHITELIST_ENTRIES_ASVALMAP = "MAC:Whitelist:Entries:AsValMap" WPAN_MAC_BLACKLIST_ENABLED = "MAC:Blacklist:Enabled" WPAN_MAC_BLACKLIST_ENTRIES = "MAC:Blacklist:Entries" WPAN_MAC_BLACKLIST_ENTRIES_ASVALMAP = "MAC:Blacklist:Entries:AsValMap" WPAN_CHILD_SUPERVISION_INTERVAL = "ChildSupervision:Interval" WPAN_CHILD_SUPERVISION_CHECK_TIMEOUT = "ChildSupervision:CheckTimeout" WPAN_JAM_DETECTION_STATUS = "JamDetection:Status" WPAN_JAM_DETECTION_ENABLE = "JamDetection:Enable" WPAN_JAM_DETECTION_RSSI_THRESHOLD = "JamDetection:RssiThreshold" WPAN_JAM_DETECTION_WINDOW = "JamDetection:Window" WPAN_JAM_DETECTION_BUSY_PERIOD = "JamDetection:BusyPeriod" WPAN_JAM_DETECTION_DEBUG_HISTORY_BITMAP = "JamDetection:Debug:HistoryBitmap" WPAN_CHANNEL_MONITOR_SAMPLE_INTERVAL = "ChannelMonitor:SampleInterval" WPAN_CHANNEL_MONITOR_RSSI_THRESHOLD = "ChannelMonitor:RssiThreshold" WPAN_CHANNEL_MONITOR_SAMPLE_WINDOW = "ChannelMonitor:SampleWindow" WPAN_CHANNEL_MONITOR_SAMPLE_COUNT = "ChannelMonitor:SampleCount" WPAN_CHANNEL_MONITOR_CHANNEL_QUALITY = "ChannelMonitor:ChannelQuality" WPAN_CHANNEL_MONITOR_CHANNEL_QUALITY_ASVALMAP = "ChannelMonitor:ChannelQuality:AsValMap" WPAN_CHANNEL_MANAGER_NEW_CHANNEL = "ChannelManager:NewChannel" WPAN_CHANNEL_MANAGER_DELAY = "ChannelManager:Delay" WPAN_CHANNEL_MANAGER_CHANNEL_SELECT = "ChannelManager:ChannelSelect" WPAN_CHANNEL_MANAGER_AUTO_SELECT_ENABLED = "ChannelManager:AutoSelect:Enabled" WPAN_CHANNEL_MANAGER_AUTO_SELECT_INTERVAL = "ChannelManager:AutoSelect:Interval" WPAN_CHANNEL_MANAGER_SUPPORTED_CHANNEL_MASK = "ChannelManager:SupportedChannelMask" WPAN_CHANNEL_MANAGER_FAVORED_CHANNEL_MASK = "ChannelManager:FavoredChannelMask" 
#---------------------------------------------------------------------------------------------------------------------- # Valid state values STATE_UNINITIALIZED = '"uninitialized"' STATE_FAULT = '"uninitialized:fault"' STATE_UPGRADING = '"uninitialized:upgrading"' STATE_DEEP_SLEEP = '"offline:deep-sleep"' STATE_OFFLINE = '"offline"' STATE_COMMISSIONED = '"offline:commissioned"' STATE_ASSOCIATING = '"associating"' STATE_CREDENTIALS_NEEDED = '"associating:credentials-needed"' STATE_ASSOCIATED = '"associated"' STATE_ISOLATED = '"associated:no-parent"' STATE_NETWAKE_ASLEEP = '"associated:netwake-asleep"' STATE_NETWAKE_WAKING = '"associated:netwake-waking"' #----------------------------------------------------------------------------------------------------------------------- # MCU Power state from `WPAN_NCP_MCU_POWER_STATE` MCU_POWER_STATE_ON = '"on"' MCU_POWER_STATE_LOW_POWER = '"low-power"' MCU_POWER_STATE_OFF = '"off"' #----------------------------------------------------------------------------------------------------------------------- # Node types (from `WPAN_NODE_TYPE` property) NODE_TYPE_UNKNOWN = '"unknown"' NODE_TYPE_LEADER = '"leader"' NODE_TYPE_ROUTER = '"router"' NODE_TYPE_END_DEVICE = '"end-device"' NODE_TYPE_SLEEPY_END_DEVICE = '"sleepy-end-device"' NODE_TYPE_COMMISSIONER = '"commissioner"' NODE_TYPE_NEST_LURKER = '"nl-lurker"' #----------------------------------------------------------------------------------------------------------------------- # Node types used by `Node.join()` JOIN_TYPE_ROUTER = 'r' JOIN_TYPE_END_DEVICE = 'e' JOIN_TYPE_SLEEPY_END_DEVICE = 's' #----------------------------------------------------------------------------------------------------------------------- # Bit Flags for Thread Device Mode `WPAN_THREAD_DEVICE_MODE` THREAD_MODE_FLAG_FULL_NETWORK_DATA = (1 << 0) THREAD_MODE_FLAG_FULL_THREAD_DEV = (1 << 1) THREAD_MODE_FLAG_SECURE_DATA_REQUEST = (1 << 2) THREAD_MODE_FLAG_RX_ON_WHEN_IDLE = (1 << 3) 
#----------------------------------------------------------------------------------------------------------------------- def _log(text, new_line=True, flush=True): sys.stdout.write(text) if new_line: sys.stdout.write('\n') if flush: sys.stdout.flush() #----------------------------------------------------------------------------------------------------------------------- # Node class class Node(object): """ A wpantund OT NCP instance """ _VERBOSE = False # defines the default verbosity setting (can be changed per `Node`) _SPEED_UP_FACTOR = 1 # defines the default time speed up factor # path to `wpantund`, `wpanctl`, `ot-ncp-ftd`,`ot-ncp` and `ot-ncp-radio` _WPANTUND = '/usr/local/sbin/wpantund' _WPANCTL = '/usr/local/bin/wpanctl' _OT_NCP_FTD = '../../examples/apps/ncp/ot-ncp-ftd' _OT_NCP_FTD_POSIX_APP = '../../src/posix/ot-ncp' _OT_NCP_RADIO = '../../examples/apps/ncp/ot-ncp-radio' # Environment variable used to determine how to run OpenThread # If set to 1, then posix-app (`ot-ncp`) is used along with a posix RCP `ot-ncp-radio`. # Otherwise, the posix NCP `ot-ncp-ftd` is used _POSIX_APP_ENV_VAR = 'TORANJ_POSIX_APP_RCP_MODEL' _TUND_LOG_TO_FILE = True # determines if the wpantund logs are saved in file or sent to stdout _TUND_LOG_FNAME = 'wpantund-logs' # name of wpantund log file (if # name of wpantund _TUND_LOG_TO_FILE is True) # interface name _INTFC_NAME_PREFIX = 'utun' if sys.platform == 'darwin' else 'wpan' _START_INDEX = 4 if sys.platform == 'darwin' else 1 _cur_index = _START_INDEX _all_nodes = weakref.WeakSet() def __init__(self, verbose=_VERBOSE): """Creates a new `Node` instance""" index = Node._cur_index Node._cur_index += 1 self._index = index self._interface_name = self._INTFC_NAME_PREFIX + str(index) self._verbose = verbose # Check if env variable `TORANJ_POSIX_APP_RCP_MODEL` is defined # and use it to determine if to use operate in "posix-ncp-app". 
if self._POSIX_APP_ENV_VAR in os.environ: use_posix_app_with_rcp = (os.environ[self._POSIX_APP_ENV_VAR] in ['1', 'yes']) else: use_posix_app_with_rcp = False if use_posix_app_with_rcp: ncp_socket_path = 'system:{} -s {} {} {}'.format(self._OT_NCP_FTD_POSIX_APP, self._SPEED_UP_FACTOR, self._OT_NCP_RADIO, index) else: ncp_socket_path = 'system:{} {} {}'.format(self._OT_NCP_FTD, index, self._SPEED_UP_FACTOR) cmd = self._WPANTUND + \ ' -o Config:NCP:SocketPath \"{}\"'.format(ncp_socket_path) + \ ' -o Config:TUN:InterfaceName {}'.format(self._interface_name) + \ ' -o Config:NCP:DriverName spinel' + \ ' -o Daemon:SyslogMask \"all -debug\"' if Node._TUND_LOG_TO_FILE: self._tund_log_file = open(self._TUND_LOG_FNAME + str(index) + '.log', 'wb') else: self._tund_log_file = None if self._verbose: _log('$ Node{}.__init__() cmd: {}'.format(index, cmd)) self._wpantund_process = subprocess.Popen(cmd, shell=True, stderr=self._tund_log_file) self._wpanctl_cmd = self._WPANCTL + ' -I ' + self._interface_name + ' ' self._recvers = weakref.WeakValueDictionary() # map from local_port to `AsyncReceiver` object Node._all_nodes.add(self) def __del__(self): self._wpantund_process.poll() if self._wpantund_process.returncode is None: self._wpantund_process.terminate() self._wpantund_process.wait() def __repr__(self): return 'Node (index={}, interface_name={})'.format(self._index, self._interface_name) @property def index(self): return self._index @property def interface_name(self): return self._interface_name @property def tund_log_file(self): return self._tund_log_file #------------------------------------------------------------------------------------------------------------------ # Executing a `wpanctl` command def wpanctl(self, cmd): """ Runs a wpanctl command on the given wpantund/OT-NCP instance and returns the output """ if self._verbose: _log('$ Node{}.wpanctl(\'{}\')'.format(self._index, cmd), new_line=False) result = subprocess.check_output(self._wpanctl_cmd + cmd, shell=True, 
stderr=subprocess.STDOUT) if len(result) >= 1 and result[-1] == '\n': # remove the last char if it is '\n', result = result[:-1] if self._verbose: if '\n' in result: _log(':') for line in result.splitlines(): _log(' ' + line) else: _log(' -> \'{}\''.format(result)) return result #------------------------------------------------------------------------------------------------------------------ # APIs matching `wpanctl` commands. def get(self, prop_name, value_only=True): return self.wpanctl('get ' + ('-v ' if value_only else '') + prop_name) def set(self, prop_name, value, binary_data=False): return self._update_prop('set', prop_name, value, binary_data) def add(self, prop_name, value, binary_data=False): return self._update_prop('add', prop_name, value, binary_data) def remove(self, prop_name, value, binary_data=False): return self._update_prop('remove', prop_name, value, binary_data) def _update_prop(self, action, prop_name, value, binary_data): return self.wpanctl(action + ' ' + prop_name + ' ' + ('-d ' if binary_data else '') + '-v ' + value) # use -v to handle values starting with `-`. 
def reset(self): return self.wpanctl('reset') def status(self): return self.wpanctl('status') def leave(self): return self.wpanctl('leave') def form(self, name, channel=None, channel_mask=None, panid=None, xpanid=None, key=None, key_index=None, node_type=None, mesh_local_prefix=None, legacy_prefix=None): return self.wpanctl('form \"' + name + '\"' + (' -c {}'.format(channel) if channel is not None else '') + (' -m {}'.format(channel_mask) if channel_mask is not None else '') + (' -p {}'.format(panid) if panid is not None else '') + (' -x {}'.format(xpanid) if xpanid is not None else '') + (' -k {}'.format(key) if key is not None else '') + (' -i {}'.format(key_index) if key_index is not None else '') + (' -T {}'.format(node_type) if node_type is not None else '') + (' -M {}'.format(mesh_local_prefix) if mesh_local_prefix is not None else '') + (' -L {}'.format(legacy_prefix) if legacy_prefix is not None else '')) def join(self, name, channel=None, node_type=None, panid=None, xpanid=None, key=None): return self.wpanctl('join \"' + name + '\"' + (' -c {}'.format(channel) if channel is not None else '') + (' -T {}'.format(node_type) if node_type is not None else '') + (' -p {}'.format(panid) if panid is not None else '') + (' -x {}'.format(xpanid) if xpanid is not None else '') + (' -k {}'.format(key) if key is not None else '') + (' -n')) def active_scan(self, channel=None): return self.wpanctl('scan' + (' -c {}'.format(channel) if channel is not None else '')) def energy_scan(self, channel=None): return self.wpanctl('scan -e' + (' -c {}'.format(channel) if channel is not None else '')) def discover_scan(self, channel=None, joiner_only=False, enable_filtering=False, panid_filter=None): return self.wpanctl('scan -d' + (' -c {}'.format(channel) if channel is not None else '') + (' -j' if joiner_only else '') + (' -e' if enable_filtering else '') + (' -p {}'.format(panid_filter) if panid_filter is not None else '')) def permit_join(self, duration_sec=None, port=None, 
udp=True, tcp=True): if not udp and not tcp: # incorrect use! return '' traffic_type = '' if udp and not tcp: traffic_type = ' --udp' if tcp and not udp: traffic_type = ' --tcp' if port is not None and duration_sec is None: duration_sec = '240' return self.wpanctl('permit-join' + (' {}'.format(duration_sec) if duration_sec is not None else '') + (' {}'.format(port) if port is not None else '') + traffic_type) def config_gateway(self, prefix, default_route=False, priority=None): return self.wpanctl('config-gateway ' + prefix + (' -d' if default_route else '') + (' -P {}'.format(priority) if priority is not None else '')) def add_prefix(self, prefix, prefix_len=None, priority=None, stable=True, on_mesh=False, slaac=False, dhcp=False, configure=False, default_route=False, preferred=False): return self.wpanctl('add-prefix ' + prefix + (' -l {}'.format(prefix_len) if prefix_len is not None else '') + (' -P {}'.format(priority) if priority is not None else '') + (' -s' if stable else '') + (' -f' if preferred else '') + (' -a' if slaac else '') + (' -d' if dhcp else '') + (' -c' if configure else '') + (' -r' if default_route else '') + (' -o' if on_mesh else '')) def remove_prefix(self, prefix, prefix_len=None): return self.wpanctl('remove-prefix ' + prefix + (' -l {}'.format(prefix_len) if prefix_len is not None else '')) def add_route(self, route_prefix, prefix_len=None, priority=None, stable=True): """route priority [(>0 for high, 0 for medium, <0 for low)]""" return self.wpanctl('add-route ' + route_prefix + (' -l {}'.format(prefix_len) if prefix_len is not None else '') + (' -p {}'.format(priority) if priority is not None else '') + ('' if stable else '-n')) def remove_route(self, route_prefix, prefix_len=None, priority=None, stable=True): """route priority [(>0 for high, 0 for medium, <0 for low)]""" return self.wpanctl('remove-route ' + route_prefix + (' -l {}'.format(prefix_len) if prefix_len is not None else '') + (' -p {}'.format(priority) if priority is not 
    def is_associated(self):
        """Returns True when the node's WPAN_STATE equals STATE_ASSOCIATED (i.e. joined to a network)."""
        return self.get(WPAN_STATE) == STATE_ASSOCIATED

    def join_node(self, node, node_type=JOIN_TYPE_ROUTER, should_set_key=True):
        """Join a network specified by another node, `node` should be a Node.

        Reads the network name, channel, panid, xpanid (and, when
        `should_set_key` is True, the key) from `node` and issues a `join`
        with those parameters. If `node` is not associated, no join is
        attempted and an error string is returned instead.
        """
        if not node.is_associated():
            return "{} is not associated".format(node)

        return self.join(
            node.get(WPAN_NAME)[1:-1],      # strip the surrounding quotes from the name
            channel=node.get(WPAN_CHANNEL),
            node_type=node_type,
            panid=node.get(WPAN_PANID),
            xpanid=node.get(WPAN_XPANID),
            key=node.get(WPAN_KEY)[1:-1] if should_set_key else None)

    def whitelist_node(self, node):
        """Adds a given node (of type `Node`) to the whitelist of `self` and enables whitelisting on `self`"""
        self.add(WPAN_MAC_WHITELIST_ENTRIES, node.get(WPAN_EXT_ADDRESS)[1:-1])
        self.set(WPAN_MAC_WHITELIST_ENABLED, '1')

    def un_whitelist_node(self, node):
        """Removes a given node (of type `Node`) from the whitelist"""
        self.remove(WPAN_MAC_WHITELIST_ENTRIES, node.get(WPAN_EXT_ADDRESS)[1:-1])

    def is_in_scan_result(self, scan_result):
        """Checks if node is in the scan results

        `scan_result` must be an array of `ScanResult` object (see `parse_scan_result`).
        """
        joinable = (self.get(WPAN_NETWORK_ALLOW_JOIN) == 'true')
        panid = self.get(WPAN_PANID)
        xpanid = self.get(WPAN_XPANID)[2:]      # drop the first two chars (presumably the '0x' prefix) -- confirm
        name = self.get(WPAN_NAME)[1:-1]        # strip the surrounding quotes
        channel = self.get(WPAN_CHANNEL)
        ext_address = self.get(WPAN_EXT_ADDRESS)[1:-1]

        for item in scan_result:
            if all(
                [item.network_name == name, item.panid == panid, item.xpanid == xpanid,
                 item.channel == channel, item.ext_address == ext_address,
                 # discovery-scan results carry no joinable flag, so the check is skipped for them
                 (item.type == ScanResult.TYPE_DISCOVERY_SCAN) or (item.joinable == joinable)
                 ]
            ):
                return True

        return False
Returns a string containing the IPv6 address matching the prefix or empty string if no address found. """ if len(prefix) > 2 and prefix[-1] == ':' and prefix[-2] == ':': prefix = prefix[:-1] all_addrs = parse_list(self.get(WPAN_IP6_ALL_ADDRESSES)) matched_addr = [addr for addr in all_addrs if addr.startswith(prefix)] return matched_addr[0] if len(matched_addr) >= 1 else '' def add_ip6_address_on_interface(self, address, prefix_len=64): """Adds an IPv6 interface on the network interface. `address` should be string containing the IPv6 address. `prefix_len` is an `int` specifying the prefix length. NOTE: this method uses linux `ip` command. """ cmd = 'ip -6 addr add '+ address + '/{} dev '.format(prefix_len) + self.interface_name if self._verbose: _log('$ Node{} \'{}\')'.format(self._index, cmd)) result = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT) return result def remove_ip6_address_on_interface(self, address, prefix_len=64): """Removes an IPv6 interface on the network interface. `address` should be string containing the IPv6 address. `prefix_len` is an `int` specifying the prefix length. NOTE: this method uses linux `ip` command. 
""" cmd = 'ip -6 addr del '+ address + '/{} dev '.format(prefix_len) + self.interface_name if self._verbose: _log('$ Node{} \'{}\')'.format(self._index, cmd)) result = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT) return result #------------------------------------------------------------------------------------------------------------------ # class methods @classmethod def init_all_nodes(cls, disable_logs=True, wait_time=15): """Issues a `wpanctl.leave` on all `Node` objects and waits for them to be ready""" random.seed(123456) time.sleep(0.5) for node in Node._all_nodes: start_time = time.time() while True: try: node._wpantund_process.poll() if node._wpantund_process.returncode is not None: print 'Node {} wpantund instance has terminated unexpectedly'.format(node) if disable_logs: node.set(WPAN_OT_LOG_LEVEL, '0') node.leave() except subprocess.CalledProcessError as e: if (node._verbose): _log(' -> \'{}\' exit code: {}'.format(e.output, e.returncode)) interval = time.time() - start_time if interval > wait_time: print 'Took too long to init node {} ({}>{} sec)'.format(node, interval, wait_time) raise except: raise else: break time.sleep(0.4) @classmethod def finalize_all_nodes(cls): """Finalizes all previously created `Node` instances (stops the wpantund process)""" for node in Node._all_nodes: node._wpantund_process.terminate() node._wpantund_process.wait() @classmethod def set_time_speedup_factor(cls, factor): """Sets up the time speed up factor - should be set before creating any `Node` objects""" if len(Node._all_nodes) != 0: raise Node._NodeError('set_time_speedup_factor() cannot be called after creating a `Node`') Node._SPEED_UP_FACTOR = factor #------------------------------------------------------------------------------------------------------------------ # IPv6 message Sender and Receiver class class _NodeError(Exception): pass def prepare_tx(self, src, dst, data=40, count=1, mcast_hops=None): """Prepares an IPv6 msg transmission. 
- `src` and `dst` can be either a string containing IPv6 address, or a tuple (ipv6 address as string, port), if no port is given, a random port number is used. - `data` can be either a string containing the message to be sent, or an int indicating size of the message (a random message with the given length will be used). - `count` gives number of times the message will be sent (default is 1). - `mcast_hops` specifies multicast hop limit (only applicable for multicast tx). Returns an `AsyncSender` object. """ if isinstance(src, tuple): src_addr = src[0] src_port = src[1] else: src_addr = src src_port = random.randint(49152, 65535) if isinstance(dst, tuple): dst_addr = dst[0] dst_port = dst[1] else: dst_addr = dst dst_port = random.randint(49152, 65535) if isinstance(data, int): # create a random message with the given length. all_chars = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,><?;:[]=-+)(*&^%$#@' msg = ''.join(random.choice(all_chars) for _ in range(data)) else: msg = data return AsyncSender(self, src_addr, src_port, dst_addr, dst_port, msg, count, mcast_hops) def _get_receiver(self, local_port): # Gets or creates a receiver (an `AsyncReceiver`) tied to given port number if local_port in self._recvers: receiver = self._recvers[local_port] else: receiver = AsyncReceiver(self, local_port) self._recvers[local_port] = receiver return receiver def _remove_recver(self, recvr): # Removes a receiver from weak dictionary - called when the receiver is done and its socket is closed local_port = recvr.local_port if local_port in self._recvers: del self._recvers[local_port] def prepare_rx(self, sender): """Prepare to receive messages from a sender (an `AsyncSender`)""" receiver = self._get_receiver(sender.dst_port) receiver._add_sender(sender.src_addr, sender.src_port, sender.msg, sender.count) return receiver def preapre_listener(self, local_port, timeout=1): """Prepares a listener (an `AsyncReceiver`) listening on the given `local_port` for given 
`timeout` (sec)""" receiver = self._get_receiver(local_port) receiver._set_listen_timeout(timeout) return receiver @staticmethod def perform_async_tx_rx(timeout=20): """Called to perform all previously prepared async rx/listen and tx operations""" try: start_time = time.time() while asyncore.socket_map: elapsed_time = time.time() - start_time if elapsed_time > timeout: print 'Performing aysnc tx/tx took too long ({}>{} sec)'.format(elapsed_time, timeout) raise Node._NodeError('perform_tx_rx timed out ({}>{} sec)'.format(elapsed_time, timeout)) # perform a single asyncore loop asyncore.loop(timeout=0.5, count=1) except: print 'Failed to perform async rx/tx' raise #----------------------------------------------------------------------------------------------------------------------- # `AsyncSender` and `AsyncReceiver classes _SO_BINDTODEVICE = 25 def _is_ipv6_addr_link_local(ip_addr): """Indicates if a given IPv6 address is link-local""" return ip_addr.lower().startswith('fe80::') def _create_socket_address(ip_address, port): """Convert a given IPv6 address (string) and port number into a socket address""" # `socket.getaddrinfo()` returns a list of `(family, socktype, proto, canonname, sockaddr)` where `sockaddr` # (at index 4) can be used as input in socket methods (like `sendto()`, `bind()`, etc.). 
return socket.getaddrinfo(ip_address, port)[0][4] class AsyncSender(asyncore.dispatcher): """ An IPv6 async message sender - use `Node.prepare_tx()` to create one""" def __init__(self, node, src_addr, src_port, dst_addr, dst_port, msg, count, mcast_hops=None): self._node = node self._src_addr = src_addr self._src_port = src_port self._dst_addr = dst_addr self._dst_port = dst_port self._msg = msg self._count = count self._dst_sock_addr = _create_socket_address(dst_addr, dst_port) self._tx_buffer = self._msg self._tx_counter = 0 # Create a socket, bind it to the node's interface sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM) sock.setsockopt(socket.SOL_SOCKET, _SO_BINDTODEVICE, node.interface_name + '\0') sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1) # Set the IPV6_MULTICAST_HOPS if mcast_hops is not None: sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_MULTICAST_HOPS, mcast_hops) # Bind the socket to the given src address if _is_ipv6_addr_link_local(src_addr): # If src is a link local address it requires the interface name to be specified. 
src_sock_addr = _create_socket_address(src_addr + '%' + node.interface_name, src_port) else: src_sock_addr = _create_socket_address(src_addr, src_port) sock.bind(src_sock_addr) asyncore.dispatcher.__init__(self, sock) # Property getters @property def node(self): return self._node @property def src_addr(self): return self._src_addr @property def src_port(self): return self._src_port @property def dst_addr(self): return self._dst_addr @property def dst_port(self): return self._dst_port @property def msg(self): return self._msg @property def count(self): return self._count @property def was_successful(self): """Indicates if the transmission of IPv6 messages finished successfully""" return self._tx_counter == self._count # asyncore.dispatcher callbacks def readable(self): return False def writable(self): return True def handle_write(self): sent_len = self.sendto(self._tx_buffer, self._dst_sock_addr) if self._node._verbose: if sent_len < 30: info_text = '{} bytes ("{}")'.format(sent_len, self._tx_buffer[:sent_len]) else: info_text = '{} bytes'.format(sent_len) _log('- Node{} sent {} to [{}]:{} from [{}]:{}'.format(self._node._index, info_text, self._dst_addr, self._dst_port, self._src_addr, self._src_port)) self._tx_buffer = self._tx_buffer[sent_len:] if len(self._tx_buffer) == 0: self._tx_counter += 1 if self._tx_counter < self._count: self._tx_buffer = self._msg else: self.handle_close() def handle_close(self): self.close() #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - class AsyncReceiver(asyncore.dispatcher): """ An IPv6 async message receiver - use `prepare_rx()` to create one""" _MAX_RECV_SIZE = 2048 class _SenderInfo(object): def __init__(self, sender_addr, sender_port, msg, count): self._sender_addr = sender_addr self._sender_port = sender_port self._msg = msg self._count = count self._rx_counter = 0 def _check_received(self, msg, sender_addr, sender_port): if self._msg == msg and 
self._sender_addr == sender_addr and self._sender_port == sender_port: self._rx_counter += 1 return self._did_recv_all() def _did_recv_all(self): return self._rx_counter >= self._count def __init__(self, node, local_port): self._node = node self._local_port = local_port self._senders = [] # list of `_SenderInfo` objects self._all_rx = [] # contains all received messages as a list of (pkt, (src_addr, src_port)) self._timeout = 0 # listen timeout (zero means forever) self._started = False self._start_time = 0 # Create a socket, bind it to the node's interface sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM) sock.setsockopt(socket.SOL_SOCKET, _SO_BINDTODEVICE, node.interface_name + '\0') sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1) # Bind the socket to any IPv6 address with the given local port local_sock_addr = _create_socket_address('::', local_port) sock.bind(local_sock_addr) asyncore.dispatcher.__init__(self, sock) def _add_sender(self, sender_addr, sender_port, msg, count): self._senders.append(AsyncReceiver._SenderInfo(sender_addr, sender_port, msg, count)) def _set_listen_timeout(self, timeout): self._timeout = timeout # Property getters @property def node(self): return self._node @property def local_port(self): return self._local_port @property def all_rx_msg(self): """returns all received messages as a list of (msg, (src_addr, src_port))""" return self._all_rx @property def was_successful(self): """Indicates if all expected IPv6 messages were received successfully""" return len(self._senders) == 0 or all([sender._did_recv_all() for sender in self._senders]) # asyncore.dispatcher callbacks def readable(self): if not self._started: self._start_time = time.time() self._started = True if self._timeout != 0 and time.time() - self._start_time >= self._timeout: self.handle_close() if self._node._verbose: _log('- Node{} finished listening on port {} for {} sec, received {} msg(s)'.format( self._node._index, self._local_port, self._timeout, 
len(self._all_rx))) return False return True def writable(self): return False def handle_read(self): (msg, src_sock_addr) = self.recvfrom(AsyncReceiver._MAX_RECV_SIZE) src_addr = src_sock_addr[0] src_port = src_sock_addr[1] if (_is_ipv6_addr_link_local(src_addr)): if '%' in src_addr: src_addr = src_addr.split('%')[0] # remove the interface name from address if self._node._verbose: if len(msg) < 30: info_text = '{} bytes ("{}")'.format(len(msg), msg) else: info_text = '{} bytes'.format(len(msg)) _log('- Node{} received {} on port {} from [{}]:{}'.format(self._node._index, info_text, self._local_port, src_addr, src_port)) self._all_rx.append((msg, (src_addr, src_port))) if all([sender._check_received(msg, src_addr, src_port) for sender in self._senders]): self.handle_close() def handle_close(self): self.close() # remove the receiver from the node once the socket is closed self._node._remove_recver(self) #----------------------------------------------------------------------------------------------------------------------- class VerifyError(Exception): pass _is_in_verify_within = False def verify(condition): """Verifies that a `condition` is true, otherwise raises a VerifyError""" global _is_in_verify_within if not condition: calling_frame = inspect.currentframe().f_back error_message = 'verify() failed at line {} in "{}"'.format(calling_frame.f_lineno, calling_frame.f_code.co_filename) if not _is_in_verify_within: print error_message raise VerifyError(error_message) def verify_within(condition_checker_func, wait_time, delay_time=0.1): """Verifies that a given function `condition_checker_func` passes successfully within a given wait timeout. `wait_time` is maximum time waiting for condition_checker to pass (in seconds). `delay_time` specifies a delay interval added between failed attempts (in seconds). 
""" global _is_in_verify_within start_time = time.time() old_is_in_verify_within = _is_in_verify_within _is_in_verify_within = True while True: try: condition_checker_func() except VerifyError as e: if time.time() - start_time > wait_time: print 'Took too long to pass the condition ({}>{} sec)'.format(time.time() - start_time, wait_time) print e.message raise e except: raise else: break if delay_time != 0: time.sleep(delay_time) _is_in_verify_within = old_is_in_verify_within #----------------------------------------------------------------------------------------------------------------------- # Parsing `wpanctl` output #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - class ScanResult(object): """ This object encapsulates a scan result (active/discover/energy scan)""" TYPE_ACTIVE_SCAN = 'active-scan' TYPE_DISCOVERY_SCAN = 'discover-scan' TYPE_ENERGY_SCAN = 'energy-scan' def __init__(self, result_text): items = [item.strip() for item in result_text.split('|')] if len(items) == 8: self._type = ScanResult.TYPE_ACTIVE_SCAN self._index = items[0] self._joinable = (items[1] == 'YES') self._network_name = items[2][1:-1] self._panid = items[3] self._channel = items[4] self._xpanid = items[5] self._ext_address = items[6] self._rssi = items[7] elif len(items) == 7: self._type = ScanResult.TYPE_DISCOVERY_SCAN self._index = items[0] self._network_name = items[1][1:-1] self._panid = items[2] self._channel = items[3] self._xpanid = items[4] self._ext_address = items[5] self._rssi = items[6] elif len(items) == 2: self._type = ScanResult.TYPE_ENERGY_SCAN self._channel = items[0] self._rssi = items[1] else: raise ValueError('"{}" does not seem to be a valid scan result string'.result_text) @property def type(self): return self._type @property def joinable(self): return self._joinable @property def network_name(self): return self._network_name @property def panid(self): return self._panid @property def 
channel(self): return self._channel @property def xpanid(self): return self._xpanid @property def ext_address(self): return self._ext_address @property def rssi(self): return self._rssi def __repr__(self): return 'ScanResult({})'.format(self.__dict__) def parse_scan_result(scan_result): """ Parses scan result string and returns an array of `ScanResult` objects""" return [ ScanResult(item) for item in scan_result.split('\n')[2:] ] # skip first two lines which are table headers def parse_list(list_string): """ Parses IPv6/prefix/route list string (output of wpanctl get for properties WPAN_IP6_ALL_ADDRESSES, IP6_MULTICAST_ADDRESSES, WPAN_THREAD_ON_MESH_PREFIXES, ...) Returns an array of strings each containing an IPv6/prefix/route entry. """ # List string example (get(WPAN_IP6_ALL_ADDRESSES) output): # # '[\n # \t"fdf4:5632:4940:0:8798:8701:85d4:e2be prefix_len:64 origin:ncp valid:forever preferred:forever"\n # \t"fe80::2092:9358:97ea:71c6 prefix_len:64 origin:ncp valid:forever preferred:forever"\n # ]' # # We split the lines ('\n' as separator) and skip the first and last lines which are '[' and ']'. # For each line, skip the first two characters (which are '\t"') and last character ('"'), then split the string # using whitespace as separator. The first entry is the IPv6 address. 
# return [line[2:-1].split()[0] for line in list_string.split('\n')[1:-1]] #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - class OnMeshPrefix(object): """ This object encapsulates an on-mesh prefix""" def __init__(self, text): # Example of expected text: # # '\t"fd00:abba:cafe:: prefix_len:64 origin:user stable:yes flags:0x31' # ' [on-mesh:1 def-route:0 config:0 dhcp:0 slaac:1 pref:1 prio:med] rloc:0x0000"' m = re.match('\t"([0-9a-fA-F:]+)\s*prefix_len:(\d+)\s+origin:(\w*)\s+stable:(\w*).* \[' + 'on-mesh:(\d)\s+def-route:(\d)\s+config:(\d)\s+dhcp:(\d)\s+slaac:(\d)\s+pref:(\d)\s+prio:(\w*)\]' + '\s+rloc:(0x[0-9a-fA-F]+)', text) verify(m is not None) data = m.groups() self._prefix = data[0] self._prefix_len = data[1] self._origin = data[2] self._stable = (data[3] == 'yes') self._on_mesh = (data[4] == '1') self._def_route = (data[5] == '1') self._config = (data[6] == '1') self._dhcp = (data[7] == '1') self._slaac = (data[8] == '1') self._preferred = (data[9] == '1') self._priority = (data[10]) self._rloc16 = (data[11]) @property def prefix(self): return self._prefix @property def prefix_len(self): return self._prefix_len @property def origin(self): return self._origin @property def priority(self): return self._priority def is_stable(self): return self._stable def is_on_mesh(self): return self._on_mesh def is_def_route(self): return self._def_route def is_config(self): return self._config def is_dhcp(self): return self._dhcp def is_slaac(self): return self._slaac def is_preferred(self): return self._preferred def rloc16(self): return self._rloc16 def __repr__(self): return 'OnMeshPrefix({})'.format(self.__dict__) def parse_on_mesh_prefix_result(on_mesh_prefix_list): """ Parses on-mesh prefix list string and returns an array of `OnMeshPrefix` objects""" return [ OnMeshPrefix(item) for item in on_mesh_prefix_list.split('\n')[1:-1] ] #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
def _parse_entry_fields(text, skip=1):
    """Tokenizes one wpanctl table entry line into (items, fields).

    `text` looks like '\\t"EXTADDR, Key1:val1, Key2:val2, ..."'. The leading
    '\\t"' and trailing '"' are dropped, the rest is split on whitespace and
    trailing commas are stripped. `fields` is a dict built from the
    'Key:value' items starting at index `skip` (earlier items, such as the
    extended address, carry no key). Previously each entry class duplicated
    this logic inline and shadowed the builtin `dict` while doing so.
    """
    items = [item[:-1] if item.endswith(',') else item for item in text[2:-1].split()]
    fields = {}
    for item in items[skip:]:
        key, _, value = item.partition(':')
        fields[key] = value
    return items, fields


class ChildEntry(object):
    """ This object encapsulates a child entry"""

    def __init__(self, text):
        # Example of expected text:
        #
        # `\t"E24C5F67F4B8CBB9, RLOC16:d402, NetDataVer:175, LQIn:3, AveRssi:-20, LastRssi:-20, Timeout:120, Age:0, `
        # `RxOnIdle:no, FTD:no, SecDataReq:yes, FullNetData:yes"`
        items, fields = _parse_entry_fields(text)

        # First item is the extended address; the rest are Key:value fields.
        self._ext_address = items[0]
        self._rloc16 = fields['RLOC16']
        self._timeout = fields['Timeout']
        self._rx_on_idle = (fields['RxOnIdle'] == 'yes')
        self._ftd = (fields['FTD'] == 'yes')
        self._sec_data_req = (fields['SecDataReq'] == 'yes')
        self._full_net_data = (fields['FullNetData'] == 'yes')

    @property
    def ext_address(self):
        return self._ext_address

    @property
    def rloc16(self):
        return self._rloc16

    @property
    def timeout(self):
        return self._timeout

    def is_rx_on_when_idle(self):
        return self._rx_on_idle

    def is_ftd(self):
        return self._ftd

    def is_sec_data_req(self):
        return self._sec_data_req

    def is_full_net_data(self):
        return self._full_net_data

    def __repr__(self):
        return 'ChildEntry({})'.format(self.__dict__)


def parse_child_table_result(child_table_list):
    """ Parses child table list string and returns an array of `ChildEntry` objects"""
    return [ChildEntry(item) for item in child_table_list.split('\n')[1:-1]]


class NeighborEntry(object):
    """ This object encapsulates a neighbor entry"""

    def __init__(self, text):
        # Example of expected text:
        #
        # `\t"5AC95ED4646D6565, RLOC16:9403, LQIn:3, AveRssi:-20, LastRssi:-20, Age:0, LinkFC:8, MleFC:0, IsChild:yes, '
        # 'RxOnIdle:no, FTD:no, SecDataReq:yes, FullNetData:yes"'
        items, fields = _parse_entry_fields(text)

        self._ext_address = items[0]
        self._rloc16 = fields['RLOC16']
        self._is_child = (fields['IsChild'] == 'yes')
        self._rx_on_idle = (fields['RxOnIdle'] == 'yes')
        self._ftd = (fields['FTD'] == 'yes')

    @property
    def ext_address(self):
        return self._ext_address

    @property
    def rloc16(self):
        return self._rloc16

    def is_rx_on_when_idle(self):
        return self._rx_on_idle

    def is_ftd(self):
        return self._ftd

    def is_child(self):
        return self._is_child

    def __repr__(self):
        return 'NeighborEntry({})'.format(self.__dict__)


def parse_neighbor_table_result(neighbor_table_list):
    """ Parses neighbor table list string and returns an array of `NeighborEntry` objects"""
    return [NeighborEntry(item) for item in neighbor_table_list.split('\n')[1:-1]]


class RouterTableEntry(object):
    """ This object encapsulates a router table entry"""

    def __init__(self, text):
        # Example of expected text:
        #
        # `\t"8A970B3251810826, RLOC16:4000, RouterId:16, NextHop:43, PathCost:1, LQIn:3, LQOut:3, Age:3, LinkEst:yes"`
        items, fields = _parse_entry_fields(text)

        self._ext_address = items[0]
        self._rloc16 = int(fields['RLOC16'], 16)    # RLOC16 printed as bare hex
        self._router_id = int(fields['RouterId'], 0)
        self._next_hop = int(fields['NextHop'], 0)
        self._path_cost = int(fields['PathCost'], 0)
        self._age = int(fields['Age'], 0)
        self._le = (fields['LinkEst'] == 'yes')

    @property
    def ext_address(self):
        return self._ext_address

    @property
    def rloc16(self):
        return self._rloc16

    @property
    def router_id(self):
        return self._router_id

    @property
    def next_hop(self):
        return self._next_hop

    @property
    def path_cost(self):
        return self._path_cost

    def is_link_established(self):
        return self._le

    def __repr__(self):
        return 'RouterTableEntry({})'.format(self.__dict__)


def parse_router_table_result(router_table_list):
    """ Parses router table list string and returns an array of `RouterTableEntry` objects"""
    return [RouterTableEntry(item) for item in router_table_list.split('\n')[1:-1]]


class AddressCacheEntry(object):
    """ This object encapsulates an address cache entry"""

    def __init__(self, text):
        # Example of expected text:
        #
        # '\t"fd00:1234::d427:a1d9:6204:dbae -> 0x9c00, age:0"'
        #
        # items[0] is the IPv6 address, items[1] is the '->' arrow, items[2]
        # is the rloc16; Key:value fields start at index 3.
        items, fields = _parse_entry_fields(text, skip=3)

        self._address = items[0]
        self._rloc16 = int(items[2], 16)
        self._age = int(fields['age'], 0)

    @property
    def address(self):
        return self._address

    @property
    def rloc16(self):
        return self._rloc16

    @property
    def age(self):
        return self._age

    def __repr__(self):
        return 'AddressCacheEntry({})'.format(self.__dict__)


def parse_address_cache_table_result(addr_cache_table_list):
    """ Parses address cache table list string and returns an array of `AddressCacheEntry` objects"""
    return [AddressCacheEntry(item) for item in addr_cache_table_list.split('\n')[1:-1]]
bsd-3-clause
bing-ads-sdk/BingAds-Python-SDK
bingads/v12/bulk/entities/target_criterions/bulk_ad_group_location_intent_criterion.py
1
4669
from bingads.v12.bulk.entities import *
from bingads.service_client import _CAMPAIGN_OBJECT_FACTORY_V12
from bingads.v12.internal.bulk.entities.single_record_bulk_entity import _SingleRecordBulkEntity
from bingads.v12.internal.bulk.mappings import _SimpleBulkMapping
from bingads.v12.internal.bulk.string_table import _StringTable
from bingads.v12.internal.extensions import *


class BulkAdGroupLocationIntentCriterion(_SingleRecordBulkEntity):
    """ Represents an Ad Group Location Intent Criterion that can be read or written in a bulk file.

    This class exposes the :attr:`biddable_ad_group_criterion` property that can be read and written as fields of the
    Ad Group Location Intent Criterion record in a bulk file.

    For more information, see Ad Group Location Intent Criterion at https://go.microsoft.com/fwlink/?linkid=846127.

    *See also:*

    * :class:`.BulkServiceManager`
    * :class:`.BulkOperation`
    * :class:`.BulkFileReader`
    * :class:`.BulkFileWriter`
    """

    def __init__(self,
                 biddable_ad_group_criterion=None,
                 campaign_name=None,
                 ad_group_name=None, ):
        super(BulkAdGroupLocationIntentCriterion, self).__init__()

        # The wrapped SOAP BiddableAdGroupCriterion object; all criterion
        # fields (Status, Id, AdGroupId, Criterion) live on this object.
        self._biddable_ad_group_criterion = biddable_ad_group_criterion
        # Campaign and ad group names are bulk-row-only columns; they are
        # not present on the SOAP criterion object itself.
        self._campaign_name = campaign_name
        self._ad_group_name = ad_group_name

    # Column-by-column mapping between bulk-file CSV fields and this entity.
    # field_to_csv serializes on upload; csv_to_field parses on download.
    _MAPPINGS = [
        _SimpleBulkMapping(
            _StringTable.Status,
            field_to_csv=lambda c: bulk_str(c.biddable_ad_group_criterion.Status),
            csv_to_field=lambda c, v: setattr(c.biddable_ad_group_criterion, 'Status', v if v else None)
        ),
        _SimpleBulkMapping(
            _StringTable.Id,
            field_to_csv=lambda c: bulk_str(c.biddable_ad_group_criterion.Id),
            csv_to_field=lambda c, v: setattr(c.biddable_ad_group_criterion, 'Id', int(v) if v else None)
        ),
        _SimpleBulkMapping(
            _StringTable.ParentId,
            # The parent of an ad group criterion is its ad group.
            field_to_csv=lambda c: bulk_str(c.biddable_ad_group_criterion.AdGroupId),
            csv_to_field=lambda c, v: setattr(c.biddable_ad_group_criterion, 'AdGroupId', int(v) if v else None)
        ),
        _SimpleBulkMapping(
            _StringTable.Campaign,
            field_to_csv=lambda c: c.campaign_name,
            csv_to_field=lambda c, v: setattr(c, 'campaign_name', v)
        ),
        _SimpleBulkMapping(
            _StringTable.AdGroup,
            field_to_csv=lambda c: c.ad_group_name,
            csv_to_field=lambda c, v: setattr(c, 'ad_group_name', v)
        ),
        _SimpleBulkMapping(
            _StringTable.Target,
            # Location intent serialization is delegated to shared helpers
            # from bingads.v12.internal.extensions.
            field_to_csv=lambda c: field_to_csv_LocationIntentTarget(c.biddable_ad_group_criterion),
            csv_to_field=lambda c, v: csv_to_field_LocationIntentTarget(c.biddable_ad_group_criterion, v)
        ),
    ]

    @property
    def biddable_ad_group_criterion(self):
        """ Defines a Ad Group Criterion """

        return self._biddable_ad_group_criterion

    @biddable_ad_group_criterion.setter
    def biddable_ad_group_criterion(self, biddable_ad_group_criterion):
        self._biddable_ad_group_criterion = biddable_ad_group_criterion

    @property
    def campaign_name(self):
        """ The name of the Campaign

        :rtype: str
        """

        return self._campaign_name

    @campaign_name.setter
    def campaign_name(self, campaign_name):
        self._campaign_name = campaign_name

    @property
    def ad_group_name(self):
        """ The name of the Ad Group

        :rtype: str
        """

        return self._ad_group_name

    @ad_group_name.setter
    def ad_group_name(self, ad_group_name):
        self._ad_group_name = ad_group_name

    def process_mappings_to_row_values(self, row_values, exclude_readonly_data):
        # Serializing requires a criterion object; names alone are not enough.
        self._validate_property_not_null(self.biddable_ad_group_criterion, 'biddable_ad_group_criterion')
        self.convert_to_values(row_values, BulkAdGroupLocationIntentCriterion._MAPPINGS)

    def process_mappings_from_row_values(self, row_values):
        # Build a fresh SOAP object tree before applying the CSV values;
        # the Type strings are required by the suds factory for polymorphism.
        self._biddable_ad_group_criterion = _CAMPAIGN_OBJECT_FACTORY_V12.create('BiddableAdGroupCriterion')
        self._biddable_ad_group_criterion.Type = 'BiddableAdGroupCriterion'
        self._biddable_ad_group_criterion.Criterion = _CAMPAIGN_OBJECT_FACTORY_V12.create('LocationIntentCriterion')
        self._biddable_ad_group_criterion.Criterion.Type = 'LocationIntentCriterion'
        row_values.convert_to_entity(self, BulkAdGroupLocationIntentCriterion._MAPPINGS)

    def read_additional_data(self, stream_reader):
        # No multi-row payload for this record type; defer to the base class.
        super(BulkAdGroupLocationIntentCriterion, self).read_additional_data(stream_reader)
mit
cherusk/ansible
lib/ansible/modules/network/panos/panos_service.py
78
5072
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Ansible module to manage PaloAltoNetworks Firewall
# (c) 2016, techbizdev <techbizdev@paloaltonetworks.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible.  If not, see <http://www.gnu.org/licenses/>.

DOCUMENTATION = '''
---
module: panos_service
short_description: create a service object
description:
    - Create a service object. Service objects are fundamental representation of the applications given src/dst ports and protocol
author: "Luigi Mori (@jtschichold), Ivan Bojer (@ivanbojer)"
version_added: "2.3"
requirements:
    - pan-python
options:
    ip_address:
        description:
            - IP address (or hostname) of PAN-OS device
        required: true
    password:
        description:
            - password for authentication
        required: true
    username:
        description:
            - username for authentication
        required: false
        default: "admin"
    service_name:
        description:
            - name of the service
        required: true
    protocol:
        description:
            - protocol for the service, should be tcp or udp
        required: true
    port:
        description:
            - destination port
        required: true
    source_port:
        description:
            - source port
        required: false
        default: None
    commit:
        description:
            - commit if changed
        required: false
        default: true
'''

EXAMPLES = '''
# Creates service for port 22
  - name: create SSH service
    panos_service:
      ip_address: "192.168.1.1"
      password: "admin"
      service_name: "service-tcp-22"
      protocol: "tcp"
      port: "22"
'''

RETURN='''
# Default return values
'''

ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['preview'],
                    'supported_by': 'community'}

from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import get_exception

# pan-python is an optional runtime dependency; its absence is reported
# through fail_json in main() instead of crashing on import.
try:
    import pan.xapi
    from pan.xapi import PanXapiError

    HAS_LIB = True
except ImportError:
    HAS_LIB = False

# XPath template for a service object in the default vsys of the
# candidate configuration; '%s' is filled with the service name.
_SERVICE_XPATH = "/config/devices/entry[@name='localhost.localdomain']" +\
                 "/vsys/entry[@name='vsys1']" +\
                 "/service/entry[@name='%s']"


def service_exists(xapi, service_name):
    """Return True if a service object with this name already exists on the device."""
    xapi.get(_SERVICE_XPATH % service_name)
    # An absent 'entry' element in the response means the object is not defined.
    e = xapi.element_root.find('.//entry')
    if e is None:
        return False
    return True


def add_service(xapi, module, service_name, protocol, port, source_port):
    """Create the service object if missing.

    Returns True when a change was pushed to the device, False when the
    service already existed (idempotent no-op).
    """
    if service_exists(xapi, service_name):
        return False

    # Build the <protocol><tcp|udp>...</...></protocol> element by hand;
    # source-port is optional and only emitted when provided.
    exml = ['<protocol>']
    exml.append('<%s>' % protocol)
    exml.append('<port>%s</port>' % port)
    if source_port:
        exml.append('<source-port>%s</source-port>' % source_port)
    exml.append('</%s>' % protocol)
    exml.append('</protocol>')

    exml = ''.join(exml)

    xapi.set(xpath=_SERVICE_XPATH % service_name, element=exml)

    return True


def main():
    """Module entry point: parse arguments, create the service, optionally commit."""
    argument_spec = dict(
        ip_address=dict(required=True),
        password=dict(required=True, no_log=True),
        username=dict(default='admin'),
        service_name=dict(required=True),
        protocol=dict(required=True, choices=['tcp', 'udp']),
        port=dict(required=True),
        source_port=dict(),
        commit=dict(type='bool', default=True)
    )
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)
    if not HAS_LIB:
        module.fail_json(msg='pan-python is required for this module')

    ip_address = module.params["ip_address"]
    password = module.params["password"]
    username = module.params['username']
    service_name = module.params['service_name']
    protocol = module.params['protocol']
    port = module.params['port']
    source_port = module.params['source_port']
    commit = module.params['commit']

    xapi = pan.xapi.PanXapi(
        hostname=ip_address,
        api_username=username,
        api_password=password
    )

    try:
        changed = add_service(xapi, module,
                              service_name,
                              protocol,
                              port,
                              source_port)
        # Only commit when something actually changed and the caller asked for it.
        # sync=True blocks until the commit job finishes on the firewall.
        if changed and commit:
            xapi.commit(cmd="<commit></commit>", sync=True, interval=1)
    except PanXapiError:
        exc = get_exception()
        # NOTE(review): exc.message is a Python 2 idiom; on Python 3 this
        # attribute may not exist — confirm against the supported runtime.
        module.fail_json(msg=exc.message)

    module.exit_json(changed=changed, msg="okey dokey")

if __name__ == '__main__':
    main()
gpl-3.0
liggitt/openshift-ansible
roles/openshift_health_checker/openshift_checks/logging/logging_index_time.py
49
5031
""" Check for ensuring logs from pods can be queried in a reasonable amount of time. """ import json import time from uuid import uuid4 from openshift_checks import OpenShiftCheckException from openshift_checks.logging.logging import LoggingCheck ES_CMD_TIMEOUT_SECONDS = 30 class LoggingIndexTime(LoggingCheck): """Check that pod logs are aggregated and indexed in ElasticSearch within a reasonable amount of time.""" name = "logging_index_time" tags = ["health", "logging"] def run(self): """Add log entry by making unique request to Kibana. Check for unique entry in the ElasticSearch pod logs.""" try: log_index_timeout = int( self.get_var("openshift_check_logging_index_timeout_seconds", default=ES_CMD_TIMEOUT_SECONDS) ) except ValueError: raise OpenShiftCheckException( 'InvalidTimeout', 'Invalid value provided for "openshift_check_logging_index_timeout_seconds". ' 'Value must be an integer representing an amount in seconds.' ) running_component_pods = dict() # get all component pods for component, name in (['kibana', 'Kibana'], ['es', 'Elasticsearch']): pods = self.get_pods_for_component(component) running_pods = self.running_pods(pods) if not running_pods: raise OpenShiftCheckException( component + 'NoRunningPods', 'No {} pods in the "Running" state were found.' 
'At least one pod is required in order to perform this check.'.format(name) ) running_component_pods[component] = running_pods uuid = self.curl_kibana_with_uuid(running_component_pods["kibana"][0]) self.wait_until_cmd_or_err(running_component_pods["es"][0], uuid, log_index_timeout) return {} def wait_until_cmd_or_err(self, es_pod, uuid, timeout_secs): """Retry an Elasticsearch query every second until query success, or a defined length of time has passed.""" deadline = time.time() + timeout_secs interval = 1 while not self.query_es_from_es(es_pod, uuid): if time.time() + interval > deadline: raise OpenShiftCheckException( "NoMatchFound", "expecting match in Elasticsearch for message with uuid {}, " "but no matches were found after {}s.".format(uuid, timeout_secs) ) time.sleep(interval) def curl_kibana_with_uuid(self, kibana_pod): """curl Kibana with a unique uuid.""" uuid = self.generate_uuid() pod_name = kibana_pod["metadata"]["name"] exec_cmd = "exec {pod_name} -c kibana -- curl --max-time 30 -s http://localhost:5601/{uuid}" exec_cmd = exec_cmd.format(pod_name=pod_name, uuid=uuid) error_str = self.exec_oc(exec_cmd, []) try: error_code = json.loads(error_str)["statusCode"] except (KeyError, ValueError): raise OpenShiftCheckException( 'kibanaInvalidResponse', 'invalid response returned from Kibana request:\n' 'Command: {}\nResponse: {}'.format(exec_cmd, error_str) ) if error_code != 404: raise OpenShiftCheckException( 'kibanaInvalidReturnCode', 'invalid error code returned from Kibana request.\n' 'Expecting error code "404", but got "{}" instead.'.format(error_code) ) return uuid def query_es_from_es(self, es_pod, uuid): """curl the Elasticsearch pod and look for a unique uuid in its logs.""" pod_name = es_pod["metadata"]["name"] exec_cmd = ( "exec {pod_name} -- curl --max-time 30 -s -f " "--cacert /etc/elasticsearch/secret/admin-ca " "--cert /etc/elasticsearch/secret/admin-cert " "--key /etc/elasticsearch/secret/admin-key " 
"https://logging-es:9200/project.{namespace}*/_count?q=message:{uuid}" ) exec_cmd = exec_cmd.format(pod_name=pod_name, namespace=self.logging_namespace(), uuid=uuid) result = self.exec_oc(exec_cmd, [], save_as_name="query_for_uuid.json") try: count = json.loads(result)["count"] except (KeyError, ValueError): raise OpenShiftCheckException( 'esInvalidResponse', 'Invalid response from Elasticsearch query:\n' ' {}\n' 'Response was:\n{}'.format(exec_cmd, result) ) return count @staticmethod def running_pods(pods): """Filter pods that are running.""" return [pod for pod in pods if pod['status']['phase'] == 'Running'] @staticmethod def generate_uuid(): """Wrap uuid generator. Allows for testing with expected values.""" return str(uuid4())
apache-2.0
niketanpansare/systemml
projects/breast_cancer/breastcancer/visualization.py
18
2001
#-------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
#
#-------------------------------------------------------------

"""
Visualization -- Predicting Breast Cancer Proliferation Scores with
Apache SystemML

This module contains functions for visualizing data for the breast
cancer project.
"""
import matplotlib.pyplot as plt


def visualize_tile(tile):
    """
    Plot a tissue tile.

    Args:
      tile: A 3D NumPy array of shape (tile_size, tile_size, channels).

    Returns:
      None
    """
    plt.imshow(tile)
    plt.show()


def visualize_sample(sample, size=256):
    """
    Plot a tissue sample.

    Args:
      sample: A square sample flattened to a vector of size
        (channels*size_x*size_y).
      size: The width and height of the square samples.

    Returns:
      None
    """
    # Recover the channel count from the flattened length.  Use exact
    # integer floor division rather than int(length / (size*size)): true
    # division goes through a float, which is inexact for very large
    # vectors and can silently truncate to the wrong channel count.
    length = sample.shape[0]
    channels = length // (size * size)
    if channels > 1:
        # Multi-channel: reshape to (channels, size, size), then transpose
        # to the (size_x, size_y, channels) layout imshow expects.
        sample = sample.astype('uint8').reshape((channels, size, size)).transpose(1, 2, 0)
        plt.imshow(sample)
    else:
        # Single-channel: pick the gray-value range based on whether the
        # data looks like [0, 255] ints or [0, 1] floats.
        vmax = 255 if sample.max() > 1 else 1
        sample = sample.reshape((size, size))
        plt.imshow(sample, cmap="gray", vmin=0, vmax=vmax)
    plt.show()
apache-2.0
flyfei/python-for-android
python3-alpha/python3-src/Lib/unittest/test/test_discovery.py
785
13838
# Tests for unittest's filesystem-based test discovery.  Everything that
# would touch the real filesystem (os.listdir, os.path.isfile/isdir,
# sys.path) is monkeypatched and restored via addCleanup.
import os
import re
import sys
import unittest


class TestableTestProgram(unittest.TestProgram):
    # Minimal TestProgram stand-in: class attributes supply the defaults
    # parseArgs expects, and __init__ is a no-op so no tests actually run.
    module = '__main__'
    exit = True
    defaultTest = failfast = catchbreak = buffer = None
    verbosity = 1
    progName = ''
    testRunner = testLoader = None

    def __init__(self):
        pass


class TestDiscovery(unittest.TestCase):

    # Heavily mocked tests so I can avoid hitting the filesystem
    def test_get_name_from_path(self):
        loader = unittest.TestLoader()

        loader._top_level_dir = '/foo'
        name = loader._get_name_from_path('/foo/bar/baz.py')
        self.assertEqual(name, 'bar.baz')

        if not __debug__:
            # asserts are off
            return

        with self.assertRaises(AssertionError):
            loader._get_name_from_path('/bar/baz.py')

    def test_find_tests(self):
        loader = unittest.TestLoader()

        original_listdir = os.listdir
        def restore_listdir():
            os.listdir = original_listdir
        original_isfile = os.path.isfile
        def restore_isfile():
            os.path.isfile = original_isfile
        original_isdir = os.path.isdir
        def restore_isdir():
            os.path.isdir = original_isdir

        # First listdir call returns the top level; the second the contents
        # of 'test_dir' (the only directory matching the pattern).
        path_lists = [['test1.py', 'test2.py', 'not_a_test.py', 'test_dir',
                       'test.foo', 'test-not-a-module.py', 'another_dir'],
                      ['test3.py', 'test4.py', ]]
        os.listdir = lambda path: path_lists.pop(0)
        self.addCleanup(restore_listdir)

        def isdir(path):
            return path.endswith('dir')
        os.path.isdir = isdir
        self.addCleanup(restore_isdir)

        def isfile(path):
            # another_dir is not a package and so shouldn't be recursed into
            return not path.endswith('dir') and not 'another_dir' in path
        os.path.isfile = isfile
        self.addCleanup(restore_isfile)

        # Stub out module loading so 'loaded tests' are just marker strings.
        loader._get_module_from_name = lambda path: path + ' module'
        loader.loadTestsFromModule = lambda module: module + ' tests'

        top_level = os.path.abspath('/foo')
        loader._top_level_dir = top_level
        suite = list(loader._find_tests(top_level, 'test*.py'))

        expected = [name + ' module tests' for name in
                    ('test1', 'test2')]
        expected.extend([('test_dir.%s' % name) + ' module tests' for name in
                    ('test3', 'test4')])
        self.assertEqual(suite, expected)

    def test_find_tests_with_package(self):
        loader = unittest.TestLoader()

        original_listdir = os.listdir
        def restore_listdir():
            os.listdir = original_listdir
        original_isfile = os.path.isfile
        def restore_isfile():
            os.path.isfile = original_isfile
        original_isdir = os.path.isdir
        def restore_isdir():
            os.path.isdir = original_isdir

        directories = ['a_directory', 'test_directory', 'test_directory2']
        path_lists = [directories, [], [], []]
        os.listdir = lambda path: path_lists.pop(0)
        self.addCleanup(restore_listdir)

        os.path.isdir = lambda path: True
        self.addCleanup(restore_isdir)

        os.path.isfile = lambda path: os.path.basename(path) not in directories
        self.addCleanup(restore_isfile)

        class Module(object):
            # Records every path loaded; 'test_directory' additionally gets
            # a load_tests hook so the package protocol can be exercised.
            paths = []
            load_tests_args = []

            def __init__(self, path):
                self.path = path
                self.paths.append(path)
                if os.path.basename(path) == 'test_directory':
                    def load_tests(loader, tests, pattern):
                        self.load_tests_args.append((loader, tests, pattern))
                        return 'load_tests'
                    self.load_tests = load_tests

            def __eq__(self, other):
                return self.path == other.path

        loader._get_module_from_name = lambda name: Module(name)
        def loadTestsFromModule(module, use_load_tests):
            if use_load_tests:
                raise self.failureException('use_load_tests should be False for packages')
            return module.path + ' module tests'
        loader.loadTestsFromModule = loadTestsFromModule

        loader._top_level_dir = '/foo'
        # this time no '.py' on the pattern so that it can match
        # a test package
        suite = list(loader._find_tests('/foo', 'test*'))

        # We should have loaded tests from the test_directory package by calling load_tests
        # and directly from the test_directory2 package
        self.assertEqual(suite,
                         ['load_tests', 'test_directory2' + ' module tests'])
        self.assertEqual(Module.paths, ['test_directory', 'test_directory2'])

        # load_tests should have been called once with loader, tests and pattern
        self.assertEqual(Module.load_tests_args,
                         [(loader, 'test_directory' + ' module tests', 'test*')])

    def test_discover(self):
        loader = unittest.TestLoader()

        original_isfile = os.path.isfile
        original_isdir = os.path.isdir
        def restore_isfile():
            os.path.isfile = original_isfile

        # With isfile always False the start dir is not importable, so
        # discover must raise ImportError (after fixing up sys.path).
        os.path.isfile = lambda path: False
        self.addCleanup(restore_isfile)

        orig_sys_path = sys.path[:]
        def restore_path():
            sys.path[:] = orig_sys_path
        self.addCleanup(restore_path)

        full_path = os.path.abspath(os.path.normpath('/foo'))
        with self.assertRaises(ImportError):
            loader.discover('/foo/bar', top_level_dir='/foo')

        self.assertEqual(loader._top_level_dir, full_path)
        self.assertIn(full_path, sys.path)

        os.path.isfile = lambda path: True
        os.path.isdir = lambda path: True
        def restore_isdir():
            os.path.isdir = original_isdir
        self.addCleanup(restore_isdir)

        _find_tests_args = []
        def _find_tests(start_dir, pattern):
            _find_tests_args.append((start_dir, pattern))
            return ['tests']
        loader._find_tests = _find_tests
        # suiteClass=str makes the resulting "suite" easy to compare.
        loader.suiteClass = str

        suite = loader.discover('/foo/bar/baz', 'pattern', '/foo/bar')

        top_level_dir = os.path.abspath('/foo/bar')
        start_dir = os.path.abspath('/foo/bar/baz')
        self.assertEqual(suite, "['tests']")
        self.assertEqual(loader._top_level_dir, top_level_dir)
        self.assertEqual(_find_tests_args, [(start_dir, 'pattern')])
        self.assertIn(top_level_dir, sys.path)

    def test_discover_with_modules_that_fail_to_import(self):
        loader = unittest.TestLoader()

        listdir = os.listdir
        os.listdir = lambda _: ['test_this_does_not_exist.py']
        isfile = os.path.isfile
        os.path.isfile = lambda _: True
        orig_sys_path = sys.path[:]
        def restore():
            os.path.isfile = isfile
            os.listdir = listdir
            sys.path[:] = orig_sys_path
        self.addCleanup(restore)

        # A module that fails to import should produce a synthetic failing
        # test rather than aborting discovery.
        suite = loader.discover('.')
        self.assertIn(os.getcwd(), sys.path)
        self.assertEqual(suite.countTestCases(), 1)
        test = list(list(suite)[0])[0] # extract test from suite

        with self.assertRaises(ImportError):
            test.test_this_does_not_exist()

    def test_command_line_handling_parseArgs(self):
        program = TestableTestProgram()

        args = []
        def do_discovery(argv):
            args.extend(argv)
        program._do_discovery = do_discovery
        program.parseArgs(['something', 'discover'])
        self.assertEqual(args, [])

        program.parseArgs(['something', 'discover', 'foo', 'bar'])
        self.assertEqual(args, ['foo', 'bar'])

    def test_command_line_handling_discover_by_default(self):
        program = TestableTestProgram()
        program.module = None

        self.called = False
        def do_discovery(argv):
            self.called = True
            self.assertEqual(argv, [])
        program._do_discovery = do_discovery

        program.parseArgs(['something'])
        self.assertTrue(self.called)

    def test_command_line_handling_discover_by_default_with_options(self):
        program = TestableTestProgram()
        program.module = None

        args = ['something', '-v', '-b', '-v', '-c', '-f']
        self.called = False
        def do_discovery(argv):
            self.called = True
            self.assertEqual(argv, args[1:])
        program._do_discovery = do_discovery

        program.parseArgs(args)
        self.assertTrue(self.called)

    def test_command_line_handling_do_discovery_too_many_arguments(self):
        class Stop(Exception):
            pass
        def usageExit():
            raise Stop

        program = TestableTestProgram()
        program.usageExit = usageExit

        with self.assertRaises(Stop):
            # too many args
            program._do_discovery(['one', 'two', 'three', 'four'])

    def test_command_line_handling_do_discovery_calls_loader(self):
        program = TestableTestProgram()

        class Loader(object):
            # Records the (start_dir, pattern, top_level_dir) triples that
            # discover() is invoked with.
            args = []
            def discover(self, start_dir, pattern, top_level_dir):
                self.args.append((start_dir, pattern, top_level_dir))
                return 'tests'

        program._do_discovery(['-v'], Loader=Loader)
        self.assertEqual(program.verbosity, 2)
        self.assertEqual(program.test, 'tests')
        self.assertEqual(Loader.args, [('.', 'test*.py', None)])

        Loader.args = []
        program = TestableTestProgram()
        program._do_discovery(['--verbose'], Loader=Loader)
        self.assertEqual(program.test, 'tests')
        self.assertEqual(Loader.args, [('.', 'test*.py', None)])

        Loader.args = []
        program = TestableTestProgram()
        program._do_discovery([], Loader=Loader)
        self.assertEqual(program.test, 'tests')
        self.assertEqual(Loader.args, [('.', 'test*.py', None)])

        Loader.args = []
        program = TestableTestProgram()
        program._do_discovery(['fish'], Loader=Loader)
        self.assertEqual(program.test, 'tests')
        self.assertEqual(Loader.args, [('fish', 'test*.py', None)])

        Loader.args = []
        program = TestableTestProgram()
        program._do_discovery(['fish', 'eggs'], Loader=Loader)
        self.assertEqual(program.test, 'tests')
        self.assertEqual(Loader.args, [('fish', 'eggs', None)])

        Loader.args = []
        program = TestableTestProgram()
        program._do_discovery(['fish', 'eggs', 'ham'], Loader=Loader)
        self.assertEqual(program.test, 'tests')
        self.assertEqual(Loader.args, [('fish', 'eggs', 'ham')])

        Loader.args = []
        program = TestableTestProgram()
        program._do_discovery(['-s', 'fish'], Loader=Loader)
        self.assertEqual(program.test, 'tests')
        self.assertEqual(Loader.args, [('fish', 'test*.py', None)])

        Loader.args = []
        program = TestableTestProgram()
        program._do_discovery(['-t', 'fish'], Loader=Loader)
        self.assertEqual(program.test, 'tests')
        self.assertEqual(Loader.args, [('.', 'test*.py', 'fish')])

        Loader.args = []
        program = TestableTestProgram()
        program._do_discovery(['-p', 'fish'], Loader=Loader)
        self.assertEqual(program.test, 'tests')
        self.assertEqual(Loader.args, [('.', 'fish', None)])
        self.assertFalse(program.failfast)
        self.assertFalse(program.catchbreak)

        Loader.args = []
        program = TestableTestProgram()
        program._do_discovery(['-p', 'eggs', '-s', 'fish', '-v', '-f', '-c'],
                              Loader=Loader)
        self.assertEqual(program.test, 'tests')
        self.assertEqual(Loader.args, [('fish', 'eggs', None)])
        self.assertEqual(program.verbosity, 2)
        self.assertTrue(program.failfast)
        self.assertTrue(program.catchbreak)

    def test_detect_module_clash(self):
        # Simulate a module named 'foo' already imported from a DIFFERENT
        # directory than the one being discovered; discovery must refuse.
        class Module(object):
            __file__ = 'bar/foo.py'
        sys.modules['foo'] = Module
        full_path = os.path.abspath('foo')
        original_listdir = os.listdir
        original_isfile = os.path.isfile
        original_isdir = os.path.isdir

        def cleanup():
            os.listdir = original_listdir
            os.path.isfile = original_isfile
            os.path.isdir = original_isdir
            del sys.modules['foo']
            if full_path in sys.path:
                sys.path.remove(full_path)
        self.addCleanup(cleanup)

        def listdir(_):
            return ['foo.py']
        def isfile(_):
            return True
        def isdir(_):
            return True
        os.listdir = listdir
        os.path.isfile = isfile
        os.path.isdir = isdir

        loader = unittest.TestLoader()

        mod_dir = os.path.abspath('bar')
        expected_dir = os.path.abspath('foo')
        msg = re.escape(r"'foo' module incorrectly imported from %r. Expected %r. "
                "Is this module globally installed?" % (mod_dir, expected_dir))
        self.assertRaisesRegex(
            ImportError, '^%s$' % msg, loader.discover,
            start_dir='foo', pattern='foo.py'
        )
        self.assertEqual(sys.path[0], full_path)

    def test_discovery_from_dotted_path(self):
        loader = unittest.TestLoader()

        tests = [self]
        # A dotted package name should be resolved to the package directory.
        expectedPath = os.path.abspath(os.path.dirname(unittest.test.__file__))

        self.wasRun = False
        def _find_tests(start_dir, pattern):
            self.wasRun = True
            self.assertEqual(start_dir, expectedPath)
            return tests
        loader._find_tests = _find_tests
        suite = loader.discover('unittest.test')
        self.assertTrue(self.wasRun)
        self.assertEqual(suite._tests, tests)


if __name__ == '__main__':
    unittest.main()
apache-2.0
superbobry/pymc3
pymc3/examples/ARM5_4.py
14
1026
'''
Created on May 18, 2012

@author: jsalvatier
'''
import numpy as np
from pymc3 import *
# NOTE(review): 't' is not referenced below — presumably kept for
# interactive use; confirm before removing.
import theano.tensor as t
import pandas as pd

# Arsenic well-switching data (Gelman & Hill, ARM ch. 5.4).
wells = get_data_file('pymc3.examples', 'data/wells.dat')

data = pd.read_csv(wells, delimiter=u' ', index_col=u'id',
                   dtype={u'switch': np.int8})

# Rescale predictors: distance to 100m units, education to 4-year units.
data.dist /= 100
data.educ /= 4

# Predictor matrix: all columns except the 'switch' outcome, centered,
# plus a constant column for the intercept.
col = data.columns
P = data[col[1:]]

P = P - P.mean()
P['1'] = 1

Pa = np.array(P)

# Logistic regression: switch ~ Bernoulli(sigmoid(P @ effects)).
with Model() as model:
    effects = Normal(
        'effects', mu=0, tau=100. ** -2, shape=len(P.columns))
    p = sigmoid(dot(Pa, effects))

    s = Bernoulli('s', p, observed=np.array(data.switch))


def run(n=3000):
    # n is the number of samples to draw; the sentinel "short" is used by
    # the example test harness for a quick run.
    if n == "short":
        n = 50
    with model:
        # move the chain to the MAP which should be a good starting point
        start = find_MAP()
        H = model.fastd2logp()  # find a good orientation using the hessian at the MAP
        h = H(start)

        step = HamiltonianMC(model.vars, h)
        trace = sample(n, step, start)

if __name__ == '__main__':
    run()
apache-2.0
olaurendeau/v6_ui
c2corg_ui/views/xreport.py
3
6402
from c2corg_common.document_types import XREPORT_TYPE
from c2corg_ui.caching import cache_document_detail
from c2corg_ui.views import call_api, get_with_etag
from pyramid.httpexceptions import HTTPInternalServerError
from pyramid.renderers import render
from pyramid.view import view_config

from c2corg_ui.views.document import Document, ROUTE_NAMES


class Xreport(Document):
    """Pyramid views for incident/accident reports (xreports).

    Private report data is fetched separately with the user's
    authentication — see the docstring of :meth:`detail`.
    """

    _API_ROUTE = ROUTE_NAMES[XREPORT_TYPE]

    @view_config(route_name='xreports_index')
    def index(self):
        return self._index('c2corg_ui:templates/xreport/index.html')

    @view_config(route_name='xreports_view_id')
    @view_config(route_name='xreports_view_id_lang')
    def redirect_to_full_url(self):
        self._redirect_to_full_url()

    @view_config(route_name='xreports_view')
    def detail(self):
        """ Xreports are a bit special (similar to user profiles), because
        users share non-public data inside the report so that the part of
        xreport data has to be requested with an authentication header.

        The request to get a profile page is made by the browser, so that
        the authentication header can not be set. That's why the profile
        page is constructed as follows:

        - The browser makes a request `/xreports/123` to the UI server-side
          (without authentication header).
        - The UI server sides makes an unauthenticated request to the API
          to get the available locales of the xreport
          (`api.camptocamp.org/xreports/123/info`)
        - The UI server-side returns a page containing only the public data.
        - On the UI client-side a request is made to the UI server-side to
          get the private data as rendered HTML
          (e.g. `/xreports/data/123/fr`). If the user is logged-in, the
          request is made authenticated.
        - The UI server-side makes a request to the API to get the private
          data (e.g. (`api.camptocamp.org/xreports/123/fr`)). If the request
          to the UI server-side was authenticated, the request to the API is
          also made authenticated.
        - On the UI client-side the rendered HTML is inserted into the page.
        """
        id, lang = self._validate_id_lang()

        def render_page(xreport, locales):
            # Pick the locale matching the requested language for rendering.
            locale = list(filter(lambda l: l['lang'] == lang, locales))

            self.template_input.update({
                'lang': lang,
                'xreport': xreport,
                'locale': locale[0],
                'locales': locales,
                'geometry': self._get_geometry(xreport['geometry']['geom']),
                'version': None
            })

            return render(
                'c2corg_ui:templates/xreport/view.html',
                self.template_input,
                self.request
            )

        def load_data(old_api_cache_key=None):
            not_modified, api_cache_key, document_and_locale = \
                self._get_xreport_info(id, lang, old_api_cache_key)
            return not_modified, api_cache_key, document_and_locale

        return self._get_or_create(
            (id, lang), cache_document_detail, load_data, render_page,
            self._get_cache_key)

    def _get_xreport_info(self, id, lang, old_api_cache_key=None):
        # Fetch the public part of the xreport with ETag-based caching.
        # NOTE(review): data() below builds its query string as '?l=%s',
        # while here the lang is appended as '?%s' — confirm this is the
        # intended API query format.
        url = '%s/%d?%s' % (self._API_ROUTE, id, lang)
        not_modified, api_cache_key, document = get_with_etag(
            self.settings, url, old_api_cache_key)
        if not_modified:
            return not_modified, api_cache_key, None

        return False, api_cache_key, (document, document['locales'])

    @view_config(route_name='xreports_data',
                 renderer='c2corg_ui:templates/xreport/data.html')
    def data(self):
        """Return the (possibly private) report data as rendered HTML.

        The browser's Authorization header, when present, is forwarded to
        the API so that authorized users see the non-public fields.
        """
        id, lang = self._validate_id_lang()

        headers = None
        if 'Authorization' in self.request.headers:
            headers = {
                'Authorization': self.request.headers.get('Authorization')
            }

        url = '%s/%d?l=%s' % (self._API_ROUTE, id, lang)
        resp, data = call_api(self.settings, url, headers)

        if resp.status_code != 200:
            raise HTTPInternalServerError(
                "An error occurred while loading the document")

        if data.get('not_authorized', False):
            self.template_input.update({
                'not_authorized': True
            })
        else:
            locales = data['locales']
            self.template_input.update({
                'lang': locales[0]['lang'],
                'locale': locales[0],
                'xreport': data,
                'geometry': self._get_geometry(data['geometry']['geom'])
                if data['geometry'] else None
            })

        return self.template_input

    @view_config(route_name='xreports_archive')
    def archive(self):
        # Render a specific archived version of the report.
        id, lang = self._validate_id_lang()
        version_id = int(self.request.matchdict['version'])

        def render_page(xreport, locale, version):
            self.template_input.update({
                'lang': lang,
                'xreport': xreport,
                'locale': locale,
                'geometry': self._get_geometry(xreport['geometry']['geom']),
                'version': version
            })

            return render(
                'c2corg_ui:templates/xreport/view.html',
                self.template_input,
                self.request
            )

        return self._get_or_create_archive(id, lang, version_id, render_page)

    @view_config(route_name='xreports_history')
    def history(self):
        return self._get_history()

    @view_config(route_name='xreports_diff')
    def diff(self):
        return self._diff()

    @view_config(route_name='xreports_add')
    def add(self):
        self.template_input.update({
            'xreport_lang': None,
            'xreport_id': None
        })
        return self._add('c2corg_ui:templates/xreport/edit.html')

    @view_config(route_name='xreports_edit',
                 renderer='c2corg_ui:templates/xreport/edit.html')
    def edit(self):
        id, lang = self._validate_id_lang()
        self.template_input.update({
            'xreport_lang': lang,
            'xreport_id': id
        })
        return self.template_input

    @view_config(route_name='xreports_preview',
                 renderer='c2corg_ui:templates/xreport/preview.html')
    def preview(self):
        return self._preview()
agpl-3.0
anryko/ansible
test/units/cli/test_vault.py
45
9495
# -*- coding: utf-8 -*- # (c) 2017, Adrian Likins <alikins@redhat.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # Make coding more python3-ish from __future__ import (absolute_import, division, print_function) __metaclass__ = type import os import pytest from units.compat import unittest from units.compat.mock import patch, MagicMock from units.mock.vault_helper import TextVaultSecret from ansible import context, errors from ansible.cli.vault import VaultCLI from ansible.module_utils._text import to_text from ansible.utils import context_objects as co # TODO: make these tests assert something, likely by verifing # mock calls @pytest.fixture(autouse='function') def reset_cli_args(): co.GlobalCLIArgs._Singleton__instance = None yield co.GlobalCLIArgs._Singleton__instance = None class TestVaultCli(unittest.TestCase): def setUp(self): self.tty_patcher = patch('ansible.cli.sys.stdin.isatty', return_value=False) self.mock_isatty = self.tty_patcher.start() def tearDown(self): self.tty_patcher.stop() def test_parse_empty(self): cli = VaultCLI(['vaultcli']) self.assertRaises(SystemExit, cli.parse) # FIXME: something weird seems to be afoot when parsing actions # cli = VaultCLI(args=['view', '/dev/null/foo', 'mysecret3']) # will skip '/dev/null/foo'. something in cli.CLI.set_action() ? # maybe we self.args gets modified in a loop? 
def test_parse_view_file(self): cli = VaultCLI(args=['ansible-vault', 'view', '/dev/null/foo']) cli.parse() @patch('ansible.cli.vault.VaultCLI.setup_vault_secrets') def test_view_missing_file_no_secret(self, mock_setup_vault_secrets): mock_setup_vault_secrets.return_value = [] cli = VaultCLI(args=['ansible-vault', 'view', '/dev/null/foo']) cli.parse() self.assertRaisesRegexp(errors.AnsibleOptionsError, "A vault password is required to use Ansible's Vault", cli.run) @patch('ansible.cli.vault.VaultCLI.setup_vault_secrets') def test_encrypt_missing_file_no_secret(self, mock_setup_vault_secrets): mock_setup_vault_secrets.return_value = [] cli = VaultCLI(args=['ansible-vault', 'encrypt', '/dev/null/foo']) cli.parse() self.assertRaisesRegexp(errors.AnsibleOptionsError, "A vault password is required to use Ansible's Vault", cli.run) @patch('ansible.cli.vault.VaultCLI.setup_vault_secrets') @patch('ansible.cli.vault.VaultEditor') def test_encrypt(self, mock_vault_editor, mock_setup_vault_secrets): mock_setup_vault_secrets.return_value = [('default', TextVaultSecret('password'))] cli = VaultCLI(args=['ansible-vault', 'encrypt', '/dev/null/foo']) cli.parse() cli.run() @patch('ansible.cli.vault.VaultCLI.setup_vault_secrets') @patch('ansible.cli.vault.VaultEditor') def test_encrypt_string(self, mock_vault_editor, mock_setup_vault_secrets): mock_setup_vault_secrets.return_value = [('default', TextVaultSecret('password'))] cli = VaultCLI(args=['ansible-vault', 'encrypt_string', 'some string to encrypt']) cli.parse() cli.run() @patch('ansible.cli.vault.VaultCLI.setup_vault_secrets') @patch('ansible.cli.vault.VaultEditor') @patch('ansible.cli.vault.display.prompt', return_value='a_prompt') def test_encrypt_string_prompt(self, mock_display, mock_vault_editor, mock_setup_vault_secrets): mock_setup_vault_secrets.return_value = [('default', TextVaultSecret('password'))] cli = VaultCLI(args=['ansible-vault', 'encrypt_string', '--prompt', 'some string to encrypt']) cli.parse() cli.run() 
@patch('ansible.cli.vault.VaultCLI.setup_vault_secrets') @patch('ansible.cli.vault.VaultEditor') @patch('ansible.cli.vault.sys.stdin.read', return_value='This is data from stdin') def test_encrypt_string_stdin(self, mock_stdin_read, mock_vault_editor, mock_setup_vault_secrets): mock_setup_vault_secrets.return_value = [('default', TextVaultSecret('password'))] cli = VaultCLI(args=['ansible-vault', 'encrypt_string', '--stdin-name', 'the_var_from_stdin', '-']) cli.parse() cli.run() @patch('ansible.cli.vault.VaultCLI.setup_vault_secrets') @patch('ansible.cli.vault.VaultEditor') def test_encrypt_string_names(self, mock_vault_editor, mock_setup_vault_secrets): mock_setup_vault_secrets.return_value = [('default', TextVaultSecret('password'))] cli = VaultCLI(args=['ansible-vault', 'encrypt_string', '--name', 'foo1', '--name', 'foo2', 'some string to encrypt']) cli.parse() cli.run() @patch('ansible.cli.vault.VaultCLI.setup_vault_secrets') @patch('ansible.cli.vault.VaultEditor') def test_encrypt_string_more_args_than_names(self, mock_vault_editor, mock_setup_vault_secrets): mock_setup_vault_secrets.return_value = [('default', TextVaultSecret('password'))] cli = VaultCLI(args=['ansible-vault', 'encrypt_string', '--name', 'foo1', 'some string to encrypt', 'other strings', 'a few more string args']) cli.parse() cli.run() @patch('ansible.cli.vault.VaultCLI.setup_vault_secrets') @patch('ansible.cli.vault.VaultEditor') def test_create(self, mock_vault_editor, mock_setup_vault_secrets): mock_setup_vault_secrets.return_value = [('default', TextVaultSecret('password'))] cli = VaultCLI(args=['ansible-vault', 'create', '/dev/null/foo']) cli.parse() cli.run() @patch('ansible.cli.vault.VaultCLI.setup_vault_secrets') @patch('ansible.cli.vault.VaultEditor') def test_edit(self, mock_vault_editor, mock_setup_vault_secrets): mock_setup_vault_secrets.return_value = [('default', TextVaultSecret('password'))] cli = VaultCLI(args=['ansible-vault', 'edit', '/dev/null/foo']) cli.parse() cli.run() 
@patch('ansible.cli.vault.VaultCLI.setup_vault_secrets') @patch('ansible.cli.vault.VaultEditor') def test_decrypt(self, mock_vault_editor, mock_setup_vault_secrets): mock_setup_vault_secrets.return_value = [('default', TextVaultSecret('password'))] cli = VaultCLI(args=['ansible-vault', 'decrypt', '/dev/null/foo']) cli.parse() cli.run() @patch('ansible.cli.vault.VaultCLI.setup_vault_secrets') @patch('ansible.cli.vault.VaultEditor') def test_view(self, mock_vault_editor, mock_setup_vault_secrets): mock_setup_vault_secrets.return_value = [('default', TextVaultSecret('password'))] cli = VaultCLI(args=['ansible-vault', 'view', '/dev/null/foo']) cli.parse() cli.run() @patch('ansible.cli.vault.VaultCLI.setup_vault_secrets') @patch('ansible.cli.vault.VaultEditor') def test_rekey(self, mock_vault_editor, mock_setup_vault_secrets): mock_setup_vault_secrets.return_value = [('default', TextVaultSecret('password'))] cli = VaultCLI(args=['ansible-vault', 'rekey', '/dev/null/foo']) cli.parse() cli.run() @pytest.mark.parametrize('cli_args, expected', [ (['ansible-vault', 'view', 'vault.txt'], 0), (['ansible-vault', 'view', 'vault.txt', '-vvv'], 3), (['ansible-vault', '-vv', 'view', 'vault.txt'], 2), # Due to our manual parsing we want to verify that -v set in the sub parser takes precedence. 
This behaviour is # deprecated and tests should be removed when the code that handles it is removed (['ansible-vault', '-vv', 'view', 'vault.txt', '-v'], 1), (['ansible-vault', '-vv', 'view', 'vault.txt', '-vvvv'], 4), ]) def test_verbosity_arguments(cli_args, expected, tmp_path_factory, monkeypatch): # Add a password file so we don't get a prompt in the test test_dir = to_text(tmp_path_factory.mktemp('test-ansible-vault')) pass_file = os.path.join(test_dir, 'pass.txt') with open(pass_file, 'w') as pass_fd: pass_fd.write('password') cli_args.extend(['--vault-id', pass_file]) # Mock out the functions so we don't actually execute anything for func_name in [f for f in dir(VaultCLI) if f.startswith("execute_")]: monkeypatch.setattr(VaultCLI, func_name, MagicMock()) cli = VaultCLI(args=cli_args) cli.run() assert context.CLIARGS['verbosity'] == expected
gpl-3.0
shastikk/youtube-dl
youtube_dl/extractor/thisamericanlife.py
135
1549
from __future__ import unicode_literals from .common import InfoExtractor class ThisAmericanLifeIE(InfoExtractor): _VALID_URL = r'https?://(?:www\.)?thisamericanlife\.org/(?:radio-archives/episode/|play_full\.php\?play=)(?P<id>\d+)' _TESTS = [{ 'url': 'http://www.thisamericanlife.org/radio-archives/episode/487/harper-high-school-part-one', 'md5': '8f7d2da8926298fdfca2ee37764c11ce', 'info_dict': { 'id': '487', 'ext': 'm4a', 'title': '487: Harper High School, Part One', 'description': 'md5:ee40bdf3fb96174a9027f76dbecea655', 'thumbnail': 're:^https?://.*\.jpg$', }, }, { 'url': 'http://www.thisamericanlife.org/play_full.php?play=487', 'only_matching': True, }] def _real_extract(self, url): video_id = self._match_id(url) webpage = self._download_webpage( 'http://www.thisamericanlife.org/radio-archives/episode/%s' % video_id, video_id) return { 'id': video_id, 'url': 'http://stream.thisamericanlife.org/{0}/stream/{0}_64k.m3u8'.format(video_id), 'protocol': 'm3u8_native', 'ext': 'm4a', 'acodec': 'aac', 'vcodec': 'none', 'abr': 64, 'title': self._html_search_meta(r'twitter:title', webpage, 'title', fatal=True), 'description': self._html_search_meta(r'description', webpage, 'description'), 'thumbnail': self._og_search_thumbnail(webpage), }
unlicense
Faysir/cuda-convnet2
python_util/gpumodel.py
175
14896
# Copyright 2014 Google Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import numpy as n import os from time import time, asctime, localtime, strftime from util import * from data import * from options import * from math import ceil, floor, sqrt from data import DataProvider, dp_types import sys import shutil import platform from os import linesep as NL from threading import Thread import tempfile as tf class ModelStateException(Exception): pass class CheckpointWriter(Thread): def __init__(self, path, dic): Thread.__init__(self) self.path = path self.dic = dic def run(self): save_dir = os.path.dirname(self.path) save_file = os.path.basename(self.path) # Write checkpoint to temporary filename tmpfile = tf.NamedTemporaryFile(dir=os.path.dirname(save_dir), delete=False) pickle(tmpfile, self.dic) # Also closes tf # Move it to final filename os.rename(tmpfile.name, self.path) # Delete old checkpoints for f in os.listdir(save_dir): if f != save_file: os.remove(os.path.join(save_dir, f)) # GPU Model interface class IGPUModel: def __init__(self, model_name, op, load_dic, filename_options=[], dp_params={}): # these are input parameters self.model_name = model_name self.op = op self.options = op.options self.load_dic = load_dic self.filename_options = filename_options self.dp_params = dp_params self.device_ids = self.op.get_value('gpu') self.fill_excused_options() self.checkpoint_writer = None #assert self.op.all_values_given() for o in op.get_options_list(): 
setattr(self, o.name, o.value) self.loaded_from_checkpoint = load_dic is not None # these are things that the model must remember but they're not input parameters if self.loaded_from_checkpoint: self.model_state = load_dic["model_state"] self.save_file = self.options["save_file_override"].value if self.options["save_file_override"].value_given else self.options['load_file'].value if not os.path.isdir(self.save_file) and os.path.exists(self.save_file): self.save_file = os.path.dirname(self.save_file) # print self.options["save_file_override"].value, self.save_file else: self.model_state = {} self.save_file = self.options["save_file_override"].value if self.options["save_file_override"].value_given else os.path.join(self.options['save_path'].value, model_name + "_" + '_'.join(['%s_%s' % (char, self.options[opt].get_str_value()) for opt, char in filename_options]) + '_' + strftime('%Y-%m-%d_%H.%M.%S')) self.model_state["train_outputs"] = [] self.model_state["test_outputs"] = [] self.model_state["epoch"] = 1 self.model_state["batchnum"] = self.train_batch_range[0] # print self.save_file self.init_data_providers() if load_dic: self.train_data_provider.advance_batch() # model state often requries knowledge of data provider, so it's initialized after try: self.init_model_state() except ModelStateException, e: print e sys.exit(1) for var, val in self.model_state.iteritems(): setattr(self, var, val) self.import_model() self.init_model_lib() def import_model(self): print "=========================" print "Importing %s C++ module" % ('_' + self.model_name) self.libmodel = __import__('_' + self.model_name) def fill_excused_options(self): pass def init_data_providers(self): self.dp_params['convnet'] = self try: self.test_data_provider = DataProvider.get_instance(self.data_path, self.test_batch_range, type=self.dp_type, dp_params=self.dp_params, test=True) self.train_data_provider = DataProvider.get_instance(self.data_path, self.train_batch_range, self.model_state["epoch"], 
self.model_state["batchnum"], type=self.dp_type, dp_params=self.dp_params, test=False) except DataProviderException, e: print "Unable to create data provider: %s" % e self.print_data_providers() sys.exit() def init_model_state(self): pass def init_model_lib(self): pass def start(self): if self.test_only: self.test_outputs += [self.get_test_error()] self.print_test_results() else: self.train() self.cleanup() if self.force_save: self.save_state().join() sys.exit(0) def train(self): print "=========================" print "Training %s" % self.model_name self.op.print_values() print "=========================" self.print_model_state() print "Running on CUDA device(s) %s" % ", ".join("%d" % d for d in self.device_ids) print "Current time: %s" % asctime(localtime()) print "Saving checkpoints to %s" % self.save_file print "=========================" next_data = self.get_next_batch() while self.epoch <= self.num_epochs: data = next_data self.epoch, self.batchnum = data[0], data[1] self.print_iteration() sys.stdout.flush() compute_time_py = time() self.start_batch(data) # load the next batch while the current one is computing next_data = self.get_next_batch() batch_output = self.finish_batch() self.train_outputs += [batch_output] self.print_train_results() if self.get_num_batches_done() % self.testing_freq == 0: self.sync_with_host() self.test_outputs += [self.get_test_error()] self.print_test_results() self.print_test_status() self.conditional_save() self.print_elapsed_time(time() - compute_time_py) def cleanup(self): if self.checkpoint_writer is not None: self.checkpoint_writer.join() self.checkpoint_writer = None def print_model_state(self): pass def get_num_batches_done(self): return len(self.train_batch_range) * (self.epoch - 1) + self.batchnum - self.train_batch_range[0] + 1 def get_next_batch(self, train=True): dp = self.train_data_provider if not train: dp = self.test_data_provider return self.parse_batch_data(dp.get_next_batch(), train=train) def 
parse_batch_data(self, batch_data, train=True): return batch_data[0], batch_data[1], batch_data[2]['data'] def start_batch(self, batch_data, train=True): self.libmodel.startBatch(batch_data[2], not train) def finish_batch(self): return self.libmodel.finishBatch() def print_iteration(self): print "\t%d.%d..." % (self.epoch, self.batchnum), def print_elapsed_time(self, compute_time_py): print "(%.3f sec)" % (compute_time_py) def print_train_results(self): batch_error = self.train_outputs[-1][0] if not (batch_error > 0 and batch_error < 2e20): print "Crazy train error: %.6f" % batch_error self.cleanup() print "Train error: %.6f " % (batch_error), def print_test_results(self): batch_error = self.test_outputs[-1][0] print "%s\t\tTest error: %.6f" % (NL, batch_error), def print_test_status(self): status = (len(self.test_outputs) == 1 or self.test_outputs[-1][0] < self.test_outputs[-2][0]) and "ok" or "WORSE" print status, def sync_with_host(self): if self.checkpoint_writer is not None: self.checkpoint_writer.join() self.checkpoint_writer = None self.libmodel.syncWithHost() def conditional_save(self): batch_error = self.test_outputs[-1][0] if batch_error > 0 and batch_error < self.max_test_err: self.save_state() else: print "\tTest error > %g, not saving." 
% self.max_test_err, def aggregate_test_outputs(self, test_outputs): test_error = tuple([sum(t[r] for t in test_outputs) / (1 if self.test_one else len(self.test_batch_range)) for r in range(len(test_outputs[-1]))]) return test_error def get_test_error(self): next_data = self.get_next_batch(train=False) test_outputs = [] while True: data = next_data start_time_test = time() self.start_batch(data, train=False) load_next = (not self.test_one or self.test_only) and data[1] < self.test_batch_range[-1] if load_next: # load next batch next_data = self.get_next_batch(train=False) test_outputs += [self.finish_batch()] if self.test_only: # Print the individual batch results for safety print "batch %d: %s" % (data[1], str(test_outputs[-1])), self.print_elapsed_time(time() - start_time_test) if not load_next: break sys.stdout.flush() return self.aggregate_test_outputs(test_outputs) def set_var(self, var_name, var_val): setattr(self, var_name, var_val) self.model_state[var_name] = var_val return var_val def get_var(self, var_name): return self.model_state[var_name] def has_var(self, var_name): return var_name in self.model_state def save_state(self): for att in self.model_state: if hasattr(self, att): self.model_state[att] = getattr(self, att) dic = {"model_state": self.model_state, "op": self.op} checkpoint_file = "%d.%d" % (self.epoch, self.batchnum) checkpoint_file_full_path = os.path.join(self.save_file, checkpoint_file) if not os.path.exists(self.save_file): os.makedirs(self.save_file) assert self.checkpoint_writer is None self.checkpoint_writer = CheckpointWriter(checkpoint_file_full_path, dic) self.checkpoint_writer.start() print "-------------------------------------------------------" print "Saved checkpoint to %s" % self.save_file print "=======================================================", return self.checkpoint_writer def get_progress(self): num_batches_total = self.num_epochs * len(self.train_batch_range) return min(1.0, max(0.0, 
float(self.get_num_batches_done()-1) / num_batches_total)) @staticmethod def load_checkpoint(load_dir): if os.path.isdir(load_dir): return unpickle(os.path.join(load_dir, sorted(os.listdir(load_dir), key=alphanum_key)[-1])) return unpickle(load_dir) @staticmethod def get_options_parser(): op = OptionsParser() op.add_option("load-file", "load_file", StringOptionParser, "Load file", default="", excuses=OptionsParser.EXCUSE_ALL) op.add_option("save-path", "save_path", StringOptionParser, "Save path", excuses=['save_file_override']) op.add_option("save-file", "save_file_override", StringOptionParser, "Save file override", excuses=['save_path']) op.add_option("train-range", "train_batch_range", RangeOptionParser, "Data batch range: training") op.add_option("test-range", "test_batch_range", RangeOptionParser, "Data batch range: testing") op.add_option("data-provider", "dp_type", StringOptionParser, "Data provider", default="default") op.add_option("test-freq", "testing_freq", IntegerOptionParser, "Testing frequency", default=25) op.add_option("epochs", "num_epochs", IntegerOptionParser, "Number of epochs", default=500) op.add_option("data-path", "data_path", StringOptionParser, "Data path") op.add_option("max-test-err", "max_test_err", FloatOptionParser, "Maximum test error for saving") op.add_option("test-only", "test_only", BooleanOptionParser, "Test and quit?", default=0) op.add_option("test-one", "test_one", BooleanOptionParser, "Test on one batch at a time?", default=1) op.add_option("force-save", "force_save", BooleanOptionParser, "Force save before quitting", default=0) op.add_option("gpu", "gpu", ListOptionParser(IntegerOptionParser), "GPU override") return op @staticmethod def print_data_providers(): print "Available data providers:" for dp, desc in dp_types.iteritems(): print " %s: %s" % (dp, desc) @staticmethod def parse_options(op): try: load_dic = None options = op.parse() load_location = None # print options['load_file'].value_given, 
options['save_file_override'].value_given # print options['save_file_override'].value if options['load_file'].value_given: load_location = options['load_file'].value elif options['save_file_override'].value_given and os.path.exists(options['save_file_override'].value): load_location = options['save_file_override'].value if load_location is not None: load_dic = IGPUModel.load_checkpoint(load_location) old_op = load_dic["op"] old_op.merge_from(op) op = old_op op.eval_expr_defaults() return op, load_dic except OptionMissingException, e: print e op.print_usage() except OptionException, e: print e except UnpickleError, e: print "Error loading checkpoint:" print e sys.exit()
apache-2.0
baylee-d/osf.io
osf/utils/workflows.py
5
11263
# -*- coding: utf-8 -*- from __future__ import unicode_literals from enum import Enum, IntEnum, unique class ModerationEnum(IntEnum): '''A helper Enum superclass that provides easy translation to Int/CharChoices fields.''' @classmethod def int_field_choices(cls): return tuple((member.value, member.readable_value) for member in cls) @classmethod def char_field_choices(cls): return tuple((member.db_name, member.readable_value) for member in cls) @classmethod def from_db_name(cls, state_db_name): return cls[state_db_name.upper()] @property def readable_value(self): return super().name.title().replace('_', '') @property def db_name(self): return self.name.lower() class SanctionTypes(ModerationEnum): '''A simple descriptor for the type of a sanction class''' UNDEFINED = 0 REGISTRATION_APPROVAL = 1 EMBARGO = 2 RETRACTION = 3 EMBARGO_TERMINATION_APPROVAL = 4 DRAFT_REGISTRATION_APPROVAL = 5 class SanctionStates(ModerationEnum): '''The moderated state of a Sanction object.''' UNDEFINED = 0 UNAPPROVED = 1 PENDING_MODERATION = 2 APPROVED = 3 REJECTED = 4 MODERATOR_REJECTED = 5 COMPLETED = 6 # Embargo only class RegistrationModerationStates(ModerationEnum): '''The publication state of a Registration object''' UNDEFINED = 0 INITIAL = 1 REVERTED = 2 PENDING = 3 REJECTED = 4 ACCEPTED = 5 EMBARGO = 6 PENDING_EMBARGO_TERMINATION = 7 PENDING_WITHDRAW_REQUEST = 8 PENDING_WITHDRAW = 9 WITHDRAWN = 10 @classmethod def from_sanction(cls, sanction): '''Returns a RegistrationModerationState based on sanction's type and state.''' # Define every time because it gets interpreted as an enum member in the class body :( SANCTION_STATE_MAP = { SanctionTypes.REGISTRATION_APPROVAL: { SanctionStates.UNAPPROVED: cls.INITIAL, SanctionStates.PENDING_MODERATION: cls.PENDING, SanctionStates.APPROVED: cls.ACCEPTED, SanctionStates.REJECTED: cls.REVERTED, SanctionStates.MODERATOR_REJECTED: cls.REJECTED, }, SanctionTypes.EMBARGO: { SanctionStates.UNAPPROVED: cls.INITIAL, SanctionStates.PENDING_MODERATION: 
cls.PENDING, SanctionStates.APPROVED: cls.EMBARGO, SanctionStates.COMPLETED: cls.ACCEPTED, SanctionStates.REJECTED: cls.REVERTED, SanctionStates.MODERATOR_REJECTED: cls.REJECTED, }, SanctionTypes.RETRACTION: { SanctionStates.UNAPPROVED: cls.PENDING_WITHDRAW_REQUEST, SanctionStates.PENDING_MODERATION: cls.PENDING_WITHDRAW, SanctionStates.APPROVED: cls.WITHDRAWN, # Rejected retractions are in either ACCEPTED or EMBARGO SanctionStates.REJECTED: cls.UNDEFINED, SanctionStates.MODERATOR_REJECTED: cls.UNDEFINED, }, SanctionTypes.EMBARGO_TERMINATION_APPROVAL: { SanctionStates.UNAPPROVED: cls.PENDING_EMBARGO_TERMINATION, SanctionStates.PENDING_MODERATION: cls.ACCEPTED, # Not currently reachable SanctionStates.APPROVED: cls.ACCEPTED, SanctionStates.REJECTED: cls.EMBARGO, SanctionStates.MODERATOR_REJECTED: cls.EMBARGO, # Not currently reachable }, } try: new_state = SANCTION_STATE_MAP[sanction.SANCTION_TYPE][sanction.approval_stage] except KeyError: new_state = cls.UNDEFINED return new_state class RegistrationModerationTriggers(ModerationEnum): '''The acceptable 'triggers' to describe a moderated action on a Registration.''' SUBMIT = 0 ACCEPT_SUBMISSION = 1 REJECT_SUBMISSION = 2 REQUEST_WITHDRAWAL = 3 ACCEPT_WITHDRAWAL = 4 REJECT_WITHDRAWAL = 5 FORCE_WITHDRAW = 6 @classmethod def from_transition(cls, from_state, to_state): '''Infer a trigger from a from_state/to_state pair.''' moderation_states = RegistrationModerationStates transition_to_trigger_mappings = { (moderation_states.INITIAL, moderation_states.PENDING): cls.SUBMIT, (moderation_states.PENDING, moderation_states.ACCEPTED): cls.ACCEPT_SUBMISSION, (moderation_states.PENDING, moderation_states.EMBARGO): cls.ACCEPT_SUBMISSION, (moderation_states.PENDING, moderation_states.REJECTED): cls.REJECT_SUBMISSION, (moderation_states.PENDING_WITHDRAW_REQUEST, moderation_states.PENDING_WITHDRAW): cls.REQUEST_WITHDRAWAL, (moderation_states.PENDING_WITHDRAW, moderation_states.WITHDRAWN): cls.ACCEPT_WITHDRAWAL, 
(moderation_states.PENDING_WITHDRAW, moderation_states.ACCEPTED): cls.REJECT_WITHDRAWAL, (moderation_states.PENDING_WITHDRAW, moderation_states.EMBARGO): cls.REJECT_WITHDRAWAL, (moderation_states.ACCEPTED, moderation_states.WITHDRAWN): cls.FORCE_WITHDRAW, (moderation_states.EMBARGO, moderation_states.WITHDRAWN): cls.FORCE_WITHDRAW, } return transition_to_trigger_mappings.get((from_state, to_state)) @unique class ChoiceEnum(Enum): @classmethod def choices(cls): return tuple((v, str(v).title()) for v in cls.values()) @classmethod def values(cls): return tuple(c.value for c in cls) @property def db_name(self): '''Return the value stored in the database for the enum member. For parity with ModerationEnum. ''' return self.value DEFAULT_STATES = [ ('INITIAL', 'initial'), ('PENDING', 'pending'), ('ACCEPTED', 'accepted'), ('REJECTED', 'rejected'), ] DEFAULT_TRIGGERS = [ ('SUBMIT', 'submit'), ('ACCEPT', 'accept'), ('REJECT', 'reject'), ('EDIT_COMMENT', 'edit_comment'), ] REVIEW_STATES = DEFAULT_STATES + [ ('WITHDRAWN', 'withdrawn'), ] REVIEW_TRIGGERS = DEFAULT_TRIGGERS + [ ('WITHDRAW', 'withdraw') ] REGISTRATION_STATES = REVIEW_STATES + [ ('EMBARGO', 'embargo'), ('PENDING_EMBARGO_TERMINATION', 'pending_embargo_termination'), ('PENDING_WITHDRAW_REQUEST', 'pending_withdraw_request'), ('PENDING_WITHDRAW', 'pending_withdraw'), ] DefaultStates = ChoiceEnum('DefaultStates', DEFAULT_STATES) ReviewStates = ChoiceEnum('ReviewStates', REVIEW_STATES) RegistrationStates = ChoiceEnum('RegistrationStates', REGISTRATION_STATES) DefaultTriggers = ChoiceEnum('DefaultTriggers', DEFAULT_TRIGGERS) ReviewTriggers = ChoiceEnum('ReviewTriggers', REVIEW_TRIGGERS) CHRONOS_STATUS_STATES = [ ('DRAFT', 1), ('SUBMITTED', 2), ('ACCEPTED', 3), ('PUBLISHED', 4), ('CANCELLED', 5), ] ChronosSubmissionStatus = ChoiceEnum('ChronosSubmissionStatus', CHRONOS_STATUS_STATES) DEFAULT_TRANSITIONS = [ { 'trigger': DefaultTriggers.SUBMIT.value, 'source': [DefaultStates.INITIAL.value], 'dest': 
DefaultStates.PENDING.value, 'after': ['save_action', 'update_last_transitioned', 'save_changes', 'notify_submit'], }, { 'trigger': DefaultTriggers.SUBMIT.value, 'source': [DefaultStates.PENDING.value, DefaultStates.REJECTED.value], 'conditions': 'resubmission_allowed', 'dest': DefaultStates.PENDING.value, 'after': ['save_action', 'update_last_transitioned', 'save_changes', 'notify_resubmit'], }, { 'trigger': DefaultTriggers.ACCEPT.value, 'source': [DefaultStates.PENDING.value, DefaultStates.REJECTED.value], 'dest': DefaultStates.ACCEPTED.value, 'after': ['save_action', 'update_last_transitioned', 'save_changes', 'notify_accept_reject'], }, { 'trigger': DefaultTriggers.REJECT.value, 'source': [DefaultStates.PENDING.value, DefaultStates.ACCEPTED.value], 'dest': DefaultStates.REJECTED.value, 'after': ['save_action', 'update_last_transitioned', 'save_changes', 'notify_accept_reject'], }, { 'trigger': DefaultTriggers.EDIT_COMMENT.value, 'source': [DefaultStates.PENDING.value, DefaultStates.REJECTED.value, DefaultStates.ACCEPTED.value], 'dest': '=', 'after': ['save_action', 'save_changes', 'notify_edit_comment'], }, ] REVIEWABLE_TRANSITIONS = DEFAULT_TRANSITIONS + [ { 'trigger': ReviewTriggers.WITHDRAW.value, 'source': [ReviewStates.PENDING.value, ReviewStates.ACCEPTED.value], 'dest': ReviewStates.WITHDRAWN.value, 'after': ['save_action', 'update_last_transitioned', 'perform_withdraw', 'save_changes', 'notify_withdraw'] } ] SANCTION_TRANSITIONS = [ { # A single admin approves a sanction 'trigger': 'approve', # Approval from an individual admin 'source': [SanctionStates.UNAPPROVED], 'dest': None, 'before': ['_validate_request'], 'after': ['_on_approve'], }, { # Allow delayed admin approvals as a noop in non-rejected states 'trigger': 'approve', 'source': [ SanctionStates.PENDING_MODERATION, SanctionStates.APPROVED, SanctionStates.COMPLETED ], 'dest': None, }, { # A moderated sanction has satisfied its Admin approval requirements # and is submitted for moderation. 
# Allow calling without validation for use by OSF admins 'trigger': 'accept', 'source': [SanctionStates.UNAPPROVED], 'dest': SanctionStates.PENDING_MODERATION, 'conditions': ['is_moderated'], 'before': ['_validate_request'], 'after': [], # send moderator emails here? }, { # An un moderated sanction has satisfied its Admin approval requirements # or a moderated sanction recieves moderator approval and takes effect 'trigger': 'accept', 'source': [SanctionStates.UNAPPROVED, SanctionStates.PENDING_MODERATION], 'dest': SanctionStates.APPROVED, 'before': ['_validate_request'], 'after': ['_on_complete'], }, { # Allow delayed accept triggers as a noop in completed states 'trigger': 'accept', 'source': [SanctionStates.APPROVED, SanctionStates.COMPLETED], 'dest': None, }, { # A sanction is rejected by an admin 'trigger': 'reject', 'source': [SanctionStates.UNAPPROVED], 'dest': SanctionStates.REJECTED, 'before': ['_validate_request'], 'after': ['_on_reject'], }, { # A sanction is rejected by a moderator 'trigger': 'reject', 'source': [SanctionStates.PENDING_MODERATION], 'dest': SanctionStates.MODERATOR_REJECTED, 'before': ['_validate_request'], 'after': ['_on_reject'], }, { # Allow delayed reject triggers as a noop in rejected states 'trigger': 'reject', 'source': [SanctionStates.REJECTED, SanctionStates.MODERATOR_REJECTED], 'dest': None, }, ] @unique class RequestTypes(ChoiceEnum): ACCESS = 'access' WITHDRAWAL = 'withdrawal'
apache-2.0
paypal/keystone
keystone/service.py
1
3306
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2012 OpenStack LLC # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import routes from keystone import auth from keystone import catalog from keystone import config from keystone import controllers from keystone.common import logging from keystone.common import wsgi from keystone.contrib import ec2 from keystone import identity from keystone import policy from keystone import routers from keystone import token from keystone import trust CONF = config.CONF LOG = logging.getLogger(__name__) DRIVERS = dict( catalog_api=catalog.Manager(), ec2_api=ec2.Manager(), identity_api=identity.Manager(), policy_api=policy.Manager(), token_api=token.Manager(), trust_api=trust.Manager()) @logging.fail_gracefully def public_app_factory(global_conf, **local_conf): controllers.register_version('v2.0') conf = global_conf.copy() conf.update(local_conf) return wsgi.ComposingRouter(routes.Mapper(), [identity.routers.Public(), token.routers.Router(), routers.VersionV2('public'), routers.Extension(False)]) @logging.fail_gracefully def admin_app_factory(global_conf, **local_conf): conf = global_conf.copy() conf.update(local_conf) return wsgi.ComposingRouter(routes.Mapper(), [identity.routers.Admin(), token.routers.Router(), routers.VersionV2('admin'), routers.Extension()]) @logging.fail_gracefully def public_version_app_factory(global_conf, **local_conf): conf = global_conf.copy() conf.update(local_conf) return wsgi.ComposingRouter(routes.Mapper(), 
[routers.Versions('public')]) @logging.fail_gracefully def admin_version_app_factory(global_conf, **local_conf): conf = global_conf.copy() conf.update(local_conf) return wsgi.ComposingRouter(routes.Mapper(), [routers.Versions('admin')]) @logging.fail_gracefully def v3_app_factory(global_conf, **local_conf): controllers.register_version('v3') conf = global_conf.copy() conf.update(local_conf) mapper = routes.Mapper() v3routers = [] for module in [auth, catalog, identity, policy]: module.routers.append_v3_routers(mapper, v3routers) if CONF.trust.enabled: trust.routers.append_v3_routers(mapper, v3routers) # Add in the v3 version api v3routers.append(routers.VersionV3('admin')) v3routers.append(routers.VersionV3('public')) # TODO(ayoung): put token routes here return wsgi.ComposingRouter(mapper, v3routers)
apache-2.0
petrvanblokland/Xierpa3App
Xierpa3App/run.py
1
1377
# -*- coding: UTF-8 -*- # ----------------------------------------------------------------------------- # xierpa server # Copyright (c) 2014+ buro@petr.com, www.petr.com, www.xierpa.com # # X I E R P A 3 A P P # Distribution by the MIT License. # # ----------------------------------------------------------------------------- # # run.py # # http://twistedmatrix.com/documents/13.0.0/api/twisted.internet._threadedselect.html # # import os from PyObjCTools import AppHelper from AppKit import NSApplication, NSApp, NSBundle, NSLog # @UnresolvedImport import objc objc.setVerbose(True) # @UndefinedVariable # Specialized reactor for integrating with arbitrary foreign event loop, such as those you find in GUI toolkits. from twisted.internet._threadedselect import install reactor = install() # import modules containing classes required to start application and load MainMenu.nib import XierpaAppDelegate app = NSApplication.sharedApplication() nibPath = os.path.join(os.path.dirname(__file__), "dist", "Xierpa3.app", "Contents", "Resources", "en.lproj", "MainMenu.nib") NSBundle.loadNibFile_externalNameTable_withZone_(nibPath, {}, None) # @UndefinedVariable delegate = XierpaAppDelegate.XierpaAppDelegate.alloc().init() # @UndefinedVariable app.setDelegate_(delegate) # Bring app to top NSApp.activateIgnoringOtherApps_(True) AppHelper.runEventLoop()
mit
endlessm/chromium-browser
third_party/catapult/third_party/gsutil/gslib/discard_messages_queue.py
5
1323
# -*- coding: utf-8 -*-
# Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides a message queue that discards all messages."""

from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals


class DiscardMessagesQueue(object):
  """Stand-in for a Cloud API status queue that silently drops everything.

  Useful when an operation should run without the UI thread reporting its
  progress -- e.g. inside a test, or while fetching the public gsutil
  tarball object's metadata for a version check.
  """

  # pylint: disable=invalid-name, unused-argument
  def put(self, message=None, timeout=None):
    """Accepts and discards `message`; the queue never stores anything."""
    return None
  # pylint: enable=invalid-name, unused-argument
bsd-3-clause
cristiana214/cristianachavez214-cristianachavez
python/src/Mac/Modules/ibcarbon/IBCarbonsupport.py
39
1223
# IBCarbonsupport.py from macsupport import * IBNibRef = OpaqueByValueType('IBNibRef', 'IBNibRefObj') #CFBundleRef = OpaqueByValueType('CFBundleRef') IBCarbonFunction = OSErrFunctionGenerator IBCarbonMethod = OSErrMethodGenerator includestuff = """ #include <Carbon/Carbon.h> #include "pymactoolbox.h" #ifdef USE_TOOLBOX_OBJECT_GLUE extern int _CFStringRefObj_Convert(PyObject *, CFStringRef *); #endif """ initstuff = """ """ module = MacModule('_IBCarbon', 'IBCarbon', includestuff, finalstuff, initstuff) class CFReleaserObject(PEP253Mixin, GlobalObjectDefinition): def outputFreeIt(self, name): Output("CFRelease(%s);" % name) class CFNibDesc(PEP253Mixin, GlobalObjectDefinition): def outputFreeIt(self, name): Output("DisposeNibReference(%s);" % name) #cfstringobject = CFReleaserObject("CFStringRef") #module.addobject(cfstringobject) #cfbundleobject = CFReleaserObject("CFBundleRef") #module.addobject(cfbundleobject) ibnibobject = CFNibDesc("IBNibRef", "IBNibRefObj") module.addobject(ibnibobject) functions = [] methods = [] execfile('IBCarbongen.py') for f in functions: module.add(f) for m in methods: ibnibobject.add(m) SetOutputFileName('_IBCarbon.c') module.generate()
apache-2.0
f-prettyland/angr
angr/analyses/identifier/functions/fdprintf.py
4
4343
import random
import string
import logging

import claripy

from ..func import Func, TestData

l = logging.getLogger("identifier.functions.printf")


class fdprintf(Func):
    """Identifier for an fdprintf-like function (fd, format, varargs).

    Rather than producing input/output pairs, this identifier relies
    entirely on pre_test(): it first checks the candidate echoes plain
    alphanumeric text, then probes its branch conditions symbolically to
    discover the format-specifier character (e.g. '%') and a string
    conversion character (e.g. 's').  Python 2 code (uses xrange).
    """

    # Every byte value except NUL.
    non_null = [chr(i) for i in range(1, 256)]

    def __init__(self):
        super(fdprintf, self).__init__()
        # Discovered by pre_test: the format-introducer char and the
        # string-conversion char; None until identification succeeds.
        self.format_spec_char = None
        self.string_spec_char = None
        self.allows_n = False

    def rand_str(self, length, byte_list=None): #pylint disable=no-self-use
        """Return a random string of `length` chars, drawn from `byte_list`
        if given, otherwise from all 256 byte values."""
        if byte_list is None:
            return "".join(chr(random.randint(0, 255)) for _ in xrange(length))
        return "".join(random.choice(byte_list) for _ in xrange(length))

    def num_args(self):
        # fd + format string; extra conversions arrive as varargs.
        return 2

    def args(self): #pylint disable=no-self-use
        return ["fd", "str"]

    def get_name(self):
        return "fdprintf"

    def var_args(self):
        return True

    def gen_input_output_pair(self):
        # I'm kinda already assuming it's printf if it passed pretests...
        return None

    def pre_test(self, func, runner):
        """Probe `func` to decide whether it behaves like fdprintf.

        Sets self.format_spec_char / self.string_spec_char on success.
        Returns True iff the candidate passes all probes.
        """
        # Phase 1: make sure it prints alphanumeric stuff verbatim.
        length = 10
        test_str = self.rand_str(length, string.ascii_letters + string.digits)
        test_input = [1, test_str]
        test_output = [None, test_str]
        max_steps = len(test_str) * 3 + 20
        stdout = test_str
        test = TestData(test_input, test_output, None, max_steps, expected_stdout=stdout)
        if not runner.test(func, test):
            return False

        # Phase 2: run with a fully symbolic 10-byte format string and
        # harvest the concrete bytes the function compares against --
        # these are candidates for the specifier and conversion chars.
        test_input = [1, claripy.BVS("input", 10*8)]
        test_output = [None, None]
        test = TestData(test_input, test_output, None, max_steps)
        s = runner.get_base_call_state(func, test)
        pg = runner.project.factory.simgr(s)
        pg.step(18)
        interesting_chars = set()
        for p in pg.active:
            for g in p.history.jump_guards:
                if g.op == "__ne__" or g.op == "__eq__":
                    for a in g.args:
                        if not a.symbolic:
                            interesting_chars.add(s.se.eval(a))
        # Keep printable-range bytes only (0 < a < 0x80).
        interesting_chars = set(chr(a) for a in interesting_chars if 0 < a < 0x80)
        alphanum = set(string.ascii_letters + string.digits)
        # Specifier introducer: printable, non-alphanumeric, non-whitespace.
        possible_format_specifiers = [c for c in interesting_chars if c not in alphanum and
                                      c in string.printable and c not in string.whitespace]
        # Conversion char: alphanumeric comparisons seen in the guards.
        possible_formats = [c for c in interesting_chars if c in alphanum]
        if len(possible_format_specifiers) > 10:
            # too many to test :(
            return False

        # Phase 3: find the format specifier by testing each candidate
        # pair -- "<spec><conv>\n" should print the vararg string.
        second_str = "findme"
        for char in possible_format_specifiers:
            if self.format_spec_char is not None:
                break
            for cc in possible_formats:
                test_str = char + cc + "\n\x00"
                test_input = [1, test_str, second_str]
                test_output = [None, test_str, second_str]
                stdout = second_str + "\n"
                max_steps = 20
                test = TestData(test_input, test_output, None, max_steps, expected_stdout=stdout)
                if runner.test(func, test):
                    self.format_spec_char = char
                    self.string_spec_char = cc
                    break

        # brute force... fall back to every lowercase letter not already
        # tried as a conversion char.
        if self.format_spec_char is None:
            second_str = "findme"
            for char in possible_format_specifiers:
                if self.format_spec_char is not None:
                    break
                for cc in string.ascii_lowercase:
                    if cc in possible_formats:
                        continue
                    test_str = char + cc + "\n\x00"
                    test_input = [1, test_str, second_str]
                    test_output = [None, test_str, second_str]
                    stdout = second_str + "\n"
                    max_steps = 10
                    test = TestData(test_input, test_output, None, max_steps, expected_stdout=stdout)
                    if runner.test(func, test):
                        self.format_spec_char = char
                        self.string_spec_char = cc
                        break

        if self.format_spec_char is None:
            l.warning("format spec is none :(")
            return False

        return True
bsd-2-clause
kjung/scikit-learn
doc/tutorial/machine_learning_map/svg2imagemap.py
360
3411
#!/usr/local/bin/python """ This script converts a subset of SVG into an HTML imagemap Note *subset*. It only handles <path> elements, for which it only pays attention to the M and L commands. Futher, it only notices the "translate" transform. It was written to generate the examples in the documentation for maphilight, and thus is very squarely aimed at handling several SVG maps from wikipedia. It *assumes* that all the <path>s it will need are inside a <g>. Any <path> outside of a <g> will be ignored. It takes several possible arguments, in the form: $ svn2imagemap.py FILENAME [x y [group1 group2 ... groupN]] FILENAME must be the name of an SVG file. All other arguments are optional. x and y, if present, are the dimensions of the image you'll be creating from the SVG. If not present, it assumes the values of the width and height attributes in the SVG file. group1 through groupN are group ids. If only want particular groups used, enter their ids here and all others will be ignored. """ import os import re import sys import xml.dom.minidom import parse_path if len(sys.argv) == 1: sys.exit("svn2imagemap.py FILENAME [x y [group1 group2 ... 
groupN]]") if not os.path.exists(sys.argv[1]): sys.exit("Input file does not exist") x, y, groups = None, None, None if len(sys.argv) >= 3: x = float(sys.argv[2]) y = float(sys.argv[3]) if len(sys.argv) > 3: groups = sys.argv[4:] svg_file = xml.dom.minidom.parse(sys.argv[1]) svg = svg_file.getElementsByTagName('svg')[0] raw_width = float(svg.getAttribute('width')) raw_height = float(svg.getAttribute('height')) width_ratio = x and (x / raw_width) or 1 height_ratio = y and (y / raw_height) or 1 if groups: elements = [g for g in svg.getElementsByTagName('g') if (g.hasAttribute('id') and g.getAttribute('id') in groups)] elements.extend([p for p in svg.getElementsByTagName('path') if (p.hasAttribute('id') and p.getAttribute('id') in groups)]) else: elements = svg.getElementsByTagName('g') parsed_groups = {} for e in elements: paths = [] if e.nodeName == 'g': for path in e.getElementsByTagName('path'): points = parse_path.get_points(path.getAttribute('d')) for pointset in points: paths.append([path.getAttribute('id'), pointset]) else: points = parse_path.get_points(e.getAttribute('d')) for pointset in points: paths.append([e.getAttribute('id'), pointset]) if e.hasAttribute('transform'): print e.getAttribute('id'), e.getAttribute('transform') for transform in re.findall(r'(\w+)\((-?\d+.?\d*),(-?\d+.?\d*)\)', e.getAttribute('transform')): if transform[0] == 'translate': x_shift = float(transform[1]) y_shift = float(transform[2]) for path in paths: path[1] = [(p[0] + x_shift, p[1] + y_shift) for p in path[1]] parsed_groups[e.getAttribute('id')] = paths out = [] for g in parsed_groups: for path in parsed_groups[g]: out.append('<area href="#" title="%s" shape="poly" coords="%s"></area>' % (path[0], ', '.join([("%d,%d" % (p[0]*width_ratio, p[1]*height_ratio)) for p in path[1]]))) outfile = open(sys.argv[1].replace('.svg', '.html'), 'w') outfile.write('\n'.join(out))
bsd-3-clause
nervous-laughter/qiime2
qiime2/sdk/tests/test_artifact.py
2
17709
# ---------------------------------------------------------------------------- # Copyright (c) 2016-2017, QIIME 2 development team. # # Distributed under the terms of the Modified BSD License. # # The full license is in the file LICENSE, distributed with this software. # ---------------------------------------------------------------------------- import collections import os import tempfile import unittest import uuid import qiime2.core.type from qiime2.sdk import Artifact from qiime2.sdk.result import ResultMetadata import qiime2.core.archive as archive from qiime2.core.testing.type import IntSequence1, FourInts, Mapping from qiime2.core.testing.util import get_dummy_plugin, ArchiveTestingMixin class TestArtifact(unittest.TestCase, ArchiveTestingMixin): def setUp(self): # Ignore the returned dummy plugin object, just run this to verify the # plugin exists as the tests rely on it being loaded. get_dummy_plugin() # TODO standardize temporary directories created by QIIME 2 self.test_dir = tempfile.TemporaryDirectory(prefix='qiime2-test-temp-') self.provenance_capture = archive.ImportProvenanceCapture() def tearDown(self): self.test_dir.cleanup() def test_private_constructor(self): with self.assertRaisesRegex( NotImplementedError, 'Artifact constructor.*private.*Artifact.load'): Artifact() # Note on testing strategy below: many of the tests for `_from_view` and # `load` are similar, with the exception that when `load`ing, the # artifact's UUID is known so more specific assertions can be performed. # While these tests appear somewhat redundant, they are important because # they exercise the same operations on Artifact objects constructed from # different sources, whose codepaths have very different internal behavior. # This internal behavior could be tested explicitly but it is safer to test # the public API behavior (e.g. as a user would interact with the object) # in case the internals change. 
def test_from_view(self): artifact = Artifact._from_view(FourInts, [-1, 42, 0, 43], list, self.provenance_capture) self.assertEqual(artifact.type, FourInts) # We don't know what the UUID is because it's generated within # Artifact._from_view. self.assertIsInstance(artifact.uuid, uuid.UUID) self.assertEqual(artifact.view(list), [-1, 42, 0, 43]) # Can produce same view if called again. self.assertEqual(artifact.view(list), [-1, 42, 0, 43]) def test_from_view_different_type_with_multiple_view_types(self): artifact = Artifact._from_view(IntSequence1, [42, 42, 43, -999, 42], list, self.provenance_capture) self.assertEqual(artifact.type, IntSequence1) self.assertIsInstance(artifact.uuid, uuid.UUID) self.assertEqual(artifact.view(list), [42, 42, 43, -999, 42]) self.assertEqual(artifact.view(list), [42, 42, 43, -999, 42]) self.assertEqual(artifact.view(collections.Counter), collections.Counter({42: 3, 43: 1, -999: 1})) self.assertEqual(artifact.view(collections.Counter), collections.Counter({42: 3, 43: 1, -999: 1})) def test_from_view_and_save(self): fp = os.path.join(self.test_dir.name, 'artifact.qza') # Using four-ints data layout because it has multiple files, some of # which are in a nested directory. 
artifact = Artifact._from_view(FourInts, [-1, 42, 0, 43], list, self.provenance_capture) artifact.save(fp) root_dir = str(artifact.uuid) expected = { 'VERSION', 'metadata.yaml', 'data/file1.txt', 'data/file2.txt', 'data/nested/file3.txt', 'data/nested/file4.txt', 'provenance/metadata.yaml', 'provenance/VERSION', 'provenance/action/action.yaml' } self.assertArchiveMembers(fp, root_dir, expected) def test_load(self): saved_artifact = Artifact.import_data(FourInts, [-1, 42, 0, 43]) fp = os.path.join(self.test_dir.name, 'artifact.qza') saved_artifact.save(fp) artifact = Artifact.load(fp) self.assertEqual(artifact.type, FourInts) self.assertEqual(artifact.uuid, saved_artifact.uuid) self.assertEqual(artifact.view(list), [-1, 42, 0, 43]) self.assertEqual(artifact.view(list), [-1, 42, 0, 43]) def test_load_different_type_with_multiple_view_types(self): saved_artifact = Artifact.import_data(IntSequence1, [42, 42, 43, -999, 42]) fp = os.path.join(self.test_dir.name, 'artifact.qza') saved_artifact.save(fp) artifact = Artifact.load(fp) self.assertEqual(artifact.type, IntSequence1) self.assertEqual(artifact.uuid, saved_artifact.uuid) self.assertEqual(artifact.view(list), [42, 42, 43, -999, 42]) self.assertEqual(artifact.view(list), [42, 42, 43, -999, 42]) self.assertEqual(artifact.view(collections.Counter), collections.Counter({42: 3, 43: 1, -999: 1})) self.assertEqual(artifact.view(collections.Counter), collections.Counter({42: 3, 43: 1, -999: 1})) def test_load_and_save(self): fp1 = os.path.join(self.test_dir.name, 'artifact1.qza') fp2 = os.path.join(self.test_dir.name, 'artifact2.qza') artifact = Artifact.import_data(FourInts, [-1, 42, 0, 43]) artifact.save(fp1) artifact = Artifact.load(fp1) # Overwriting its source file works. artifact.save(fp1) # Saving to a new file works. 
artifact.save(fp2) root_dir = str(artifact.uuid) expected = { 'VERSION', 'metadata.yaml', 'data/file1.txt', 'data/file2.txt', 'data/nested/file3.txt', 'data/nested/file4.txt', 'provenance/metadata.yaml', 'provenance/VERSION', 'provenance/action/action.yaml' } self.assertArchiveMembers(fp1, root_dir, expected) root_dir = str(artifact.uuid) expected = { 'VERSION', 'metadata.yaml', 'data/file1.txt', 'data/file2.txt', 'data/nested/file3.txt', 'data/nested/file4.txt', 'provenance/metadata.yaml', 'provenance/VERSION', 'provenance/action/action.yaml' } self.assertArchiveMembers(fp2, root_dir, expected) def test_roundtrip(self): fp1 = os.path.join(self.test_dir.name, 'artifact1.qza') fp2 = os.path.join(self.test_dir.name, 'artifact2.qza') artifact = Artifact.import_data(FourInts, [-1, 42, 0, 43]) artifact.save(fp1) artifact1 = Artifact.load(fp1) artifact1.save(fp2) artifact2 = Artifact.load(fp2) self.assertEqual(artifact1.type, artifact2.type) self.assertEqual(artifact1.format, artifact2.format) self.assertEqual(artifact1.uuid, artifact2.uuid) self.assertEqual(artifact1.view(list), artifact2.view(list)) # double view to make sure multiple views can be taken self.assertEqual(artifact1.view(list), artifact2.view(list)) def test_load_with_archive_filepath_modified(self): # Save an artifact for use in the following test case. fp = os.path.join(self.test_dir.name, 'artifact.qza') Artifact.import_data(FourInts, [-1, 42, 0, 43]).save(fp) # Load the artifact from a filepath then save a different artifact to # the same filepath. Assert that both artifacts produce the correct # views of their data. # # `load` used to be lazy, only extracting data when it needed to (e.g. # when `save` or `view` was called). This was buggy as the filepath # could have been deleted, or worse, modified to contain a different # .qza file. Thus, the wrong archive could be extracted on demand, or # the archive could be missing altogether. 
There isn't an easy # cross-platform compatible way to solve this problem, so Artifact.load # is no longer lazy and always extracts its data immediately. The real # motivation for lazy loading was for quick inspection of archives # without extracting/copying data, so that API is now provided through # Artifact.peek. artifact1 = Artifact.load(fp) Artifact.import_data(FourInts, [10, 11, 12, 13]).save(fp) artifact2 = Artifact.load(fp) self.assertEqual(artifact1.view(list), [-1, 42, 0, 43]) self.assertEqual(artifact2.view(list), [10, 11, 12, 13]) def test_extract(self): fp = os.path.join(self.test_dir.name, 'artifact.qza') artifact = Artifact.import_data(FourInts, [-1, 42, 0, 43]) artifact.save(fp) root_dir = str(artifact.uuid) output_dir = os.path.join(self.test_dir.name, 'artifact-extract-test') result_dir = Artifact.extract(fp, output_dir=output_dir) self.assertEqual(result_dir, os.path.join(output_dir, root_dir)) expected = { 'VERSION', 'metadata.yaml', 'data/file1.txt', 'data/file2.txt', 'data/nested/file3.txt', 'data/nested/file4.txt', 'provenance/metadata.yaml', 'provenance/VERSION', 'provenance/action/action.yaml' } self.assertExtractedArchiveMembers(output_dir, root_dir, expected) def test_peek(self): artifact = Artifact.import_data(FourInts, [0, 0, 42, 1000]) fp = os.path.join(self.test_dir.name, 'artifact.qza') artifact.save(fp) metadata = Artifact.peek(fp) self.assertIsInstance(metadata, ResultMetadata) self.assertEqual(metadata.type, 'FourInts') self.assertEqual(metadata.uuid, str(artifact.uuid)) self.assertEqual(metadata.format, 'FourIntsDirectoryFormat') def test_import_data_invalid_type(self): with self.assertRaisesRegex(TypeError, 'concrete semantic type.*Visualization'): Artifact.import_data(qiime2.core.type.Visualization, self.test_dir) with self.assertRaisesRegex(TypeError, 'concrete semantic type.*Visualization'): Artifact.import_data('Visualization', self.test_dir) def test_import_data_with_filepath_multi_file_data_layout(self): fp = 
os.path.join(self.test_dir.name, 'test.txt') with open(fp, 'w') as fh: fh.write('42\n') with self.assertRaisesRegex(ValueError, "FourIntsDirectoryFormat.*directory"): Artifact.import_data(FourInts, fp) def test_import_data_with_wrong_number_of_files(self): data_dir = os.path.join(self.test_dir.name, 'test') os.mkdir(data_dir) error_regex = ("Missing.*MappingDirectoryFormat.*mapping.tsv") with self.assertRaisesRegex(ValueError, error_regex): Artifact.import_data(Mapping, data_dir) def test_import_data_with_unrecognized_files(self): data_dir = os.path.join(self.test_dir.name, 'test') os.mkdir(data_dir) with open(os.path.join(data_dir, 'file1.txt'), 'w') as fh: fh.write('42\n') with open(os.path.join(data_dir, 'file2.txt'), 'w') as fh: fh.write('43\n') nested = os.path.join(data_dir, 'nested') os.mkdir(nested) with open(os.path.join(nested, 'file3.txt'), 'w') as fh: fh.write('44\n') with open(os.path.join(nested, 'foo.txt'), 'w') as fh: fh.write('45\n') error_regex = ("Unrecognized.*foo.txt.*FourIntsDirectoryFormat") with self.assertRaisesRegex(ValueError, error_regex): Artifact.import_data(FourInts, data_dir) def test_import_data_with_unreachable_path(self): with self.assertRaisesRegex(ValueError, "does not exist"): Artifact.import_data(IntSequence1, os.path.join(self.test_dir.name, 'foo.txt')) with self.assertRaisesRegex(ValueError, "does not exist"): Artifact.import_data(FourInts, os.path.join(self.test_dir.name, 'bar', '')) def test_import_data_with_invalid_format_single_file(self): fp = os.path.join(self.test_dir.name, 'foo.txt') with open(fp, 'w') as fh: fh.write('42\n') fh.write('43\n') fh.write('abc\n') fh.write('123\n') error_regex = "foo.txt.*IntSequenceFormat" with self.assertRaisesRegex(ValueError, error_regex): Artifact.import_data(IntSequence1, fp) def test_import_data_with_invalid_format_multi_file(self): data_dir = os.path.join(self.test_dir.name, 'test') os.mkdir(data_dir) with open(os.path.join(data_dir, 'file1.txt'), 'w') as fh: fh.write('42\n') 
with open(os.path.join(data_dir, 'file2.txt'), 'w') as fh: fh.write('43\n') nested = os.path.join(data_dir, 'nested') os.mkdir(nested) with open(os.path.join(nested, 'file3.txt'), 'w') as fh: fh.write('44\n') with open(os.path.join(nested, 'file4.txt'), 'w') as fh: fh.write('foo\n') error_regex = "file4.txt.*SingleIntFormat" with self.assertRaisesRegex(ValueError, error_regex): Artifact.import_data(FourInts, data_dir) def test_import_data_with_filepath(self): data_dir = os.path.join(self.test_dir.name, 'test') os.mkdir(data_dir) # Filename shouldn't matter for single-file case. fp = os.path.join(data_dir, 'foo.txt') with open(fp, 'w') as fh: fh.write('42\n') fh.write('43\n') fh.write('42\n') fh.write('0\n') artifact = Artifact.import_data(IntSequence1, fp) self.assertEqual(artifact.type, IntSequence1) self.assertIsInstance(artifact.uuid, uuid.UUID) self.assertEqual(artifact.view(list), [42, 43, 42, 0]) def test_import_data_with_directory_single_file(self): data_dir = os.path.join(self.test_dir.name, 'test') os.mkdir(data_dir) fp = os.path.join(data_dir, 'ints.txt') with open(fp, 'w') as fh: fh.write('-1\n') fh.write('-2\n') fh.write('10\n') fh.write('100\n') artifact = Artifact.import_data(IntSequence1, data_dir) self.assertEqual(artifact.type, IntSequence1) self.assertIsInstance(artifact.uuid, uuid.UUID) self.assertEqual(artifact.view(list), [-1, -2, 10, 100]) def test_import_data_with_directory_multi_file(self): data_dir = os.path.join(self.test_dir.name, 'test') os.mkdir(data_dir) with open(os.path.join(data_dir, 'file1.txt'), 'w') as fh: fh.write('42\n') with open(os.path.join(data_dir, 'file2.txt'), 'w') as fh: fh.write('41\n') nested = os.path.join(data_dir, 'nested') os.mkdir(nested) with open(os.path.join(nested, 'file3.txt'), 'w') as fh: fh.write('43\n') with open(os.path.join(nested, 'file4.txt'), 'w') as fh: fh.write('40\n') artifact = Artifact.import_data(FourInts, data_dir) self.assertEqual(artifact.type, FourInts) self.assertIsInstance(artifact.uuid, 
uuid.UUID) self.assertEqual(artifact.view(list), [42, 41, 43, 40]) def test_eq_identity(self): artifact = Artifact.import_data(FourInts, [-1, 42, 0, 43]) self.assertEqual(artifact, artifact) def test_eq_same_uuid(self): fp = os.path.join(self.test_dir.name, 'artifact.qza') artifact1 = Artifact.import_data(FourInts, [-1, 42, 0, 43]) artifact1.save(fp) artifact2 = Artifact.load(fp) self.assertEqual(artifact1, artifact2) def test_ne_same_data_different_uuid(self): artifact1 = Artifact.import_data(FourInts, [-1, 42, 0, 43]) artifact2 = Artifact.import_data(FourInts, [-1, 42, 0, 43]) self.assertNotEqual(artifact1, artifact2) def test_ne_different_data_different_uuid(self): artifact1 = Artifact.import_data(FourInts, [-1, 42, 0, 43]) artifact2 = Artifact.import_data(FourInts, [1, 2, 3, 4]) self.assertNotEqual(artifact1, artifact2) def test_ne_subclass_same_uuid(self): class ArtifactSubclass(Artifact): pass fp = os.path.join(self.test_dir.name, 'artifact.qza') artifact1 = ArtifactSubclass.import_data(FourInts, [-1, 42, 0, 43]) artifact1.save(fp) artifact2 = Artifact.load(fp) self.assertNotEqual(artifact1, artifact2) self.assertNotEqual(artifact2, artifact1) def test_ne_different_type_same_uuid(self): artifact = Artifact.import_data(FourInts, [-1, 42, 0, 43]) class Faker: @property def uuid(self): return artifact.uuid faker = Faker() self.assertNotEqual(artifact, faker) if __name__ == '__main__': unittest.main()
bsd-3-clause
nrc/rustc-perf
collector/benchmarks/script-servo/components/script/dom/bindings/codegen/GlobalGen.py
73
3185
# This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. # We do one global pass over all the WebIDL to generate our prototype enum # and generate information for subsequent phases. import sys import os sys.path.append(os.path.join(".", "parser")) sys.path.append(os.path.join(".", "ply")) import WebIDL import cPickle from Configuration import Configuration from CodegenRust import GlobalGenRoots, replaceFileIfChanged def generate_file(config, name, filename): root = getattr(GlobalGenRoots, name)(config) code = root.define() if replaceFileIfChanged(filename, code): print "Generating %s" % (filename) else: print "%s hasn't changed - not touching it" % (filename) def main(): # Parse arguments. from optparse import OptionParser usageString = "usage: %prog [options] configFile outputdir webidldir [files]" o = OptionParser(usage=usageString) o.add_option("--cachedir", dest='cachedir', default=None, help="Directory in which to cache lex/parse tables.") o.add_option("--only-html", dest='only_html', action="store_true", help="Only generate HTML from WebIDL inputs") o.add_option("--filelist", dest='filelist', default=None, help="A file containing the list (one per line) of webidl files to process.") (options, args) = o.parse_args() if len(args) < 2: o.error(usageString) configFile = args[0] outputdir = args[1] baseDir = args[2] if options.filelist is not None: fileList = (l.strip() for l in open(options.filelist).xreadlines()) else: fileList = args[3:] # Parse the WebIDL. parser = WebIDL.Parser(options.cachedir) for filename in fileList: fullPath = os.path.normpath(os.path.join(baseDir, filename)) with open(fullPath, 'rb') as f: lines = f.readlines() parser.parse(''.join(lines), fullPath) parserResults = parser.finish() if not options.only_html: # Write the parser results out to a pickle. 
resultsPath = os.path.join(outputdir, 'ParserResults.pkl') with open(resultsPath, 'wb') as resultsFile: cPickle.dump(parserResults, resultsFile, -1) # Load the configuration. config = Configuration(configFile, parserResults) to_generate = [ ('SupportedDomApis', 'apis.html'), ] if not options.only_html: to_generate = [ ('PrototypeList', 'PrototypeList.rs'), ('RegisterBindings', 'RegisterBindings.rs'), ('InterfaceObjectMap', 'InterfaceObjectMap.rs'), ('InterfaceObjectMapData', 'InterfaceObjectMapData.json'), ('InterfaceTypes', 'InterfaceTypes.rs'), ('InheritTypes', 'InheritTypes.rs'), ('Bindings', os.path.join('Bindings', 'mod.rs')), ('UnionTypes', 'UnionTypes.rs'), ] for name, filename in to_generate: generate_file(config, name, os.path.join(outputdir, filename)) if __name__ == '__main__': main()
mit
valexandersaulys/airbnb_kaggle_contest
venv/lib/python3.4/site-packages/sklearn/tests/test_grid_search.py
53
28730
""" Testing for grid search module (sklearn.grid_search) """ from collections import Iterable, Sized from sklearn.externals.six.moves import cStringIO as StringIO from sklearn.externals.six.moves import xrange from itertools import chain, product import pickle import sys import numpy as np import scipy.sparse as sp from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_not_equal from sklearn.utils.testing import assert_raises from sklearn.utils.testing import assert_warns from sklearn.utils.testing import assert_raise_message from sklearn.utils.testing import assert_false, assert_true from sklearn.utils.testing import assert_array_equal from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_no_warnings from sklearn.utils.testing import ignore_warnings from sklearn.utils.mocking import CheckingClassifier, MockDataFrame from scipy.stats import bernoulli, expon, uniform from sklearn.externals.six.moves import zip from sklearn.base import BaseEstimator from sklearn.datasets import make_classification from sklearn.datasets import make_blobs from sklearn.datasets import make_multilabel_classification from sklearn.grid_search import (GridSearchCV, RandomizedSearchCV, ParameterGrid, ParameterSampler, ChangedBehaviorWarning) from sklearn.svm import LinearSVC, SVC from sklearn.tree import DecisionTreeRegressor from sklearn.tree import DecisionTreeClassifier from sklearn.cluster import KMeans from sklearn.neighbors import KernelDensity from sklearn.metrics import f1_score from sklearn.metrics import make_scorer from sklearn.metrics import roc_auc_score from sklearn.cross_validation import KFold, StratifiedKFold, FitFailedWarning from sklearn.preprocessing import Imputer from sklearn.pipeline import Pipeline # Neither of the following two estimators inherit from BaseEstimator, # to test hyperparameter search on user-defined classifiers. 
class MockClassifier(object): """Dummy classifier to test the cross-validation""" def __init__(self, foo_param=0): self.foo_param = foo_param def fit(self, X, Y): assert_true(len(X) == len(Y)) return self def predict(self, T): return T.shape[0] predict_proba = predict decision_function = predict transform = predict def score(self, X=None, Y=None): if self.foo_param > 1: score = 1. else: score = 0. return score def get_params(self, deep=False): return {'foo_param': self.foo_param} def set_params(self, **params): self.foo_param = params['foo_param'] return self class LinearSVCNoScore(LinearSVC): """An LinearSVC classifier that has no score method.""" @property def score(self): raise AttributeError X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]]) y = np.array([1, 1, 2, 2]) def assert_grid_iter_equals_getitem(grid): assert_equal(list(grid), [grid[i] for i in range(len(grid))]) def test_parameter_grid(): # Test basic properties of ParameterGrid. params1 = {"foo": [1, 2, 3]} grid1 = ParameterGrid(params1) assert_true(isinstance(grid1, Iterable)) assert_true(isinstance(grid1, Sized)) assert_equal(len(grid1), 3) assert_grid_iter_equals_getitem(grid1) params2 = {"foo": [4, 2], "bar": ["ham", "spam", "eggs"]} grid2 = ParameterGrid(params2) assert_equal(len(grid2), 6) # loop to assert we can iterate over the grid multiple times for i in xrange(2): # tuple + chain transforms {"a": 1, "b": 2} to ("a", 1, "b", 2) points = set(tuple(chain(*(sorted(p.items())))) for p in grid2) assert_equal(points, set(("bar", x, "foo", y) for x, y in product(params2["bar"], params2["foo"]))) assert_grid_iter_equals_getitem(grid2) # Special case: empty grid (useful to get default estimator settings) empty = ParameterGrid({}) assert_equal(len(empty), 1) assert_equal(list(empty), [{}]) assert_grid_iter_equals_getitem(empty) assert_raises(IndexError, lambda: empty[1]) has_empty = ParameterGrid([{'C': [1, 10]}, {}, {'C': [.5]}]) assert_equal(len(has_empty), 4) assert_equal(list(has_empty), [{'C': 
1}, {'C': 10}, {}, {'C': .5}]) assert_grid_iter_equals_getitem(has_empty) def test_grid_search(): # Test that the best estimator contains the right value for foo_param clf = MockClassifier() grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, verbose=3) # make sure it selects the smallest parameter in case of ties old_stdout = sys.stdout sys.stdout = StringIO() grid_search.fit(X, y) sys.stdout = old_stdout assert_equal(grid_search.best_estimator_.foo_param, 2) for i, foo_i in enumerate([1, 2, 3]): assert_true(grid_search.grid_scores_[i][0] == {'foo_param': foo_i}) # Smoke test the score etc: grid_search.score(X, y) grid_search.predict_proba(X) grid_search.decision_function(X) grid_search.transform(X) # Test exception handling on scoring grid_search.scoring = 'sklearn' assert_raises(ValueError, grid_search.fit, X, y) @ignore_warnings def test_grid_search_no_score(): # Test grid-search on classifier that has no score function. clf = LinearSVC(random_state=0) X, y = make_blobs(random_state=0, centers=2) Cs = [.1, 1, 10] clf_no_score = LinearSVCNoScore(random_state=0) grid_search = GridSearchCV(clf, {'C': Cs}, scoring='accuracy') grid_search.fit(X, y) grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs}, scoring='accuracy') # smoketest grid search grid_search_no_score.fit(X, y) # check that best params are equal assert_equal(grid_search_no_score.best_params_, grid_search.best_params_) # check that we can call score and that it gives the correct result assert_equal(grid_search.score(X, y), grid_search_no_score.score(X, y)) # giving no scoring function raises an error grid_search_no_score = GridSearchCV(clf_no_score, {'C': Cs}) assert_raise_message(TypeError, "no scoring", grid_search_no_score.fit, [[1]]) def test_grid_search_score_method(): X, y = make_classification(n_samples=100, n_classes=2, flip_y=.2, random_state=0) clf = LinearSVC(random_state=0) grid = {'C': [.1]} search_no_scoring = GridSearchCV(clf, grid, scoring=None).fit(X, y) search_accuracy = 
GridSearchCV(clf, grid, scoring='accuracy').fit(X, y) search_no_score_method_auc = GridSearchCV(LinearSVCNoScore(), grid, scoring='roc_auc').fit(X, y) search_auc = GridSearchCV(clf, grid, scoring='roc_auc').fit(X, y) # Check warning only occurs in situation where behavior changed: # estimator requires score method to compete with scoring parameter score_no_scoring = assert_no_warnings(search_no_scoring.score, X, y) score_accuracy = assert_warns(ChangedBehaviorWarning, search_accuracy.score, X, y) score_no_score_auc = assert_no_warnings(search_no_score_method_auc.score, X, y) score_auc = assert_warns(ChangedBehaviorWarning, search_auc.score, X, y) # ensure the test is sane assert_true(score_auc < 1.0) assert_true(score_accuracy < 1.0) assert_not_equal(score_auc, score_accuracy) assert_almost_equal(score_accuracy, score_no_scoring) assert_almost_equal(score_auc, score_no_score_auc) def test_trivial_grid_scores(): # Test search over a "grid" with only one point. # Non-regression test: grid_scores_ wouldn't be set by GridSearchCV. 
clf = MockClassifier() grid_search = GridSearchCV(clf, {'foo_param': [1]}) grid_search.fit(X, y) assert_true(hasattr(grid_search, "grid_scores_")) random_search = RandomizedSearchCV(clf, {'foo_param': [0]}, n_iter=1) random_search.fit(X, y) assert_true(hasattr(random_search, "grid_scores_")) def test_no_refit(): # Test that grid search can be used for model selection only clf = MockClassifier() grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=False) grid_search.fit(X, y) assert_true(hasattr(grid_search, "best_params_")) def test_grid_search_error(): # Test that grid search will capture errors on data with different # length X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0) clf = LinearSVC() cv = GridSearchCV(clf, {'C': [0.1, 1.0]}) assert_raises(ValueError, cv.fit, X_[:180], y_) def test_grid_search_iid(): # test the iid parameter # noise-free simple 2d-data X, y = make_blobs(centers=[[0, 0], [1, 0], [0, 1], [1, 1]], random_state=0, cluster_std=0.1, shuffle=False, n_samples=80) # split dataset into two folds that are not iid # first one contains data of all 4 blobs, second only from two. mask = np.ones(X.shape[0], dtype=np.bool) mask[np.where(y == 1)[0][::2]] = 0 mask[np.where(y == 2)[0][::2]] = 0 # this leads to perfect classification on one fold and a score of 1/3 on # the other svm = SVC(kernel='linear') # create "cv" for splits cv = [[mask, ~mask], [~mask, mask]] # once with iid=True (default) grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv) grid_search.fit(X, y) first = grid_search.grid_scores_[0] assert_equal(first.parameters['C'], 1) assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.]) # for first split, 1/4 of dataset is in test, for second 3/4. # take weighted average assert_almost_equal(first.mean_validation_score, 1 * 1. / 4. + 1. / 3. * 3. / 4.) 
# once with iid=False grid_search = GridSearchCV(svm, param_grid={'C': [1, 10]}, cv=cv, iid=False) grid_search.fit(X, y) first = grid_search.grid_scores_[0] assert_equal(first.parameters['C'], 1) # scores are the same as above assert_array_almost_equal(first.cv_validation_scores, [1, 1. / 3.]) # averaged score is just mean of scores assert_almost_equal(first.mean_validation_score, np.mean(first.cv_validation_scores)) def test_grid_search_one_grid_point(): X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0) param_dict = {"C": [1.0], "kernel": ["rbf"], "gamma": [0.1]} clf = SVC() cv = GridSearchCV(clf, param_dict) cv.fit(X_, y_) clf = SVC(C=1.0, kernel="rbf", gamma=0.1) clf.fit(X_, y_) assert_array_equal(clf.dual_coef_, cv.best_estimator_.dual_coef_) def test_grid_search_bad_param_grid(): param_dict = {"C": 1.0} clf = SVC() assert_raises(ValueError, GridSearchCV, clf, param_dict) param_dict = {"C": []} clf = SVC() assert_raises(ValueError, GridSearchCV, clf, param_dict) param_dict = {"C": np.ones(6).reshape(3, 2)} clf = SVC() assert_raises(ValueError, GridSearchCV, clf, param_dict) def test_grid_search_sparse(): # Test that grid search works with both dense and sparse matrices X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0) clf = LinearSVC() cv = GridSearchCV(clf, {'C': [0.1, 1.0]}) cv.fit(X_[:180], y_[:180]) y_pred = cv.predict(X_[180:]) C = cv.best_estimator_.C X_ = sp.csr_matrix(X_) clf = LinearSVC() cv = GridSearchCV(clf, {'C': [0.1, 1.0]}) cv.fit(X_[:180].tocoo(), y_[:180]) y_pred2 = cv.predict(X_[180:]) C2 = cv.best_estimator_.C assert_true(np.mean(y_pred == y_pred2) >= .9) assert_equal(C, C2) def test_grid_search_sparse_scoring(): X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0) clf = LinearSVC() cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1") cv.fit(X_[:180], y_[:180]) y_pred = cv.predict(X_[180:]) C = cv.best_estimator_.C X_ = sp.csr_matrix(X_) clf = LinearSVC() cv = 
GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring="f1") cv.fit(X_[:180], y_[:180]) y_pred2 = cv.predict(X_[180:]) C2 = cv.best_estimator_.C assert_array_equal(y_pred, y_pred2) assert_equal(C, C2) # Smoke test the score # np.testing.assert_allclose(f1_score(cv.predict(X_[:180]), y[:180]), # cv.score(X_[:180], y[:180])) # test loss where greater is worse def f1_loss(y_true_, y_pred_): return -f1_score(y_true_, y_pred_) F1Loss = make_scorer(f1_loss, greater_is_better=False) cv = GridSearchCV(clf, {'C': [0.1, 1.0]}, scoring=F1Loss) cv.fit(X_[:180], y_[:180]) y_pred3 = cv.predict(X_[180:]) C3 = cv.best_estimator_.C assert_equal(C, C3) assert_array_equal(y_pred, y_pred3) def test_grid_search_precomputed_kernel(): # Test that grid search works when the input features are given in the # form of a precomputed kernel matrix X_, y_ = make_classification(n_samples=200, n_features=100, random_state=0) # compute the training kernel matrix corresponding to the linear kernel K_train = np.dot(X_[:180], X_[:180].T) y_train = y_[:180] clf = SVC(kernel='precomputed') cv = GridSearchCV(clf, {'C': [0.1, 1.0]}) cv.fit(K_train, y_train) assert_true(cv.best_score_ >= 0) # compute the test kernel matrix K_test = np.dot(X_[180:], X_[:180].T) y_test = y_[180:] y_pred = cv.predict(K_test) assert_true(np.mean(y_pred == y_test) >= 0) # test error is raised when the precomputed kernel is not array-like # or sparse assert_raises(ValueError, cv.fit, K_train.tolist(), y_train) def test_grid_search_precomputed_kernel_error_nonsquare(): # Test that grid search returns an error with a non-square precomputed # training kernel matrix K_train = np.zeros((10, 20)) y_train = np.ones((10, )) clf = SVC(kernel='precomputed') cv = GridSearchCV(clf, {'C': [0.1, 1.0]}) assert_raises(ValueError, cv.fit, K_train, y_train) def test_grid_search_precomputed_kernel_error_kernel_function(): # Test that grid search returns an error when using a kernel_function X_, y_ = make_classification(n_samples=200, n_features=100, 
random_state=0) kernel_function = lambda x1, x2: np.dot(x1, x2.T) clf = SVC(kernel=kernel_function) cv = GridSearchCV(clf, {'C': [0.1, 1.0]}) assert_raises(ValueError, cv.fit, X_, y_) class BrokenClassifier(BaseEstimator): """Broken classifier that cannot be fit twice""" def __init__(self, parameter=None): self.parameter = parameter def fit(self, X, y): assert_true(not hasattr(self, 'has_been_fit_')) self.has_been_fit_ = True def predict(self, X): return np.zeros(X.shape[0]) @ignore_warnings def test_refit(): # Regression test for bug in refitting # Simulates re-fitting a broken estimator; this used to break with # sparse SVMs. X = np.arange(100).reshape(10, 10) y = np.array([0] * 5 + [1] * 5) clf = GridSearchCV(BrokenClassifier(), [{'parameter': [0, 1]}], scoring="precision", refit=True) clf.fit(X, y) def test_gridsearch_nd(): # Pass X as list in GridSearchCV X_4d = np.arange(10 * 5 * 3 * 2).reshape(10, 5, 3, 2) y_3d = np.arange(10 * 7 * 11).reshape(10, 7, 11) check_X = lambda x: x.shape[1:] == (5, 3, 2) check_y = lambda x: x.shape[1:] == (7, 11) clf = CheckingClassifier(check_X=check_X, check_y=check_y) grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}) grid_search.fit(X_4d, y_3d).score(X, y) assert_true(hasattr(grid_search, "grid_scores_")) def test_X_as_list(): # Pass X as list in GridSearchCV X = np.arange(100).reshape(10, 10) y = np.array([0] * 5 + [1] * 5) clf = CheckingClassifier(check_X=lambda x: isinstance(x, list)) cv = KFold(n=len(X), n_folds=3) grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv) grid_search.fit(X.tolist(), y).score(X, y) assert_true(hasattr(grid_search, "grid_scores_")) def test_y_as_list(): # Pass y as list in GridSearchCV X = np.arange(100).reshape(10, 10) y = np.array([0] * 5 + [1] * 5) clf = CheckingClassifier(check_y=lambda x: isinstance(x, list)) cv = KFold(n=len(X), n_folds=3) grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, cv=cv) grid_search.fit(X, y.tolist()).score(X, y) 
assert_true(hasattr(grid_search, "grid_scores_")) def test_pandas_input(): # check cross_val_score doesn't destroy pandas dataframe types = [(MockDataFrame, MockDataFrame)] try: from pandas import Series, DataFrame types.append((DataFrame, Series)) except ImportError: pass X = np.arange(100).reshape(10, 10) y = np.array([0] * 5 + [1] * 5) for InputFeatureType, TargetType in types: # X dataframe, y series X_df, y_ser = InputFeatureType(X), TargetType(y) check_df = lambda x: isinstance(x, InputFeatureType) check_series = lambda x: isinstance(x, TargetType) clf = CheckingClassifier(check_X=check_df, check_y=check_series) grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}) grid_search.fit(X_df, y_ser).score(X_df, y_ser) grid_search.predict(X_df) assert_true(hasattr(grid_search, "grid_scores_")) def test_unsupervised_grid_search(): # test grid-search with unsupervised estimator X, y = make_blobs(random_state=0) km = KMeans(random_state=0) grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4]), scoring='adjusted_rand_score') grid_search.fit(X, y) # ARI can find the right number :) assert_equal(grid_search.best_params_["n_clusters"], 3) # Now without a score, and without y grid_search = GridSearchCV(km, param_grid=dict(n_clusters=[2, 3, 4])) grid_search.fit(X) assert_equal(grid_search.best_params_["n_clusters"], 4) def test_gridsearch_no_predict(): # test grid-search with an estimator without predict. 
# slight duplication of a test from KDE def custom_scoring(estimator, X): return 42 if estimator.bandwidth == .1 else 0 X, _ = make_blobs(cluster_std=.1, random_state=1, centers=[[0, 1], [1, 0], [0, 0]]) search = GridSearchCV(KernelDensity(), param_grid=dict(bandwidth=[.01, .1, 1]), scoring=custom_scoring) search.fit(X) assert_equal(search.best_params_['bandwidth'], .1) assert_equal(search.best_score_, 42) def test_param_sampler(): # test basic properties of param sampler param_distributions = {"kernel": ["rbf", "linear"], "C": uniform(0, 1)} sampler = ParameterSampler(param_distributions=param_distributions, n_iter=10, random_state=0) samples = [x for x in sampler] assert_equal(len(samples), 10) for sample in samples: assert_true(sample["kernel"] in ["rbf", "linear"]) assert_true(0 <= sample["C"] <= 1) def test_randomized_search_grid_scores(): # Make a dataset with a lot of noise to get various kind of prediction # errors across CV folds and parameter settings X, y = make_classification(n_samples=200, n_features=100, n_informative=3, random_state=0) # XXX: as of today (scipy 0.12) it's not possible to set the random seed # of scipy.stats distributions: the assertions in this test should thus # not depend on the randomization params = dict(C=expon(scale=10), gamma=expon(scale=0.1)) n_cv_iter = 3 n_search_iter = 30 search = RandomizedSearchCV(SVC(), n_iter=n_search_iter, cv=n_cv_iter, param_distributions=params, iid=False) search.fit(X, y) assert_equal(len(search.grid_scores_), n_search_iter) # Check consistency of the structure of each cv_score item for cv_score in search.grid_scores_: assert_equal(len(cv_score.cv_validation_scores), n_cv_iter) # Because we set iid to False, the mean_validation score is the # mean of the fold mean scores instead of the aggregate sample-wise # mean score assert_almost_equal(np.mean(cv_score.cv_validation_scores), cv_score.mean_validation_score) assert_equal(list(sorted(cv_score.parameters.keys())), list(sorted(params.keys()))) # 
Check the consistency with the best_score_ and best_params_ attributes sorted_grid_scores = list(sorted(search.grid_scores_, key=lambda x: x.mean_validation_score)) best_score = sorted_grid_scores[-1].mean_validation_score assert_equal(search.best_score_, best_score) tied_best_params = [s.parameters for s in sorted_grid_scores if s.mean_validation_score == best_score] assert_true(search.best_params_ in tied_best_params, "best_params_={0} is not part of the" " tied best models: {1}".format( search.best_params_, tied_best_params)) def test_grid_search_score_consistency(): # test that correct scores are used clf = LinearSVC(random_state=0) X, y = make_blobs(random_state=0, centers=2) Cs = [.1, 1, 10] for score in ['f1', 'roc_auc']: grid_search = GridSearchCV(clf, {'C': Cs}, scoring=score) grid_search.fit(X, y) cv = StratifiedKFold(n_folds=3, y=y) for C, scores in zip(Cs, grid_search.grid_scores_): clf.set_params(C=C) scores = scores[2] # get the separate runs from grid scores i = 0 for train, test in cv: clf.fit(X[train], y[train]) if score == "f1": correct_score = f1_score(y[test], clf.predict(X[test])) elif score == "roc_auc": dec = clf.decision_function(X[test]) correct_score = roc_auc_score(y[test], dec) assert_almost_equal(correct_score, scores[i]) i += 1 def test_pickle(): # Test that a fit search can be pickled clf = MockClassifier() grid_search = GridSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=True) grid_search.fit(X, y) pickle.dumps(grid_search) # smoke test random_search = RandomizedSearchCV(clf, {'foo_param': [1, 2, 3]}, refit=True, n_iter=3) random_search.fit(X, y) pickle.dumps(random_search) # smoke test def test_grid_search_with_multioutput_data(): # Test search with multi-output estimator X, y = make_multilabel_classification(random_state=0) est_parameters = {"max_depth": [1, 2, 3, 4]} cv = KFold(y.shape[0], random_state=0) estimators = [DecisionTreeRegressor(random_state=0), DecisionTreeClassifier(random_state=0)] # Test with grid search cv for est 
in estimators: grid_search = GridSearchCV(est, est_parameters, cv=cv) grid_search.fit(X, y) for parameters, _, cv_validation_scores in grid_search.grid_scores_: est.set_params(**parameters) for i, (train, test) in enumerate(cv): est.fit(X[train], y[train]) correct_score = est.score(X[test], y[test]) assert_almost_equal(correct_score, cv_validation_scores[i]) # Test with a randomized search for est in estimators: random_search = RandomizedSearchCV(est, est_parameters, cv=cv, n_iter=3) random_search.fit(X, y) for parameters, _, cv_validation_scores in random_search.grid_scores_: est.set_params(**parameters) for i, (train, test) in enumerate(cv): est.fit(X[train], y[train]) correct_score = est.score(X[test], y[test]) assert_almost_equal(correct_score, cv_validation_scores[i]) def test_predict_proba_disabled(): # Test predict_proba when disabled on estimator. X = np.arange(20).reshape(5, -1) y = [0, 0, 1, 1, 1] clf = SVC(probability=False) gs = GridSearchCV(clf, {}, cv=2).fit(X, y) assert_false(hasattr(gs, "predict_proba")) def test_grid_search_allows_nans(): # Test GridSearchCV with Imputer X = np.arange(20, dtype=np.float64).reshape(5, -1) X[2, :] = np.nan y = [0, 0, 1, 1, 1] p = Pipeline([ ('imputer', Imputer(strategy='mean', missing_values='NaN')), ('classifier', MockClassifier()), ]) GridSearchCV(p, {'classifier__foo_param': [1, 2, 3]}, cv=2).fit(X, y) class FailingClassifier(BaseEstimator): """Classifier that raises a ValueError on fit()""" FAILING_PARAMETER = 2 def __init__(self, parameter=None): self.parameter = parameter def fit(self, X, y=None): if self.parameter == FailingClassifier.FAILING_PARAMETER: raise ValueError("Failing classifier failed as required") def predict(self, X): return np.zeros(X.shape[0]) def test_grid_search_failing_classifier(): # GridSearchCV with on_error != 'raise' # Ensures that a warning is raised and score reset where appropriate. 
X, y = make_classification(n_samples=20, n_features=10, random_state=0) clf = FailingClassifier() # refit=False because we only want to check that errors caused by fits # to individual folds will be caught and warnings raised instead. If # refit was done, then an exception would be raised on refit and not # caught by grid_search (expected behavior), and this would cause an # error in this test. gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy', refit=False, error_score=0.0) assert_warns(FitFailedWarning, gs.fit, X, y) # Ensure that grid scores were set to zero as required for those fits # that are expected to fail. assert all(np.all(this_point.cv_validation_scores == 0.0) for this_point in gs.grid_scores_ if this_point.parameters['parameter'] == FailingClassifier.FAILING_PARAMETER) gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy', refit=False, error_score=float('nan')) assert_warns(FitFailedWarning, gs.fit, X, y) assert all(np.all(np.isnan(this_point.cv_validation_scores)) for this_point in gs.grid_scores_ if this_point.parameters['parameter'] == FailingClassifier.FAILING_PARAMETER) def test_grid_search_failing_classifier_raise(): # GridSearchCV with on_error == 'raise' raises the error X, y = make_classification(n_samples=20, n_features=10, random_state=0) clf = FailingClassifier() # refit=False because we want to test the behaviour of the grid search part gs = GridSearchCV(clf, [{'parameter': [0, 1, 2]}], scoring='accuracy', refit=False, error_score='raise') # FailingClassifier issues a ValueError so this is what we look for. 
assert_raises(ValueError, gs.fit, X, y) def test_parameters_sampler_replacement(): # raise error if n_iter too large params = {'first': [0, 1], 'second': ['a', 'b', 'c']} sampler = ParameterSampler(params, n_iter=7) assert_raises(ValueError, list, sampler) # degenerates to GridSearchCV if n_iter the same as grid_size sampler = ParameterSampler(params, n_iter=6) samples = list(sampler) assert_equal(len(samples), 6) for values in ParameterGrid(params): assert_true(values in samples) # test sampling without replacement in a large grid params = {'a': range(10), 'b': range(10), 'c': range(10)} sampler = ParameterSampler(params, n_iter=99, random_state=42) samples = list(sampler) assert_equal(len(samples), 99) hashable_samples = ["a%db%dc%d" % (p['a'], p['b'], p['c']) for p in samples] assert_equal(len(set(hashable_samples)), 99) # doesn't go into infinite loops params_distribution = {'first': bernoulli(.5), 'second': ['a', 'b', 'c']} sampler = ParameterSampler(params_distribution, n_iter=7) samples = list(sampler) assert_equal(len(samples), 7)
gpl-2.0
jamespcole/home-assistant
tests/components/yessssms/test_notify.py
8
7584
"""The tests for the notify yessssms platform.""" import unittest import requests_mock import homeassistant.components.yessssms.notify as yessssms class TestNotifyYesssSMS(unittest.TestCase): """Test the yessssms notify.""" def setUp(self): # pylint: disable=invalid-name """Set up things to be run when tests are started.""" login = "06641234567" passwd = "testpasswd" recipient = "06501234567" self.yessssms = yessssms.YesssSMSNotificationService( login, passwd, recipient) @requests_mock.Mocker() def test_login_error(self, mock): """Test login that fails.""" mock.register_uri( requests_mock.POST, # pylint: disable=protected-access self.yessssms.yesss._login_url, status_code=200, text="BlaBlaBla<strong>Login nicht erfolgreichBlaBla" ) message = "Testing YesssSMS platform :)" with self.assertLogs("homeassistant.components.yessssms.notify", level='ERROR'): self.yessssms.send_message(message) self.assertTrue(mock.called) self.assertEqual(mock.call_count, 1) def test_empty_message_error(self): """Test for an empty SMS message error.""" message = "" with self.assertLogs("homeassistant.components.yessssms.notify", level='ERROR'): self.yessssms.send_message(message) @requests_mock.Mocker() def test_error_account_suspended(self, mock): """Test login that fails after multiple attempts.""" mock.register_uri( 'POST', # pylint: disable=protected-access self.yessssms.yesss._login_url, status_code=200, text="BlaBlaBla<strong>Login nicht erfolgreichBlaBla" ) message = "Testing YesssSMS platform :)" with self.assertLogs("homeassistant.components.yessssms.notify", level='ERROR'): self.yessssms.send_message(message) self.assertTrue(mock.called) self.assertEqual(mock.call_count, 1) mock.register_uri( 'POST', # pylint: disable=protected-access self.yessssms.yesss._login_url, status_code=200, text="Wegen 3 ungültigen Login-Versuchen ist Ihr Account für " "eine Stunde gesperrt." 
) message = "Testing YesssSMS platform :)" with self.assertLogs("homeassistant.components.yessssms.notify", level='ERROR'): self.yessssms.send_message(message) self.assertTrue(mock.called) self.assertEqual(mock.call_count, 2) def test_error_account_suspended_2(self): """Test login that fails after multiple attempts.""" message = "Testing YesssSMS platform :)" # pylint: disable=protected-access self.yessssms.yesss._suspended = True with self.assertLogs("homeassistant.components.yessssms.notify", level='ERROR') as context: self.yessssms.send_message(message) self.assertIn("Account is suspended, cannot send SMS.", context.output[0]) @requests_mock.Mocker() def test_send_message(self, mock): """Test send message.""" message = "Testing YesssSMS platform :)" mock.register_uri( 'POST', # pylint: disable=protected-access self.yessssms.yesss._login_url, status_code=302, # pylint: disable=protected-access headers={'location': self.yessssms.yesss._kontomanager} ) # pylint: disable=protected-access login = self.yessssms.yesss._logindata['login_rufnummer'] mock.register_uri( 'GET', # pylint: disable=protected-access self.yessssms.yesss._kontomanager, status_code=200, text="test..." + login + "</a>" ) mock.register_uri( 'POST', # pylint: disable=protected-access self.yessssms.yesss._websms_url, status_code=200, text="<h1>Ihre SMS wurde erfolgreich verschickt!</h1>" ) mock.register_uri( 'GET', # pylint: disable=protected-access self.yessssms.yesss._logout_url, status_code=200, ) with self.assertLogs("homeassistant.components.yessssms.notify", level='INFO') as context: self.yessssms.send_message(message) self.assertIn("SMS sent", context.output[0]) self.assertTrue(mock.called) self.assertEqual(mock.call_count, 4) self.assertIn(mock.last_request.scheme + "://" + mock.last_request.hostname + mock.last_request.path + "?" 
+ mock.last_request.query, # pylint: disable=protected-access self.yessssms.yesss._logout_url) def test_no_recipient_error(self): """Test for missing/empty recipient.""" message = "Testing YesssSMS platform :)" # pylint: disable=protected-access self.yessssms._recipient = "" with self.assertLogs("homeassistant.components.yessssms.notify", level='ERROR') as context: self.yessssms.send_message(message) self.assertIn("You need to provide a recipient for SMS notification", context.output[0]) @requests_mock.Mocker() def test_sms_sending_error(self, mock): """Test sms sending error.""" mock.register_uri( 'POST', # pylint: disable=protected-access self.yessssms.yesss._login_url, status_code=302, # pylint: disable=protected-access headers={'location': self.yessssms.yesss._kontomanager} ) # pylint: disable=protected-access login = self.yessssms.yesss._logindata['login_rufnummer'] mock.register_uri( 'GET', # pylint: disable=protected-access self.yessssms.yesss._kontomanager, status_code=200, text="test..." + login + "</a>" ) mock.register_uri( 'POST', # pylint: disable=protected-access self.yessssms.yesss._websms_url, status_code=500 ) message = "Testing YesssSMS platform :)" with self.assertLogs("homeassistant.components.yessssms.notify", level='ERROR') as context: self.yessssms.send_message(message) self.assertTrue(mock.called) self.assertEqual(mock.call_count, 3) self.assertIn("YesssSMS: error sending SMS", context.output[0]) @requests_mock.Mocker() def test_connection_error(self, mock): """Test connection error.""" mock.register_uri( 'POST', # pylint: disable=protected-access self.yessssms.yesss._login_url, exc=ConnectionError ) message = "Testing YesssSMS platform :)" with self.assertLogs("homeassistant.components.yessssms.notify", level='ERROR') as context: self.yessssms.send_message(message) self.assertTrue(mock.called) self.assertEqual(mock.call_count, 1) self.assertIn("unable to connect", context.output[0])
apache-2.0
dmoon4117/mutagen
mutagen/aiff.py
6
10863
# -*- coding: utf-8 -*-

# Copyright (C) 2014  Evan Purkhiser
#               2014  Ben Ockmore
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of version 2 of the GNU General Public License as
# published by the Free Software Foundation.

"""AIFF audio stream information and tags."""

# NOTE from Ben Ockmore - according to the Py3k migration guidelines, AIFF
# chunk keys should be unicode in Py3k, and unicode or bytes in Py2k (ASCII).
# To make this easier, chunk keys should be stored internally as unicode.

import struct
from struct import pack

from ._compat import endswith, text_type, PY3
from mutagen import StreamInfo, FileType
from mutagen.id3 import ID3
from mutagen.id3._util import ID3NoHeaderError, error as ID3Error
from mutagen._util import insert_bytes, delete_bytes, MutagenError

__all__ = ["AIFF", "Open", "delete"]


class error(MutagenError, RuntimeError):
    pass


class InvalidChunk(error, IOError):
    pass


# based on stdlib's aifc
_HUGE_VAL = 1.79769313486231e+308


def is_valid_chunk_id(id):
    """Return True if *id* is a legal IFF chunk identifier.

    A legal id is at most four characters, all printable ASCII
    (space through tilde).  Bytes input is accepted on Py2 only;
    on Py3 a non-unicode id raises TypeError.
    """
    if not isinstance(id, text_type):
        if PY3:
            raise TypeError("AIFF chunk must be unicode")

        try:
            id = id.decode('ascii')
        except UnicodeDecodeError:
            return False

    return ((len(id) <= 4) and (min(id) >= u' ') and
            (max(id) <= u'~'))


def read_float(data):  # 10 bytes
    """Decode a 10-byte IEEE 754 80-bit extended float (as used in the
    COMM chunk's sample-rate field) to a Python float."""
    expon, himant, lomant = struct.unpack('>hLL', data)
    sign = 1
    if expon < 0:
        sign = -1
        expon = expon + 0x8000
    if expon == himant == lomant == 0:
        f = 0.0
    elif expon == 0x7FFF:
        # All-ones exponent encodes infinity/NaN; clamp to a huge value
        # (same convention as the stdlib aifc module).
        f = _HUGE_VAL
    else:
        expon = expon - 16383
        f = (himant * 0x100000000 + lomant) * pow(2.0, expon - 63)
    return sign * f


class IFFChunk(object):
    """Representation of a single IFF chunk"""

    # Chunk headers are 8 bytes long (4 for ID and 4 for the size)
    HEADER_SIZE = 8

    def __init__(self, fileobj, parent_chunk=None):
        """Parse one chunk header at the file's current position.

        Raises InvalidChunk on a short read or an illegal chunk id.
        """
        self.__fileobj = fileobj
        self.parent_chunk = parent_chunk
        self.offset = fileobj.tell()

        header = fileobj.read(self.HEADER_SIZE)
        if len(header) < self.HEADER_SIZE:
            raise InvalidChunk()

        self.id, self.data_size = struct.unpack('>4si', header)

        if not isinstance(self.id, text_type):
            self.id = self.id.decode('ascii')

        if not is_valid_chunk_id(self.id):
            raise InvalidChunk()

        self.size = self.HEADER_SIZE + self.data_size
        self.data_offset = fileobj.tell()
        self.data = None

    def read(self):
        """Read the chunks data"""
        self.__fileobj.seek(self.data_offset)
        self.data = self.__fileobj.read(self.data_size)

    def delete(self):
        """Removes the chunk from the file"""
        delete_bytes(self.__fileobj, self.size, self.offset)
        if self.parent_chunk is not None:
            self.parent_chunk.resize(
                self.parent_chunk.data_size - self.size)

    def resize(self, data_size):
        """Update the size of the chunk"""
        # The 4-byte size field sits right after the 4-byte chunk id.
        self.__fileobj.seek(self.offset + 4)
        self.__fileobj.write(pack('>I', data_size))
        if self.parent_chunk is not None:
            size_diff = self.data_size - data_size
            self.parent_chunk.resize(
                self.parent_chunk.data_size - size_diff)
        self.data_size = data_size
        self.size = data_size + self.HEADER_SIZE


class IFFFile(object):
    """Representation of a IFF file"""

    def __init__(self, fileobj):
        self.__fileobj = fileobj
        self.__chunks = {}

        # AIFF Files always start with the FORM chunk which contains a 4 byte
        # ID before the start of other chunks
        fileobj.seek(0)
        self.__chunks[u'FORM'] = IFFChunk(fileobj)

        # Skip past the 4 byte FORM id
        fileobj.seek(IFFChunk.HEADER_SIZE + 4)

        # Where the next chunk can be located. We need to keep track of this
        # since the size indicated in the FORM header may not match up with
        # the offset determined from the size of the last chunk in the file
        self.__next_offset = fileobj.tell()

        # Load all of the chunks
        while True:
            try:
                chunk = IFFChunk(fileobj, self[u'FORM'])
            except InvalidChunk:
                break
            self.__chunks[chunk.id.strip()] = chunk

            # Calculate the location of the next chunk,
            # considering the pad byte
            self.__next_offset = chunk.offset + chunk.size
            self.__next_offset += self.__next_offset % 2
            fileobj.seek(self.__next_offset)

    def __contains__(self, id_):
        """Check if the IFF file contains a specific chunk"""
        if not isinstance(id_, text_type):
            id_ = id_.decode('ascii')

        if not is_valid_chunk_id(id_):
            raise KeyError("AIFF key must be four ASCII characters.")

        return id_ in self.__chunks

    def __getitem__(self, id_):
        """Get a chunk from the IFF file"""
        if not isinstance(id_, text_type):
            id_ = id_.decode('ascii')

        if not is_valid_chunk_id(id_):
            raise KeyError("AIFF key must be four ASCII characters.")

        try:
            return self.__chunks[id_]
        except KeyError:
            raise KeyError(
                "%r has no %r chunk" % (self.__fileobj.name, id_))

    def __delitem__(self, id_):
        """Remove a chunk from the IFF file"""
        if not isinstance(id_, text_type):
            id_ = id_.decode('ascii')

        if not is_valid_chunk_id(id_):
            raise KeyError("AIFF key must be four ASCII characters.")

        self.__chunks.pop(id_).delete()

    def insert_chunk(self, id_):
        """Insert a new chunk at the end of the IFF file"""
        if not isinstance(id_, text_type):
            id_ = id_.decode('ascii')

        if not is_valid_chunk_id(id_):
            raise KeyError("AIFF key must be four ASCII characters.")

        self.__fileobj.seek(self.__next_offset)
        # Write an empty chunk header (size 0), then re-parse it so the new
        # chunk is registered with the FORM chunk as its parent.
        self.__fileobj.write(pack('>4si', id_.ljust(4).encode('ascii'), 0))
        self.__fileobj.seek(self.__next_offset)
        chunk = IFFChunk(self.__fileobj, self[u'FORM'])
        self[u'FORM'].resize(self[u'FORM'].data_size + chunk.size)

        self.__chunks[id_] = chunk
        self.__next_offset = chunk.offset + chunk.size


class AIFFInfo(StreamInfo):
    """AIFF audio stream information.

    Information is parsed from the COMM chunk of the AIFF file

    Useful attributes:

    * length -- audio length, in seconds
    * bitrate -- audio bitrate, in bits per second
    * channels -- The number of audio channels
    * sample_rate -- audio sample rate, in Hz
    * sample_size -- The audio sample size
    """

    length = 0
    bitrate = 0
    channels = 0
    sample_rate = 0

    def __init__(self, fileobj):
        iff = IFFFile(fileobj)
        try:
            common_chunk = iff[u'COMM']
        except KeyError as e:
            raise error(str(e))

        common_chunk.read()

        # COMM layout: int16 channels, uint32 frame count, int16 sample
        # size, 80-bit extended float sample rate.
        info = struct.unpack('>hLh10s', common_chunk.data[:18])
        channels, frame_count, sample_size, sample_rate = info

        self.sample_rate = int(read_float(sample_rate))
        self.sample_size = sample_size
        self.channels = channels
        self.bitrate = channels * sample_size * self.sample_rate
        # NOTE(review): a COMM chunk declaring a zero sample rate would
        # raise ZeroDivisionError here -- preserved as-is.
        self.length = frame_count / float(self.sample_rate)

    def pprint(self):
        return "%d channel AIFF @ %d bps, %s Hz, %.2f seconds" % (
            self.channels, self.bitrate, self.sample_rate, self.length)


class _IFFID3(ID3):
    """A AIFF file with ID3v2 tags"""

    def _pre_load_header(self, fileobj):
        # Seek to the start of the ID3 chunk's payload so the parent ID3
        # parser reads the tag from the right offset.
        try:
            fileobj.seek(IFFFile(fileobj)[u'ID3'].data_offset)
        except (InvalidChunk, KeyError):
            raise ID3NoHeaderError("No ID3 chunk")

    def save(self, filename=None, v2_version=4, v23_sep='/'):
        """Save ID3v2 data to the AIFF file"""

        framedata = self._prepare_framedata(v2_version, v23_sep)
        framesize = len(framedata)

        if filename is None:
            filename = self.filename

        # Unlike the parent ID3.save method, we won't save to a blank file
        # since we would have to construct a empty AIFF file
        with open(filename, 'rb+') as fileobj:
            iff_file = IFFFile(fileobj)

            if u'ID3' not in iff_file:
                iff_file.insert_chunk(u'ID3')

            chunk = iff_file[u'ID3']
            fileobj.seek(chunk.data_offset)

            header = fileobj.read(10)
            header = self._prepare_id3_header(header, framesize, v2_version)
            header, new_size, _ = header

            data = header + framedata + (b'\x00' * (new_size - framesize))

            # Include ID3 header size in 'new_size' calculation
            new_size += 10

            # Expand the chunk if necessary, including pad byte
            if new_size > chunk.size:
                insert_at = chunk.offset + chunk.size
                insert_size = new_size - chunk.size + new_size % 2
                insert_bytes(fileobj, insert_size, insert_at)
                chunk.resize(new_size)

            fileobj.seek(chunk.data_offset)
            fileobj.write(data)

    def delete(self, filename=None):
        """Completely removes the ID3 chunk from the AIFF file"""

        if filename is None:
            filename = self.filename
        delete(filename)
        self.clear()


def delete(filename):
    """Completely removes the ID3 chunk from the AIFF file"""

    with open(filename, "rb+") as file_:
        try:
            del IFFFile(file_)[u'ID3']
        except KeyError:
            # No ID3 chunk present; nothing to remove.
            pass


class AIFF(FileType):
    """An AIFF audio file.

    :ivar info: :class:`AIFFInfo`
    :ivar tags: :class:`ID3`
    """

    _mimes = ["audio/aiff", "audio/x-aiff"]

    @staticmethod
    def score(filename, fileobj, header):
        filename = filename.lower()

        return (header.startswith(b"FORM") * 2 + endswith(filename, b".aif") +
                endswith(filename, b".aiff") + endswith(filename, b".aifc"))

    def add_tags(self):
        """Add an empty ID3 tag to the file."""
        if self.tags is None:
            self.tags = _IFFID3()
        else:
            raise error("an ID3 tag already exists")

    def load(self, filename, **kwargs):
        """Load stream and tag information from a file."""
        self.filename = filename

        try:
            self.tags = _IFFID3(filename, **kwargs)
        except ID3NoHeaderError:
            self.tags = None
        except ID3Error as e:
            raise error(e)

        # Bug fix: previously `fileobj = open(filename, "rb")` sat inside a
        # try block whose finally ran `fileobj.close()`.  If open() itself
        # raised (e.g. missing file), `fileobj` was never bound and the
        # finally clause raised UnboundLocalError, masking the original
        # IOError.  A context manager closes the file only once it has
        # actually been opened, so callers now see the real error.
        with open(filename, "rb") as fileobj:
            self.info = AIFFInfo(fileobj)


Open = AIFF
gpl-2.0
mikeolteanu/livepythonconsole-app-engine
boilerplate/external/apiclient/model.py
102
11708
#!/usr/bin/python2.4 # # Copyright (C) 2010 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Model objects for requests and responses. Each API may support one or more serializations, such as JSON, Atom, etc. The model classes are responsible for converting between the wire format and the Python object representation. """ __author__ = 'jcgregorio@google.com (Joe Gregorio)' import logging import urllib from apiclient import __version__ from errors import HttpError from oauth2client.anyjson import simplejson dump_request_response = False def _abstract(): raise NotImplementedError('You need to override this function') class Model(object): """Model base class. All Model classes should implement this interface. The Model serializes and de-serializes between a wire format such as JSON and a Python object representation. """ def request(self, headers, path_params, query_params, body_value): """Updates outgoing requests with a serialized body. Args: headers: dict, request headers path_params: dict, parameters that appear in the request path query_params: dict, parameters that appear in the query body_value: object, the request body as a Python object, which must be serializable. Returns: A tuple of (headers, path_params, query, body) headers: dict, request headers path_params: dict, parameters that appear in the request path query: string, query part of the request URI body: string, the body serialized in the desired wire format. 
""" _abstract() def response(self, resp, content): """Convert the response wire format into a Python object. Args: resp: httplib2.Response, the HTTP response headers and status content: string, the body of the HTTP response Returns: The body de-serialized as a Python object. Raises: apiclient.errors.HttpError if a non 2xx response is received. """ _abstract() class BaseModel(Model): """Base model class. Subclasses should provide implementations for the "serialize" and "deserialize" methods, as well as values for the following class attributes. Attributes: accept: The value to use for the HTTP Accept header. content_type: The value to use for the HTTP Content-type header. no_content_response: The value to return when deserializing a 204 "No Content" response. alt_param: The value to supply as the "alt" query parameter for requests. """ accept = None content_type = None no_content_response = None alt_param = None def _log_request(self, headers, path_params, query, body): """Logs debugging information about the request if requested.""" if dump_request_response: logging.info('--request-start--') logging.info('-headers-start-') for h, v in headers.iteritems(): logging.info('%s: %s', h, v) logging.info('-headers-end-') logging.info('-path-parameters-start-') for h, v in path_params.iteritems(): logging.info('%s: %s', h, v) logging.info('-path-parameters-end-') logging.info('body: %s', body) logging.info('query: %s', query) logging.info('--request-end--') def request(self, headers, path_params, query_params, body_value): """Updates outgoing requests with a serialized body. Args: headers: dict, request headers path_params: dict, parameters that appear in the request path query_params: dict, parameters that appear in the query body_value: object, the request body as a Python object, which must be serializable by simplejson. 
Returns: A tuple of (headers, path_params, query, body) headers: dict, request headers path_params: dict, parameters that appear in the request path query: string, query part of the request URI body: string, the body serialized as JSON """ query = self._build_query(query_params) headers['accept'] = self.accept headers['accept-encoding'] = 'gzip, deflate' if 'user-agent' in headers: headers['user-agent'] += ' ' else: headers['user-agent'] = '' headers['user-agent'] += 'google-api-python-client/%s (gzip)' % __version__ if body_value is not None: headers['content-type'] = self.content_type body_value = self.serialize(body_value) self._log_request(headers, path_params, query, body_value) return (headers, path_params, query, body_value) def _build_query(self, params): """Builds a query string. Args: params: dict, the query parameters Returns: The query parameters properly encoded into an HTTP URI query string. """ if self.alt_param is not None: params.update({'alt': self.alt_param}) astuples = [] for key, value in params.iteritems(): if type(value) == type([]): for x in value: x = x.encode('utf-8') astuples.append((key, x)) else: if getattr(value, 'encode', False) and callable(value.encode): value = value.encode('utf-8') astuples.append((key, value)) return '?' + urllib.urlencode(astuples) def _log_response(self, resp, content): """Logs debugging information about the response if requested.""" if dump_request_response: logging.info('--response-start--') for h, v in resp.iteritems(): logging.info('%s: %s', h, v) if content: logging.info(content) logging.info('--response-end--') def response(self, resp, content): """Convert the response wire format into a Python object. Args: resp: httplib2.Response, the HTTP response headers and status content: string, the body of the HTTP response Returns: The body de-serialized as a Python object. Raises: apiclient.errors.HttpError if a non 2xx response is received. 
""" self._log_response(resp, content) # Error handling is TBD, for example, do we retry # for some operation/error combinations? if resp.status < 300: if resp.status == 204: # A 204: No Content response should be treated differently # to all the other success states return self.no_content_response return self.deserialize(content) else: logging.debug('Content from bad request was: %s' % content) raise HttpError(resp, content) def serialize(self, body_value): """Perform the actual Python object serialization. Args: body_value: object, the request body as a Python object. Returns: string, the body in serialized form. """ _abstract() def deserialize(self, content): """Perform the actual deserialization from response string to Python object. Args: content: string, the body of the HTTP response Returns: The body de-serialized as a Python object. """ _abstract() class JsonModel(BaseModel): """Model class for JSON. Serializes and de-serializes between JSON and the Python object representation of HTTP request and response bodies. """ accept = 'application/json' content_type = 'application/json' alt_param = 'json' def __init__(self, data_wrapper=False): """Construct a JsonModel. Args: data_wrapper: boolean, wrap requests and responses in a data wrapper """ self._data_wrapper = data_wrapper def serialize(self, body_value): if (isinstance(body_value, dict) and 'data' not in body_value and self._data_wrapper): body_value = {'data': body_value} return simplejson.dumps(body_value) def deserialize(self, content): content = content.decode('utf-8') body = simplejson.loads(content) if self._data_wrapper and isinstance(body, dict) and 'data' in body: body = body['data'] return body @property def no_content_response(self): return {} class RawModel(JsonModel): """Model class for requests that don't return JSON. Serializes and de-serializes between JSON and the Python object representation of HTTP request, and returns the raw bytes of the response body. 
""" accept = '*/*' content_type = 'application/json' alt_param = None def deserialize(self, content): return content @property def no_content_response(self): return '' class MediaModel(JsonModel): """Model class for requests that return Media. Serializes and de-serializes between JSON and the Python object representation of HTTP request, and returns the raw bytes of the response body. """ accept = '*/*' content_type = 'application/json' alt_param = 'media' def deserialize(self, content): return content @property def no_content_response(self): return '' class ProtocolBufferModel(BaseModel): """Model class for protocol buffers. Serializes and de-serializes the binary protocol buffer sent in the HTTP request and response bodies. """ accept = 'application/x-protobuf' content_type = 'application/x-protobuf' alt_param = 'proto' def __init__(self, protocol_buffer): """Constructs a ProtocolBufferModel. The serialzed protocol buffer returned in an HTTP response will be de-serialized using the given protocol buffer class. Args: protocol_buffer: The protocol buffer class used to de-serialize a response from the API. """ self._protocol_buffer = protocol_buffer def serialize(self, body_value): return body_value.SerializeToString() def deserialize(self, content): return self._protocol_buffer.FromString(content) @property def no_content_response(self): return self._protocol_buffer() def makepatch(original, modified): """Create a patch object. Some methods support PATCH, an efficient way to send updates to a resource. This method allows the easy construction of patch bodies by looking at the differences between a resource before and after it was modified. Args: original: object, the original deserialized resource modified: object, the modified deserialized resource Returns: An object that contains only the changes from original to modified, in a form suitable to pass to a PATCH method. 
Example usage: item = service.activities().get(postid=postid, userid=userid).execute() original = copy.deepcopy(item) item['object']['content'] = 'This is updated.' service.activities.patch(postid=postid, userid=userid, body=makepatch(original, item)).execute() """ patch = {} for key, original_value in original.iteritems(): modified_value = modified.get(key, None) if modified_value is None: # Use None to signal that the element is deleted patch[key] = None elif original_value != modified_value: if type(original_value) == type({}): # Recursively descend objects patch[key] = makepatch(original_value, modified_value) else: # In the case of simple types or arrays we just replace patch[key] = modified_value else: # Don't add anything to patch if there's no change pass for key in modified: if key not in original: patch[key] = modified[key] return patch
lgpl-3.0
ty707/airflow
airflow/operators/postgres_operator.py
18
1910
# -*- coding: utf-8 -*- # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import logging from airflow.hooks.postgres_hook import PostgresHook from airflow.models import BaseOperator from airflow.utils.decorators import apply_defaults class PostgresOperator(BaseOperator): """ Executes sql code in a specific Postgres database :param postgres_conn_id: reference to a specific postgres database :type postgres_conn_id: string :param sql: the sql code to be executed :type sql: Can receive a str representing a sql statement, a list of str (sql statements), or reference to a template file. Template reference are recognized by str ending in '.sql' """ template_fields = ('sql',) template_ext = ('.sql',) ui_color = '#ededed' @apply_defaults def __init__( self, sql, postgres_conn_id='postgres_default', autocommit=False, parameters=None, *args, **kwargs): super(PostgresOperator, self).__init__(*args, **kwargs) self.sql = sql self.postgres_conn_id = postgres_conn_id self.autocommit = autocommit self.parameters = parameters def execute(self, context): logging.info('Executing: ' + str(self.sql)) self.hook = PostgresHook(postgres_conn_id=self.postgres_conn_id) self.hook.run(self.sql, self.autocommit, parameters=self.parameters)
apache-2.0
tzabian/fuego-pootle
external_apps/djblets/util/testing.py
7
1900
# # djblets/util/testing.py - Some classes useful for unit testing django-based # applications # # Copyright (c) 2007 David Trowbridge # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. # IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY # CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, # TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE # SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # from django.template import Node from django.test import TestCase class StubNodeList(Node): def __init__(self, default_text): self.default_text = default_text def render(self, context): return self.default_text class StubParser: def __init__(self, default_text): self.default_text = default_text def parse(self, until): return StubNodeList(self.default_text) def delete_first_token(self): pass class TagTest(TestCase): """Base testing setup for custom template tags""" def setUp(self): self.parser = StubParser(self.getContentText()) def getContentText(self): return "content"
gpl-2.0
ANNarchy/ANNarchy
ANNarchy/extensions/convolution/Convolve.py
1
40983
# ============================================================================= # # Convolution.py # # This file is part of ANNarchy. # # Copyright (C) 2019 Julien Vitay <julien.vitay@gmail.com>, # Helge Uelo Dinkelbach <helge.dinkelbach@gmail.com> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # ANNarchy is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # # ============================================================================= from __future__ import print_function import numpy as np from ANNarchy.core import Global from ANNarchy.core.Projection import Projection from ANNarchy.generator.Utils import tabify from .Utils import SharedSynapse # Indices used for each dimension indices = ['i', 'j', 'k', 'l', 'm', 'n'] class Convolution(Projection): """ Performs a convolution of a weight kernel on the pre-synaptic population. Despite its name, the operation performed is actually a cross-correlation, as is usual in computer vision and convolutional neural networks: $$g(x) = \sum_{k=-n}^n h(k) \, f(x + k)$$ The convolution operation benefits from giving a multi-dimensional geometry to the populations and filters, for example in 2D: ```python inp = Population(geometry=(100, 100), neuron=Neuron(parameters="r = 0.0")) pop = Population(geometry=(100, 100), neuron=Neuron(equations="r = sum(exc)")) proj = Convolution(inp, pop, 'exc') proj.connect_filter( [ [-1., 0., 1.], [-1., 0., 1.], [-1., 0., 1.] 
]) ``` The maximum number of dimensions for populations and filters is 4, an error is thrown otherwise. Depending on the number of dimensions of the pre- and post-synaptic populations, as well as of the kernel, the convolution is implemented differentely. **Method connect_filter()** * If the pre- and post-populations have the same dimension as the kernel, the convolution is regular. Example: (100, 100) * (3, 3) -> (100, 100) * If the post-population has one dimension less than the pre-synaptic one, the last dimension of the kernel must match the last one of the pre-synaptic population. Example: (100, 100, 3) * (3, 3, 3) -> (100, 100) * If the kernel has less dimensions than the two populations, the number of neurons in the last dimension of the populations must be the same. The convolution will be calculated for each feature map in the last dimension. In this case, you must set ``keep_last_dimension`` to ``True``. Example: (100, 100, 16) * (3, 3) -> (100, 100, 16) **Method connect_filters()** * If the kernel has more dimensions than the pre-synaptic population, this means a bank of different filters will be applied on the pre-synaptic population (like a convolutional layer in a CNN). Attention: the first index of ``weights`` corresponds to the different filters, while the result will be accessible in the last dimension of the post-synaptic population. You must set the ``multiple`` argument to True. Example: (100, 100) * (16, 3, 3) -> (100, 100, 16) The convolution **always** uses padding for elements that would be outside the array (no equivalent of ``valid`` in tensorflow). It is 0.0 by default, but can be changed using the ``padding`` argument. Setting ``padding`` to the string ``border`` will repeat the value of the border elements. Sub-sampling will be automatically performed according to the populations' geometry. If these geometries do not match, an error will be thrown. 
Example: (100, 100) * (3, 3) -> (50, 50) You can redefine the sub-sampling by providing a list ``subsampling`` as argument, defining for each post-synaptic neuron the coordinates of the pre-synaptic neuron which will be the center of the filter/kernel. """ def __init__(self, pre, post, target, psp="pre.r * w", operation="sum", name=None, copied=False): """ :param pre: pre-synaptic population (either its name or a ``Population`` object). :param post: post-synaptic population (either its name or a ``Population`` object). :param target: type of the connection :param psp: continuous influence of a single synapse on the post-synaptic neuron (default for rate-coded: ``w*pre.r``). :param operation: operation (sum, max, min, mean) performed by the kernel (default: sum). """ # Create the description, but it will not be used for generation Projection.__init__( self, pre, post, target, synapse=SharedSynapse(psp=psp, operation=operation, name="Convolution operation", description="Convoluted kernel over the pre-synaptic population."), name=name, copied=copied ) # Disable saving self._saveable = False def connect_filter(self, weights, delays=0.0, keep_last_dimension=False, padding=0.0, subsampling=None): """ Applies a single filter on the pre-synaptic population. :param weights: numpy array or list of lists representing the matrix of weights for the filter. :param delays: delay in synaptic transmission (default: dt). Can only be the same value for all neurons. :param keep_last_dimension: defines if the last dimension of the pre- and post-synaptic will be convolved in parallel. The weights matrix must have one dimension less than the pre-synaptic population, and the number of neurons in the last dimension of the pre- and post-synaptic populations must match. Default: False. :param padding: value to be used for the rates outside the pre-synaptic population. If it is a floating value, the pre-synaptic population is virtually extended with this value above its boundaries. 
If it is equal to 'border', the values on the boundaries are repeated. Default: 0.0. :param subsampling: list for each post-synaptic neuron of coordinates in the pre-synaptic population defining the center of the kernel/filter. Default: None. """ # Process the weights self.weights = np.array(weights) # Process the delays self.delays = float(delays) if not isinstance(delays, (int, float)): Global._error('Convolutions can only have constant delays.') self.subsampling = subsampling self.keep_last_dimension = keep_last_dimension self.padding = padding self.multiple = False # Check dimensions of populations and weight matrix self.dim_kernel = self.weights.ndim self.dim_pre = self.pre.dimension self.dim_post = self.post.dimension if self.dim_post > 4: print("Convolution:", self.dim_pre, '*', self.dim_kernel, '->', self.dim_post) Global._error('Convolution: Too many dimensions for the post-synaptic population (maximum 4).') if self.dim_pre > 4: print("Convolution:", self.dim_pre, '*', self.dim_kernel, '->', self.dim_post) Global._error('Convolution: Too many dimensions for the pre-synaptic population (maximum 4).') if self.dim_kernel > 5 or (not self.multiple and self.dim_kernel > 4): print("Convolution:", self.dim_pre, '*', self.dim_kernel, '->', self.dim_post) Global._error('Convolution: Too many dimensions for the kernel (maximum 4).') # Check if the last axes match for parallel convolution (e.g. 
3-2-3) if self.dim_kernel < self.dim_pre: if not self.keep_last_dimension: print("Convolution:", self.dim_pre, '*', self.dim_kernel, '->', self.dim_post) Global._error('Convolution: If the kernel has less dimensions than the pre-synaptic population, you need to set the flag keep_last_dimension to True.') if self.pre.geometry[-1] != self.post.geometry[-1]: print("Convolution:", self.dim_pre, '*', self.dim_kernel, '->', self.dim_post) Global._error('Convolution: If the kernel has fewer dimensions than the two populations (keep_last_dimension=True), these must have the same number of neurons in the last dimension.') # If the last dim of the kernel matches the last dim of the pre-pop, the last pop can have one dimension less. if self.dim_post < self.dim_pre: # OK, but check the last dimension of the kernel has the same size as the post-population if self.weights.shape[-1] != self.pre.geometry[-1]: print("Convolution:", self.dim_pre, '*', self.dim_kernel, '->', self.dim_post) Global._error('Convolution: If the post-synaptic population has less dimensions than the pre-synaptic one, the last dimension of the filter must be equal to the last of the pre-synaptic population.') # Check if it is a bank of filters if self.dim_kernel > self.dim_pre: print("Convolution:", self.dim_pre, '*', self.dim_kernel, '->', self.dim_post) Global._error('Convolution: If the kernel has more dimensions than the pre-synaptic population, you need to use the connect_filters() method.') # Generate the pre-synaptic coordinates self._generate_pre_coordinates() # Finish building the synapses self._create() return self def connect_filters(self, weights, delays=0.0, keep_last_dimension=False, padding=0.0, subsampling=None): """ Applies a set of different filters on the pre-synaptic population. The weights matrix must have one dimension more than the pre-synaptic populations, and the number of neurons in the last dimension of the post-synaptic population must be equal to the number of filters. 
:param weights: numpy array or list of lists representing the matrix of weights for the filter. :param delays: delay in synaptic transmission (default: dt). Can only be the same value for all neurons. :param keep_last_dimension: defines if the last dimension of the pre- and post-synaptic will be convolved in parallel. The weights matrix must have one dimension less than the pre-synaptic population, and the number of neurons in the last dimension of the pre- and post-synaptic populations must match. Default: False. :param padding: value to be used for the rates outside the pre-synaptic population. If it is a floating value, the pre-synaptic population is virtually extended with this value above its boundaries. If it is equal to 'border', the values on the boundaries are repeated. Default: 0.0. :param subsampling: list for each post-synaptic neuron of coordinates in the pre-synaptic population defining the center of the kernel/filter. Default: None. """ # Process the weights self.weights = np.array(weights) # Process the delays self.delays = float(delays) if not isinstance(delays, (int, float)): Global._error('Convolutions can only have constant delays.') self.subsampling = subsampling self.keep_last_dimension = keep_last_dimension self.padding = padding self.multiple = True # Check dimensions of populations and weight matrix self.dim_kernel = self.weights.ndim self.dim_pre = self.pre.dimension self.dim_post = self.post.dimension if self.dim_post > 4: print("Convolution:", self.dim_pre, '*', self.dim_kernel, '->', self.dim_post) Global._error('Convolution: Too many dimensions for the post-synaptic population (maximum 4).') if self.dim_pre > 4: print("Convolution:", self.dim_pre, '*', self.dim_kernel, '->', self.dim_post) Global._error('Convolution: Too many dimensions for the pre-synaptic population (maximum 4).') if self.dim_kernel > 5 or (not self.multiple and self.dim_kernel > 4): print("Convolution:", self.dim_pre, '*', self.dim_kernel, '->', self.dim_post) 
Global._error('Convolution: Too many dimensions for the kernel (maximum 4).') # Check if the last axes match for parallel convolution (e.g. 3-2-3) if self.dim_kernel < self.dim_pre: if not self.keep_last_dimension: print("Convolution:", self.dim_pre, '*', self.dim_kernel, '->', self.dim_post) Global._error('Convolution: If the kernel has less dimensions than the pre-synaptic population, you need to set the flag keep_last_dimension to True.') if self.pre.geometry[-1] != self.post.geometry[-1]: print("Convolution:", self.dim_pre, '*', self.dim_kernel, '->', self.dim_post) Global._error('Convolution: If the kernel has fewer dimensions than the two populations (keep_last_dimension=True), these must have the same number of neurons in the last dimension.') # If the last dim of the kernel matches the last dim of the pre-pop, the last pop can have one dimension less. if self.dim_post < self.dim_pre: # OK, but check the last dimension of the kernel has the same size as the post-population if self.weights.shape[-1] != self.pre.geometry[-1]: print("Convolution:", self.dim_pre, '*', self.dim_kernel, '->', self.dim_post) Global._error('Convolution: If the post-synaptic population has less dimensions than the pre-synaptic one, the last dimension of the filter must be equal to the last of the pre-synaptic population.') # The last dimension of the post population must correspond to the number of filters if self.weights.shape[0] != self.post.geometry[-1]: print("Convolution:", self.dim_pre, '*', self.dim_kernel, '->', self.dim_post) Global._error('Convolution: For multiple filters, the last dimension of the post-synaptic population must have as many neurons as there are filters.') # Generate the pre-synaptic coordinates self._generate_pre_coordinates_bank() # Finish building the synapses self._create() return self def _copy(self, pre, post): "Returns a copy of the projection when creating networks. Internal use only." 
raise NotImplementedError def _create(self): # create fake LIL object, just for compilation. try: from ANNarchy.core.cython_ext.Connector import LILConnectivity except Exception as e: Global._print(e) Global._error('ANNarchy was not successfully installed.') lil = LILConnectivity() lil.max_delay = self.delays lil.uniform_delay = self.delays self.connector_name = "Convolution" self.connector_description = "Convolution" self._store_connectivity(self._load_from_lil, (lil, ), self.delays) ################################ ### Create connection pattern ################################ def _connect(self, module): """ Builds up dendrites either from list or dictionary. Called by instantiate(). """ if not self._connection_method: Global._error('Convolution: The projection between ' + self.pre.name + ' and ' + self.post.name + ' is declared but not connected.') # Create the Cython instance proj = getattr(module, 'proj'+str(self.id)+'_wrapper') self.cyInstance = proj(self.weights, self.pre_coordinates) # Define the list of postsynaptic neurons self.post_ranks = list(range(self.post.size)) # Set delays after instantiation if self.delays > 0.0: self.cyInstance.set_delay(self.delays/Global.config['dt']) def _generate_pre_coordinates(self): " Returns a list for each post neuron of the corresponding center coordinates." 
# Check if the list is already defined: if self.subsampling: try: shape = np.array(self.subsampling).shape except: Global._error('Convolution: The sub-sampling list must have', self.post.size, 'elements of size', self.pre.dimension) return if shape != (self.post.size, self.pre.dimension): Global._error('Convolution: The sub-sampling list must have', self.post.size, 'elements of size', self.pre.dimension) return self.pre_coordinates = self.subsampling return # Otherwise create it, possibly with sub-sampling coords = [[] for i in range(self.post.size)] # Compute pre-indices idx_range= [] for dim in range(self.dim_pre): if dim < self.dim_post: pre_size = int(self.pre.geometry[dim]) post_size = int(self.post.geometry[dim]) sample = int(pre_size/post_size) if post_size * sample != pre_size: Global._error('Convolution: The pre-synaptic dimensions must be a multiple of the post-synaptic ones for down-sampling to work.') idx_range.append([int((sample-1)/2) + sample * i for i in range(post_size)]) else: # extra dimension if self.keep_last_dimension: idx_range.append(range(self.post.geometry[dim])) else: idx_range.append([self._center_filter(self.weights.shape[dim])]) # Generates coordinates TODO: Find a more robust way! if self.dim_pre == 1 : rk = 0 for i in idx_range[0]: coords[rk] = [i] rk += 1 elif self.dim_pre == 2 : rk = 0 for i in idx_range[0]: for j in idx_range[1]: coords[rk] = [i, j] rk += 1 elif self.dim_pre == 3 : rk = 0 for i in idx_range[0]: for j in idx_range[1]: for k in idx_range[2]: coords[rk] = [i, j, k] rk += 1 elif self.dim_pre == 4 : rk = 0 for i in idx_range[0]: for j in idx_range[1]: for k in idx_range[2]: for l in idx_range[3]: coords[rk] = [i, j, k, l] rk += 1 # Save the result self.pre_coordinates = coords def _generate_pre_coordinates_bank(self): " Returns a list for each post neuron of the corresponding center coordinates, when the filter is a bank." 
self.nb_filters = self.weights.shape[0] self.dim_single_filter = self.weights.shape[1:] # Check if the list is already defined: if self.subsampling: try: shape = np.array(self.subsampling).shape except: Global._error('Convolution: The sub-sampling list must have', self.post.size / self.post.geometry[-1], 'elements of size', self.pre.dimension) return if shape != (self.post.size/ self.post.geometry[-1], self.pre.dimension): Global._error('Convolution: The sub-sampling list must have', self.post.size/ self.post.geometry[-1], 'elements of size', self.pre.dimension) return self.pre_coordinates = [c + [d] for c in self.subsampling for d in range(self.nb_filters)] return # Otherwise create it, possibly with sub-sampling coords = [[] for i in range(self.post.size)] # Compute pre-indices idx_range= [] for dim in range(self.dim_pre): if dim < self.dim_post -1: pre_size = self.pre.geometry[dim] post_size = self.post.geometry[dim] sample = int(pre_size/post_size) if post_size * sample != pre_size: Global._error('Convolution: The pre-synaptic dimensions must be a multiple of the post-synaptic ones for down-sampling to work.') idx_range.append([int((sample-1)/2) + sample * i for i in range(post_size)]) else: # extra dimension if self.keep_last_dimension: idx_range.append(range(self.post.geometry[dim])) else: idx_range.append([self._center_filter(self.weights.shape[dim+1])]) # Generates coordinates TODO: Find a more robust way! 
if self.dim_pre == 1 : rk = 0 for i in idx_range[0]: for d in range(self.nb_filters): coords[rk] = [i, d] rk += 1 elif self.dim_pre == 2 : rk = 0 for i in idx_range[0]: for j in idx_range[1]: for d in range(self.nb_filters): coords[rk] = [i, j, d ] rk += 1 elif self.dim_pre == 3 : rk = 0 for i in idx_range[0]: for j in idx_range[1]: for k in idx_range[2]: for d in range(self.nb_filters): coords[rk] = [i, j, k, d] rk += 1 elif self.dim_pre == 4 : rk = 0 for i in idx_range[0]: for j in idx_range[1]: for k in idx_range[2]: for l in idx_range[3]: for d in range(self.nb_filters): coords[rk] = [i, j, k, l, d] rk += 1 # Save the result self.pre_coordinates = coords ################################ # Code generation ################################ def _generate(self): """ Overrides default code generation. This function is called during the code generation procedure. """ # Filter definition filter_definition, filter_pyx_definition = self._filter_definition() # Convolve_code if not self.multiple: convolve_code, sum_code = self._generate_convolve_code() else: convolve_code, sum_code = self._generate_bank_code() if Global._check_paradigm("openmp"): self._generate_omp(filter_definition, filter_pyx_definition, convolve_code, sum_code) elif Global._check_paradigm("cuda"): raise NotImplementedError else: raise NotImplementedError def _generate_omp(self, filter_definition, filter_pyx_definition, convolve_code, sum_code, kernel=True): """ OpenMP code generation. 
""" # Specific template for generation self._specific_template = { # Declare the connectivity matrix 'declare_connectivity_matrix': """ std::vector<int> post_rank; std::vector< std::vector<int> > pre_rank; """ + filter_definition.strip(), # Accessors for the connectivity matrix 'access_connectivity_matrix': """ // Accessor to connectivity data std::vector<int> get_post_rank() { return post_rank; } void set_post_rank(std::vector<int> ranks) { post_rank = ranks; } std::vector< std::vector<int> > get_pre_rank() { return pre_rank; } void set_pre_rank(std::vector< std::vector<int> > ranks) { pre_rank = ranks; } int nb_synapses(int n) { return pre_rank[n].size(); } """ , # Export the connectivity matrix 'export_connectivity': """ # Connectivity vector[int] get_post_rank() vector[vector[int]] get_pre_rank() void set_post_rank(vector[int]) void set_pre_rank(vector[vector[int]]) """, # Arguments to the wrapper constructor 'wrapper_args': "weights, coords", # Initialize the wrapper connectivity matrix 'wrapper_init_connectivity': """ proj%(id_proj)s.set_post_rank(list(range(%(size_post)s))) proj%(id_proj)s.set_pre_rank(coords) """ % {'id_proj': self.id, 'size_post': self.post.size}, # Delays 'wrapper_init_delay': "", # Wrapper access to connectivity matrix 'wrapper_access_connectivity': """ # Connectivity def post_rank(self): return proj%(id_proj)s.get_post_rank() def pre_rank(self, int n): return proj%(id_proj)s.get_pre_rank() """ % {'id_proj': self.id}, # Wrapper access to variables 'wrapper_access_parameters_variables' : "", # Variables for the psp code 'psp_prefix': """ int rk_pre; %(float_prec)s sum=0.0;""" % {'float_prec': Global.config['precision']} } # Kernel-based method: specify w with the correct dimension if kernel: self._specific_template['access_connectivity_matrix'] += """ // Local parameter w %(type_w)s get_w() { return w; } void set_w(%(type_w)s value) { w = value; } """ % {'type_w': filter_definition.replace(' w;', '')} 
self._specific_template['export_connectivity'] += """ # Local variable w %(type_w)s get_w() void set_w(%(type_w)s) """ % {'type_w': filter_pyx_definition.replace(' w', '')} self._specific_template['wrapper_init_connectivity'] += """ proj%(id_proj)s.set_w(weights) """ % {'id_proj': self.id} self._specific_template['wrapper_access_connectivity'] += """ # Local variable w def get_w(self): return proj%(id_proj)s.get_w() def set_w(self, value): proj%(id_proj)s.set_w( value ) def get_dendrite_w(self, int rank): return proj%(id_proj)s.get_w() def set_dendrite_w(self, int rank, value): proj%(id_proj)s.set_w(value) def get_synapse_w(self, int rank_post, int rank_pre): return 0.0 def set_synapse_w(self, int rank_post, int rank_pre, %(float_prec)s value): pass """ % {'id_proj': self.id, 'float_prec': Global.config['precision']} # Override the monitor to avoid recording the weights self._specific_template['monitor_class'] = "" self._specific_template['monitor_export'] = "" self._specific_template['monitor_wrapper'] = "" # OMP code omp_code = "" if Global.config['num_threads'] > 1: omp_code = """ #pragma omp parallel for private(sum, rk_pre, coord) %(psp_schedule)s""" % {'psp_schedule': "" if not 'psp_schedule' in self._omp_config.keys() else self._omp_config['psp_schedule']} # HD ( 16.10.2015 ): # pre-load delayed firing rate in a local array, so we # prevent multiple accesses to pop%(id_pre)s._delayed_r[delay-1] # wheareas delay is set available as variable # TODO HD: wouldn't it be much better to reduce delay globaly, instead of the substraction here??? 
if self.delays > Global.config['dt']: pre_load_r = """ // pre-load delayed firing rate auto delayed_r = pop%(id_pre)s._delayed_r[delay-1]; """% {'id_pre': self.pre.id} else: pre_load_r = "" # Compute sum wsum = """ if ( _transmission && pop%(id_pre)s._active ) { std::vector<int> coord; """ + pre_load_r + """ %(omp_code)s for(int i = 0; i < %(size_post)s; i++){ coord = pre_rank[i]; """ + convolve_code + """ pop%(id_post)s._sum_%(target)s[i] += """ + sum_code + """; } // for } // if """ self._specific_template['psp_code'] = wsum % \ { 'id_proj': self.id, 'target': self.target, 'id_pre': self.pre.id, 'name_pre': self.pre.name, 'size_pre': self.pre.size, 'id_post': self.post.id, 'name_post': self.post.name, 'size_post': self.post.size, 'omp_code': omp_code, 'convolve_code': convolve_code } self._specific_template['size_in_bytes'] = "//TODO:\n" ################################ ### Utilities ################################ def _center_filter(self, i): return int(i/2) if i%2==1 else int(i/2)-1 def _filter_definition(self): dim = self.dim_kernel cpp = Global.config['precision'] pyx = Global.config['precision'] for d in range(dim): cpp = 'std::vector< ' + cpp + ' >' pyx = 'vector[' + pyx + ']' cpp += ' w;' pyx += ' w' return cpp, pyx def _coordinates_to_rank(self, name, geometry): dim = len(geometry) txt = "" for d in range(dim): if txt == "" : # first coordinate is special txt = indices[0] + "_" + name else: txt = str(geometry[d]) + '*(' + txt + ') + ' + indices[d] + '_' + name return txt def _generate_convolve_code(self): # Operation to be performed: sum, max, min, mean operation = self.synapse_type.operation # Main code code = tabify("sum = 0.0;\n", 3) # Generate for loops for dim in range(self.dim_kernel): if dim == self.dim_kernel-1: inner_idx = "" for i in range(self.dim_kernel-1): inner_idx += "["+indices[i]+"_w]" code += "auto inner_line = w"+inner_idx+".data();\n" code += tabify(""" for(int %(index)s_w = 0; %(index)s_w < %(size)s;%(index)s_w++) { """ % { 'index': 
indices[dim], 'size': self.weights.shape[dim]}, dim) # Compute indices if dim < self.dim_kernel: code += tabify( """int %(index)s_pre = coord[%(dim)s] %(operator)s (%(index)s_w - %(center)s);""" % { 'id_proj': self.id, 'index': indices[dim], 'dim': dim, 'operator': '+' , 'center': self._center_filter(self.weights.shape[dim]) }, 1) else: code += tabify( """int %(index)s_pre = coord[%(dim)s];""" % { 'id_proj': self.id, 'index': indices[dim], 'dim': dim }, 1) # Check indices if operation in ['sum', 'mean']: if isinstance(self.padding, str): # 'border' code += tabify(""" if (%(index)s_pre < 0) %(index)s_pre = 0 ; if (%(index)s_pre > %(max_size)s) %(index)s_pre = %(max_size)s ; """ % { 'index': indices[dim], 'dim': dim, 'max_size': self.pre.geometry[dim] -1}, dim) else: code += tabify(""" if ((%(index)s_pre < 0) || (%(index)s_pre > %(max_size)s)){ sum += %(padding)s; continue; } """ % { 'index': indices[dim], 'padding': self.padding, 'max_size': self.pre.geometry[dim] -1}, dim) else: # min, max code += """ if ((%(index)s_pre < 0) || (%(index)s_pre > %(max_size)s)) { continue; } """ % { 'index': indices[dim], 'max_size': self.pre.geometry[dim] -1} # if True, we need to take the last dimension from coords if self.keep_last_dimension: id_dict = { 'index': indices[self.dim_kernel], 'dim': self.dim_kernel } code += "int %(index)s_pre = coord[%(dim)s];" % id_dict # Compute pre-synaptic rank code += tabify(""" rk_pre = %(value)s;""" % {'value': self._coordinates_to_rank('pre', self.pre.geometry)}, dim) # Compute the increment index = "" for dim in range(self.dim_kernel): index += '[' + indices[dim] + '_w]' increment = self.synapse_type.description['psp']['cpp'] % { 'id_pre': self.pre.id, 'id_post': self.post.id, 'local_index': index, 'global_index': '[i]', 'pre_index': '[rk_pre]', 'post_index': '[rk_post]', 'pre_prefix': 'pop'+str(self.pre.id)+'.', 'post_prefix': 'pop'+str(self.post.id)+'.' 
} # Delays if self.delays > Global.config['dt']: increment = increment.replace( 'pop%(id_pre)s.r[rk_pre]' % {'id_pre': self.pre.id}, 'delayed_r[rk_pre]' ) # Apply the operation if operation == "sum": if self.dim_kernel == 1: code += tabify(""" sum += %(increment)s""" % {'increment': increment}, dim) else: code += tabify(""" sum += %(increment)s""" % {'increment': increment.replace('w'+inner_idx, 'inner_line')}, dim) elif operation == "max": code += tabify(""" %(float_prec)s _psp = %(increment)s if(_psp > sum) sum = _psp;""" % {'increment': increment, 'float_prec': Global.config['precision']}, dim) elif operation == "min": code += tabify(""" %(float_prec)s _psp = %(increment)s if(_psp < sum) sum = _psp;""" % {'increment': increment, 'float_prec': Global.config['precision']}, dim) elif operation == "mean": code += tabify(""" sum += %(increment)s""" % {'increment': increment}, dim) else: Global._error('Convolution: Operation', operation, 'is not implemented yet for shared projections.') # Close for loops for dim in range(self.dim_kernel): code += tabify(""" }""", self.dim_kernel-1-dim) impl_code = code % {'id_proj': self.id, 'target': self.target, 'id_pre': self.pre.id, 'name_pre': self.pre.name, 'size_pre': self.pre.size, 'id_post': self.post.id, 'name_post': self.post.name, 'size_post': self.post.size } # sum code self.weights.size if operation == "mean": sum_code = """sum/%(filter_size)s""" % {'filter_size': self.weights.size} else: sum_code = "sum" return impl_code, sum_code def _generate_bank_code(self): # Operation to be performed: sum, max, min, mean operation = self.synapse_type.operation # Main code code = tabify("sum = 0.0;\n", 3) # Generate for loops for dim in range(self.dim_kernel-1): code += tabify(""" for(int %(index)s_w = 0; %(index)s_w < %(size)s;%(index)s_w++) { """ % { 'index': indices[dim], 'size': self.weights.shape[dim+1]}, dim) # Compute indices if dim < self.dim_kernel: code += tabify( """int %(index)s_pre = coord[%(dim)s] %(operator)s 
(%(index)s_w - %(center)s);""" % { 'id_proj': self.id, 'index': indices[dim], 'dim': dim, 'operator': '+', 'center': self._center_filter(self.weights.shape[dim+1]) }, 1) else: code += tabify( """int %(index)s_pre = coord[%(dim)s];""" % { 'id_proj': self.id, 'index': indices[dim], 'dim': dim }, 1) # Check indices if operation in ['sum', 'mean']: if isinstance(self.padding, str): # 'border' code += tabify(""" if (%(index)s_pre < 0) %(index)s_pre = 0 ; if (%(index)s_pre > %(max_size)s) %(index)s_pre = %(max_size)s ; """ % { 'index': indices[dim], 'dim': dim, 'max_size': self.pre.geometry[dim] -1}, 1+dim) else: code += tabify(""" if ((%(index)s_pre < 0) || (%(index)s_pre > %(max_size)s)) { sum += %(padding)s; continue; } """ % { 'index': indices[dim], 'padding': self.padding, 'max_size': self.pre.geometry[dim] -1}, 1+dim) else: # min, max code += tabify(""" if ((%(index)s_pre < 0) || (%(index)s_pre > %(max_size)s)){ continue; } """ % { 'index': indices[dim], 'max_size': self.pre.geometry[dim] -1}, 1+dim) # Compute pre-synaptic rank code +=tabify(""" rk_pre = %(value)s;""" % {'value': self._coordinates_to_rank('pre', self.pre.geometry)}, 1+dim) # Compute the increment index = "[coord["+str(self.dim_pre)+"]]" for dim in range(self.dim_kernel-1): index += '[' + indices[dim] + '_w]' increment = self.synapse_type.description['psp']['cpp'] % { 'id_pre': self.pre.id, 'id_post': self.post.id, 'local_index': index, 'global_index': '[i]', 'pre_index': '[rk_pre]', 'post_index': '[rk_post]', 'pre_prefix': 'pop'+str(self.pre.id)+'.', 'post_prefix': 'pop'+str(self.post.id)+'.'} # Delays if self.delays > Global.config['dt']: increment = increment.replace( 'pop%(id_pre)s.r[rk_pre]' % {'id_pre': self.pre.id}, 'delayed_r[rk_pre]' ) # Apply the operation if operation == "sum": code += tabify(""" sum += %(increment)s""" % {'increment': increment}, 1+dim) elif operation == "max": code += tabify(""" %(float_prec)s _psp = %(increment)s if(_psp > sum) sum = _psp;""" % {'increment': increment, 
'float_prec': Global.config['precision']}, 1+dim) elif operation == "min": code += tabify(""" %(float_prec)s _psp = %(increment)s if(_psp < sum) sum = _psp;""" % {'increment': increment, 'float_prec': Global.config['precision']}, 1+dim) elif operation == "mean": code += tabify(""" sum += %(increment)s""" % {'increment': increment}, 1+dim) else: Global._error('SharedProjection: Operation', operation, 'is not implemented yet for shared projections.') # Close for loops for dim in range(self.dim_kernel-1): code += tabify(""" }""", self.dim_kernel-1-dim) impl_code = code % {'id_proj': self.id, 'target': self.target, 'id_pre': self.pre.id, 'name_pre': self.pre.name, 'size_pre': self.pre.size, 'id_post': self.post.id, 'name_post': self.post.name, 'size_post': self.post.size } # sum code if operation == "mean": sum_code = """sum/%(filter_size)s""" % {'filter_size': self.weights.size} else: sum_code = "sum" return impl_code, sum_code ############################## ## Override useless methods ############################## def _data(self): "Disable saving." desc = {} desc['post_ranks'] = self.post_ranks desc['attributes'] = self.attributes desc['parameters'] = self.parameters desc['variables'] = self.variables desc['dendrites'] = [] desc['number_of_synapses'] = 0 return desc def save_connectivity(self, filename): "Not available." Global._warning('Convolutional projections can not be saved.') def save(self, filename): "Not available." Global._warning('Convolutional projections can not be saved.') def load(self, filename): "Not available." Global._warning('Convolutional projections can not be loaded.') def receptive_fields(self, variable = 'w', in_post_geometry = True): "Not available." Global._warning('Convolutional projections can not display receptive fields.') def connectivity_matrix(self, fill=0.0): "Not available." Global._warning('Convolutional projections can not display connectivity matrices.')
gpl-2.0
ducngtuan/my-python3-koans-solution
python2/koans/about_sets.py
1
1706
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from runner.koan import *


class AboutSets(Koan):
    """Koans exploring python's built-in set type: deduplication,
    unordered equality, arithmetic operators, membership and subset tests."""

    def test_sets_make_keep_lists_unique(self):
        # Converting a list to a set collapses every duplicate entry.
        highlanders = ['MacLeod', 'Ramirez', 'MacLeod', 'Matunas',
                       'MacLeod', 'Malcolm', 'MacLeod']
        there_can_only_be_only_one = set(highlanders)

        expected = {'Malcolm', 'Matunas', 'MacLeod', 'Ramirez'}
        self.assertEqual(expected, there_can_only_be_only_one)

    def test_sets_are_unordered(self):
        # Set equality ignores the order the elements were written in.
        self.assertEqual({'1', '3', '2', '5', '4'}, set('12345'))

    def test_convert_the_set_into_a_list_to_sort_it(self):
        # sorted() accepts a set and hands back an ordered list.
        digits = set('13245')
        self.assertEqual(['1', '2', '3', '4', '5'], sorted(digits))

    # ------------------------------------------------------------------

    def test_set_have_arithmetic_operators(self):
        scotsmen = {'MacLeod', 'Wallace', 'Willie'}
        warriors = {'MacLeod', 'Wallace', 'Leonidas'}

        # difference, union, intersection, symmetric difference
        self.assertEqual({'Willie'}, scotsmen - warriors)
        self.assertEqual({'Willie', 'MacLeod', 'Wallace', 'Leonidas'},
                         scotsmen | warriors)
        self.assertEqual({'MacLeod', 'Wallace'}, scotsmen & warriors)
        self.assertEqual({'Willie', 'Leonidas'}, scotsmen ^ warriors)

    # ------------------------------------------------------------------

    def test_we_can_query_set_membership(self):
        # Duplicate literals in a set display are harmless.
        self.assertEqual(True, 127 in {127, 0, 0, 1})
        # set(str) holds single characters, so a 3-char string is not a member.
        self.assertEqual(True, 'cow' not in set('apocalypse now'))

    def test_we_can_compare_subsets(self):
        # <= and issubset() are two spellings of the same question.
        self.assertEqual(True, set('cake') <= set('cherry cake'))
        self.assertEqual(True, set('cake').issubset(set('cherry cake')))
        # > is a strict-superset test; disjoint sets fail it.
        self.assertEqual(False, set('cake') > set('pie'))
mit
EventGhost/EventGhost
eg/Classes/Translation.py
2
30507
# -*- coding: utf-8 -*- # # This file is part of EventGhost. # Copyright © 2005-2020 EventGhost Project <http://www.eventghost.net/> # # EventGhost is free software: you can redistribute it and/or modify it under # the terms of the GNU General Public License as published by the Free # Software Foundation, either version 2 of the License, or (at your option) # any later version. # # EventGhost is distributed in the hope that it will be useful, but WITHOUT # ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for # more details. # # You should have received a copy of the GNU General Public License along # with EventGhost. If not, see <http://www.gnu.org/licenses/>. import wx LCID_TO_WX = { # Default custom sublanguage # sub: SUBLANG_CUSTOM_DEFAULT primary: LANG_NEUTRAL 0x0C00: wx.LANGUAGE_DEFAULT, # no x-ref wx.LANGUAGE_ABKHAZIAN # no x-ref wx.LANGUAGE_AFAR # South Africa (ZA) # sub: SUBLANG_AFRIKAANS_SOUTH_AFRICA primary: LANG_AFRIKAANS 0x0436: wx.LANGUAGE_AFRIKAANS, # Albania (AL) # sub: SUBLANG_ALBANIAN_ALBANIA primary: LANG_ALBANIAN 0x041C: wx.LANGUAGE_ALBANIAN, # Ethiopia (ET) # sub: SUBLANG_AMHARIC_ETHIOPIA primary: LANG_AMHARIC 0x045E: wx.LANGUAGE_AMHARIC, # Algeria (DZ) # sub: SUBLANG_ARABIC_ALGERIA primary: LANG_ARABIC 0x1401: wx.LANGUAGE_ARABIC_ALGERIA, # Bahrain (BH) # sub: SUBLANG_ARABIC_BAHRAIN primary: LANG_ARABIC 0x3C01: wx.LANGUAGE_ARABIC_BAHRAIN, # Egypt (EG) # sub: SUBLANG_ARABIC_EGYPT primary: LANG_ARABIC 0x0C01: wx.LANGUAGE_ARABIC_EGYPT, # Iraq (IQ) # sub: SUBLANG_ARABIC_IRAQ primary: LANG_ARABIC 0x0801: wx.LANGUAGE_ARABIC_IRAQ, # Jordan (JO) # sub: SUBLANG_ARABIC_JORDAN primary: LANG_ARABIC 0x2C01: wx.LANGUAGE_ARABIC_JORDAN, # Kuwait (KW) # sub: SUBLANG_ARABIC_KUWAIT primary: LANG_ARABIC 0x3401: wx.LANGUAGE_ARABIC_KUWAIT, # Lebanon (LB) # sub: SUBLANG_ARABIC_LEBANON primary: LANG_ARABIC 0x3001: wx.LANGUAGE_ARABIC_LEBANON, # Libya (LY) # sub: 
SUBLANG_ARABIC_LIBYA primary: LANG_ARABIC 0x1001: wx.LANGUAGE_ARABIC_LIBYA, # Morocco (MA) # sub: SUBLANG_ARABIC_MOROCCO primary: LANG_ARABIC 0x1801: wx.LANGUAGE_ARABIC_MOROCCO, # Oman (OM) # sub: SUBLANG_ARABIC_OMAN primary: LANG_ARABIC 0x2001: wx.LANGUAGE_ARABIC_OMAN, # Qatar (QA) # sub: SUBLANG_ARABIC_QATAR primary: LANG_ARABIC 0x4001: wx.LANGUAGE_ARABIC_QATAR, # Saudi Arabia (SA) # sub: SUBLANG_ARABIC_SAUDI_ARABIA primary: LANG_ARABIC 0x0401: wx.LANGUAGE_ARABIC_SAUDI_ARABIA, # no x-ref wx.LANGUAGE_ARABIC_SUDAN # Syria (SY) # sub: SUBLANG_ARABIC_SYRIA primary: LANG_ARABIC 0x2801: wx.LANGUAGE_ARABIC_SYRIA, # Tunisia (TN) # sub: SUBLANG_ARABIC_TUNISIA primary: LANG_ARABIC 0x1C01: wx.LANGUAGE_ARABIC_TUNISIA, # U.A.E. (AE) # sub: SUBLANG_ARABIC_UAE primary: LANG_ARABIC 0x3801: wx.LANGUAGE_ARABIC_UAE, # Yemen (YE) # sub: SUBLANG_ARABIC_YEMEN primary: LANG_ARABIC 0x2401: wx.LANGUAGE_ARABIC_YEMEN, # Armenia (AM) # sub: SUBLANG_ARMENIAN_ARMENIA primary: LANG_ARMENIAN 0x042B: wx.LANGUAGE_ARMENIAN, # India (IN) # sub: SUBLANG_ASSAMESE_INDIA primary: LANG_ASSAMESE 0x044D: wx.LANGUAGE_ASSAMESE, # no x-ref wx.LANGUAGE_ASTURIAN # no x-ref wx.LANGUAGE_AYMARA # Azerbaijan, Cyrillic (AZ) # sub: SUBLANG_AZERI_CYRILLIC primary: LANG_AZERI 0x082C: wx.LANGUAGE_AZERI_CYRILLIC, # Azerbaijan, Latin (AZ) # sub: SUBLANG_AZERI_LATIN primary: LANG_AZERI 0x042C: wx.LANGUAGE_AZERI_LATIN, # Russia (RU) # sub: SUBLANG_BASHKIR_RUSSIA primary: LANG_BASHKIR 0x046D: wx.LANGUAGE_BASHKIR, # Basque (Basque) # sub: SUBLANG_BASQUE_BASQUE primary: LANG_BASQUE 0x042D: wx.LANGUAGE_BASQUE, # Belarus (BY) # sub: SUBLANG_BELARUSIAN_BELARUS primary: LANG_BELARUSIAN 0x0423: wx.LANGUAGE_BELARUSIAN, # no x-ref wx.LANGUAGE_BENGALI # no x-ref wx.LANGUAGE_BHUTANI # no x-ref wx.LANGUAGE_BIHARI # no x-ref wx.LANGUAGE_BISLAMA # Bosnia and Herzegovina, Cyrillic (BA) # sub: primary: LANG_BOSNIAN_NEUTRAL # 0x781A: wx.LANGUAGE_BOSNIAN, # France (FR) # sub: SUBLANG_BRETON_FRANCE primary: LANG_BRETON 0x047E: 
wx.LANGUAGE_BRETON, # Bulgaria (BG) # sub: SUBLANG_BULGARIAN_BULGARIA primary: LANG_BULGARIAN 0x0402: wx.LANGUAGE_BULGARIAN, # no x-ref wx.LANGUAGE_BURMESE # Iraq (IQ) # sub: SUBLANG_CENTRAL_KURDISH_IRAQ primary: LANG_CENTRAL_KURDISH 0x0492: wx.LANGUAGE_KURDISH, # no x-ref wx.LANGUAGE_CAMBODIAN # Spain (ES) # sub: SUBLANG_CATALAN_CATALAN primary: LANG_CATALAN 0x0403: wx.LANGUAGE_CATALAN, # Hong Kong SAR, PRC (HK) # sub: SUBLANG_CHINESE_HONGKONG primary: LANG_CHINESE 0x0C04: wx.LANGUAGE_CHINESE_HONGKONG, # Macao SAR (MO) # sub: SUBLANG_CHINESE_MACAU primary: LANG_CHINESE 0x1404: wx.LANGUAGE_CHINESE_MACAU, # Singapore (SG) # sub: SUBLANG_CHINESE_SINGAPORE primary: LANG_CHINESE 0x1004: wx.LANGUAGE_CHINESE_SINGAPORE, # Simplified (Hans) # sub: SUBLANG_CHINESE_SIMPLIFIED primary: LANG_CHINESE_SIMPLIFIED 0x0004: wx.LANGUAGE_CHINESE_SIMPLIFIED, # no x-ref wx.LANGUAGE_CHINESE_TAIWAN # Traditional (Hant) # sub: SUBLANG_CHINESE_TRADITIONAL primary: LANG_CHINESE_TRADITIONAL 0x7C04: wx.LANGUAGE_CHINESE_TRADITIONAL, # France (FR) # sub: SUBLANG_CORSICAN_FRANCE primary: LANG_CORSICAN 0x0483: wx.LANGUAGE_CORSICAN, # Croatia (HR) # sub: SUBLANG_CROATIAN_CROATIA primary: LANG_CROATIAN 0x041A: wx.LANGUAGE_CROATIAN, # Czech Republic (CZ) # sub: SUBLANG_CZECH_CZECH_REPUBLIC primary: LANG_CZECH 0x0405: wx.LANGUAGE_CZECH, # Denmark (DK) # sub: SUBLANG_DANISH_DENMARK primary: LANG_DANISH 0x0406: wx.LANGUAGE_DANISH, # Netherlands (NL) # sub: SUBLANG_DUTCH primary: LANG_DUTCH 0x0413: wx.LANGUAGE_DUTCH, # Belgium (BE) # sub: SUBLANG_DUTCH_BELGIAN primary: LANG_DUTCH 0x0813: wx.LANGUAGE_DUTCH_BELGIAN, # Australia (AU) # sub: SUBLANG_ENGLISH_AUS primary: LANG_ENGLISH 0x0C09: wx.LANGUAGE_ENGLISH_AUSTRALIA, # Belize (BZ) # sub: SUBLANG_ENGLISH_BELIZE primary: LANG_ENGLISH 0x2809: wx.LANGUAGE_ENGLISH_BELIZE, # no x-ref wx.LANGUAGE_ENGLISH_BOTSWANA # Canada (CA) # sub: SUBLANG_ENGLISH_CAN primary: LANG_ENGLISH 0x1009: wx.LANGUAGE_ENGLISH_CANADA, # Caribbean (029) # sub: SUBLANG_ENGLISH_CARIBBEAN 
primary: LANG_ENGLISH 0x2409: wx.LANGUAGE_ENGLISH_CARIBBEAN, # no x-ref wx.LANGUAGE_ENGLISH_DENMARK # Ireland (IE); see note 3 # sub: SUBLANG_ENGLISH_EIRE primary: LANG_ENGLISH 0x1809: wx.LANGUAGE_ENGLISH_EIRE, # Jamaica (JM) # sub: SUBLANG_ENGLISH_JAMAICA primary: LANG_ENGLISH 0x2009: wx.LANGUAGE_ENGLISH_JAMAICA, # Malaysia (MY) # sub: SUBLANG_ENGLISH_MALAYSIA primary: LANG_ENGLISH 0x4409: wx.LANGUAGE_MALAY, # New Zealand (NZ) # sub: SUBLANG_ENGLISH_NZ primary: LANG_ENGLISH 0x1409: wx.LANGUAGE_ENGLISH_NEW_ZEALAND, # Philippines (PH) # sub: SUBLANG_ENGLISH_PHILIPPINES primary: LANG_ENGLISH 0x3409: wx.LANGUAGE_ENGLISH_PHILIPPINES, # South Africa (ZA) # sub: SUBLANG_ENGLISH_SOUTH_AFRICA primary: LANG_ENGLISH 0x1c09: wx.LANGUAGE_ENGLISH_SOUTH_AFRICA, # Trinidad and Tobago (TT) # sub: SUBLANG_ENGLISH_TRINIDAD primary: LANG_ENGLISH 0x2C09: wx.LANGUAGE_ENGLISH_TRINIDAD, # United Kingdom (GB) # sub: SUBLANG_ENGLISH_UK primary: LANG_ENGLISH 0x0809: wx.LANGUAGE_ENGLISH_UK, # United States (US) # sub: SUBLANG_ENGLISH_US primary: LANG_ENGLISH 0x0409: wx.LANGUAGE_ENGLISH_US, # Zimbabwe (ZW) # sub: SUBLANG_ENGLISH_ZIMBABWE primary: LANG_ENGLISH 0x3009: wx.LANGUAGE_ENGLISH_ZIMBABWE, # no x-ref wx.LANGUAGE_ESPERANTO # Estonia (EE) # sub: SUBLANG_ESTONIAN_ESTONIA primary: LANG_ESTONIAN 0x0425: wx.LANGUAGE_ESTONIAN, # Faroe Islands (FO) # sub: SUBLANG_FAEROESE_FAROE_ISLANDS primary: LANG_FAEROESE 0x0438: wx.LANGUAGE_FAEROESE, # no x-ref wx.LANGUAGE_FARSI # no x-ref wx.LANGUAGE_FIJI # Finland (FI) # sub: SUBLANG_FINNISH_FINLAND primary: LANG_FINNISH 0x040B: wx.LANGUAGE_FINNISH, # Belgium (BE) # sub: SUBLANG_FRENCH_BELGIAN primary: LANG_FRENCH 0x080c: wx.LANGUAGE_FRENCH_BELGIAN, # Canada (CA) # sub: SUBLANG_FRENCH_CANADIAN primary: LANG_FRENCH 0x0C0C: wx.LANGUAGE_FRENCH_CANADIAN, # France (FR) # sub: SUBLANG_FRENCH primary: LANG_FRENCH 0x040c: wx.LANGUAGE_FRENCH, # Luxembourg (LU) # sub: SUBLANG_FRENCH_LUXEMBOURG primary: LANG_FRENCH 0x140C: wx.LANGUAGE_FRENCH_LUXEMBOURG, # Monaco 
(MC) # sub: SUBLANG_FRENCH_MONACO primary: LANG_FRENCH 0x180C: wx.LANGUAGE_FRENCH_MONACO, # Switzerland (CH) # sub: SUBLANG_FRENCH_SWISS primary: LANG_FRENCH 0x100C: wx.LANGUAGE_FRENCH_SWISS, # Netherlands (NL) # sub: SUBLANG_FRISIAN_NETHERLANDS primary: LANG_FRISIAN 0x0462: wx.LANGUAGE_FRISIAN, # Spain (ES) # sub: SUBLANG_GALICIAN_GALICIAN primary: LANG_GALICIAN 0x0456: wx.LANGUAGE_GALICIAN, # Georgia (GE) # sub: SUBLANG_GEORGIAN_GEORGIA primary: LANG_GEORGIAN 0x0437: wx.LANGUAGE_GEORGIAN, # Austria (AT) # sub: SUBLANG_GERMAN_AUSTRIAN primary: LANG_GERMAN 0x0C07: wx.LANGUAGE_GERMAN_AUSTRIAN, # Germany (DE) # sub: SUBLANG_GERMAN primary: LANG_GERMAN 0x0407: wx.LANGUAGE_GERMAN, # no x-ref wx.LANGUAGE_GERMAN_BELGIUM # Liechtenstein (LI) # sub: SUBLANG_GERMAN_LIECHTENSTEIN primary: LANG_GERMAN 0x1407: wx.LANGUAGE_GERMAN_LIECHTENSTEIN, # Luxembourg (LU) # sub: SUBLANG_GERMAN_LUXEMBOURG primary: LANG_GERMAN 0x1007: wx.LANGUAGE_GERMAN_LUXEMBOURG, # Switzerland (CH) # sub: SUBLANG_GERMAN_SWISS primary: LANG_GERMAN 0x0807: wx.LANGUAGE_GERMAN_SWISS, # Greece (GR) # sub: SUBLANG_GREEK_GREECE primary: LANG_GREEK 0x0408: wx.LANGUAGE_GREEK, # Greenland (GL) # sub: SUBLANG_GREENLANDIC_GREENLAND primary: LANG_GREENLANDIC 0x046F: wx.LANGUAGE_GREENLANDIC, # no x-ref wx.LANGUAGE_GUARANI # India (IN) # sub: SUBLANG_GUJARATI_INDIA primary: LANG_GUJARATI 0x0447: wx.LANGUAGE_GUJARATI, # Nigeria (NG) # sub: SUBLANG_HAUSA_NIGERIA_LATIN primary: LANG_HAUSA 0x0468: wx.LANGUAGE_HAUSA, # Israel (IL) # sub: SUBLANG_HEBREW_ISRAEL primary: LANG_HEBREW 0x040D: wx.LANGUAGE_HEBREW, # India (IN) # sub: SUBLANG_HINDI_INDIA primary: LANG_HINDI 0x0439: wx.LANGUAGE_HINDI, # Hungary (HU) # sub: SUBLANG_HUNGARIAN_HUNGARY primary: LANG_HUNGARIAN 0x040E: wx.LANGUAGE_HUNGARIAN, # Iceland (IS) # sub: SUBLANG_ICELANDIC_ICELAND primary: LANG_ICELANDIC 0x040F: wx.LANGUAGE_ICELANDIC, # Indonesia (ID) # sub: SUBLANG_INDONESIAN_INDONESIA primary: LANG_INDONESIAN 0x0421: wx.LANGUAGE_INDONESIAN, # no x-ref 
wx.LANGUAGE_INTERLINGUA # no x-ref wx.LANGUAGE_INTERLINGUE # Canada (CA), Latin # sub: SUBLANG_INUKTITUT_CANADA_LATIN primary: LANG_INUKTITUT 0x085D: wx.LANGUAGE_INUKTITUT, # no x-ref wx.LANGUAGE_INUPIAK # Ireland (IE) # sub: SUBLANG_IRISH_IRELAND primary: LANG_IRISH 0x083C: wx.LANGUAGE_IRISH, # South Africa (ZA) # sub: SUBLANG_XHOSA_SOUTH_AFRICA primary: LANG_XHOSA 0x0434: wx.LANGUAGE_XHOSA, # South Africa (ZA) # sub: SUBLANG_ZULU_SOUTH_AFRICA primary: LANG_ZULU 0x0435: wx.LANGUAGE_ZULU, # Italy (IT) # sub: SUBLANG_ITALIAN primary: LANG_ITALIAN 0x0410: wx.LANGUAGE_ITALIAN, # Switzerland (CH) # sub: SUBLANG_ITALIAN_SWISS primary: LANG_ITALIAN 0x0810: wx.LANGUAGE_ITALIAN_SWISS, # Japan (JP) # sub: SUBLANG_JAPANESE_JAPAN primary: LANG_JAPANESE 0x0411: wx.LANGUAGE_JAPANESE, # no x-ref wx.LANGUAGE_JAVANESE # no x-ref wx.LANGUAGE_KABYLE # India (IN) # sub: SUBLANG_KANNADA_INDIA primary: LANG_KANNADA 0x044B: wx.LANGUAGE_KANNADA, # no x-ref wx.LANGUAGE_KASHMIRI # (reserved) # sub: SUBLANG_KASHMIRI_INDIA primary: LANG_KASHMIRI # ______: wx.LANGUAGE_KASHMIRI_INDIA, # Kazakhstan (KZ) # sub: SUBLANG_KAZAK_KAZAKHSTAN primary: LANG_KAZAK 0x043F: wx.LANGUAGE_KAZAKH, # no x-ref wx.LANGUAGE_KERNEWEK # Rwanda (RW) # sub: SUBLANG_KINYARWANDA_RWANDA primary: LANG_KINYARWANDA 0x0487: wx.LANGUAGE_KINYARWANDA, # no x-ref wx.LANGUAGE_KIRGHIZ # no x-ref wx.LANGUAGE_KIRUNDI # India (IN) # sub: SUBLANG_KONKANI_INDIA primary: LANG_KONKANI 0x0457: wx.LANGUAGE_KONKANI, # Korea (KR) # sub: SUBLANG_KOREAN primary: LANG_KOREAN 0x0412: wx.LANGUAGE_KOREAN, # no x-ref wx.LANGUAGE_LAOTHIAN # Latvia (LV) # sub: SUBLANG_LATVIAN_LATVIA primary: LANG_LATVIAN 0x0426: wx.LANGUAGE_LATVIAN, # no x-ref wx.LANGUAGE_LINGALA # Lithuanian (LT); see note 5 # sub: SUBLANG_LITHUANIAN_LITHUANIA primary: LANG_LITHUANIAN 0x0427: wx.LANGUAGE_LITHUANIAN, # Macedonia (FYROM) (MK) # sub: SUBLANG_MACEDONIAN_MACEDONIA primary: LANG_MACEDONIAN 0x042F: wx.LANGUAGE_MACEDONIAN, # Brunei Darassalam (BN) # sub: 
SUBLANG_MALAY_BRUNEI_DARUSSALAM primary: LANG_MALAY 0x083E: wx.LANGUAGE_MALAY_BRUNEI_DARUSSALAM, # Malaysia (MY) # sub: SUBLANG_MALAY_MALAYSIA primary: LANG_MALAY 0x043e: wx.LANGUAGE_MALAY_MALAYSIA, # no x-ref wx.LANGUAGE_MALAGASY # India (IN) # sub: SUBLANG_MALAYALAM_INDIA primary: LANG_MALAYALAM 0x044C: wx.LANGUAGE_MALAYALAM, # Malta (MT) # sub: SUBLANG_MALTESE_MALTA primary: LANG_MALTESE 0x043A: wx.LANGUAGE_MALTESE, # no x-ref wx.LANGUAGE_MANIPURI # New Zealand (NZ) # sub: SUBLANG_MAORI_NEW_ZEALAND primary: LANG_MAORI 0x0481: wx.LANGUAGE_MAORI, # India (IN) # sub: SUBLANG_MARATHI_INDIA primary: LANG_MARATHI 0x044E: wx.LANGUAGE_MARATHI, # no x-ref wx.LANGUAGE_MOLDAVIAN # Mongolia, Mong (MN) # sub: SUBLANG_MONGOLIAN_PRC primary: LANG_MONGOLIAN 0x0850: wx.LANGUAGE_MONGOLIAN, # no x-ref wx.LANGUAGE_NAURU # Nepal (NP) # sub: SUBLANG_NEPALI_NEPAL primary: LANG_NEPALI 0x0461: wx.LANGUAGE_NEPALI, # no x-ref wx.LANGUAGE_NEPALI_INDIA # Bokmål, Norway (NO) # sub: SUBLANG_NORWEGIAN_BOKMAL primary: LANG_NORWEGIAN 0x0414: wx.LANGUAGE_NORWEGIAN_BOKMAL, # Nynorsk, Norway (NO) # sub: SUBLANG_NORWEGIAN_NYNORSK primary: LANG_NORWEGIAN 0x0814: wx.LANGUAGE_NORWEGIAN_NYNORSK, # France (FR) # sub: SUBLANG_OCCITAN_FRANCE primary: LANG_OCCITAN 0x0482: wx.LANGUAGE_OCCITAN, # India (IN) # sub: SUBLANG_ORIYA_INDIA primary: LANG_ORIYA 0x0448: wx.LANGUAGE_ORIYA, # no x-ref wx.LANGUAGE_OROMO # Afghanistan (AF) # sub: SUBLANG_PASHTO_AFGHANISTAN primary: LANG_PASHTO 0x0463: wx.LANGUAGE_PASHTO, # Poland (PL) # sub: SUBLANG_POLISH_POLAND primary: LANG_POLISH 0x0415: wx.LANGUAGE_POLISH, # Brazil (BR) # sub: SUBLANG_PORTUGUESE_BRAZILIAN primary: LANG_PORTUGUESE 0x0416: wx.LANGUAGE_PORTUGUESE_BRAZILIAN, # Portugal (PT); see note 7 # sub: SUBLANG_PORTUGUESE primary: LANG_PORTUGUESE 0x0816: wx.LANGUAGE_PORTUGUESE, # India, Gurmukhi script (IN) # sub: SUBLANG_PUNJABI_INDIA primary: LANG_PUNJABI 0x0446: wx.LANGUAGE_PUNJABI, # Bolivia (BO) # sub: SUBLANG_QUECHUA_BOLIVIA primary: LANG_QUECHUA 0x046B: 
wx.LANGUAGE_QUECHUA, # no x-ref wx.LANGUAGE_RHAETO_ROMANCE # Romania (RO) # sub: SUBLANG_ROMANIAN_ROMANIA primary: LANG_ROMANIAN 0x0418: wx.LANGUAGE_ROMANIAN, # Russia (RU) # sub: SUBLANG_RUSSIAN_RUSSIA primary: LANG_RUSSIAN 0x0419: wx.LANGUAGE_RUSSIAN, # no x-ref wx.LANGUAGE_RUSSIAN_UKRAINE # Inari, Finland (FI) # sub: SUBLANG_SAMI_INARI_FINLAND primary: LANG_SAMI 0x243B: wx.LANGUAGE_SAMI, # no x-ref wx.LANGUAGE_SAMOAN # no x-ref wx.LANGUAGE_SANGHO # India (IN) # sub: SUBLANG_SANSKRIT_INDIA primary: LANG_SANSKRIT 0x044F: wx.LANGUAGE_SANSKRIT, # no x-ref wx.LANGUAGE_SCOTS_GAELIC # Serbian (sr) # sub: primary: LANG_SERBIAN 0x1a: wx.LANGUAGE_SERBIAN, # no x-ref wx.LANGUAGE_SERBO_CROATIAN # Serbia and Montenegro (former), Cyrillic (CS) # sub: SUBLANG_SERBIAN_CYRILLIC primary: LANG_SERBIAN 0x0C1A: wx.LANGUAGE_SERBIAN_CYRILLIC, # Serbia and Montenegro (former), Latin (CS) # sub: SUBLANG_SERBIAN_LATIN primary: LANG_SERBIAN 0x081A: wx.LANGUAGE_SERBIAN_LATIN, # no x-ref wx.LANGUAGE_SESOTHO # no x-ref wx.LANGUAGE_SETSWANA # no x-ref wx.LANGUAGE_SHONA # (reserved) # sub: primary: LANG_TSWANA 0x59: wx.LANGUAGE_SINDHI, # Sri Lanka (LK) # sub: SUBLANG_SINHALESE_SRI_LANKA primary: LANG_SINHALESE 0x045B: wx.LANGUAGE_SINHALESE, # no x-ref wx.LANGUAGE_SISWATI # Slovakia (SK) # sub: SUBLANG_SLOVAK_SLOVAKIA primary: LANG_SLOVAK 0x041B: wx.LANGUAGE_SLOVAK, # Slovenia (SI) # sub: SUBLANG_SLOVENIAN_SLOVENIA primary: LANG_SLOVENIAN 0x0424: wx.LANGUAGE_SLOVENIAN, # no x-ref wx.LANGUAGE_SOMALI # Spain, Traditional Sort (ES) # sub: SUBLANG_SPANISH primary: LANG_SPANISH 0x040A: wx.LANGUAGE_SPANISH, # Bolivia (BO) # sub: SUBLANG_SPANISH_BOLIVIA primary: LANG_SPANISH 0x400A: wx.LANGUAGE_SPANISH_BOLIVIA, # Chile (CL) # sub: SUBLANG_SPANISH_CHILE primary: LANG_SPANISH 0x340A: wx.LANGUAGE_SPANISH_CHILE, # Colombia (CO) # sub: SUBLANG_SPANISH_COLOMBIA primary: LANG_SPANISH 0x240A: wx.LANGUAGE_SPANISH_COLOMBIA, # Costa Rica (CR) # sub: SUBLANG_SPANISH_COSTA_RICA primary: LANG_SPANISH 0x140A: 
wx.LANGUAGE_SPANISH_COSTA_RICA, # Dominican Republic (DO) # sub: SUBLANG_SPANISH_DOMINICAN_REPUBLIC primary: LANG_SPANISH 0x1C0A: wx.LANGUAGE_SPANISH_DOMINICAN_REPUBLIC, # Ecuador (EC) # sub: SUBLANG_SPANISH_ECUADOR primary: LANG_SPANISH 0x300A: wx.LANGUAGE_SPANISH_ECUADOR, # El Salvador (SV) # sub: SUBLANG_SPANISH_EL_SALVADOR primary: LANG_SPANISH 0x440A: wx.LANGUAGE_SPANISH_EL_SALVADOR, # Guatemala (GT) # sub: SUBLANG_SPANISH_GUATEMALA primary: LANG_SPANISH 0x100A: wx.LANGUAGE_SPANISH_GUATEMALA, # Honduras (HN) # sub: SUBLANG_SPANISH_HONDURAS primary: LANG_SPANISH 0x480A: wx.LANGUAGE_SPANISH_HONDURAS, # Mexico (MX) # sub: SUBLANG_SPANISH_MEXICAN primary: LANG_SPANISH 0x080A: wx.LANGUAGE_SPANISH_MEXICAN, # Nicaragua (NI) # sub: SUBLANG_SPANISH_NICARAGUA primary: LANG_SPANISH 0x4C0A: wx.LANGUAGE_SPANISH_NICARAGUA, # Panama (PA) # sub: SUBLANG_SPANISH_PANAMA primary: LANG_SPANISH 0x180A: wx.LANGUAGE_SPANISH_PANAMA, # Paraguay (PY) # sub: SUBLANG_SPANISH_PARAGUAY primary: LANG_SPANISH 0x3C0A: wx.LANGUAGE_SPANISH_PARAGUAY, # Peru (PE) # sub: SUBLANG_SPANISH_PERU primary: LANG_SPANISH 0x280A: wx.LANGUAGE_SPANISH_PERU, # Puerto Rico (PR) # sub: SUBLANG_SPANISH_PUERTO_RICO primary: LANG_SPANISH 0x500A: wx.LANGUAGE_SPANISH_PUERTO_RICO, # Spain, Modern Sort (ES) # sub: SUBLANG_SPANISH_MODERN primary: LANG_SPANISH 0x0C0A: wx.LANGUAGE_SPANISH_MODERN, # Argentina (AR) # sub: SUBLANG_SPANISH_ARGENTINA primary: LANG_SPANISH 0x2C0A: wx.LANGUAGE_SPANISH_ARGENTINA, # United States (US) # sub: SUBLANG_SPANISH_US primary: LANG_SPANISH 0x540A: wx.LANGUAGE_SPANISH_US, # Uruguay (UY) # sub: SUBLANG_SPANISH_URUGUAY primary: LANG_SPANISH 0x380A: wx.LANGUAGE_SPANISH_URUGUAY, # Venezuela (VE) # sub: SUBLANG_SPANISH_VENEZUELA primary: LANG_SPANISH 0x200A: wx.LANGUAGE_SPANISH_VENEZUELA, # no x-ref wx.LANGUAGE_SUNDANESE # Kenya (KE) # sub: SUBLANG_SWAHILI primary: LANG_SWAHILI 0x0441: wx.LANGUAGE_SWAHILI, # Finland (FI) # sub: SUBLANG_SWEDISH_FINLAND primary: LANG_SWEDISH 0x081D: 
wx.LANGUAGE_SWEDISH_FINLAND, # Sweden (SE); see note 8 # sub: SUBLANG_SWEDISH primary: LANG_SWEDISH 0x041D: wx.LANGUAGE_SWEDISH, # no x-ref wx.LANGUAGE_TAGALOG # Tajikistan, Cyrillic (TJ) # sub: SUBLANG_TAJIK_TAJIKISTAN primary: LANG_TAJIK 0x0428: wx.LANGUAGE_TAJIK, # India (IN) # sub: SUBLANG_TAMIL_INDIA primary: LANG_TAMIL 0x0449: wx.LANGUAGE_TAMIL, # Russia (RU) # sub: SUBLANG_TATAR_RUSSIA primary: LANG_TATAR 0x0444: wx.LANGUAGE_TATAR, # India (IN) # sub: SUBLANG_TELUGU_INDIA primary: LANG_TELUGU 0x044A: wx.LANGUAGE_TELUGU, # Thailand (TH) # sub: SUBLANG_THAI_THAILAND primary: LANG_THAI 0x041E: wx.LANGUAGE_THAI, # PRC (CN) # sub: SUBLANG_TIBETAN_PRC primary: LANG_TIBETAN 0x0451: wx.LANGUAGE_TIBETAN, # Eritrea (ER) # sub: SUBLANG_TIGRINYA_ERITREA primary: LANG_TIGRINYA 0x0873: wx.LANGUAGE_TIGRINYA, # no x-ref wx.LANGUAGE_TONGA # no x-ref wx.LANGUAGE_TSONGA # Turkey (TR) # sub: SUBLANG_TURKISH_TURKEY primary: LANG_TURKISH 0x041F: wx.LANGUAGE_TURKISH, # Turkmenistan (TM) # sub: SUBLANG_TURKMEN_TURKMENISTAN primary: LANG_TURKMEN 0x0442: wx.LANGUAGE_TURKMEN, # no x-ref wx.LANGUAGE_TWI # Ukraine (UA) # sub: SUBLANG_UKRAINIAN_UKRAINE primary: LANG_UKRAINIAN 0x0422: wx.LANGUAGE_UKRAINIAN, # (reserved) # sub: SUBLANG_URDU_INDIA primary: LANG_URDU 0x0820: wx.LANGUAGE_URDU_INDIA, # Urdu (ur) # sub: primary: LANG_URDU 0x20: wx.LANGUAGE_URDU, # Pakistan (PK) # sub: SUBLANG_URDU_PAKISTAN primary: LANG_URDU 0x0420: wx.LANGUAGE_URDU_PAKISTAN, # PRC (CN) # sub: SUBLANG_UIGHUR_PRC primary: LANG_UIGHUR 0x0480: wx.LANGUAGE_UIGHUR, # Uzbek (uz) # sub: primary: LANG_UZBEK 0x43: wx.LANGUAGE_UZBEK, # Uzbekistan, Cyrillic (UZ) # sub: SUBLANG_UZBEK_CYRILLIC primary: LANG_UZBEK 0x0843: wx.LANGUAGE_UZBEK_CYRILLIC, # Uzbekistan, Latin (UZ) # sub: SUBLANG_UZBEK_LATIN primary: LANG_UZBEK 0x0443: wx.LANGUAGE_UZBEK_LATIN, # Valencia (ES-Valencia) # sub: SUBLANG_VALENCIAN_VALENCIA primary: LANG_VALENCIAN 0x0803: wx.LANGUAGE_VALENCIAN, # Vietnam (VN) # sub: SUBLANG_VIETNAMESE_VIETNAM primary: 
LANG_VIETNAMESE 0x042A: wx.LANGUAGE_VIETNAMESE, # no x-ref wx.LANGUAGE_VOLAPUK # United Kingdom (GB) # sub: SUBLANG_WELSH_UNITED_KINGDOM primary: LANG_WELSH 0x0452: wx.LANGUAGE_WELSH, # Senegal (SN) # sub: SUBLANG_WOLOF_SENEGAL primary: LANG_WOLOF 0x0488: wx.LANGUAGE_WOLOF, # no x-ref wx.LANGUAGE_YIDDISH # Nigeria (NG) # sub: SUBLANG_YORUBA_NIGERIA primary: LANG_YORUBA 0x046A: wx.LANGUAGE_YORUBA, # no x-ref wx.LANGUAGE_ZHUANG } class Translation: languageNames = { 'aa_AA': u'Afar', 'ab_AB': u'Abkhazian', 'af_AF': u'Afrikaans', 'am_AM': u'አማርኛ', 'ar_AE': u'العربية - الامارات العربية المتحدة', 'ar_AR': u'Arabic', 'ar_BH': u'العربية - البحرين', 'ar_DZ': u'العربية - الجزائر', 'ar_EG': u'اللغة العربية - مصر', 'ar_IQ': u'عربي - العراق', 'ar_JO': u'عربي - الاردن', 'ar_KW': u'العربية - الكويت', 'ar_LB': u'العربية - لبنان', 'ar_LY': u'العربية - ليبيا', 'ar_MA': u'اللغة العربية - المغرب', 'ar_OM': u'العربية - عمان', 'ar_QA': u'العربية - قطر', 'ar_SA': u'العربية - السعودية', 'ar_SY': u'العربية - سوريا', 'ar_TN': u'العربية - تونس', 'ar_YE': u'العربية - اليمن', 'as_AS': u'Assamese', 'ay_AY': u'Aymara', 'az_AZ': u'Azeri - Latin', 'ba_BA': u'Bashkir', 'be_BE': u'беларускі', 'bg_BG': u'български', 'bh_BH': u'Bihari', 'bi_BI': u'Bislama', 'bn_BN': u'Bengali', 'bo_BO': u'Tibetan', 'br_BR': u'Breton', 'bs_BS': u'Bosanski', 'ca_CA': u'Català', 'co_CO': u'Corsican', 'cs_CS': u'čeština', 'cy_CY': u'Cymraeg', 'da_DA': u'dansk', 'de_AT': u'Deutsch - Österreich', 'de_CH': u'Deutsch - Schweiz', 'de_DE': u'Deutschland', 'de_LI': u'Deutsch - Liechtenstein', 'de_LU': u'Deutsch - Luxemburg', 'dv_DV': u'Divehi; Dhivehi; Maldivian', 'dz_DZ': u'Bhutani', 'el_EL': u'Ελληνικά', 'en_AU': u'English - Australia', 'en_BZ': u'English - Belize', 'en_CA': u'English - Canada', 'en_CB': u'English - Caribbean', 'en_EN': u'English', 'en_GB': u'English - Great Britain', 'en_IE': u'English - Ireland', 'en_IN': u'English - India', 'en_JM': u'English - Jamaica', 'en_NZ': u'English - New Zealand', 'en_PH': 
u'English - Phillippines', 'en_TT': u'English - Trinidad', 'en_US': u'English - United States', 'en_ZA': u'English - Southern Africa', 'eo_EO': u'Esperanto', 'es_AR': u'Español - argentina', 'es_BO': u'Español - bolivia', 'es_CL': u'Español - chile', 'es_CO': u'Español - colombia', 'es_CR': u'Español - costa rica', 'es_DO': u'Español - republica dominicana', 'es_EC': u'Español - ecuador', 'es_ES': u'Español - españa (tradicional)', 'es_GT': u'Español - guatemala', 'es_HN': u'Español - honduras', 'es_MX': u'Español - mexico', 'es_NI': u'Español - nicaragua', 'es_PA': u'Español - panama', 'es_PE': u'Español - peru', 'es_PR': u'Español - puerto rico', 'es_PY': u'Español - paraguay', 'es_SV': u'Español - el salvador', 'es_UY': u'Español - uruguay', 'es_VE': u'Español - venezuela', 'et_ET': u'Eesti keel', 'eu_EU': u'Euskal', 'fa_FA': u'فارسی - فارسی', 'fi_FI': u'Suomalainen', 'fj_FJ': u'Fiji', 'fo_FO': u'føroyskt', 'fr_BE': u'Français - Belgique', 'fr_CA': u'Français - Canada', 'fr_CH': u'Français - Suisse', 'fr_FR': u'France francaise', 'fr_LU': u'Français - Luxembourg', 'fy_FY': u'Frisian', 'ga_GA': u'Irish', 'gd_GD': u'Gàidhlig - Alba', 'gd_IE': u'Gàidhlig - Èirinn', 'gl_GL': u'Galician', 'gn_GN': u'Guarani - Paraguay', 'gu_GU': u'ગુજરાતી', 'ha_HA': u'Hausa', 'he_HE': u'עברית', 'hi_HI': u'हिंदी', 'hr_HR': u'Hrvatski', 'hu_HU': u'Magyar', 'hy_HY': u'հայերեն', 'ia_IA': u'Interlingua', 'id_ID': u'bahasa Indonesia', 'ie_IE': u'Interlingue', 'ik_IK': u'Inupiak', 'in_IN': u'Indonesian', 'is_IS': u'Íslensku', 'it_CH': u'Italiano - Svizzera', 'it_IT': u'Italiano - Italia', 'iw_IW': u'Hebrew', 'ja_JA': u'日本人', 'ji_JI': u'ייִדיש', 'jw_JW': u'Javanese', 'ka_KA': u'Georgian', 'kk_KK': u'Қазақша', 'kl_KL': u'Greenlandic', 'km_KM': u'ភាសាខ្មែរ', 'kn_KN': u'ಕನ್ನಡ', 'ko_KO': u'한국어', 'ks_KS': u'Kashmiri', 'ku_KU': u'Kurdish', 'ky_KY': u'Kirghiz', 'la_LA': u'Latine', 'ln_LN': u'Lingala', 'lo_LO': u'ລາວ', 'lt_LT': u'Lietuviškai', 'lv_LV': u'Latviešu', 'mg_MG': u'Malagasy', 'mi_MI': 
u'Maori', 'mk_MK': u'БЈР Македонија', 'ml_ML': u'മലയാളം', 'mn_MN': u'Монгол хэл', 'mo_MO': u'Moldavian', 'mr_MR': u'मराठी', 'ms_BN': u'Malay - Brunei', 'ms_MS': u'Malay', 'ms_MY': u'Malay - Malaysia', 'mt_MT': u'Malti', 'my_MY': u'Burmese', 'na_NA': u'Nauru', 'ne_NE': u'नेपाली', 'nl_NL': u'Nederlands', 'no_NO': u'Norwegian - Nynorsk', 'oc_OC': u'Occitan', 'om_OM': u'Oromo/Afan', 'or_OR': u'Oriya', 'pa_PA': u'ਪੰਜਾਬੀ', 'pl_PL': u'Polskie', 'ps_PS': u'Pashto/Pushto', 'pt_BR': u'Português - Brasil', 'pt_PT': u'Português - portugal', 'qu_QU': u'Quechua', 'rm_RM': u'Raeto-Romance', 'rn_RN': u'Kirundi', 'ro_MO': u'Romanian - Moldova', 'ro_RO': u'Romanian - Romania', 'ru_MO': u'Россия - Молдова', 'ru_RU': u'русский', 'rw_RW': u'Kinyarwanda', 'sa_SA': u'Sanskrit', 'sb_SB': u'Sorbian', 'sd_SD': u'سنڌي', 'sg_SG': u'Sangro', 'sh_SH': u'Serbo-Croatian', 'si_SI': u'සිංහල', 'sk_SK': u'slovenský', 'sl_SL': u'Slovenščina', 'sm_SM': u'Samoan', 'sn_SN': u'Shona', 'so_SO': u'Somali', 'sq_SQ': u'shqiptar', 'sr_SP': u'Serbian - Latin', 'sr_SR': u'Serbian', 'ss_SS': u'Siswati', 'st_ST': u'Sesotho', 'su_SU': u'Sudanese', 'sv_FI': u'Svenska - finska', 'sv_SE': u'Svenska - sverige', 'sv_SV': u'Swedish', 'sw_SW': u'Kiswahili', 'ta_TA': u'தமிழ்', 'te_TE': u'తెలుగు', 'tg_TG': u'Тоҷикӣ', 'th_TH': u'ไทย', 'ti_TI': u'Tigrinya', 'tk_TK': u'Turkmen', 'tl_TL': u'Tagalog', 'tn_TN': u'Setsuana', 'to_TO': u'Tonga', 'tr_TR': u'Türk', 'ts_TS': u'Tsonga', 'tt_TT': u'Tatar', 'tw_TW': u'Twi', 'uk_UK': u'Українська', 'ur_UR': u'اردو', 'uz_UZ': u'Uzbecorum - Latina', 'vi_VI': u'Tiếng Việt', 'vo_VO': u'Volapuk', 'wo_WO': u'Wolof', 'xh_XH': u'IsiXhosa', 'yo_YO': u'Yorùbá', 'zh_HK': u'中國 - 香港特別行政區', 'zh_MO': u'中國 - 澳門特區', 'zh_SG': u'中文 - 新加坡', 'zh_TW': u'中文 - 台灣', 'zh_ZH': u'Chinese', 'zu_ZU': u'Zulu', }
gpl-2.0
chatcannon/scipy
scipy/signal/tests/test_windows.py
74
9476
from __future__ import division, print_function, absolute_import import warnings import numpy as np from numpy import array from numpy.testing import (assert_array_almost_equal, assert_array_equal, run_module_suite, assert_raises, assert_allclose) from scipy import signal window_funcs = [ ('boxcar', ()), ('triang', ()), ('parzen', ()), ('bohman', ()), ('blackman', ()), ('nuttall', ()), ('blackmanharris', ()), ('flattop', ()), ('bartlett', ()), ('hanning', ()), ('barthann', ()), ('hamming', ()), ('kaiser', (1,)), ('gaussian', (0.5,)), ('general_gaussian', (1.5, 2)), ('chebwin', (1,)), ('slepian', (2,)), ('cosine', ()), ('hann', ()), ('exponential', ()), ('tukey', (0.5,)), ] cheb_odd_true = array([0.200938, 0.107729, 0.134941, 0.165348, 0.198891, 0.235450, 0.274846, 0.316836, 0.361119, 0.407338, 0.455079, 0.503883, 0.553248, 0.602637, 0.651489, 0.699227, 0.745266, 0.789028, 0.829947, 0.867485, 0.901138, 0.930448, 0.955010, 0.974482, 0.988591, 0.997138, 1.000000, 0.997138, 0.988591, 0.974482, 0.955010, 0.930448, 0.901138, 0.867485, 0.829947, 0.789028, 0.745266, 0.699227, 0.651489, 0.602637, 0.553248, 0.503883, 0.455079, 0.407338, 0.361119, 0.316836, 0.274846, 0.235450, 0.198891, 0.165348, 0.134941, 0.107729, 0.200938]) cheb_even_true = array([0.203894, 0.107279, 0.133904, 0.163608, 0.196338, 0.231986, 0.270385, 0.311313, 0.354493, 0.399594, 0.446233, 0.493983, 0.542378, 0.590916, 0.639071, 0.686302, 0.732055, 0.775783, 0.816944, 0.855021, 0.889525, 0.920006, 0.946060, 0.967339, 0.983557, 0.994494, 1.000000, 1.000000, 0.994494, 0.983557, 0.967339, 0.946060, 0.920006, 0.889525, 0.855021, 0.816944, 0.775783, 0.732055, 0.686302, 0.639071, 0.590916, 0.542378, 0.493983, 0.446233, 0.399594, 0.354493, 0.311313, 0.270385, 0.231986, 0.196338, 0.163608, 0.133904, 0.107279, 0.203894]) class TestChebWin(object): def test_cheb_odd_high_attenuation(self): with warnings.catch_warnings(): warnings.simplefilter("ignore", UserWarning) cheb_odd = signal.chebwin(53, at=-40) 
assert_array_almost_equal(cheb_odd, cheb_odd_true, decimal=4) def test_cheb_even_high_attenuation(self): with warnings.catch_warnings(): warnings.simplefilter("ignore", UserWarning) cheb_even = signal.chebwin(54, at=-40) assert_array_almost_equal(cheb_even, cheb_even_true, decimal=4) def test_cheb_odd_low_attenuation(self): cheb_odd_low_at_true = array([1.000000, 0.519052, 0.586405, 0.610151, 0.586405, 0.519052, 1.000000]) with warnings.catch_warnings(): warnings.simplefilter("ignore", UserWarning) cheb_odd = signal.chebwin(7, at=-10) assert_array_almost_equal(cheb_odd, cheb_odd_low_at_true, decimal=4) def test_cheb_even_low_attenuation(self): cheb_even_low_at_true = array([1.000000, 0.451924, 0.51027, 0.541338, 0.541338, 0.51027, 0.451924, 1.000000]) with warnings.catch_warnings(): warnings.simplefilter("ignore", UserWarning) cheb_even = signal.chebwin(8, at=-10) assert_array_almost_equal(cheb_even, cheb_even_low_at_true, decimal=4) exponential_data = { (4, None, 0.2, False): array([4.53999297624848542e-05, 6.73794699908546700e-03, 1.00000000000000000e+00, 6.73794699908546700e-03]), (4, None, 0.2, True): array([0.00055308437014783, 0.0820849986238988, 0.0820849986238988, 0.00055308437014783]), (4, None, 1.0, False): array([0.1353352832366127, 0.36787944117144233, 1., 0.36787944117144233]), (4, None, 1.0, True): array([0.22313016014842982, 0.60653065971263342, 0.60653065971263342, 0.22313016014842982]), (4, 2, 0.2, False): array([4.53999297624848542e-05, 6.73794699908546700e-03, 1.00000000000000000e+00, 6.73794699908546700e-03]), (4, 2, 0.2, True): None, (4, 2, 1.0, False): array([0.1353352832366127, 0.36787944117144233, 1., 0.36787944117144233]), (4, 2, 1.0, True): None, (5, None, 0.2, False): array([4.53999297624848542e-05, 6.73794699908546700e-03, 1.00000000000000000e+00, 6.73794699908546700e-03, 4.53999297624848542e-05]), (5, None, 0.2, True): array([4.53999297624848542e-05, 6.73794699908546700e-03, 1.00000000000000000e+00, 6.73794699908546700e-03, 
4.53999297624848542e-05]), (5, None, 1.0, False): array([0.1353352832366127, 0.36787944117144233, 1., 0.36787944117144233, 0.1353352832366127]), (5, None, 1.0, True): array([0.1353352832366127, 0.36787944117144233, 1., 0.36787944117144233, 0.1353352832366127]), (5, 2, 0.2, False): array([4.53999297624848542e-05, 6.73794699908546700e-03, 1.00000000000000000e+00, 6.73794699908546700e-03, 4.53999297624848542e-05]), (5, 2, 0.2, True): None, (5, 2, 1.0, False): array([0.1353352832366127, 0.36787944117144233, 1., 0.36787944117144233, 0.1353352832366127]), (5, 2, 1.0, True): None } def test_exponential(): for k, v in exponential_data.items(): if v is None: assert_raises(ValueError, signal.exponential, *k) else: win = signal.exponential(*k) assert_allclose(win, v, rtol=1e-14) tukey_data = { (4, 0.5, True): array([0.0, 1.0, 1.0, 0.0]), (4, 0.9, True): array([0.0, 0.84312081893436686, 0.84312081893436686, 0.0]), (4, 1.0, True): array([0.0, 0.75, 0.75, 0.0]), (4, 0.5, False): array([0.0, 1.0, 1.0, 1.0]), (4, 0.9, False): array([0.0, 0.58682408883346526, 1.0, 0.58682408883346526]), (4, 1.0, False): array([0.0, 0.5, 1.0, 0.5]), (5, 0.0, True): array([1.0, 1.0, 1.0, 1.0, 1.0]), (5, 0.8, True): array([0.0, 0.69134171618254492, 1.0, 0.69134171618254492, 0.0]), (5, 1.0, True): array([0.0, 0.5, 1.0, 0.5, 0.0]), } def test_tukey(): # Test against hardcoded data for k, v in tukey_data.items(): if v is None: assert_raises(ValueError, signal.tukey, *k) else: win = signal.tukey(*k) assert_allclose(win, v, rtol=1e-14) # Test extremes of alpha correspond to boxcar and hann tuk0 = signal.tukey(100,0) tuk1 = signal.tukey(100,1) box0 = signal.boxcar(100) han1 = signal.hann(100) assert_array_almost_equal(tuk0, box0) assert_array_almost_equal(tuk1, han1) class TestGetWindow(object): def test_boxcar(self): w = signal.get_window('boxcar', 12) assert_array_equal(w, np.ones_like(w)) def test_cheb_odd(self): with warnings.catch_warnings(): warnings.simplefilter("ignore", UserWarning) w = 
signal.get_window(('chebwin', -40), 53, fftbins=False) assert_array_almost_equal(w, cheb_odd_true, decimal=4) def test_cheb_even(self): with warnings.catch_warnings(): warnings.simplefilter("ignore", UserWarning) w = signal.get_window(('chebwin', -40), 54, fftbins=False) assert_array_almost_equal(w, cheb_even_true, decimal=4) def test_array_as_window(self): # github issue 3603 osfactor = 128 sig = np.arange(128) win = signal.get_window(('kaiser', 8.0), osfactor // 2) assert_raises(ValueError, signal.resample, (sig, len(sig) * osfactor), {'window': win}) def test_windowfunc_basics(): for window_name, params in window_funcs: window = getattr(signal, window_name) with warnings.catch_warnings(record=True): # window is not suitable... w1 = window(7, *params, sym=True) w2 = window(7, *params, sym=False) assert_array_almost_equal(w1, w2) # just check the below runs window(6, *params, sym=True) window(6, *params, sym=False) def test_needs_params(): for winstr in ['kaiser', 'ksr', 'gaussian', 'gauss', 'gss', 'general gaussian', 'general_gaussian', 'general gauss', 'general_gauss', 'ggs', 'slepian', 'optimal', 'slep', 'dss', 'dpss', 'chebwin', 'cheb', 'exponential', 'poisson', 'tukey', 'tuk']: assert_raises(ValueError, signal.get_window, winstr, 7) if __name__ == "__main__": run_module_suite()
bsd-3-clause
jmartinm/InvenioAuthorLists
modules/websubmit/lib/functions/Move_to_Done.py
8
2441
## This file is part of Invenio. ## Copyright (C) 2004, 2005, 2006, 2007, 2008, 2010, 2011 CERN. ## ## Invenio is free software; you can redistribute it and/or ## modify it under the terms of the GNU General Public License as ## published by the Free Software Foundation; either version 2 of the ## License, or (at your option) any later version. ## ## Invenio is distributed in the hope that it will be useful, but ## WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ## General Public License for more details. ## ## You should have received a copy of the GNU General Public License ## along with Invenio; if not, write to the Free Software Foundation, Inc., ## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. __revision__ = "$Id$" ## Description: function Move_to_Done ## This function move the current working directory to the ## /done directory and compress it ## Author: T.Baron ## PARAMETERS: - import os import re import time from invenio.config import \ CFG_PATH_GZIP, \ CFG_PATH_TAR, \ CFG_WEBSUBMIT_STORAGEDIR from invenio.websubmit_config import InvenioWebSubmitFunctionError def Move_to_Done(parameters, curdir, form, user_info=None): """ This function moves the existing submission directory to the /opt/invenio/var/data/submit/storage/done directory. Then it tars and gzips the directory. 
""" global rn data = re.search(".*/([^/]*)/([^/]*)/[^/]*$",curdir) dir = data.group(1) doctype = data.group(2) DONEDIR = "%s/done/%s/%s" % (CFG_WEBSUBMIT_STORAGEDIR,dir,doctype) if not os.path.exists(DONEDIR): try: os.makedirs(DONEDIR) except: raise InvenioWebSubmitFunctionError("Cannot create done directory %s" % DONEDIR) # Moves the files to the done diectory and creates an archive rn = rn.replace("/","-") namedir = "%s_%s" % (rn,time.strftime("%Y%m%d%H%M%S")) FINALDIR = "%s/%s" % (DONEDIR,namedir) os.rename(curdir,FINALDIR) if CFG_PATH_TAR != "" and CFG_PATH_GZIP != "": os.chdir(DONEDIR) tar_txt = "%s -cf - %s > %s.tar" % (CFG_PATH_TAR,namedir,namedir) os.system(tar_txt) zip_txt = "%s %s.tar" % (CFG_PATH_GZIP,namedir) os.system(zip_txt) rm_txt = "rm -R %s" % namedir os.system(rm_txt) return ""
gpl-2.0
mancoast/CPythonPyc_test
fail/341_test_winsound.py
84
9070
# Ridiculously simple test of the winsound module for Windows. import unittest from test import support support.requires('audio') import time import os import subprocess winsound = support.import_module('winsound') ctypes = support.import_module('ctypes') import winreg def has_sound(sound): """Find out if a particular event is configured with a default sound""" try: # Ask the mixer API for the number of devices it knows about. # When there are no devices, PlaySound will fail. if ctypes.windll.winmm.mixerGetNumDevs() == 0: return False key = winreg.OpenKeyEx(winreg.HKEY_CURRENT_USER, "AppEvents\Schemes\Apps\.Default\{0}\.Default".format(sound)) return winreg.EnumValue(key, 0)[1] != "" except OSError: return False class BeepTest(unittest.TestCase): # As with PlaySoundTest, incorporate the _have_soundcard() check # into our test methods. If there's no audio device present, # winsound.Beep returns 0 and GetLastError() returns 127, which # is: ERROR_PROC_NOT_FOUND ("The specified procedure could not # be found"). (FWIW, virtual/Hyper-V systems fall under this # scenario as they have no sound devices whatsoever (not even # a legacy Beep device).) def test_errors(self): self.assertRaises(TypeError, winsound.Beep) self.assertRaises(ValueError, winsound.Beep, 36, 75) self.assertRaises(ValueError, winsound.Beep, 32768, 75) def test_extremes(self): self._beep(37, 75) self._beep(32767, 75) def test_increasingfrequency(self): for i in range(100, 2000, 100): self._beep(i, 75) def _beep(self, *args): # these tests used to use _have_soundcard(), but it's quite # possible to have a soundcard, and yet have the beep driver # disabled. 
So basically, we have no way of knowing whether # a beep should be produced or not, so currently if these # tests fail we're ignoring them # # XXX the right fix for this is to define something like # _have_enabled_beep_driver() and use that instead of the # try/except below try: winsound.Beep(*args) except RuntimeError: pass class MessageBeepTest(unittest.TestCase): def tearDown(self): time.sleep(0.5) def test_default(self): self.assertRaises(TypeError, winsound.MessageBeep, "bad") self.assertRaises(TypeError, winsound.MessageBeep, 42, 42) winsound.MessageBeep() def test_ok(self): winsound.MessageBeep(winsound.MB_OK) def test_asterisk(self): winsound.MessageBeep(winsound.MB_ICONASTERISK) def test_exclamation(self): winsound.MessageBeep(winsound.MB_ICONEXCLAMATION) def test_hand(self): winsound.MessageBeep(winsound.MB_ICONHAND) def test_question(self): winsound.MessageBeep(winsound.MB_ICONQUESTION) class PlaySoundTest(unittest.TestCase): def test_errors(self): self.assertRaises(TypeError, winsound.PlaySound) self.assertRaises(TypeError, winsound.PlaySound, "bad", "bad") self.assertRaises( RuntimeError, winsound.PlaySound, "none", winsound.SND_ASYNC | winsound.SND_MEMORY ) @unittest.skipUnless(has_sound("SystemAsterisk"), "No default SystemAsterisk") def test_alias_asterisk(self): if _have_soundcard(): winsound.PlaySound('SystemAsterisk', winsound.SND_ALIAS) else: self.assertRaises( RuntimeError, winsound.PlaySound, 'SystemAsterisk', winsound.SND_ALIAS ) @unittest.skipUnless(has_sound("SystemExclamation"), "No default SystemExclamation") def test_alias_exclamation(self): if _have_soundcard(): winsound.PlaySound('SystemExclamation', winsound.SND_ALIAS) else: self.assertRaises( RuntimeError, winsound.PlaySound, 'SystemExclamation', winsound.SND_ALIAS ) @unittest.skipUnless(has_sound("SystemExit"), "No default SystemExit") def test_alias_exit(self): if _have_soundcard(): winsound.PlaySound('SystemExit', winsound.SND_ALIAS) else: self.assertRaises( RuntimeError, 
winsound.PlaySound, 'SystemExit', winsound.SND_ALIAS ) @unittest.skipUnless(has_sound("SystemHand"), "No default SystemHand") def test_alias_hand(self): if _have_soundcard(): winsound.PlaySound('SystemHand', winsound.SND_ALIAS) else: self.assertRaises( RuntimeError, winsound.PlaySound, 'SystemHand', winsound.SND_ALIAS ) @unittest.skipUnless(has_sound("SystemQuestion"), "No default SystemQuestion") def test_alias_question(self): if _have_soundcard(): winsound.PlaySound('SystemQuestion', winsound.SND_ALIAS) else: self.assertRaises( RuntimeError, winsound.PlaySound, 'SystemQuestion', winsound.SND_ALIAS ) def test_alias_fallback(self): # In the absense of the ability to tell if a sound was actually # played, this test has two acceptable outcomes: success (no error, # sound was theoretically played; although as issue #19987 shows # a box without a soundcard can "succeed") or RuntimeError. Any # other error is a failure. try: winsound.PlaySound('!"$%&/(#+*', winsound.SND_ALIAS) except RuntimeError: pass def test_alias_nofallback(self): if _have_soundcard(): # Note that this is not the same as asserting RuntimeError # will get raised: you cannot convert this to # self.assertRaises(...) form. The attempt may or may not # raise RuntimeError, but it shouldn't raise anything other # than RuntimeError, and that's all we're trying to test # here. The MS docs aren't clear about whether the SDK # PlaySound() with SND_ALIAS and SND_NODEFAULT will return # True or False when the alias is unknown. On Tim's WinXP # box today, it returns True (no exception is raised). What # we'd really like to test is that no sound is played, but # that requires first wiring an eardrum class into unittest # <wink>. 
try: winsound.PlaySound( '!"$%&/(#+*', winsound.SND_ALIAS | winsound.SND_NODEFAULT ) except RuntimeError: pass else: self.assertRaises( RuntimeError, winsound.PlaySound, '!"$%&/(#+*', winsound.SND_ALIAS | winsound.SND_NODEFAULT ) def test_stopasync(self): if _have_soundcard(): winsound.PlaySound( 'SystemQuestion', winsound.SND_ALIAS | winsound.SND_ASYNC | winsound.SND_LOOP ) time.sleep(0.5) try: winsound.PlaySound( 'SystemQuestion', winsound.SND_ALIAS | winsound.SND_NOSTOP ) except RuntimeError: pass else: # the first sound might already be finished pass winsound.PlaySound(None, winsound.SND_PURGE) else: # Issue 8367: PlaySound(None, winsound.SND_PURGE) # does not raise on systems without a sound card. pass def _get_cscript_path(): """Return the full path to cscript.exe or None.""" for dir in os.environ.get("PATH", "").split(os.pathsep): cscript_path = os.path.join(dir, "cscript.exe") if os.path.exists(cscript_path): return cscript_path __have_soundcard_cache = None def _have_soundcard(): """Return True iff this computer has a soundcard.""" global __have_soundcard_cache if __have_soundcard_cache is None: cscript_path = _get_cscript_path() if cscript_path is None: # Could not find cscript.exe to run our VBScript helper. Default # to True: most computers these days *do* have a soundcard. return True check_script = os.path.join(os.path.dirname(__file__), "check_soundcard.vbs") p = subprocess.Popen([cscript_path, check_script], stdout=subprocess.PIPE) __have_soundcard_cache = not p.wait() p.stdout.close() return __have_soundcard_cache def test_main(): support.run_unittest(BeepTest, MessageBeepTest, PlaySoundTest) if __name__=="__main__": test_main()
gpl-3.0
nunogt/tempest
tempest/api/compute/servers/test_list_server_filters.py
6
14862
# Copyright 2012 OpenStack Foundation # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from tempest_lib import exceptions as lib_exc from tempest.api.compute import base from tempest.api import utils from tempest.common import fixed_network from tempest.common.utils import data_utils from tempest.common import waiters from tempest import config from tempest import test CONF = config.CONF class ListServerFiltersTestJSON(base.BaseV2ComputeTest): @classmethod def setup_credentials(cls): cls.set_network_resources(network=True, subnet=True, dhcp=True) super(ListServerFiltersTestJSON, cls).setup_credentials() @classmethod def setup_clients(cls): super(ListServerFiltersTestJSON, cls).setup_clients() cls.client = cls.servers_client @classmethod def resource_setup(cls): super(ListServerFiltersTestJSON, cls).resource_setup() # Check to see if the alternate image ref actually exists... images_client = cls.images_client images = images_client.list_images() if cls.image_ref != cls.image_ref_alt and \ any([image for image in images if image['id'] == cls.image_ref_alt]): cls.multiple_images = True else: cls.image_ref_alt = cls.image_ref # Do some sanity checks here. If one of the images does # not exist, fail early since the tests won't work... try: cls.images_client.show_image(cls.image_ref) except lib_exc.NotFound: raise RuntimeError("Image %s (image_ref) was not found!" 
% cls.image_ref) try: cls.images_client.show_image(cls.image_ref_alt) except lib_exc.NotFound: raise RuntimeError("Image %s (image_ref_alt) was not found!" % cls.image_ref_alt) network = cls.get_tenant_network() if network: cls.fixed_network_name = network.get('name') else: cls.fixed_network_name = None network_kwargs = fixed_network.set_networks_kwarg(network) cls.s1_name = data_utils.rand_name(cls.__name__ + '-instance') cls.s1 = cls.create_test_server(name=cls.s1_name, wait_until='ACTIVE', **network_kwargs) cls.s2_name = data_utils.rand_name(cls.__name__ + '-instance') cls.s2 = cls.create_test_server(name=cls.s2_name, image_id=cls.image_ref_alt, wait_until='ACTIVE') cls.s3_name = data_utils.rand_name(cls.__name__ + '-instance') cls.s3 = cls.create_test_server(name=cls.s3_name, flavor=cls.flavor_ref_alt, wait_until='ACTIVE') @test.idempotent_id('05e8a8e7-9659-459a-989d-92c2f501f4ba') @utils.skip_unless_attr('multiple_images', 'Only one image found') def test_list_servers_filter_by_image(self): # Filter the list of servers by image params = {'image': self.image_ref} body = self.client.list_servers(**params) servers = body['servers'] self.assertIn(self.s1['id'], map(lambda x: x['id'], servers)) self.assertNotIn(self.s2['id'], map(lambda x: x['id'], servers)) self.assertIn(self.s3['id'], map(lambda x: x['id'], servers)) @test.idempotent_id('573637f5-7325-47bb-9144-3476d0416908') def test_list_servers_filter_by_flavor(self): # Filter the list of servers by flavor params = {'flavor': self.flavor_ref_alt} body = self.client.list_servers(**params) servers = body['servers'] self.assertNotIn(self.s1['id'], map(lambda x: x['id'], servers)) self.assertNotIn(self.s2['id'], map(lambda x: x['id'], servers)) self.assertIn(self.s3['id'], map(lambda x: x['id'], servers)) @test.idempotent_id('9b067a7b-7fee-4f6a-b29c-be43fe18fc5a') def test_list_servers_filter_by_server_name(self): # Filter the list of servers by server name params = {'name': self.s1_name} body = 
self.client.list_servers(**params) servers = body['servers'] self.assertIn(self.s1_name, map(lambda x: x['name'], servers)) self.assertNotIn(self.s2_name, map(lambda x: x['name'], servers)) self.assertNotIn(self.s3_name, map(lambda x: x['name'], servers)) @test.idempotent_id('ca78e20e-fddb-4ce6-b7f7-bcbf8605e66e') def test_list_servers_filter_by_server_status(self): # Filter the list of servers by server status params = {'status': 'active'} body = self.client.list_servers(**params) servers = body['servers'] self.assertIn(self.s1['id'], map(lambda x: x['id'], servers)) self.assertIn(self.s2['id'], map(lambda x: x['id'], servers)) self.assertIn(self.s3['id'], map(lambda x: x['id'], servers)) @test.idempotent_id('451dbbb2-f330-4a9f-b0e1-5f5d2cb0f34c') def test_list_servers_filter_by_shutoff_status(self): # Filter the list of servers by server shutoff status params = {'status': 'shutoff'} self.client.stop(self.s1['id']) waiters.wait_for_server_status(self.client, self.s1['id'], 'SHUTOFF') body = self.client.list_servers(**params) self.client.start(self.s1['id']) waiters.wait_for_server_status(self.client, self.s1['id'], 'ACTIVE') servers = body['servers'] self.assertIn(self.s1['id'], map(lambda x: x['id'], servers)) self.assertNotIn(self.s2['id'], map(lambda x: x['id'], servers)) self.assertNotIn(self.s3['id'], map(lambda x: x['id'], servers)) @test.idempotent_id('614cdfc1-d557-4bac-915b-3e67b48eee76') def test_list_servers_filter_by_limit(self): # Verify only the expected number of servers are returned params = {'limit': 1} servers = self.client.list_servers(**params) self.assertEqual(1, len([x for x in servers['servers'] if 'id' in x])) @test.idempotent_id('b1495414-2d93-414c-8019-849afe8d319e') def test_list_servers_filter_by_zero_limit(self): # Verify only the expected number of servers are returned params = {'limit': 0} servers = self.client.list_servers(**params) self.assertEqual(0, len(servers['servers'])) 
@test.idempotent_id('37791bbd-90c0-4de0-831e-5f38cba9c6b3') def test_list_servers_filter_by_exceed_limit(self): # Verify only the expected number of servers are returned params = {'limit': 100000} servers = self.client.list_servers(**params) all_servers = self.client.list_servers() self.assertEqual(len([x for x in all_servers['servers'] if 'id' in x]), len([x for x in servers['servers'] if 'id' in x])) @test.idempotent_id('b3304c3b-97df-46d2-8cd3-e2b6659724e7') @utils.skip_unless_attr('multiple_images', 'Only one image found') def test_list_servers_detailed_filter_by_image(self): # Filter the detailed list of servers by image params = {'image': self.image_ref} body = self.client.list_servers(detail=True, **params) servers = body['servers'] self.assertIn(self.s1['id'], map(lambda x: x['id'], servers)) self.assertNotIn(self.s2['id'], map(lambda x: x['id'], servers)) self.assertIn(self.s3['id'], map(lambda x: x['id'], servers)) @test.idempotent_id('80c574cc-0925-44ba-8602-299028357dd9') def test_list_servers_detailed_filter_by_flavor(self): # Filter the detailed list of servers by flavor params = {'flavor': self.flavor_ref_alt} body = self.client.list_servers(detail=True, **params) servers = body['servers'] self.assertNotIn(self.s1['id'], map(lambda x: x['id'], servers)) self.assertNotIn(self.s2['id'], map(lambda x: x['id'], servers)) self.assertIn(self.s3['id'], map(lambda x: x['id'], servers)) @test.idempotent_id('f9eb2b70-735f-416c-b260-9914ac6181e4') def test_list_servers_detailed_filter_by_server_name(self): # Filter the detailed list of servers by server name params = {'name': self.s1_name} body = self.client.list_servers(detail=True, **params) servers = body['servers'] self.assertIn(self.s1_name, map(lambda x: x['name'], servers)) self.assertNotIn(self.s2_name, map(lambda x: x['name'], servers)) self.assertNotIn(self.s3_name, map(lambda x: x['name'], servers)) @test.idempotent_id('de2612ab-b7dd-4044-b0b1-d2539601911f') def 
test_list_servers_detailed_filter_by_server_status(self): # Filter the detailed list of servers by server status params = {'status': 'active'} body = self.client.list_servers(detail=True, **params) servers = body['servers'] test_ids = [s['id'] for s in (self.s1, self.s2, self.s3)] self.assertIn(self.s1['id'], map(lambda x: x['id'], servers)) self.assertIn(self.s2['id'], map(lambda x: x['id'], servers)) self.assertIn(self.s3['id'], map(lambda x: x['id'], servers)) self.assertEqual(['ACTIVE'] * 3, [x['status'] for x in servers if x['id'] in test_ids]) @test.idempotent_id('e9f624ee-92af-4562-8bec-437945a18dcb') def test_list_servers_filtered_by_name_wildcard(self): # List all servers that contains '-instance' in name params = {'name': '-instance'} body = self.client.list_servers(**params) servers = body['servers'] self.assertIn(self.s1_name, map(lambda x: x['name'], servers)) self.assertIn(self.s2_name, map(lambda x: x['name'], servers)) self.assertIn(self.s3_name, map(lambda x: x['name'], servers)) # Let's take random part of name and try to search it part_name = self.s1_name[6:-1] params = {'name': part_name} body = self.client.list_servers(**params) servers = body['servers'] self.assertIn(self.s1_name, map(lambda x: x['name'], servers)) self.assertNotIn(self.s2_name, map(lambda x: x['name'], servers)) self.assertNotIn(self.s3_name, map(lambda x: x['name'], servers)) @test.idempotent_id('24a89b0c-0d55-4a28-847f-45075f19b27b') def test_list_servers_filtered_by_name_regex(self): # list of regex that should match s1, s2 and s3 regexes = ['^.*\-instance\-[0-9]+$', '^.*\-instance\-.*$'] for regex in regexes: params = {'name': regex} body = self.client.list_servers(**params) servers = body['servers'] self.assertIn(self.s1_name, map(lambda x: x['name'], servers)) self.assertIn(self.s2_name, map(lambda x: x['name'], servers)) self.assertIn(self.s3_name, map(lambda x: x['name'], servers)) # Let's take random part of name and try to search it part_name = self.s1_name[-10:] 
params = {'name': part_name} body = self.client.list_servers(**params) servers = body['servers'] self.assertIn(self.s1_name, map(lambda x: x['name'], servers)) self.assertNotIn(self.s2_name, map(lambda x: x['name'], servers)) self.assertNotIn(self.s3_name, map(lambda x: x['name'], servers)) @test.idempotent_id('43a1242e-7b31-48d1-88f2-3f72aa9f2077') def test_list_servers_filtered_by_ip(self): # Filter servers by ip # Here should be listed 1 server if not self.fixed_network_name: msg = 'fixed_network_name needs to be configured to run this test' raise self.skipException(msg) self.s1 = self.client.show_server(self.s1['id']) for addr_spec in self.s1['addresses'][self.fixed_network_name]: ip = addr_spec['addr'] if addr_spec['version'] == 4: params = {'ip': ip} break else: msg = "Skipped until bug 1450859 is resolved" raise self.skipException(msg) body = self.client.list_servers(**params) servers = body['servers'] self.assertIn(self.s1_name, map(lambda x: x['name'], servers)) self.assertNotIn(self.s2_name, map(lambda x: x['name'], servers)) self.assertNotIn(self.s3_name, map(lambda x: x['name'], servers)) @test.idempotent_id('a905e287-c35e-42f2-b132-d02b09f3654a') def test_list_servers_filtered_by_ip_regex(self): # Filter servers by regex ip # List all servers filtered by part of ip address. 
# Here should be listed all servers if not self.fixed_network_name: msg = 'fixed_network_name needs to be configured to run this test' raise self.skipException(msg) self.s1 = self.client.show_server(self.s1['id']) addr_spec = self.s1['addresses'][self.fixed_network_name][0] ip = addr_spec['addr'][0:-3] if addr_spec['version'] == 4: params = {'ip': ip} else: params = {'ip6': ip} # capture all servers in case something goes wrong all_servers = self.client.list_servers(detail=True) body = self.client.list_servers(**params) servers = body['servers'] self.assertIn(self.s1_name, map(lambda x: x['name'], servers), "%s not found in %s, all servers %s" % (self.s1_name, servers, all_servers)) self.assertIn(self.s2_name, map(lambda x: x['name'], servers), "%s not found in %s, all servers %s" % (self.s2_name, servers, all_servers)) self.assertIn(self.s3_name, map(lambda x: x['name'], servers), "%s not found in %s, all servers %s" % (self.s3_name, servers, all_servers)) @test.idempotent_id('67aec2d0-35fe-4503-9f92-f13272b867ed') def test_list_servers_detailed_limit_results(self): # Verify only the expected number of detailed results are returned params = {'limit': 1} servers = self.client.list_servers(detail=True, **params) self.assertEqual(1, len(servers['servers']))
apache-2.0
noam09/deluge-telegramer
telegramer/include/future/utils/__init__.py
8
20325
""" A selection of cross-compatible functions for Python 2 and 3. This module exports useful functions for 2/3 compatible code: * bind_method: binds functions to classes * ``native_str_to_bytes`` and ``bytes_to_native_str`` * ``native_str``: always equal to the native platform string object (because this may be shadowed by imports from future.builtins) * lists: lrange(), lmap(), lzip(), lfilter() * iterable method compatibility: - iteritems, iterkeys, itervalues - viewitems, viewkeys, viewvalues These use the original method if available, otherwise they use items, keys, values. * types: * text_type: unicode in Python 2, str in Python 3 * binary_type: str in Python 2, bytes in Python 3 * string_types: basestring in Python 2, str in Python 3 * bchr(c): Take an integer and make a 1-character byte string * bord(c) Take the result of indexing on a byte string and make an integer * tobytes(s) Take a text string, a byte string, or a sequence of characters taken from a byte string, and make a byte string. * raise_from() * raise_with_traceback() This module also defines these decorators: * ``python_2_unicode_compatible`` * ``with_metaclass`` * ``implements_iterator`` Some of the functions in this module come from the following sources: * Jinja2 (BSD licensed: see https://github.com/mitsuhiko/jinja2/blob/master/LICENSE) * Pandas compatibility module pandas.compat * six.py by Benjamin Peterson * Django """ import types import sys import numbers import functools import copy import inspect PY3 = sys.version_info[0] == 3 PY35_PLUS = sys.version_info[0:2] >= (3, 5) PY36_PLUS = sys.version_info[0:2] >= (3, 6) PY2 = sys.version_info[0] == 2 PY26 = sys.version_info[0:2] == (2, 6) PY27 = sys.version_info[0:2] == (2, 7) PYPY = hasattr(sys, 'pypy_translation_info') def python_2_unicode_compatible(cls): """ A decorator that defines __unicode__ and __str__ methods under Python 2. Under Python 3, this decorator is a no-op. 
To support Python 2 and 3 with a single code base, define a __str__ method returning unicode text and apply this decorator to the class, like this:: >>> from future.utils import python_2_unicode_compatible >>> @python_2_unicode_compatible ... class MyClass(object): ... def __str__(self): ... return u'Unicode string: \u5b54\u5b50' >>> a = MyClass() Then, after this import: >>> from future.builtins import str the following is ``True`` on both Python 3 and 2:: >>> str(a) == a.encode('utf-8').decode('utf-8') True and, on a Unicode-enabled terminal with the right fonts, these both print the Chinese characters for Confucius:: >>> print(a) >>> print(str(a)) The implementation comes from django.utils.encoding. """ if not PY3: cls.__unicode__ = cls.__str__ cls.__str__ = lambda self: self.__unicode__().encode('utf-8') return cls def with_metaclass(meta, *bases): """ Function from jinja2/_compat.py. License: BSD. Use it like this:: class BaseForm(object): pass class FormType(type): pass class Form(with_metaclass(FormType, BaseForm)): pass This requires a bit of explanation: the basic idea is to make a dummy metaclass for one level of class instantiation that replaces itself with the actual metaclass. Because of internal type checks we also need to make sure that we downgrade the custom metaclass for one level to something closer to type (that's why __call__ and __init__ comes back from type etc.). This has the advantage over six.with_metaclass of not introducing dummy classes into the final MRO. 
""" class metaclass(meta): __call__ = type.__call__ __init__ = type.__init__ def __new__(cls, name, this_bases, d): if this_bases is None: return type.__new__(cls, name, (), d) return meta(name, bases, d) return metaclass('temporary_class', None, {}) # Definitions from pandas.compat and six.py follow: if PY3: def bchr(s): return bytes([s]) def bstr(s): if isinstance(s, str): return bytes(s, 'latin-1') else: return bytes(s) def bord(s): return s string_types = str, integer_types = int, class_types = type, text_type = str binary_type = bytes else: # Python 2 def bchr(s): return chr(s) def bstr(s): return str(s) def bord(s): return ord(s) string_types = basestring, integer_types = (int, long) class_types = (type, types.ClassType) text_type = unicode binary_type = str ### if PY3: def tobytes(s): if isinstance(s, bytes): return s else: if isinstance(s, str): return s.encode('latin-1') else: return bytes(s) else: # Python 2 def tobytes(s): if isinstance(s, unicode): return s.encode('latin-1') else: return ''.join(s) tobytes.__doc__ = """ Encodes to latin-1 (where the first 256 chars are the same as ASCII.) """ if PY3: def native_str_to_bytes(s, encoding='utf-8'): return s.encode(encoding) def bytes_to_native_str(b, encoding='utf-8'): return b.decode(encoding) def text_to_native_str(t, encoding=None): return t else: # Python 2 def native_str_to_bytes(s, encoding=None): from future.types import newbytes # to avoid a circular import return newbytes(s) def bytes_to_native_str(b, encoding=None): return native(b) def text_to_native_str(t, encoding='ascii'): """ Use this to create a Py2 native string when "from __future__ import unicode_literals" is in effect. """ return unicode(t).encode(encoding) native_str_to_bytes.__doc__ = """ On Py3, returns an encoded string. On Py2, returns a newbytes type, ignoring the ``encoding`` argument. 
""" if PY3: # list-producing versions of the major Python iterating functions def lrange(*args, **kwargs): return list(range(*args, **kwargs)) def lzip(*args, **kwargs): return list(zip(*args, **kwargs)) def lmap(*args, **kwargs): return list(map(*args, **kwargs)) def lfilter(*args, **kwargs): return list(filter(*args, **kwargs)) else: import __builtin__ # Python 2-builtin ranges produce lists lrange = __builtin__.range lzip = __builtin__.zip lmap = __builtin__.map lfilter = __builtin__.filter def isidentifier(s, dotted=False): ''' A function equivalent to the str.isidentifier method on Py3 ''' if dotted: return all(isidentifier(a) for a in s.split('.')) if PY3: return s.isidentifier() else: import re _name_re = re.compile(r"[a-zA-Z_][a-zA-Z0-9_]*$") return bool(_name_re.match(s)) def viewitems(obj, **kwargs): """ Function for iterating over dictionary items with the same set-like behaviour on Py2.7 as on Py3. Passes kwargs to method.""" func = getattr(obj, "viewitems", None) if not func: func = obj.items return func(**kwargs) def viewkeys(obj, **kwargs): """ Function for iterating over dictionary keys with the same set-like behaviour on Py2.7 as on Py3. Passes kwargs to method.""" func = getattr(obj, "viewkeys", None) if not func: func = obj.keys return func(**kwargs) def viewvalues(obj, **kwargs): """ Function for iterating over dictionary values with the same set-like behaviour on Py2.7 as on Py3. Passes kwargs to method.""" func = getattr(obj, "viewvalues", None) if not func: func = obj.values return func(**kwargs) def iteritems(obj, **kwargs): """Use this only if compatibility with Python versions before 2.7 is required. Otherwise, prefer viewitems(). """ func = getattr(obj, "iteritems", None) if not func: func = obj.items return func(**kwargs) def iterkeys(obj, **kwargs): """Use this only if compatibility with Python versions before 2.7 is required. Otherwise, prefer viewkeys(). 
""" func = getattr(obj, "iterkeys", None) if not func: func = obj.keys return func(**kwargs) def itervalues(obj, **kwargs): """Use this only if compatibility with Python versions before 2.7 is required. Otherwise, prefer viewvalues(). """ func = getattr(obj, "itervalues", None) if not func: func = obj.values return func(**kwargs) def bind_method(cls, name, func): """Bind a method to class, python 2 and python 3 compatible. Parameters ---------- cls : type class to receive bound method name : basestring name of method on class instance func : function function to be bound as method Returns ------- None """ # only python 2 has an issue with bound/unbound methods if not PY3: setattr(cls, name, types.MethodType(func, None, cls)) else: setattr(cls, name, func) def getexception(): return sys.exc_info()[1] def _get_caller_globals_and_locals(): """ Returns the globals and locals of the calling frame. Is there an alternative to frame hacking here? """ caller_frame = inspect.stack()[2] myglobals = caller_frame[0].f_globals mylocals = caller_frame[0].f_locals return myglobals, mylocals def _repr_strip(mystring): """ Returns the string without any initial or final quotes. """ r = repr(mystring) if r.startswith("'") and r.endswith("'"): return r[1:-1] else: return r if PY3: def raise_from(exc, cause): """ Equivalent to: raise EXCEPTION from CAUSE on Python 3. (See PEP 3134). """ myglobals, mylocals = _get_caller_globals_and_locals() # We pass the exception and cause along with other globals # when we exec(): myglobals = myglobals.copy() myglobals['__python_future_raise_from_exc'] = exc myglobals['__python_future_raise_from_cause'] = cause execstr = "raise __python_future_raise_from_exc from __python_future_raise_from_cause" exec(execstr, myglobals, mylocals) def raise_(tp, value=None, tb=None): """ A function that matches the Python 2.x ``raise`` statement. This allows re-raising exceptions with the cls value and traceback on Python 2 and 3. 
""" if value is not None and isinstance(tp, Exception): raise TypeError("instance exception may not have a separate value") if value is not None: exc = tp(value) else: exc = tp if exc.__traceback__ is not tb: raise exc.with_traceback(tb) raise exc def raise_with_traceback(exc, traceback=Ellipsis): if traceback == Ellipsis: _, _, traceback = sys.exc_info() raise exc.with_traceback(traceback) else: def raise_from(exc, cause): """ Equivalent to: raise EXCEPTION from CAUSE on Python 3. (See PEP 3134). """ # Is either arg an exception class (e.g. IndexError) rather than # instance (e.g. IndexError('my message here')? If so, pass the # name of the class undisturbed through to "raise ... from ...". if isinstance(exc, type) and issubclass(exc, Exception): e = exc() # exc = exc.__name__ # execstr = "e = " + _repr_strip(exc) + "()" # myglobals, mylocals = _get_caller_globals_and_locals() # exec(execstr, myglobals, mylocals) else: e = exc e.__suppress_context__ = False if isinstance(cause, type) and issubclass(cause, Exception): e.__cause__ = cause() e.__suppress_context__ = True elif cause is None: e.__cause__ = None e.__suppress_context__ = True elif isinstance(cause, BaseException): e.__cause__ = cause e.__suppress_context__ = True else: raise TypeError("exception causes must derive from BaseException") e.__context__ = sys.exc_info()[1] raise e exec(''' def raise_(tp, value=None, tb=None): raise tp, value, tb def raise_with_traceback(exc, traceback=Ellipsis): if traceback == Ellipsis: _, _, traceback = sys.exc_info() raise exc, None, traceback '''.strip()) raise_with_traceback.__doc__ = ( """Raise exception with existing traceback. If traceback is not passed, uses sys.exc_info() to get traceback.""" ) # Deprecated alias for backward compatibility with ``future`` versions < 0.11: reraise = raise_ def implements_iterator(cls): ''' From jinja2/_compat.py. License: BSD. 
Use as a decorator like this:: @implements_iterator class UppercasingIterator(object): def __init__(self, iterable): self._iter = iter(iterable) def __iter__(self): return self def __next__(self): return next(self._iter).upper() ''' if PY3: return cls else: cls.next = cls.__next__ del cls.__next__ return cls if PY3: get_next = lambda x: x.next else: get_next = lambda x: x.__next__ def encode_filename(filename): if PY3: return filename else: if isinstance(filename, unicode): return filename.encode('utf-8') return filename def is_new_style(cls): """ Python 2.7 has both new-style and old-style classes. Old-style classes can be pesky in some circumstances, such as when using inheritance. Use this function to test for whether a class is new-style. (Python 3 only has new-style classes.) """ return hasattr(cls, '__class__') and ('__dict__' in dir(cls) or hasattr(cls, '__slots__')) # The native platform string and bytes types. Useful because ``str`` and # ``bytes`` are redefined on Py2 by ``from future.builtins import *``. native_str = str native_bytes = bytes def istext(obj): """ Deprecated. Use:: >>> isinstance(obj, str) after this import: >>> from future.builtins import str """ return isinstance(obj, type(u'')) def isbytes(obj): """ Deprecated. Use:: >>> isinstance(obj, bytes) after this import: >>> from future.builtins import bytes """ return isinstance(obj, type(b'')) def isnewbytes(obj): """ Equivalent to the result of ``isinstance(obj, newbytes)`` were ``__instancecheck__`` not overridden on the newbytes subclass. In other words, it is REALLY a newbytes instance, not a Py2 native str object? """ # TODO: generalize this so that it works with subclasses of newbytes # Import is here to avoid circular imports: from future.types.newbytes import newbytes return type(obj) == newbytes def isint(obj): """ Deprecated. Tests whether an object is a Py3 ``int`` or either a Py2 ``int`` or ``long``. 
Instead of using this function, you can use: >>> from future.builtins import int >>> isinstance(obj, int) The following idiom is equivalent: >>> from numbers import Integral >>> isinstance(obj, Integral) """ return isinstance(obj, numbers.Integral) def native(obj): """ On Py3, this is a no-op: native(obj) -> obj On Py2, returns the corresponding native Py2 types that are superclasses for backported objects from Py3: >>> from builtins import str, bytes, int >>> native(str(u'ABC')) u'ABC' >>> type(native(str(u'ABC'))) unicode >>> native(bytes(b'ABC')) b'ABC' >>> type(native(bytes(b'ABC'))) bytes >>> native(int(10**20)) 100000000000000000000L >>> type(native(int(10**20))) long Existing native types on Py2 will be returned unchanged: >>> type(native(u'ABC')) unicode """ if hasattr(obj, '__native__'): return obj.__native__() else: return obj # Implementation of exec_ is from ``six``: if PY3: import builtins exec_ = getattr(builtins, "exec") else: def exec_(code, globs=None, locs=None): """Execute code in a namespace.""" if globs is None: frame = sys._getframe(1) globs = frame.f_globals if locs is None: locs = frame.f_locals del frame elif locs is None: locs = globs exec("""exec code in globs, locs""") # Defined here for backward compatibility: def old_div(a, b): """ DEPRECATED: import ``old_div`` from ``past.utils`` instead. Equivalent to ``a / b`` on Python 2 without ``from __future__ import division``. TODO: generalize this to other objects (like arrays etc.) """ if isinstance(a, numbers.Integral) and isinstance(b, numbers.Integral): return a // b else: return a / b def as_native_str(encoding='utf-8'): ''' A decorator to turn a function or method call that returns text, i.e. unicode, into one that returns a native platform str. 
Use it as a decorator like this:: from __future__ import unicode_literals class MyClass(object): @as_native_str(encoding='ascii') def __repr__(self): return next(self._iter).upper() ''' if PY3: return lambda f: f else: def encoder(f): @functools.wraps(f) def wrapper(*args, **kwargs): return f(*args, **kwargs).encode(encoding=encoding) return wrapper return encoder # listvalues and listitems definitions from Nick Coghlan's (withdrawn) # PEP 496: try: dict.iteritems except AttributeError: # Python 3 def listvalues(d): return list(d.values()) def listitems(d): return list(d.items()) else: # Python 2 def listvalues(d): return d.values() def listitems(d): return d.items() if PY3: def ensure_new_type(obj): return obj else: def ensure_new_type(obj): from future.types.newbytes import newbytes from future.types.newstr import newstr from future.types.newint import newint from future.types.newdict import newdict native_type = type(native(obj)) # Upcast only if the type is already a native (non-future) type if issubclass(native_type, type(obj)): # Upcast if native_type == str: # i.e. 
Py2 8-bit str return newbytes(obj) elif native_type == unicode: return newstr(obj) elif native_type == int: return newint(obj) elif native_type == long: return newint(obj) elif native_type == dict: return newdict(obj) else: return obj else: # Already a new type assert type(obj) in [newbytes, newstr] return obj __all__ = ['PY2', 'PY26', 'PY3', 'PYPY', 'as_native_str', 'bind_method', 'bord', 'bstr', 'bytes_to_native_str', 'encode_filename', 'ensure_new_type', 'exec_', 'get_next', 'getexception', 'implements_iterator', 'is_new_style', 'isbytes', 'isidentifier', 'isint', 'isnewbytes', 'istext', 'iteritems', 'iterkeys', 'itervalues', 'lfilter', 'listitems', 'listvalues', 'lmap', 'lrange', 'lzip', 'native', 'native_bytes', 'native_str', 'native_str_to_bytes', 'old_div', 'python_2_unicode_compatible', 'raise_', 'raise_with_traceback', 'reraise', 'text_to_native_str', 'tobytes', 'viewitems', 'viewkeys', 'viewvalues', 'with_metaclass' ]
gpl-3.0
deschler/django-filebrowser
filebrowser/signals.py
21
1706
# coding: utf-8 # DJANGO IMPORTS from django.dispatch import Signal # upload signals # path: Absolute server path to the file/folder # name: Name of the file/folder # site: Current FileBrowserSite instance filebrowser_pre_upload = Signal(providing_args=["path", "file", "site"]) filebrowser_post_upload = Signal(providing_args=["path", "file", "site"]) # mkdir signals # path: Absolute server path to the file/folder # name: Name of the file/folder # site: Current FileBrowserSite instance filebrowser_pre_createdir = Signal(providing_args=["path", "name", "site"]) filebrowser_post_createdir = Signal(providing_args=["path", "name", "site"]) # delete signals # path: Absolute server path to the file/folder # name: Name of the file/folder # site: Current FileBrowserSite instance filebrowser_pre_delete = Signal(providing_args=["path", "name", "site"]) filebrowser_post_delete = Signal(providing_args=["path", "name", "site"]) # rename signals # path: Absolute server path to the file/folder # name: Name of the file/folder # site: Current FileBrowserSite instance # new_name: New name of the file/folder filebrowser_pre_rename = Signal(providing_args=["path", "name", "new_name", "site"]) filebrowser_post_rename = Signal(providing_args=["path", "name", "new_name", "site"]) # action signals # action_name: Name of the custom action # fileobjects: A list of fileobjects the action will be applied to # site: Current FileBrowserSite instance # result: The response you defined with your custom action filebrowser_actions_pre_apply = Signal(providing_args=['action_name', 'fileobjects', 'site']) filebrowser_actions_post_apply = Signal(providing_args=['action_name', 'filebjects', 'result', 'site'])
bsd-3-clause
kracwarlock/convnet
py/test_conv.py
2
18678
import sys from convnet import * import numpy as np import conv_cpu test_gemm = True def DivUp(a, b): return (a + b - 1) / b def TestConvUp(images_shape, conv_desc): filters_shape = (conv_desc.num_output_channels, conv_desc.kernel_size_x, conv_desc.kernel_size_y, conv_desc.num_input_channels) output_shape = cm.GetOutputShape4D(images_shape, conv_desc) images = np.random.randn(images_shape[0], images_shape[1] * images_shape[2] * images_shape[3]).astype(np.float32) filters = np.random.randn(filters_shape[0], filters_shape[1] * filters_shape[2] * filters_shape[3]).astype(np.float32) images_gpu = cm.CUDAMatrix(images, shape=images_shape) filters_gpu = cm.CUDAMatrix(filters, shape=filters_shape) output_gpu = cm.empty(output_shape) if test_gemm: cc_gemm.convUp(images_gpu, filters_gpu, output_gpu, conv_desc) else: cc.convUp(images_gpu, filters_gpu, output_gpu, conv_desc) output_cpu = conv_cpu.ConvUp(images, filters, images_shape, cm.GetConvDescTuple(conv_desc)) diff = Diff(output_cpu, output_gpu.asarray()) images_gpu.free_device_memory() filters_gpu.free_device_memory() output_gpu.free_device_memory() return diff def TestConvDown(images_shape, conv_desc): deriv_shape = cm.GetOutputShape4D(images_shape, conv_desc) filters_shape = (conv_desc.num_output_channels, conv_desc.kernel_size_x, conv_desc.kernel_size_y, conv_desc.num_input_channels) derivs = np.random.randn(deriv_shape[0], deriv_shape[1] * deriv_shape[2] * deriv_shape[3]).astype(np.float32) filters = np.random.randn(filters_shape[0], filters_shape[1] * filters_shape[2] * filters_shape[3]).astype(np.float32) derivs_gpu = cm.CUDAMatrix(derivs, shape=deriv_shape) filters_gpu = cm.CUDAMatrix(filters, shape=filters_shape) images_gpu = cm.empty(images_shape) if test_gemm: cc_gemm.convDown(derivs_gpu, filters_gpu, images_gpu, conv_desc) else: cc.convDown(derivs_gpu, filters_gpu, images_gpu, conv_desc) images_cpu = conv_cpu.ConvDown(derivs, filters, images_shape, cm.GetConvDescTuple(conv_desc)) diff = Diff(images_cpu, 
images_gpu.asarray()) images_gpu.free_device_memory() filters_gpu.free_device_memory() derivs_gpu.free_device_memory() return diff def TestConvOutp(images_shape, conv_desc, partial_sum_y=0, partial_sum_x=0): filters_shape = (conv_desc.num_output_channels, conv_desc.kernel_size_x, conv_desc.kernel_size_y, conv_desc.num_input_channels) deriv_shape = cm.GetOutputShape4D(images_shape, conv_desc) batch_size, num_modules_x, num_modules_y, num_output_channels = deriv_shape images = np.random.randn(images_shape[0], images_shape[1] * images_shape[2] * images_shape[3]).astype(np.float32) derivs = np.random.randn(deriv_shape[0], deriv_shape[1] * deriv_shape[2] * deriv_shape[3]).astype(np.float32) images_gpu = cm.CUDAMatrix(images, shape=images_shape) filters_gpu = cm.empty(filters_shape) derivs_gpu = cm.CUDAMatrix(derivs, shape=deriv_shape) if test_gemm: cc_gemm.convOutp(images_gpu, derivs_gpu, filters_gpu, conv_desc) else: if partial_sum_x == 0: partial_sum_x = num_modules_x if partial_sum_y == 0: partial_sum_y = num_modules_y partial_sum_locs_y = DivUp(num_modules_y, partial_sum_y) partial_sum_locs_x = DivUp(num_modules_x, partial_sum_x) filters_temp_gpu = cm.empty((filters_shape[0], filters_shape[1], filters_shape[2], filters_shape[3] * partial_sum_locs_x * partial_sum_locs_y)) cc.convOutp(images_gpu, derivs_gpu, filters_gpu, conv_desc, partialSumY=partial_sum_y, partialSumX=partial_sum_x, temp=filters_temp_gpu) filters_cpu, filters_temp_cpu = conv_cpu.ConvOutp(images, derivs, images_shape, cm.GetConvDescTuple(conv_desc), partial_sum_y=partial_sum_y, partial_sum_x=partial_sum_x) diff1 = Diff(filters_gpu.asarray(), filters_cpu) if test_gemm: diff2 = 0 else: diff2 = Diff(filters_temp_gpu.asarray(), filters_temp_cpu) filters_temp_gpu.free_device_memory() images_gpu.free_device_memory() filters_gpu.free_device_memory() derivs_gpu.free_device_memory() return diff1, diff2 def TestMaxPool(images_shape, conv_desc): output_shape = cm.GetOutputShape4D(images_shape, conv_desc) images 
= np.random.randn(images_shape[0], images_shape[1] * images_shape[2] * images_shape[3]).astype(np.float32) images_gpu = cm.CUDAMatrix(images, shape=images_shape) output_gpu = cm.empty(output_shape) if test_gemm: cc_gemm.MaxPool(images_gpu, output_gpu, conv_desc) else: cc.MaxPool(images_gpu, output_gpu, conv_desc) output_cpu = conv_cpu.MaxPool(images, images_shape, cm.GetConvDescTuple(conv_desc)) diff = Diff(output_cpu, output_gpu.asarray()) images_gpu.free_device_memory() output_gpu.free_device_memory() return diff def TestMaxPoolUndo(images_shape, conv_desc): deriv_shape = cm.GetOutputShape4D(images_shape, conv_desc) images = np.random.rand(images_shape[0], images_shape[1] * images_shape[2] * images_shape[3]).astype(np.float32) derivs = np.random.randn(deriv_shape[0], deriv_shape[1] * deriv_shape[2] * deriv_shape[3]).astype(np.float32) maxes = conv_cpu.MaxPool(images, images_shape, cm.GetConvDescTuple(conv_desc)) images_gpu = cm.CUDAMatrix(images, shape=images_shape) derivs_gpu = cm.CUDAMatrix(derivs, shape=deriv_shape) maxes_gpu = cm.CUDAMatrix(maxes, shape=deriv_shape) targets_gpu = cm.empty(images_shape) if test_gemm: cc_gemm.MaxPoolUndo(images_gpu, derivs_gpu, maxes_gpu, targets_gpu, conv_desc) else: cc.MaxPoolUndo(images_gpu, derivs_gpu, maxes_gpu, targets_gpu, conv_desc) output_cpu = conv_cpu.MaxPoolUndo(images, maxes, derivs, images_shape, deriv_shape, cm.GetConvDescTuple(conv_desc)) diff = Diff(output_cpu, targets_gpu.asarray()) images_gpu.free_device_memory() derivs_gpu.free_device_memory() maxes_gpu.free_device_memory() targets_gpu.free_device_memory() return diff def TestAvgPool(images_shape, conv_desc): output_shape = cm.GetOutputShape4D(images_shape, conv_desc) images = np.random.randn(images_shape[0], images_shape[1] * images_shape[2] * images_shape[3]).astype(np.float32) images_gpu = cm.CUDAMatrix(images, shape=images_shape) output_gpu = cm.empty(output_shape) if test_gemm: cc_gemm.AvgPool(images_gpu, output_gpu, conv_desc) else: 
cc.AvgPool(images_gpu, output_gpu, conv_desc) output_cpu = conv_cpu.AvgPool(images, images_shape, cm.GetConvDescTuple(conv_desc)) diff = Diff(output_cpu, output_gpu.asarray()) images_gpu.free_device_memory() output_gpu.free_device_memory() return diff def TestAvgPoolUndo(images_shape, conv_desc): deriv_shape = cm.GetOutputShape4D(images_shape, conv_desc) derivs = np.random.randn(deriv_shape[0], deriv_shape[1] * deriv_shape[2] * deriv_shape[3]).astype(np.float32) derivs_gpu = cm.CUDAMatrix(derivs, shape=deriv_shape) targets_gpu = cm.empty(images_shape) if test_gemm: cc_gemm.AvgPoolUndo(derivs_gpu, targets_gpu, conv_desc) else: cc.AvgPoolUndo(derivs_gpu, targets_gpu, conv_desc) output_cpu = conv_cpu.AvgPoolUndo(derivs, images_shape, cm.GetConvDescTuple(conv_desc)) diff = Diff(output_cpu, targets_gpu.asarray()) derivs_gpu.free_device_memory() targets_gpu.free_device_memory() return diff def TestResponseNormCrossMap(images_shape, numF, add_scale, pow_scale, blocked): images = np.random.randn(images_shape[0], images_shape[1] * images_shape[2] * images_shape[3]).astype(np.float32) images_gpu = cm.CUDAMatrix(images, shape=images_shape) targets_gpu = cm.empty(images_shape) if test_gemm: cc_gemm.ResponseNormCrossMap(images_gpu, targets_gpu, numF, add_scale, pow_scale, blocked) else: cc.ResponseNormCrossMap(images_gpu, targets_gpu, numF, add_scale, pow_scale, blocked) output_cpu = conv_cpu.ResponseNormCrossMap(images, images_shape, numF, add_scale, pow_scale, blocked) diff = Diff(output_cpu, targets_gpu.asarray()) images_gpu.free_device_memory() targets_gpu.free_device_memory() return diff def TestResponseNormCrossMapUndo(images_shape, numF, add_scale, pow_scale, blocked): images = np.random.randn(images_shape[0], images_shape[1] * images_shape[2] * images_shape[3]).astype(np.float32) derivs = np.random.randn(images_shape[0], images_shape[1] * images_shape[2] * images_shape[3]).astype(np.float32) images_gpu = cm.CUDAMatrix(images, shape=images_shape) derivs_gpu = 
cm.CUDAMatrix(derivs, shape=images_shape) targets_gpu = cm.empty(images_shape) if test_gemm: cc_gemm.ResponseNormCrossMapUndo(derivs_gpu, images_gpu, targets_gpu, numF, add_scale, pow_scale, blocked) else: acts_gpu = cm.empty(images_shape) cc.ResponseNormCrossMap(images_gpu, acts_gpu, numF, add_scale, pow_scale, blocked) cc.ResponseNormCrossMapUndo(derivs_gpu, images_gpu, acts_gpu, targets_gpu, numF, add_scale, pow_scale, blocked) acts_gpu.free_device_memory() output_cpu = conv_cpu.ResponseNormCrossMapUndo(derivs, images, images_shape, numF, add_scale, pow_scale, blocked) diff = Diff(output_cpu, targets_gpu.asarray()) images_gpu.free_device_memory() targets_gpu.free_device_memory() return diff def TestConvUp3D(images_shape, conv_desc): filters_shape = (conv_desc.num_output_channels, conv_desc.kernel_size_x, conv_desc.kernel_size_y, conv_desc.num_input_channels * conv_desc.kernel_size_t) output_shape = cm.GetOutputShape5D(images_shape, conv_desc) images = np.random.randn(images_shape[0], images_shape[1] * images_shape[2] * images_shape[3] * images_shape[4]).astype(np.float32) filters = np.random.randn(filters_shape[0], filters_shape[1] * filters_shape[2] * filters_shape[3]).astype(np.float32) images_gpu = cm.CUDAMatrix(images, shape=images_shape) filters_gpu = cm.CUDAMatrix(filters, shape=filters_shape) output_gpu = cm.empty(output_shape) assert test_gemm cc_gemm.convUp3D(images_gpu, filters_gpu, output_gpu, conv_desc) output_cpu = conv_cpu.ConvUp3D(images, filters, images_shape, cm.GetConvDescTuple3D(conv_desc)) diff = Diff(output_cpu, output_gpu.asarray()) images_gpu.free_device_memory() filters_gpu.free_device_memory() output_gpu.free_device_memory() return diff def TestConvDown3D(images_shape, conv_desc): filters_shape = (conv_desc.num_output_channels, conv_desc.kernel_size_x, conv_desc.kernel_size_y, conv_desc.num_input_channels * conv_desc.kernel_size_t) deriv_shape = cm.GetOutputShape5D(images_shape, conv_desc) derivs = np.random.randn(deriv_shape[0], 
deriv_shape[1] * deriv_shape[2] * deriv_shape[3] * deriv_shape[4]).astype(np.float32) filters = np.random.randn(filters_shape[0], filters_shape[1] * filters_shape[2] * filters_shape[3]).astype(np.float32) derivs_gpu = cm.CUDAMatrix(derivs, shape=deriv_shape) filters_gpu = cm.CUDAMatrix(filters, shape=filters_shape) images_gpu = cm.empty(images_shape) assert test_gemm cc_gemm.convDown3D(derivs_gpu, filters_gpu, images_gpu, conv_desc) images_cpu = conv_cpu.ConvDown3D(derivs, filters, images_shape, cm.GetConvDescTuple3D(conv_desc)) diff = Diff(images_cpu, images_gpu.asarray()) images_gpu.free_device_memory() filters_gpu.free_device_memory() derivs_gpu.free_device_memory() return diff def TestConvOutp3D(images_shape, conv_desc, partial_sum_y=0, partial_sum_x=0): filters_shape = (conv_desc.num_output_channels, conv_desc.kernel_size_x, conv_desc.kernel_size_y, conv_desc.num_input_channels * conv_desc.kernel_size_t) deriv_shape = cm.GetOutputShape5D(images_shape, conv_desc) batch_size, num_modules_x, num_modules_y, num_output_channels, num_modules_t = deriv_shape images = np.random.randn(images_shape[0], images_shape[1] * images_shape[2] * images_shape[3] * images_shape[4]).astype(np.float32) derivs = np.random.randn(deriv_shape[0], deriv_shape[1] * deriv_shape[2] * deriv_shape[3] * deriv_shape[4]).astype(np.float32) images_gpu = cm.CUDAMatrix(images, shape=images_shape) filters_gpu = cm.empty(filters_shape) derivs_gpu = cm.CUDAMatrix(derivs, shape=deriv_shape) cc_gemm.convOutp3D(images_gpu, derivs_gpu, filters_gpu, conv_desc) filters_cpu = conv_cpu.ConvOutp3D(images, derivs, images_shape, cm.GetConvDescTuple3D(conv_desc)) diff = Diff(filters_gpu.asarray(), filters_cpu) images_gpu.free_device_memory() filters_gpu.free_device_memory() derivs_gpu.free_device_memory() return diff def TestMaxPool3D(images_shape, conv_desc): output_shape = cm.GetOutputShape5D(images_shape, conv_desc) images = np.random.randn(images_shape[0], images_shape[1] * images_shape[2] * images_shape[3] 
* images_shape[4]).astype(np.float32) images_gpu = cm.CUDAMatrix(images, shape=images_shape) output_gpu = cm.empty(output_shape) assert test_gemm cc_gemm.MaxPool3D(images_gpu, output_gpu, conv_desc) output_cpu = conv_cpu.MaxPool3D(images, images_shape, cm.GetConvDescTuple3D(conv_desc)) diff = Diff(output_cpu, output_gpu.asarray()) images_gpu.free_device_memory() output_gpu.free_device_memory() return diff def TestMaxPool3DUndo(images_shape, conv_desc): deriv_shape = cm.GetOutputShape5D(images_shape, conv_desc) images = np.random.rand(images_shape[0], images_shape[1] * images_shape[2] * images_shape[3] * images_shape[4]).astype(np.float32) derivs = np.random.randn(deriv_shape[0], deriv_shape[1] * deriv_shape[2] * deriv_shape[3] * deriv_shape[4]).astype(np.float32) maxes = conv_cpu.MaxPool3D(images, images_shape, cm.GetConvDescTuple3D(conv_desc)) images_gpu = cm.CUDAMatrix(images, shape=images_shape) derivs_gpu = cm.CUDAMatrix(derivs, shape=deriv_shape) maxes_gpu = cm.CUDAMatrix(maxes, shape=deriv_shape) targets_gpu = cm.empty(images_shape) assert test_gemm cc_gemm.MaxPool3DUndo(images_gpu, derivs_gpu, maxes_gpu, targets_gpu, conv_desc) output_cpu = conv_cpu.MaxPool3DUndo(images, maxes, derivs, images_shape, deriv_shape, cm.GetConvDescTuple3D(conv_desc)) diff = Diff(output_cpu, targets_gpu.asarray()) images_gpu.free_device_memory() derivs_gpu.free_device_memory() maxes_gpu.free_device_memory() targets_gpu.free_device_memory() return diff def TestAvgPool3D(images_shape, conv_desc): output_shape = cm.GetOutputShape5D(images_shape, conv_desc) images = np.random.randn(images_shape[0], images_shape[1] * images_shape[2] * images_shape[3] * images_shape[4]).astype(np.float32) images_gpu = cm.CUDAMatrix(images, shape=images_shape) output_gpu = cm.empty(output_shape) assert test_gemm cc_gemm.AvgPool3D(images_gpu, output_gpu, conv_desc) output_cpu = conv_cpu.AvgPool3D(images, images_shape, cm.GetConvDescTuple3D(conv_desc)) diff = Diff(output_cpu, output_gpu.asarray()) 
images_gpu.free_device_memory() output_gpu.free_device_memory() return diff def TestAvgPool3DUndo(images_shape, conv_desc): deriv_shape = cm.GetOutputShape5D(images_shape, conv_desc) derivs = np.random.randn(deriv_shape[0], deriv_shape[1] * deriv_shape[2] * deriv_shape[3] * deriv_shape[4]).astype(np.float32) derivs_gpu = cm.CUDAMatrix(derivs, shape=deriv_shape) targets_gpu = cm.empty(images_shape) assert test_gemm cc_gemm.AvgPool3DUndo(derivs_gpu, targets_gpu, conv_desc) output_cpu = conv_cpu.AvgPool3DUndo(derivs, images_shape, cm.GetConvDescTuple3D(conv_desc)) diff = Diff(output_cpu, targets_gpu.asarray()) derivs_gpu.free_device_memory() targets_gpu.free_device_memory() return diff def Diff(a, b): scale = np.abs(a + b).mean() diff = np.abs(a - b).max() / scale return diff def Check(diff, tol=1e-4): if diff < tol: result = 'PASSED' else: result = 'FAILED' print diff, result def Test2D(): batch_size = 128 image_size_x = 12 image_size_y = 12 num_input_channels = 32 sizeF = 8 add_scale = 0.005 pow_scale = 0.75 blocked = False num_output_channels = 64 kernel_size_y = 3 kernel_size_x = 3 stride_y = 2 stride_x = 2 padding_y = 1 padding_x = 1 partial_sum = 0 images_shape = (batch_size, image_size_x, image_size_y, num_input_channels) conv_desc = cm.GetConvDesc(num_input_channels, num_output_channels, kernel_size_y, kernel_size_x, stride_y, stride_x, padding_y, padding_x) pool_desc = cm.GetConvDesc(num_input_channels, num_input_channels, kernel_size_y, kernel_size_x, stride_y, stride_x, padding_y, padding_x) print 'ConvUp' Check(TestConvUp(images_shape, conv_desc)) print 'ConvDown' Check(TestConvDown(images_shape, conv_desc)) print 'ConvOutp' d1, d2 = TestConvOutp(images_shape, conv_desc, partial_sum_y=partial_sum, partial_sum_x=partial_sum) Check(d1) print 'MaxPool' Check(TestMaxPool(images_shape, pool_desc)) print 'AvgPool' Check(TestAvgPool(images_shape, pool_desc)) print 'MaxPoolUndo' Check(TestMaxPoolUndo(images_shape, pool_desc)) print 'AvgPoolUndo' 
Check(TestAvgPoolUndo(images_shape, pool_desc)) print 'ResponseNormCrossMap' Check(TestResponseNormCrossMap(images_shape, sizeF, add_scale, pow_scale, blocked)) print 'ResponseNormCrossMapUndo' Check(TestResponseNormCrossMapUndo(images_shape, sizeF, add_scale, pow_scale, blocked)) def Test3D(): batch_size = 128 image_size_x = 32 image_size_y = 24 image_size_t = 12 num_input_channels = 3 num_output_channels = 64 kernel_size_y = 7 kernel_size_x = 7 kernel_size_t = 3 stride_y = 2 stride_x = 2 stride_t = 2 padding_y = 1 padding_x = 1 padding_t = 0 images_shape = (batch_size, image_size_x, image_size_y, num_input_channels, image_size_t) conv_desc = cm.GetConvDesc(num_input_channels, num_output_channels, kernel_size_y, kernel_size_x, stride_y, stride_x, padding_y, padding_x, kernel_size_t, stride_t, padding_t) pool_desc = cm.GetConvDesc(num_input_channels, num_input_channels, kernel_size_y, kernel_size_x, stride_y, stride_x, padding_y, padding_x, kernel_size_t, stride_t, padding_t) print 'ConvUp' Check(TestConvUp3D(images_shape, conv_desc)) print 'ConvDown' Check(TestConvDown3D(images_shape, conv_desc)) print 'ConvOutp' Check(TestConvOutp3D(images_shape, conv_desc)) print 'MaxPool' Check(TestMaxPool3D(images_shape, pool_desc)) print 'MaxPoolUndo' Check(TestMaxPool3DUndo(images_shape, pool_desc)) print 'AvgPool' Check(TestAvgPool3D(images_shape, pool_desc)) print 'AvgPoolUndo' Check(TestAvgPool3DUndo(images_shape, pool_desc)) def main(): print "Testing 2D convolutions" Test2D() print "Testing 3D convolutions" Test3D() if __name__ == '__main__': board = LockGPU() print 'Using board', board main() FreeGPU(board)
bsd-2-clause
jerryz1982/neutron
neutron/db/migration/alembic_migrations/versions/1955efc66455_weight_scheduler.py
47
1036
# Copyright 2015 OpenStack Foundation # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # """weight_scheduler Revision ID: 1955efc66455 Revises: 35a0f3365720 Create Date: 2015-03-12 22:11:37.607390 """ # revision identifiers, used by Alembic. revision = '1955efc66455' down_revision = '35a0f3365720' from alembic import op import sqlalchemy as sa def upgrade(): op.add_column('agents', sa.Column('load', sa.Integer(), server_default='0', nullable=False))
apache-2.0
savoirfairelinux/OpenUpgrade
addons/stock_account/wizard/stock_valuation_history.py
33
8129
from openerp import tools from openerp.osv import fields, osv from openerp.tools.translate import _ class wizard_valuation_history(osv.osv_memory): _name = 'wizard.valuation.history' _description = 'Wizard that opens the stock valuation history table' _columns = { 'choose_date': fields.boolean('Choose a Particular Date'), 'date': fields.datetime('Date', required=True), } _defaults = { 'choose_date': False, 'date': fields.datetime.now, } def open_table(self, cr, uid, ids, context=None): if context is None: context = {} data = self.read(cr, uid, ids, context=context)[0] ctx = context.copy() ctx['history_date'] = data['date'] ctx['search_default_group_by_product'] = True ctx['search_default_group_by_location'] = True return { 'domain': "[('date', '<=', '" + data['date'] + "')]", 'name': _('Stock Value At Date'), 'view_type': 'form', 'view_mode': 'tree,graph', 'res_model': 'stock.history', 'type': 'ir.actions.act_window', 'context': ctx, } class stock_history(osv.osv): _name = 'stock.history' _auto = False _order = 'date asc' def read_group(self, cr, uid, domain, fields, groupby, offset=0, limit=None, context=None, orderby=False, lazy=True): res = super(stock_history, self).read_group(cr, uid, domain, fields, groupby, offset=offset, limit=limit, context=context, orderby=orderby, lazy=lazy) if context is None: context = {} date = context.get('history_date') prod_dict = {} if 'inventory_value' in fields: for line in res: if '__domain' in line: lines = self.search(cr, uid, line['__domain'], context=context) inv_value = 0.0 product_tmpl_obj = self.pool.get("product.template") lines_rec = self.browse(cr, uid, lines, context=context) for line_rec in lines_rec: if not line_rec.product_id.id in prod_dict: if line_rec.product_id.cost_method == 'real': prod_dict[line_rec.product_id.id] = line_rec.price_unit_on_quant else: prod_dict[line_rec.product_id.id] = product_tmpl_obj.get_history_price(cr, uid, line_rec.product_id.product_tmpl_id.id, line_rec.company_id.id, date=date, 
context=context) inv_value += prod_dict[line_rec.product_id.id] * line_rec.quantity line['inventory_value'] = inv_value return res def _get_inventory_value(self, cr, uid, ids, name, attr, context=None): if context is None: context = {} date = context.get('history_date') product_tmpl_obj = self.pool.get("product.template") res = {} for line in self.browse(cr, uid, ids, context=context): if line.product_id.cost_method == 'real': res[line.id] = line.quantity * line.price_unit_on_quant else: res[line.id] = line.quantity * product_tmpl_obj.get_history_price(cr, uid, line.product_id.product_tmpl_id.id, line.company_id.id, date=date, context=context) return res _columns = { 'move_id': fields.many2one('stock.move', 'Stock Move', required=True), 'location_id': fields.many2one('stock.location', 'Location', required=True), 'company_id': fields.many2one('res.company', 'Company'), 'product_id': fields.many2one('product.product', 'Product', required=True), 'product_categ_id': fields.many2one('product.category', 'Product Category', required=True), 'quantity': fields.integer('Product Quantity'), 'date': fields.datetime('Operation Date'), 'price_unit_on_quant': fields.float('Value'), 'inventory_value': fields.function(_get_inventory_value, string="Inventory Value", type='float', readonly=True), 'source': fields.char('Source') } def init(self, cr): tools.drop_view_if_exists(cr, 'stock_history') cr.execute(""" CREATE OR REPLACE VIEW stock_history AS ( SELECT MIN(id) as id, move_id, location_id, company_id, product_id, product_categ_id, SUM(quantity) as quantity, date, price_unit_on_quant, source FROM ((SELECT stock_move.id::text || '-' || quant.id::text AS id, quant.id AS quant_id, stock_move.id AS move_id, dest_location.id AS location_id, dest_location.company_id AS company_id, stock_move.product_id AS product_id, product_template.categ_id AS product_categ_id, quant.qty AS quantity, stock_move.date AS date, quant.cost as price_unit_on_quant, stock_move.origin AS source FROM 
stock_quant as quant, stock_quant_move_rel, stock_move LEFT JOIN stock_location dest_location ON stock_move.location_dest_id = dest_location.id LEFT JOIN stock_location source_location ON stock_move.location_id = source_location.id LEFT JOIN product_product ON product_product.id = stock_move.product_id LEFT JOIN product_template ON product_template.id = product_product.product_tmpl_id WHERE stock_move.state = 'done' AND dest_location.usage in ('internal', 'transit') AND stock_quant_move_rel.quant_id = quant.id AND stock_quant_move_rel.move_id = stock_move.id AND ((source_location.company_id is null and dest_location.company_id is not null) or (source_location.company_id is not null and dest_location.company_id is null) or source_location.company_id != dest_location.company_id) ) UNION (SELECT '-' || stock_move.id::text || '-' || quant.id::text AS id, quant.id AS quant_id, stock_move.id AS move_id, source_location.id AS location_id, source_location.company_id AS company_id, stock_move.product_id AS product_id, product_template.categ_id AS product_categ_id, - quant.qty AS quantity, stock_move.date AS date, quant.cost as price_unit_on_quant, stock_move.origin AS source FROM stock_quant as quant, stock_quant_move_rel, stock_move LEFT JOIN stock_location source_location ON stock_move.location_id = source_location.id LEFT JOIN stock_location dest_location ON stock_move.location_dest_id = dest_location.id LEFT JOIN product_product ON product_product.id = stock_move.product_id LEFT JOIN product_template ON product_template.id = product_product.product_tmpl_id WHERE stock_move.state = 'done' AND source_location.usage in ('internal', 'transit') AND stock_quant_move_rel.quant_id = quant.id AND stock_quant_move_rel.move_id = stock_move.id AND ((dest_location.company_id is null and source_location.company_id is not null) or (dest_location.company_id is not null and source_location.company_id is null) or dest_location.company_id != source_location.company_id) )) AS foo GROUP BY 
move_id, location_id, company_id, product_id, product_categ_id, date, price_unit_on_quant, source )""")
agpl-3.0
pyronimous/flappy
flappy/display/stage.py
2
19536
# encoding: utf-8 import time import flappy from flappy import _core from flappy.events import Event, KeyboardEvent, MouseEvent, TouchEvent from flappy.events import FocusEvent from flappy.geom import Point from flappy.display import DisplayObject, DisplayObjectContainer class StageQuality(object): LOW = 'low' MEDIUM = 'medium' HIGH = 'high' BEST = 'best' _ENUM = [LOW, MEDIUM, HIGH, BEST] class Stage(DisplayObjectContainer, _core._Stage): #public constants DOUBLE_CLICK_INTERVAL = 0.25 #public methods def invalidate(self): self._invalid = True #public properties @property def frameRate(self): return self._frame_rate @frameRate.setter def frameRate(self, value): self._frame_rate = value self._frame_period = value if value <= 0 else 1.0 / value @property def stage(self): return self @property def stageWidth(self): return self.getStageWidth() @property def stageHeight(self): return self.getStageHeight() @property def focus(self): obj_id = self._get_focus_id() return self._find_by_id(obj_id) @focus.setter def focus(self, focus_obj): self._set_focus(focus_obj) @property def quality(self): return StageQuality._ENUM[self.getQuality()] @quality.setter def quality(self, quality): self.setQuality(StageQuality._ENUM.index(quality)) @property def color(self): return self.opaqueBackground @color.setter def color(self, color): self.opaqueBackground = color #private constants _EF_LEFT_DOWN = 0x0001 _EF_SHIFT_DOWN = 0x0002 _EF_CTRL_DOWN = 0x0004 _EF_ALT_DOWN = 0x0008 _EF_COMMAND_DOWN = 0x0010 _EF_LOCATION_RIGHT = 0x4000 _EF_NO_NATIVE_CLICK = 0x10000 _MOUSE_CHANGES = ( MouseEvent.MOUSE_OUT, MouseEvent.MOUSE_OVER, MouseEvent.ROLL_OUT, MouseEvent.ROLL_OVER ) _TOUCH_CHANGES = ( TouchEvent.TOUCH_OUT, TouchEvent.TOUCH_OVER, TouchEvent.TOUCH_ROLL_OUT, TouchEvent.TOUCH_ROLL_OVER ) _S_CLICK_EVENTS = ( MouseEvent.CLICK, MouseEvent.MIDDLE_CLICK, MouseEvent.RIGHT_CLICK ) _S_DOWN_EVENTS = ( MouseEvent.MOUSE_DOWN, MouseEvent.MIDDLE_MOUSE_DOWN, MouseEvent.RIGHT_MOUSE_DOWN ) _S_UP_EVENTS = ( 
MouseEvent.MOUSE_UP, MouseEvent.MIDDLE_MOUSE_UP, MouseEvent.RIGHT_MOUSE_UP ) _EARLY_WAKE_UP = 0.005 #private class variables _current_stage = None #private methods @classmethod def _get_instance(cls): if not cls._current_stage: cls._current_stage = cls() return cls._current_stage def _native_init(self): _core._Stage.__init__(self) def __init__(self): DisplayObjectContainer.__init__(self, "Stage") _core._set_event_handler(self._process_stage_event) self._mouse_over_objects = [] self._focus_over_objects = [] self.active = True self._invalid = False self._last_render = 0.0 self._last_down = [None, None, None] self._last_click_time = 0.0 self._touch_info = {} self._joy_axis_data = {} self._drag_bounds = None self._frame_rate = 0 self._frame_period = 0 self._drag_object = None self._drag_offset_x = 0.0 self._drag_offset_y = 0.0 self.frameRate = 100 if _core._request_render: _core._request_render() self._event_map = { _core.etKeyDown: lambda event: self._on_key(event, KeyboardEvent.KEY_DOWN), _core.etKeyUp: lambda event: self._on_key(event, KeyboardEvent.KEY_UP), _core.etMouseMove: lambda event: self._on_mouse(event, MouseEvent.MOUSE_MOVE, True), _core.etMouseDown: lambda event: self._on_mouse(event, MouseEvent.MOUSE_DOWN, True), _core.etMouseUp: lambda event: self._on_mouse(event, MouseEvent.MOUSE_UP, True), _core.etMouseClick: lambda event: self._on_mouse(event, MouseEvent.CLICK, True), _core.etResize: lambda event: self._on_resize(), _core.etPoll: lambda event: self._poll_timers(), _core.etQuit: lambda event: self._on_quit(), _core.etFocus: self._on_focus, _core.etShouldRotate: self._should_rotate, _core.etDestroyHandler: lambda event: None, _core.etRedraw: lambda event: self._render(True), _core.etTouchBegin: self._on_touch_begin, _core.etTouchMove: self._on_touch_move, _core.etTouchEnd: self._on_touch_end, _core.etChange: self._on_change, _core.etActivate: lambda event: self._set_active(True), _core.etDeactivate: lambda event: self._set_active(False), 
_core.etGotInputFocus: lambda event: self._on_got_input_focus(), _core.etLostInputFocus: lambda event: self._on_lost_input_focus(), _core.etJoyAxisMove: lambda event: self._on_joystick(event, 0), _core.etJoyBallMove: lambda event: self._on_joystick(event, 0), _core.etJoyHatMove: lambda event: self._on_joystick(event, 0), _core.etJoyButtonDown: lambda event: self._on_joystick(event, 0), _core.etJoyButtonUp: lambda event: self._on_joystick(event, 0), } def _process_stage_event(self, event): self._event_map[event.type](event) self._update_next_wake() def _check_render(self): if self.frameRate > 0: now = time.time() if now >= (self._last_render + self._frame_period): self._last_render = now if _core._request_render: _core._request_render() else: self._render(True) def _render(self, send_enterframe): if not self.active: return if send_enterframe: self._broadcast(Event(Event.ENTER_FRAME)) if self._invalid: self._invalid = False self._broadcast(Event(Event.RENDER)) self._render_stage() def _on_quit(self): flappy.stop() def _on_key(self, event, etype): stack = [] obj = self._find_by_id(event.id) if obj is not None: obj._get_interactive_object_stack(stack) if stack: value = event.value if ord('a') <= value <= ord('z'): value -= (ord('a') - ord('A')) obj = stack[0] flags = event.flags key_location = 1 if (flags & self._EF_LOCATION_RIGHT ) else 0 ctrl_down = (flags & self._EF_CTRL_DOWN) != 0 alt_down = (flags & self._EF_ALT_DOWN) != 0 shift_down = (flags & self._EF_SHIFT_DOWN) != 0 evt = KeyboardEvent(etype, bubbles=True, cancelable=True, charCodeValue=event.code, keyCodeValue=value, keyLocationValue=key_location, ctrlKeyValue=ctrl_down, altKeyValue=alt_down, shiftKeyValue=shift_down) obj._fire_event(evt) def _on_change(self, event): obj = self._find_by_id(event.id) if obj is not None: obj._fire_event(Event(Event.CHANGE)) def _on_got_input_focus(self): evt = Event(FocusEvent.FOCUS_IN) self._dispatch_event(evt) def _on_lost_input_focus(self): evt = Event(FocusEvent.FOCUS_OUT) 
self._dispatch_event(evt) def _on_focus(self, event): stack = [] obj = self._find_by_id(event.id) if obj is not None: obj._get_interactive_object_stack(stack) if stack and (event.value == 1 or event.value == 2): obj = stack[0] if event.value == 1: etype = FocusEvent.MOUSE_FOCUS_CHANGE else: etype = FocusEvent.KEY_FOCUS_CHANGE relobj = None if self._focus_over_objects: relobj = self._focus_over_objects[0] evt = FocusEvent(etype, bubbles=True, cancelable=True, relatedObject=relobj, shiftKey=(event.flags > 0), keyCode=event.code) obj._fire_event(evt) if evt.isCancelled: event.result = 1 stack.reverse() self._checkFocusInOuts(event, stack) def _checkFocusInOuts(self, event, stack): new_n = len(stack) new_obj = stack[-1] if stack else None old_n = len(self._focus_over_objects) if self._focus_over_objects: old_obj = self._focus_over_objects[-1] else: old_obj = None if new_obj != old_obj: common = 0 while (common < new_n) and \ (common < old_n) and \ (stack[common] == self._focus_over_objects[common]): common += 1 fout = FocusEvent(FocusEvent.FOCUS_OUT, bubbles=False, cancelable=False, relatedObject=new_obj, shiftKey=(event.flags > 0), keyCode=event.code) i = old_n - 1 while i >= common: self._focus_over_objects[i]._dispatch_event(fout) i -= 1 fin = FocusEvent(FocusEvent.FOCUS_IN, bubbles=False, cancelable=False, relatedObject=old_obj, shiftKey=(event.flags > 0), keyCode=event.code) i = new_n - 1 while i >= common: stack[i]._dispatch_event(fin) i -= 1 self._focus_over_objects = stack def _on_mouse(self, event, event_type, from_mouse): etype = event_type button = event.value if not from_mouse: button = 0 wheel = 0 if event_type == MouseEvent.MOUSE_DOWN: if button > 2: return etype = Stage._S_DOWN_EVENTS[button] elif event_type == MouseEvent.MOUSE_UP: if button > 2: etype = MouseEvent.MOUSE_WHEEL wheel = 1 if button == 3 else -1 else: etype = Stage._S_UP_EVENTS[button] if self._drag_object != None: self._drag(Point(event.x, event.y)) stack = [] obj = 
self._find_by_id(event.id) if obj is not None: obj._get_interactive_object_stack(stack) local = None if stack: obj = stack[0] stack.reverse() local = obj.globalToLocal(Point(event.x, event.y)) evt = MouseEvent._create(etype, event, local, obj) evt.delta = wheel if from_mouse: self._check_in_outs(evt, stack) obj._fire_event(evt) else: local = Point(event.x, event.y) evt = MouseEvent._create(etype, event, local, None) evt.delta = wheel if from_mouse: self._check_in_outs(evt, stack) click_obj = stack[-1] if len(stack) else self if event_type == MouseEvent.MOUSE_DOWN and button < 3: self._last_down[button] = click_obj elif event_type == MouseEvent.MOUSE_UP and button < 3: if click_obj == self._last_down[button]: evt = MouseEvent._create(Stage._S_CLICK_EVENTS[button], event, local, click_obj) click_obj._fire_event(evt) if button == 0 and click_obj.doubleClickEnabled: now = time.time() diff = now - self._last_click_time if diff <= self.DOUBLE_CLICK_INTERVAL: evt = MouseEvent._create(MouseEvent.DOUBLE_CLICK, event, local, click_obj) click_obj._fire_event(evt) self._last_click_time = now self._last_down[button] = None def _check_in_outs(self, event, stack, touch_info=None): if touch_info is not None: prev = touch_info.touchOverObjects mevents = self._TOUCH_CHANGES else: prev = self._mouse_over_objects mevents = self._MOUSE_CHANGES new_n = len(stack) new_obj = stack[-1] if new_n else None old_n = len(prev) old_obj = prev[-1] if old_n else None if new_obj != old_obj: if old_obj is not None: nevent = event._create_similar(mevents[0], new_obj, old_obj) old_obj._fire_event(nevent) if new_obj != None: nevent = event._create_similar(mevents[1], old_obj) new_obj._fire_event(nevent) common = 0 while (common < new_n) and \ (common < old_n) and \ (stack[common] == prev[common]): common += 1 nevent = event._create_similar(mevents[2], new_obj, old_obj) i = old_n - 1 while i >= common: prev[i]._dispatch_event(nevent) i -= 1 nevent = event._create_similar(mevents[3], old_obj) i = new_n - 
1 while i >= common: stack[i]._dispatch_event(nevent) i -= 1 if touch_info: touch_info.touchOverObjects = stack else: self._mouse_over_objects = stack def _on_touch_begin(self, event): touch_info = _TouchInfo() self._touch_info[event.value] = touch_info self._on_touch(event, TouchEvent.TOUCH_BEGIN, touch_info) if event.flags & TouchEvent.efPrimaryTouch: self._on_mouse(event, MouseEvent.MOUSE_DOWN, False) def _on_touch_move(self, event): touch_info = self._touch_info[event.value] self._on_touch(event, TouchEvent.TOUCH_MOVE, touch_info) def _on_touch_end(self, event): touch_info = self._touch_info[event.value] self._on_touch(event, TouchEvent.TOUCH_END, touch_info) del self._touch_info[event.value] if event.flags & TouchEvent.efPrimaryTouch: self._on_mouse(event, MouseEvent.MOUSE_UP, False) def _on_touch(self, event, etype, touch_info): stack = [] obj = self._find_by_id(event.id) if obj is not None: obj._get_interactive_object_stack(stack) if stack: obj = stack[0] stack.reverse() local = obj.globalToLocal(Point(event.x, event.y)) evt = TouchEvent._create(etype, event, local, obj, event.scaleX, event.scaleY) evt.touchPointID = event.value evt.isPrimaryTouchPoint = \ bool(event.flags & TouchEvent.efPrimaryTouch) self._check_in_outs(evt, stack, touch_info) obj._fire_event(evt) if evt.isPrimaryTouchPoint and etype == TouchEvent.TOUCH_MOVE: if self._drag_object: self._drag(Point(event.x, event.y)) evt = MouseEvent._create(MouseEvent.MOUSE_MOVE, event, local, obj) obj._fire_event(evt) else: evt = TouchEvent._create(etype, event, Point(event.x, event.y), None, event.scaleX, event.scaleY) evt.touchPointID = event.value evt.isPrimaryTouchPoint = \ bool(event.flags & TouchEvent.efPrimaryTouch) self._check_in_outs(evt, stack, touch_info) def _drag(self, mouse): parent = self._drag_object.parent if parent is not None: mouse = parent.globalToLocal(mouse) dragobj_x = mouse.x - self._drag_offset_x dragobj_y = mouse.y - self._drag_offset_y if self._drag_bounds: if dragobj_x < 
self._drag_bounds.x: dragobj_x = self._drag_bounds.x elif dragobj_x > self._drag_bounds.right: dragobj_x = self._drag_bounds.right if dragobj_y < self._drag_bounds.y: dragobj_y = self._drag_bounds.y elif dragobj_y > self._drag_bounds.bottom: dragobj_y = self._drag_bounds.bottom self._drag_object.x = dragobj_x self._drag_object.y = dragobj_y def _on_joystick(self, event, event_type): #TODO: joystick handler pass def _should_rotate(self, event): #TODO: 'should rotate' handler pass def _start_drag(self, obj, lock_center, bounds): self._drag_bounds = None if bounds: self._drag_bounds = bounds.clone() self._drag_object = obj if lock_center: self._drag_offset_x = -obj.width * 0.5 self._drag_offset_y = -obj.height * 0.5 else: mouse = Point(self.mouseX, self.mouseY) parent = self._drag_object.parent if parent is not None: mouse = parent.globalToLocal(mouse) self._drag_offset_x = self._drag_object.x - mouse.x self._drag_offset_y = self._drag_object.y - mouse.y def _stop_drag(self, obj): self._drag_bounds = None self._drag_object = None def _on_resize(self): event = Event(Event.RESIZE) self._broadcast(event) if _core._request_render is None: self._render(False) def _poll_timers(self): self._check_render() def _set_active(self, active): if active != self.active: self.active = active if not active: self._last_render = time.time() event = Event(Event.ACTIVATE if active else Event.DEACTIVATE) self._broadcast(event) if active: self._poll_timers() def _update_next_wake(self): next_wake = self._next_frame_due(60.0) self._set_next_wake_delay(next_wake) def _next_frame_due(self, other_timers): if not self.active: return other_timers if self.frameRate > 0: next = self._last_render - time.time() next += (self._frame_period - Stage._EARLY_WAKE_UP) if next < other_timers: return next return other_timers class _TouchInfo(object): def __init__(self): self.touchOverObjects = []
mit
mortada/tensorflow
tensorflow/contrib/learn/python/learn/tests/dataframe/transform_test.py
62
3360
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests of the Transform class.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorflow.contrib.learn.python import learn from tensorflow.contrib.learn.python.learn.dataframe.transform import _make_list_of_series from tensorflow.contrib.learn.python.learn.tests.dataframe import mocks from tensorflow.python.platform import test class TransformTest(test.TestCase): """Tests of the Transform class.""" def test_make_list_of_column(self): col1 = mocks.MockSeries("foo", []) col2 = mocks.MockSeries("bar", []) self.assertEqual([], _make_list_of_series(None)) self.assertEqual([col1], _make_list_of_series(col1)) self.assertEqual([col1], _make_list_of_series([col1])) self.assertEqual([col1, col2], _make_list_of_series([col1, col2])) self.assertEqual([col1, col2], _make_list_of_series((col1, col2))) def test_cache(self): z = mocks.MockSeries("foobar", []) t = mocks.MockTwoOutputTransform("thb", "nth", "snt") cache = {} t.build_transitive([z], cache) self.assertEqual(2, len(cache)) expected_keys = [ "MockTransform(" "{'param_one': 'thb', 'param_three': 'snt', 'param_two': 'nth'})" "(foobar)[out1]", "MockTransform(" "{'param_one': 'thb', 'param_three': 'snt', 'param_two': 'nth'})" "(foobar)[out2]" ] self.assertEqual(expected_keys, 
sorted(cache.keys())) def test_parameters(self): t = mocks.MockTwoOutputTransform("a", "b", "c") self.assertEqual({ "param_one": "a", "param_three": "c", "param_two": "b" }, t.parameters()) def test_parameters_inherited_combined(self): t = mocks.MockTwoOutputTransform("thb", "nth", "snt") expected = {"param_one": "thb", "param_two": "nth", "param_three": "snt"} self.assertEqual(expected, t.parameters()) def test_return_type(self): t = mocks.MockTwoOutputTransform("a", "b", "c") rt = t.return_type self.assertEqual("ReturnType", rt.__name__) self.assertEqual(("out1", "out2"), rt._fields) def test_call(self): t = mocks.MockTwoOutputTransform("a", "b", "c") # MockTwoOutputTransform has input valency 1 input1 = mocks.MockSeries("foobar", []) out1, out2 = t([input1]) # pylint: disable=not-callable self.assertEqual(learn.TransformedSeries, type(out1)) # self.assertEqual(out1.transform, t) # self.assertEqual(out1.output_name, "output1") self.assertEqual(learn.TransformedSeries, type(out2)) # self.assertEqual(out2.transform, t) # self.assertEqual(out2.output_name, "output2") if __name__ == "__main__": test.main()
apache-2.0
mattvick/phantomjs
src/qt/qtwebkit/Tools/Scripts/webkitpy/port/qt.py
113
7883
# Copyright (C) 2010 Google Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following disclaimer # in the documentation and/or other materials provided with the # distribution. # * Neither the Google name nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT # OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT # LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY # THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
"""QtWebKit implementation of the Port interface.""" import glob import logging import re import sys import os import platform from webkitpy.common.memoized import memoized from webkitpy.layout_tests.models.test_configuration import TestConfiguration from webkitpy.port.base import Port from webkitpy.port.xvfbdriver import XvfbDriver _log = logging.getLogger(__name__) class QtPort(Port): ALL_VERSIONS = ['linux', 'win', 'mac'] port_name = "qt" def _wk2_port_name(self): return "qt-wk2" def _port_flag_for_scripts(self): return "--qt" @classmethod def determine_full_port_name(cls, host, options, port_name): if port_name and port_name != cls.port_name: return port_name return port_name + '-' + host.platform.os_name # sys_platform exists only for unit testing. def __init__(self, host, port_name, **kwargs): super(QtPort, self).__init__(host, port_name, **kwargs) self._operating_system = port_name.replace('qt-', '') # FIXME: Why is this being set at all? self._version = self.operating_system() def _generate_all_test_configurations(self): configurations = [] for version in self.ALL_VERSIONS: for build_type in self.ALL_BUILD_TYPES: configurations.append(TestConfiguration(version=version, architecture='x86', build_type=build_type)) return configurations def _build_driver(self): # The Qt port builds DRT as part of the main build step return True def supports_per_test_timeout(self): return True def _path_to_driver(self): return self._build_path('bin/%s' % self.driver_name()) def _path_to_image_diff(self): return self._build_path('bin/ImageDiff') def _path_to_webcore_library(self): if self.operating_system() == 'mac': return self._build_path('lib/QtWebKitWidgets.framework/QtWebKitWidgets') else: return self._build_path('lib/libQt5WebKitWidgets.so') def _modules_to_search_for_symbols(self): # We search in every library to be reliable in the case of building with CONFIG+=force_static_libs_as_shared. 
if self.operating_system() == 'mac': frameworks = glob.glob(os.path.join(self._build_path('lib'), '*.framework')) return [os.path.join(framework, os.path.splitext(os.path.basename(framework))[0]) for framework in frameworks] else: suffix = 'dll' if self.operating_system() == 'win' else 'so' return glob.glob(os.path.join(self._build_path('lib'), 'lib*.' + suffix)) @memoized def qt_version(self): version = '' try: for line in self._executive.run_command(['qmake', '-v']).split('\n'): match = re.search('Qt\sversion\s(?P<version>\d\.\d)', line) if match: version = match.group('version') break except OSError: version = '5.0' return version def _search_paths(self): # qt-mac-wk2 # / # qt-wk1 qt-wk2 # \/ # qt-5.x # \ # (qt-linux|qt-mac|qt-win) # | # qt search_paths = [] if self.get_option('webkit_test_runner'): if self.operating_system() == 'mac': search_paths.append('qt-mac-wk2') search_paths.append('qt-wk2') else: search_paths.append('qt-wk1') search_paths.append('qt-' + self.qt_version()) search_paths.append(self.port_name + '-' + self.operating_system()) search_paths.append(self.port_name) return search_paths def default_baseline_search_path(self): return map(self._webkit_baseline_path, self._search_paths()) def _port_specific_expectations_files(self): paths = self._search_paths() if self.get_option('webkit_test_runner'): paths.append('wk2') # expectations_files() uses the directories listed in _search_paths reversed. # e.g. 
qt -> qt-linux -> qt-5.x -> qt-wk1 return list(reversed([self._filesystem.join(self._webkit_baseline_path(p), 'TestExpectations') for p in paths])) def setup_environ_for_server(self, server_name=None): clean_env = super(QtPort, self).setup_environ_for_server(server_name) clean_env['QTWEBKIT_PLUGIN_PATH'] = self._build_path('lib/plugins') self._copy_value_from_environ_if_set(clean_env, 'QT_DRT_WEBVIEW_MODE') self._copy_value_from_environ_if_set(clean_env, 'DYLD_IMAGE_SUFFIX') self._copy_value_from_environ_if_set(clean_env, 'QT_WEBKIT_LOG') self._copy_value_from_environ_if_set(clean_env, 'DISABLE_NI_WARNING') self._copy_value_from_environ_if_set(clean_env, 'QT_WEBKIT_PAUSE_UI_PROCESS') self._copy_value_from_environ_if_set(clean_env, 'QT_QPA_PLATFORM_PLUGIN_PATH') self._copy_value_from_environ_if_set(clean_env, 'QT_WEBKIT_DISABLE_UIPROCESS_DUMPPIXELS') return clean_env # FIXME: We should find a way to share this implmentation with Gtk, # or teach run-launcher how to call run-safari and move this down to Port. def show_results_html_file(self, results_filename): run_launcher_args = [] if self.get_option('webkit_test_runner'): run_launcher_args.append('-2') run_launcher_args.append("file://%s" % results_filename) self._run_script("run-launcher", run_launcher_args) def operating_system(self): return self._operating_system def check_sys_deps(self, needs_http): result = super(QtPort, self).check_sys_deps(needs_http) if not 'WEBKIT_TESTFONTS' in os.environ: _log.error('\nThe WEBKIT_TESTFONTS environment variable is not defined or not set properly.') _log.error('You must set it before running the tests.') _log.error('Use git to grab the actual fonts from http://gitorious.org/qtwebkit/testfonts') return False return result # Qt port is not ready for parallel testing, see https://bugs.webkit.org/show_bug.cgi?id=77730 for details. def default_child_processes(self): return 1
bsd-3-clause
peterlauri/django
tests/model_indexes/tests.py
22
3199
from django.db import models from django.test import TestCase from .models import Book class IndexesTests(TestCase): def test_repr(self): index = models.Index(fields=['title']) multi_col_index = models.Index(fields=['title', 'author']) self.assertEqual(repr(index), "<Index: fields='title'>") self.assertEqual(repr(multi_col_index), "<Index: fields='title, author'>") def test_eq(self): index = models.Index(fields=['title']) same_index = models.Index(fields=['title']) another_index = models.Index(fields=['title', 'author']) index.model = Book same_index.model = Book another_index.model = Book self.assertEqual(index, same_index) self.assertNotEqual(index, another_index) def test_index_fields_type(self): with self.assertRaisesMessage(ValueError, 'Index.fields must be a list.'): models.Index(fields='title') def test_raises_error_without_field(self): msg = 'At least one field is required to define an index.' with self.assertRaisesMessage(ValueError, msg): models.Index() def test_max_name_length(self): msg = 'Index names cannot be longer than 30 characters.' with self.assertRaisesMessage(ValueError, msg): models.Index(fields=['title'], name='looooooooooooong_index_name_idx') def test_name_constraints(self): msg = 'Index names cannot start with an underscore (_).' with self.assertRaisesMessage(ValueError, msg): models.Index(fields=['title'], name='_name_starting_with_underscore') msg = 'Index names cannot start with a number (0-9).' with self.assertRaisesMessage(ValueError, msg): models.Index(fields=['title'], name='5name_starting_with_number') def test_name_auto_generation(self): index = models.Index(fields=['author']) index.set_name_with_model(Book) self.assertEqual(index.name, 'model_index_author_0f5565_idx') # '-' for DESC columns should be accounted for in the index name. index = models.Index(fields=['-author']) index.set_name_with_model(Book) self.assertEqual(index.name, 'model_index_author_708765_idx') # fields may be truncated in the name. 
db_column is used for naming. long_field_index = models.Index(fields=['pages']) long_field_index.set_name_with_model(Book) self.assertEqual(long_field_index.name, 'model_index_page_co_69235a_idx') # suffix can't be longer than 3 characters. long_field_index.suffix = 'suff' msg = 'Index too long for multiple database support. Is self.suffix longer than 3 characters?' with self.assertRaisesMessage(AssertionError, msg): long_field_index.set_name_with_model(Book) def test_deconstruction(self): index = models.Index(fields=['title']) index.set_name_with_model(Book) path, args, kwargs = index.deconstruct() self.assertEqual(path, 'django.db.models.Index') self.assertEqual(args, ()) self.assertEqual(kwargs, {'fields': ['title'], 'name': 'model_index_title_196f42_idx'})
bsd-3-clause
weimingtom/python-for-android
python3-alpha/python3-src/Doc/includes/sqlite3/text_factory.py
45
1355
import sqlite3 con = sqlite3.connect(":memory:") cur = con.cursor() # Create the table con.execute("create table person(lastname, firstname)") AUSTRIA = "\xd6sterreich" # by default, rows are returned as Unicode cur.execute("select ?", (AUSTRIA,)) row = cur.fetchone() assert row[0] == AUSTRIA # but we can make sqlite3 always return bytestrings ... con.text_factory = str cur.execute("select ?", (AUSTRIA,)) row = cur.fetchone() assert type(row[0]) == str # the bytestrings will be encoded in UTF-8, unless you stored garbage in the # database ... assert row[0] == AUSTRIA.encode("utf-8") # we can also implement a custom text_factory ... # here we implement one that will ignore Unicode characters that cannot be # decoded from UTF-8 con.text_factory = lambda x: str(x, "utf-8", "ignore") cur.execute("select ?", ("this is latin1 and would normally create errors" + "\xe4\xf6\xfc".encode("latin1"),)) row = cur.fetchone() assert type(row[0]) == str # sqlite3 offers a built-in optimized text_factory that will return bytestring # objects, if the data is in ASCII only, and otherwise return unicode objects con.text_factory = sqlite3.OptimizedUnicode cur.execute("select ?", (AUSTRIA,)) row = cur.fetchone() assert type(row[0]) == str cur.execute("select ?", ("Germany",)) row = cur.fetchone() assert type(row[0]) == str
apache-2.0
rhattersley/iris
lib/iris/coords.py
4
74738
# (C) British Crown Copyright 2010 - 2016, Met Office # # This file is part of Iris. # # Iris is free software: you can redistribute it and/or modify it under # the terms of the GNU Lesser General Public License as published by the # Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Iris is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with Iris. If not, see <http://www.gnu.org/licenses/>. """ Definitions of coordinates. """ from __future__ import (absolute_import, division, print_function) from six.moves import (filter, input, map, range, zip) # noqa import six from abc import ABCMeta, abstractproperty import collections import copy from itertools import chain from six.moves import zip_longest import operator import warnings import zlib import biggus import netcdftime import numpy as np import iris.aux_factory import iris.exceptions import iris.time import iris.util from iris._cube_coord_common import CFVariableMixin from iris.util import is_regular class CoordDefn(collections.namedtuple('CoordDefn', ['standard_name', 'long_name', 'var_name', 'units', 'attributes', 'coord_system'])): """ Criterion for identifying a specific type of :class:`DimCoord` or :class:`AuxCoord` based on its metadata. """ __slots__ = () def name(self, default='unknown'): """ Returns a human-readable name. First it tries self.standard_name, then it tries the 'long_name' attribute, then the 'var_name' attribute, before falling back to the value of `default` (which itself defaults to 'unknown'). 
""" return self.standard_name or self.long_name or self.var_name or default def __lt__(self, other): if not isinstance(other, CoordDefn): return NotImplemented def _sort_key(defn): # Emulate Python 2 behaviour with None return (defn.standard_name is not None, defn.standard_name, defn.long_name is not None, defn.long_name, defn.var_name is not None, defn.var_name, defn.units is not None, defn.units, defn.coord_system is not None, defn.coord_system) return _sort_key(self) < _sort_key(other) class CoordExtent(collections.namedtuple('_CoordExtent', ['name_or_coord', 'minimum', 'maximum', 'min_inclusive', 'max_inclusive'])): """Defines a range of values for a coordinate.""" def __new__(cls, name_or_coord, minimum, maximum, min_inclusive=True, max_inclusive=True): """ Create a CoordExtent for the specified coordinate and range of values. Args: * name_or_coord Either a coordinate name or a coordinate, as defined in :meth:`iris.cube.Cube.coords()`. * minimum The minimum value of the range to select. * maximum The maximum value of the range to select. Kwargs: * min_inclusive If True, coordinate values equal to `minimum` will be included in the selection. Default is True. * max_inclusive If True, coordinate values equal to `maximum` will be included in the selection. Default is True. """ return super(CoordExtent, cls).__new__(cls, name_or_coord, minimum, maximum, min_inclusive, max_inclusive) __slots__ = () # Coordinate cell styles. Used in plot and cartography. POINT_MODE = 0 BOUND_MODE = 1 BOUND_POSITION_START = 0 BOUND_POSITION_MIDDLE = 0.5 BOUND_POSITION_END = 1 # Private named tuple class for coordinate groups. _GroupbyItem = collections.namedtuple('GroupbyItem', 'groupby_point, groupby_slice') class Cell(collections.namedtuple('Cell', ['point', 'bound'])): """ An immutable representation of a single cell of a coordinate, including the sample point and/or boundary position. 
Notes on cell comparison: Cells are compared in two ways, depending on whether they are compared to another Cell, or to a number/string. Cell-Cell comparison is defined to produce a strict ordering. If two cells are not exactly equal (i.e. including whether they both define bounds or not) then they will have a consistent relative order. Cell-number and Cell-string comparison is defined to support Constraint matching. The number/string will equal the Cell if, and only if, it is within the Cell (including on the boundary). The relative comparisons (lt, le, ..) are defined to be consistent with this interpretation. So for a given value `n` and Cell `cell`, only one of the following can be true: | n < cell | n == cell | n > cell Similarly, `n <= cell` implies either `n < cell` or `n == cell`. And `n >= cell` implies either `n > cell` or `n == cell`. """ # This subclass adds no attributes. __slots__ = () def __new__(cls, point=None, bound=None): """ Construct a Cell from point or point-and-bound information. """ if point is None: raise ValueError('Point must be defined.') if bound is not None: bound = tuple(bound) if isinstance(point, np.ndarray): point = tuple(point.flatten()) if isinstance(point, (tuple, list)): if len(point) != 1: raise ValueError('Point may only be a list or tuple if it has ' 'length 1.') point = point[0] return super(Cell, cls).__new__(cls, point, bound) def __mod__(self, mod): point = self.point bound = self.bound if point is not None: point = point % mod if bound is not None: bound = tuple([val % mod for val in bound]) return Cell(point, bound) def __add__(self, mod): point = self.point bound = self.bound if point is not None: point = point + mod if bound is not None: bound = tuple([val + mod for val in bound]) return Cell(point, bound) def __hash__(self): return super(Cell, self).__hash__() def __eq__(self, other): """ Compares Cell equality depending on the type of the object to be compared. 
""" if isinstance(other, (int, float, np.number)) or \ hasattr(other, 'timetuple'): if self.bound is not None: return self.contains_point(other) else: return self.point == other elif isinstance(other, Cell): return (self.point == other.point) and (self.bound == other.bound) elif (isinstance(other, six.string_types) and self.bound is None and isinstance(self.point, six.string_types)): return self.point == other else: return NotImplemented # Must supply __ne__, Python does not defer to __eq__ for negative equality def __ne__(self, other): result = self.__eq__(other) if result is not NotImplemented: result = not result return result def __common_cmp__(self, other, operator_method): """ Common method called by the rich comparison operators. The method of checking equality depends on the type of the object to be compared. Cell vs Cell comparison is used to define a strict order. Non-Cell vs Cell comparison is used to define Constraint matching. """ if not (isinstance(other, (int, float, np.number, Cell)) or hasattr(other, 'timetuple')): raise TypeError("Unexpected type of other " "{}.".format(type(other))) if operator_method not in (operator.gt, operator.lt, operator.ge, operator.le): raise ValueError("Unexpected operator_method") # Prevent silent errors resulting from missing netcdftime # behaviour. if (isinstance(other, netcdftime.datetime) or (isinstance(self.point, netcdftime.datetime) and not isinstance(other, iris.time.PartialDateTime))): raise TypeError('Cannot determine the order of ' 'netcdftime.datetime objects') if isinstance(other, Cell): # Cell vs Cell comparison for providing a strict sort order if self.bound is None: if other.bound is None: # Point vs point # - Simple ordering result = operator_method(self.point, other.point) else: # Point vs point-and-bound # - Simple ordering of point values, but if the two # points are equal, we make the arbitrary choice # that the point-only Cell is defined as less than # the point-and-bound Cell. 
if self.point == other.point: result = operator_method in (operator.lt, operator.le) else: result = operator_method(self.point, other.point) else: if other.bound is None: # Point-and-bound vs point # - Simple ordering of point values, but if the two # points are equal, we make the arbitrary choice # that the point-only Cell is defined as less than # the point-and-bound Cell. if self.point == other.point: result = operator_method in (operator.gt, operator.ge) else: result = operator_method(self.point, other.point) else: # Point-and-bound vs point-and-bound # - Primarily ordered on minimum-bound. If the # minimum-bounds are equal, then ordered on # maximum-bound. If the maximum-bounds are also # equal, then ordered on point values. if self.bound[0] == other.bound[0]: if self.bound[1] == other.bound[1]: result = operator_method(self.point, other.point) else: result = operator_method(self.bound[1], other.bound[1]) else: result = operator_method(self.bound[0], other.bound[0]) else: # Cell vs number (or string, or datetime-like) for providing # Constraint behaviour. 
if self.bound is None: # Point vs number # - Simple matching me = self.point else: if hasattr(other, 'timetuple'): raise TypeError('Cannot determine whether a point lies ' 'within a bounded region for ' 'datetime-like objects.') # Point-and-bound vs number # - Match if "within" the Cell if operator_method in [operator.gt, operator.le]: me = min(self.bound) else: me = max(self.bound) result = operator_method(me, other) return result def __ge__(self, other): return self.__common_cmp__(other, operator.ge) def __le__(self, other): return self.__common_cmp__(other, operator.le) def __gt__(self, other): return self.__common_cmp__(other, operator.gt) def __lt__(self, other): return self.__common_cmp__(other, operator.lt) def __str__(self): if self.bound is not None: return repr(self) else: return str(self.point) def contains_point(self, point): """ For a bounded cell, returns whether the given point lies within the bounds. .. note:: The test carried out is equivalent to min(bound) <= point <= max(bound). """ if self.bound is None: raise ValueError('Point cannot exist inside an unbounded cell.') if hasattr(point, 'timetuple') or np.any([hasattr(val, 'timetuple') for val in self.bound]): raise TypeError('Cannot determine whether a point lies within ' 'a bounded region for datetime-like objects.') return np.min(self.bound) <= point <= np.max(self.bound) class Coord(six.with_metaclass(ABCMeta, CFVariableMixin)): """ Abstract superclass for coordinates. """ _MODE_ADD = 1 _MODE_SUB = 2 _MODE_MUL = 3 _MODE_DIV = 4 _MODE_RDIV = 5 _MODE_SYMBOL = {_MODE_ADD: '+', _MODE_SUB: '-', _MODE_MUL: '*', _MODE_DIV: '/', _MODE_RDIV: '/'} def __init__(self, points, standard_name=None, long_name=None, var_name=None, units='1', bounds=None, attributes=None, coord_system=None): """ Constructs a single coordinate. Args: * points: The values (or value in the case of a scalar coordinate) of the coordinate for each cell. 
Kwargs: * standard_name: CF standard name of coordinate * long_name: Descriptive name of coordinate * var_name: CF variable name of coordinate * units The :class:`~cf_units.Unit` of the coordinate's values. Can be a string, which will be converted to a Unit object. * bounds An array of values describing the bounds of each cell. Given n bounds for each cell, the shape of the bounds array should be points.shape + (n,). For example, a 1d coordinate with 100 points and two bounds per cell would have a bounds array of shape (100, 2) * attributes A dictionary containing other cf and user-defined attributes. * coord_system A :class:`~iris.coord_systems.CoordSystem`, e.g. a :class:`~iris.coord_systems.GeogCS` for a longitude Coord. """ #: CF standard name of the quantity that the coordinate represents. self.standard_name = standard_name #: Descriptive name of the coordinate. self.long_name = long_name #: The CF variable name for the coordinate. self.var_name = var_name #: Unit of the quantity that the coordinate represents. self.units = units #: Other attributes, including user specified attributes that #: have no meaning to Iris. self.attributes = attributes #: Relevant CoordSystem (if any). self.coord_system = coord_system self.points = points self.bounds = bounds def __getitem__(self, key): """ Returns a new Coord whose values are obtained by conventional array indexing. .. note:: Indexing of a circular coordinate results in a non-circular coordinate if the overall shape of the coordinate changes after indexing. """ # Turn the key(s) into a full slice spec - i.e. one entry for # each dimension of the coord. full_slice = iris.util._build_full_slice_given_keys(key, self.ndim) # If it's a "null" indexing operation (e.g. coord[:, :]) then # we can preserve deferred loading by avoiding promoting _points # and _bounds to full ndarray instances. 
def is_full_slice(s): return isinstance(s, slice) and s == slice(None, None) if all(is_full_slice(s) for s in full_slice): points = self._points bounds = self._bounds else: points = self._points if isinstance(points, iris.aux_factory._LazyArray): # This triggers the LazyArray to compute its values # (if it hasn't already), which will also trigger any # deferred loading of its dependencies. points = points.view() bounds = self._bounds if isinstance(bounds, iris.aux_factory._LazyArray): bounds = bounds.view() # Make indexing on the cube column based by using the # column_slices_generator (potentially requires slicing the # data multiple times). _, slice_gen = iris.util.column_slices_generator(full_slice, self.ndim) for keys in slice_gen: if points is not None: points = points[keys] if points.shape and min(points.shape) == 0: raise IndexError('Cannot index with zero length ' 'slice.') if bounds is not None: bounds = bounds[keys + (Ellipsis, )] new_coord = self.copy(points=points, bounds=bounds) return new_coord def copy(self, points=None, bounds=None): """ Returns a copy of this coordinate. Kwargs: * points: A points array for the new coordinate. This may be a different shape to the points of the coordinate being copied. * bounds: A bounds array for the new coordinate. Given n bounds for each cell, the shape of the bounds array should be points.shape + (n,). For example, a 1d coordinate with 100 points and two bounds per cell would have a bounds array of shape (100, 2). .. note:: If the points argument is specified and bounds are not, the resulting coordinate will have no bounds. 
""" if points is None and bounds is not None: raise ValueError('If bounds are specified, points must also be ' 'specified') new_coord = copy.deepcopy(self) if points is not None: # Explicitly not using the points property as we don't want the # shape the new points to be constrained by the shape of # self.points new_coord._points = None new_coord.points = points # Regardless of whether bounds are provided as an argument, new # points will result in new bounds, discarding those copied from # self. new_coord.bounds = bounds return new_coord @abstractproperty def points(self): """Property containing the points values as a numpy array""" @abstractproperty def bounds(self): """Property containing the bound values as a numpy array""" def _repr_other_metadata(self): fmt = '' if self.long_name: fmt = ', long_name={self.long_name!r}' if self.var_name: fmt += ', var_name={self.var_name!r}' if len(self.attributes) > 0: fmt += ', attributes={self.attributes}' if self.coord_system: fmt += ', coord_system={self.coord_system}' result = fmt.format(self=self) return result def _str_dates(self, dates_as_numbers): date_obj_array = self.units.num2date(dates_as_numbers) kwargs = {'separator': ', ', 'prefix': ' '} try: # With NumPy 1.7 we need to ask for 'str' formatting. result = np.core.arrayprint.array2string( date_obj_array, formatter={'numpystr': str}, **kwargs) except TypeError: # But in 1.6 we don't need to ask, and the option doesn't # even exist! 
result = np.core.arrayprint.array2string(date_obj_array, **kwargs) return result def __str__(self): if self.units.is_time_reference(): fmt = '{cls}({points}{bounds}' \ ', standard_name={self.standard_name!r}' \ ', calendar={self.units.calendar!r}{other_metadata})' points = self._str_dates(self.points) bounds = '' if self.bounds is not None: bounds = ', bounds=' + self._str_dates(self.bounds) result = fmt.format(self=self, cls=type(self).__name__, points=points, bounds=bounds, other_metadata=self._repr_other_metadata()) else: result = repr(self) return result def __repr__(self): fmt = '{cls}({self.points!r}{bounds}' \ ', standard_name={self.standard_name!r}, units={self.units!r}' \ '{other_metadata})' bounds = '' if self.bounds is not None: bounds = ', bounds=' + repr(self.bounds) result = fmt.format(self=self, cls=type(self).__name__, bounds=bounds, other_metadata=self._repr_other_metadata()) return result def __eq__(self, other): eq = NotImplemented # If the other object has a means of getting its definition, and # whether or not it has_points and has_bounds, then do the # comparison, otherwise return a NotImplemented to let Python try to # resolve the operator elsewhere. 
if hasattr(other, '_as_defn'): # metadata comparison eq = self._as_defn() == other._as_defn() # points comparison if eq: eq = iris.util.array_equal(self.points, other.points) # bounds comparison if eq: if self.bounds is not None and other.bounds is not None: eq = iris.util.array_equal(self.bounds, other.bounds) else: eq = self.bounds is None and other.bounds is None return eq # Must supply __ne__, Python does not defer to __eq__ for negative equality def __ne__(self, other): result = self.__eq__(other) if result is not NotImplemented: result = not result return result def _as_defn(self): defn = CoordDefn(self.standard_name, self.long_name, self.var_name, self.units, self.attributes, self.coord_system) return defn def __binary_operator__(self, other, mode_constant): """ Common code which is called by add, sub, mult and div Mode constant is one of ADD, SUB, MUL, DIV, RDIV .. note:: The unit is *not* changed when doing scalar operations on a coordinate. This means that a coordinate which represents "10 meters" when multiplied by a scalar i.e. "1000" would result in a coordinate of "10000 meters". An alternative approach could be taken to multiply the *unit* by 1000 and the resultant coordinate would represent "10 kilometers". 
""" if isinstance(other, Coord): raise iris.exceptions.NotYetImplementedError( 'coord %s coord' % Coord._MODE_SYMBOL[mode_constant]) elif isinstance(other, (int, float, np.number)): if mode_constant == Coord._MODE_ADD: points = self.points + other elif mode_constant == Coord._MODE_SUB: points = self.points - other elif mode_constant == Coord._MODE_MUL: points = self.points * other elif mode_constant == Coord._MODE_DIV: points = self.points / other elif mode_constant == Coord._MODE_RDIV: points = other / self.points if self.bounds is not None: if mode_constant == Coord._MODE_ADD: bounds = self.bounds + other elif mode_constant == Coord._MODE_SUB: bounds = self.bounds - other elif mode_constant == Coord._MODE_MUL: bounds = self.bounds * other elif mode_constant == Coord._MODE_DIV: bounds = self.bounds / other elif mode_constant == Coord._MODE_RDIV: bounds = other / self.bounds else: bounds = None new_coord = self.copy(points, bounds) return new_coord else: return NotImplemented def __add__(self, other): return self.__binary_operator__(other, Coord._MODE_ADD) def __sub__(self, other): return self.__binary_operator__(other, Coord._MODE_SUB) def __mul__(self, other): return self.__binary_operator__(other, Coord._MODE_MUL) def __div__(self, other): return self.__binary_operator__(other, Coord._MODE_DIV) def __truediv__(self, other): return self.__binary_operator__(other, Coord._MODE_DIV) def __radd__(self, other): return self + other def __rsub__(self, other): return (-self) + other def __rdiv__(self, other): return self.__binary_operator__(other, Coord._MODE_RDIV) def __rtruediv__(self, other): return self.__binary_operator__(other, Coord._MODE_RDIV) def __rmul__(self, other): return self * other def __neg__(self): return self.copy(-self.points, -self.bounds if self.bounds is not None else None) def convert_units(self, unit): """ Change the coordinate's units, converting the values in its points and bounds arrays. 
        For example, if a coordinate's :attr:`~iris.coords.Coord.units`
        attribute is set to radians then::

            coord.convert_units('degrees')

        will change the coordinate's
        :attr:`~iris.coords.Coord.units` attribute to degrees and
        multiply each value in :attr:`~iris.coords.Coord.points` and
        :attr:`~iris.coords.Coord.bounds` by 180.0/:math:`\pi`.

        """
        # If the coord has units convert the values in points (and bounds if
        # present).  NOTE: when the units are unknown no value conversion is
        # performed, but the units attribute is still reassigned below.
        if not self.units.is_unknown():
            self.points = self.units.convert(self.points, unit)
            if self.bounds is not None:
                self.bounds = self.units.convert(self.bounds, unit)
        self.units = unit

    def cells(self):
        """
        Returns an iterable of Cell instances for this Coord.

        For example::

           for cell in self.cells():
              ...

        """
        return _CellIterator(self)

    def _sanity_check_contiguous(self):
        # Contiguity is only defined for 1-d coordinates with exactly two
        # bounds per cell; raise a descriptive error otherwise.
        if self.ndim != 1:
            raise iris.exceptions.CoordinateMultiDimError(
                'Invalid operation for {!r}. Contiguous bounds are not defined'
                ' for multi-dimensional coordinates.'.format(self.name()))
        if self.nbounds != 2:
            raise ValueError(
                'Invalid operation for {!r}, with {} bounds. Contiguous bounds'
                ' are only defined for coordinates with 2 bounds.'.format(
                    self.name(), self.nbounds))

    def is_contiguous(self, rtol=1e-05, atol=1e-08):
        """
        Return True if, and only if, this Coord is bounded with contiguous
        bounds to within the specified relative and absolute tolerances.

        Args:

        * rtol:
            The relative tolerance parameter (default is 1e-05).
        * atol:
            The absolute tolerance parameter (default is 1e-08).

        Returns:
            Boolean.  Always False for an unbounded coordinate.

        """
        if self.bounds is not None:
            self._sanity_check_contiguous()
            # Contiguous iff each cell's lower bound matches the previous
            # cell's upper bound, to within the given tolerances.
            return np.allclose(self.bounds[1:, 0], self.bounds[:-1, 1],
                               rtol=rtol, atol=atol)
        else:
            return False

    def contiguous_bounds(self):
        """
        Returns the N+1 bound values for a contiguous bounded coordinate
        of length N.

        .. note::

            If the coordinate does not have bounds, this method will
            return bounds positioned halfway between the coordinate's points.
""" if self.bounds is None: warnings.warn('Coordinate {!r} is not bounded, guessing ' 'contiguous bounds.'.format(self.name())) bounds = self._guess_bounds() else: self._sanity_check_contiguous() bounds = self.bounds c_bounds = np.resize(bounds[:, 0], bounds.shape[0] + 1) c_bounds[-1] = bounds[-1, 1] return c_bounds def is_monotonic(self): """Return True if, and only if, this Coord is monotonic.""" if self.ndim != 1: raise iris.exceptions.CoordinateMultiDimError(self) if self.shape == (1,): return True if self.points is not None: if not iris.util.monotonic(self.points, strict=True): return False if self.bounds is not None: for b_index in range(self.nbounds): if not iris.util.monotonic(self.bounds[..., b_index], strict=True): return False return True def is_compatible(self, other, ignore=None): """ Return whether the coordinate is compatible with another. Compatibility is determined by comparing :meth:`iris.coords.Coord.name()`, :attr:`iris.coords.Coord.units`, :attr:`iris.coords.Coord.coord_system` and :attr:`iris.coords.Coord.attributes` that are present in both objects. Args: * other: An instance of :class:`iris.coords.Coord` or :class:`iris.coords.CoordDefn`. * ignore: A single attribute key or iterable of attribute keys to ignore when comparing the coordinates. Default is None. To ignore all attributes, set this to other.attributes. Returns: Boolean. """ compatible = (self.name() == other.name() and self.units == other.units and self.coord_system == other.coord_system) if compatible: common_keys = set(self.attributes).intersection(other.attributes) if ignore is not None: if isinstance(ignore, six.string_types): ignore = (ignore,) common_keys = common_keys.difference(ignore) for key in common_keys: if np.any(self.attributes[key] != other.attributes[key]): compatible = False break return compatible @property def dtype(self): """ Abstract property which returns the Numpy data type of the Coordinate. 
""" return self.points.dtype @property def ndim(self): """ Return the number of dimensions of the coordinate (not including the bounded dimension). """ return len(self.shape) @property def nbounds(self): """ Return the number of bounds that this coordinate has (0 for no bounds). """ nbounds = 0 if self.bounds is not None: nbounds = self.bounds.shape[-1] return nbounds def has_bounds(self): return self.bounds is not None @property def shape(self): """The fundamental shape of the Coord, expressed as a tuple.""" # Access the underlying _points attribute to avoid triggering # a deferred load unnecessarily. return self._points.shape def cell(self, index): """ Return the single :class:`Cell` instance which results from slicing the points/bounds with the given index. .. note:: If `iris.FUTURE.cell_datetime_objects` is True, then this method will return Cell objects whose `points` and `bounds` attributes contain either datetime.datetime instances or netcdftime.datetime instances (depending on the calendar). """ index = iris.util._build_full_slice_given_keys(index, self.ndim) point = tuple(np.array(self.points[index], ndmin=1).flatten()) if len(point) != 1: raise IndexError('The index %s did not uniquely identify a single ' 'point to create a cell with.' % (index, )) bound = None if self.bounds is not None: bound = tuple(np.array(self.bounds[index], ndmin=1).flatten()) if iris.FUTURE.cell_datetime_objects: if self.units.is_time_reference(): point = self.units.num2date(point) if bound is not None: bound = self.units.num2date(bound) return Cell(point, bound) def collapsed(self, dims_to_collapse=None): """ Returns a copy of this coordinate, which has been collapsed along the specified dimensions. Replaces the points & bounds with a simple bounded region. .. note:: You cannot partially collapse a multi-dimensional coordinate. See :ref:`cube.collapsed <partially_collapse_multi-dim_coord>` for more information. 
""" if isinstance(dims_to_collapse, (int, np.integer)): dims_to_collapse = [dims_to_collapse] if dims_to_collapse is not None and \ set(range(self.ndim)) != set(dims_to_collapse): raise ValueError('Cannot partially collapse a coordinate (%s).' % self.name()) if np.issubdtype(self.dtype, np.str): # Collapse the coordinate by serializing the points and # bounds as strings. serialize = lambda x: '|'.join([str(i) for i in x.flatten()]) bounds = None string_type_fmt = 'S{}' if six.PY2 else 'U{}' if self.bounds is not None: shape = self.bounds.shape[1:] bounds = [] for index in np.ndindex(shape): index_slice = (slice(None),) + tuple(index) bounds.append(serialize(self.bounds[index_slice])) dtype = np.dtype(string_type_fmt.format(max(map(len, bounds)))) bounds = np.array(bounds, dtype=dtype).reshape((1,) + shape) points = serialize(self.points) dtype = np.dtype(string_type_fmt.format(len(points))) # Create the new collapsed coordinate. coord = self.copy(points=np.array(points, dtype=dtype), bounds=bounds) else: # Collapse the coordinate by calculating the bounded extremes. if self.ndim > 1: msg = 'Collapsing a multi-dimensional coordinate. ' \ 'Metadata may not be fully descriptive for {!r}.' warnings.warn(msg.format(self.name())) elif not self.is_contiguous(): msg = 'Collapsing a non-contiguous coordinate. ' \ 'Metadata may not be fully descriptive for {!r}.' warnings.warn(msg.format(self.name())) # Create bounds for the new collapsed coordinate. item = self.bounds if self.bounds is not None else self.points lower, upper = np.min(item), np.max(item) bounds_dtype = item.dtype bounds = [lower, upper] # Create points for the new collapsed coordinate. points_dtype = self.points.dtype points = [(lower + upper) * 0.5] # Create the new collapsed coordinate. coord = self.copy(points=np.array(points, dtype=points_dtype), bounds=np.array(bounds, dtype=bounds_dtype)) return coord def _guess_bounds(self, bound_position=0.5): """ Return bounds for this coordinate based on its points. 
Kwargs: * bound_position: The desired position of the bounds relative to the position of the points. Returns: A numpy array of shape (len(self.points), 2). .. note:: This method only works for coordinates with ``coord.ndim == 1``. .. note:: If `iris.FUTURE.clip_latitudes` is True, then this method will clip the coordinate bounds to the range [-90, 90] when: - it is a `latitude` or `grid_latitude` coordinate, - the units are degrees, - all the points are in the range [-90, 90]. """ # XXX Consider moving into DimCoord # ensure we have monotonic points if not self.is_monotonic(): raise ValueError("Need monotonic points to generate bounds for %s" % self.name()) if self.ndim != 1: raise iris.exceptions.CoordinateMultiDimError(self) if self.shape[0] < 2: raise ValueError('Cannot guess bounds for a coordinate of length ' '1.') if self.bounds is not None: raise ValueError('Coord already has bounds. Remove the bounds ' 'before guessing new ones.') if getattr(self, 'circular', False): points = np.empty(self.points.shape[0] + 2) points[1:-1] = self.points direction = 1 if self.points[-1] > self.points[0] else -1 points[0] = self.points[-1] - (self.units.modulus * direction) points[-1] = self.points[0] + (self.units.modulus * direction) diffs = np.diff(points) else: diffs = np.diff(self.points) diffs = np.insert(diffs, 0, diffs[0]) diffs = np.append(diffs, diffs[-1]) min_bounds = self.points - diffs[:-1] * bound_position max_bounds = self.points + diffs[1:] * (1 - bound_position) bounds = np.array([min_bounds, max_bounds]).transpose() if (iris.FUTURE.clip_latitudes and self.name() in ('latitude', 'grid_latitude') and self.units == 'degree'): points = self.points if (points >= -90).all() and (points <= 90).all(): np.clip(bounds, -90, 90, out=bounds) return bounds def guess_bounds(self, bound_position=0.5): """ Add contiguous bounds to a coordinate, calculated from its points. 
        Puts a cell boundary at the specified fraction between each point and
        the next, plus extrapolated lowermost and uppermost bound points, so
        that each point lies within a cell.

        With regularly spaced points, the resulting bounds will also be
        regular, and all points lie at the same position within their cell.
        With irregular points, the first and last cells are given the same
        widths as the ones next to them.

        Kwargs:

        * bound_position:
            The desired position of the bounds relative to the position
            of the points.

        .. note::

            An error is raised if the coordinate already has bounds, is not
            one-dimensional, or is not monotonic.

        .. note::

            Unevenly spaced values, such as from a wrapped longitude range,
            can produce unexpected results.  In such cases you should assign
            suitable values directly to the bounds property, instead.

        .. note::

            If `iris.FUTURE.clip_latitudes` is True, then this method
            will clip the coordinate bounds to the range [-90, 90] when:

            - it is a `latitude` or `grid_latitude` coordinate,
            - the units are degrees,
            - all the points are in the range [-90, 90].

        """
        # All validation and calculation is delegated to _guess_bounds; this
        # method only assigns the result in place.
        self.bounds = self._guess_bounds(bound_position)

    def intersect(self, other, return_indices=False):
        """
        Returns a new coordinate from the intersection of two coordinates.

        Both coordinates must be compatible as defined by
        :meth:`~iris.coords.Coord.is_compatible`.

        Kwargs:

        * return_indices:
            If True, changes the return behaviour to return the intersection
            indices for the "self" coordinate.

        Raises:
            ValueError if the coordinates are not compatible, or if
            `return_indices` is False and the intersection is empty.

        """
        if not self.is_compatible(other):
            msg = 'The coordinates cannot be intersected. They are not ' \
                  'compatible because of differing metadata.'
            raise ValueError(msg)

        # Cache self.cells for speed. We can also use the index operation on a
        # list conveniently.
        self_cells = [cell for cell in self.cells()]

        # Maintain a list of indices on self for which cells exist in both
        # self and other.
self_intersect_indices = [] for cell in other.cells(): try: self_intersect_indices.append(self_cells.index(cell)) except ValueError: pass if return_indices is False and self_intersect_indices == []: raise ValueError('No intersection between %s coords possible.' % self.name()) self_intersect_indices = np.array(self_intersect_indices) # Return either the indices, or a Coordinate instance of the # intersection. if return_indices: return self_intersect_indices else: return self[self_intersect_indices] def nearest_neighbour_index(self, point): """ Returns the index of the cell nearest to the given point. Only works for one-dimensional coordinates. .. note:: If the coordinate contains bounds, these will be used to determine the nearest neighbour instead of the point values. .. note:: For circular coordinates, the 'nearest' point can wrap around to the other end of the values. """ points = self.points bounds = self.bounds if self.has_bounds() else np.array([]) if self.ndim != 1: raise ValueError('Nearest-neighbour is currently limited' ' to one-dimensional coordinates.') do_circular = getattr(self, 'circular', False) if do_circular: wrap_modulus = self.units.modulus # wrap 'point' to a range based on lowest points or bounds value. wrap_origin = np.min(np.hstack((points, bounds.flatten()))) point = wrap_origin + (point - wrap_origin) % wrap_modulus # Calculate the nearest neighbour. 
# The algorithm: given a single value (V), # if coord has bounds, # make bounds cells complete and non-overlapping # return first cell containing V # else (no bounds), # find the point which is closest to V # or if two are equally close, return the lowest index if self.has_bounds(): # make bounds ranges complete+separate, so point is in at least one increasing = self.bounds[0, 1] > self.bounds[0, 0] bounds = bounds.copy() # sort the bounds cells by their centre values sort_inds = np.argsort(np.mean(bounds, axis=1)) bounds = bounds[sort_inds] # replace all adjacent bounds with their averages if increasing: mid_bounds = 0.5 * (bounds[:-1, 1] + bounds[1:, 0]) bounds[:-1, 1] = mid_bounds bounds[1:, 0] = mid_bounds else: mid_bounds = 0.5 * (bounds[:-1, 0] + bounds[1:, 1]) bounds[:-1, 0] = mid_bounds bounds[1:, 1] = mid_bounds # if point lies beyond either end, fix the end cell to include it bounds[0, 0] = min(point, bounds[0, 0]) bounds[-1, 1] = max(point, bounds[-1, 1]) # get index of first-occurring cell that contains the point inside_cells = np.logical_and(point >= np.min(bounds, axis=1), point <= np.max(bounds, axis=1)) result_index = np.where(inside_cells)[0][0] # return the original index of the cell (before the bounds sort) result_index = sort_inds[result_index] # Or, if no bounds, we always have points ... 
else: if do_circular: # add an extra, wrapped max point (simpler than bounds case) # NOTE: circular implies a DimCoord, so *must* be monotonic if points[-1] >= points[0]: # ascending value order : add wrapped lowest value to end index_offset = 0 points = np.hstack((points, points[0] + wrap_modulus)) else: # descending order : add wrapped lowest value at start index_offset = 1 points = np.hstack((points[-1] + wrap_modulus, points)) # return index of first-occurring nearest point distances = np.abs(points - point) result_index = np.where(distances == np.min(distances))[0][0] if do_circular: # convert index back from circular-adjusted points result_index = (result_index - index_offset) % self.shape[0] return result_index def xml_element(self, doc): """Return a DOM element describing this Coord.""" # Create the XML element as the camelCaseEquivalent of the # class name. element_name = type(self).__name__ element_name = element_name[0].lower() + element_name[1:] element = doc.createElement(element_name) element.setAttribute('id', self._xml_id()) if self.standard_name: element.setAttribute('standard_name', str(self.standard_name)) if self.long_name: element.setAttribute('long_name', str(self.long_name)) if self.var_name: element.setAttribute('var_name', str(self.var_name)) element.setAttribute('units', repr(self.units)) if self.attributes: attributes_element = doc.createElement('attributes') for name in sorted(six.iterkeys(self.attributes)): attribute_element = doc.createElement('attribute') attribute_element.setAttribute('name', name) attribute_element.setAttribute('value', str(self.attributes[name])) attributes_element.appendChild(attribute_element) element.appendChild(attributes_element) # Add a coord system sub-element? 
if self.coord_system: element.appendChild(self.coord_system.xml_element(doc)) # Add the values element.setAttribute('value_type', str(self._value_type_name())) element.setAttribute('shape', str(self.shape)) if hasattr(self.points, 'to_xml_attr'): element.setAttribute('points', self.points.to_xml_attr()) else: element.setAttribute('points', iris.util.format_array(self.points)) if self.bounds is not None: if hasattr(self.bounds, 'to_xml_attr'): element.setAttribute('bounds', self.bounds.to_xml_attr()) else: element.setAttribute('bounds', iris.util.format_array(self.bounds)) return element def _xml_id(self): # Returns a consistent, unique string identifier for this coordinate. unique_value = b'' if self.standard_name: unique_value += self.standard_name.encode('utf-8') unique_value += b'\0' if self.long_name: unique_value += self.long_name.encode('utf-8') unique_value += b'\0' unique_value += str(self.units).encode('utf-8') + b'\0' for k, v in sorted(self.attributes.items()): unique_value += (str(k) + ':' + str(v)).encode('utf-8') + b'\0' unique_value += str(self.coord_system).encode('utf-8') + b'\0' # Mask to ensure consistency across Python versions & platforms. crc = zlib.crc32(unique_value) & 0xffffffff return '%08x' % (crc, ) def _value_type_name(self): """ A simple, readable name for the data type of the Coord point/bound values. """ values = self.points dtype = values.dtype kind = dtype.kind if kind in 'SU': # Establish the basic type name for 'string' type data. # N.B. this means "unicode" in Python3, and "str" in Python2. value_type_name = 'string' # Override this if not the 'native' string type. if six.PY3: if kind == 'S': value_type_name = 'bytes' else: if kind == 'U': value_type_name = 'unicode' else: value_type_name = dtype.name return value_type_name class DimCoord(Coord): """ A coordinate that is 1D, numeric, and strictly monotonic. 
""" @staticmethod def from_coord(coord): """Create a new DimCoord from the given coordinate.""" return DimCoord(coord.points, standard_name=coord.standard_name, long_name=coord.long_name, var_name=coord.var_name, units=coord.units, bounds=coord.bounds, attributes=coord.attributes, coord_system=copy.deepcopy(coord.coord_system), circular=getattr(coord, 'circular', False)) @classmethod def from_regular(cls, zeroth, step, count, standard_name=None, long_name=None, var_name=None, units='1', attributes=None, coord_system=None, circular=False, with_bounds=False): """ Create a :class:`DimCoord` with regularly spaced points, and optionally bounds. The majority of the arguments are defined as for :meth:`Coord.__init__`, but those which differ are defined below. Args: * zeroth: The value *prior* to the first point value. * step: The numeric difference between successive point values. * count: The number of point values. Kwargs: * with_bounds: If True, the resulting DimCoord will possess bound values which are equally spaced around the points. Otherwise no bounds values will be defined. Defaults to False. 
""" coord = DimCoord.__new__(cls) coord.standard_name = standard_name coord.long_name = long_name coord.var_name = var_name coord.units = units coord.attributes = attributes coord.coord_system = coord_system coord.circular = circular points = (zeroth+step) + step*np.arange(count, dtype=np.float32) points.flags.writeable = False coord._points = points if not is_regular(coord) and count > 1: points = (zeroth+step) + step*np.arange(count, dtype=np.float64) points.flags.writeable = False coord._points = points if with_bounds: delta = 0.5 * step bounds = np.concatenate([[points - delta], [points + delta]]).T bounds.flags.writeable = False coord._bounds = bounds else: coord._bounds = None return coord def __init__(self, points, standard_name=None, long_name=None, var_name=None, units='1', bounds=None, attributes=None, coord_system=None, circular=False): """ Create a 1D, numeric, and strictly monotonic :class:`Coord` with read-only points and bounds. """ Coord.__init__(self, points, standard_name=standard_name, long_name=long_name, var_name=var_name, units=units, bounds=bounds, attributes=attributes, coord_system=coord_system) #: Whether the coordinate wraps by ``coord.units.modulus``. self.circular = bool(circular) def __deepcopy__(self, memo): """ coord.__deepcopy__() -> Deep copy of coordinate. Used if copy.deepcopy is called on a coordinate. """ new_coord = copy.deepcopy(super(Coord, self), memo) # Ensure points and bounds arrays are read-only new_coord._points.flags.writeable = False if new_coord._bounds is not None: new_coord._bounds.flags.writeable = False return new_coord def copy(self, points=None, bounds=None): new_coord = super(DimCoord, self).copy(points=points, bounds=bounds) # Make the array read-only. new_coord._points.flags.writeable = False if new_coord._bounds is not None: new_coord._bounds.flags.writeable = False return new_coord def __eq__(self, other): # TODO investigate equality of AuxCoord and DimCoord if circular is # False. 
result = NotImplemented if isinstance(other, DimCoord): result = (Coord.__eq__(self, other) and self.circular == other.circular) return result # The __ne__ operator from Coord implements the not __eq__ method. # This is necessary for merging, but probably shouldn't be used otherwise. # See #962 and #1772. def __hash__(self): return hash(id(self)) def __getitem__(self, key): coord = super(DimCoord, self).__getitem__(key) coord.circular = self.circular and coord.shape == self.shape return coord def collapsed(self, dims_to_collapse=None): coord = Coord.collapsed(self, dims_to_collapse=dims_to_collapse) if self.circular and self.units.modulus is not None: bnds = coord.bounds.copy() bnds[0, 1] = coord.bounds[0, 0] + self.units.modulus coord.bounds = bnds coord.points = np.array(np.sum(coord.bounds) * 0.5, dtype=self.points.dtype) # XXX This isn't actually correct, but is ported from the old world. coord.circular = False return coord def _repr_other_metadata(self): result = Coord._repr_other_metadata(self) if self.circular: result += ', circular=%r' % self.circular return result @property def points(self): """The local points values as a read-only NumPy array.""" points = self._points.view() return points @points.setter def points(self, points): points = np.array(points, ndmin=1) # If points are already defined for this coordinate, if hasattr(self, '_points') and self._points is not None: # Check that setting these points wouldn't change self.shape if points.shape != self.shape: raise ValueError("New points shape must match existing points " "shape.") # Checks for 1d, numeric, monotonic if points.ndim != 1: raise ValueError('The points array must be 1-dimensional.') if not np.issubdtype(points.dtype, np.number): raise ValueError('The points array must be numeric.') if len(points) > 1 and not iris.util.monotonic(points, strict=True): raise ValueError('The points array must be strictly monotonic.') # Make the array read-only. 
points.flags.writeable = False self._points = points @property def bounds(self): """ The bounds values as a read-only NumPy array, or None if no bounds have been set. """ bounds = None if self._bounds is not None: bounds = self._bounds.view() return bounds @bounds.setter def bounds(self, bounds): if bounds is not None: # Ensure the bounds are a compatible shape. bounds = np.array(bounds, ndmin=2) if self.shape != bounds.shape[:-1]: raise ValueError( "The shape of the bounds array should be " "points.shape + (n_bounds,)") # Checks for numeric and monotonic if not np.issubdtype(bounds.dtype, np.number): raise ValueError('The bounds array must be numeric.') n_bounds = bounds.shape[-1] n_points = bounds.shape[0] if n_points > 1: directions = set() for b_index in range(n_bounds): monotonic, direction = iris.util.monotonic( bounds[:, b_index], strict=True, return_direction=True) if not monotonic: raise ValueError('The bounds array must be strictly ' 'monotonic.') directions.add(direction) if len(directions) != 1: raise ValueError('The direction of monotonicity must be ' 'consistent across all bounds') # Ensure the array is read-only. bounds.flags.writeable = False self._bounds = bounds def is_monotonic(self): return True def xml_element(self, doc): """Return DOM element describing this :class:`iris.coords.DimCoord`.""" element = super(DimCoord, self).xml_element(doc) if self.circular: element.setAttribute('circular', str(self.circular)) return element class AuxCoord(Coord): """A CF auxiliary coordinate.""" @staticmethod def from_coord(coord): """Create a new AuxCoord from the given coordinate.""" new_coord = AuxCoord(coord.points, standard_name=coord.standard_name, long_name=coord.long_name, var_name=coord.var_name, units=coord.units, bounds=coord.bounds, attributes=coord.attributes, coord_system=copy.deepcopy(coord.coord_system)) return new_coord def _sanitise_array(self, src, ndmin): # Ensure the array is writeable. # NB. 
Returns the *same object* if src is already writeable. result = np.require(src, requirements='W') # Ensure the array has enough dimensions. # NB. Returns the *same object* if result.ndim >= ndmin result = np.array(result, ndmin=ndmin, copy=False) # We don't need to copy the data, but we do need to have our # own view so we can control the shape, etc. result = result.view() return result @property def points(self): """Property containing the points values as a numpy array""" points = self._points if isinstance(points, biggus.Array): points = points.ndarray() self._points = points return points.view() @points.setter def points(self, points): # Set the points to a new array - as long as it's the same shape. # With the exception of LazyArrays, ensure points has an ndmin # of 1 and is either a numpy or biggus array. # This will avoid Scalar coords with points of shape () rather # than the desired (1,) if isinstance(points, biggus.Array): if points.shape == (): points = biggus.ConstantArray((1,), points.ndarray(), points.dtype) elif not isinstance(points, iris.aux_factory._LazyArray): points = self._sanitise_array(points, 1) # If points are already defined for this coordinate, if hasattr(self, '_points') and self._points is not None: # Check that setting these points wouldn't change self.shape if points.shape != self.shape: raise ValueError("New points shape must match existing points " "shape.") self._points = points @property def bounds(self): """ Property containing the bound values, as a numpy array, or None if no bound values are defined. .. note:: The shape of the bound array should be: ``points.shape + (n_bounds, )``. """ if self._bounds is not None: bounds = self._bounds if isinstance(bounds, biggus.Array): bounds = bounds.ndarray() self._bounds = bounds bounds = bounds.view() else: bounds = None return bounds @bounds.setter def bounds(self, bounds): # Ensure the bounds are a compatible shape. 
if bounds is not None: if not isinstance(bounds, (iris.aux_factory._LazyArray, biggus.Array)): bounds = self._sanitise_array(bounds, 2) # NB. Use _points to avoid triggering any lazy array. if self._points.shape != bounds.shape[:-1]: raise ValueError("Bounds shape must be compatible with points " "shape.") self._bounds = bounds # This is necessary for merging, but probably shouldn't be used otherwise. # See #962 and #1772. def __hash__(self): return hash(id(self)) class CellMeasure(six.with_metaclass(ABCMeta, CFVariableMixin)): """ A CF Cell Measure, providing area or volume properties of a cell where these cannot be inferred from the Coordinates and Coordinate Reference System. """ def __init__(self, data, standard_name=None, long_name=None, var_name=None, units='1', attributes=None, measure=None): """ Constructs a single cell measure. Args: * data: The values of the measure for each cell. Kwargs: * standard_name: CF standard name of coordinate * long_name: Descriptive name of coordinate * var_name: CF variable name of coordinate * units The :class:`~cf_units.Unit` of the coordinate's values. Can be a string, which will be converted to a Unit object. * attributes A dictionary containing other CF and user-defined attributes. * measure A string describing the type of measure. 'area' and 'volume' are the only valid entries. """ #: CF standard name of the quantity that the coordinate represents. self.standard_name = standard_name #: Descriptive name of the coordinate. self.long_name = long_name #: The CF variable name for the coordinate. self.var_name = var_name #: Unit of the quantity that the coordinate represents. self.units = units #: Other attributes, including user specified attributes that #: have no meaning to Iris. 
self.attributes = attributes self.data = data self.measure = measure @property def measure(self): return self._measure @property def data(self): """Property containing the data values as a numpy array""" data = self._data if isinstance(data, biggus.Array): data = data.ndarray() self._data = data return data.view() @data.setter def data(self, data): # Set the data to a new array - as long as it's the same shape. # If data are already defined for this CellMeasure, if data is None: raise ValueError('The data payload of a CellMeasure may not be ' 'None; it must be a numpy array or equivalent.') if data.shape == (): data = np.array(data, ndmin=1) if hasattr(self, '_data') and self._data is not None: # Check that setting these data wouldn't change self.shape if data.shape != self.shape: raise ValueError("New data shape must match existing data " "shape.") self._data = data @property def shape(self): """The fundamental shape of the Cell Measure, expressed as a tuple.""" # Access the underlying _data attribute to avoid triggering # a deferred load unnecessarily. return self._data.shape @property def ndim(self): """ Return the number of dimensions of the cell measure. """ return self._data.ndim @measure.setter def measure(self, measure): if measure not in ['area', 'volume']: raise ValueError("measure must be 'area' or 'volume', " "not {}".format(measure)) self._measure = measure def __getitem__(self, key): """ Returns a new CellMeasure whose values are obtained by conventional array indexing. """ # Turn the key(s) into a full slice spec - i.e. one entry for # each dimension of the cell_measure. full_slice = iris.util._build_full_slice_given_keys(key, self.ndim) # If it's a "null" indexing operation (e.g. cell_measure[:, :]) then # we can preserve deferred loading by avoiding promoting _data # and _bounds to full ndarray instances. 
def is_full_slice(s): return isinstance(s, slice) and s == slice(None, None) data = self._data if not all(is_full_slice(s) for s in full_slice): data = self._data # Make indexing on the cube column based by using the # column_slices_generator (potentially requires slicing the # data multiple times). _, slice_gen = iris.util.column_slices_generator(full_slice, self.ndim) for keys in slice_gen: if data is not None: data = data[keys] if data.shape and min(data.shape) == 0: raise IndexError('Cannot index with zero length ' 'slice.') new_cell_measure = self.copy(data=data) return new_cell_measure def copy(self, data=None): """ Returns a copy of this CellMeasure. Kwargs: * data: A data array for the new cell_measure. This may be a different shape to the data of the cell_measure being copied. """ new_cell_measure = copy.deepcopy(self) if data is not None: # Explicitly not using the data property as we don't want the # shape the new data to be constrained by the shape of # self.data new_cell_measure._data = None new_cell_measure.data = data return new_cell_measure def _repr_other_metadata(self): fmt = '' if self.long_name: fmt = ', long_name={self.long_name!r}' if self.var_name: fmt += ', var_name={self.var_name!r}' if len(self.attributes) > 0: fmt += ', attributes={self.attributes}' result = fmt.format(self=self) return result def __str__(self): result = repr(self) return result def __repr__(self): fmt = ('{cls}({self.data!r}' ', measure={self.measure}, standard_name={self.standard_name!r}' ', units={self.units!r}{other_metadata})') result = fmt.format(self=self, cls=type(self).__name__, other_metadata=self._repr_other_metadata()) return result def _as_defn(self): defn = (self.standard_name, self.long_name, self.var_name, self.units, self.attributes, self.measure) return defn def __eq__(self, other): eq = NotImplemented if isinstance(other, CellMeasure): eq = self._as_defn() == other._as_defn() if eq: eq = (self.data == other.data).all() return eq def __ne__(self, other): 
result = self.__eq__(other) if result is not NotImplemented: result = not result return result class CellMethod(iris.util._OrderedHashable): """ Represents a sub-cell pre-processing operation. """ # Declare the attribute names relevant to the _OrderedHashable behaviour. _names = ('method', 'coord_names', 'intervals', 'comments') #: The name of the operation that was applied. e.g. "mean", "max", etc. method = None #: The tuple of coordinate names over which the operation was applied. coord_names = None #: A description of the original intervals over which the operation #: was applied. intervals = None #: Additional comments. comments = None def __init__(self, method, coords=None, intervals=None, comments=None): """ Args: * method: The name of the operation. Kwargs: * coords: A single instance or sequence of :class:`.Coord` instances or coordinate names. * intervals: A single string, or a sequence strings, describing the intervals within the cell method. * comments: A single string, or a sequence strings, containing any additional comments. 
""" if not isinstance(method, six.string_types): raise TypeError("'method' must be a string - got a '%s'" % type(method)) _coords = [] if coords is None: pass elif isinstance(coords, Coord): _coords.append(coords.name()) elif isinstance(coords, six.string_types): _coords.append(coords) else: normalise = (lambda coord: coord.name() if isinstance(coord, Coord) else coord) _coords.extend([normalise(coord) for coord in coords]) _intervals = [] if intervals is None: pass elif isinstance(intervals, six.string_types): _intervals = [intervals] else: _intervals.extend(intervals) _comments = [] if comments is None: pass elif isinstance(comments, six.string_types): _comments = [comments] else: _comments.extend(comments) self._init(method, tuple(_coords), tuple(_intervals), tuple(_comments)) def __str__(self): """Return a custom string representation of CellMethod""" # Group related coord names intervals and comments together cell_components = zip_longest(self.coord_names, self.intervals, self.comments, fillvalue="") collection_summaries = [] cm_summary = "%s: " % self.method for coord_name, interval, comment in cell_components: other_info = ", ".join(filter(None, chain((interval, comment)))) if other_info: coord_summary = "%s (%s)" % (coord_name, other_info) else: coord_summary = "%s" % coord_name collection_summaries.append(coord_summary) return cm_summary + ", ".join(collection_summaries) def __add__(self, other): # Disable the default tuple behaviour of tuple concatenation raise NotImplementedError() def xml_element(self, doc): """ Return a dom element describing itself """ cellMethod_xml_element = doc.createElement('cellMethod') cellMethod_xml_element.setAttribute('method', self.method) for coord_name, interval, comment in zip_longest(self.coord_names, self.intervals, self.comments): coord_xml_element = doc.createElement('coord') if coord_name is not None: coord_xml_element.setAttribute('name', coord_name) if interval is not None: 
coord_xml_element.setAttribute('interval', interval) if comment is not None: coord_xml_element.setAttribute('comment', comment) cellMethod_xml_element.appendChild(coord_xml_element) return cellMethod_xml_element # See Coord.cells() for the description/context. class _CellIterator(collections.Iterator): def __init__(self, coord): self._coord = coord if coord.ndim != 1: raise iris.exceptions.CoordinateMultiDimError(coord) self._indices = iter(range(coord.shape[0])) def __next__(self): # NB. When self._indices runs out it will raise StopIteration for us. i = next(self._indices) return self._coord.cell(i) next = __next__ # See ExplicitCoord._group() for the description/context. class _GroupIterator(collections.Iterator): def __init__(self, points): self._points = points self._start = 0 def __next__(self): num_points = len(self._points) if self._start >= num_points: raise StopIteration stop = self._start + 1 m = self._points[self._start] while stop < num_points and self._points[stop] == m: stop += 1 group = _GroupbyItem(m, slice(self._start, stop)) self._start = stop return group next = __next__
lgpl-3.0
Tatsh-ansible/ansible
lib/ansible/modules/cloud/azure/azure_rm_publicipaddress_facts.py
10
5699
#!/usr/bin/python
#
# Copyright (c) 2016 Matt Davis, <mdavis@ansible.com>
#                    Chris Houseknecht, <house@redhat.com>
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)

from __future__ import absolute_import, division, print_function
__metaclass__ = type


ANSIBLE_METADATA = {'metadata_version': '1.0',
                    'status': ['preview'],
                    'supported_by': 'curated'}


DOCUMENTATION = '''
---
module: azure_rm_publicipaddress_facts

version_added: "2.1"

short_description: Get public IP facts.

description:
    - Get facts for a specific public IP or all public IPs within a resource group.

options:
    name:
        description:
            - Only show results for a specific Public IP.
        required: false
        default: null
    resource_group:
        description:
            - Limit results by resource group. Required when using name parameter.
        required: false
        default: null
    tags:
        description:
            - Limit results by providing a list of tags. Format tags as 'key' or 'key:value'.
        required: false
        default: null

extends_documentation_fragment:
    - azure

author:
    - "Chris Houseknecht (@chouseknecht)"
    - "Matt Davis (@nitzmahone)"

'''

EXAMPLES = '''
    - name: Get facts for one Public IP
      azure_rm_publicip_facts:
        resource_group: Testing
        name: publicip001

    - name: Get facts for all Public IPs within a resource groups
      azure_rm_publicip_facts:
        resource_group: Testing
'''

RETURN = '''
azure_publicipaddresses:
    description: List of public IP address dicts.
    returned: always
    type: list
    example: [{
        "etag": 'W/"a31a6d7d-cb18-40a5-b16d-9f4a36c1b18a"',
        "id": "/subscriptions/XXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXX/resourceGroups/Testing/providers/Microsoft.Network/publicIPAddresses/pip2001",
        "location": "eastus2",
        "name": "pip2001",
        "properties": {
            "idleTimeoutInMinutes": 4,
            "provisioningState": "Succeeded",
            "publicIPAllocationMethod": "Dynamic",
            "resourceGuid": "29de82f4-a7da-440e-bd3d-9cabb79af95a"
        },
        "type": "Microsoft.Network/publicIPAddresses"
    }]
'''

try:
    from msrestazure.azure_exceptions import CloudError
    from azure.common import AzureMissingResourceHttpError, AzureHttpError
except ImportError:
    # Bugfix: this was a bare ``except:`` which would also swallow
    # KeyboardInterrupt/SystemExit and genuine bugs. Only a missing Azure SDK
    # is expected here; that case is reported properly by azure_rm_common.
    pass

from ansible.module_utils.azure_rm_common import AzureRMModuleBase


AZURE_OBJECT_CLASS = 'PublicIp'


class AzureRMPublicIPFacts(AzureRMModuleBase):
    """Facts module returning Azure public IP addresses, optionally filtered
    by name, resource group and/or tags.

    Results are placed in ``ansible_facts.azure_publicipaddresses`` as a list
    of serialized public IP dicts.
    """

    def __init__(self):

        self.module_arg_spec = dict(
            name=dict(type='str'),
            resource_group=dict(type='str'),
            tags=dict(type='list')
        )

        self.results = dict(
            changed=False,
            ansible_facts=dict(azure_publicipaddresses=[])
        )

        self.name = None
        self.resource_group = None
        self.tags = None

        super(AzureRMPublicIPFacts, self).__init__(self.module_arg_spec,
                                                   supports_tags=False,
                                                   facts_module=True)

    def exec_module(self, **kwargs):
        """Entry point: validate parameters and dispatch to the narrowest
        listing the supplied filters allow (single item / resource group /
        whole subscription)."""

        for key in self.module_arg_spec:
            setattr(self, key, kwargs[key])

        # A bare name is ambiguous across resource groups, so require both.
        if self.name and not self.resource_group:
            self.fail("Parameter error: resource group required when filtering by name.")

        if self.name:
            self.results['ansible_facts']['azure_publicipaddresses'] = self.get_item()
        elif self.resource_group:
            self.results['ansible_facts']['azure_publicipaddresses'] = self.list_resource_group()
        else:
            self.results['ansible_facts']['azure_publicipaddresses'] = self.list_all()

        return self.results

    def serialize_pip(self, item):
        """Serialize one SDK public IP object to a dict, restoring the
        ``name``/``type`` attributes that serialize_obj() does not include.

        This factors out the identical three-line sequence that was
        duplicated in get_item(), list_resource_group() and list_all().
        """
        pip = self.serialize_obj(item, AZURE_OBJECT_CLASS)
        pip['name'] = item.name
        pip['type'] = item.type
        return pip

    def get_item(self):
        """Return the named public IP as a one-element list, or [] when it
        does not exist or does not match the requested tags."""
        self.log('Get properties for {0}'.format(self.name))
        item = None
        result = []

        try:
            item = self.network_client.public_ip_addresses.get(self.resource_group, self.name)
        except CloudError:
            # Not found: a facts module returns an empty list rather than fail.
            pass

        if item and self.has_tags(item.tags, self.tags):
            result = [self.serialize_pip(item)]

        return result

    def list_resource_group(self):
        """Return all public IPs in self.resource_group matching self.tags."""
        self.log('List items in resource groups')
        try:
            response = self.network_client.public_ip_addresses.list(self.resource_group)
        except AzureHttpError as exc:
            self.fail("Error listing items in resource groups {0} - {1}".format(
                self.resource_group, str(exc)))

        return [self.serialize_pip(item) for item in response
                if self.has_tags(item.tags, self.tags)]

    def list_all(self):
        """Return all public IPs in the subscription matching self.tags."""
        self.log('List all items')
        try:
            response = self.network_client.public_ip_addresses.list_all()
        except AzureHttpError as exc:
            self.fail("Error listing all items - {0}".format(str(exc)))

        return [self.serialize_pip(item) for item in response
                if self.has_tags(item.tags, self.tags)]


def main():
    AzureRMPublicIPFacts()


if __name__ == '__main__':
    main()
gpl-3.0
authbox-lib/thrift
test/crossrunner/prepare.py
50
1686
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#

import os
import subprocess

from crossrunner.collect import collect_testlibs


def prepare(config_dict, testdir, server_match, client_match):
    """Run each matched test library's 'prepare' command, then build any
    test binaries referenced by a library command that do not exist yet
    (by invoking ``make <target>`` in the binary's directory)."""
    server_libs, client_libs = collect_testlibs(config_dict,
                                                server_match, client_match)
    testlibs = server_libs + client_libs

    # (command, workdir) pairs for every library that declares a prepare step.
    prepare_steps = [(lib['prepare'], lib['workdir'])
                     for lib in testlibs if lib.get('prepare')]

    def missing_targets():
        # Yield (directory, filename) for each non-flag command argument
        # whose path does not exist under the library's work directory.
        for lib in testlibs:
            workdir = os.path.join(testdir, lib['workdir'])
            for arg in lib['command']:
                if arg.startswith('-'):
                    continue
                path = os.path.join(workdir, arg)
                if not os.path.exists(path):
                    yield os.path.split(path)

    def spawn_make(target):
        build_dir, target_file = target
        # Discard make's stderr chatter; the caller only waits for completion.
        with open(os.devnull, 'w') as devnull:
            return subprocess.Popen(['make', target_file],
                                    cwd=build_dir, stderr=devnull)

    # Prepare steps run sequentially; builds are launched in parallel
    # (deduplicated via the set) and then awaited.
    for command, workdir in prepare_steps:
        subprocess.Popen(command, cwd=workdir).wait()
    builders = [spawn_make(target) for target in set(missing_targets())]
    for proc in builders:
        proc.wait()
    return True
apache-2.0
xianjunzhengbackup/Cloud-Native-Python
env/lib/python3.5/site-packages/pip/_vendor/requests/packages/urllib3/request.py
714
5988
from __future__ import absolute_import
try:
    from urllib.parse import urlencode
except ImportError:
    from urllib import urlencode

from .filepost import encode_multipart_formdata

__all__ = ['RequestMethods']


class RequestMethods(object):
    """
    Convenience mixin for classes who implement a :meth:`urlopen` method, such
    as :class:`~urllib3.connectionpool.HTTPConnectionPool` and
    :class:`~urllib3.poolmanager.PoolManager`.

    Provides behavior for making common types of HTTP request methods and
    decides which type of request field encoding to use.

    Specifically,

    :meth:`.request_encode_url` is for sending requests whose fields are
    encoded in the URL (such as GET, HEAD, DELETE).

    :meth:`.request_encode_body` is for sending requests whose fields are
    encoded in the *body* of the request using multipart or www-form-urlencoded
    (such as for POST, PUT, PATCH).

    :meth:`.request` is for making any kind of request, it will look up the
    appropriate encoding format and use one of the above two methods to make
    the request.

    Initializer parameters:

    :param headers:
        Headers to include with all requests, unless other headers are given
        explicitly.
    """

    _encode_url_methods = set(['DELETE', 'GET', 'HEAD', 'OPTIONS'])

    def __init__(self, headers=None):
        self.headers = headers or {}

    def urlopen(self, method, url, body=None, headers=None,
                encode_multipart=True, multipart_boundary=None,
                **kw):  # Abstract
        # Bugfix: this previously did ``raise NotImplemented(...)``.
        # ``NotImplemented`` is the (non-callable) comparison singleton, so
        # calling it raised "TypeError: 'NotImplementedType' object is not
        # callable" instead of the intended abstract-method error. Raise the
        # proper built-in NotImplementedError instead.
        raise NotImplementedError("Classes extending RequestMethods must implement "
                                  "their own ``urlopen`` method.")

    def request(self, method, url, fields=None, headers=None, **urlopen_kw):
        """
        Make a request using :meth:`urlopen` with the appropriate encoding of
        ``fields`` based on the ``method`` used.

        This is a convenience method that requires the least amount of manual
        effort. It can be used in most situations, while still having the
        option to drop down to more specific methods when necessary, such as
        :meth:`request_encode_url`, :meth:`request_encode_body`,
        or even the lowest level :meth:`urlopen`.
        """
        method = method.upper()

        # GET-like methods carry their fields in the URL; everything else
        # carries them in the request body.
        if method in self._encode_url_methods:
            return self.request_encode_url(method, url, fields=fields,
                                           headers=headers,
                                           **urlopen_kw)
        else:
            return self.request_encode_body(method, url, fields=fields,
                                            headers=headers,
                                            **urlopen_kw)

    def request_encode_url(self, method, url, fields=None, headers=None,
                           **urlopen_kw):
        """
        Make a request using :meth:`urlopen` with the ``fields`` encoded in
        the url. This is useful for request methods like GET, HEAD, DELETE, etc.
        """
        if headers is None:
            headers = self.headers

        extra_kw = {'headers': headers}
        extra_kw.update(urlopen_kw)

        if fields:
            url += '?' + urlencode(fields)

        return self.urlopen(method, url, **extra_kw)

    def request_encode_body(self, method, url, fields=None, headers=None,
                            encode_multipart=True, multipart_boundary=None,
                            **urlopen_kw):
        """
        Make a request using :meth:`urlopen` with the ``fields`` encoded in
        the body. This is useful for request methods like POST, PUT, PATCH, etc.

        When ``encode_multipart=True`` (default), then
        :meth:`urllib3.filepost.encode_multipart_formdata` is used to encode
        the payload with the appropriate content type. Otherwise
        :meth:`urllib.urlencode` is used with the
        'application/x-www-form-urlencoded' content type.

        Multipart encoding must be used when posting files, and it's reasonably
        safe to use it in other times too. However, it may break request
        signing, such as with OAuth.

        Supports an optional ``fields`` parameter of key/value strings AND
        key/filetuple. A filetuple is a (filename, data, MIME type) tuple where
        the MIME type is optional. For example::

            fields = {
                'foo': 'bar',
                'fakefile': ('foofile.txt', 'contents of foofile'),
                'realfile': ('barfile.txt', open('realfile').read()),
                'typedfile': ('bazfile.bin', open('bazfile').read(),
                              'image/jpeg'),
                'nonamefile': 'contents of nonamefile field',
            }

        When uploading a file, providing a filename (the first parameter of the
        tuple) is optional but recommended to best mimick behavior of browsers.

        Note that if ``headers`` are supplied, the 'Content-Type' header will
        be overwritten because it depends on the dynamic random boundary string
        which is used to compose the body of the request. The random boundary
        string can be explicitly set with the ``multipart_boundary`` parameter.
        """
        if headers is None:
            headers = self.headers

        extra_kw = {'headers': {}}

        if fields:
            if 'body' in urlopen_kw:
                raise TypeError(
                    "request got values for both 'fields' and 'body', can only specify one.")

            if encode_multipart:
                body, content_type = encode_multipart_formdata(fields, boundary=multipart_boundary)
            else:
                body, content_type = urlencode(fields), 'application/x-www-form-urlencoded'

            extra_kw['body'] = body
            # Content-Type must match the chosen encoding (and, for multipart,
            # its boundary), so it deliberately wins over any caller-supplied
            # Content-Type header merged in below.
            extra_kw['headers'] = {'Content-Type': content_type}

        extra_kw['headers'].update(headers)
        extra_kw.update(urlopen_kw)

        return self.urlopen(method, url, **extra_kw)
mit
ClearCorp-dev/odoo-clearcorp
TODO-8.0/account_bank_statement_reconcile_ccorp/account_bank_statement_reconcile_ccorp.py
4
5712
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Addons modules by CLEARCORP S.A.
#    Copyright (C) 2009-TODAY CLEARCORP S.A. (<http://clearcorp.co.cr>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

from openerp.osv import osv


class BankStatement(osv.Model):
    # Extends the core bank statement confirmation with an automatic
    # reconciliation pass over the move lines the confirmation produced.
    _inherit = 'account.bank.statement'

    def button_confirm_bank(self, cr, uid, ids, context=None):
        """Confirm the statement(s), then try to auto-reconcile each resulting
        move line against exactly one unreconciled counterpart.

        Matching strategy, per move line on a reconcilable account:
          1. same ref + opposite amount, same company/period/account;
          2. if no match, fall back to opposite amount with a date exactly
             one day before or after the line's date.
        A line is reconciled only when precisely one candidate is found;
        zero or multiple candidates leave the line untouched.
        """
        res = super(BankStatement,self).button_confirm_bank(cr, uid, ids, context=context)
        for st in self.browse(cr, uid, ids, context=context):
            for move_line in st.move_line_ids:
                # Skip if move_line is reconciled. Legacy due to account_banking_ccorp
                if move_line.reconcile:
                    continue
                # Only accounts flagged as reconcilable can be reconciled.
                if not move_line.account_id.reconcile:
                    continue
                # Check if move line is debit: a debit line needs a credit
                # counterpart of the same amount (and vice versa below).
                if not move_line.debit == 0.0:
                    cr.execute('''SELECT line.id, line.ref, line.debit, line.credit, line.date
                                  FROM
                                      (SELECT line.id, line.ref, line.debit, line.credit, line.date
                                       FROM account_move_line AS line
                                       WHERE line.reconcile_id IS NULL AND
                                             line.reconcile_partial_id IS NULL AND
                                             line.company_id = %s AND
                                             line.period_id = %s AND
                                             line.account_id = %s AND
                                             line.state = 'valid' AND
                                             line.id != %s) AS line
                                  WHERE line.ref = %s AND
                                        line.credit - %s = 0 AND
                                        line.debit = 0;''',
                               (st.company_id.id, st.period_id.id,
                                move_line.account_id.id, move_line.id,
                                move_line.ref or '', move_line.debit))
                # Move line is credit
                else:
                    cr.execute('''SELECT line.id, line.ref, line.debit, line.credit, line.date
                                  FROM
                                      (SELECT line.id, line.ref, line.debit, line.credit, line.date
                                       FROM account_move_line AS line
                                       WHERE line.reconcile_id IS NULL AND
                                             line.reconcile_partial_id IS NULL AND
                                             line.company_id = %s AND
                                             line.period_id = %s AND
                                             line.account_id = %s AND
                                             line.state = 'valid' AND
                                             line.id != %s) AS line
                                  WHERE line.ref = %s AND
                                        line.debit - %s = 0 AND
                                        line.credit = 0;''',
                               (st.company_id.id, st.period_id.id,
                                move_line.account_id.id, move_line.id,
                                move_line.ref or '', move_line.credit))
                result = cr.dictfetchall()
                # Skip if there are more than one match: ambiguous, leave for
                # manual reconciliation.
                if len(result) > 1:
                    continue
                # Try to look for results using dates if
                # no result was found
                elif len(result) == 0:
                    # Check again if move line is debit
                    if not move_line.debit == 0.0:
                        cr.execute('''SELECT line.id, line.ref, line.debit, line.credit, line.date
                                      FROM
                                          (SELECT line.id, line.ref, line.debit, line.credit, line.date
                                           FROM account_move_line AS line
                                           WHERE line.reconcile_id IS NULL AND
                                                 line.reconcile_partial_id IS NULL AND
                                                 line.company_id = %s AND
                                                 line.period_id = %s AND
                                                 line.account_id = %s AND
                                                 line.state = 'valid' AND
                                                 line.id != %s) AS line
                                      WHERE (line.date + 1 = date %s OR
                                             line.date - 1 = date %s) AND
                                            line.credit - %s = 0 AND
                                            line.debit = 0;''',
                                   (st.company_id.id, st.period_id.id,
                                    move_line.account_id.id, move_line.id,
                                    move_line.date, move_line.date,
                                    move_line.debit))
                    # Move line is credit
                    else:
                        cr.execute('''SELECT line.id, line.ref, line.debit, line.credit, line.date
                                      FROM
                                          (SELECT line.id, line.ref, line.debit, line.credit, line.date
                                           FROM account_move_line AS line
                                           WHERE line.reconcile_id IS NULL AND
                                                 line.reconcile_partial_id IS NULL AND
                                                 line.company_id = %s AND
                                                 line.period_id = %s AND
                                                 line.account_id = %s AND
                                                 line.state = 'valid' AND
                                                 line.id != %s) AS line
                                      WHERE (line.date + 1 = date %s OR
                                             line.date - 1 = date %s) AND
                                            line.debit - %s = 0 AND
                                            line.credit = 0;''',
                                   (st.company_id.id, st.period_id.id,
                                    move_line.account_id.id, move_line.id,
                                    move_line.date, move_line.date,
                                    move_line.credit))
                    result = cr.dictfetchall()
                    # Skip if there are more than one match. Or None at all
                    if len(result) > 1 or len(result) == 0:
                        continue
                # Do the full reconcile: exactly one candidate at this point,
                # so pair it with the statement's move line.
                account_move_line_obj = self.pool.get('account.move.line')
                account_move_line_obj.reconcile(cr, uid, [move_line.id, result[0].get('id')],
                                                'manual', move_line.account_id.id,
                                                st.period_id.id, False, context=context)
        return res
agpl-3.0
o5k/openerp-oemedical-v0.1
openerp/addons/email_template/wizard/mail_compose_message.py
31
9734
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2010-Today OpenERP SA (<http://www.openerp.com>)
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU General Public License as published by
#    the Free Software Foundation, either version 3 of the License, or
#    (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU General Public License for more details.
#
#    You should have received a copy of the GNU General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>
#
##############################################################################

from openerp import tools

from openerp.osv import osv
from openerp.osv import fields


def _reopen(self, res_id, model):
    # Build an act_window action that re-opens the given wizard record in a
    # new dialog (used after "save as template" so the user keeps editing).
    return {'type': 'ir.actions.act_window',
            'view_mode': 'form',
            'view_type': 'form',
            'res_id': res_id,
            'res_model': self._name,
            'target': 'new',
            # save original model in context, because selecting the list of available
            # templates requires a model in context
            'context': {
                'default_model': model,
            },
            }


class mail_compose_message(osv.TransientModel):
    # Extends the mail composer wizard with email.template support:
    # template selection, rendering, attachment duplication and
    # save-as-template.
    _inherit = 'mail.compose.message'

    def _get_templates(self, cr, uid, context=None):
        # Selection values for 'template_id': all email.template records
        # bound to the model being composed for, plus an empty choice.
        if context is None:
            context = {}
        model = False
        email_template_obj = self.pool.get('email.template')
        message_id = context.get('default_parent_id', context.get('message_id', context.get('active_id')))
        if context.get('default_composition_mode') == 'reply' and message_id:
            # Replying: take the model from the message being answered.
            message_data = self.pool.get('mail.message').browse(cr, uid, message_id, context=context)
            if message_data:
                model = message_data.model
        else:
            model = context.get('default_model', context.get('active_model'))
        record_ids = email_template_obj.search(cr, uid, [('model', '=', model)], context=context)
        return email_template_obj.name_get(cr, uid, record_ids, context) + [(False, '')]

    _columns = {
        # incredible hack of the day: size=-1 means we want an int db column instead of an str one
        'template_id': fields.selection(_get_templates, 'Template', size=-1),
    }

    def send_mail(self, cr, uid, ids, context=None):
        """ Override of send_mail to duplicate attachments linked to the email.template.
            Indeed, basic mail.compose.message wizard duplicates attachments in mass
            mailing mode. But in 'single post' mode, attachments of an email template
            also have to be duplicated to avoid changing their ownership. """
        for wizard in self.browse(cr, uid, ids, context=context):
            # Only 'single post' wizards with a template need the duplication.
            if not wizard.attachment_ids or wizard.composition_mode == 'mass_mail' or not wizard.template_id:
                continue
            template = self.pool.get('email.template').browse(cr, uid, wizard.template_id, context=context)
            new_attachment_ids = []
            for attachment in wizard.attachment_ids:
                if attachment in template.attachment_ids:
                    # Attachment belongs to the template: copy it onto the
                    # wizard so the template keeps its own record.
                    new_attachment_ids.append(self.pool.get('ir.attachment').copy(cr, uid, attachment.id, {'res_model': 'mail.compose.message', 'res_id': wizard.id}, context=context))
                else:
                    new_attachment_ids.append(attachment.id)
            self.write(cr, uid, wizard.id, {'attachment_ids': [(6, 0, new_attachment_ids)]}, context=context)
        return super(mail_compose_message, self).send_mail(cr, uid, ids, context=context)

    def onchange_template_id(self, cr, uid, ids, template_id, composition_mode, model, res_id, context=None):
        """ - mass_mailing: we cannot render, so return the template values
            - normal mode: return rendered values """
        if template_id and composition_mode == 'mass_mail':
            # Mass mailing: rendering happens per-record later; return the
            # raw template fields untouched.
            values = self.pool.get('email.template').read(cr, uid, template_id, ['subject', 'body_html', 'attachment_ids'], context)
            values.pop('id')
        elif template_id:
            values = self.generate_email_for_composer(cr, uid, template_id, res_id, context=context)
            # transform attachments into attachment_ids; not attached to the document because this will
            # be done further in the posting process, allowing to clean database if email not send
            values['attachment_ids'] = values.pop('attachment_ids', [])
            ir_attach_obj = self.pool.get('ir.attachment')
            for attach_fname, attach_datas in values.pop('attachments', []):
                data_attach = {
                    'name': attach_fname,
                    'datas': attach_datas,
                    'datas_fname': attach_fname,
                    'res_model': 'mail.compose.message',
                    'res_id': 0,
                    'type': 'binary',  # override default_type from context, possibly meant for another model!
                }
                values['attachment_ids'].append(ir_attach_obj.create(cr, uid, data_attach, context=context))
        else:
            # No template selected: fall back to the wizard's defaults.
            values = self.default_get(cr, uid, ['body', 'subject', 'partner_ids', 'attachment_ids'], context=context)

        # The composer field is 'body', not the template's 'body_html'.
        if values.get('body_html'):
            values['body'] = values.pop('body_html')
        return {'value': values}

    def save_as_template(self, cr, uid, ids, context=None):
        """ hit save as template button: current form value will be a new
            template attached to the current document. """
        email_template = self.pool.get('email.template')
        ir_model_pool = self.pool.get('ir.model')
        for record in self.browse(cr, uid, ids, context=context):
            model_ids = ir_model_pool.search(cr, uid, [('model', '=', record.model)], context=context)
            model_id = model_ids and model_ids[0] or False
            model_name = ''
            if model_id:
                model_name = ir_model_pool.browse(cr, uid, model_id, context=context).name
            template_name = "%s: %s" % (model_name, tools.ustr(record.subject))
            values = {
                'name': template_name,
                'subject': record.subject or False,
                'body_html': record.body or False,
                'model_id': model_id or False,
                'attachment_ids': [(6, 0, [att.id for att in record.attachment_ids])]
            }
            template_id = email_template.create(cr, uid, values, context=context)
            # Reload the wizard values from the freshly created template, then
            # reopen the wizard dialog on this record.
            record.write(record.onchange_template_id(template_id, record.composition_mode, record.model, record.res_id)['value'])
            return _reopen(self, record.id, record.model)

    #------------------------------------------------------
    # Wizard validation and send
    #------------------------------------------------------

    def generate_email_for_composer(self, cr, uid, template_id, res_id, context=None):
        """ Call email_template.generate_email(), get fields relevant for
            mail.compose.message, transform email_cc and email_to into partner_ids """
        template_values = self.pool.get('email.template').generate_email(cr, uid, template_id, res_id, context=context)
        # filter template values
        fields = ['body_html', 'subject', 'email_to', 'email_recipients', 'email_cc', 'attachment_ids', 'attachments']
        values = dict((field, template_values[field]) for field in fields if template_values.get(field))
        values['body'] = values.pop('body_html', '')

        # transform email_to, email_cc into partner_ids
        partner_ids = set()
        mails = tools.email_split(values.pop('email_to', '')) + tools.email_split(values.pop('email_cc', ''))
        # Strip 'default_*' keys so find_or_create is not polluted by the
        # composer's own defaults.
        ctx = dict((k, v) for k, v in (context or {}).items() if not k.startswith('default_'))
        for mail in mails:
            partner_id = self.pool.get('res.partner').find_or_create(cr, uid, mail, context=ctx)
            partner_ids.add(partner_id)
        email_recipients = values.pop('email_recipients', '')
        if email_recipients:
            for partner_id in email_recipients.split(','):
                if partner_id:  # placeholders could generate '', 3, 2 due to some empty field values
                    partner_ids.add(int(partner_id))
        # legacy template behavior: void values do not erase existing values and the
        # related key is removed from the values dict
        if partner_ids:
            values['partner_ids'] = list(partner_ids)

        return values

    def render_message(self, cr, uid, wizard, res_id, context=None):
        """ Override to handle templates. """
        # generate the composer email
        if wizard.template_id:
            values = self.generate_email_for_composer(cr, uid, wizard.template_id, res_id, context=context)
        else:
            values = {}
        # remove attachments as they should not be rendered
        values.pop('attachment_ids', None)
        # get values to return
        email_dict = super(mail_compose_message, self).render_message(cr, uid, wizard, res_id, context)
        values.update(email_dict)
        return values

    def render_template(self, cr, uid, template, model, res_id, context=None):
        # Delegate placeholder rendering to the email.template engine.
        return self.pool.get('email.template').render_template(cr, uid, template, model, res_id, context=context)

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
agpl-3.0
sarahn/ganeti
test/py/ganeti.impexpd_unittest.py
9
8733
#!/usr/bin/python
#
# Copyright (C) 2010 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.

"""Script for testing ganeti.impexpd"""

import os
import sys
import re
import unittest
import socket

from ganeti import constants
from ganeti import objects
from ganeti import compat
from ganeti import utils
from ganeti import errors

from ganeti import impexpd

import testutils


class CmdBuilderConfig(objects.ConfigObject):
  # Minimal stand-in for the real import/export option object: only the
  # attributes CommandBuilder reads are declared.
  __slots__ = [
    "bind",
    "key",
    "cert",
    "ca",
    "host",
    "port",
    "ipv4",
    "ipv6",
    "compress",
    "magic",
    "connect_timeout",
    "connect_retries",
    "cmd_prefix",
    "cmd_suffix",
    ]


def CheckCmdWord(cmd, word):
  # True iff `word` appears as a whole word in any element of the command list.
  wre = re.compile(r"\b%s\b" % re.escape(word))
  return compat.any(wre.search(i) for i in cmd)


class TestCommandBuilder(unittest.TestCase):
  def test(self):
    # Exercise the full cross-product of mode/compression/magic and then
    # host/port/prefix/suffix combinations.
    for mode in [constants.IEM_IMPORT, constants.IEM_EXPORT]:
      if mode == constants.IEM_IMPORT:
        comprcmd = "gunzip"
      elif mode == constants.IEM_EXPORT:
        comprcmd = "gzip"

      for compress in [constants.IEC_NONE, constants.IEC_GZIP]:
        for magic in [None, 10 * "-", "HelloWorld", "J9plh4nFo2",
                      "24A02A81-2264-4B51-A882-A2AB9D85B420"]:
          opts = CmdBuilderConfig(magic=magic, compress=compress)
          builder = impexpd.CommandBuilder(mode, opts, 1, 2, 3)

          magic_cmd = builder._GetMagicCommand()
          dd_cmd = builder._GetDdCommand()

          if magic:
            # The magic value must be threaded through both sub-commands.
            self.assert_(("M=%s" % magic) in magic_cmd)
            self.assert_(("M=%s" % magic) in dd_cmd)
          else:
            self.assertFalse(magic_cmd)

        for host in ["localhost", "198.51.100.4", "192.0.2.99"]:
          for port in [0, 1, 1234, 7856, 45452]:
            for cmd_prefix in [None, "PrefixCommandGoesHere|",
                               "dd if=/dev/hda bs=1048576 |"]:
              for cmd_suffix in [None, "< /some/file/name",
                                 "| dd of=/dev/null"]:
                opts = CmdBuilderConfig(host=host, port=port,
                                        compress=compress,
                                        cmd_prefix=cmd_prefix,
                                        cmd_suffix=cmd_suffix)
                builder = impexpd.CommandBuilder(mode, opts, 1, 2, 3)

                # Check complete command
                cmd = builder.GetCommand()
                self.assert_(isinstance(cmd, list))

                if compress == constants.IEC_GZIP:
                  self.assert_(CheckCmdWord(cmd, comprcmd))

                if cmd_prefix is not None:
                  self.assert_(compat.any(cmd_prefix in i for i in cmd))

                if cmd_suffix is not None:
                  self.assert_(compat.any(cmd_suffix in i for i in cmd))

                # Check socat command: import listens, export connects.
                socat_cmd = builder._GetSocatCommand()

                if mode == constants.IEM_IMPORT:
                  ssl_addr = socat_cmd[-2].split(",")
                  self.assert_(("OPENSSL-LISTEN:%s" % port) in ssl_addr)
                elif mode == constants.IEM_EXPORT:
                  ssl_addr = socat_cmd[-1].split(",")
                  self.assert_(("OPENSSL:%s:%s" % (host, port)) in ssl_addr)

                self.assert_("verify=1" in ssl_addr)

  def testIPv6(self):
    # Neither family forced: no pf= option may appear.
    for mode in [constants.IEM_IMPORT, constants.IEM_EXPORT]:
      opts = CmdBuilderConfig(host="localhost", port=6789,
                              ipv4=False, ipv6=False)
      builder = impexpd.CommandBuilder(mode, opts, 1, 2, 3)
      cmd = builder._GetSocatCommand()
      self.assert_(compat.all("pf=" not in i for i in cmd))

      # IPv4
      opts = CmdBuilderConfig(host="localhost", port=6789,
                              ipv4=True, ipv6=False)
      builder = impexpd.CommandBuilder(mode, opts, 1, 2, 3)
      cmd = builder._GetSocatCommand()
      self.assert_(compat.any(",pf=ipv4" in i for i in cmd))

      # IPv6
      opts = CmdBuilderConfig(host="localhost", port=6789,
                              ipv4=False, ipv6=True)
      builder = impexpd.CommandBuilder(mode, opts, 1, 2, 3)
      cmd = builder._GetSocatCommand()
      self.assert_(compat.any(",pf=ipv6" in i for i in cmd))

      # IPv4 and IPv6: mutually exclusive, must be rejected.
      opts = CmdBuilderConfig(host="localhost", port=6789,
                              ipv4=True, ipv6=True)
      builder = impexpd.CommandBuilder(mode, opts, 1, 2, 3)
      self.assertRaises(AssertionError, builder._GetSocatCommand)

  def testCommaError(self):
    # Commas are socat option separators, so they must be rejected in paths.
    opts = CmdBuilderConfig(host="localhost", port=1234,
                            ca="/some/path/with,a/,comma")

    for mode in [constants.IEM_IMPORT, constants.IEM_EXPORT]:
      builder = impexpd.CommandBuilder(mode, opts, 1, 2, 3)
      self.assertRaises(errors.GenericError, builder.GetCommand)

  def testOptionLengthError(self):
    # Any single socat option longer than SOCAT_OPTION_MAXLEN must fail.
    testopts = [
      CmdBuilderConfig(bind="0.0.0.0" + ("A" * impexpd.SOCAT_OPTION_MAXLEN),
                       port=1234, ca="/tmp/ca"),
      CmdBuilderConfig(host="localhost", port=1234,
                       ca="/tmp/ca" + ("B" * impexpd.SOCAT_OPTION_MAXLEN)),
      CmdBuilderConfig(host="localhost", port=1234,
                       key="/tmp/key" + ("B" * impexpd.SOCAT_OPTION_MAXLEN)),
      ]

    for opts in testopts:
      for mode in [constants.IEM_IMPORT, constants.IEM_EXPORT]:
        builder = impexpd.CommandBuilder(mode, opts, 1, 2, 3)
        self.assertRaises(errors.GenericError, builder.GetCommand)

      # Over-long host is only used (and thus only rejected) on export.
      opts.host = "localhost" + ("A" * impexpd.SOCAT_OPTION_MAXLEN)
      builder = impexpd.CommandBuilder(constants.IEM_EXPORT, opts, 1, 2, 3)
      self.assertRaises(errors.GenericError, builder.GetCommand)

  def testModeError(self):
    # Unknown mode must be rejected at command-build time.
    mode = "foobarbaz"

    assert mode not in [constants.IEM_IMPORT, constants.IEM_EXPORT]

    opts = CmdBuilderConfig(host="localhost", port=1234)
    builder = impexpd.CommandBuilder(mode, opts, 1, 2, 3)
    self.assertRaises(errors.GenericError, builder.GetCommand)


class TestVerifyListening(unittest.TestCase):
  def test(self):
    # Valid addresses are returned normalized (IPv6 brackets stripped).
    self.assertEqual(impexpd._VerifyListening(socket.AF_INET,
                                              "192.0.2.7", 1234),
                     ("192.0.2.7", 1234))

    self.assertEqual(impexpd._VerifyListening(socket.AF_INET6, "::1", 9876),
                     ("::1", 9876))

    self.assertEqual(impexpd._VerifyListening(socket.AF_INET6, "[::1]", 4563),
                     ("::1", 4563))

    self.assertEqual(impexpd._VerifyListening(socket.AF_INET6,
                                              "[2001:db8::1:4563]", 4563),
                     ("2001:db8::1:4563", 4563))

  def testError(self):
    # Empty/truncated addresses fail for every family.
    for family in [socket.AF_UNIX, socket.AF_INET, socket.AF_INET6]:
      self.assertRaises(errors.GenericError, impexpd._VerifyListening,
                        family, "", 1234)
      self.assertRaises(errors.GenericError, impexpd._VerifyListening,
                        family, "192", 999)

    # IPv4 literals and unbalanced brackets fail for non-IPv4 families.
    for family in [socket.AF_UNIX, socket.AF_INET6]:
      self.assertRaises(errors.GenericError, impexpd._VerifyListening,
                        family, "192.0.2.7", 1234)
      self.assertRaises(errors.GenericError, impexpd._VerifyListening,
                        family, "[2001:db8::1", 1234)
      self.assertRaises(errors.GenericError, impexpd._VerifyListening,
                        family, "2001:db8::1]", 1234)

    # IPv6 literals fail for non-IPv6 families.
    for family in [socket.AF_UNIX, socket.AF_INET]:
      self.assertRaises(errors.GenericError, impexpd._VerifyListening,
                        family, "::1", 1234)


class TestCalcThroughput(unittest.TestCase):
  def test(self):
    # Fewer than two samples: throughput is undefined.
    self.assertEqual(impexpd._CalcThroughput([]), None)
    self.assertEqual(impexpd._CalcThroughput([(0, 0)]), None)

    samples = [
      (0.0, 0.0),
      (10.0, 100.0),
      ]
    self.assertAlmostEqual(impexpd._CalcThroughput(samples), 10.0, 3)

    samples = [
      (5.0, 7.0),
      (10.0, 100.0),
      (16.0, 181.0),
      ]
    self.assertAlmostEqual(impexpd._CalcThroughput(samples), 15.818, 3)


if __name__ == "__main__":
  testutils.GanetiTestProgram()
gpl-2.0